Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py  422
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py  6
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py  113
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py  29
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py  282
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py  227
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py  41
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2.py  196
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2_unittest.py  83
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py  54
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py  8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py  322
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py  554
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py  293
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py  4
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py  56
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py  107
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py  129
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/worker.py  104
19 files changed, 2120 insertions, 910 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 050eefa..7ddd7b0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -28,17 +28,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""A Thread object for running DumpRenderTree and processing URLs from a
-shared queue.
+"""This module implements a shared-memory, thread-based version of the worker
+task in new-run-webkit-tests: it receives a list of tests from TestShellThread
+and passes them one at a time to SingleTestRunner to execute."""
-Each thread runs a separate instance of the DumpRenderTree binary and validates
-the output. When there are no more URLs to process in the shared queue, the
-thread exits.
-"""
-
-import copy
import logging
-import os
import Queue
import signal
import sys
@@ -46,199 +40,13 @@ import thread
import threading
import time
-
-from webkitpy.layout_tests.test_types import image_diff
-from webkitpy.layout_tests.test_types import test_type_base
-from webkitpy.layout_tests.test_types import text_diff
-
-import test_failures
-import test_output
-import test_results
+from webkitpy.layout_tests.layout_package.single_test_runner import SingleTestRunner
_log = logging.getLogger("webkitpy.layout_tests.layout_package."
"dump_render_tree_thread")
-def _expected_test_output(port, filename):
- """Returns an expected TestOutput object."""
- return test_output.TestOutput(port.expected_text(filename),
- port.expected_image(filename),
- port.expected_checksum(filename))
-
-def _process_output(port, options, test_input, test_types, test_args,
- test_output, worker_name):
- """Receives the output from a DumpRenderTree process, subjects it to a
- number of tests, and returns a list of failure types the test produced.
-
- Args:
- port: port-specific hooks
- options: command line options argument from optparse
- proc: an active DumpRenderTree process
- test_input: Object containing the test filename and timeout
- test_types: list of test types to subject the output to
- test_args: arguments to be passed to each test
- test_output: a TestOutput object containing the output of the test
- worker_name: worker name for logging
-
- Returns: a TestResult object
- """
- failures = []
- fs = port._filesystem
-
- if test_output.crash:
- failures.append(test_failures.FailureCrash())
- if test_output.timeout:
- failures.append(test_failures.FailureTimeout())
-
- test_name = port.relative_test_filename(test_input.filename)
- if test_output.crash:
- _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name,
- test_output.error))
- filename = fs.join(options.results_directory, test_name)
- filename = fs.splitext(filename)[0] + "-stack.txt"
- fs.maybe_make_directory(fs.dirname(filename))
- fs.write_text_file(filename, test_output.error)
- elif test_output.error:
- _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name,
- test_output.error))
-
- expected_test_output = _expected_test_output(port, test_input.filename)
-
- # Check the output and save the results.
- start_time = time.time()
- time_for_diffs = {}
- for test_type in test_types:
- start_diff_time = time.time()
- new_failures = test_type.compare_output(port, test_input.filename,
- test_args, test_output,
- expected_test_output)
- # Don't add any more failures if we already have a crash, so we don't
- # double-report those tests. We do double-report for timeouts since
- # we still want to see the text and image output.
- if not test_output.crash:
- failures.extend(new_failures)
- time_for_diffs[test_type.__class__.__name__] = (
- time.time() - start_diff_time)
-
- total_time_for_all_diffs = time.time() - start_diff_time
- return test_results.TestResult(test_input.filename, failures, test_output.test_time,
- total_time_for_all_diffs, time_for_diffs)
-
-
-def _pad_timeout(timeout):
- """Returns a safe multiple of the per-test timeout value to use
- to detect hung test threads.
-
- """
- # When we're running one test per DumpRenderTree process, we can
- # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x
- # the timeout; we want to be larger than that.
- return timeout * 3
-
-
-def _milliseconds_to_seconds(msecs):
- return float(msecs) / 1000.0
-
-
-def _should_fetch_expected_checksum(options):
- return options.pixel_tests and not (options.new_baseline or options.reset_results)
-
-
-def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name):
- # FIXME: Pull this into TestShellThread._run().
-
- # The image hash is used to avoid doing an image dump if the
- # checksums match, so it should be set to a blank value if we
- # are generating a new baseline. (Otherwise, an image from a
- # previous run will be copied into the baseline."""
- if _should_fetch_expected_checksum(options):
- test_input.image_hash = port.expected_checksum(test_input.filename)
- test_output = driver.run_test(test_input)
- return _process_output(port, options, test_input, test_types, test_args,
- test_output, worker_name)
-
-
-class SingleTestThread(threading.Thread):
- """Thread wrapper for running a single test file."""
-
- def __init__(self, port, options, worker_number, worker_name,
- test_input, test_types, test_args):
- """
- Args:
- port: object implementing port-specific hooks
- options: command line argument object from optparse
- worker_number: worker number for tests
- worker_name: for logging
- test_input: Object containing the test filename and timeout
- test_types: A list of TestType objects to run the test output
- against.
- test_args: A TestArguments object to pass to each TestType.
- """
-
- threading.Thread.__init__(self)
- self._port = port
- self._options = options
- self._test_input = test_input
- self._test_types = test_types
- self._test_args = test_args
- self._driver = None
- self._worker_number = worker_number
- self._name = worker_name
-
- def run(self):
- self._covered_run()
-
- def _covered_run(self):
- # FIXME: this is a separate routine to work around a bug
- # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- self._driver = self._port.create_driver(self._worker_number)
- self._driver.start()
- self._test_result = _run_single_test(self._port, self._options,
- self._test_input, self._test_types,
- self._test_args, self._driver,
- self._name)
- self._driver.stop()
-
- def get_test_result(self):
- return self._test_result
-
-
-class WatchableThread(threading.Thread):
- """This class abstracts an interface used by
- run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread
- management."""
- def __init__(self):
- threading.Thread.__init__(self)
- self._canceled = False
- self._exception_info = None
- self._next_timeout = None
- self._thread_id = None
-
- def cancel(self):
- """Set a flag telling this thread to quit."""
- self._canceled = True
-
- def clear_next_timeout(self):
- """Mark a flag telling this thread to stop setting timeouts."""
- self._timeout = 0
-
- def exception_info(self):
- """If run() terminated on an uncaught exception, return it here
- ((type, value, traceback) tuple).
- Returns None if run() terminated normally. Meant to be called after
- joining this thread."""
- return self._exception_info
-
- def id(self):
- """Return a thread identifier."""
- return self._thread_id
-
- def next_timeout(self):
- """Return the time the test is supposed to finish by."""
- return self._next_timeout
-
-
-class TestShellThread(WatchableThread):
+class TestShellThread(threading.Thread):
def __init__(self, port, options, worker_number, worker_name,
filename_list_queue, result_queue):
"""Initialize all the local state for this DumpRenderTree thread.
@@ -253,50 +61,51 @@ class TestShellThread(WatchableThread):
result_queue: A thread safe Queue class that will contain
serialized TestResult objects.
"""
- WatchableThread.__init__(self)
+ threading.Thread.__init__(self)
+ self._canceled = False
+ self._exception_info = None
+ self._next_timeout = None
+ self._thread_id = None
self._port = port
self._options = options
self._worker_number = worker_number
self._name = worker_name
self._filename_list_queue = filename_list_queue
self._result_queue = result_queue
+ self._current_group = None
self._filename_list = []
- self._driver = None
self._test_group_timing_stats = {}
self._test_results = []
self._num_tests = 0
self._start_time = 0
self._stop_time = 0
- self._have_http_lock = False
self._http_lock_wait_begin = 0
self._http_lock_wait_end = 0
- self._test_types = []
- for cls in self._get_test_type_classes():
- self._test_types.append(cls(self._port,
- self._options.results_directory))
- self._test_args = self._get_test_args(worker_number)
+ def cancel(self):
+ """Set a flag telling this thread to quit."""
+ self._canceled = True
- # Current group of tests we're running.
- self._current_group = None
- # Number of tests in self._current_group.
- self._num_tests_in_current_group = None
- # Time at which we started running tests from self._current_group.
- self._current_group_start_time = None
+ def clear_next_timeout(self):
+ """Mark a flag telling this thread to stop setting timeouts."""
+ self._timeout = 0
- def _get_test_args(self, worker_number):
- """Returns the tuple of arguments for tests and for DumpRenderTree."""
- test_args = test_type_base.TestArguments()
- test_args.new_baseline = self._options.new_baseline
- test_args.reset_results = self._options.reset_results
+ def exception_info(self):
+ """If run() terminated on an uncaught exception, return it here
+ ((type, value, traceback) tuple).
+ Returns None if run() terminated normally. Meant to be called after
+ joining this thread."""
+ return self._exception_info
- return test_args
+ def id(self):
+ """Return a thread identifier."""
+ return self._thread_id
- def _get_test_type_classes(self):
- classes = [text_diff.TestTextDiff]
- if self._options.pixel_tests:
- classes.append(image_diff.ImageDiff)
- return classes
+ def next_timeout(self):
+ """Return the time the test is supposed to finish by."""
+ if self._next_timeout:
+ return self._next_timeout + self._http_lock_wait_time()
+ return self._next_timeout
def get_test_group_timing_stats(self):
"""Returns a dictionary mapping test group to a tuple of
@@ -352,17 +161,6 @@ class TestShellThread(WatchableThread):
do multi-threaded debugging."""
self._run(test_runner, result_summary)
- def cancel(self):
- """Clean up http lock and set a flag telling this thread to quit."""
- self._stop_servers_with_lock()
- WatchableThread.cancel(self)
-
- def next_timeout(self):
- """Return the time the test is supposed to finish by."""
- if self._next_timeout:
- return self._next_timeout + self._http_lock_wait_time()
- return self._next_timeout
-
def _http_lock_wait_time(self):
"""Return the time what http locking takes."""
if self._http_lock_wait_begin == 0:
@@ -377,18 +175,23 @@ class TestShellThread(WatchableThread):
If test_runner is not None, then we call test_runner.UpdateSummary()
with the results of each test."""
+ single_test_runner = SingleTestRunner(self._options, self._port,
+ self._name, self._worker_number)
+
batch_size = self._options.batch_size
batch_count = 0
# Append tests we're running to the existing tests_run.txt file.
# This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
tests_run_filename = self._port._filesystem.join(self._options.results_directory,
- "tests_run.txt")
+ "tests_run%d.txt" % self._worker_number)
tests_run_file = self._port._filesystem.open_text_file_for_writing(tests_run_filename, append=False)
+
while True:
if self._canceled:
_log.debug('Testing cancelled')
tests_run_file.close()
+ single_test_runner.cleanup()
return
if len(self._filename_list) is 0:
@@ -401,15 +204,16 @@ class TestShellThread(WatchableThread):
self._current_group, self._filename_list = \
self._filename_list_queue.get_nowait()
except Queue.Empty:
- self._stop_servers_with_lock()
- self._kill_dump_render_tree()
tests_run_file.close()
+ single_test_runner.cleanup()
return
if self._current_group == "tests_to_http_lock":
- self._start_servers_with_lock()
- elif self._have_http_lock:
- self._stop_servers_with_lock()
+ self._http_lock_wait_begin = time.time()
+ single_test_runner.start_servers_with_lock()
+ self._http_lock_wait_end = time.time()
+ elif single_test_runner.has_http_lock:
+ single_test_runner.stop_servers_with_lock()
self._num_tests_in_current_group = len(self._filename_list)
self._current_group_start_time = time.time()
@@ -419,145 +223,31 @@ class TestShellThread(WatchableThread):
# We have a url, run tests.
batch_count += 1
self._num_tests += 1
- if self._options.run_singly:
- result = self._run_test_in_another_thread(test_input)
- else:
- result = self._run_test_in_this_thread(test_input)
- filename = test_input.filename
- tests_run_file.write(filename + "\n")
+ timeout = single_test_runner.timeout(test_input)
+ result = single_test_runner.run_test(test_input, timeout)
+
+ tests_run_file.write(test_input.filename + "\n")
+ test_name = self._port.relative_test_filename(test_input.filename)
if result.failures:
# Check and kill DumpRenderTree if we need to.
- if len([1 for f in result.failures
- if f.should_kill_dump_render_tree()]):
- self._kill_dump_render_tree()
+ if any([f.should_kill_dump_render_tree() for f in result.failures]):
+ single_test_runner.kill_dump_render_tree()
# Reset the batch count since the shell just bounced.
batch_count = 0
+
# Print the error message(s).
- error_str = '\n'.join([' ' + f.message() for
- f in result.failures])
- _log.debug("%s %s failed:\n%s" % (self.getName(),
- self._port.relative_test_filename(filename),
- error_str))
+ _log.debug("%s %s failed:" % (self._name, test_name))
+ for f in result.failures:
+ _log.debug("%s %s" % (self._name, f.message()))
else:
- _log.debug("%s %s passed" % (self.getName(),
- self._port.relative_test_filename(filename)))
+ _log.debug("%s %s passed" % (self._name, test_name))
self._result_queue.put(result.dumps())
if batch_size > 0 and batch_count >= batch_size:
# Bounce the shell and reset count.
- self._kill_dump_render_tree()
+ single_test_runner.kill_dump_render_tree()
batch_count = 0
if test_runner:
test_runner.update_summary(result_summary)
-
- def _run_test_in_another_thread(self, test_input):
- """Run a test in a separate thread, enforcing a hard time limit.
-
- Since we can only detect the termination of a thread, not any internal
- state or progress, we can only run per-test timeouts when running test
- files singly.
-
- Args:
- test_input: Object containing the test filename and timeout
-
- Returns:
- A TestResult
- """
- worker = SingleTestThread(self._port,
- self._options,
- self._worker_number,
- self._name,
- test_input,
- self._test_types,
- self._test_args)
-
- worker.start()
-
- thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_input.timeout)))
- thread._next_timeout = time.time() + thread_timeout
- worker.join(thread_timeout)
- if worker.isAlive():
- # If join() returned with the thread still running, the
- # DumpRenderTree is completely hung and there's nothing
- # more we can do with it. We have to kill all the
- # DumpRenderTrees to free it up. If we're running more than
- # one DumpRenderTree thread, we'll end up killing the other
- # DumpRenderTrees too, introducing spurious crashes. We accept
- # that tradeoff in order to avoid losing the rest of this
- # thread's results.
- _log.error('Test thread hung: killing all DumpRenderTrees')
- if worker._driver:
- worker._driver.stop()
-
- try:
- result = worker.get_test_result()
- except AttributeError, e:
- # This gets raised if the worker thread has already exited.
- _log.error('Cannot get results of test: %s' % test_input.filename)
- # FIXME: Seems we want a unique failure type here.
- result = test_results.TestResult(test_input.filename)
-
- return result
-
- def _run_test_in_this_thread(self, test_input):
- """Run a single test file using a shared DumpRenderTree process.
-
- Args:
- test_input: Object containing the test filename, uri and timeout
-
- Returns: a TestResult object.
- """
- self._ensure_dump_render_tree_is_running()
- thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_input.timeout)))
- self._next_timeout = time.time() + thread_timeout
- test_result = _run_single_test(self._port, self._options, test_input,
- self._test_types, self._test_args,
- self._driver, self._name)
- self._test_results.append(test_result)
- return test_result
-
- def _ensure_dump_render_tree_is_running(self):
- """Start the shared DumpRenderTree, if it's not running.
-
- This is not for use when running tests singly, since those each start
- a separate DumpRenderTree in their own thread.
-
- """
- # poll() is not threadsafe and can throw OSError due to:
- # http://bugs.python.org/issue1731717
- if not self._driver or self._driver.poll() is not None:
- self._driver = self._port.create_driver(self._worker_number)
- self._driver.start()
-
- def _start_servers_with_lock(self):
- """Acquire http lock and start the servers."""
- self._http_lock_wait_begin = time.time()
- _log.debug('Acquire http lock ...')
- self._port.acquire_http_lock()
- _log.debug('Starting HTTP server ...')
- self._port.start_http_server()
- _log.debug('Starting WebSocket server ...')
- self._port.start_websocket_server()
- self._http_lock_wait_end = time.time()
- self._have_http_lock = True
-
- def _stop_servers_with_lock(self):
- """Stop the servers and release http lock."""
- if self._have_http_lock:
- _log.debug('Stopping HTTP server ...')
- self._port.stop_http_server()
- _log.debug('Stopping WebSocket server ...')
- self._port.stop_websocket_server()
- _log.debug('Release http lock ...')
- self._port.release_http_lock()
- self._have_http_lock = False
-
- def _kill_dump_render_tree(self):
- """Kill the DumpRenderTree process if it's running."""
- if self._driver:
- self._driver.stop()
- self._driver = None
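
The net effect of the change above is that TestShellThread no longer drives DumpRenderTree, the test-type comparisons, or the http lock itself; it pulls groups of tests off the shared queue and hands each one to a SingleTestRunner. A rough condensation of the new loop, using the SingleTestRunner interface shown in this diff (timeout(), run_test(), kill_dump_render_tree(), cleanup()); http-lock handling, batch bouncing, timing stats and logging are omitted, so this is a sketch rather than the actual code:

    import Queue

    def worker_loop(filename_list_queue, result_queue, single_test_runner):
        # Pull (group, tests) tuples off the shared queue until it is empty,
        # then let the runner shut down its DumpRenderTree instance.
        while True:
            try:
                group, filenames = filename_list_queue.get_nowait()
            except Queue.Empty:
                single_test_runner.cleanup()
                return
            for test_input in filenames:
                timeout = single_test_runner.timeout(test_input)
                result = single_test_runner.run_test(test_input, timeout)
                # Bounce DumpRenderTree after failures that poison the process.
                if any(f.should_kill_dump_render_tree() for f in result.failures):
                    single_test_runner.kill_dump_render_tree()
                result_queue.put(result.dumps())
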
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index 3267fb7..8226ed0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -55,8 +55,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_timings, expectations, result_summary, all_tests,
- generate_incremental_results=False, test_results_server=None,
- test_type="", master_name=""):
+ test_results_server=None, test_type="", master_name=""):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
@@ -67,8 +66,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
super(JSONLayoutResultsGenerator, self).__init__(
port, builder_name, build_name, build_number, results_file_base_path,
builder_base_url, {}, port.test_repository_paths(),
- generate_incremental_results, test_results_server,
- test_type, master_name)
+ test_results_server, test_type, master_name)
self._expectations = expectations
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 32ffd71..05662c2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -114,13 +114,16 @@ class JSONResultsGeneratorBase(object):
URL_FOR_TEST_LIST_JSON = \
"http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s"
+ # FIXME: Remove generate_incremental_results once the reference to it in
+ # http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
+ # has been removed.
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
- generate_incremental_results=False,
test_results_server=None,
test_type="",
- master_name=""):
+ master_name="",
+ generate_incremental_results=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
@@ -137,8 +140,6 @@ class JSONResultsGeneratorBase(object):
svn_repositories: A (json_field_name, svn_path) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
- generate_incremental_results: If true, generate incremental json file
- from current run results.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
@@ -157,7 +158,6 @@ class JSONResultsGeneratorBase(object):
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
- self._generate_incremental_results = generate_incremental_results
self._svn_repositories = svn_repositories
if not self._svn_repositories:
@@ -167,39 +167,20 @@ class JSONResultsGeneratorBase(object):
self._test_type = test_type
self._master_name = master_name
- self._json = None
self._archived_results = None
def generate_json_output(self):
- """Generates the JSON output file."""
-
- # Generate the JSON output file that has full results.
- # FIXME: stop writing out the full results file once all bots use
- # incremental results.
- if not self._json:
- self._json = self.get_json()
- if self._json:
- self._generate_json_file(self._json, self._results_file_path)
-
- # Generate the JSON output file that only has incremental results.
- if self._generate_incremental_results:
- json = self.get_json(incremental=True)
- if json:
- self._generate_json_file(
- json, self._incremental_results_file_path)
-
- def get_json(self, incremental=False):
+ json = self.get_json()
+ if json:
+ self._generate_json_file(
+ json, self._incremental_results_file_path)
+
+ def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
- if not incremental:
- if self._json:
- return self._json
-
- if self._archived_results:
- results_json = self._archived_results
if not results_json:
- results_json, error = self._get_archived_json_results(incremental)
+ results_json, error = self._get_archived_json_results()
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the
@@ -231,7 +212,7 @@ class JSONResultsGeneratorBase(object):
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(tests.iterkeys())
for test in all_failing_tests:
- self._insert_test_time_and_result(test, tests, incremental)
+ self._insert_test_time_and_result(test, tests)
return results_json
@@ -340,52 +321,39 @@ class JSONResultsGeneratorBase(object):
return ""
return ""
- def _get_archived_json_results(self, for_incremental=False):
- """Reads old results JSON file if it exists.
- Returns (archived_results, error) tuple where error is None if results
- were successfully read.
-
- if for_incremental is True, download JSON file that only contains test
+ def _get_archived_json_results(self):
+ """Download JSON file that only contains test
name list from test-results server. This is for generating incremental
JSON so the file generated has info for tests that failed before but
pass or are skipped from current run.
+
+ Returns (archived_results, error) tuple where error is None if results
+ were successfully read.
"""
results_json = {}
old_results = None
error = None
- if self._fs.exists(self._results_file_path) and not for_incremental:
- old_results = self._fs.read_text_file(self._results_file_path)
- elif self._builder_base_url or for_incremental:
- if for_incremental:
- if not self._test_results_server:
- # starting from fresh if no test results server specified.
- return {}, None
-
- results_file_url = (self.URL_FOR_TEST_LIST_JSON %
- (urllib2.quote(self._test_results_server),
- urllib2.quote(self._builder_name),
- self.RESULTS_FILENAME,
- urllib2.quote(self._test_type)))
- else:
- # Check if we have the archived JSON file on the buildbot
- # server.
- results_file_url = (self._builder_base_url +
- self._build_name + "/" + self.RESULTS_FILENAME)
- _log.error("Local results.json file does not exist. Grabbing "
- "it off the archive at " + results_file_url)
+ if not self._test_results_server:
+ return {}, None
- try:
- results_file = urllib2.urlopen(results_file_url)
- info = results_file.info()
- old_results = results_file.read()
- except urllib2.HTTPError, http_error:
- # A non-4xx status code means the bot is hosed for some reason
- # and we can't grab the results.json file off of it.
- if (http_error.code < 400 and http_error.code >= 500):
- error = http_error
- except urllib2.URLError, url_error:
- error = url_error
+ results_file_url = (self.URL_FOR_TEST_LIST_JSON %
+ (urllib2.quote(self._test_results_server),
+ urllib2.quote(self._builder_name),
+ self.RESULTS_FILENAME,
+ urllib2.quote(self._test_type)))
+
+ try:
+ results_file = urllib2.urlopen(results_file_url)
+ info = results_file.info()
+ old_results = results_file.read()
+ except urllib2.HTTPError, http_error:
+ # A non-4xx status code means the bot is hosed for some reason
+ # and we can't grab the results.json file off of it.
+ if (http_error.code < 400 and http_error.code >= 500):
+ error = http_error
+ except urllib2.URLError, url_error:
+ error = url_error
if old_results:
# Strip the prefix and suffix so we can get the actual JSON object.
@@ -490,7 +458,7 @@ class JSONResultsGeneratorBase(object):
int(time.time()),
self.TIME)
- def _insert_test_time_and_result(self, test_name, tests, incremental=False):
+ def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
@@ -514,11 +482,6 @@ class JSONResultsGeneratorBase(object):
else:
thisTest[self.TIMES] = [[1, time]]
- # Don't normalize the incremental results json because we need results
- # for tests that pass or have no data from current run.
- if not incremental:
- self._normalize_results_json(thisTest, test_name, tests)
-
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
index ce99765..95da8fb 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -94,7 +94,7 @@ class JSONGeneratorTest(unittest.TestCase):
failed_count_map = dict([(t, 1) for t in failed_tests])
# Test incremental json results
- incremental_json = generator.get_json(incremental=True)
+ incremental_json = generator.get_json()
self._verify_json_results(
tests_set,
test_timings,
@@ -106,33 +106,6 @@ class JSONGeneratorTest(unittest.TestCase):
incremental_json,
1)
- # Test aggregated json results
- generator.set_archived_results(self._json)
- json = generator.get_json(incremental=False)
- self._json = json
- self._num_runs += 1
- self._tests_set |= tests_set
- self._test_timings.update(test_timings)
- self._PASS_count += len(PASS_tests)
- self._DISABLED_count += len(DISABLED_tests)
- self._FLAKY_count += len(FLAKY_tests)
- self._fixable_count += len(DISABLED_tests | failed_tests)
-
- get = self._failed_count_map.get
- for test in failed_count_map.iterkeys():
- self._failed_count_map[test] = get(test, 0) + 1
-
- self._verify_json_results(
- self._tests_set,
- self._test_timings,
- self._failed_count_map,
- self._PASS_count,
- self._DISABLED_count,
- self._FLAKY_count,
- self._fixable_count,
- self._json,
- self._num_runs)
-
def _verify_json_results(self, tests_set, test_timings, failed_count_map,
PASS_count, DISABLED_count, FLAKY_count,
fixable_count,
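
With the non-incremental code path removed, JSONResultsGeneratorBase only ever produces the incremental results JSON: get_json() no longer takes an incremental flag, and generate_json_output() writes just the incremental file, pulling the archived failing-test list from the test results server when one is configured. A hedged sketch of the resulting call pattern, mirroring the unit test above; the constructor arguments follow the new signature, but every concrete value here is a placeholder:

    from webkitpy.layout_tests.layout_package import json_results_generator

    generator = json_results_generator.JSONResultsGeneratorBase(
        port, "builder-name", "build-name", 1234,         # builder/build identifiers
        "/tmp/layout-test-results",                        # results_file_base_path
        None,                                              # builder_base_url
        test_results_map,                                  # {test_name: TestResult} for this run
        svn_repositories=port.test_repository_paths(),
        test_results_server="results.example.com",         # omit to start from fresh results
        test_type="layout-tests")

    incremental_json = generator.get_json()    # incremental results only; no flag needed
    generator.generate_json_output()           # writes the incremental results file
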
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py
new file mode 100644
index 0000000..a0f252c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py
@@ -0,0 +1,282 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messages and concurrency for run-webkit-tests.
+
+This module implements a message broker that connects the manager
+(TestRunner2) to the workers: it provides a messaging abstraction and
+message loops (building on top of message_broker2), and handles starting
+workers by launching threads and/or processes depending on the
+requested configuration.
+
+There are a lot of classes and objects involved in a fully connected system.
+They interact more or less like:
+
+TestRunner2 --> _InlineManager ---> _InlineWorker <-> Worker
+     ^                \                 /               ^
+     |                 v               v                |
+     \------------------- MessageBroker ----------------/
+"""
+
+import logging
+import optparse
+import Queue
+import thread
+import threading
+import time
+
+
+# Handle Python < 2.6 where multiprocessing isn't available.
+#
+# _Multiprocessing_Process is needed so that _MultiProcessWorker
+# can be defined with or without multiprocessing.
+try:
+ import multiprocessing
+ _Multiprocessing_Process = multiprocessing.Process
+except ImportError:
+ multiprocessing = None
+ _Multiprocessing_Process = threading.Thread
+
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.layout_package import message_broker2
+
+
+_log = logging.getLogger(__name__)
+
+#
+# Topic names for Manager <-> Worker messaging
+#
+MANAGER_TOPIC = 'managers'
+ANY_WORKER_TOPIC = 'workers'
+
+
+def runtime_options():
+ """Return a list of optparse.Option objects for any runtime values used
+ by this module."""
+ options = [
+ optparse.make_option("--worker-model", action="store",
+ help=("controls worker model. Valid values are "
+ "'inline', 'threads', and 'processes'.")),
+ ]
+ return options
+
+
+def get(port, options, client, worker_class):
+ """Return a connection to a manager/worker message_broker
+
+ Args:
+ port - handle to layout_tests/port object for port-specific stuff
+ options - optparse argument for command-line options
+ client - message_broker2.BrokerClient implementation to dispatch
+ replies to.
+ worker_class - type of workers to create. This class must implement
+ the methods in AbstractWorker.
+ Returns:
+ A handle to an object that will talk to a message broker configured
+ for the normal manager/worker communication.
+ """
+ worker_model = options.worker_model
+ if worker_model == 'inline':
+ queue_class = Queue.Queue
+ manager_class = _InlineManager
+ elif worker_model == 'threads':
+ queue_class = Queue.Queue
+ manager_class = _ThreadedManager
+ elif worker_model == 'processes' and multiprocessing:
+ queue_class = multiprocessing.Queue
+ manager_class = _MultiProcessManager
+ else:
+ raise ValueError("unsupported value for --worker-model: %s" %
+ worker_model)
+
+ broker = message_broker2.Broker(options, queue_class)
+ return manager_class(broker, port, options, client, worker_class)
+
+
+class AbstractWorker(message_broker2.BrokerClient):
+ def __init__(self, broker_connection, worker_number, options):
+ """The constructor should be used to do any simple initialization
+ necessary, but should not do anything that creates data structures
+ that cannot be Pickled or sent across processes (like opening
+ files or sockets). Complex initialization should be done at the
+ start of the run() call.
+
+ Args:
+ broker_connection - handle to the BrokerConnection object creating
+ the worker and that can be used for messaging.
+ worker_number - identifier for this particular worker
+ options - command-line argument object from optparse"""
+
+ raise NotImplementedError
+
+ def run(self, port):
+ """Callback for the worker to start executing. Typically does any
+ remaining initialization and then calls broker_connection.run_message_loop()."""
+ raise NotImplementedError
+
+ def cancel(self):
+ """Called when possible to indicate to the worker to stop processing
+ messages and shut down. Note that workers may be stopped without this
+ method being called, so clients should not rely solely on this."""
+ raise NotImplementedError
+
+
+class _ManagerConnection(message_broker2.BrokerConnection):
+ def __init__(self, broker, options, client, worker_class):
+ """Base initialization for all Manager objects.
+
+ Args:
+ broker: handle to the message_broker2 object
+ options: command line options object
+ client: callback object (the caller)
+ worker_class: class object to use to create workers.
+ """
+ message_broker2.BrokerConnection.__init__(self, broker, client,
+ MANAGER_TOPIC, ANY_WORKER_TOPIC)
+ self._options = options
+ self._worker_class = worker_class
+
+ def start_worker(self, worker_number):
+ raise NotImplementedError
+
+
+class _InlineManager(_ManagerConnection):
+ def __init__(self, broker, port, options, client, worker_class):
+ _ManagerConnection.__init__(self, broker, options, client, worker_class)
+ self._port = port
+ self._inline_worker = None
+
+ def start_worker(self, worker_number):
+ self._inline_worker = _InlineWorkerConnection(self._broker, self._port,
+ self._client, self._worker_class, worker_number)
+ return self._inline_worker
+
+ def run_message_loop(self, delay_secs=None):
+ # Note that delay_secs is ignored in this case since we can't easily
+ # implement it.
+ self._inline_worker.run()
+ self._broker.run_all_pending(MANAGER_TOPIC, self._client)
+
+
+class _ThreadedManager(_ManagerConnection):
+ def __init__(self, broker, port, options, client, worker_class):
+ _ManagerConnection.__init__(self, broker, options, client, worker_class)
+ self._port = port
+
+ def start_worker(self, worker_number):
+ worker_connection = _ThreadedWorkerConnection(self._broker, self._port,
+ self._worker_class, worker_number)
+ worker_connection.start()
+ return worker_connection
+
+
+class _MultiProcessManager(_ManagerConnection):
+ def __init__(self, broker, port, options, client, worker_class):
+ # Note that this class does not keep a handle to the actual port
+ # object, because it isn't Picklable. Instead it keeps the port
+ # name and recreates the port in the child process from the name
+ # and options.
+ _ManagerConnection.__init__(self, broker, options, client, worker_class)
+ self._platform_name = port.real_name()
+
+ def start_worker(self, worker_number):
+ worker_connection = _MultiProcessWorkerConnection(self._broker, self._platform_name,
+ self._worker_class, worker_number, self._options)
+ worker_connection.start()
+ return worker_connection
+
+
+class _WorkerConnection(message_broker2.BrokerConnection):
+ def __init__(self, broker, worker_class, worker_number, options):
+ self._client = worker_class(self, worker_number, options)
+ self.name = self._client.name()
+ message_broker2.BrokerConnection.__init__(self, broker, self._client,
+ ANY_WORKER_TOPIC, MANAGER_TOPIC)
+
+ def yield_to_broker(self):
+ pass
+
+
+class _InlineWorkerConnection(_WorkerConnection):
+ def __init__(self, broker, port, manager_client, worker_class, worker_number):
+ _WorkerConnection.__init__(self, broker, worker_class, worker_number, port._options)
+ self._port = port
+ self._manager_client = manager_client
+
+ def run(self):
+ self._client.run(self._port)
+
+ def yield_to_broker(self):
+ self._broker.run_all_pending(MANAGER_TOPIC, self._manager_client)
+
+
+class _Thread(threading.Thread):
+ def __init__(self, worker_connection, port, client):
+ threading.Thread.__init__(self)
+ self._worker_connection = worker_connection
+ self._port = port
+ self._client = client
+
+ def run(self):
+ # FIXME: We can remove this once everyone is on 2.6.
+ if not hasattr(self, 'ident'):
+ self.ident = thread.get_ident()
+ self._client.run(self._port)
+
+
+class _ThreadedWorkerConnection(_WorkerConnection):
+ def __init__(self, broker, port, worker_class, worker_number):
+ _WorkerConnection.__init__(self, broker, worker_class, worker_number, port._options)
+ self._thread = _Thread(self, port, self._client)
+
+ def start(self):
+ self._thread.start()
+
+
+class _Process(_Multiprocessing_Process):
+ def __init__(self, worker_connection, platform_name, options, client):
+ _Multiprocessing_Process.__init__(self)
+ self._worker_connection = worker_connection
+ self._platform_name = platform_name
+ self._options = options
+ self._client = client
+
+ def run(self):
+ logging.basicConfig()
+ port_obj = port.get(self._platform_name, self._options)
+ self._client.run(port_obj)
+
+
+class _MultiProcessWorkerConnection(_WorkerConnection):
+ def __init__(self, broker, platform_name, worker_class, worker_number, options):
+ _WorkerConnection.__init__(self, broker, worker_class, worker_number, options)
+ self._proc = _Process(self, platform_name, options, self._client)
+
+ def start(self):
+ self._proc.start()
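
The unit tests added below exercise this module end to end; the essential usage is to subclass AbstractWorker (adding handle_* methods for the messages you expect), hand the class to get() along with the parsed --worker-model options, and then drive the message loop from the manager side. A minimal sketch under those assumptions; EchoWorker, the 'echo'/'echoed' messages, and the port_obj/options/manager_client variables are placeholders for illustration, and manager_client must itself be a BrokerClient with handle_echoed() and handle_done() methods:

    from webkitpy.layout_tests.layout_package import manager_worker_broker

    class EchoWorker(manager_worker_broker.AbstractWorker):
        # Hypothetical worker: answers one 'echo' message, stops on 'stop'.
        def __init__(self, broker_connection, worker_number, options):
            self._connection = broker_connection
            self._name = 'EchoWorker/%d' % worker_number
            self._stopped = False

        def name(self):
            return self._name

        def is_done(self):
            return self._stopped

        def handle_stop(self, src):
            self._stopped = True

        def handle_echo(self, src, text):
            self._connection.post_message('echoed', text)

        def run(self, port):
            # Process messages until handle_stop() fires, then tell the manager.
            self._connection.run_message_loop()
            self._connection.post_message('done')

    broker = manager_worker_broker.get(port_obj, options, manager_client, EchoWorker)
    broker.start_worker(0)
    broker.post_message('echo', 'hello')
    broker.post_message('stop')
    broker.run_message_loop()    # returns once manager_client.is_done() is true
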
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py
new file mode 100644
index 0000000..ffbe081
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py
@@ -0,0 +1,227 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import Queue
+import sys
+import unittest
+
+try:
+ import multiprocessing
+except ImportError:
+ multiprocessing = None
+
+
+from webkitpy.common.system import outputcapture
+
+from webkitpy.layout_tests import port
+from webkitpy.layout_tests.layout_package import manager_worker_broker
+from webkitpy.layout_tests.layout_package import message_broker2
+
+
+class TestWorker(manager_worker_broker.AbstractWorker):
+ def __init__(self, broker_connection, worker_number, options):
+ self._broker_connection = broker_connection
+ self._options = options
+ self._worker_number = worker_number
+ self._name = 'TestWorker/%d' % worker_number
+ self._stopped = False
+
+ def handle_stop(self, src):
+ self._stopped = True
+
+ def handle_test(self, src, an_int, a_str):
+ assert an_int == 1
+ assert a_str == "hello, world"
+ self._broker_connection.post_message('test', 2, 'hi, everybody')
+
+ def is_done(self):
+ return self._stopped
+
+ def name(self):
+ return self._name
+
+ def start(self):
+ pass
+
+ def run(self, port):
+ try:
+ self._broker_connection.run_message_loop()
+ self._broker_connection.yield_to_broker()
+ self._broker_connection.post_message('done')
+ except Exception, e:
+ self._broker_connection.post_message('exception', (type(e), str(e), None))
+
+
+def get_options(worker_model):
+ option_list = manager_worker_broker.runtime_options()
+ parser = optparse.OptionParser(option_list=option_list)
+ options, args = parser.parse_args(args=['--worker-model', worker_model])
+ return options
+
+
+def make_broker(manager, worker_model):
+ options = get_options(worker_model)
+ return manager_worker_broker.get(port.get("test"), options, manager,
+ TestWorker)
+
+
+class FunctionTests(unittest.TestCase):
+ def test_get__inline(self):
+ self.assertTrue(make_broker(self, 'inline') is not None)
+
+ def test_get__threads(self):
+ self.assertTrue(make_broker(self, 'threads') is not None)
+
+ def test_get__processes(self):
+ if multiprocessing:
+ self.assertTrue(make_broker(self, 'processes') is not None)
+ else:
+ self.assertRaises(ValueError, make_broker, self, 'processes')
+
+ def test_get__unknown(self):
+ self.assertRaises(ValueError, make_broker, self, 'unknown')
+
+
+class _TestsMixin(object):
+ """Mixin class that implements a series of tests to enforce the
+ contract all implementations must follow."""
+
+ #
+ # Methods to implement the Manager side of the ClientInterface
+ #
+ def name(self):
+ return 'Tester'
+
+ def is_done(self):
+ return self._done
+
+ #
+ # Handlers for the messages the TestWorker may send.
+ #
+ def handle_done(self, src):
+ self._done = True
+
+ def handle_test(self, src, an_int, a_str):
+ self._an_int = an_int
+ self._a_str = a_str
+
+ def handle_exception(self, src, exc_info):
+ self._exception = exc_info
+ self._done = True
+
+ #
+ # Testing helper methods
+ #
+ def setUp(self):
+ self._an_int = None
+ self._a_str = None
+ self._broker = None
+ self._done = False
+ self._exception = None
+ self._worker_model = None
+
+ def make_broker(self):
+ self._broker = make_broker(self, self._worker_model)
+
+ #
+ # Actual unit tests
+ #
+ def test_done(self):
+ if not self._worker_model:
+ return
+ self.make_broker()
+ worker = self._broker.start_worker(0)
+ self._broker.post_message('test', 1, 'hello, world')
+ self._broker.post_message('stop')
+ self._broker.run_message_loop()
+ self.assertTrue(self.is_done())
+ self.assertEqual(self._an_int, 2)
+ self.assertEqual(self._a_str, 'hi, everybody')
+
+ def test_unknown_message(self):
+ if not self._worker_model:
+ return
+ self.make_broker()
+ worker = self._broker.start_worker(0)
+ self._broker.post_message('unknown')
+ self._broker.run_message_loop()
+
+ self.assertTrue(self.is_done())
+ self.assertEquals(self._exception[0], ValueError)
+ self.assertEquals(self._exception[1],
+ "TestWorker/0: received message 'unknown' it couldn't handle")
+
+
+class InlineBrokerTests(_TestsMixin, unittest.TestCase):
+ def setUp(self):
+ _TestsMixin.setUp(self)
+ self._worker_model = 'inline'
+
+
+class MultiProcessBrokerTests(_TestsMixin, unittest.TestCase):
+ def setUp(self):
+ _TestsMixin.setUp(self)
+ if multiprocessing:
+ self._worker_model = 'processes'
+ else:
+ self._worker_model = None
+
+ def queue(self):
+ return multiprocessing.Queue()
+
+
+class ThreadedBrokerTests(_TestsMixin, unittest.TestCase):
+ def setUp(self):
+ _TestsMixin.setUp(self)
+ self._worker_model = 'threads'
+
+
+class FunctionsTest(unittest.TestCase):
+ def test_runtime_options(self):
+ option_list = manager_worker_broker.runtime_options()
+ parser = optparse.OptionParser(option_list=option_list)
+ options, args = parser.parse_args([])
+ self.assertTrue(options)
+
+
+class InterfaceTest(unittest.TestCase):
+ # These tests mostly exist to pacify coverage.
+
+ # FIXME: There must be a better way to do this and also verify
+ # that classes do implement every abstract method in an interface.
+ def test_managerconnection_is_abstract(self):
+ # Test that all the base class methods are abstract and have the
+ # signature we expect.
+ broker = make_broker(self, 'inline')
+ obj = manager_worker_broker._ManagerConnection(broker._broker, None, self, None)
+ self.assertRaises(NotImplementedError, obj.start_worker, 0)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
index 481c617..66a7aa8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py
@@ -41,9 +41,9 @@ requested configuration.
"""
import logging
-import sys
import time
-import traceback
+
+from webkitpy.common.system import stack_utils
import dump_render_tree_thread
@@ -137,6 +137,7 @@ class MultiThreadedBroker(WorkerMessageBroker):
def run_message_loop(self):
threads = self._threads()
+ wedged_threads = set()
# Loop through all the threads waiting for them to finish.
some_thread_is_alive = True
@@ -145,11 +146,15 @@ class MultiThreadedBroker(WorkerMessageBroker):
t = time.time()
for thread in threads:
if thread.isAlive():
+ if thread in wedged_threads:
+ continue
+
some_thread_is_alive = True
next_timeout = thread.next_timeout()
if next_timeout and t > next_timeout:
- log_wedged_worker(thread.getName(), thread.id())
+ stack_utils.log_thread_state(_log.error, thread.getName(), thread.id(), "is wedged")
thread.clear_next_timeout()
+ wedged_threads.add(thread)
exception_info = thread.exception_info()
if exception_info is not None:
@@ -164,34 +169,10 @@ class MultiThreadedBroker(WorkerMessageBroker):
if some_thread_is_alive:
time.sleep(0.01)
+ if wedged_threads:
+ _log.warning("All remaining threads are wedged, bailing out.")
+
def cancel_workers(self):
threads = self._threads()
for thread in threads:
thread.cancel()
-
-
-def log_wedged_worker(name, id):
- """Log information about the given worker state."""
- stack = _find_thread_stack(id)
- assert(stack is not None)
- _log.error("")
- _log.error("%s (tid %d) is wedged" % (name, id))
- _log_stack(stack)
- _log.error("")
-
-
-def _find_thread_stack(id):
- """Returns a stack object that can be used to dump a stack trace for
- the given thread id (or None if the id is not found)."""
- for thread_id, stack in sys._current_frames().items():
- if thread_id == id:
- return stack
- return None
-
-
-def _log_stack(stack):
- """Log a stack trace to log.error()."""
- for filename, lineno, name, line in traceback.extract_stack(stack):
- _log.error('File: "%s", line %d, in %s' % (filename, lineno, name))
- if line:
- _log.error(' %s' % line.strip())
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2.py
new file mode 100644
index 0000000..ec3c970
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2.py
@@ -0,0 +1,196 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Module for handling messaging for run-webkit-tests.
+
+This module implements a simple message broker abstraction that will be
+used to coordinate messages between the main run-webkit-tests thread
+(aka TestRunner) and the individual worker threads (previously known as
+dump_render_tree_threads).
+
+The broker simply distributes messages onto topics (named queues); the actual
+queues themselves are provided by the caller, as the queue's implementation
+requirements vary depending on the desired concurrency model
+(none/threads/processes).
+
+In order for shared-nothing messaging between processes to be possible,
+Messages must be picklable.
+
+The module defines one interface and two classes. Callers of this package
+must implement the BrokerClient interface, and most callers will create
+BrokerConnections as well as Brokers.
+
+The classes relate to each other as:
+
+    BrokerClient ------> BrokerConnection
+         ^                      |
+         |                      v
+         \------------------- Broker
+
+(The BrokerClient never calls broker directly after it is created, only
+BrokerConnection. BrokerConnection passes a reference to BrokerClient to
+Broker, and Broker only invokes that reference, never talking directly to
+BrokerConnection).
+"""
+
+import cPickle
+import logging
+import Queue
+import time
+
+
+_log = logging.getLogger(__name__)
+
+
+class BrokerClient(object):
+ """Abstract base class / interface that all message broker clients must
+ implement. In addition to the methods below, by convention clients
+ implement routines of the signature type
+
+ handle_MESSAGE_NAME(self, src, ...):
+
+ where MESSAGE_NAME matches the string passed to post_message(), and
+ src indicates the name of the sender. If the message contains values in
+ the message body, those will be provided as optargs."""
+
+ def __init__(self, *optargs, **kwargs):
+ raise NotImplementedError
+
+ def is_done(self):
+ """Called from inside run_message_loop() to indicate whether to exit."""
+ raise NotImplementedError
+
+ def name(self):
+ """Return a name that identifies the client."""
+ raise NotImplementedError
+
+
+class Broker(object):
+ """Brokers provide the basic model of a set of topics. Clients can post a
+ message to any topic using post_message(), and can process messages on one
+ topic at a time using run_message_loop()."""
+
+ def __init__(self, options, queue_maker):
+ """Args:
+ options: a runtime option class from optparse
+ queue_maker: a factory method that returns objects implementing a
+ Queue interface (put()/get()).
+ """
+ self._options = options
+ self._queue_maker = queue_maker
+ self._topics = {}
+
+ def add_topic(self, topic_name):
+ if topic_name not in self._topics:
+ self._topics[topic_name] = self._queue_maker()
+
+ def _get_queue_for_topic(self, topic_name):
+ return self._topics[topic_name]
+
+ def post_message(self, client, topic_name, message_name, *message_args):
+ """Post a message to the appropriate topic name.
+
+ Messages have a name and a tuple of optional arguments. Both must be picklable."""
+ message = _Message(client.name(), topic_name, message_name, message_args)
+ queue = self._get_queue_for_topic(topic_name)
+ queue.put(_Message.dumps(message))
+
+ def run_message_loop(self, topic_name, client, delay_secs=None):
+ """Loop processing messages until client.is_done() or delay passes.
+
+ To run indefinitely, set delay_secs to None."""
+ assert delay_secs is None or delay_secs > 0
+ self._run_loop(topic_name, client, block=True, delay_secs=delay_secs)
+
+ def run_all_pending(self, topic_name, client):
+ """Process messages until client.is_done() or caller would block."""
+ self._run_loop(topic_name, client, block=False, delay_secs=None)
+
+ def _run_loop(self, topic_name, client, block, delay_secs):
+ queue = self._get_queue_for_topic(topic_name)
+ while not client.is_done():
+ try:
+ s = queue.get(block, delay_secs)
+ except Queue.Empty:
+ return
+ msg = _Message.loads(s)
+ self._dispatch_message(msg, client)
+
+ def _dispatch_message(self, message, client):
+ if not hasattr(client, 'handle_' + message.name):
+ raise ValueError(
+ "%s: received message '%s' it couldn't handle" %
+ (client.name(), message.name))
+ optargs = message.args
+ message_handler = getattr(client, 'handle_' + message.name)
+ message_handler(message.src, *optargs)
+
+
+class _Message(object):
+ @staticmethod
+ def loads(str):
+ obj = cPickle.loads(str)
+ assert(isinstance(obj, _Message))
+ return obj
+
+ def __init__(self, src, topic_name, message_name, message_args):
+ self.src = src
+ self.topic_name = topic_name
+ self.name = message_name
+ self.args = message_args
+
+ def dumps(self):
+ return cPickle.dumps(self)
+
+ def __repr__(self):
+ return ("_Message(from='%s', topic_name='%s', message_name='%s')" %
+ (self.src, self.topic_name, self.name))
+
+
+class BrokerConnection(object):
+ """BrokerConnection provides a connection-oriented facade on top of a
+ Broker, so that callers don't have to repeatedly pass the same topic
+ names over and over."""
+
+ def __init__(self, broker, client, run_topic, post_topic):
+ """Create a BrokerConnection on top of a Broker. Note that the Broker
+ is passed in rather than created so that a single Broker can be used
+ by multiple BrokerConnections."""
+ self._broker = broker
+ self._client = client
+ self._post_topic = post_topic
+ self._run_topic = run_topic
+ broker.add_topic(run_topic)
+ broker.add_topic(post_topic)
+
+ def run_message_loop(self, delay_secs=None):
+ self._broker.run_message_loop(self._run_topic, self._client, delay_secs)
+
+ def post_message(self, message_name, *message_args):
+ self._broker.post_message(self._client, self._post_topic,
+ message_name, *message_args)
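
To make the flow above concrete, here is a minimal sketch of a client talking to itself through a Broker. It is illustrative only: EchoClient, the topic names, and the use of Queue.Queue as the queue factory are made up for this example and are not taken from the patch.

    import Queue

    from webkitpy.layout_tests.layout_package import message_broker2


    class EchoClient(message_broker2.BrokerClient):
        """Hypothetical client that handles a single 'echo' message."""
        def __init__(self, name):
            self._name = name
            self._done = False

        def is_done(self):
            return self._done

        def name(self):
            return self._name

        def handle_echo(self, src, text):
            # Called by _dispatch_message() for messages named 'echo'.
            print "%s got %r from %s" % (self._name, text, src)
            self._done = True


    broker = message_broker2.Broker(options=None, queue_maker=Queue.Queue)
    client = EchoClient('echo_client')
    # The connection listens on 'requests' and would post replies to 'replies'.
    connection = message_broker2.BrokerConnection(broker, client,
                                                  run_topic='requests',
                                                  post_topic='replies')
    broker.post_message(client, 'requests', 'echo', 'hello')
    connection.run_message_loop(delay_secs=1.0)  # dispatches handle_echo('echo_client', 'hello')
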
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2_unittest.py
new file mode 100644
index 0000000..0e0a88d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker2_unittest.py
@@ -0,0 +1,83 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.layout_tests.layout_package import message_broker2
+
+# This file exists to test routines that aren't necessarily covered elsewhere;
+# most of the testing of message_broker2 will be covered under the tests in
+# the manager_worker_broker module.
+
+
+class MessageTest(unittest.TestCase):
+ def test__no_body(self):
+ msg = message_broker2._Message('src', 'topic_name', 'message_name', None)
+ self.assertTrue(repr(msg))
+ s = msg.dumps()
+ new_msg = message_broker2._Message.loads(s)
+ self.assertEqual(new_msg.name, 'message_name')
+ self.assertEqual(new_msg.args, None)
+ self.assertEqual(new_msg.topic_name, 'topic_name')
+ self.assertEqual(new_msg.src, 'src')
+
+ def test__body(self):
+ msg = message_broker2._Message('src', 'topic_name', 'message_name',
+ ('body', 0))
+ self.assertTrue(repr(msg))
+ s = msg.dumps()
+ new_msg = message_broker2._Message.loads(s)
+ self.assertEqual(new_msg.name, 'message_name')
+ self.assertEqual(new_msg.args, ('body', 0))
+ self.assertEqual(new_msg.topic_name, 'topic_name')
+ self.assertEqual(new_msg.src, 'src')
+
+
+class InterfaceTest(unittest.TestCase):
+ # These tests mostly exist to pacify coverage.
+
+ # FIXME: There must be a better way to do this and also verify
+ # that classes do implement every abstract method in an interface.
+
+ def test_brokerclient_is_abstract(self):
+ # Test that we can't create an instance directly.
+ self.assertRaises(NotImplementedError, message_broker2.BrokerClient)
+
+ class TestClient(message_broker2.BrokerClient):
+ def __init__(self):
+ pass
+
+ # Test that all the base class methods are abstract and have the
+ # signature we expect.
+ obj = TestClient()
+ self.assertRaises(NotImplementedError, obj.is_done)
+ self.assertRaises(NotImplementedError, obj.name)
+
+
+if __name__ == '__main__':
+ unittest.main()
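
The FIXME in InterfaceTest above asks for a way to verify that concrete classes implement every abstract method. One possible approach (a sketch only; the helper name and exact policy are invented here and are not part of the patch) is to fail whenever a public method of the subclass is still the base class's implementation:

    def assert_overrides_all_public_methods(test_case, base_cls, concrete_cls):
        """Fail test_case if concrete_cls inherits any public method from
        base_cls instead of overriding it."""
        for attr in dir(base_cls):
            if attr.startswith('_'):
                continue
            base_method = getattr(base_cls, attr, None)
            if not callable(base_method):
                continue
            # In Python 2, unbound methods expose the underlying function as
            # im_func; identical functions mean the method was inherited.
            if getattr(concrete_cls, attr).im_func is base_method.im_func:
                test_case.fail('%s does not override %s' %
                               (concrete_cls.__name__, attr))

A concrete client class could then be checked against message_broker2.BrokerClient in its own unit test.
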
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
index 6f04fd3..f4cb5d2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py
@@ -84,7 +84,6 @@ class TestThread(threading.Thread):
def next_timeout(self):
if self._timeout:
- self._timeout_queue.put('done')
return time.time() - 10
return time.time()
@@ -125,7 +124,12 @@ class MultiThreadedBrokerTest(unittest.TestCase):
child_thread.start()
started_msg = starting_queue.get()
stopping_queue.put(msg)
- return broker.run_message_loop()
+ res = broker.run_message_loop()
+ if msg == 'Timeout':
+ child_thread._timeout_queue.put('done')
+ child_thread.join(1.0)
+ self.assertFalse(child_thread.isAlive())
+ return res
def test_basic(self):
interrupted = self.run_one_thread('')
@@ -135,48 +139,22 @@ class MultiThreadedBrokerTest(unittest.TestCase):
self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt')
def test_timeout(self):
+ # Because the timeout shows up as a wedged thread, this also tests
+ # log_wedged_worker().
oc = outputcapture.OutputCapture()
- oc.capture_output()
- interrupted = self.run_one_thread('Timeout')
- self.assertFalse(interrupted)
- oc.restore_output()
-
- def test_exception(self):
- self.assertRaises(ValueError, self.run_one_thread, 'Exception')
-
-
-class Test(unittest.TestCase):
- def test_find_thread_stack_found(self):
- id, stack = sys._current_frames().items()[0]
- found_stack = message_broker._find_thread_stack(id)
- self.assertNotEqual(found_stack, None)
-
- def test_find_thread_stack_not_found(self):
- found_stack = message_broker._find_thread_stack(0)
- self.assertEqual(found_stack, None)
-
- def test_log_wedged_worker(self):
- oc = outputcapture.OutputCapture()
- oc.capture_output()
+ stdout, stderr = oc.capture_output()
logger = message_broker._log
astream = array_stream.ArrayStream()
handler = TestHandler(astream)
logger.addHandler(handler)
+ interrupted = self.run_one_thread('Timeout')
+ stdout, stderr = oc.restore_output()
+ self.assertFalse(interrupted)
+ logger.handlers.remove(handler)
+ self.assertTrue('All remaining threads are wedged, bailing out.' in astream.get())
- starting_queue = Queue.Queue()
- stopping_queue = Queue.Queue()
- child_thread = TestThread(starting_queue, stopping_queue)
- child_thread.start()
- msg = starting_queue.get()
-
- message_broker.log_wedged_worker(child_thread.getName(),
- child_thread.id())
- stopping_queue.put('')
- child_thread.join(timeout=1.0)
-
- self.assertFalse(astream.empty())
- self.assertFalse(child_thread.isAlive())
- oc.restore_output()
+ def test_exception(self):
+ self.assertRaises(ValueError, self.run_one_thread, 'Exception')
if __name__ == '__main__':
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
index 12a786e..7ab6da8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py
@@ -144,7 +144,7 @@ class Testprinter(unittest.TestCase):
test in tests]
expectations = test_expectations.TestExpectations(
self._port, test_paths, expectations_str,
- self._port.test_platform_name(), is_debug_mode=False,
+ self._port.test_configuration(),
is_lint_mode=False)
rs = result_summary.ResultSummary(expectations, test_paths)
@@ -363,7 +363,7 @@ class Testprinter(unittest.TestCase):
def test_print_progress__detailed(self):
tests = ['passes/text.html', 'failures/expected/timeout.html',
'failures/expected/crash.html']
- expectations = 'failures/expected/timeout.html = TIMEOUT'
+ expectations = 'BUGX : failures/expected/timeout.html = TIMEOUT'
# first, test that it is disabled properly
# should still print one-line-progress
@@ -569,8 +569,8 @@ class Testprinter(unittest.TestCase):
self.assertFalse(out.empty())
expectations = """
-failures/expected/crash.html = CRASH
-failures/expected/timeout.html = TIMEOUT
+BUGX : failures/expected/crash.html = CRASH
+BUGX : failures/expected/timeout.html = TIMEOUT
"""
err.reset()
out.reset()
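
The three changes to printing_unittest.py above track the stricter expectation parsing introduced later in this patch: parse_expectations_line() now treats a line with no ':' between the modifiers and the test as a fatal error, and _check_semantics() logs a non-fatal 'Test lacks BUG modifier.' warning for lines with neither a BUG identifier nor WONTFIX, so the test fixtures gain a BUGX prefix. For illustration only (these exact lines are not in the patch), the accepted shape of a line is:

    BUGX WIN RELEASE : failures/expected/timeout.html = TIMEOUT

while a bare 'failures/expected/timeout.html = TIMEOUT', with no modifiers and no ':', is now rejected.
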
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
new file mode 100644
index 0000000..96e3ee6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
@@ -0,0 +1,322 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import threading
+import time
+
+from webkitpy.layout_tests.port import base
+
+from webkitpy.layout_tests.test_types import text_diff
+from webkitpy.layout_tests.test_types import image_diff
+
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests.layout_package.test_results import TestResult
+
+
+_log = logging.getLogger(__name__)
+
+
+class ExpectedDriverOutput:
+ """Groups information about an expected driver output."""
+ def __init__(self, text, image, image_hash):
+ self.text = text
+ self.image = image
+ self.image_hash = image_hash
+
+
+class SingleTestRunner:
+
+ def __init__(self, options, port, worker_name, worker_number):
+ self._options = options
+ self._port = port
+ self._worker_name = worker_name
+ self._worker_number = worker_number
+ self._driver = None
+ self._test_types = []
+ self.has_http_lock = False
+ for cls in self._get_test_type_classes():
+ self._test_types.append(cls(self._port,
+ self._options.results_directory))
+
+ def cleanup(self):
+ self.kill_dump_render_tree()
+ if self.has_http_lock:
+ self.stop_servers_with_lock()
+
+ def _get_test_type_classes(self):
+ classes = [text_diff.TestTextDiff]
+ if self._options.pixel_tests:
+ classes.append(image_diff.ImageDiff)
+ return classes
+
+ def timeout(self, test_input):
+ # We calculate how long we expect the test to take.
+ #
+ # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+ # larger than that. We also add a little more padding if we're
+ # running tests in a separate thread.
+ #
+ # Note that we need to convert the test timeout from a
+ # string value in milliseconds to a float for Python.
+ driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+ if not self._options.run_singly:
+ return driver_timeout_sec
+
+ thread_padding_sec = 1.0
+ thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+ return thread_timeout_sec
+
+ def run_test(self, test_input, timeout):
+ if self._options.run_singly:
+ return self._run_test_in_another_thread(test_input, timeout)
+ else:
+ return self._run_test_in_this_thread(test_input)
+
+ def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+ """Run a test in a separate thread, enforcing a hard time limit.
+
+ Since we can only detect the termination of a thread, not any internal
+ state or progress, we can only run per-test timeouts when running test
+ files singly.
+
+ Args:
+ test_input: Object containing the test filename and timeout
+ thread_timeout_sec: time to wait before killing the driver process.
+ Returns:
+ A TestResult
+ """
+ worker = self
+ result = None
+
+ driver = worker._port.create_driver(worker._worker_number)
+ driver.start()
+
+ class SingleTestThread(threading.Thread):
+ def run(self):
+ # Store the result on the thread object so the caller can read
+ # it after join(); a plain local assignment here would be lost
+ # when run() returns.
+ self.result = worker._run(driver, test_input)
+
+ thread = SingleTestThread()
+ thread.start()
+ thread.join(thread_timeout_sec)
+ if thread.isAlive():
+ # If join() returned with the thread still running, the
+ # DumpRenderTree is completely hung and there's nothing
+ # more we can do with it. We have to kill all the
+ # DumpRenderTrees to free it up. If we're running more than
+ # one DumpRenderTree thread, we'll end up killing the other
+ # DumpRenderTrees too, introducing spurious crashes. We accept
+ # that tradeoff in order to avoid losing the rest of this
+ # thread's results.
+ _log.error('Test thread hung: killing all DumpRenderTrees')
+
+ driver.stop()
+
+ result = getattr(thread, 'result', None)
+ if not result:
+ result = TestResult(test_input.filename, failures=[],
+ test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
+ return result
+
+ def _run_test_in_this_thread(self, test_input):
+ """Run a single test file using a shared DumpRenderTree process.
+
+ Args:
+ test_input: Object containing the test filename, uri and timeout
+
+ Returns: a TestResult object.
+ """
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ if not self._driver or self._driver.poll() is not None:
+ self._driver = self._port.create_driver(self._worker_number)
+ self._driver.start()
+ return self._run(self._driver, test_input)
+
+ def _expected_driver_output(self):
+ return ExpectedDriverOutput(self._port.expected_text(self._filename),
+ self._port.expected_image(self._filename),
+ self._port.expected_checksum(self._filename))
+
+ def _should_fetch_expected_checksum(self):
+ return (self._options.pixel_tests and
+ not (self._options.new_baseline or self._options.reset_results))
+
+ def _driver_input(self, test_input):
+ self._filename = test_input.filename
+ self._timeout = test_input.timeout
+ self._testname = self._port.relative_test_filename(test_input.filename)
+
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+ # previous run will be copied into the baseline.)
+ image_hash = None
+ if self._should_fetch_expected_checksum():
+ image_hash = self._port.expected_checksum(self._filename)
+ return base.DriverInput(self._filename, self._timeout, image_hash)
+
+ def _run(self, driver, test_input):
+ if self._options.new_baseline or self._options.reset_results:
+ return self._run_rebaseline(driver, test_input)
+ return self._run_compare_test(driver, test_input)
+
+ def _run_compare_test(self, driver, test_input):
+ driver_output = driver.run_test(self._driver_input(test_input))
+ return self._process_output(driver_output)
+
+ def _run_rebaseline(self, driver, test_input):
+ driver_output = driver.run_test(self._driver_input(test_input))
+ failures = self._handle_error(driver_output)
+ # FIXME: If the test crashed or timed out, it might be better to avoid
+ # writing new baselines.
+ self._save_baselines(driver_output)
+ return TestResult(self._filename, failures, driver_output.test_time)
+
+ def _save_baselines(self, driver_output):
+ # Although all test_shell/DumpRenderTree output should be utf-8,
+ # we do not ever decode it inside run-webkit-tests. For some tests
+ # DumpRenderTree may not output utf-8 text (e.g. webarchives).
+ self._save_baseline_data(driver_output.text, ".txt",
+ generate_new_baseline=self._options.new_baseline)
+ if self._options.pixel_tests and driver_output.image_hash:
+ self._save_baseline_data(driver_output.image, ".png",
+ generate_new_baseline=self._options.new_baseline)
+ self._save_baseline_data(driver_output.image_hash, ".checksum",
+ generate_new_baseline=self._options.new_baseline)
+
+ def _save_baseline_data(self, data, modifier, generate_new_baseline=True):
+ """Saves a new baseline file into the port's baseline directory.
+
+ The file will be named simply "<test>-expected<modifier>", suitable for
+ use as the expected results in a later run.
+
+ Args:
+ data: result to be saved as the new baseline
+ modifier: type of the result file, e.g. ".txt" or ".png"
+ generate_new_baseline: whether to generate a new, platform-specific
+ baseline, or update the existing one
+ """
+
+ port = self._port
+ fs = port._filesystem
+ if generate_new_baseline:
+ relative_dir = fs.dirname(self._testname)
+ baseline_path = port.baseline_path()
+ output_dir = fs.join(baseline_path, relative_dir)
+ output_file = fs.basename(fs.splitext(self._filename)[0] +
+ "-expected" + modifier)
+ fs.maybe_make_directory(output_dir)
+ output_path = fs.join(output_dir, output_file)
+ _log.debug('writing new baseline result "%s"' % (output_path))
+ else:
+ output_path = port.expected_filename(self._filename, modifier)
+ _log.debug('resetting baseline result "%s"' % output_path)
+
+ port.update_baseline(output_path, data)
+
+ def _handle_error(self, driver_output):
+ failures = []
+ fs = self._port._filesystem
+ if driver_output.timeout:
+ failures.append(test_failures.FailureTimeout())
+ if driver_output.crash:
+ failures.append(test_failures.FailureCrash())
+ _log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, self._testname,
+ driver_output.error))
+ stack_filename = fs.join(self._options.results_directory, self._testname)
+ stack_filename = fs.splitext(stack_filename)[0] + "-stack.txt"
+ fs.maybe_make_directory(fs.dirname(stack_filename))
+ fs.write_text_file(stack_filename, driver_output.error)
+ elif driver_output.error:
+ _log.debug("%s %s output stderr lines:\n%s" % (self._worker_name, self._testname,
+ driver_output.error))
+ return failures
+
+ def _process_output(self, driver_output):
+ """Receives the output from a DumpRenderTree process, subjects it to a
+ number of tests, and returns a list of failure types the test produced.
+ Args:
+ driver_output: a DriverOutput object containing the output from the driver
+
+ Returns: a TestResult object
+ """
+ fs = self._port._filesystem
+ failures = self._handle_error(driver_output)
+ expected_driver_output = self._expected_driver_output()
+
+ # Check the output and save the results.
+ start_time = time.time()
+ time_for_diffs = {}
+ for test_type in self._test_types:
+ start_diff_time = time.time()
+ new_failures = test_type.compare_output(
+ self._port, self._filename, self._options, driver_output,
+ expected_driver_output)
+ # Don't add any more failures if we already have a crash, so we don't
+ # double-report those tests. We do double-report for timeouts since
+ # we still want to see the text and image output.
+ if not driver_output.crash:
+ failures.extend(new_failures)
+ time_for_diffs[test_type.__class__.__name__] = (
+ time.time() - start_diff_time)
+
+ total_time_for_all_diffs = time.time() - start_time
+ return TestResult(self._filename, failures, driver_output.test_time,
+ total_time_for_all_diffs, time_for_diffs)
+
+ def start_servers_with_lock(self):
+ _log.debug('Acquiring http lock ...')
+ self._port.acquire_http_lock()
+ _log.debug('Starting HTTP server ...')
+ self._port.start_http_server()
+ _log.debug('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ self.has_http_lock = True
+
+ def stop_servers_with_lock(self):
+ """Stop the servers and release http lock."""
+ if self.has_http_lock:
+ _log.debug('Stopping HTTP server ...')
+ self._port.stop_http_server()
+ _log.debug('Stopping WebSocket server ...')
+ self._port.stop_websocket_server()
+ _log.debug('Releasing server lock ...')
+ self._port.release_http_lock()
+ self.has_http_lock = False
+
+ def kill_dump_render_tree(self):
+ """Kill the DumpRenderTree process if it's running."""
+ if self._driver:
+ self._driver.stop()
+ self._driver = None
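
Two bits of arithmetic in SingleTestRunner are easier to see with concrete numbers. The values below are only an example (a hypothetical 35000 ms per-test timeout and a made-up test path), not something taken from the patch:

    # timeout(): the driver gets 3x the per-test timeout, converted to
    # seconds; --run-singly adds one second of padding for the watchdog thread.
    test_timeout_ms = 35000
    driver_timeout_sec = 3.0 * float(test_timeout_ms) / 1000.0   # 105.0
    thread_timeout_sec = driver_timeout_sec + 1.0                # 106.0 (run_singly only)

    # _save_baseline_data(): with --new-baseline, a result for
    # fast/text/example.html and modifier ".txt" would be written to
    # <baseline_path>/fast/text/example-expected.txt; without it, the existing
    # file returned by port.expected_filename() is updated in place.
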
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index 806b663..494395a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -31,6 +31,7 @@
for layout tests.
"""
+import itertools
import logging
import re
@@ -84,18 +85,16 @@ def remove_pixel_failures(expected_results):
class TestExpectations:
TEST_LIST = "test_expectations.txt"
- def __init__(self, port, tests, expectations, test_platform_name,
- is_debug_mode, is_lint_mode, overrides=None):
+ def __init__(self, port, tests, expectations, test_config,
+ is_lint_mode, overrides=None):
"""Loads and parses the test expectations given in the string.
Args:
port: handle to object containing platform-specific functionality
- test: list of all of the test files
+ tests: list of all of the test files
expectations: test expectations as a string
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: whether to use the DEBUG or RELEASE modifiers
- in the expectations
+ test_config: specific values to check against when
+ parsing the file (usually port.test_config(),
+ but may be different when linting or doing other things).
is_lint_mode: If True, just parse the expectations string
looking for errors.
overrides: test expectations that are allowed to override any
@@ -104,7 +103,7 @@ class TestExpectations:
and downstream expectations).
"""
self._expected_failures = TestExpectationsFile(port, expectations,
- tests, test_platform_name, is_debug_mode, is_lint_mode,
+ tests, test_config, is_lint_mode,
overrides=overrides)
# TODO(ojan): Allow for removing skipped tests when getting the list of
@@ -197,7 +196,7 @@ class ParseError(Exception):
return '\n'.join(map(str, self.errors))
def __repr__(self):
- return 'ParseError(fatal=%s, errors=%s)' % (fatal, errors)
+ return 'ParseError(fatal=%s, errors=%s)' % (self.fatal, self.errors)
class ModifiersAndExpectations:
@@ -302,29 +301,15 @@ class TestExpectationsFile:
'fail': FAIL,
'flaky': FLAKY}
- def __init__(self, port, expectations, full_test_list, test_platform_name,
- is_debug_mode, is_lint_mode, overrides=None):
- """
- expectations: Contents of the expectations file
- full_test_list: The list of all tests to be run pending processing of
- the expections for those tests.
- test_platform_name: name of the platform to match expectations
- against. Note that this may be different than
- port.test_platform_name() when is_lint_mode is True.
- is_debug_mode: Whether we testing a test_shell built debug mode.
- is_lint_mode: Whether this is just linting test_expecatations.txt.
- overrides: test expectations that are allowed to override any
- entries in |expectations|. This is used by callers
- that need to manage two sets of expectations (e.g., upstream
- and downstream expectations).
- """
+ def __init__(self, port, expectations, full_test_list,
+ test_config, is_lint_mode, overrides=None):
+ # See argument documentation in TestExpectations(), above.
self._port = port
self._fs = port._filesystem
self._expectations = expectations
self._full_test_list = full_test_list
- self._test_platform_name = test_platform_name
- self._is_debug_mode = is_debug_mode
+ self._test_config = test_config
self._is_lint_mode = is_lint_mode
self._overrides = overrides
self._errors = []
@@ -332,7 +317,9 @@ class TestExpectationsFile:
# Maps relative test paths as listed in the expectations file to a
# list of maps containing modifiers and expectations for each time
- # the test is listed in the expectations file.
+ # the test is listed in the expectations file. We use this to
+ # keep a representation of the entire list of expectations, even
+ # invalid ones.
self._all_expectations = {}
# Maps a test to its list of expectations.
@@ -345,7 +332,8 @@ class TestExpectationsFile:
# the options minus any bug or platform strings
self._test_to_modifiers = {}
- # Maps a test to the base path that it was listed with in the list.
+ # Maps a test to the base path that it was listed with in the list and
+ # the number of matches that base path had.
self._test_list_paths = {}
self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
@@ -372,13 +360,7 @@ class TestExpectationsFile:
def _handle_any_read_errors(self):
if len(self._errors) or len(self._non_fatal_errors):
- if self._is_debug_mode:
- build_type = 'DEBUG'
- else:
- build_type = 'RELEASE'
- _log.error('')
- _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
- (self._test_platform_name.upper(), build_type))
+ _log.error("FAILURES FOR %s" % str(self._test_config))
for error in self._errors:
_log.error(error)
@@ -394,11 +376,12 @@ class TestExpectationsFile:
expectations = set([PASS])
options = []
modifiers = []
+ num_matches = 0
if self._full_test_list:
for test in self._full_test_list:
if not test in self._test_list_paths:
- self._add_test(test, modifiers, expectations, options,
- overrides_allowed=False)
+ self._add_test(test, modifiers, num_matches, expectations,
+ options, overrides_allowed=False)
def _dict_of_sets(self, strings_to_constants):
"""Takes a dict of strings->constants and returns a dict mapping
@@ -505,7 +488,8 @@ class TestExpectationsFile:
_log.info(' new: %s', new_line)
elif action == ADD_PLATFORMS_EXCEPT_THIS:
parts = line.split(':')
- new_options = parts[0]
+ _log.info('Test updated: ')
+ _log.info(' old: %s', line)
for p in self._port.test_platform_names():
p = p.upper()
# This is a temp solution for rebaselining tool.
@@ -515,13 +499,11 @@ class TestExpectationsFile:
# TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
# reliable Win 7 and Win Vista buildbots setup.
if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
- new_options += p + ' '
- new_line = ('%s:%s' % (new_options, parts[1]))
- f_new.append(new_line)
+ new_options = parts[0] + p + ' '
+ new_line = ('%s:%s' % (new_options, parts[1]))
+ f_new.append(new_line)
+ _log.info(' new: %s', new_line)
tests_updated += 1
- _log.info('Test updated: ')
- _log.info(' old: %s', line)
- _log.info(' new: %s', new_line)
_log.info('Total tests removed: %d', tests_removed)
_log.info('Total tests updated: %d', tests_updated)
@@ -537,12 +519,15 @@ class TestExpectationsFile:
options = []
if line.find(":") is -1:
- test_and_expectation = line.split("=")
- else:
- parts = line.split(":")
- options = self._get_options_list(parts[0])
- test_and_expectation = parts[1].split('=')
+ self._add_error(lineno, "Missing a ':'", line)
+ return (None, None, None)
+ parts = line.split(':')
+
+ # FIXME: verify that there is exactly one colon in the line.
+
+ options = self._get_options_list(parts[0])
+ test_and_expectation = parts[1].split('=')
test = test_and_expectation[0].strip()
if (len(test_and_expectation) is not 2):
self._add_error(lineno, "Missing expectations.",
@@ -588,69 +573,6 @@ class TestExpectationsFile:
return REMOVE_TEST
- def _has_valid_modifiers_for_current_platform(self, options, lineno,
- test_and_expectations, modifiers):
- """Returns true if the current platform is in the options list or if
- no platforms are listed and if there are no fatal errors in the
- options list.
-
- Args:
- options: List of lowercase options.
- lineno: The line in the file where the test is listed.
- test_and_expectations: The path and expectations for the test.
- modifiers: The set to populate with modifiers.
- """
- has_any_platform = False
- has_bug_id = False
- for option in options:
- if option in self.MODIFIERS:
- modifiers.add(option)
- elif option in self._port.test_platform_names():
- has_any_platform = True
- elif re.match(r'bug\d', option) != None:
- self._add_error(lineno, 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: %s' %
- option, test_and_expectations)
- elif option.startswith('bug'):
- has_bug_id = True
- elif option not in self.BUILD_TYPES:
- self._add_error(lineno, 'Invalid modifier for test: %s' %
- option, test_and_expectations)
-
- if has_any_platform and not self._match_platform(options):
- return False
-
- if not has_bug_id and 'wontfix' not in options:
- # TODO(ojan): Turn this into an AddError call once all the
- # tests have BUG identifiers.
- self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
- test_and_expectations)
-
- if 'release' in options or 'debug' in options:
- if self._is_debug_mode and 'debug' not in options:
- return False
- if not self._is_debug_mode and 'release' not in options:
- return False
-
- if self._is_lint_mode and 'rebaseline' in options:
- self._add_error(lineno,
- 'REBASELINE should only be used for running rebaseline.py. '
- 'Cannot be checked in.', test_and_expectations)
-
- return True
-
- def _match_platform(self, options):
- """Match the list of options against our specified platform. If any
- of the options prefix-match self._platform, return True. This handles
- the case where a test is marked WIN and the platform is WIN-VISTA.
-
- Args:
- options: list of options
- """
- for opt in options:
- if self._test_platform_name.startswith(opt):
- return True
- return False
-
def _add_to_all_expectations(self, test, options, expectations):
# Make all paths unix-style so the dashboard doesn't need to.
test = test.replace('\\', '/')
@@ -663,54 +585,43 @@ class TestExpectationsFile:
"""For each test in an expectations iterable, generate the
expectations for it."""
lineno = 0
+ matcher = ModifierMatcher(self._test_config)
for line in expectations:
lineno += 1
+ self._process_line(line, lineno, matcher, overrides_allowed)
- test_list_path, options, expectations = \
- self.parse_expectations_line(line, lineno)
- if not expectations:
- continue
+ def _process_line(self, line, lineno, matcher, overrides_allowed):
+ test_list_path, options, expectations = \
+ self.parse_expectations_line(line, lineno)
+ if not expectations:
+ return
- self._add_to_all_expectations(test_list_path,
- " ".join(options).upper(),
- " ".join(expectations).upper())
+ self._add_to_all_expectations(test_list_path,
+ " ".join(options).upper(),
+ " ".join(expectations).upper())
- modifiers = set()
- if options and not self._has_valid_modifiers_for_current_platform(
- options, lineno, test_list_path, modifiers):
- continue
+ num_matches = self._check_options(matcher, options, lineno,
+ test_list_path)
+ if num_matches == ModifierMatcher.NO_MATCH:
+ return
- expectations = self._parse_expectations(expectations, lineno,
- test_list_path)
+ expectations = self._parse_expectations(expectations, lineno,
+ test_list_path)
- if 'slow' in options and TIMEOUT in expectations:
- self._add_error(lineno,
- 'A test can not be both slow and timeout. If it times out '
- 'indefinitely, then it should be just timeout.',
- test_list_path)
+ self._check_options_against_expectations(options, expectations,
+ lineno, test_list_path)
- full_path = self._fs.join(self._port.layout_tests_dir(),
- test_list_path)
- full_path = self._fs.normpath(full_path)
- # WebKit's way of skipping tests is to add a -disabled suffix.
- # So we should consider the path existing if the path or the
- # -disabled version exists.
- if (not self._port.path_exists(full_path)
- and not self._port.path_exists(full_path + '-disabled')):
- # Log a non fatal error here since you hit this case any
- # time you update test_expectations.txt without syncing
- # the LayoutTests directory
- self._log_non_fatal_error(lineno, 'Path does not exist.',
- test_list_path)
- continue
+ if self._check_path_does_not_exist(lineno, test_list_path):
+ return
- if not self._full_test_list:
- tests = [test_list_path]
- else:
- tests = self._expand_tests(test_list_path)
+ if not self._full_test_list:
+ tests = [test_list_path]
+ else:
+ tests = self._expand_tests(test_list_path)
- self._add_tests(tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed)
+ modifiers = [o for o in options if o in self.MODIFIERS]
+ self._add_tests(tests, expectations, test_list_path, lineno,
+ modifiers, num_matches, options, overrides_allowed)
def _get_options_list(self, listString):
return [part.strip().lower() for part in listString.strip().split(' ')]
@@ -726,6 +637,65 @@ class TestExpectationsFile:
result.add(expectation)
return result
+ def _check_options(self, matcher, options, lineno, test_list_path):
+ match_result = self._check_syntax(matcher, options, lineno,
+ test_list_path)
+ self._check_semantics(options, lineno, test_list_path)
+ return match_result.num_matches
+
+ def _check_syntax(self, matcher, options, lineno, test_list_path):
+ match_result = matcher.match(options)
+ for error in match_result.errors:
+ self._add_error(lineno, error, test_list_path)
+ for warning in match_result.warnings:
+ self._log_non_fatal_error(lineno, warning, test_list_path)
+ return match_result
+
+ def _check_semantics(self, options, lineno, test_list_path):
+ has_wontfix = 'wontfix' in options
+ has_bug = False
+ for opt in options:
+ if opt.startswith('bug'):
+ has_bug = True
+ if re.match('bug\d+', opt):
+ self._add_error(lineno,
+ 'BUG\d+ is not allowed, must be one of '
+ 'BUGCR\d+, BUGWK\d+, BUGV8_\d+, '
+ 'or a non-numeric bug identifier.', test_list_path)
+
+ if not has_bug and not has_wontfix:
+ self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
+ test_list_path)
+
+ if self._is_lint_mode and 'rebaseline' in options:
+ self._add_error(lineno,
+ 'REBASELINE should only be used for running rebaseline.py. '
+ 'Cannot be checked in.', test_list_path)
+
+ def _check_options_against_expectations(self, options, expectations,
+ lineno, test_list_path):
+ if 'slow' in options and TIMEOUT in expectations:
+ self._add_error(lineno,
+ 'A test can not be both SLOW and TIMEOUT. If it times out '
+ 'indefinitely, then it should be just TIMEOUT.', test_list_path)
+
+ def _check_path_does_not_exist(self, lineno, test_list_path):
+ full_path = self._fs.join(self._port.layout_tests_dir(),
+ test_list_path)
+ full_path = self._fs.normpath(full_path)
+ # WebKit's way of skipping tests is to add a -disabled suffix.
+ # So we should consider the path existing if the path or the
+ # -disabled version exists.
+ if (not self._port.path_exists(full_path)
+ and not self._port.path_exists(full_path + '-disabled')):
+ # Log a non fatal error here since you hit this case any
+ # time you update test_expectations.txt without syncing
+ # the LayoutTests directory
+ self._log_non_fatal_error(lineno, 'Path does not exist.',
+ test_list_path)
+ return True
+ return False
+
def _expand_tests(self, test_list_path):
"""Convert the test specification to an absolute, normalized
path and make sure directories end with the OS path separator."""
@@ -751,27 +721,30 @@ class TestExpectationsFile:
return result
def _add_tests(self, tests, expectations, test_list_path, lineno,
- modifiers, options, overrides_allowed):
+ modifiers, num_matches, options, overrides_allowed):
for test in tests:
- if self._already_seen_test(test, test_list_path, lineno,
- overrides_allowed):
+ if self._already_seen_better_match(test, test_list_path,
+ num_matches, lineno, overrides_allowed):
continue
self._clear_expectations_for_test(test, test_list_path)
- self._add_test(test, modifiers, expectations, options,
+ self._test_list_paths[test] = (self._fs.normpath(test_list_path),
+ num_matches, lineno)
+ self._add_test(test, modifiers, num_matches, expectations, options,
overrides_allowed)
- def _add_test(self, test, modifiers, expectations, options,
+ def _add_test(self, test, modifiers, num_matches, expectations, options,
overrides_allowed):
"""Sets the expected state for a given test.
This routine assumes the test has not been added before. If it has,
- use _ClearExpectationsForTest() to reset the state prior to
+ use _clear_expectations_for_test() to reset the state prior to
calling this.
Args:
test: test to add
modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
+ num_matches: number of modifiers that matched the configuration
expectations: sequence of expectations (PASS, IMAGE, etc.)
options: sequence of keywords and bug identifiers.
overrides_allowed: whether we're parsing the regular expectations
@@ -828,32 +801,70 @@ class TestExpectationsFile:
if test in set_of_tests:
set_of_tests.remove(test)
- def _already_seen_test(self, test, test_list_path, lineno,
- allow_overrides):
- """Returns true if we've already seen a more precise path for this test
- than the test_list_path.
+ def _already_seen_better_match(self, test, test_list_path, num_matches,
+ lineno, overrides_allowed):
+ """Returns whether we've seen a better match already in the file.
+
+ Returns True if we've already seen a test_list_path that matches more of the test
+ than this path does.
"""
+ # FIXME: See comment below about matching test configs and num_matches.
+
if not test in self._test_list_paths:
+ # We've never seen this test before.
return False
- prev_base_path = self._test_list_paths[test]
- if (prev_base_path == self._fs.normpath(test_list_path)):
- if (not allow_overrides or test in self._overridding_tests):
- if allow_overrides:
- expectation_source = "override"
- else:
- expectation_source = "expectation"
- self._add_error(lineno, 'Duplicate %s.' % expectation_source,
- test)
- return True
- else:
- # We have seen this path, but that's okay because its
- # in the overrides and the earlier path was in the
- # expectations.
- return False
+ prev_base_path, prev_num_matches, prev_lineno = self._test_list_paths[test]
+ base_path = self._fs.normpath(test_list_path)
+
+ if len(prev_base_path) > len(base_path):
+ # The previous path matched more of the test.
+ return True
+
+ if len(prev_base_path) < len(base_path):
+ # This path matches more of the test.
+ return False
+
+ if overrides_allowed and test not in self._overridding_tests:
+ # We have seen this path, but that's okay because it is
+ # in the overrides and the earlier path was in the
+ # expectations (not the overrides).
+ return False
+
+ # At this point we know we have seen a previous exact match on this
+ # base path, so we need to check the two sets of modifiers.
- # Check if we've already seen a more precise path.
- return prev_base_path.startswith(self._fs.normpath(test_list_path))
+ if overrides_allowed:
+ expectation_source = "override"
+ else:
+ expectation_source = "expectation"
+
+ # FIXME: This code was originally designed to allow lines that matched
+ # more modifiers to override lines that matched fewer modifiers.
+ # However, we currently view these as errors. If we decide to make
+ # this policy permanent, we can probably simplify this code
+ # and the ModifierMatcher code a fair amount.
+ #
+ # To use the "more modifiers wins" policy, change the "_add_error" lines for overrides
+ # to _log_non_fatal_error() and change the commented-out "return False".
+
+ if prev_num_matches == num_matches:
+ self._add_error(lineno,
+ 'Duplicate or ambiguous %s.' % expectation_source,
+ test)
+ return True
+
+ if prev_num_matches < num_matches:
+ self._add_error(lineno,
+ 'More specific entry on line %d overrides line %d' %
+ (lineno, prev_lineno), test_list_path)
+ # FIXME: return False if we want more specific to win.
+ return True
+
+ self._add_error(lineno,
+ 'More specific entry on line %d overrides line %d' %
+ (prev_lineno, lineno), test_list_path)
+ return True
def _add_error(self, lineno, msg, path):
"""Reports an error that will prevent running the tests. Does not
@@ -865,3 +876,188 @@ class TestExpectationsFile:
"""Reports an error that will not prevent running the tests. These are
still errors, but not bad enough to warrant breaking test running."""
self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
+
+
+class ModifierMatchResult(object):
+ def __init__(self, options):
+ self.num_matches = ModifierMatcher.NO_MATCH
+ self.options = options
+ self.errors = []
+ self.warnings = []
+ self.modifiers = []
+ self._matched_regexes = set()
+ self._matched_macros = set()
+
+
+class ModifierMatcher(object):
+
+ """
+ This class manages the interpretation of the "modifiers" for a given
+ line in the expectations file. Modifiers are the tokens that appear to the
+ left of the colon on a line. For example, "BUG1234", "DEBUG", and "WIN" are
+ all modifiers. This class gets what the valid modifiers are, and which
+ modifiers are allowed to exist together on a line, from the
+ TestConfiguration object that is passed in to the call.
+
+ This class detects *intra*-line errors like unknown modifiers, but
+ does not detect *inter*-line modifiers like duplicate expectations.
+
+ More importantly, this class is also used to determine if a given line
+ matches the port in question. Matches are ranked according to the number
+ of modifiers that match on a line. A line with no modifiers matches
+ everything and has a score of zero. A line with one modifier matches only
+ ports that have that modifier and gets a score of 1, and so one. Ports
+ that don't match at all get a score of -1.
+
+ Given two lines in a file that apply to the same test, if both expectations
+ match the current config, then the expectation is considered ambiguous,
+ even if one expectation matches more of the config than the other. For
+ example, in:
+
+ BUG1 RELEASE : foo.html = FAIL
+ BUG1 WIN RELEASE : foo.html = PASS
+ BUG2 WIN : bar.html = FAIL
+ BUG2 DEBUG : bar.html = PASS
+
+ lines 1 and 2 would produce an error on a Win XP Release bot (the scores
+ would be 1 and 2, respectively), and lines 3 and 4 would produce
+ a duplicate expectation on a Win Debug bot since both the 'win' and the
+ 'debug' expectations would apply (both had scores of 1).
+
+ In addition to the definitions of all of the modifiers, the class
+ supports "macros" that are expanded prior to interpretation, and "ignore
+ regexes" that can be used to skip over modifiers like the BUG* modifiers.
+ """
+ MACROS = {
+ 'mac-snowleopard': ['mac', 'snowleopard'],
+ 'mac-leopard': ['mac', 'leopard'],
+ 'win-xp': ['win', 'xp'],
+ 'win-vista': ['win', 'vista'],
+ 'win-7': ['win', 'win7'],
+ }
+
+ # We don't include the "none" modifier because it isn't actually legal.
+ REGEXES_TO_IGNORE = (['bug\w+'] +
+ [modifier for modifier in TestExpectationsFile.MODIFIERS.keys()
+ if modifier != 'none'])
+ DUPLICATE_REGEXES_ALLOWED = ['bug\w+']
+
+ # Magic value returned when the options don't match.
+ NO_MATCH = -1
+
+ # FIXME: The code currently doesn't detect combinations of modifiers
+ # that are syntactically valid but semantically invalid, like
+ # 'MAC XP'. See ModifierMatchTest.test_invalid_combinations() in the
+ # _unittest.py file.
+
+ def __init__(self, test_config):
+ """Initialize a ModifierMatcher argument with the TestConfiguration it
+ should be matched against."""
+ self.test_config = test_config
+ self.allowed_configurations = test_config.all_test_configurations()
+ self.macros = self.MACROS
+
+ self.regexes_to_ignore = {}
+ for regex_str in self.REGEXES_TO_IGNORE:
+ self.regexes_to_ignore[regex_str] = re.compile(regex_str)
+
+ # Keep a set of all of the legal modifiers for quick checking.
+ self._all_modifiers = set()
+
+ # Keep a dict mapping values back to their categories.
+ self._categories_for_modifiers = {}
+ for config in self.allowed_configurations:
+ for category, modifier in config.items():
+ self._categories_for_modifiers[modifier] = category
+ self._all_modifiers.add(modifier)
+
+ def match(self, options):
+ """Checks a list of options against the config set in the constructor.
+ Options may be either actual modifier strings, "macro" strings
+ that get expanded to a list of modifiers, or strings that are allowed
+ to be ignored. All of the options must be passed in lower case.
+
+ Returns a ModifierMatchResult object. Its num_matches field holds the
+ number of matching categories, or NO_MATCH (-1) if the options do not
+ match the config or if errors were found. Matches are prioritized by
+ the number of matching categories, because the more specific the
+ options list is, the more categories will match.
+
+ The 'options', 'modifiers', 'num_matches', 'errors', and 'warnings'
+ of the most recent match are available on the returned result.
+ """
+ result = ModifierMatchResult(options)
+ self._parse(result)
+ if result.errors:
+ return result
+ self._count_matches(result)
+ return result
+
+ def _parse(self, result):
+ # FIXME: Should we warn about lines having every value in a category?
+ for option in result.options:
+ self._parse_one(option, result)
+
+ def _parse_one(self, option, result):
+ if option in self._all_modifiers:
+ self._add_modifier(option, result)
+ elif option in self.macros:
+ self._expand_macro(option, result)
+ elif not self._matches_any_regex(option, result):
+ result.errors.append("Unrecognized option '%s'" % option)
+
+ def _add_modifier(self, option, result):
+ if option in result.modifiers:
+ result.errors.append("More than one '%s'" % option)
+ else:
+ result.modifiers.append(option)
+
+ def _expand_macro(self, macro, result):
+ if macro in result._matched_macros:
+ result.errors.append("More than one '%s'" % macro)
+ return
+
+ mods = []
+ for modifier in self.macros[macro]:
+ if modifier in result.options:
+ result.errors.append("Can't specify both modifier '%s' and "
+ "macro '%s'" % (modifier, macro))
+ else:
+ mods.append(modifier)
+ result._matched_macros.add(macro)
+ result.modifiers.extend(mods)
+
+ def _matches_any_regex(self, option, result):
+ for regex_str, pattern in self.regexes_to_ignore.iteritems():
+ if pattern.match(option):
+ self._handle_regex_match(regex_str, result)
+ return True
+ return False
+
+ def _handle_regex_match(self, regex_str, result):
+ if (regex_str in result._matched_regexes and
+ regex_str not in self.DUPLICATE_REGEXES_ALLOWED):
+ result.errors.append("More than one option matching '%s'" %
+ regex_str)
+ else:
+ result._matched_regexes.add(regex_str)
+
+ def _count_matches(self, result):
+ """Returns the number of modifiers that match the test config."""
+ categorized_modifiers = self._group_by_category(result.modifiers)
+ result.num_matches = 0
+ for category, modifier in self.test_config.items():
+ if category in categorized_modifiers:
+ if modifier in categorized_modifiers[category]:
+ result.num_matches += 1
+ else:
+ result.num_matches = self.NO_MATCH
+ return
+
+ def _group_by_category(self, modifiers):
+ # Returns a dict of category name -> list of modifiers.
+ modifiers_by_category = {}
+ for m in modifiers:
+ modifiers_by_category.setdefault(self._category(m), []).append(m)
+ return modifiers_by_category
+
+ def _category(self, modifier):
+ return self._categories_for_modifiers[modifier]
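
The scoring rule that ModifierMatcher applies can be paraphrased without the real TestConfiguration object. In the sketch below a plain dict stands in for the configuration, and the category names ('os', 'version', 'build_type') are invented for illustration; only the counting logic mirrors _count_matches() above:

    NO_MATCH = -1

    def score(config, modifiers_by_category):
        """Count how many config categories are matched; NO_MATCH on conflict."""
        num_matches = 0
        for category, value in config.items():
            if category in modifiers_by_category:
                if value in modifiers_by_category[category]:
                    num_matches += 1
                else:
                    return NO_MATCH
        return num_matches

    win_xp_release = {'os': 'win', 'version': 'xp', 'build_type': 'release'}
    print score(win_xp_release, {'build_type': ['release']})                 # 1
    print score(win_xp_release, {'os': ['win'], 'build_type': ['release']})  # 2
    print score(win_xp_release, {'build_type': ['debug']})                   # NO_MATCH

This is why, in the class docstring above, 'BUG1 RELEASE' and 'BUG1 WIN RELEASE' both match a Win XP Release bot with scores 1 and 2, and the pair is reported as an error rather than letting the more specific line win.
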
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
index 8f9e5dd..05d805d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -32,6 +32,7 @@
import unittest
from webkitpy.layout_tests import port
+from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.layout_package.test_expectations import *
class FunctionsTest(unittest.TestCase):
@@ -78,8 +79,11 @@ class FunctionsTest(unittest.TestCase):
class Base(unittest.TestCase):
+ # Note that all of these tests are written assuming the configuration
+ # being tested is Windows XP, Release build.
+
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
- self._port = port.get('test', None)
+ self._port = port.get('test-win-xp', None)
self._fs = self._port._filesystem
self._exp = None
unittest.TestCase.__init__(self, testFunc)
@@ -101,16 +105,15 @@ BUG_TEST : failures/expected/text.html = TEXT
BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
-BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
+BUG_TEST WONTFIX MAC : failures/expected/image.html = IMAGE
"""
- def parse_exp(self, expectations, overrides=None, is_lint_mode=False,
- is_debug_mode=False):
+ def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
+ test_config = self._port.test_configuration()
self._exp = TestExpectations(self._port,
tests=self.get_basic_tests(),
expectations=expectations,
- test_platform_name=self._port.test_platform_name(),
- is_debug_mode=is_debug_mode,
+ test_config=test_config,
is_lint_mode=is_lint_mode,
overrides=overrides)
@@ -119,7 +122,7 @@ BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE
set([result]))
-class TestExpectationsTest(Base):
+class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', TEXT)
@@ -127,23 +130,14 @@ class TestExpectationsTest(Base):
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
+
+class MiscTests(Base):
def test_multiple_results(self):
self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([TEXT, CRASH]))
- def test_precedence(self):
- # This tests handling precedence of specific lines over directories
- # and tests expectations covering entire directories.
- exp_str = """
-BUGX : failures/expected/text.html = TEXT
-BUGX WONTFIX : failures/expected = IMAGE
-"""
- self.parse_exp(exp_str)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.assert_exp('failures/expected/crash.html', IMAGE)
-
def test_category_expectations(self):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
@@ -158,20 +152,6 @@ BUGX WONTFIX : failures/expected = IMAGE
unknown_test)
self.assert_exp('failures/expected/crash.html', IMAGE)
- def test_release_mode(self):
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', TEXT)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=True)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', PASS)
- self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT',
- is_debug_mode=False)
- self.assert_exp('failures/expected/text.html', TEXT)
-
def test_get_options(self):
self.parse_exp(self.get_basic_expectations())
self.assertEqual(self._exp.get_options(
@@ -216,7 +196,7 @@ SKIP : failures/expected/image.html""")
self.assertFalse(True, "ParseError wasn't raised")
except ParseError, e:
self.assertTrue(e.fatal)
- exp_errors = [u'Line:1 Invalid modifier for test: foo failures/expected/text.html',
+ exp_errors = [u"Line:1 Unrecognized option 'foo' failures/expected/text.html",
u"Line:2 Missing expectations. [' failures/expected/image.html']"]
self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
self.assertEqual(e.errors, exp_errors)
@@ -232,77 +212,167 @@ SKIP : failures/expected/image.html""")
self.assertEqual(str(e), '\n'.join(map(str, exp_errors)))
self.assertEqual(e.errors, exp_errors)
- def test_syntax_missing_expectation(self):
+ def test_overrides(self):
+ self.parse_exp("BUG_EXP: failures/expected/text.html = TEXT",
+ "BUG_OVERRIDE : failures/expected/text.html = IMAGE")
+ self.assert_exp('failures/expected/text.html', IMAGE)
+
+ def test_overrides__duplicate(self):
+ self.assertRaises(ParseError, self.parse_exp,
+ "BUG_EXP: failures/expected/text.html = TEXT",
+ """
+BUG_OVERRIDE : failures/expected/text.html = IMAGE
+BUG_OVERRIDE : failures/expected/text.html = CRASH
+""")
+
+ def test_pixel_tests_flag(self):
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ self.get_test(test), result, pixel_tests_enabled)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('failures/expected/text.html', TEXT, True))
+ self.assertTrue(match('failures/expected/text.html', TEXT, False))
+ self.assertFalse(match('failures/expected/text.html', CRASH, True))
+ self.assertFalse(match('failures/expected/text.html', CRASH, False))
+ self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
+ True))
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS,
+ False))
+ self.assertTrue(match('failures/expected/crash.html', SKIP, False))
+ self.assertTrue(match('passes/text.html', PASS, False))
+
+ def test_more_specific_override_resets_skip(self):
+ self.parse_exp("BUGX SKIP : failures/expected = TEXT\n"
+ "BUGX : failures/expected/text.html = IMAGE\n")
+ self.assert_exp('failures/expected/text.html', IMAGE)
+ self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
+ 'failures/expected/text.html') in
+ self._exp.get_tests_with_result_type(SKIP))
+
+class ExpectationSyntaxTests(Base):
+ def test_missing_expectation(self):
# This is missing the expectation.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html',
- is_debug_mode=True)
+ 'BUG_TEST: failures/expected/text.html')
- def test_syntax_invalid_option(self):
+ def test_missing_colon(self):
+ # This is missing the modifiers and the ':'
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST FOO: failures/expected/text.html = PASS')
+ 'failures/expected/text.html = TEXT')
- def test_syntax_invalid_expectation(self):
- # This is missing the expectation.
+ def disabled_test_too_many_colons(self):
+ # FIXME: Enable this test and fix the underlying bug.
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html = PASS :')
+
+ def test_too_many_equals_signs(self):
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST: failures/expected/text.html = FOO')
+ 'BUG_TEST: failures/expected/text.html = TEXT = IMAGE')
+
+ def test_unrecognized_expectation(self):
+ self.assertRaises(ParseError, self.parse_exp,
+ 'BUG_TEST: failures/expected/text.html = UNKNOWN')
+
+ def test_macro(self):
+ exp_str = """
+BUG_TEST WIN-XP : failures/expected/text.html = TEXT
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+
+
+class SemanticTests(Base):
+ def test_bug_format(self):
+ self.assertRaises(ParseError, self.parse_exp, 'BUG1234 : failures/expected/text.html = TEXT')
- def test_syntax_missing_bugid(self):
+ def test_missing_bugid(self):
# This should log a non-fatal error.
self.parse_exp('SLOW : failures/expected/text.html = TEXT')
self.assertEqual(
len(self._exp._expected_failures.get_non_fatal_errors()), 1)
- def test_semantic_slow_and_timeout(self):
+ def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
self.assertRaises(ParseError, self.parse_exp,
'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT')
- def test_semantic_rebaseline(self):
+ def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
'BUG_TEST REBASELINE : failures/expected/text.html = TEXT',
is_lint_mode=True)
- def test_semantic_duplicates(self):
+ def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+BUG_EXP : failures/expected/text.html = TEXT
+BUG_EXP : failures/expected/text.html = IMAGE""")
self.assertRaises(ParseError, self.parse_exp,
- self.get_basic_expectations(), """
-BUG_TEST : failures/expected/text.html = TEXT
-BUG_TEST : failures/expected/text.html = IMAGE""")
+ self.get_basic_expectations(), overrides="""
+BUG_OVERRIDE : failures/expected/text.html = TEXT
+BUG_OVERRIDE : failures/expected/text.html = IMAGE""", )
- def test_semantic_missing_file(self):
+ def test_missing_file(self):
# This should log a non-fatal error.
self.parse_exp('BUG_TEST : missing_file.html = TEXT')
self.assertEqual(
len(self._exp._expected_failures.get_non_fatal_errors()), 1)
- def test_overrides(self):
- self.parse_exp(self.get_basic_expectations(), """
-BUG_OVERRIDE : failures/expected/text.html = IMAGE""")
- self.assert_exp('failures/expected/text.html', IMAGE)
+class PrecedenceTests(Base):
+ def test_file_over_directory(self):
+ # This tests that a line for a specific test takes precedence over a
+ # line whose expectation covers the test's entire directory.
+ exp_str = """
+BUGX : failures/expected/text.html = TEXT
+BUGX WONTFIX : failures/expected = IMAGE
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
- def test_matches_an_expected_result(self):
+ exp_str = """
+BUGX WONTFIX : failures/expected = IMAGE
+BUGX : failures/expected/text.html = TEXT
+"""
+ self.parse_exp(exp_str)
+ self.assert_exp('failures/expected/text.html', TEXT)
+ self.assert_exp('failures/expected/crash.html', IMAGE)
- def match(test, result, pixel_tests_enabled):
- return self._exp.matches_an_expected_result(
- self.get_test(test), result, pixel_tests_enabled)
+ def test_ambiguous(self):
+ self.assertRaises(ParseError, self.parse_exp, """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST WIN : passes/text.html = FAIL
+""")
- self.parse_exp(self.get_basic_expectations())
- self.assertTrue(match('failures/expected/text.html', TEXT, True))
- self.assertTrue(match('failures/expected/text.html', TEXT, False))
- self.assertFalse(match('failures/expected/text.html', CRASH, True))
- self.assertFalse(match('failures/expected/text.html', CRASH, False))
- self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
- True))
- self.assertTrue(match('failures/expected/image_checksum.html', PASS,
- False))
- self.assertTrue(match('failures/expected/crash.html', SKIP, False))
- self.assertTrue(match('passes/text.html', PASS, False))
+ def test_more_modifiers(self):
+ exp_str = """
+BUG_TEST RELEASE : passes/text.html = PASS
+BUG_TEST WIN RELEASE : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_order_in_file(self):
+ exp_str = """
+BUG_TEST WIN RELEASE : passes/text.html = TEXT
+BUG_TEST RELEASE : passes/text.html = PASS
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_version_overrides(self):
+ exp_str = """
+BUG_TEST WIN : passes/text.html = PASS
+BUG_TEST WIN XP : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
+
+ def test_macro_overrides(self):
+ exp_str = """
+BUG_TEST WIN : passes/text.html = PASS
+BUG_TEST WIN-XP : passes/text.html = TEXT
+"""
+ self.assertRaises(ParseError, self.parse_exp, exp_str)
class RebaseliningTest(Base):
@@ -327,7 +397,8 @@ BUG_TEST REBASELINE : failures/expected/text.html = TEXT
def test_remove_expand(self):
self.assertRemove('mac',
'BUGX REBASELINE : failures/expected/text.html = TEXT\n',
- 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n')
+ 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n'
+ 'BUGX REBASELINE WIN-XP : failures/expected/text.html = TEXT\n')
def test_remove_mac_win(self):
self.assertRemove('mac',
@@ -345,5 +416,85 @@ BUG_TEST REBASELINE : failures/expected/text.html = TEXT
'\n\n')
+class ModifierTests(unittest.TestCase):
+ def setUp(self):
+ port_obj = port.get('test-win-xp', None)
+ self.config = port_obj.test_configuration()
+ self.matcher = ModifierMatcher(self.config)
+
+ def match(self, modifiers, expected_num_matches=-1, values=None, num_errors=0):
+ matcher = self.matcher
+ if values:
+ matcher = ModifierMatcher(self.FakeTestConfiguration(values))
+ match_result = matcher.match(modifiers)
+ self.assertEqual(len(match_result.warnings), 0)
+ self.assertEqual(len(match_result.errors), num_errors)
+ self.assertEqual(match_result.num_matches, expected_num_matches,
+ 'match(%s, %s) returned -> %d, expected %d' %
+ (modifiers, str(self.config.values()),
+ match_result.num_matches, expected_num_matches))
+
+ def test_bad_match_modifier(self):
+ self.match(['foo'], num_errors=1)
+
+ def test_none(self):
+ self.match([], 0)
+
+ def test_one(self):
+ self.match(['xp'], 1)
+ self.match(['win'], 1)
+ self.match(['release'], 1)
+ self.match(['cpu'], 1)
+ self.match(['x86'], 1)
+ self.match(['leopard'], -1)
+ self.match(['gpu'], -1)
+ self.match(['debug'], -1)
+
+ def test_two(self):
+ self.match(['xp', 'release'], 2)
+ self.match(['win7', 'release'], -1)
+ self.match(['win7', 'xp'], 1)
+
+ def test_three(self):
+ self.match(['win7', 'xp', 'release'], 2)
+ self.match(['xp', 'debug', 'x86'], -1)
+ self.match(['xp', 'release', 'x86'], 3)
+ self.match(['xp', 'cpu', 'release'], 3)
+
+ def test_four(self):
+ self.match(['xp', 'release', 'cpu', 'x86'], 4)
+ self.match(['win7', 'xp', 'release', 'cpu'], 3)
+ self.match(['win7', 'xp', 'debug', 'cpu'], -1)
+
+ def test_case_insensitivity(self):
+ self.match(['Win'], num_errors=1)
+ self.match(['WIN'], num_errors=1)
+ self.match(['win'], 1)
+
+ def test_duplicates(self):
+ self.match(['release', 'release'], num_errors=1)
+ self.match(['win-xp', 'xp'], num_errors=1)
+ self.match(['win-xp', 'win-xp'], num_errors=1)
+ self.match(['xp', 'release', 'xp', 'release'], num_errors=2)
+ self.match(['rebaseline', 'rebaseline'], num_errors=1)
+
+ def test_unknown_option(self):
+ self.match(['vms'], num_errors=1)
+
+ def test_duplicate_bugs(self):
+ # BUG* regexes can appear multiple times.
+ self.match(['bugfoo', 'bugbar'], 0)
+
+ def test_invalid_combinations(self):
+ # FIXME: This should probably raise an error instead of NO_MATCH.
+ self.match(['mac', 'xp'], num_errors=0)
+
+ def test_regexes_are_ignored(self):
+ self.match(['bug123xy', 'rebaseline', 'wontfix', 'slow', 'skip'], 0)
+
+ def test_none_is_invalid(self):
+ self.match(['none'], num_errors=1)
+
+
if __name__ == '__main__':
unittest.main()
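The ModifierTests above pin down how a ModifierMatcher scores a line's modifiers against the current test configuration. The following is a minimal, self-contained sketch of those rules, not the actual ModifierMatcher implementation: the category tables, the shape of the config dict, and the omission of macro expansion (e.g. WIN-XP) and of duplicate checks for ignored modifiers are assumptions made for illustration. Each non-ignored modifier must name a known configuration value, duplicates and unknown names are errors, a referenced category whose values do not include the configuration's value forces NO_MATCH (-1), and each matching category adds one to num_matches.

import re

# Hypothetical category -> values tables; the real matcher derives these from
# the port's TestConfiguration.
CATEGORIES = {
    'version': ['xp', 'vista', 'win7', 'leopard', 'snowleopard'],
    'os': ['win', 'mac', 'linux'],
    'build_type': ['release', 'debug'],
    'graphics_type': ['cpu', 'gpu'],
    'architecture': ['x86', 'x86_64'],
}
IGNORED_MODIFIERS = re.compile(r'^(bug\w+|rebaseline|wontfix|slow|skip)$')
NO_MATCH = -1


def sketch_match(modifiers, config):
    """config maps category names to this port's values, e.g. for test-win-xp:
    {'version': 'xp', 'os': 'win', 'build_type': 'release',
     'graphics_type': 'cpu', 'architecture': 'x86'}.
    Returns (num_matches, errors)."""
    errors = []
    seen = set()
    referenced = {}  # category -> values mentioned on the line
    for modifier in modifiers:
        if IGNORED_MODIFIERS.match(modifier):
            continue  # BUG..., SLOW, SKIP, etc. never affect matching.
        if modifier in seen:
            errors.append('duplicated modifier: %s' % modifier)
            continue
        seen.add(modifier)
        category = next((name for name, values in CATEGORIES.items()
                         if modifier in values), None)
        if category is None:
            errors.append('unrecognized modifier: %s' % modifier)
        else:
            referenced.setdefault(category, []).append(modifier)
    if errors:
        return NO_MATCH, errors
    num_matches = 0
    for category, values in referenced.items():
        if config[category] in values:
            num_matches += 1  # e.g. ['win7', 'xp'] still counts 'version' once
        else:
            return NO_MATCH, []  # e.g. 'leopard' or 'debug' on test-win-xp
    return num_matches, []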
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
index 4b027c0..0aed1dd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py
@@ -41,7 +41,3 @@ class TestInput:
# FIXME: filename should really be test_name as a relative path.
self.filename = filename
self.timeout = timeout
- # The image_hash is used to avoid doing an image dump if the
- # checksums match. The image_hash is set later, and only if it is needed
- # for the test.
- self.image_hash = None
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
deleted file mode 100644
index e809be6..0000000
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-class TestOutput(object):
- """Groups information about a test output for easy passing of data.
-
- This is used not only for a actual test output, but also for grouping
- expected test output.
- """
-
- def __init__(self, text, image, image_hash,
- crash=None, test_time=None, timeout=None, error=None):
- """Initializes a TestOutput object.
-
- Args:
- text: a text output
- image: an image output
- image_hash: a string containing the checksum of the image
- crash: a boolean indicating whether the driver crashed on the test
- test_time: a time which the test has taken
- timeout: a boolean indicating whehter the test timed out
- error: any unexpected or additional (or error) text output
- """
- self.text = text
- self.image = image
- self.image_hash = image_hash
- self.crash = crash
- self.test_time = test_time
- self.timeout = timeout
- self.error = error
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
index 6c07850..e3bd4ad 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py
@@ -214,21 +214,13 @@ class TestRunner:
def lint(self):
lint_failed = False
-
- # Creating the expecations for each platform/configuration pair does
- # all the test list parsing and ensures it's correct syntax (e.g. no
- # dupes).
- for platform_name in self._port.test_platform_names():
- try:
- self.parse_expectations(platform_name, is_debug_mode=True)
- except test_expectations.ParseError:
- lint_failed = True
+ for test_configuration in self._port.all_test_configurations():
try:
- self.parse_expectations(platform_name, is_debug_mode=False)
+ self.lint_expectations(test_configuration)
except test_expectations.ParseError:
lint_failed = True
+ self._printer.write("")
- self._printer.write("")
if lint_failed:
_log.error("Lint failed.")
return -1
@@ -236,22 +228,28 @@ class TestRunner:
_log.info("Lint succeeded.")
return 0
- def parse_expectations(self, test_platform_name, is_debug_mode):
+ def lint_expectations(self, config):
+ port = self._port
+ test_expectations.TestExpectations(
+ port,
+ None,
+ port.test_expectations(),
+ config,
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
+
+ def parse_expectations(self):
"""Parse the expectations from the test_list files and return a data
structure holding them. Throws an error if the test_list files have
invalid syntax."""
- if self._options.lint_test_files:
- test_files = None
- else:
- test_files = self._test_files
-
- expectations_str = self._port.test_expectations()
- overrides_str = self._port.test_expectations_overrides()
+ port = self._port
self._expectations = test_expectations.TestExpectations(
- self._port, test_files, expectations_str, test_platform_name,
- is_debug_mode, self._options.lint_test_files,
- overrides=overrides_str)
- return self._expectations
+ port,
+ self._test_files,
+ port.test_expectations(),
+ port.test_configuration(),
+ self._options.lint_test_files,
+ port.test_expectations_overrides())
# FIXME: This method is way too long and needs to be broken into pieces.
def prepare_lists_and_print_output(self):
@@ -358,9 +356,7 @@ class TestRunner:
self._test_files_list = files + skip_chunk_list
self._test_files = set(self._test_files_list)
- self._expectations = self.parse_expectations(
- self._port.test_platform_name(),
- self._options.configuration == 'Debug')
+ self.parse_expectations()
self._test_files = set(files)
self._test_files_list = files
@@ -691,6 +687,8 @@ class TestRunner:
self._expectations, result_summary, retry_summary)
self._printer.print_unexpected_results(unexpected_results)
+ # FIXME: remove record_results. It's just used for testing. There's no need
+ # for it to be a commandline argument.
if (self._options.record_results and not self._options.dry_run and
not interrupted):
# Write the same data to log files and upload generated JSON files
@@ -731,28 +729,31 @@ class TestRunner:
except Queue.Empty:
return
- expected = self._expectations.matches_an_expected_result(
- result.filename, result.type, self._options.pixel_tests)
- result_summary.add(result, expected)
- exp_str = self._expectations.get_expectations_string(
- result.filename)
- got_str = self._expectations.expectation_to_string(result.type)
- self._printer.print_test_result(result, expected, exp_str, got_str)
- self._printer.print_progress(result_summary, self._retrying,
- self._test_files_list)
-
- def interrupt_if_at_failure_limit(limit, count, message):
- if limit and count >= limit:
- raise TestRunInterruptedException(message % count)
-
- interrupt_if_at_failure_limit(
- self._options.exit_after_n_failures,
- result_summary.unexpected_failures,
- "Aborting run since %d failures were reached")
- interrupt_if_at_failure_limit(
- self._options.exit_after_n_crashes_or_timeouts,
- result_summary.unexpected_crashes_or_timeouts,
- "Aborting run since %d crashes or timeouts were reached")
+ self._update_summary_with_result(result_summary, result)
+
+ def _update_summary_with_result(self, result_summary, result):
+ expected = self._expectations.matches_an_expected_result(
+ result.filename, result.type, self._options.pixel_tests)
+ result_summary.add(result, expected)
+ exp_str = self._expectations.get_expectations_string(
+ result.filename)
+ got_str = self._expectations.expectation_to_string(result.type)
+ self._printer.print_test_result(result, expected, exp_str, got_str)
+ self._printer.print_progress(result_summary, self._retrying,
+ self._test_files_list)
+
+ def interrupt_if_at_failure_limit(limit, count, message):
+ if limit and count >= limit:
+ raise TestRunInterruptedException(message % count)
+
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_failures,
+ result_summary.unexpected_failures,
+ "Aborting run since %d failures were reached")
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_crashes_or_timeouts,
+ result_summary.unexpected_crashes_or_timeouts,
+ "Aborting run since %d crashes or timeouts were reached")
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
@@ -789,7 +790,7 @@ class TestRunner:
return failed_results
def _upload_json_files(self, unexpected_results, result_summary,
- individual_test_timings):
+ individual_test_timings):
"""Writes the results of the test run as JSON files into the results
dir and upload the files to the appengine server.
@@ -825,18 +826,13 @@ class TestRunner:
self._options.build_number, self._options.results_directory,
BUILDER_BASE_URL, individual_test_timings,
self._expectations, result_summary, self._test_files_list,
- not self._options.upload_full_results,
self._options.test_results_server,
"layout-tests",
self._options.master_name)
_log.debug("Finished writing JSON files.")
- json_files = ["expectations.json"]
- if self._options.upload_full_results:
- json_files.append("results.json")
- else:
- json_files.append("incremental_results.json")
+ json_files = ["expectations.json", "incremental_results.json"]
generator.upload_json_files(json_files)
@@ -844,6 +840,7 @@ class TestRunner:
"""Prints the configuration for the test run."""
p = self._printer
p.print_config("Using port '%s'" % self._port.name())
+ p.print_config("Test configuration: %s" % self._port.test_configuration())
p.print_config("Placing test results in %s" %
self._options.results_directory)
if self._options.new_baseline:
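Read together, the lint() and parse_expectations() hunks above replace the old platform-name/debug-mode pair with one TestConfiguration object per pass. A rough sketch of the resulting lint flow, assuming only the port and printer methods already used in the diff (all_test_configurations(), test_expectations(), test_expectations_overrides(), write()); the helper name lint_all_configurations is illustrative:

from webkitpy.layout_tests.layout_package import test_expectations


def lint_all_configurations(port, printer, options):
    # Constructing TestExpectations parses the expectations file and raises
    # ParseError on syntax, duplicate, or semantic problems.
    lint_failed = False
    for test_configuration in port.all_test_configurations():
        try:
            test_expectations.TestExpectations(
                port,
                None,  # no explicit test list when linting
                port.test_expectations(),
                test_configuration,
                options.lint_test_files,
                port.test_expectations_overrides())
        except test_expectations.ParseError:
            lint_failed = True
        printer.write("")
    return -1 if lint_failed else 0

The same six-argument constructor is what parse_expectations() now uses for the real run, just with the actual test file set and the port's current test_configuration().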
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py
new file mode 100644
index 0000000..f097b83
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py
@@ -0,0 +1,129 @@
+#!/usr/bin/env python
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+This module implements TestRunner2, an alternate implementation of the
+TestRunner class that uses the manager_worker_broker module to send lists of
+tests to workers and to collect the completion messages they send back.
+"""
+
+import logging
+
+
+from webkitpy.layout_tests.layout_package import manager_worker_broker
+from webkitpy.layout_tests.layout_package import test_runner
+from webkitpy.layout_tests.layout_package import worker
+
+_log = logging.getLogger(__name__)
+
+
+class TestRunner2(test_runner.TestRunner):
+ def __init__(self, port, options, printer):
+ test_runner.TestRunner.__init__(self, port, options, printer)
+ self._all_results = []
+ self._group_stats = {}
+ self._current_result_summary = None
+ self._done = False
+
+ def is_done(self):
+ return self._done
+
+ def name(self):
+ return 'TestRunner2'
+
+ def _run_tests(self, file_list, result_summary):
+ """Runs the tests in the file_list.
+
+ Args:
+ file_list: list of tests to run
+ result_summary: summary object to populate with the results
+
+ Return: A tuple (keyboard_interrupted, interrupted, thread_timings,
+ test_timings, individual_test_timings)
+ keyboard_interrupted is whether someone typed Ctrl-C
+ interrupted is whether the run was stopped prematurely
+ thread_timings is a list of dicts with the total runtime
+ of each thread with 'name', 'num_tests', 'total_time' properties
+ test_timings is a list of timings for each sharded subdirectory
+ of the form [time, directory_name, num_tests]
+ individual_test_timings is a list of run times for each test
+ in the form {filename:filename, test_run_time:test_run_time}
+ """
+ self._current_result_summary = result_summary
+
+ # FIXME: shard properly.
+
+ # FIXME: should shard_tests return a list of objects rather than tuples?
+ test_lists = self._shard_tests(file_list, False)
+
+ manager_connection = manager_worker_broker.get(self._port, self._options, self, worker.Worker)
+
+ # FIXME: start all of the workers.
+ manager_connection.start_worker(0)
+
+ for test_list in test_lists:
+ manager_connection.post_message('test_list', test_list[0], test_list[1])
+
+ manager_connection.post_message('stop')
+
+ keyboard_interrupted = False
+ interrupted = False
+ if not self._options.dry_run:
+ while not self._check_if_done():
+ manager_connection.run_message_loop(delay_secs=1.0)
+
+ # FIXME: implement stats.
+ thread_timings = []
+
+ # FIXME: should this be a class instead of a tuple?
+ return (keyboard_interrupted, interrupted, thread_timings,
+ self._group_stats, self._all_results)
+
+ def _check_if_done(self):
+ """Returns true iff all the workers have either completed or wedged."""
+ # FIXME: implement to check for wedged workers.
+ return self._done
+
+ def handle_started_test(self, src, test_info, hang_timeout):
+ # FIXME: implement
+ pass
+
+ def handle_done(self, src):
+ # FIXME: implement properly to handle multiple workers.
+ self._done = True
+
+ def handle_exception(self, src, exception_info):
+ raise exception_info
+
+ def handle_finished_list(self, src, list_name, num_tests, elapsed_time):
+ # FIXME: update stats
+ pass
+
+ def handle_finished_test(self, src, result, elapsed_time):
+ self._update_summary_with_result(self._current_result_summary, result)
+
+ # FIXME: update stats.
+ self._all_results.append(result)
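For readers skimming the new file, the manager side of the run boils down to a post-and-poll loop. Below is a condensed sketch of what _run_tests() above does, using only names that appear in the diff; the function name drive_workers is illustrative, and sharding, dry-run handling, and timing are omitted:

from webkitpy.layout_tests.layout_package import manager_worker_broker, worker


def drive_workers(manager, port, options, test_lists):
    # 'manager' is the TestRunner2 instance; the broker calls its handle_*
    # methods as the worker posts messages back.
    connection = manager_worker_broker.get(port, options, manager, worker.Worker)
    connection.start_worker(0)  # per the FIXME above, only worker 0 is started
    for list_name, tests in test_lists:
        connection.post_message('test_list', list_name, tests)
    connection.post_message('stop')
    while not manager.is_done():
        connection.run_message_loop(delay_secs=1.0)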
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/worker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker.py
new file mode 100644
index 0000000..47d4fbd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker.py
@@ -0,0 +1,104 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Handle messages from the TestRunner and execute actual tests."""
+
+import logging
+import sys
+import time
+
+from webkitpy.common.system import stack_utils
+
+from webkitpy.layout_tests.layout_package import manager_worker_broker
+from webkitpy.layout_tests.layout_package import test_results
+
+
+_log = logging.getLogger(__name__)
+
+
+class Worker(manager_worker_broker.AbstractWorker):
+ def __init__(self, worker_connection, worker_number, options):
+ self._worker_connection = worker_connection
+ self._worker_number = worker_number
+ self._options = options
+ self._name = 'worker/%d' % worker_number
+ self._done = False
+ self._port = None
+
+ def _deferred_init(self, port):
+ self._port = port
+
+ def is_done(self):
+ return self._done
+
+ def name(self):
+ return self._name
+
+ def run(self, port):
+ self._deferred_init(port)
+
+ _log.debug("%s starting" % self._name)
+
+ # FIXME: need to add in error handling, better logging.
+ self._worker_connection.run_message_loop()
+ self._worker_connection.post_message('done')
+
+ def handle_test_list(self, src, list_name, test_list):
+ # FIXME: check to see if we need to get the http lock.
+
+ start_time = time.time()
+ num_tests = 0
+ for test_input in test_list:
+ self._run_test(test_input)
+ num_tests += 1
+ self._worker_connection.yield_to_broker()
+
+ elapsed_time = time.time() - start_time
+ self._worker_connection.post_message('finished_list', list_name, num_tests, elapsed_time)
+
+ # FIXME: release the lock if necessary
+
+ def handle_stop(self, src):
+ self._done = True
+
+ def _run_test(self, test_input):
+
+ # FIXME: get real timeout value from SingleTestRunner
+ test_timeout_sec = int(test_input.timeout) / 1000
+ start = time.time()
+ self._worker_connection.post_message('started_test', test_input, test_timeout_sec)
+
+ # FIXME: actually run the test.
+ result = test_results.TestResult(test_input.filename, failures=[],
+ test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={})
+
+ elapsed_time = time.time() - start
+
+ # FIXME: update stats, check for failures.
+
+ self._worker_connection.post_message('finished_test', result, elapsed_time)
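Taken together, test_runner2.py and worker.py imply the following message protocol, assuming (as the handler names suggest) that the broker dispatches a message named 'foo' to a handle_foo() method on the receiving object:

# manager (TestRunner2)                          worker (Worker)
# ---------------------                          ---------------
# post_message('test_list', name, tests)  --->   handle_test_list(src, name, tests)
#                                                post_message('started_test', test_input, timeout)
# handle_started_test(src, ...)           <---
#                                                post_message('finished_test', result, elapsed_time)
# handle_finished_test(src, ...)          <---
#                                                post_message('finished_list', name, num_tests, elapsed_time)
# handle_finished_list(src, ...)          <---
# post_message('stop')                    --->   handle_stop(src)
#                                                post_message('done')
# handle_done(src)                        <---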