Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py  147
1 file changed, 71 insertions(+), 76 deletions(-)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 9f2de7e..88f493d 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -51,6 +51,7 @@ import time
import traceback
import test_failures
+import test_output
import test_results
_log = logging.getLogger("webkitpy.layout_tests.layout_package."
@@ -74,9 +75,14 @@ def log_stack(stack):
_log.error(' %s' % line.strip())
-def _process_output(port, options, test_info, test_types, test_args,
- crash, timeout, test_run_time, actual_checksum,
- output, error):
+def _expected_test_output(port, filename):
+ """Returns an expected TestOutput object."""
+ return test_output.TestOutput(port.expected_text(filename),
+ port.expected_image(filename),
+ port.expected_checksum(filename))
+
+def _process_output(port, options, test_input, test_types, test_args,
+ test_output):
"""Receives the output from a DumpRenderTree process, subjects it to a
number of tests, and returns a list of failure types the test produced.
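
```python
# The helper above and the new _process_output signature both traffic in
# the TestOutput type from the freshly imported test_output module. That
# class is not part of this diff; this is a rough, self-contained sketch
# of the shape the call sites assume. The three positional fields come
# from _expected_test_output, the rest from attribute accesses later in
# this patch; the name `image_hash` for the checksum slot is an assumption.
class TestOutput(object):
    def __init__(self, text, image, image_hash,
                 crash=False, test_time=0.0, timeout=False, error=''):
        self.text = text              # text dump produced by the test
        self.image = image            # image dump (PNG data), if any
        self.image_hash = image_hash  # checksum of the image dump
        self.crash = crash            # DumpRenderTree crashed
        self.test_time = test_time    # wall-clock run time in seconds
        self.timeout = timeout        # the test timed out
        self.error = error            # stderr text / stack trace
```
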
@@ -84,57 +90,55 @@ def _process_output(port, options, test_info, test_types, test_args,
port: port-specific hooks
options: command line options argument from optparse
proc: an active DumpRenderTree process
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
test_types: list of test types to subject the output to
test_args: arguments to be passed to each test
+ test_output: a TestOutput object containing the output of the test
Returns: a TestResult object
"""
failures = []
- # Some test args, such as the image hash, may be added or changed on a
- # test-by-test basis.
- local_test_args = copy.copy(test_args)
-
- local_test_args.hash = actual_checksum
-
- if crash:
+ if test_output.crash:
failures.append(test_failures.FailureCrash())
- if timeout:
+ if test_output.timeout:
failures.append(test_failures.FailureTimeout())
- if crash:
- _log.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
+ if test_output.crash:
+ _log.debug("Stacktrace for %s:\n%s" % (test_input.filename,
+ test_output.error))
# Strip off "file://" since RelativeTestFilename expects
# filesystem paths.
filename = os.path.join(options.results_directory,
port.relative_test_filename(
- test_info.filename))
+ test_input.filename))
filename = os.path.splitext(filename)[0] + "-stack.txt"
port.maybe_make_directory(os.path.split(filename)[0])
with codecs.open(filename, "wb", "utf-8") as file:
- file.write(error)
- elif error:
- _log.debug("Previous test output stderr lines:\n%s" % error)
+ file.write(test_output.error)
+ elif test_output.error:
+ _log.debug("Previous test output stderr lines:\n%s" % test_output.error)
+
+ expected_test_output = _expected_test_output(port, test_input.filename)
# Check the output and save the results.
start_time = time.time()
time_for_diffs = {}
for test_type in test_types:
start_diff_time = time.time()
- new_failures = test_type.compare_output(port, test_info.filename,
- output, local_test_args,
- options.configuration)
+ new_failures = test_type.compare_output(port, test_input.filename,
+ test_args, test_output,
+ expected_test_output)
# Don't add any more failures if we already have a crash, so we don't
# double-report those tests. We do double-report for timeouts since
# we still want to see the text and image output.
- if not crash:
+ if not test_output.crash:
failures.extend(new_failures)
time_for_diffs[test_type.__class__.__name__] = (
time.time() - start_diff_time)
total_time_for_all_diffs = time.time() - start_diff_time
- return test_results.TestResult(test_info.filename, failures, test_run_time,
+ return test_results.TestResult(test_input.filename, failures, test_output.test_time,
total_time_for_all_diffs, time_for_diffs)
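
One behavioral subtlety the rewrite preserves: per-type failures are suppressed after a crash (to avoid double-reporting) but kept after a timeout (so the text and image diffs still show up). In isolation, the rule looks like this; a self-contained illustration, not webkitpy code:

```python
def accumulate_failures(crashed, timed_out, per_type_failures):
    failures = []
    if crashed:
        failures.append('CRASH')
    if timed_out:
        failures.append('TIMEOUT')
    for new_failures in per_type_failures:
        if not crashed:  # crashes suppress per-type failures
            failures.extend(new_failures)
    return failures

assert accumulate_failures(True, False, [['TEXT']]) == ['CRASH']
assert accumulate_failures(False, True, [['TEXT']]) == ['TIMEOUT', 'TEXT']
```
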
@@ -153,22 +157,36 @@ def _milliseconds_to_seconds(msecs):
return float(msecs) / 1000.0
-def _image_hash(test_info, test_args, options):
- """Returns the image hash of the test if it's needed, otherwise None."""
- if (test_args.new_baseline or test_args.reset_results or not options.pixel_tests):
- return None
- return test_info.image_hash()
+def _should_fetch_expected_checksum(options):
+ return options.pixel_tests and not (options.new_baseline or options.reset_results)
+
+
+def _run_single_test(port, options, test_input, test_types, test_args, driver):
+ # FIXME: Pull this into TestShellThread._run().
+
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+ # previous run will be copied into the baseline.)
+ if _should_fetch_expected_checksum(options):
+ image_hash_to_driver = port.expected_checksum(test_input.filename)
+ else:
+ image_hash_to_driver = None
+ uri = port.filename_to_uri(test_input.filename)
+ test_output = driver.run_test(uri, test_input.timeout, image_hash_to_driver)
+ return _process_output(port, options, test_input, test_types, test_args,
+ test_output)
class SingleTestThread(threading.Thread):
"""Thread wrapper for running a single test file."""
- def __init__(self, port, options, test_info, test_types, test_args):
+ def __init__(self, port, options, test_input, test_types, test_args):
"""
Args:
port: object implementing port-specific hooks
options: command line argument object from optparse
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
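
`_should_fetch_expected_checksum` centralizes the decision that `_image_hash` used to make inline: fetch the expected checksum only for a normal pixel-test run, never when baselines are being regenerated. A quick check of the truth table, using `argparse.Namespace` as a stand-in for the optparse options object:

```python
from argparse import Namespace  # stand-in for the optparse options object

def _should_fetch_expected_checksum(options):
    return options.pixel_tests and not (options.new_baseline or
                                        options.reset_results)

# Normal pixel-test run: fetch the checksum so unchanged images skip the dump.
opts = Namespace(pixel_tests=True, new_baseline=False, reset_results=False)
assert _should_fetch_expected_checksum(opts)

# Rebaselining: pass no checksum so a fresh image is always dumped.
opts = Namespace(pixel_tests=True, new_baseline=True, reset_results=False)
assert not _should_fetch_expected_checksum(opts)

# Pixel tests disabled: nothing to fetch.
opts = Namespace(pixel_tests=False, new_baseline=False, reset_results=False)
assert not _should_fetch_expected_checksum(opts)
```
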
@@ -177,7 +195,7 @@ class SingleTestThread(threading.Thread):
threading.Thread.__init__(self)
self._port = port
self._options = options
- self._test_info = test_info
+ self._test_input = test_input
self._test_types = test_types
self._test_args = test_args
self._driver = None
@@ -188,20 +206,12 @@ class SingleTestThread(threading.Thread):
def _covered_run(self):
# FIXME: this is a separate routine to work around a bug
# in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- test_info = self._test_info
self._driver = self._port.create_driver(self._test_args.png_path,
self._options)
self._driver.start()
- image_hash = _image_hash(test_info, self._test_args, self._options)
- start = time.time()
- crash, timeout, actual_checksum, output, error = \
- self._driver.run_test(test_info.uri.strip(), test_info.timeout,
- image_hash)
- end = time.time()
- self._test_result = _process_output(self._port, self._options,
- test_info, self._test_types, self._test_args,
- crash, timeout, end - start,
- actual_checksum, output, error)
+ self._test_result = _run_single_test(self._port, self._options,
+ self._test_input, self._test_types,
+ self._test_args, self._driver)
self._driver.stop()
def get_test_result(self):
@@ -258,7 +268,6 @@ class TestShellThread(WatchableThread):
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
-
"""
WatchableThread.__init__(self)
self._port = port
@@ -402,17 +411,17 @@ class TestShellThread(WatchableThread):
self._num_tests_in_current_group = len(self._filename_list)
self._current_group_start_time = time.time()
- test_info = self._filename_list.pop()
+ test_input = self._filename_list.pop()
# We have a url, run tests.
batch_count += 1
self._num_tests += 1
if self._options.run_singly:
- result = self._run_test_singly(test_info)
+ result = self._run_test_singly(test_input)
else:
- result = self._run_test(test_info)
+ result = self._run_test(test_input)
- filename = test_info.filename
+ filename = test_input.filename
tests_run_file.write(filename + "\n")
if result.failures:
# Check and kill DumpRenderTree if we need to.
@@ -440,7 +449,7 @@ class TestShellThread(WatchableThread):
if test_runner:
test_runner.update_summary(result_summary)
- def _run_test_singly(self, test_info):
+ def _run_test_singly(self, test_input):
"""Run a test in a separate thread, enforcing a hard time limit.
Since we can only detect the termination of a thread, not any internal
@@ -448,7 +457,7 @@ class TestShellThread(WatchableThread):
files singly.
Args:
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
Returns:
A TestResult
@@ -456,14 +465,14 @@ class TestShellThread(WatchableThread):
"""
worker = SingleTestThread(self._port,
self._options,
- test_info,
+ test_input,
self._test_types,
self._test_args)
worker.start()
thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_info.timeout)))
+ _pad_timeout(int(test_input.timeout)))
thread._next_timeout = time.time() + thread_timeout
worker.join(thread_timeout)
if worker.isAlive():
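
The hard time limit in `_run_test_singly` continues to rest on `Thread.join` with a padded timeout: if the worker is still alive after the join returns, the test is treated as hung. `_pad_timeout` is defined elsewhere in this file and is not shown in the diff; the version below is invented purely so the sketch runs:

```python
import threading
import time

def _milliseconds_to_seconds(msecs):
    return float(msecs) / 1000.0

def _pad_timeout(timeout):
    # Invented for this sketch; the real _pad_timeout (not in this diff)
    # pads the per-test timeout so the wrapper thread outlives the driver.
    return timeout * 2

worker = threading.Thread(target=time.sleep, args=(10,))  # stand-in "test"
worker.daemon = True  # let the interpreter exit even if the worker hangs
worker.start()
worker.join(_milliseconds_to_seconds(_pad_timeout(2000)))  # waits 4.0 s
if worker.is_alive():
    print("worker still running: report the test as hung/timed out")
```
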
@@ -485,43 +494,29 @@ class TestShellThread(WatchableThread):
# This gets raised if the worker thread has already exited.
failures = []
_log.error('Cannot get results of test: %s' %
- test_info.filename)
- result = test_results.TestResult(test_info.filename, failures=[],
+ test_input.filename)
+ result = test_results.TestResult(test_input.filename, failures=[],
test_run_time=0, total_time_for_all_diffs=0, time_for_diffs=0)
return result
- def _run_test(self, test_info):
+ def _run_test(self, test_input):
"""Run a single test file using a shared DumpRenderTree process.
Args:
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
Returns: a TestResult object.
"""
self._ensure_dump_render_tree_is_running()
- # The pixel_hash is used to avoid doing an image dump if the
- # checksums match, so it should be set to a blank value if we
- # are generating a new baseline. (Otherwise, an image from a
- # previous run will be copied into the baseline.)
- image_hash = _image_hash(test_info, self._test_args, self._options)
- start = time.time()
-
thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_info.timeout)))
- self._next_timeout = start + thread_timeout
-
- crash, timeout, actual_checksum, output, error = \
- self._driver.run_test(test_info.uri, test_info.timeout, image_hash)
- end = time.time()
-
- result = _process_output(self._port, self._options,
- test_info, self._test_types,
- self._test_args, crash,
- timeout, end - start, actual_checksum,
- output, error)
- self._test_results.append(result)
- return result
+ _pad_timeout(int(test_input.timeout)))
+ self._next_timeout = time.time() + thread_timeout
+ test_result = _run_single_test(self._port, self._options, test_input,
+ self._test_types, self._test_args,
+ self._driver)
+ self._test_results.append(test_result)
+ return test_result
def _ensure_dump_render_tree_is_running(self):
"""Start the shared DumpRenderTree, if it's not running.