author    Russell Brenner <russellbrenner@google.com>  2010-11-18 17:33:13 -0800
committer Russell Brenner <russellbrenner@google.com>  2010-12-02 13:47:21 -0800
commit    6b70adc33054f8aee8c54d0f460458a9df11b8a5 (patch)
tree      103a13998c33944d6ab3b8318c509a037e639460 /WebKitTools/Scripts/webkitpy/layout_tests/layout_package
parent    bdf4ebc8e70b2d221b6ee7a65660918ecb1d33aa (diff)
Merge WebKit at r72274: Initial merge by git.
Change-Id: Ie51f0b4a16da82942bd516dce59cfb79ebbe25fb
Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py          147
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py      8
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py            122
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py   135
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py                            4
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py                         56
6 files changed, 298 insertions, 174 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 9f2de7e..88f493d 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -51,6 +51,7 @@ import time
import traceback
import test_failures
+import test_output
import test_results
_log = logging.getLogger("webkitpy.layout_tests.layout_package."
@@ -74,9 +75,14 @@ def log_stack(stack):
_log.error(' %s' % line.strip())
-def _process_output(port, options, test_info, test_types, test_args,
- crash, timeout, test_run_time, actual_checksum,
- output, error):
+def _expected_test_output(port, filename):
+ """Returns an expected TestOutput object."""
+ return test_output.TestOutput(port.expected_text(filename),
+ port.expected_image(filename),
+ port.expected_checksum(filename))
+
+def _process_output(port, options, test_input, test_types, test_args,
+ test_output):
"""Receives the output from a DumpRenderTree process, subjects it to a
number of tests, and returns a list of failure types the test produced.
@@ -84,57 +90,55 @@ def _process_output(port, options, test_info, test_types, test_args,
port: port-specific hooks
options: command line options argument from optparse
proc: an active DumpRenderTree process
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
test_types: list of test types to subject the output to
test_args: arguments to be passed to each test
+ test_output: a TestOutput object containing the output of the test
Returns: a TestResult object
"""
failures = []
- # Some test args, such as the image hash, may be added or changed on a
- # test-by-test basis.
- local_test_args = copy.copy(test_args)
-
- local_test_args.hash = actual_checksum
-
- if crash:
+ if test_output.crash:
failures.append(test_failures.FailureCrash())
- if timeout:
+ if test_output.timeout:
failures.append(test_failures.FailureTimeout())
- if crash:
- _log.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
+ if test_output.crash:
+ _log.debug("Stacktrace for %s:\n%s" % (test_input.filename,
+ test_output.error))
# Strip off "file://" since RelativeTestFilename expects
# filesystem paths.
filename = os.path.join(options.results_directory,
port.relative_test_filename(
- test_info.filename))
+ test_input.filename))
filename = os.path.splitext(filename)[0] + "-stack.txt"
port.maybe_make_directory(os.path.split(filename)[0])
with codecs.open(filename, "wb", "utf-8") as file:
- file.write(error)
- elif error:
- _log.debug("Previous test output stderr lines:\n%s" % error)
+ file.write(test_output.error)
+ elif test_output.error:
+ _log.debug("Previous test output stderr lines:\n%s" % test_output.error)
+
+ expected_test_output = _expected_test_output(port, test_input.filename)
# Check the output and save the results.
start_time = time.time()
time_for_diffs = {}
for test_type in test_types:
start_diff_time = time.time()
- new_failures = test_type.compare_output(port, test_info.filename,
- output, local_test_args,
- options.configuration)
+ new_failures = test_type.compare_output(port, test_input.filename,
+ test_args, test_output,
+ expected_test_output)
# Don't add any more failures if we already have a crash, so we don't
# double-report those tests. We do double-report for timeouts since
# we still want to see the text and image output.
- if not crash:
+ if not test_output.crash:
failures.extend(new_failures)
time_for_diffs[test_type.__class__.__name__] = (
time.time() - start_diff_time)
total_time_for_all_diffs = time.time() - start_diff_time
- return test_results.TestResult(test_info.filename, failures, test_run_time,
+ return test_results.TestResult(test_input.filename, failures, test_output.test_time,
total_time_for_all_diffs, time_for_diffs)
@@ -153,22 +157,36 @@ def _milliseconds_to_seconds(msecs):
return float(msecs) / 1000.0
-def _image_hash(test_info, test_args, options):
- """Returns the image hash of the test if it's needed, otherwise None."""
- if (test_args.new_baseline or test_args.reset_results or not options.pixel_tests):
- return None
- return test_info.image_hash()
+def _should_fetch_expected_checksum(options):
+ return options.pixel_tests and not (options.new_baseline or options.reset_results)
+
+
+def _run_single_test(port, options, test_input, test_types, test_args, driver):
+ # FIXME: Pull this into TestShellThread._run().
+
+ # The image hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+    # previous run will be copied into the baseline.)
+ if _should_fetch_expected_checksum(options):
+ image_hash_to_driver = port.expected_checksum(test_input.filename)
+ else:
+ image_hash_to_driver = None
+ uri = port.filename_to_uri(test_input.filename)
+ test_output = driver.run_test(uri, test_input.timeout, image_hash_to_driver)
+ return _process_output(port, options, test_input, test_types, test_args,
+ test_output)
class SingleTestThread(threading.Thread):
"""Thread wrapper for running a single test file."""
- def __init__(self, port, options, test_info, test_types, test_args):
+ def __init__(self, port, options, test_input, test_types, test_args):
"""
Args:
port: object implementing port-specific hooks
options: command line argument object from optparse
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
@@ -177,7 +195,7 @@ class SingleTestThread(threading.Thread):
threading.Thread.__init__(self)
self._port = port
self._options = options
- self._test_info = test_info
+ self._test_input = test_input
self._test_types = test_types
self._test_args = test_args
self._driver = None
@@ -188,20 +206,12 @@ class SingleTestThread(threading.Thread):
def _covered_run(self):
# FIXME: this is a separate routine to work around a bug
# in coverage: see http://bitbucket.org/ned/coveragepy/issue/85.
- test_info = self._test_info
self._driver = self._port.create_driver(self._test_args.png_path,
self._options)
self._driver.start()
- image_hash = _image_hash(test_info, self._test_args, self._options)
- start = time.time()
- crash, timeout, actual_checksum, output, error = \
- self._driver.run_test(test_info.uri.strip(), test_info.timeout,
- image_hash)
- end = time.time()
- self._test_result = _process_output(self._port, self._options,
- test_info, self._test_types, self._test_args,
- crash, timeout, end - start,
- actual_checksum, output, error)
+ self._test_result = _run_single_test(self._port, self._options,
+ self._test_input, self._test_types,
+ self._test_args, self._driver)
self._driver.stop()
def get_test_result(self):
@@ -258,7 +268,6 @@ class TestShellThread(WatchableThread):
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
-
"""
WatchableThread.__init__(self)
self._port = port
@@ -402,17 +411,17 @@ class TestShellThread(WatchableThread):
self._num_tests_in_current_group = len(self._filename_list)
self._current_group_start_time = time.time()
- test_info = self._filename_list.pop()
+ test_input = self._filename_list.pop()
# We have a url, run tests.
batch_count += 1
self._num_tests += 1
if self._options.run_singly:
- result = self._run_test_singly(test_info)
+ result = self._run_test_singly(test_input)
else:
- result = self._run_test(test_info)
+ result = self._run_test(test_input)
- filename = test_info.filename
+ filename = test_input.filename
tests_run_file.write(filename + "\n")
if result.failures:
# Check and kill DumpRenderTree if we need to.
@@ -440,7 +449,7 @@ class TestShellThread(WatchableThread):
if test_runner:
test_runner.update_summary(result_summary)
- def _run_test_singly(self, test_info):
+ def _run_test_singly(self, test_input):
"""Run a test in a separate thread, enforcing a hard time limit.
Since we can only detect the termination of a thread, not any internal
@@ -448,7 +457,7 @@ class TestShellThread(WatchableThread):
files singly.
Args:
- test_info: Object containing the test filename, uri and timeout
+ test_input: Object containing the test filename and timeout
Returns:
A TestResult
@@ -456,14 +465,14 @@ class TestShellThread(WatchableThread):
"""
worker = SingleTestThread(self._port,
self._options,
- test_info,
+ test_input,
self._test_types,
self._test_args)
worker.start()
thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_info.timeout)))
+ _pad_timeout(int(test_input.timeout)))
thread._next_timeout = time.time() + thread_timeout
worker.join(thread_timeout)
if worker.isAlive():
@@ -485,43 +494,29 @@ class TestShellThread(WatchableThread):
# This gets raised if the worker thread has already exited.
failures = []
_log.error('Cannot get results of test: %s' %
- test_info.filename)
- result = test_results.TestResult(test_info.filename, failures=[],
+ test_input.filename)
+ result = test_results.TestResult(test_input.filename, failures=[],
test_run_time=0, total_time_for_all_diffs=0, time_for_diffs=0)
return result
- def _run_test(self, test_info):
+ def _run_test(self, test_input):
"""Run a single test file using a shared DumpRenderTree process.
Args:
- test_info: Object containing the test filename, uri and timeout
+        test_input: Object containing the test filename and timeout
Returns: a TestResult object.
"""
self._ensure_dump_render_tree_is_running()
- # The pixel_hash is used to avoid doing an image dump if the
- # checksums match, so it should be set to a blank value if we
- # are generating a new baseline. (Otherwise, an image from a
- # previous run will be copied into the baseline.)
- image_hash = _image_hash(test_info, self._test_args, self._options)
- start = time.time()
-
thread_timeout = _milliseconds_to_seconds(
- _pad_timeout(int(test_info.timeout)))
- self._next_timeout = start + thread_timeout
-
- crash, timeout, actual_checksum, output, error = \
- self._driver.run_test(test_info.uri, test_info.timeout, image_hash)
- end = time.time()
-
- result = _process_output(self._port, self._options,
- test_info, self._test_types,
- self._test_args, crash,
- timeout, end - start, actual_checksum,
- output, error)
- self._test_results.append(result)
- return result
+ _pad_timeout(int(test_input.timeout)))
+ self._next_timeout = time.time() + thread_timeout
+ test_result = _run_single_test(self._port, self._options, test_input,
+ self._test_types, self._test_args,
+ self._driver)
+ self._test_results.append(test_result)
+ return test_result
def _ensure_dump_render_tree_is_running(self):
"""Start the shared DumpRenderTree, if it's not running.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index 1cf88ef..101d30b 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -56,7 +56,8 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_timings, expectations, result_summary, all_tests,
- generate_incremental_results=False, test_results_server=None):
+ generate_incremental_results=False, test_results_server=None,
+ test_type="", master_name=""):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
@@ -67,7 +68,8 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
super(JSONLayoutResultsGenerator, self).__init__(
builder_name, build_name, build_number, results_file_base_path,
builder_base_url, {}, port.test_repository_paths(),
- generate_incremental_results, test_results_server)
+ generate_incremental_results, test_results_server,
+ test_type, master_name)
self._port = port
self._expectations = expectations
@@ -117,7 +119,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
return set(self._failures.keys())
# override
- def _get_result_type_char(self, test_name):
+ def _get_modifier_char(self, test_name):
if test_name not in self._all_tests:
return self.NO_DATA_RESULT
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 765b4d8..3267718 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -46,17 +46,35 @@ import webkitpy.thirdparty.simplejson as simplejson
_log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator")
-
class TestResult(object):
"""A simple class that represents a single test result."""
- def __init__(self, name, failed=False, skipped=False, elapsed_time=0):
+
+ # Test modifier constants.
+ (NONE, FAILS, FLAKY, DISABLED) = range(4)
+
+ def __init__(self, name, failed=False, elapsed_time=0):
self.name = name
self.failed = failed
- self.skipped = skipped
self.time = elapsed_time
+ test_name = name
+ try:
+ test_name = name.split('.')[1]
+ except IndexError:
+ _log.warn("Invalid test name: %s.", name)
+ pass
+
+ if test_name.startswith('FAILS_'):
+ self.modifier = self.FAILS
+ elif test_name.startswith('FLAKY_'):
+ self.modifier = self.FLAKY
+ elif test_name.startswith('DISABLED_'):
+ self.modifier = self.DISABLED
+ else:
+ self.modifier = self.NONE
+
def fixable(self):
- return self.failed or self.skipped
+ return self.failed or self.modifier == self.DISABLED
class JSONResultsGeneratorBase(object):
@@ -67,10 +85,20 @@ class JSONResultsGeneratorBase(object):
MIN_TIME = 1
JSON_PREFIX = "ADD_RESULTS("
JSON_SUFFIX = ");"
+
+ # Note that in non-chromium tests those chars are used to indicate
+ # test modifiers (FAILS, FLAKY, etc) but not actual test results.
PASS_RESULT = "P"
SKIP_RESULT = "X"
FAIL_RESULT = "F"
+ FLAKY_RESULT = "L"
NO_DATA_RESULT = "N"
+
+ MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
+ TestResult.DISABLED: SKIP_RESULT,
+ TestResult.FAILS: FAIL_RESULT,
+ TestResult.FLAKY: FLAKY_RESULT}
+
VERSION = 3
VERSION_KEY = "version"
RESULTS = "results"
@@ -94,7 +122,8 @@ class JSONResultsGeneratorBase(object):
test_results_map, svn_repositories=None,
generate_incremental_results=False,
test_results_server=None,
- test_type=""):
+ test_type="",
+ master_name=""):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
@@ -113,11 +142,14 @@ class JSONResultsGeneratorBase(object):
generate_incremental_results: If true, generate incremental json file
from current run results.
test_results_server: server that hosts test results json.
+ test_type: test type string (e.g. 'layout-tests').
+ master_name: the name of the buildbot master.
"""
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
+ self._results_directory = results_file_base_path
self._results_file_path = os.path.join(results_file_base_path,
self.RESULTS_FILENAME)
self._incremental_results_file_path = os.path.join(
@@ -133,6 +165,7 @@ class JSONResultsGeneratorBase(object):
self._test_results_server = test_results_server
self._test_type = test_type
+ self._master_name = master_name
self._json = None
self._archived_results = None
@@ -205,6 +238,36 @@ class JSONResultsGeneratorBase(object):
def set_archived_results(self, archived_results):
self._archived_results = archived_results
+ def upload_json_files(self, json_files):
+ """Uploads the given json_files to the test_results_server (if the
+ test_results_server is given)."""
+ if not self._test_results_server:
+ return
+
+ if not self._master_name:
+ _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
+ return
+
+ _log.info("Uploading JSON files for builder: %s", self._builder_name)
+ attrs = [("builder", self._builder_name),
+ ("testtype", self._test_type),
+ ("master", self._master_name)]
+
+ files = [(file, os.path.join(self._results_directory, file))
+ for file in json_files]
+
+ uploader = test_results_uploader.TestResultsUploader(
+ self._test_results_server)
+ try:
+ # Set uploading timeout in case appengine server is having problem.
+ # 120 seconds are more than enough to upload test results.
+ uploader.upload(attrs, files, 120)
+ except Exception, err:
+ _log.error("Upload failed: %s" % err)
+ return
+
+ _log.info("JSON files uploaded.")
+
def _generate_json_file(self, json, file_path):
# Specify separators in order to get compact encoding.
json_data = simplejson.dumps(json, separators=(',', ':'))
@@ -226,19 +289,17 @@ class JSONResultsGeneratorBase(object):
"""Returns a set of failed test names."""
return set([r.name for r in self._test_results if r.failed])
- def _get_result_type_char(self, test_name):
+ def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
- PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
+ PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return JSONResultsGenerator.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
- if test_result.skipped:
- return JSONResultsGenerator.SKIP_RESULT
- if test_result.failed:
- return JSONResultsGenerator.FAIL_RESULT
+ if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
+ return self.MODIFIER_TO_CHAR[test_result.modifier]
return JSONResultsGenerator.PASS_RESULT
@@ -344,10 +405,10 @@ class JSONResultsGeneratorBase(object):
self._insert_item_into_raw_list(results_for_builder,
fixable_count, self.FIXABLE_COUNT)
- # Create a pass/skip/failure summary dictionary.
+ # Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
entry = {}
for test_name in self._test_results_map.iterkeys():
- result_char = self._get_result_type_char(test_name)
+ result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
# Insert the pass/skip/failure summary dictionary.
@@ -423,7 +484,7 @@ class JSONResultsGeneratorBase(object):
tests: Dictionary containing test result entries.
"""
- result = self._get_result_type_char(test_name)
+ result = self._get_modifier_char(test_name)
time = self._get_test_timing(test_name)
if test_name not in tests:
@@ -523,33 +584,10 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
# The flag is for backward compatibility.
output_json_in_init = True
- def _upload_json_files(self):
- if not self._test_results_server or not self._test_type:
- return
-
- _log.info("Uploading JSON files for %s to the server: %s",
- self._builder_name, self._test_results_server)
- attrs = [("builder", self._builder_name), ("testtype", self._test_type)]
- json_files = [self.INCREMENTAL_RESULTS_FILENAME]
-
- files = [(file, os.path.join(self._results_directory, file))
- for file in json_files]
- uploader = test_results_uploader.TestResultsUploader(
- self._test_results_server)
- try:
- # Set uploading timeout in case appengine server is having problem.
- # 120 seconds are more than enough to upload test results.
- uploader.upload(attrs, files, 120)
- except Exception, err:
- _log.error("Upload failed: %s" % err)
- return
-
- _log.info("JSON files uploaded.")
-
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_timings, failures, passed_tests, skipped_tests, all_tests,
- test_results_server=None, test_type=None):
+ test_results_server=None, test_type=None, master_name=None):
"""Generates a JSON results file.
Args
@@ -567,6 +605,7 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
include skipped tests.
test_results_server: server that hosts test results json.
test_type: the test type.
+ master_name: the name of the buildbot master.
"""
self._test_type = test_type
@@ -582,11 +621,9 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
test_result.failed = True
for test in skipped_tests:
test_results_map[test] = test_result = get(test, TestResult(test))
- test_result.skipped = True
for test in passed_tests:
test_results_map[test] = test_result = get(test, TestResult(test))
test_result.failed = False
- test_result.skipped = False
for test in all_tests:
if test not in test_results_map:
test_results_map[test] = TestResult(test)
@@ -599,8 +636,9 @@ class JSONResultsGenerator(JSONResultsGeneratorBase):
svn_repositories=port.test_repository_paths(),
generate_incremental_results=True,
test_results_server=test_results_server,
- test_type=test_type)
+ test_type=test_type,
+ master_name=master_name)
if self.__class__.output_json_in_init:
self.generate_json_output()
- self._upload_json_files()
+ self.upload_json_files([self.INCREMENTAL_RESULTS_FILENAME])
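
To make the new naming convention concrete, here is a small standalone illustration (not code from this patch) of how a gtest-style test name maps to a modifier and then to the single result character stored in the JSON. The helper below only loosely mirrors TestResult.__init__ and MODIFIER_TO_CHAR.

FAILS, FLAKY, DISABLED, NONE = 'FAILS', 'FLAKY', 'DISABLED', 'NONE'
MODIFIER_TO_CHAR = {NONE: 'P', DISABLED: 'X', FAILS: 'F', FLAKY: 'L'}

def modifier_for(full_name):
    # gtest-style names look like "SuiteName.TestName"; the modifier prefix
    # lives on the test part of the name.
    test_name = full_name.split('.')[-1]
    for prefix, modifier in (('FAILS_', FAILS), ('FLAKY_', FLAKY),
                             ('DISABLED_', DISABLED)):
        if test_name.startswith(prefix):
            return modifier
    return NONE

if __name__ == '__main__':
    for name in ('MySuite.Basic', 'MySuite.FAILS_Slow',
                 'MySuite.FLAKY_Intermittent', 'MySuite.DISABLED_Broken'):
        print('%s -> %s' % (name, MODIFIER_TO_CHAR[modifier_for(name)]))
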
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
index 785cc1c..606a613 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -47,43 +47,68 @@ class JSONGeneratorTest(unittest.TestCase):
self.build_number = 'DUMMY_BUILDER_NUMBER'
self._json = None
self._num_runs = 0
- self._tests_list = set([])
+ self._tests_set = set([])
self._test_timings = {}
- self._failed_tests = {}
- self._passed_tests = set([])
- self._skipped_tests = set([])
-
- def _test_json_generation(self, passed_tests, failed_tests, skipped_tests):
- # Make sure we have sets (rather than lists).
- passed_tests = set(passed_tests)
- skipped_tests = set(skipped_tests)
- tests_list = passed_tests | set(failed_tests.keys())
+ self._failed_tests = set([])
+
+ self._PASS_tests = set([])
+ self._DISABLED_tests = set([])
+ self._FLAKY_tests = set([])
+ self._FAILS_tests = set([])
+
+ def _get_test_modifier(self, test_name):
+ if test_name.startswith('DISABLED_'):
+ return json_results_generator.JSONResultsGenerator.SKIP_RESULT
+ elif test_name.startswith('FLAKY_'):
+ return json_results_generator.JSONResultsGenerator.FLAKY_RESULT
+ elif test_name.startswith('FAILS_'):
+ return json_results_generator.JSONResultsGenerator.FAIL_RESULT
+ return json_results_generator.JSONResultsGenerator.PASS_RESULT
+
+ def _test_json_generation(self, passed_tests_list, failed_tests_list):
+ tests_set = set(passed_tests_list) | set(failed_tests_list)
+
+ DISABLED_tests = set([t for t in tests_set
+ if t.startswith('DISABLED_')])
+ FLAKY_tests = set([t for t in tests_set
+ if t.startswith('FLAKY_')])
+ FAILS_tests = set([t for t in tests_set
+ if t.startswith('FAILS_')])
+ PASS_tests = tests_set ^ (DISABLED_tests | FLAKY_tests | FAILS_tests)
+
+ passed_tests = set(passed_tests_list) ^ DISABLED_tests
+ failed_tests = set(failed_tests_list)
+
test_timings = {}
i = 0
- for test in tests_list:
+ for test in tests_set:
test_timings[test] = float(self._num_runs * 100 + i)
i += 1
- port_obj = port.get(None)
+ # For backward compatibility.
+ reason = test_expectations.TEXT
+ failed_tests_dict = dict([(name, reason) for name in failed_tests])
+ port_obj = port.get(None)
generator = json_results_generator.JSONResultsGenerator(port_obj,
self.builder_name, self.build_name, self.build_number,
'',
None, # don't fetch past json results archive
test_timings,
- failed_tests,
+ failed_tests_dict,
passed_tests,
- skipped_tests,
- tests_list)
+ (),
+ tests_set)
# Test incremental json results
incremental_json = generator.get_json(incremental=True)
self._verify_json_results(
- tests_list,
+ tests_set,
test_timings,
- passed_tests,
failed_tests,
- skipped_tests,
+ PASS_tests,
+ DISABLED_tests,
+ FLAKY_tests,
incremental_json,
1)
@@ -92,23 +117,25 @@ class JSONGeneratorTest(unittest.TestCase):
json = generator.get_json(incremental=False)
self._json = json
self._num_runs += 1
- self._tests_list |= tests_list
+ self._tests_set |= tests_set
self._test_timings.update(test_timings)
self._failed_tests.update(failed_tests)
- self._passed_tests |= passed_tests
- self._skipped_tests |= skipped_tests
+ self._PASS_tests |= PASS_tests
+ self._DISABLED_tests |= DISABLED_tests
+ self._FLAKY_tests |= FLAKY_tests
self._verify_json_results(
- self._tests_list,
+ self._tests_set,
self._test_timings,
- self._passed_tests,
self._failed_tests,
- self._skipped_tests,
+ self._PASS_tests,
+ self._DISABLED_tests,
+ self._FLAKY_tests,
self._json,
self._num_runs)
- def _verify_json_results(self, tests_list, test_timings,
- passed_tests, failed_tests,
- skipped_tests, json, num_runs):
+ def _verify_json_results(self, tests_set, test_timings, failed_tests,
+ PASS_tests, DISABLED_tests, FLAKY_tests,
+ json, num_runs):
# Aliasing to a short name for better access to its constants.
JRG = json_results_generator.JSONResultsGenerator
@@ -118,10 +145,10 @@ class JSONGeneratorTest(unittest.TestCase):
buildinfo = json[self.builder_name]
self.assertTrue(JRG.FIXABLE in buildinfo)
self.assertTrue(JRG.TESTS in buildinfo)
- self.assertTrue(len(buildinfo[JRG.BUILD_NUMBERS]) == num_runs)
- self.assertTrue(buildinfo[JRG.BUILD_NUMBERS][0] == self.build_number)
+ self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
+ self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
- if tests_list or skipped_tests:
+ if tests_set or DISABLED_tests:
fixable = {}
for fixable_items in buildinfo[JRG.FIXABLE]:
for (type, count) in fixable_items.iteritems():
@@ -130,52 +157,58 @@ class JSONGeneratorTest(unittest.TestCase):
else:
fixable[type] = count
- if passed_tests:
- self.assertTrue(fixable[JRG.PASS_RESULT] == len(passed_tests))
+ if PASS_tests:
+ self.assertEqual(fixable[JRG.PASS_RESULT], len(PASS_tests))
else:
self.assertTrue(JRG.PASS_RESULT not in fixable or
fixable[JRG.PASS_RESULT] == 0)
- if skipped_tests:
- self.assertTrue(fixable[JRG.SKIP_RESULT] == len(skipped_tests))
+ if DISABLED_tests:
+ self.assertEqual(fixable[JRG.SKIP_RESULT], len(DISABLED_tests))
else:
self.assertTrue(JRG.SKIP_RESULT not in fixable or
fixable[JRG.SKIP_RESULT] == 0)
+ if FLAKY_tests:
+ self.assertEqual(fixable[JRG.FLAKY_RESULT], len(FLAKY_tests))
+ else:
+ self.assertTrue(JRG.FLAKY_RESULT not in fixable or
+ fixable[JRG.FLAKY_RESULT] == 0)
if failed_tests:
tests = buildinfo[JRG.TESTS]
- for test_name, failure in failed_tests.iteritems():
+ for test_name in failed_tests:
self.assertTrue(test_name in tests)
test = tests[test_name]
failed = 0
+ modifier = self._get_test_modifier(test_name)
for result in test[JRG.RESULTS]:
- if result[1] == JRG.FAIL_RESULT:
+ if result[1] == modifier:
failed = result[0]
- self.assertTrue(failed == 1)
+ self.assertEqual(1, failed)
timing_count = 0
for timings in test[JRG.TIMES]:
if timings[1] == test_timings[test_name]:
timing_count = timings[0]
- self.assertTrue(timing_count == 1)
+ self.assertEqual(1, timing_count)
- fixable_count = len(skipped_tests) + len(failed_tests.keys())
- if skipped_tests or failed_tests:
- self.assertTrue(sum(buildinfo[JRG.FIXABLE_COUNT]) == fixable_count)
+ fixable_count = len(DISABLED_tests | failed_tests)
+ if DISABLED_tests or failed_tests:
+ self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)
def test_json_generation(self):
- reason = test_expectations.TEXT
-
- self._test_json_generation([], {}, [])
- self._test_json_generation(['A1', 'B1'], {}, [])
- self._test_json_generation([], {'A2': reason, 'B2': reason}, [])
- self._test_json_generation([], {}, ['A3', 'B3'])
- self._test_json_generation(['A4'], {'B4': reason, 'C4': reason}, [])
+ self._test_json_generation([], [])
+ self._test_json_generation(['A1', 'B1'], [])
+ self._test_json_generation([], ['FAILS_A2', 'FAILS_B2'])
+ self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], [])
+ self._test_json_generation(['A4'], ['B4', 'FAILS_C4'])
+ self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
self._test_json_generation(
- [], {'A5': reason, 'B5': reason}, ['C5', 'D5'])
+ ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
+ ['FAILS_D6'])
self._test_json_generation(
- ['A6', 'B6', 'C6'], {'D6': reason}, ['E6', 'F6'])
-
+ ['A7', 'FLAKY_B7', 'DISABLED_C7'],
+ ['FAILS_D7', 'FLAKY_D8'])
if __name__ == '__main__':
unittest.main()
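
As a side note on the set arithmetic used in _test_json_generation() above, the symmetric difference operator (^) behaves like plain set subtraction here because the DISABLED/FLAKY/FAILS buckets are always subsets of tests_set. A tiny standalone check (not part of the patch):

tests_set = set(['A1', 'FAILS_A2', 'FLAKY_B7', 'DISABLED_C7'])

DISABLED_tests = set([t for t in tests_set if t.startswith('DISABLED_')])
FLAKY_tests = set([t for t in tests_set if t.startswith('FLAKY_')])
FAILS_tests = set([t for t in tests_set if t.startswith('FAILS_')])

# '^' equals '-' here because every modifier set is a subset of tests_set.
PASS_tests = tests_set ^ (DISABLED_tests | FLAKY_tests | FAILS_tests)
assert PASS_tests == tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)
print(sorted(PASS_tests))  # ['A1']
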
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
index 00ff211..fb9fe6d 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py
@@ -182,8 +182,8 @@ def _configure_logging(stream, verbose):
log_datefmt = '%y%m%d %H:%M:%S'
log_level = logging.INFO
if verbose:
- log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
- '%(message)s')
+        log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)-4d %(levelname)s '
+                   '%(message)s')
log_level = logging.DEBUG
root = logging.getLogger()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
new file mode 100644
index 0000000..e809be6
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+class TestOutput(object):
+ """Groups information about a test output for easy passing of data.
+
+    This is used not only for an actual test output, but also for grouping
+ expected test output.
+ """
+
+ def __init__(self, text, image, image_hash,
+ crash=None, test_time=None, timeout=None, error=None):
+ """Initializes a TestOutput object.
+
+ Args:
+ text: a text output
+ image: an image output
+ image_hash: a string containing the checksum of the image
+ crash: a boolean indicating whether the driver crashed on the test
+            test_time: the time the test took to run
+            timeout: a boolean indicating whether the test timed out
+ error: any unexpected or additional (or error) text output
+ """
+ self.text = text
+ self.image = image
+ self.image_hash = image_hash
+ self.crash = crash
+ self.test_time = test_time
+ self.timeout = timeout
+ self.error = error
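
A minimal usage sketch of the new class (assumed caller code, not part of the patch): the same TestOutput shape is used both for what the driver actually produced and for the expected baseline fetched from the port, so a TestType's compare_output() can work on two objects of one type. The namedtuple below is only a stand-in so the sketch runs on its own.

from collections import namedtuple

TestOutput = namedtuple(
    'TestOutput', 'text image image_hash crash test_time timeout error')

actual = TestOutput(text='PASS\n', image=None, image_hash='abc123',
                    crash=False, test_time=0.25, timeout=False, error='')
expected = TestOutput(text='PASS\n', image=None, image_hash='abc123',
                      crash=None, test_time=None, timeout=None, error=None)

# The kind of comparison a TestType might make with the two objects.
print('text matches: %s' % (actual.text == expected.text))
print('checksum matches: %s' % (actual.image_hash == expected.image_hash))
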