Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests')
74 files changed, 0 insertions, 16827 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/__init__.py deleted file mode 100644 index ef65bee..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Required for Python to search this directory for module files diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests.py deleted file mode 100644 index 51dcac8..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests.py +++ /dev/null @@ -1,231 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""deduplicate_tests -- lists duplicated between platforms. - -If platform/mac-leopard is missing an expected test output, we fall back on -platform/mac. This means it's possible to grow redundant test outputs, -where we have the same expected data in both a platform directory and another -platform it falls back on. -""" - -import collections -import fnmatch -import os -import subprocess -import sys -import re -import webkitpy.common.checkout.scm as scm -import webkitpy.common.system.executive as executive -import webkitpy.common.system.logutils as logutils -import webkitpy.common.system.ospath as ospath -import webkitpy.layout_tests.port.factory as port_factory - -_log = logutils.get_logger(__file__) - -_BASE_PLATFORM = 'base' - - -def port_fallbacks(): - """Get the port fallback information. - Returns: - A dictionary mapping platform name to a list of other platforms to fall - back on. All platforms fall back on 'base'. - """ - fallbacks = {_BASE_PLATFORM: []} - platform_dir = os.path.join(scm.find_checkout_root(), 'LayoutTests', - 'platform') - for port_name in os.listdir(platform_dir): - try: - platforms = port_factory.get(port_name).baseline_search_path() - except NotImplementedError: - _log.error("'%s' lacks baseline_search_path(), please fix." 
- % port_name) - fallbacks[port_name] = [_BASE_PLATFORM] - continue - fallbacks[port_name] = [os.path.basename(p) for p in platforms][1:] - fallbacks[port_name].append(_BASE_PLATFORM) - return fallbacks - - -def parse_git_output(git_output, glob_pattern): - """Parses the output of git ls-tree and filters based on glob_pattern. - Args: - git_output: result of git ls-tree -r HEAD LayoutTests. - glob_pattern: a pattern to filter the files. - Returns: - A dictionary mapping (test name, hash of content) => [paths] - """ - hashes = collections.defaultdict(set) - for line in git_output.split('\n'): - if not line: - break - attrs, path = line.strip().split('\t') - if not fnmatch.fnmatch(path, glob_pattern): - continue - path = path[len('LayoutTests/'):] - match = re.match(r'^(platform/.*?/)?(.*)', path) - test = match.group(2) - _, _, hash = attrs.split(' ') - hashes[(test, hash)].add(path) - return hashes - - -def cluster_file_hashes(glob_pattern): - """Get the hashes of all the test expectations in the tree. - We cheat and use git's hashes. - Args: - glob_pattern: a pattern to filter the files. - Returns: - A dictionary mapping (test name, hash of content) => [paths] - """ - - # A map of file hash => set of all files with that hash. - hashes = collections.defaultdict(set) - - # Fill in the map. - cmd = ('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests') - try: - git_output = executive.Executive().run_command(cmd, - cwd=scm.find_checkout_root()) - except OSError, e: - if e.errno == 2: # No such file or directory. - _log.error("Error: 'No such file' when running git.") - _log.error("This script requires git.") - sys.exit(1) - raise e - return parse_git_output(git_output, glob_pattern) - - -def extract_platforms(paths): - """Extracts the platforms from a list of paths matching ^platform/(.*?)/. - Args: - paths: a list of paths. - Returns: - A dictionary containing all platforms from paths. - """ - platforms = {} - for path in paths: - match = re.match(r'^platform/(.*?)/', path) - if match: - platform = match.group(1) - else: - platform = _BASE_PLATFORM - platforms[platform] = path - return platforms - - -def has_intermediate_results(test, fallbacks, matching_platform, - path_exists=os.path.exists): - """Returns True if there is a test result that causes us to not delete - this duplicate. - - For example, chromium-linux may be a duplicate of the checked in result, - but chromium-win may have a different result checked in. In this case, - we need to keep the duplicate results. - - Args: - test: The test name. - fallbacks: A list of platforms we fall back on. - matching_platform: The platform that we found the duplicate test - result. We can stop checking here. - path_exists: Optional parameter that allows us to stub out - os.path.exists for testing. - """ - for platform in fallbacks: - if platform == matching_platform: - return False - test_path = os.path.join('LayoutTests', 'platform', platform, test) - if path_exists(test_path): - return True - return False - - -def get_relative_test_path(filename, relative_to, - checkout_root=scm.find_checkout_root()): - """Constructs a relative path to |filename| from |relative_to|. - Args: - filename: The test file we're trying to get a relative path to. - relative_to: The absolute path we're relative to. - Returns: - A relative path to filename or None if |filename| is not below - |relative_to|. 
- """ - layout_test_dir = os.path.join(checkout_root, 'LayoutTests') - abs_path = os.path.join(layout_test_dir, filename) - return ospath.relpath(abs_path, relative_to) - - -def find_dups(hashes, port_fallbacks, relative_to): - """Yields info about redundant test expectations. - Args: - hashes: a list of hashes as returned by cluster_file_hashes. - port_fallbacks: a list of fallback information as returned by - get_port_fallbacks. - relative_to: the directory that we want the results relative to - Returns: - a tuple containing (test, platform, fallback, platforms) - """ - for (test, hash), cluster in hashes.items(): - if len(cluster) < 2: - continue # Common case: only one file with that hash. - - # Compute the list of platforms we have this particular hash for. - platforms = extract_platforms(cluster) - if len(platforms) == 1: - continue - - # See if any of the platforms are redundant with each other. - for platform in platforms.keys(): - for fallback in port_fallbacks[platform]: - if fallback not in platforms.keys(): - continue - # We have to verify that there isn't an intermediate result - # that causes this duplicate hash to exist. - if has_intermediate_results(test, port_fallbacks[platform], - fallback): - continue - # We print the relative path so it's easy to pipe the results - # to xargs rm. - path = get_relative_test_path(platforms[platform], relative_to) - if not path: - continue - yield { - 'test': test, - 'platform': platform, - 'fallback': fallback, - 'path': path, - } - - -def deduplicate(glob_pattern): - """Traverses LayoutTests and returns information about duplicated files. - Args: - glob pattern to filter the files in LayoutTests. - Returns: - a dictionary containing test, path, platform and fallback. - """ - fallbacks = port_fallbacks() - hashes = cluster_file_hashes(glob_pattern) - return list(find_dups(hashes, fallbacks, os.getcwd())) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py deleted file mode 100644 index 309bf8d..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py +++ /dev/null @@ -1,210 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit tests for deduplicate_tests.py.""" - -import deduplicate_tests -import os -import unittest -import webkitpy.common.checkout.scm as scm - - -class MockExecutive(object): - last_run_command = [] - response = '' - - class Executive(object): - def run_command(self, - args, - cwd=None, - input=None, - error_handler=None, - return_exit_code=False, - return_stderr=True, - decode_output=True): - MockExecutive.last_run_command += [args] - return MockExecutive.response - - -class ListDuplicatesTest(unittest.TestCase): - def setUp(self): - MockExecutive.last_run_command = [] - MockExecutive.response = '' - deduplicate_tests.executive = MockExecutive - self._original_cwd = os.getcwd() - checkout_root = scm.find_checkout_root() - self.assertNotEqual(checkout_root, None) - os.chdir(checkout_root) - - def tearDown(self): - os.chdir(self._original_cwd) - - def test_parse_git_output(self): - git_output = ( - '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' - '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' - '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' - '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/animage.png\n' - '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' - '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/animage.png\n' - '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') - hashes = deduplicate_tests.parse_git_output(git_output, '*') - expected = {('mac/foo-expected.txt', '5053240b3353f6eb39f7cb00259785f16d121df2'): set(['mac/foo-expected.txt']), - ('animage.png', 'abcdebc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/animage.png', 'platform/chromium-win/animage.png']), - ('foo-expected.txt', '4303df5389ca87cae83dd3236b8dd84e16606517'): set(['platform/mac/foo-expected.txt']), - ('foo-expected.txt', 'd6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/foo-expected.txt', 'platform/chromium-win/foo-expected.txt']), - ('foo-expected.txt', 'a004548d107ecc4e1ea08019daf0a14e8634a1ff'): set(['platform/chromium/foo-expected.txt'])} - self.assertEquals(expected, hashes) - - hashes = deduplicate_tests.parse_git_output(git_output, '*.png') - expected = {('animage.png', 'abcdebc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/animage.png', 'platform/chromium-win/animage.png'])} - self.assertEquals(expected, hashes) - - def test_extract_platforms(self): - self.assertEquals({'foo': 'platform/foo/bar', - 'zoo': 'platform/zoo/com'}, - deduplicate_tests.extract_platforms(['platform/foo/bar', 'platform/zoo/com'])) - self.assertEquals({'foo': 'platform/foo/bar', - deduplicate_tests._BASE_PLATFORM: 'what/'}, - deduplicate_tests.extract_platforms(['platform/foo/bar', 'what/'])) - - def 
test_has_intermediate_results(self): - test_cases = ( - # If we found a duplicate in our first fallback, we have no - # intermediate results. - (False, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'chromium-win', - lambda path: True)), - # Since chromium-win has a result, we have an intermediate result. - (True, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'chromium', - lambda path: True)), - # There are no intermediate results. - (False, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'chromium', - lambda path: False)), - # There are no intermediate results since a result for chromium is - # our duplicate file. - (False, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'chromium', - lambda path: path == 'LayoutTests/platform/chromium/fast/foo-expected.txt')), - # We have an intermediate result in 'chromium' even though our - # duplicate is with the file in 'base'. - (True, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'base', - lambda path: path == 'LayoutTests/platform/chromium/fast/foo-expected.txt')), - # We have an intermediate result in 'chromium-win' even though our - # duplicate is in 'base'. - (True, ('fast/foo-expected.txt', - ['chromium-win', 'chromium', 'base'], - 'base', - lambda path: path == 'LayoutTests/platform/chromium-win/fast/foo-expected.txt')), - ) - for expected, inputs in test_cases: - self.assertEquals(expected, - deduplicate_tests.has_intermediate_results(*inputs)) - - def test_unique(self): - MockExecutive.response = ( - '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' - '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' - '100644 blob abcd0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' - '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' - '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') - result = deduplicate_tests.deduplicate('*') - self.assertEquals(1, len(MockExecutive.last_run_command)) - self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) - self.assertEquals(0, len(result)) - - def test_duplicates(self): - MockExecutive.response = ( - '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' - '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' - '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' - '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/animage.png\n' - '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' - '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/animage.png\n' - '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') - - result = deduplicate_tests.deduplicate('*') - self.assertEquals(1, len(MockExecutive.last_run_command)) - self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) - self.assertEquals(2, len(result)) - self.assertEquals({'test': 'animage.png', - 'path': 'LayoutTests/platform/chromium-linux/animage.png', - 'fallback': 'chromium-win', - 'platform': 
'chromium-linux'}, - result[0]) - self.assertEquals({'test': 'foo-expected.txt', - 'path': 'LayoutTests/platform/chromium-linux/foo-expected.txt', - 'fallback': 'chromium-win', - 'platform': 'chromium-linux'}, - result[1]) - - result = deduplicate_tests.deduplicate('*.txt') - self.assertEquals(2, len(MockExecutive.last_run_command)) - self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) - self.assertEquals(1, len(result)) - self.assertEquals({'test': 'foo-expected.txt', - 'path': 'LayoutTests/platform/chromium-linux/foo-expected.txt', - 'fallback': 'chromium-win', - 'platform': 'chromium-linux'}, - result[0]) - - result = deduplicate_tests.deduplicate('*.png') - self.assertEquals(3, len(MockExecutive.last_run_command)) - self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) - self.assertEquals(1, len(result)) - self.assertEquals({'test': 'animage.png', - 'path': 'LayoutTests/platform/chromium-linux/animage.png', - 'fallback': 'chromium-win', - 'platform': 'chromium-linux'}, - result[0]) - - def test_get_relative_test_path(self): - checkout_root = scm.find_checkout_root() - layout_test_dir = os.path.join(checkout_root, 'LayoutTests') - test_cases = ( - ('platform/mac/test.html', - ('platform/mac/test.html', layout_test_dir)), - ('LayoutTests/platform/mac/test.html', - ('platform/mac/test.html', checkout_root)), - (None, - ('platform/mac/test.html', os.path.join(checkout_root, 'WebCore'))), - ('test.html', - ('platform/mac/test.html', os.path.join(layout_test_dir, 'platform/mac'))), - (None, - ('platform/mac/test.html', os.path.join(layout_test_dir, 'platform/win'))), - ) - for expected, inputs in test_cases: - self.assertEquals(expected, - deduplicate_tests.get_relative_test_path(*inputs)) - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py deleted file mode 100644 index e69de29..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py +++ /dev/null diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py deleted file mode 100644 index fdb8da6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py +++ /dev/null @@ -1,569 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A Thread object for running DumpRenderTree and processing URLs from a -shared queue. - -Each thread runs a separate instance of the DumpRenderTree binary and validates -the output. When there are no more URLs to process in the shared queue, the -thread exits. -""" - -from __future__ import with_statement - -import codecs -import copy -import logging -import os -import Queue -import signal -import sys -import thread -import threading -import time - - -from webkitpy.layout_tests.test_types import image_diff -from webkitpy.layout_tests.test_types import test_type_base -from webkitpy.layout_tests.test_types import text_diff - -import test_failures -import test_output -import test_results - -_log = logging.getLogger("webkitpy.layout_tests.layout_package." - "dump_render_tree_thread") - - -def _expected_test_output(port, filename): - """Returns an expected TestOutput object.""" - return test_output.TestOutput(port.expected_text(filename), - port.expected_image(filename), - port.expected_checksum(filename)) - -def _process_output(port, options, test_input, test_types, test_args, - test_output, worker_name): - """Receives the output from a DumpRenderTree process, subjects it to a - number of tests, and returns a list of failure types the test produced. - - Args: - port: port-specific hooks - options: command line options argument from optparse - proc: an active DumpRenderTree process - test_input: Object containing the test filename and timeout - test_types: list of test types to subject the output to - test_args: arguments to be passed to each test - test_output: a TestOutput object containing the output of the test - worker_name: worker name for logging - - Returns: a TestResult object - """ - failures = [] - - if test_output.crash: - failures.append(test_failures.FailureCrash()) - if test_output.timeout: - failures.append(test_failures.FailureTimeout()) - - test_name = port.relative_test_filename(test_input.filename) - if test_output.crash: - _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name, - test_output.error)) - filename = os.path.join(options.results_directory, test_name) - filename = os.path.splitext(filename)[0] + "-stack.txt" - port.maybe_make_directory(os.path.split(filename)[0]) - with codecs.open(filename, "wb", "utf-8") as file: - file.write(test_output.error) - elif test_output.error: - _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name, - test_output.error)) - - expected_test_output = _expected_test_output(port, test_input.filename) - - # Check the output and save the results. 
- start_time = time.time() - time_for_diffs = {} - for test_type in test_types: - start_diff_time = time.time() - new_failures = test_type.compare_output(port, test_input.filename, - test_args, test_output, - expected_test_output) - # Don't add any more failures if we already have a crash, so we don't - # double-report those tests. We do double-report for timeouts since - # we still want to see the text and image output. - if not test_output.crash: - failures.extend(new_failures) - time_for_diffs[test_type.__class__.__name__] = ( - time.time() - start_diff_time) - - total_time_for_all_diffs = time.time() - start_diff_time - return test_results.TestResult(test_input.filename, failures, test_output.test_time, - total_time_for_all_diffs, time_for_diffs) - - -def _pad_timeout(timeout): - """Returns a safe multiple of the per-test timeout value to use - to detect hung test threads. - - """ - # When we're running one test per DumpRenderTree process, we can - # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x - # the timeout; we want to be larger than that. - return timeout * 3 - - -def _milliseconds_to_seconds(msecs): - return float(msecs) / 1000.0 - - -def _should_fetch_expected_checksum(options): - return options.pixel_tests and not (options.new_baseline or options.reset_results) - - -def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name): - # FIXME: Pull this into TestShellThread._run(). - - # The image hash is used to avoid doing an image dump if the - # checksums match, so it should be set to a blank value if we - # are generating a new baseline. (Otherwise, an image from a - # previous run will be copied into the baseline.""" - if _should_fetch_expected_checksum(options): - test_input.image_hash = port.expected_checksum(test_input.filename) - test_output = driver.run_test(test_input) - return _process_output(port, options, test_input, test_types, test_args, - test_output, worker_name) - - -class SingleTestThread(threading.Thread): - """Thread wrapper for running a single test file.""" - - def __init__(self, port, options, worker_number, worker_name, - test_input, test_types, test_args): - """ - Args: - port: object implementing port-specific hooks - options: command line argument object from optparse - worker_number: worker number for tests - worker_name: for logging - test_input: Object containing the test filename and timeout - test_types: A list of TestType objects to run the test output - against. - test_args: A TestArguments object to pass to each TestType. - """ - - threading.Thread.__init__(self) - self._port = port - self._options = options - self._test_input = test_input - self._test_types = test_types - self._test_args = test_args - self._driver = None - self._worker_number = worker_number - self._name = worker_name - - def run(self): - self._covered_run() - - def _covered_run(self): - # FIXME: this is a separate routine to work around a bug - # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. 
- self._driver = self._port.create_driver(self._worker_number) - self._driver.start() - self._test_result = _run_single_test(self._port, self._options, - self._test_input, self._test_types, - self._test_args, self._driver, - self._name) - self._driver.stop() - - def get_test_result(self): - return self._test_result - - -class WatchableThread(threading.Thread): - """This class abstracts an interface used by - run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread - management.""" - def __init__(self): - threading.Thread.__init__(self) - self._canceled = False - self._exception_info = None - self._next_timeout = None - self._thread_id = None - - def cancel(self): - """Set a flag telling this thread to quit.""" - self._canceled = True - - def clear_next_timeout(self): - """Mark a flag telling this thread to stop setting timeouts.""" - self._timeout = 0 - - def exception_info(self): - """If run() terminated on an uncaught exception, return it here - ((type, value, traceback) tuple). - Returns None if run() terminated normally. Meant to be called after - joining this thread.""" - return self._exception_info - - def id(self): - """Return a thread identifier.""" - return self._thread_id - - def next_timeout(self): - """Return the time the test is supposed to finish by.""" - return self._next_timeout - - -class TestShellThread(WatchableThread): - def __init__(self, port, options, worker_number, worker_name, - filename_list_queue, result_queue): - """Initialize all the local state for this DumpRenderTree thread. - - Args: - port: interface to port-specific hooks - options: command line options argument from optparse - worker_number: identifier for a particular worker thread. - worker_name: for logging. - filename_list_queue: A thread safe Queue class that contains lists - of tuples of (filename, uri) pairs. - result_queue: A thread safe Queue class that will contain - serialized TestResult objects. - """ - WatchableThread.__init__(self) - self._port = port - self._options = options - self._worker_number = worker_number - self._name = worker_name - self._filename_list_queue = filename_list_queue - self._result_queue = result_queue - self._filename_list = [] - self._driver = None - self._test_group_timing_stats = {} - self._test_results = [] - self._num_tests = 0 - self._start_time = 0 - self._stop_time = 0 - self._have_http_lock = False - self._http_lock_wait_begin = 0 - self._http_lock_wait_end = 0 - - self._test_types = [] - for cls in self._get_test_type_classes(): - self._test_types.append(cls(self._port, - self._options.results_directory)) - self._test_args = self._get_test_args(worker_number) - - # Current group of tests we're running. - self._current_group = None - # Number of tests in self._current_group. - self._num_tests_in_current_group = None - # Time at which we started running tests from self._current_group. 
- self._current_group_start_time = None - - def _get_test_args(self, worker_number): - """Returns the tuple of arguments for tests and for DumpRenderTree.""" - test_args = test_type_base.TestArguments() - test_args.new_baseline = self._options.new_baseline - test_args.reset_results = self._options.reset_results - - return test_args - - def _get_test_type_classes(self): - classes = [text_diff.TestTextDiff] - if self._options.pixel_tests: - classes.append(image_diff.ImageDiff) - return classes - - def get_test_group_timing_stats(self): - """Returns a dictionary mapping test group to a tuple of - (number of tests in that group, time to run the tests)""" - return self._test_group_timing_stats - - def get_test_results(self): - """Return the list of all tests run on this thread. - - This is used to calculate per-thread statistics. - - """ - return self._test_results - - def get_total_time(self): - return max(self._stop_time - self._start_time - - self._http_lock_wait_time(), 0.0) - - def get_num_tests(self): - return self._num_tests - - def run(self): - """Delegate main work to a helper method and watch for uncaught - exceptions.""" - self._covered_run() - - def _covered_run(self): - # FIXME: this is a separate routine to work around a bug - # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. - self._thread_id = thread.get_ident() - self._start_time = time.time() - self._num_tests = 0 - try: - _log.debug('%s starting' % (self.getName())) - self._run(test_runner=None, result_summary=None) - _log.debug('%s done (%d tests)' % (self.getName(), - self.get_num_tests())) - except KeyboardInterrupt: - self._exception_info = sys.exc_info() - _log.debug("%s interrupted" % self.getName()) - except: - # Save the exception for our caller to see. - self._exception_info = sys.exc_info() - self._stop_time = time.time() - _log.error('%s dying, exception raised' % self.getName()) - - self._stop_time = time.time() - - def run_in_main_thread(self, test_runner, result_summary): - """This hook allows us to run the tests from the main thread if - --num-test-shells==1, instead of having to always run two or more - threads. This allows us to debug the test harness without having to - do multi-threaded debugging.""" - self._run(test_runner, result_summary) - - def cancel(self): - """Clean up http lock and set a flag telling this thread to quit.""" - self._stop_servers_with_lock() - WatchableThread.cancel(self) - - def next_timeout(self): - """Return the time the test is supposed to finish by.""" - if self._next_timeout: - return self._next_timeout + self._http_lock_wait_time() - return self._next_timeout - - def _http_lock_wait_time(self): - """Return the time what http locking takes.""" - if self._http_lock_wait_begin == 0: - return 0 - if self._http_lock_wait_end == 0: - return time.time() - self._http_lock_wait_begin - return self._http_lock_wait_end - self._http_lock_wait_begin - - def _run(self, test_runner, result_summary): - """Main work entry point of the thread. Basically we pull urls from the - filename queue and run the tests until we run out of urls. - - If test_runner is not None, then we call test_runner.UpdateSummary() - with the results of each test.""" - batch_size = self._options.batch_size - batch_count = 0 - - # Append tests we're running to the existing tests_run.txt file. - # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput. 
- tests_run_filename = os.path.join(self._options.results_directory, - "tests_run.txt") - tests_run_file = codecs.open(tests_run_filename, "a", "utf-8") - - while True: - if self._canceled: - _log.debug('Testing cancelled') - tests_run_file.close() - return - - if len(self._filename_list) is 0: - if self._current_group is not None: - self._test_group_timing_stats[self._current_group] = \ - (self._num_tests_in_current_group, - time.time() - self._current_group_start_time) - - try: - self._current_group, self._filename_list = \ - self._filename_list_queue.get_nowait() - except Queue.Empty: - self._stop_servers_with_lock() - self._kill_dump_render_tree() - tests_run_file.close() - return - - if self._current_group == "tests_to_http_lock": - self._start_servers_with_lock() - elif self._have_http_lock: - self._stop_servers_with_lock() - - self._num_tests_in_current_group = len(self._filename_list) - self._current_group_start_time = time.time() - - test_input = self._filename_list.pop() - - # We have a url, run tests. - batch_count += 1 - self._num_tests += 1 - if self._options.run_singly: - result = self._run_test_in_another_thread(test_input) - else: - result = self._run_test_in_this_thread(test_input) - - filename = test_input.filename - tests_run_file.write(filename + "\n") - if result.failures: - # Check and kill DumpRenderTree if we need to. - if len([1 for f in result.failures - if f.should_kill_dump_render_tree()]): - self._kill_dump_render_tree() - # Reset the batch count since the shell just bounced. - batch_count = 0 - # Print the error message(s). - error_str = '\n'.join([' ' + f.message() for - f in result.failures]) - _log.debug("%s %s failed:\n%s" % (self.getName(), - self._port.relative_test_filename(filename), - error_str)) - else: - _log.debug("%s %s passed" % (self.getName(), - self._port.relative_test_filename(filename))) - self._result_queue.put(result.dumps()) - - if batch_size > 0 and batch_count >= batch_size: - # Bounce the shell and reset count. - self._kill_dump_render_tree() - batch_count = 0 - - if test_runner: - test_runner.update_summary(result_summary) - - def _run_test_in_another_thread(self, test_input): - """Run a test in a separate thread, enforcing a hard time limit. - - Since we can only detect the termination of a thread, not any internal - state or progress, we can only run per-test timeouts when running test - files singly. - - Args: - test_input: Object containing the test filename and timeout - - Returns: - A TestResult - """ - worker = SingleTestThread(self._port, - self._options, - self._worker_number, - self._name, - test_input, - self._test_types, - self._test_args) - - worker.start() - - thread_timeout = _milliseconds_to_seconds( - _pad_timeout(int(test_input.timeout))) - thread._next_timeout = time.time() + thread_timeout - worker.join(thread_timeout) - if worker.isAlive(): - # If join() returned with the thread still running, the - # DumpRenderTree is completely hung and there's nothing - # more we can do with it. We have to kill all the - # DumpRenderTrees to free it up. If we're running more than - # one DumpRenderTree thread, we'll end up killing the other - # DumpRenderTrees too, introducing spurious crashes. We accept - # that tradeoff in order to avoid losing the rest of this - # thread's results. - _log.error('Test thread hung: killing all DumpRenderTrees') - if worker._driver: - worker._driver.stop() - - try: - result = worker.get_test_result() - except AttributeError, e: - # This gets raised if the worker thread has already exited. 
- failures = [] - _log.error('Cannot get results of test: %s' % - test_input.filename) - result = test_results.TestResult(test_input.filename, failures=[], - test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={}) - - return result - - def _run_test_in_this_thread(self, test_input): - """Run a single test file using a shared DumpRenderTree process. - - Args: - test_input: Object containing the test filename, uri and timeout - - Returns: a TestResult object. - """ - self._ensure_dump_render_tree_is_running() - thread_timeout = _milliseconds_to_seconds( - _pad_timeout(int(test_input.timeout))) - self._next_timeout = time.time() + thread_timeout - test_result = _run_single_test(self._port, self._options, test_input, - self._test_types, self._test_args, - self._driver, self._name) - self._test_results.append(test_result) - return test_result - - def _ensure_dump_render_tree_is_running(self): - """Start the shared DumpRenderTree, if it's not running. - - This is not for use when running tests singly, since those each start - a separate DumpRenderTree in their own thread. - - """ - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - if not self._driver or self._driver.poll() is not None: - self._driver = self._port.create_driver(self._worker_number) - self._driver.start() - - def _start_servers_with_lock(self): - """Acquire http lock and start the servers.""" - self._http_lock_wait_begin = time.time() - _log.debug('Acquire http lock ...') - self._port.acquire_http_lock() - _log.debug('Starting HTTP server ...') - self._port.start_http_server() - _log.debug('Starting WebSocket server ...') - self._port.start_websocket_server() - self._http_lock_wait_end = time.time() - self._have_http_lock = True - - def _stop_servers_with_lock(self): - """Stop the servers and release http lock.""" - if self._have_http_lock: - _log.debug('Stopping HTTP server ...') - self._port.stop_http_server() - _log.debug('Stopping WebSocket server ...') - self._port.stop_websocket_server() - _log.debug('Release http lock ...') - self._port.release_http_lock() - self._have_http_lock = False - - def _kill_dump_render_tree(self): - """Kill the DumpRenderTree process if it's running.""" - if self._driver: - self._driver.stop() - self._driver = None diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py deleted file mode 100644 index b054c5b..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import logging -import os - -from webkitpy.layout_tests.layout_package import json_results_generator -from webkitpy.layout_tests.layout_package import test_expectations -from webkitpy.layout_tests.layout_package import test_failures -import webkitpy.thirdparty.simplejson as simplejson - - -class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase): - """A JSON results generator for layout tests.""" - - LAYOUT_TESTS_PATH = "LayoutTests" - - # Additional JSON fields. - WONTFIX = "wontfixCounts" - - # Note that we omit test_expectations.FAIL from this list because - # it should never show up (it's a legacy input expectation, never - # an output expectation). - FAILURE_TO_CHAR = {test_expectations.CRASH: "C", - test_expectations.TIMEOUT: "T", - test_expectations.IMAGE: "I", - test_expectations.TEXT: "F", - test_expectations.MISSING: "O", - test_expectations.IMAGE_PLUS_TEXT: "Z"} - - def __init__(self, port, builder_name, build_name, build_number, - results_file_base_path, builder_base_url, - test_timings, expectations, result_summary, all_tests, - generate_incremental_results=False, test_results_server=None, - test_type="", master_name=""): - """Modifies the results.json file. Grabs it off the archive directory - if it is not found locally. - - Args: - result_summary: ResultsSummary object storing the summary of the test - results. - """ - super(JSONLayoutResultsGenerator, self).__init__( - builder_name, build_name, build_number, results_file_base_path, - builder_base_url, {}, port.test_repository_paths(), - generate_incremental_results, test_results_server, - test_type, master_name) - - self._port = port - self._expectations = expectations - - # We want relative paths to LayoutTest root for JSON output. - path_to_name = self._get_path_relative_to_layout_test_root - self._result_summary = result_summary - self._failures = dict( - (path_to_name(test), test_failures.determine_result_type(failures)) - for (test, failures) in result_summary.failures.iteritems()) - self._all_tests = [path_to_name(test) for test in all_tests] - self._test_timings = dict( - (path_to_name(test_tuple.filename), test_tuple.test_run_time) - for test_tuple in test_timings) - - self.generate_json_output() - - def _get_path_relative_to_layout_test_root(self, test): - """Returns the path of the test relative to the layout test root. - For example, for: - src/third_party/WebKit/LayoutTests/fast/forms/foo.html - We would return - fast/forms/foo.html - """ - index = test.find(self.LAYOUT_TESTS_PATH) - if index is not -1: - index += len(self.LAYOUT_TESTS_PATH) - - if index is -1: - # Already a relative path. - relativePath = test - else: - relativePath = test[index + 1:] - - # Make sure all paths are unix-style. 
- return relativePath.replace('\\', '/') - - # override - def _get_test_timing(self, test_name): - if test_name in self._test_timings: - # Floor for now to get time in seconds. - return int(self._test_timings[test_name]) - return 0 - - # override - def _get_failed_test_names(self): - return set(self._failures.keys()) - - # override - def _get_modifier_char(self, test_name): - if test_name not in self._all_tests: - return self.NO_DATA_RESULT - - if test_name in self._failures: - return self.FAILURE_TO_CHAR[self._failures[test_name]] - - return self.PASS_RESULT - - # override - def _get_result_char(self, test_name): - return self._get_modifier_char(test_name) - - # override - def _convert_json_to_current_version(self, results_json): - archive_version = None - if self.VERSION_KEY in results_json: - archive_version = results_json[self.VERSION_KEY] - - super(JSONLayoutResultsGenerator, - self)._convert_json_to_current_version(results_json) - - # version 2->3 - if archive_version == 2: - for results_for_builder in results_json.itervalues(): - try: - test_results = results_for_builder[self.TESTS] - except: - continue - - for test in test_results: - # Make sure all paths are relative - test_path = self._get_path_relative_to_layout_test_root(test) - if test_path != test: - test_results[test_path] = test_results[test] - del test_results[test] - - # override - def _insert_failure_summaries(self, results_for_builder): - summary = self._result_summary - - self._insert_item_into_raw_list(results_for_builder, - len((set(summary.failures.keys()) | - summary.tests_by_expectation[test_expectations.SKIP]) & - summary.tests_by_timeline[test_expectations.NOW]), - self.FIXABLE_COUNT) - self._insert_item_into_raw_list(results_for_builder, - self._get_failure_summary_entry(test_expectations.NOW), - self.FIXABLE) - self._insert_item_into_raw_list(results_for_builder, - len(self._expectations.get_tests_with_timeline( - test_expectations.NOW)), self.ALL_FIXABLE_COUNT) - self._insert_item_into_raw_list(results_for_builder, - self._get_failure_summary_entry(test_expectations.WONTFIX), - self.WONTFIX) - - # override - def _normalize_results_json(self, test, test_name, tests): - super(JSONLayoutResultsGenerator, self)._normalize_results_json( - test, test_name, tests) - - # Remove tests that don't exist anymore. - full_path = os.path.join(self._port.layout_tests_dir(), test_name) - full_path = os.path.normpath(full_path) - if not os.path.exists(full_path): - del tests[test_name] - - def _get_failure_summary_entry(self, timeline): - """Creates a summary object to insert into the JSON. - - Args: - summary ResultSummary object with test results - timeline current test_expectations timeline to build entry for - (e.g., test_expectations.NOW, etc.) 
- """ - entry = {} - summary = self._result_summary - timeline_tests = summary.tests_by_timeline[timeline] - entry[self.SKIP_RESULT] = len( - summary.tests_by_expectation[test_expectations.SKIP] & - timeline_tests) - entry[self.PASS_RESULT] = len( - summary.tests_by_expectation[test_expectations.PASS] & - timeline_tests) - for failure_type in summary.tests_by_expectation.keys(): - if failure_type not in self.FAILURE_TO_CHAR: - continue - count = len(summary.tests_by_expectation[failure_type] & - timeline_tests) - entry[self.FAILURE_TO_CHAR[failure_type]] = count - return entry diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py deleted file mode 100644 index 331e330..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py +++ /dev/null @@ -1,661 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import with_statement - -import codecs -import logging -import os -import subprocess -import sys -import time -import urllib2 -import xml.dom.minidom - -from webkitpy.layout_tests.layout_package import test_results_uploader - -import webkitpy.thirdparty.simplejson as simplejson - -# A JSON results generator for generic tests. -# FIXME: move this code out of the layout_package directory. - -_log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator") - -class TestResult(object): - """A simple class that represents a single test result.""" - - # Test modifier constants. 
- (NONE, FAILS, FLAKY, DISABLED) = range(4) - - def __init__(self, name, failed=False, elapsed_time=0): - self.name = name - self.failed = failed - self.time = elapsed_time - - test_name = name - try: - test_name = name.split('.')[1] - except IndexError: - _log.warn("Invalid test name: %s.", name) - pass - - if test_name.startswith('FAILS_'): - self.modifier = self.FAILS - elif test_name.startswith('FLAKY_'): - self.modifier = self.FLAKY - elif test_name.startswith('DISABLED_'): - self.modifier = self.DISABLED - else: - self.modifier = self.NONE - - def fixable(self): - return self.failed or self.modifier == self.DISABLED - - -class JSONResultsGeneratorBase(object): - """A JSON results generator for generic tests.""" - - MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 - # Min time (seconds) that will be added to the JSON. - MIN_TIME = 1 - JSON_PREFIX = "ADD_RESULTS(" - JSON_SUFFIX = ");" - - # Note that in non-chromium tests those chars are used to indicate - # test modifiers (FAILS, FLAKY, etc) but not actual test results. - PASS_RESULT = "P" - SKIP_RESULT = "X" - FAIL_RESULT = "F" - FLAKY_RESULT = "L" - NO_DATA_RESULT = "N" - - MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT, - TestResult.DISABLED: SKIP_RESULT, - TestResult.FAILS: FAIL_RESULT, - TestResult.FLAKY: FLAKY_RESULT} - - VERSION = 3 - VERSION_KEY = "version" - RESULTS = "results" - TIMES = "times" - BUILD_NUMBERS = "buildNumbers" - TIME = "secondsSinceEpoch" - TESTS = "tests" - - FIXABLE_COUNT = "fixableCount" - FIXABLE = "fixableCounts" - ALL_FIXABLE_COUNT = "allFixableCount" - - RESULTS_FILENAME = "results.json" - INCREMENTAL_RESULTS_FILENAME = "incremental_results.json" - - URL_FOR_TEST_LIST_JSON = \ - "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s" - - def __init__(self, builder_name, build_name, build_number, - results_file_base_path, builder_base_url, - test_results_map, svn_repositories=None, - generate_incremental_results=False, - test_results_server=None, - test_type="", - master_name=""): - """Modifies the results.json file. Grabs it off the archive directory - if it is not found locally. - - Args - builder_name: the builder name (e.g. Webkit). - build_name: the build name (e.g. webkit-rel). - build_number: the build number. - results_file_base_path: Absolute path to the directory containing the - results json file. - builder_base_url: the URL where we have the archived test results. - If this is None no archived results will be retrieved. - test_results_map: A dictionary that maps test_name to TestResult. - svn_repositories: A (json_field_name, svn_path) pair for SVN - repositories that tests rely on. The SVN revision will be - included in the JSON with the given json_field_name. - generate_incremental_results: If true, generate incremental json file - from current run results. - test_results_server: server that hosts test results json. - test_type: test type string (e.g. 'layout-tests'). - master_name: the name of the buildbot master. 
- """ - self._builder_name = builder_name - self._build_name = build_name - self._build_number = build_number - self._builder_base_url = builder_base_url - self._results_directory = results_file_base_path - self._results_file_path = os.path.join(results_file_base_path, - self.RESULTS_FILENAME) - self._incremental_results_file_path = os.path.join( - results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME) - - self._test_results_map = test_results_map - self._test_results = test_results_map.values() - self._generate_incremental_results = generate_incremental_results - - self._svn_repositories = svn_repositories - if not self._svn_repositories: - self._svn_repositories = {} - - self._test_results_server = test_results_server - self._test_type = test_type - self._master_name = master_name - - self._json = None - self._archived_results = None - - def generate_json_output(self): - """Generates the JSON output file.""" - - # Generate the JSON output file that has full results. - # FIXME: stop writing out the full results file once all bots use - # incremental results. - if not self._json: - self._json = self.get_json() - if self._json: - self._generate_json_file(self._json, self._results_file_path) - - # Generate the JSON output file that only has incremental results. - if self._generate_incremental_results: - json = self.get_json(incremental=True) - if json: - self._generate_json_file( - json, self._incremental_results_file_path) - - def get_json(self, incremental=False): - """Gets the results for the results.json file.""" - results_json = {} - if not incremental: - if self._json: - return self._json - - if self._archived_results: - results_json = self._archived_results - - if not results_json: - results_json, error = self._get_archived_json_results(incremental) - if error: - # If there was an error don't write a results.json - # file at all as it would lose all the information on the - # bot. - _log.error("Archive directory is inaccessible. Not " - "modifying or clobbering the results.json " - "file: " + str(error)) - return None - - builder_name = self._builder_name - if results_json and builder_name not in results_json: - _log.debug("Builder name (%s) is not in the results.json file." - % builder_name) - - self._convert_json_to_current_version(results_json) - - if builder_name not in results_json: - results_json[builder_name] = ( - self._create_results_for_builder_json()) - - results_for_builder = results_json[builder_name] - - self._insert_generic_metadata(results_for_builder) - - self._insert_failure_summaries(results_for_builder) - - # Update the all failing tests with result type and time. - tests = results_for_builder[self.TESTS] - all_failing_tests = self._get_failed_test_names() - all_failing_tests.update(tests.iterkeys()) - for test in all_failing_tests: - self._insert_test_time_and_result(test, tests, incremental) - - return results_json - - def set_archived_results(self, archived_results): - self._archived_results = archived_results - - def upload_json_files(self, json_files): - """Uploads the given json_files to the test_results_server (if the - test_results_server is given).""" - if not self._test_results_server: - return - - if not self._master_name: - _log.error("--test-results-server was set, but --master-name was not. 
Not uploading JSON files.") - return - - _log.info("Uploading JSON files for builder: %s", self._builder_name) - attrs = [("builder", self._builder_name), - ("testtype", self._test_type), - ("master", self._master_name)] - - files = [(file, os.path.join(self._results_directory, file)) - for file in json_files] - - uploader = test_results_uploader.TestResultsUploader( - self._test_results_server) - try: - # Set uploading timeout in case appengine server is having problem. - # 120 seconds are more than enough to upload test results. - uploader.upload(attrs, files, 120) - except Exception, err: - _log.error("Upload failed: %s" % err) - return - - _log.info("JSON files uploaded.") - - def _generate_json_file(self, json, file_path): - # Specify separators in order to get compact encoding. - json_data = simplejson.dumps(json, separators=(',', ':')) - json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX - - results_file = codecs.open(file_path, "w", "utf-8") - results_file.write(json_string) - results_file.close() - - def _get_test_timing(self, test_name): - """Returns test timing data (elapsed time) in second - for the given test_name.""" - if test_name in self._test_results_map: - # Floor for now to get time in seconds. - return int(self._test_results_map[test_name].time) - return 0 - - def _get_failed_test_names(self): - """Returns a set of failed test names.""" - return set([r.name for r in self._test_results if r.failed]) - - def _get_modifier_char(self, test_name): - """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, - PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier - for the given test_name. - """ - if test_name not in self._test_results_map: - return JSONResultsGenerator.NO_DATA_RESULT - - test_result = self._test_results_map[test_name] - if test_result.modifier in self.MODIFIER_TO_CHAR.keys(): - return self.MODIFIER_TO_CHAR[test_result.modifier] - - return JSONResultsGenerator.PASS_RESULT - - def _get_result_char(self, test_name): - """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, - PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result - for the given test_name. - """ - if test_name not in self._test_results_map: - return JSONResultsGenerator.NO_DATA_RESULT - - test_result = self._test_results_map[test_name] - if test_result.modifier == TestResult.DISABLED: - return JSONResultsGenerator.SKIP_RESULT - - if test_result.failed: - return JSONResultsGenerator.FAIL_RESULT - - return JSONResultsGenerator.PASS_RESULT - - # FIXME: Callers should use scm.py instead. - # FIXME: Identify and fix the run-time errors that were observed on Windows - # chromium buildbot when we had updated this code to use scm.py once before. - def _get_svn_revision(self, in_directory): - """Returns the svn revision for the given directory. - - Args: - in_directory: The directory where svn is to be run. - """ - if os.path.exists(os.path.join(in_directory, '.svn')): - # Note: Not thread safe: http://bugs.python.org/issue2320 - output = subprocess.Popen(["svn", "info", "--xml"], - cwd=in_directory, - shell=(sys.platform == 'win32'), - stdout=subprocess.PIPE).communicate()[0] - try: - dom = xml.dom.minidom.parseString(output) - return dom.getElementsByTagName('entry')[0].getAttribute( - 'revision') - except xml.parsers.expat.ExpatError: - return "" - return "" - - def _get_archived_json_results(self, for_incremental=False): - """Reads old results JSON file if it exists. - Returns (archived_results, error) tuple where error is None if results - were successfully read. 
- - if for_incremental is True, download JSON file that only contains test - name list from test-results server. This is for generating incremental - JSON so the file generated has info for tests that failed before but - pass or are skipped from current run. - """ - results_json = {} - old_results = None - error = None - - if os.path.exists(self._results_file_path) and not for_incremental: - with codecs.open(self._results_file_path, "r", "utf-8") as file: - old_results = file.read() - elif self._builder_base_url or for_incremental: - if for_incremental: - if not self._test_results_server: - # starting from fresh if no test results server specified. - return {}, None - - results_file_url = (self.URL_FOR_TEST_LIST_JSON % - (urllib2.quote(self._test_results_server), - urllib2.quote(self._builder_name), - self.RESULTS_FILENAME, - urllib2.quote(self._test_type))) - else: - # Check if we have the archived JSON file on the buildbot - # server. - results_file_url = (self._builder_base_url + - self._build_name + "/" + self.RESULTS_FILENAME) - _log.error("Local results.json file does not exist. Grabbing " - "it off the archive at " + results_file_url) - - try: - results_file = urllib2.urlopen(results_file_url) - info = results_file.info() - old_results = results_file.read() - except urllib2.HTTPError, http_error: - # A non-4xx status code means the bot is hosed for some reason - # and we can't grab the results.json file off of it. - if (http_error.code < 400 and http_error.code >= 500): - error = http_error - except urllib2.URLError, url_error: - error = url_error - - if old_results: - # Strip the prefix and suffix so we can get the actual JSON object. - old_results = old_results[len(self.JSON_PREFIX): - len(old_results) - len(self.JSON_SUFFIX)] - - try: - results_json = simplejson.loads(old_results) - except: - _log.debug("results.json was not valid JSON. Clobbering.") - # The JSON file is not valid JSON. Just clobber the results. - results_json = {} - else: - _log.debug('Old JSON results do not exist. Starting fresh.') - results_json = {} - - return results_json, error - - def _insert_failure_summaries(self, results_for_builder): - """Inserts aggregate pass/failure statistics into the JSON. - This method reads self._test_results and generates - FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries. - - Args: - results_for_builder: Dictionary containing the test results for a - single builder. - """ - # Insert the number of tests that failed or skipped. - fixable_count = len([r for r in self._test_results if r.fixable()]) - self._insert_item_into_raw_list(results_for_builder, - fixable_count, self.FIXABLE_COUNT) - - # Create a test modifiers (FAILS, FLAKY etc) summary dictionary. - entry = {} - for test_name in self._test_results_map.iterkeys(): - result_char = self._get_modifier_char(test_name) - entry[result_char] = entry.get(result_char, 0) + 1 - - # Insert the pass/skip/failure summary dictionary. - self._insert_item_into_raw_list(results_for_builder, entry, - self.FIXABLE) - - # Insert the number of all the tests that are supposed to pass. - all_test_count = len(self._test_results) - self._insert_item_into_raw_list(results_for_builder, - all_test_count, self.ALL_FIXABLE_COUNT) - - def _insert_item_into_raw_list(self, results_for_builder, item, key): - """Inserts the item into the list with the given key in the results for - this builder. Creates the list if no such list exists. - - Args: - results_for_builder: Dictionary containing the test results for a - single builder. 
- item: Number or string to insert into the list. - key: Key in results_for_builder for the list to insert into. - """ - if key in results_for_builder: - raw_list = results_for_builder[key] - else: - raw_list = [] - - raw_list.insert(0, item) - raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] - results_for_builder[key] = raw_list - - def _insert_item_run_length_encoded(self, item, encoded_results): - """Inserts the item into the run-length encoded results. - - Args: - item: String or number to insert. - encoded_results: run-length encoded results. An array of arrays, e.g. - [[3,'A'],[1,'Q']] encodes AAAQ. - """ - if len(encoded_results) and item == encoded_results[0][1]: - num_results = encoded_results[0][0] - if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: - encoded_results[0][0] = num_results + 1 - else: - # Use a list instead of a class for the run-length encoding since - # we want the serialized form to be concise. - encoded_results.insert(0, [1, item]) - - def _insert_generic_metadata(self, results_for_builder): - """ Inserts generic metadata (such as version number, current time etc) - into the JSON. - - Args: - results_for_builder: Dictionary containing the test results for - a single builder. - """ - self._insert_item_into_raw_list(results_for_builder, - self._build_number, self.BUILD_NUMBERS) - - # Include SVN revisions for the given repositories. - for (name, path) in self._svn_repositories: - self._insert_item_into_raw_list(results_for_builder, - self._get_svn_revision(path), - name + 'Revision') - - self._insert_item_into_raw_list(results_for_builder, - int(time.time()), - self.TIME) - - def _insert_test_time_and_result(self, test_name, tests, incremental=False): - """ Insert a test item with its results to the given tests dictionary. - - Args: - tests: Dictionary containing test result entries. - """ - - result = self._get_result_char(test_name) - time = self._get_test_timing(test_name) - - if test_name not in tests: - tests[test_name] = self._create_results_and_times_json() - - thisTest = tests[test_name] - if self.RESULTS in thisTest: - self._insert_item_run_length_encoded(result, thisTest[self.RESULTS]) - else: - thisTest[self.RESULTS] = [[1, result]] - - if self.TIMES in thisTest: - self._insert_item_run_length_encoded(time, thisTest[self.TIMES]) - else: - thisTest[self.TIMES] = [[1, time]] - - # Don't normalize the incremental results json because we need results - # for tests that pass or have no data from current run. - if not incremental: - self._normalize_results_json(thisTest, test_name, tests) - - def _convert_json_to_current_version(self, results_json): - """If the JSON does not match the current version, converts it to the - current version and adds in the new version number. - """ - if (self.VERSION_KEY in results_json and - results_json[self.VERSION_KEY] == self.VERSION): - return - - results_json[self.VERSION_KEY] = self.VERSION - - def _create_results_and_times_json(self): - results_and_times = {} - results_and_times[self.RESULTS] = [] - results_and_times[self.TIMES] = [] - return results_and_times - - def _create_results_for_builder_json(self): - results_for_builder = {} - results_for_builder[self.TESTS] = {} - return results_for_builder - - def _remove_items_over_max_number_of_builds(self, encoded_list): - """Removes items from the run-length encoded list after the final - item that exceeds the max number of builds to track. - - Args: - encoded_results: run-length encoded results. An array of arrays, e.g. 
- [[3,'A'],[1,'Q']] encodes AAAQ. - """ - num_builds = 0 - index = 0 - for result in encoded_list: - num_builds = num_builds + result[0] - index = index + 1 - if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: - return encoded_list[:index] - return encoded_list - - def _normalize_results_json(self, test, test_name, tests): - """ Prune tests where all runs pass or tests that no longer exist and - truncate all results to maxNumberOfBuilds. - - Args: - test: ResultsAndTimes object for this test. - test_name: Name of the test. - tests: The JSON object with all the test results for this builder. - """ - test[self.RESULTS] = self._remove_items_over_max_number_of_builds( - test[self.RESULTS]) - test[self.TIMES] = self._remove_items_over_max_number_of_builds( - test[self.TIMES]) - - is_all_pass = self._is_results_all_of_type(test[self.RESULTS], - self.PASS_RESULT) - is_all_no_data = self._is_results_all_of_type(test[self.RESULTS], - self.NO_DATA_RESULT) - max_time = max([time[1] for time in test[self.TIMES]]) - - # Remove all passes/no-data from the results to reduce noise and - # filesize. If a test passes every run, but takes > MIN_TIME to run, - # don't throw away the data. - if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): - del tests[test_name] - - def _is_results_all_of_type(self, results, type): - """Returns whether all the results are of the given type - (e.g. all passes).""" - return len(results) == 1 and results[0][1] == type - - -# A wrapper class for JSONResultsGeneratorBase. -# Note: There's a script outside the WebKit codebase calling this script. -# FIXME: Please keep the interface until the other script is cleaned up. -# (http://src.chromium.org/viewvc/chrome/trunk/src/webkit/tools/layout_tests/webkitpy/layout_tests/test_output_xml_to_json.py?view=markup) -class JSONResultsGenerator(JSONResultsGeneratorBase): - # The flag is for backward compatibility. - output_json_in_init = True - - def __init__(self, port, builder_name, build_name, build_number, - results_file_base_path, builder_base_url, - test_timings, failures, passed_tests, skipped_tests, all_tests, - test_results_server=None, test_type=None, master_name=None): - """Generates a JSON results file. - - Args - builder_name: the builder name (e.g. Webkit). - build_name: the build name (e.g. webkit-rel). - build_number: the build number. - results_file_base_path: Absolute path to the directory containing the - results json file. - builder_base_url: the URL where we have the archived test results. - test_timings: Map of test name to a test_run-time. - failures: Map of test name to a failure type (of test_expectations). - passed_tests: A set containing all the passed tests. - skipped_tests: A set containing all the skipped tests. - all_tests: List of all the tests that were run. This should not - include skipped tests. - test_results_server: server that hosts test results json. - test_type: the test type. - master_name: the name of the buildbot master. - """ - - self._test_type = test_type - self._results_directory = results_file_base_path - - # Create a map of (name, TestResult). 
- test_results_map = dict() - get = test_results_map.get - for (test, time) in test_timings.iteritems(): - test_results_map[test] = TestResult(test, elapsed_time=time) - for test in failures.iterkeys(): - test_results_map[test] = test_result = get(test, TestResult(test)) - test_result.failed = True - for test in skipped_tests: - test_results_map[test] = test_result = get(test, TestResult(test)) - for test in passed_tests: - test_results_map[test] = test_result = get(test, TestResult(test)) - test_result.failed = False - for test in all_tests: - if test not in test_results_map: - test_results_map[test] = TestResult(test) - - # Generate the JSON with incremental flag enabled. - # (This should also output the full result for now.) - super(JSONResultsGenerator, self).__init__( - builder_name, build_name, build_number, - results_file_base_path, builder_base_url, test_results_map, - svn_repositories=port.test_repository_paths(), - generate_incremental_results=True, - test_results_server=test_results_server, - test_type=test_type, - master_name=master_name) - - if self.__class__.output_json_in_init: - self.generate_json_output() - self.upload_json_files([self.INCREMENTAL_RESULTS_FILENAME]) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py deleted file mode 100644 index d6275ee..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
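For reference while reading the tests below: the generator above keeps each test's results and times as run-length encoded lists, newest build first, exactly as described in _insert_item_run_length_encoded(). A minimal standalone sketch of that encoding, independent of the class (MAX_BUILDS here is only a stand-in for MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG, whose real value is defined earlier in the file):

MAX_BUILDS = 750  # assumed stand-in for MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG

def insert_run_length_encoded(item, encoded):
    # Prepend one result, merging with the newest run when it matches.
    if encoded and item == encoded[0][1]:
        if encoded[0][0] <= MAX_BUILDS:
            encoded[0][0] += 1
    else:
        encoded.insert(0, [1, item])

results = [[3, 'P'], [1, 'F']]              # PPPF, newest build first
insert_run_length_encoded('P', results)     # the newest run of passes grows
assert results == [[4, 'P'], [1, 'F']]
insert_run_length_encoded('F', results)     # a different result starts a new run
assert results == [[1, 'F'], [4, 'P'], [1, 'F']]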
- -"""Unit tests for json_results_generator.py.""" - -import unittest -import optparse -import random -import shutil -import tempfile - -from webkitpy.layout_tests.layout_package import json_results_generator -from webkitpy.layout_tests.layout_package import test_expectations -from webkitpy.layout_tests import port - - -class JSONGeneratorTest(unittest.TestCase): - def setUp(self): - json_results_generator.JSONResultsGenerator.output_json_in_init = False - self.builder_name = 'DUMMY_BUILDER_NAME' - self.build_name = 'DUMMY_BUILD_NAME' - self.build_number = 'DUMMY_BUILDER_NUMBER' - self._json = None - self._num_runs = 0 - self._tests_set = set([]) - self._test_timings = {} - self._failed_tests = set([]) - - self._PASS_tests = set([]) - self._DISABLED_tests = set([]) - self._FLAKY_tests = set([]) - self._FAILS_tests = set([]) - - def _test_json_generation(self, passed_tests_list, failed_tests_list): - tests_set = set(passed_tests_list) | set(failed_tests_list) - - DISABLED_tests = set([t for t in tests_set - if t.startswith('DISABLED_')]) - FLAKY_tests = set([t for t in tests_set - if t.startswith('FLAKY_')]) - FAILS_tests = set([t for t in tests_set - if t.startswith('FAILS_')]) - PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests) - - passed_tests = set(passed_tests_list) - DISABLED_tests - failed_tests = set(failed_tests_list) - - test_timings = {} - i = 0 - for test in tests_set: - test_timings[test] = float(self._num_runs * 100 + i) - i += 1 - - # For backward compatibility. - reason = test_expectations.TEXT - failed_tests_dict = dict([(name, reason) for name in failed_tests]) - - port_obj = port.get(None) - generator = json_results_generator.JSONResultsGenerator(port_obj, - self.builder_name, self.build_name, self.build_number, - '', - None, # don't fetch past json results archive - test_timings, - failed_tests_dict, - passed_tests, - (), - tests_set) - - # Test incremental json results - incremental_json = generator.get_json(incremental=True) - self._verify_json_results( - tests_set, - test_timings, - failed_tests, - PASS_tests, - DISABLED_tests, - FLAKY_tests, - incremental_json, - 1) - - # Test aggregated json results - generator.set_archived_results(self._json) - json = generator.get_json(incremental=False) - self._json = json - self._num_runs += 1 - self._tests_set |= tests_set - self._test_timings.update(test_timings) - self._failed_tests.update(failed_tests) - self._PASS_tests |= PASS_tests - self._DISABLED_tests |= DISABLED_tests - self._FLAKY_tests |= FLAKY_tests - self._verify_json_results( - self._tests_set, - self._test_timings, - self._failed_tests, - self._PASS_tests, - self._DISABLED_tests, - self._FLAKY_tests, - self._json, - self._num_runs) - - def _verify_json_results(self, tests_set, test_timings, failed_tests, - PASS_tests, DISABLED_tests, FLAKY_tests, - json, num_runs): - # Aliasing to a short name for better access to its constants. 
- JRG = json_results_generator.JSONResultsGenerator - - self.assertTrue(JRG.VERSION_KEY in json) - self.assertTrue(self.builder_name in json) - - buildinfo = json[self.builder_name] - self.assertTrue(JRG.FIXABLE in buildinfo) - self.assertTrue(JRG.TESTS in buildinfo) - self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs) - self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number) - - if tests_set or DISABLED_tests: - fixable = {} - for fixable_items in buildinfo[JRG.FIXABLE]: - for (type, count) in fixable_items.iteritems(): - if type in fixable: - fixable[type] = fixable[type] + count - else: - fixable[type] = count - - if PASS_tests: - self.assertEqual(fixable[JRG.PASS_RESULT], len(PASS_tests)) - else: - self.assertTrue(JRG.PASS_RESULT not in fixable or - fixable[JRG.PASS_RESULT] == 0) - if DISABLED_tests: - self.assertEqual(fixable[JRG.SKIP_RESULT], len(DISABLED_tests)) - else: - self.assertTrue(JRG.SKIP_RESULT not in fixable or - fixable[JRG.SKIP_RESULT] == 0) - if FLAKY_tests: - self.assertEqual(fixable[JRG.FLAKY_RESULT], len(FLAKY_tests)) - else: - self.assertTrue(JRG.FLAKY_RESULT not in fixable or - fixable[JRG.FLAKY_RESULT] == 0) - - if failed_tests: - tests = buildinfo[JRG.TESTS] - for test_name in failed_tests: - self.assertTrue(test_name in tests) - test = tests[test_name] - - failed = 0 - for result in test[JRG.RESULTS]: - if result[1] == JRG.FAIL_RESULT: - failed = result[0] - - self.assertEqual(1, failed) - - timing_count = 0 - for timings in test[JRG.TIMES]: - if timings[1] == test_timings[test_name]: - timing_count = timings[0] - self.assertEqual(1, timing_count) - - fixable_count = len(DISABLED_tests | failed_tests) - if DISABLED_tests or failed_tests: - self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count) - - def test_json_generation(self): - self._test_json_generation([], []) - self._test_json_generation(['A1', 'B1'], []) - self._test_json_generation([], ['FAILS_A2', 'FAILS_B2']) - self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], []) - self._test_json_generation(['A4'], ['B4', 'FAILS_C4']) - self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5']) - self._test_json_generation( - ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'], - ['FAILS_D6']) - self._test_json_generation( - ['A7', 'FLAKY_B7', 'DISABLED_C7'], - ['FAILS_D7', 'FLAKY_D8']) - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py deleted file mode 100644 index e520a9c..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests. - -Testing is accomplished by having a manager (TestRunner) gather all of the -tests to be run, and sending messages to a pool of workers (TestShellThreads) -to run each test. Each worker communicates with one driver (usually -DumpRenderTree) to run one test at a time and then compare the output against -what we expected to get. - -This modules provides a message broker that connects the manager to the -workers: it provides a messaging abstraction and message loops, and -handles launching threads and/or processes depending on the -requested configuration. -""" - -import logging -import sys -import time -import traceback - -import dump_render_tree_thread - -_log = logging.getLogger(__name__) - - -def get(port, options): - """Return an instance of a WorkerMessageBroker.""" - worker_model = options.worker_model - if worker_model == 'inline': - return InlineBroker(port, options) - if worker_model == 'threads': - return MultiThreadedBroker(port, options) - raise ValueError('unsupported value for --worker-model: %s' % worker_model) - - -class _WorkerState(object): - def __init__(self, name): - self.name = name - self.thread = None - - -class WorkerMessageBroker(object): - def __init__(self, port, options): - self._port = port - self._options = options - self._num_workers = int(self._options.child_processes) - - # This maps worker names to their _WorkerState values. - self._workers = {} - - def _threads(self): - return tuple([w.thread for w in self._workers.values()]) - - def start_workers(self, test_runner): - """Starts up the pool of workers for running the tests. - - Args: - test_runner: a handle to the manager/TestRunner object - """ - self._test_runner = test_runner - for worker_number in xrange(self._num_workers): - worker = _WorkerState('worker-%d' % worker_number) - worker.thread = self._start_worker(worker_number, worker.name) - self._workers[worker.name] = worker - return self._threads() - - def _start_worker(self, worker_number, worker_name): - raise NotImplementedError - - def run_message_loop(self): - """Loop processing messages until done.""" - raise NotImplementedError - - def cancel_workers(self): - """Cancel/interrupt any workers that are still alive.""" - pass - - def cleanup(self): - """Perform any necessary cleanup on shutdown.""" - pass - - -class InlineBroker(WorkerMessageBroker): - def _start_worker(self, worker_number, worker_name): - # FIXME: Replace with something that isn't a thread. 
- thread = dump_render_tree_thread.TestShellThread(self._port, - self._options, worker_number, worker_name, - self._test_runner._current_filename_queue, - self._test_runner._result_queue) - # Note: Don't start() the thread! If we did, it would actually - # create another thread and start executing it, and we'd no longer - # be single-threaded. - return thread - - def run_message_loop(self): - thread = self._threads()[0] - thread.run_in_main_thread(self._test_runner, - self._test_runner._current_result_summary) - self._test_runner.update() - - -class MultiThreadedBroker(WorkerMessageBroker): - def _start_worker(self, worker_number, worker_name): - thread = dump_render_tree_thread.TestShellThread(self._port, - self._options, worker_number, worker_name, - self._test_runner._current_filename_queue, - self._test_runner._result_queue) - thread.start() - return thread - - def run_message_loop(self): - threads = self._threads() - - # Loop through all the threads waiting for them to finish. - some_thread_is_alive = True - while some_thread_is_alive: - some_thread_is_alive = False - t = time.time() - for thread in threads: - exception_info = thread.exception_info() - if exception_info is not None: - # Re-raise the thread's exception here to make it - # clear that testing was aborted. Otherwise, - # the tests that did not run would be assumed - # to have passed. - raise exception_info[0], exception_info[1], exception_info[2] - - if thread.isAlive(): - some_thread_is_alive = True - next_timeout = thread.next_timeout() - if next_timeout and t > next_timeout: - log_wedged_worker(thread.getName(), thread.id()) - thread.clear_next_timeout() - - self._test_runner.update() - - if some_thread_is_alive: - time.sleep(0.01) - - def cancel_workers(self): - threads = self._threads() - for thread in threads: - thread.cancel() - - -def log_wedged_worker(name, id): - """Log information about the given worker state.""" - stack = _find_thread_stack(id) - assert(stack is not None) - _log.error("") - _log.error("%s (tid %d) is wedged" % (name, id)) - _log_stack(stack) - _log.error("") - - -def _find_thread_stack(id): - """Returns a stack object that can be used to dump a stack trace for - the given thread id (or None if the id is not found).""" - for thread_id, stack in sys._current_frames().items(): - if thread_id == id: - return stack - return None - - -def _log_stack(stack): - """Log a stack trace to log.error().""" - for filename, lineno, name, line in traceback.extract_stack(stack): - _log.error('File: "%s", line %d, in %s' % (filename, lineno, name)) - if line: - _log.error(' %s' % line.strip()) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py deleted file mode 100644 index 6f04fd3..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. 
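The wedged-worker logging in message_broker.py above pulls another thread's stack out of sys._current_frames(); the same technique in isolation, as a rough sketch (the sleeping worker is purely illustrative):

import sys
import threading
import time
import traceback

def dump_stack_of(thread_id):
    # sys._current_frames() maps each live thread id to its topmost frame.
    frame = sys._current_frames().get(thread_id)
    if frame is None:
        return
    for filename, lineno, name, line in traceback.extract_stack(frame):
        print('File: "%s", line %d, in %s' % (filename, lineno, name))
        if line:
            print('  %s' % line.strip())

worker = threading.Thread(target=time.sleep, args=(5,))
worker.start()
time.sleep(0.1)                 # give the worker time to reach its sleep() call
dump_stack_of(worker.ident)     # prints the frames the worker is blocked in
worker.join()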
-# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import logging -import Queue -import sys -import thread -import threading -import time -import unittest - -from webkitpy.common import array_stream -from webkitpy.common.system import outputcapture -from webkitpy.tool import mocktool - -from webkitpy.layout_tests import run_webkit_tests - -import message_broker - - -class TestThread(threading.Thread): - def __init__(self, started_queue, stopping_queue): - threading.Thread.__init__(self) - self._thread_id = None - self._started_queue = started_queue - self._stopping_queue = stopping_queue - self._timeout = False - self._timeout_queue = Queue.Queue() - self._exception_info = None - - def id(self): - return self._thread_id - - def getName(self): - return "worker-0" - - def run(self): - self._covered_run() - - def _covered_run(self): - # FIXME: this is a separate routine to work around a bug - # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. 
- self._thread_id = thread.get_ident() - try: - self._started_queue.put('') - msg = self._stopping_queue.get() - if msg == 'KeyboardInterrupt': - raise KeyboardInterrupt - elif msg == 'Exception': - raise ValueError() - elif msg == 'Timeout': - self._timeout = True - self._timeout_queue.get() - except: - self._exception_info = sys.exc_info() - - def exception_info(self): - return self._exception_info - - def next_timeout(self): - if self._timeout: - self._timeout_queue.put('done') - return time.time() - 10 - return time.time() - - def clear_next_timeout(self): - self._next_timeout = None - -class TestHandler(logging.Handler): - def __init__(self, astream): - logging.Handler.__init__(self) - self._stream = astream - - def emit(self, record): - self._stream.write(self.format(record)) - - -class MultiThreadedBrokerTest(unittest.TestCase): - class MockTestRunner(object): - def __init__(self): - pass - - def __del__(self): - pass - - def update(self): - pass - - def run_one_thread(self, msg): - runner = self.MockTestRunner() - port = None - options = mocktool.MockOptions(child_processes='1') - starting_queue = Queue.Queue() - stopping_queue = Queue.Queue() - broker = message_broker.MultiThreadedBroker(port, options) - broker._test_runner = runner - child_thread = TestThread(starting_queue, stopping_queue) - broker._workers['worker-0'] = message_broker._WorkerState('worker-0') - broker._workers['worker-0'].thread = child_thread - child_thread.start() - started_msg = starting_queue.get() - stopping_queue.put(msg) - return broker.run_message_loop() - - def test_basic(self): - interrupted = self.run_one_thread('') - self.assertFalse(interrupted) - - def test_interrupt(self): - self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt') - - def test_timeout(self): - oc = outputcapture.OutputCapture() - oc.capture_output() - interrupted = self.run_one_thread('Timeout') - self.assertFalse(interrupted) - oc.restore_output() - - def test_exception(self): - self.assertRaises(ValueError, self.run_one_thread, 'Exception') - - -class Test(unittest.TestCase): - def test_find_thread_stack_found(self): - id, stack = sys._current_frames().items()[0] - found_stack = message_broker._find_thread_stack(id) - self.assertNotEqual(found_stack, None) - - def test_find_thread_stack_not_found(self): - found_stack = message_broker._find_thread_stack(0) - self.assertEqual(found_stack, None) - - def test_log_wedged_worker(self): - oc = outputcapture.OutputCapture() - oc.capture_output() - logger = message_broker._log - astream = array_stream.ArrayStream() - handler = TestHandler(astream) - logger.addHandler(handler) - - starting_queue = Queue.Queue() - stopping_queue = Queue.Queue() - child_thread = TestThread(starting_queue, stopping_queue) - child_thread.start() - msg = starting_queue.get() - - message_broker.log_wedged_worker(child_thread.getName(), - child_thread.id()) - stopping_queue.put('') - child_thread.join(timeout=1.0) - - self.assertFalse(astream.empty()) - self.assertFalse(child_thread.isAlive()) - oc.restore_output() - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py deleted file mode 100644 index 20646a1..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""" -Package that implements a stream wrapper that has 'meters' as well as -regular output. A 'meter' is a single line of text that can be erased -and rewritten repeatedly, without producing multiple lines of output. It -can be used to produce effects like progress bars. - -This package should only be called by the printing module in the layout_tests -package. -""" - -import logging - -_log = logging.getLogger("webkitpy.layout_tests.metered_stream") - - -class MeteredStream: - """This class is a wrapper around a stream that allows you to implement - meters (progress bars, etc.). - - It can be used directly as a stream, by calling write(), but provides - two other methods for output, update(), and progress(). - - In normal usage, update() will overwrite the output of the immediately - preceding update() (write() also will overwrite update()). So, calling - multiple update()s in a row can provide an updating status bar (note that - if an update string contains newlines, only the text following the last - newline will be overwritten/erased). - - If the MeteredStream is constructed in "verbose" mode (i.e., by passing - verbose=true), then update() no longer overwrite a previous update(), and - instead the call is equivalent to write(), although the text is - actually sent to the logger rather than to the stream passed - to the constructor. - - progress() is just like update(), except that if you are in verbose mode, - progress messages are not output at all (they are dropped). This is - used for things like progress bars which are presumed to be unwanted in - verbose mode. - - Note that the usual usage for this class is as a destination for - a logger that can also be written to directly (i.e., some messages go - through the logger, some don't). We thus have to dance around a - layering inversion in update() for things to work correctly. 
- """ - - def __init__(self, verbose, stream): - """ - Args: - verbose: whether progress is a no-op and updates() aren't overwritten - stream: output stream to write to - """ - self._dirty = False - self._verbose = verbose - self._stream = stream - self._last_update = "" - - def write(self, txt): - """Write to the stream, overwriting and resetting the meter.""" - if self._dirty: - self._write(txt) - self._dirty = False - self._last_update = '' - else: - self._stream.write(txt) - - def flush(self): - """Flush any buffered output.""" - self._stream.flush() - - def progress(self, str): - """ - Write a message to the stream that will get overwritten. - - This is used for progress updates that don't need to be preserved in - the log. If the MeteredStream was initialized with verbose==True, - then this output is discarded. We have this in case we are logging - lots of output and the update()s will get lost or won't work - properly (typically because verbose streams are redirected to files). - - """ - if self._verbose: - return - self._write(str) - - def update(self, str): - """ - Write a message that is also included when logging verbosely. - - This routine preserves the same console logging behavior as progress(), - but will also log the message if verbose() was true. - - """ - # Note this is a separate routine that calls either into the logger - # or the metering stream. We have to be careful to avoid a layering - # inversion (stream calling back into the logger). - if self._verbose: - _log.info(str) - else: - self._write(str) - - def _write(self, str): - """Actually write the message to the stream.""" - - # FIXME: Figure out if there is a way to detect if we're writing - # to a stream that handles CRs correctly (e.g., terminals). That might - # be a cleaner way of handling this. - - # Print the necessary number of backspaces to erase the previous - # message. - if len(self._last_update): - self._stream.write("\b" * len(self._last_update) + - " " * len(self._last_update) + - "\b" * len(self._last_update)) - self._stream.write(str) - last_newline = str.rfind("\n") - self._last_update = str[(last_newline + 1):] - self._dirty = True diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py deleted file mode 100644 index 9421ff8..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit tests for metered_stream.py.""" - -import os -import optparse -import pdb -import sys -import unittest - -from webkitpy.common.array_stream import ArrayStream -from webkitpy.layout_tests.layout_package import metered_stream - - -class TestMeteredStream(unittest.TestCase): - def test_regular(self): - a = ArrayStream() - m = metered_stream.MeteredStream(verbose=False, stream=a) - self.assertTrue(a.empty()) - - # basic test - note that the flush() is a no-op, but we include it - # for coverage. - m.write("foo") - m.flush() - exp = ['foo'] - self.assertEquals(a.get(), exp) - - # now check that a second write() does not overwrite the first. - m.write("bar") - exp.append('bar') - self.assertEquals(a.get(), exp) - - m.update("batter") - exp.append('batter') - self.assertEquals(a.get(), exp) - - # The next update() should overwrite the laste update() but not the - # other text. Note that the cursor is effectively positioned at the - # end of 'foo', even though we had to erase three more characters. - m.update("foo") - exp.append('\b\b\b\b\b\b \b\b\b\b\b\b') - exp.append('foo') - self.assertEquals(a.get(), exp) - - m.progress("progress") - exp.append('\b\b\b \b\b\b') - exp.append('progress') - self.assertEquals(a.get(), exp) - - # now check that a write() does overwrite the progress bar - m.write("foo") - exp.append('\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b') - exp.append('foo') - self.assertEquals(a.get(), exp) - - # Now test that we only back up to the most recent newline. - - # Note also that we do not back up to erase the most recent write(), - # i.e., write()s do not get erased. - a.reset() - m.update("foo\nbar") - m.update("baz") - self.assertEquals(a.get(), ['foo\nbar', '\b\b\b \b\b\b', 'baz']) - - def test_verbose(self): - a = ArrayStream() - m = metered_stream.MeteredStream(verbose=True, stream=a) - self.assertTrue(a.empty()) - m.write("foo") - self.assertEquals(a.get(), ['foo']) - - import logging - b = ArrayStream() - logger = logging.getLogger() - handler = logging.StreamHandler(b) - logger.addHandler(handler) - m.update("bar") - logger.handlers.remove(handler) - self.assertEquals(a.get(), ['foo']) - self.assertEquals(b.get(), ['bar\n']) - - m.progress("dropped") - self.assertEquals(a.get(), ['foo']) - self.assertEquals(b.get(), ['bar\n']) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py deleted file mode 100644 index 7a6aad1..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing.py +++ /dev/null @@ -1,553 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Package that handles non-debug, non-file output for run-webkit-tests.""" - -import logging -import optparse -import os -import pdb - -from webkitpy.layout_tests.layout_package import metered_stream -from webkitpy.layout_tests.layout_package import test_expectations - -_log = logging.getLogger("webkitpy.layout_tests.printer") - -TestExpectationsFile = test_expectations.TestExpectationsFile - -NUM_SLOW_TESTS_TO_LOG = 10 - -PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected," - "unexpected-results,updates") -PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress," - "one-line-summary,slowest,timing,unexpected," - "unexpected-results,updates") - -HELP_PRINTING = """ -Output for run-webkit-tests is controlled by a comma-separated list of -values passed to --print. Values either influence the overall output, or -the output at the beginning of the run, during the run, or at the end: - -Overall options: - nothing don't print anything. This overrides every other option - default include the default options. This is useful for logging - the default options plus additional settings. - everything print everything (except the trace-* options and the - detailed-progress option, see below for the full list ) - misc print miscellaneous things like blank lines - -At the beginning of the run: - config print the test run configuration - expected print a summary of what is expected to happen - (# passes, # failures, etc.) - -During the run: - detailed-progress print one dot per test completed - one-line-progress print a one-line progress bar - unexpected print any unexpected results as they occur - updates print updates on which stage is executing - trace-everything print detailed info on every test's results - (baselines, expectation, time it took to run). If - this is specified it will override the '*-progress' - options, the 'trace-unexpected' option, and the - 'unexpected' option. - trace-unexpected like 'trace-everything', but only for tests with - unexpected results. If this option is specified, - it will override the 'unexpected' option. 
- -At the end of the run: - actual print a summary of the actual results - slowest print %(slowest)d slowest tests and the time they took - timing print timing statistics - unexpected-results print a list of the tests with unexpected results - one-line-summary print a one-line summary of the run - -Notes: - - 'detailed-progress' can only be used if running in a single thread - (using --child-processes=1) or a single queue of tests (using - --experimental-fully-parallel). If these conditions aren't true, - 'one-line-progress' will be used instead. - - If both 'detailed-progress' and 'one-line-progress' are specified (and - both are possible), 'detailed-progress' will be used. - - If 'nothing' is specified, it overrides all of the other options. - - Specifying --verbose is equivalent to --print everything plus it - changes the format of the log messages to add timestamps and other - information. If you specify --verbose and --print X, then X overrides - the --print everything implied by --verbose. - ---print 'everything' is equivalent to --print '%(everything)s'. - -The default (--print default) is equivalent to --print '%(default)s'. -""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING, - 'default': PRINT_DEFAULT} - - -def print_options(): - return [ - # Note: We use print_options rather than just 'print' because print - # is a reserved word. - # Note: Also, we don't specify a default value so we can detect when - # no flag is specified on the command line and use different defaults - # based on whether or not --verbose is specified (since --print - # overrides --verbose). - optparse.make_option("--print", dest="print_options", - help=("controls print output of test run. " - "Use --help-printing for more.")), - optparse.make_option("--help-printing", action="store_true", - help="show detailed help on controlling print output"), - optparse.make_option("-v", "--verbose", action="store_true", - default=False, help="include debug-level logging"), - ] - - -def parse_print_options(print_options, verbose, child_processes, - is_fully_parallel): - """Parse the options provided to --print and dedup and rank them. - - Returns - a set() of switches that govern how logging is done - - """ - if print_options: - switches = set(print_options.split(',')) - elif verbose: - switches = set(PRINT_EVERYTHING.split(',')) - else: - switches = set(PRINT_DEFAULT.split(',')) - - if 'nothing' in switches: - return set() - - if (child_processes != 1 and not is_fully_parallel and - 'detailed-progress' in switches): - _log.warn("Can only print 'detailed-progress' if running " - "with --child-processes=1 or " - "with --experimental-fully-parallel. 
" - "Using 'one-line-progress' instead.") - switches.discard('detailed-progress') - switches.add('one-line-progress') - - if 'everything' in switches: - switches.discard('everything') - switches.update(set(PRINT_EVERYTHING.split(','))) - - if 'default' in switches: - switches.discard('default') - switches.update(set(PRINT_DEFAULT.split(','))) - - if 'detailed-progress' in switches: - switches.discard('one-line-progress') - - if 'trace-everything' in switches: - switches.discard('detailed-progress') - switches.discard('one-line-progress') - switches.discard('trace-unexpected') - switches.discard('unexpected') - - if 'trace-unexpected' in switches: - switches.discard('unexpected') - - return switches - - -def _configure_logging(stream, verbose): - log_fmt = '%(message)s' - log_datefmt = '%y%m%d %H:%M:%S' - log_level = logging.INFO - if verbose: - log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)d ' - '%(levelname)s %(message)s') - log_level = logging.DEBUG - - root = logging.getLogger() - handler = logging.StreamHandler(stream) - handler.setFormatter(logging.Formatter(log_fmt, None)) - root.addHandler(handler) - root.setLevel(log_level) - return handler - - -def _restore_logging(handler_to_remove): - root = logging.getLogger() - root.handlers.remove(handler_to_remove) - - -class Printer(object): - """Class handling all non-debug-logging printing done by run-webkit-tests. - - Printing from run-webkit-tests falls into two buckets: general or - regular output that is read only by humans and can be changed at any - time, and output that is parsed by buildbots (and humans) and hence - must be changed more carefully and in coordination with the buildbot - parsing code (in chromium.org's buildbot/master.chromium/scripts/master/ - log_parser/webkit_test_command.py script). - - By default the buildbot-parsed code gets logged to stdout, and regular - output gets logged to stderr.""" - def __init__(self, port, options, regular_output, buildbot_output, - child_processes, is_fully_parallel): - """ - Args - port interface to port-specific routines - options OptionParser object with command line settings - regular_output stream to which output intended only for humans - should be written - buildbot_output stream to which output intended to be read by - the buildbots (and humans) should be written - child_processes number of parallel threads running (usually - controlled by --child-processes) - is_fully_parallel are the tests running in a single queue, or - in shards (usually controlled by - --experimental-fully-parallel) - - Note that the last two args are separate rather than bundled into - the options structure so that this object does not assume any flags - set in options that weren't returned from logging_options(), above. - The two are used to determine whether or not we can sensibly use - the 'detailed-progress' option, or can only use 'one-line-progress'. - """ - self._buildbot_stream = buildbot_output - self._options = options - self._port = port - self._stream = regular_output - - # These are used for --print detailed-progress to track status by - # directory. 
- self._current_dir = None - self._current_progress_str = "" - self._current_test_number = 0 - - self._meter = metered_stream.MeteredStream(options.verbose, - regular_output) - self._logging_handler = _configure_logging(self._meter, - options.verbose) - - self.switches = parse_print_options(options.print_options, - options.verbose, child_processes, is_fully_parallel) - - def cleanup(self): - """Restore logging configuration to its initial settings.""" - if self._logging_handler: - _restore_logging(self._logging_handler) - self._logging_handler = None - - def __del__(self): - self.cleanup() - - # These two routines just hide the implementation of the switches. - def disabled(self, option): - return not option in self.switches - - def enabled(self, option): - return option in self.switches - - def help_printing(self): - self._write(HELP_PRINTING) - - def print_actual(self, msg): - if self.disabled('actual'): - return - self._buildbot_stream.write("%s\n" % msg) - - def print_config(self, msg): - self.write(msg, 'config') - - def print_expected(self, msg): - self.write(msg, 'expected') - - def print_timing(self, msg): - self.write(msg, 'timing') - - def print_one_line_summary(self, total, expected, unexpected): - """Print a one-line summary of the test run to stdout. - - Args: - total: total number of tests run - expected: number of expected results - unexpected: number of unexpected results - """ - if self.disabled('one-line-summary'): - return - - incomplete = total - expected - unexpected - if incomplete: - self._write("") - incomplete_str = " (%d didn't run)" % incomplete - expected_str = str(expected) - else: - incomplete_str = "" - expected_str = "All %d" % expected - - if unexpected == 0: - self._write("%s tests ran as expected%s." % - (expected_str, incomplete_str)) - elif expected == 1: - self._write("1 test ran as expected, %d didn't%s:" % - (unexpected, incomplete_str)) - else: - self._write("%d tests ran as expected, %d didn't%s:" % - (expected, unexpected, incomplete_str)) - self._write("") - - def print_test_result(self, result, expected, exp_str, got_str): - """Print the result of the test as determined by --print. - - This routine is used to print the details of each test as it completes. - - Args: - result - The actual TestResult object - expected - Whether the result we got was an expected result - exp_str - What we expected to get (used for tracing) - got_str - What we actually got (used for tracing) - - Note that we need all of these arguments even though they seem - somewhat redundant, in order to keep this routine from having to - known anything about the set of expectations. - """ - if (self.enabled('trace-everything') or - self.enabled('trace-unexpected') and not expected): - self._print_test_trace(result, exp_str, got_str) - elif (not expected and self.enabled('unexpected') and - self.disabled('detailed-progress')): - # Note: 'detailed-progress' handles unexpected results internally, - # so we skip it here. - self._print_unexpected_test_result(result) - - def _print_test_trace(self, result, exp_str, got_str): - """Print detailed results of a test (triggered by --print trace-*). 
- For each test, print: - - location of the expected baselines - - expected results - - actual result - - timing info - """ - filename = result.filename - test_name = self._port.relative_test_filename(filename) - self._write('trace: %s' % test_name) - txt_file = self._port.expected_filename(filename, '.txt') - if self._port.path_exists(txt_file): - self._write(' txt: %s' % - self._port.relative_test_filename(txt_file)) - else: - self._write(' txt: <none>') - checksum_file = self._port.expected_filename(filename, '.checksum') - if self._port.path_exists(checksum_file): - self._write(' sum: %s' % - self._port.relative_test_filename(checksum_file)) - else: - self._write(' sum: <none>') - png_file = self._port.expected_filename(filename, '.png') - if self._port.path_exists(png_file): - self._write(' png: %s' % - self._port.relative_test_filename(png_file)) - else: - self._write(' png: <none>') - self._write(' exp: %s' % exp_str) - self._write(' got: %s' % got_str) - self._write(' took: %-.3f' % result.test_run_time) - self._write('') - - def _print_unexpected_test_result(self, result): - """Prints one unexpected test result line.""" - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0] - self.write(" %s -> unexpected %s" % - (self._port.relative_test_filename(result.filename), - desc), "unexpected") - - def print_progress(self, result_summary, retrying, test_list): - """Print progress through the tests as determined by --print.""" - if self.enabled('detailed-progress'): - self._print_detailed_progress(result_summary, test_list) - elif self.enabled('one-line-progress'): - self._print_one_line_progress(result_summary, retrying) - else: - return - - if result_summary.remaining == 0: - self._meter.update('') - - def _print_one_line_progress(self, result_summary, retrying): - """Displays the progress through the test run.""" - percent_complete = 100 * (result_summary.expected + - result_summary.unexpected) / result_summary.total - action = "Testing" - if retrying: - action = "Retrying" - self._meter.progress("%s (%d%%): %d ran as expected, %d didn't," - " %d left" % (action, percent_complete, result_summary.expected, - result_summary.unexpected, result_summary.remaining)) - - def _print_detailed_progress(self, result_summary, test_list): - """Display detailed progress output where we print the directory name - and one dot for each completed test. This is triggered by - "--log detailed-progress".""" - if self._current_test_number == len(test_list): - return - - next_test = test_list[self._current_test_number] - next_dir = os.path.dirname( - self._port.relative_test_filename(next_test)) - if self._current_progress_str == "": - self._current_progress_str = "%s: " % (next_dir) - self._current_dir = next_dir - - while next_test in result_summary.results: - if next_dir != self._current_dir: - self._meter.write("%s\n" % (self._current_progress_str)) - self._current_progress_str = "%s: ." % (next_dir) - self._current_dir = next_dir - else: - self._current_progress_str += "." 
- - if (next_test in result_summary.unexpected_results and - self.enabled('unexpected')): - self._meter.write("%s\n" % self._current_progress_str) - test_result = result_summary.results[next_test] - self._print_unexpected_test_result(test_result) - self._current_progress_str = "%s: " % self._current_dir - - self._current_test_number += 1 - if self._current_test_number == len(test_list): - break - - next_test = test_list[self._current_test_number] - next_dir = os.path.dirname( - self._port.relative_test_filename(next_test)) - - if result_summary.remaining: - remain_str = " (%d)" % (result_summary.remaining) - self._meter.progress("%s%s" % (self._current_progress_str, - remain_str)) - else: - self._meter.progress("%s" % (self._current_progress_str)) - - def print_unexpected_results(self, unexpected_results): - """Prints a list of the unexpected results to the buildbot stream.""" - if self.disabled('unexpected-results'): - return - - passes = {} - flaky = {} - regressions = {} - - for test, results in unexpected_results['tests'].iteritems(): - actual = results['actual'].split(" ") - expected = results['expected'].split(" ") - if actual == ['PASS']: - if 'CRASH' in expected: - _add_to_dict_of_lists(passes, - 'Expected to crash, but passed', - test) - elif 'TIMEOUT' in expected: - _add_to_dict_of_lists(passes, - 'Expected to timeout, but passed', - test) - else: - _add_to_dict_of_lists(passes, - 'Expected to fail, but passed', - test) - elif len(actual) > 1: - # We group flaky tests by the first actual result we got. - _add_to_dict_of_lists(flaky, actual[0], test) - else: - _add_to_dict_of_lists(regressions, results['actual'], test) - - if len(passes) or len(flaky) or len(regressions): - self._buildbot_stream.write("\n") - - if len(passes): - for key, tests in passes.iteritems(): - self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests))) - tests.sort() - for test in tests: - self._buildbot_stream.write(" %s\n" % test) - self._buildbot_stream.write("\n") - self._buildbot_stream.write("\n") - - if len(flaky): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in flaky.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n" - % (descriptions[result][1], len(tests))) - tests.sort() - - for test in tests: - result = unexpected_results['tests'][test] - actual = result['actual'].split(" ") - expected = result['expected'].split(" ") - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - new_expectations_list = list(set(actual) | set(expected)) - self._buildbot_stream.write(" %s = %s\n" % - (test, " ".join(new_expectations_list))) - self._buildbot_stream.write("\n") - self._buildbot_stream.write("\n") - - if len(regressions): - descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS - for key, tests in regressions.iteritems(): - result = TestExpectationsFile.EXPECTATIONS[key.lower()] - self._buildbot_stream.write( - "Regressions: Unexpected %s : (%d)\n" % ( - descriptions[result][1], len(tests))) - tests.sort() - for test in tests: - self._buildbot_stream.write(" %s = %s\n" % (test, key)) - self._buildbot_stream.write("\n") - self._buildbot_stream.write("\n") - - if len(unexpected_results['tests']) and self._options.verbose: - self._buildbot_stream.write("%s\n" % ("-" * 78)) - - def print_update(self, msg): - if self.disabled('updates'): - return - self._meter.update(msg) - - def write(self, msg, option="misc"): - if self.disabled(option): - return - self._write(msg) - - def 
_write(self, msg): - # FIXME: we could probably get away with calling _log.info() all of - # the time, but there doesn't seem to be a good way to test the output - # from the logger :(. - if self._options.verbose: - _log.info(msg) - else: - self._meter.write("%s\n" % msg) - -# -# Utility routines used by the Controller class -# - - -def _add_to_dict_of_lists(dict, key, value): - dict.setdefault(key, []).append(value) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py deleted file mode 100644 index 27a6a29..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py +++ /dev/null @@ -1,606 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
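# For context, a minimal sketch of how the Printer defined in printing.py above is
# constructed and driven; it mirrors the set-up used by the unit tests that follow.
# This is illustrative only: it assumes the webkitpy tree at this revision is on
# sys.path, and the ArrayStream objects merely stand in for stderr/stdout (anything
# with a write() method would do). The trailing positional arguments mirror the
# child-process / parallelism arguments passed in get_printer() below.

import optparse

from webkitpy.common import array_stream
from webkitpy.layout_tests import port
from webkitpy.layout_tests.layout_package import printing

parser = optparse.OptionParser(option_list=printing.print_options())
options, args = parser.parse_args(['--print', 'one-line-summary,unexpected'])

test_port = port.get('test', options)
regular_output = array_stream.ArrayStream()    # the "err" stream on the bots
buildbot_output = array_stream.ArrayStream()   # the "out" stream on the bots

printer = printing.Printer(test_port, options, regular_output,
                           buildbot_output, 1, False)
printer.print_update("Collecting tests ...")   # no-op unless 'updates' is enabled
printer.print_one_line_summary(total=3, expected=2, unexpected=1)
printer.cleanup()                              # restores the logging configuration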
- -"""Unit tests for printing.py.""" - -import os -import optparse -import pdb -import sys -import unittest -import logging - -from webkitpy.common import array_stream -from webkitpy.common.system import logtesting -from webkitpy.layout_tests import port -from webkitpy.layout_tests.layout_package import printing -from webkitpy.layout_tests.layout_package import test_results -from webkitpy.layout_tests.layout_package import test_expectations -from webkitpy.layout_tests.layout_package import test_failures -from webkitpy.layout_tests import run_webkit_tests - - -def get_options(args): - print_options = printing.print_options() - option_parser = optparse.OptionParser(option_list=print_options) - return option_parser.parse_args(args) - - -class TestUtilityFunctions(unittest.TestCase): - def test_configure_logging(self): - options, args = get_options([]) - stream = array_stream.ArrayStream() - handler = printing._configure_logging(stream, options.verbose) - logging.info("this should be logged") - self.assertFalse(stream.empty()) - - stream.reset() - logging.debug("this should not be logged") - self.assertTrue(stream.empty()) - - printing._restore_logging(handler) - - stream.reset() - options, args = get_options(['--verbose']) - handler = printing._configure_logging(stream, options.verbose) - logging.debug("this should be logged") - self.assertFalse(stream.empty()) - printing._restore_logging(handler) - - def test_print_options(self): - options, args = get_options([]) - self.assertTrue(options is not None) - - def test_parse_print_options(self): - def test_switches(args, expected_switches_str, - verbose=False, child_processes=1, - is_fully_parallel=False): - options, args = get_options(args) - if expected_switches_str: - expected_switches = set(expected_switches_str.split(',')) - else: - expected_switches = set() - switches = printing.parse_print_options(options.print_options, - verbose, - child_processes, - is_fully_parallel) - self.assertEqual(expected_switches, switches) - - # test that we default to the default set of switches - test_switches([], printing.PRINT_DEFAULT) - - # test that verbose defaults to everything - test_switches([], printing.PRINT_EVERYTHING, verbose=True) - - # test that --print default does what it's supposed to - test_switches(['--print', 'default'], printing.PRINT_DEFAULT) - - # test that --print nothing does what it's supposed to - test_switches(['--print', 'nothing'], None) - - # test that --print everything does what it's supposed to - test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING) - - # this tests that '--print X' overrides '--verbose' - test_switches(['--print', 'actual'], 'actual', verbose=True) - - - -class Testprinter(unittest.TestCase): - def get_printer(self, args=None, single_threaded=False, - is_fully_parallel=False): - printing_options = printing.print_options() - option_parser = optparse.OptionParser(option_list=printing_options) - options, args = option_parser.parse_args(args) - self._port = port.get('test', options) - nproc = 2 - if single_threaded: - nproc = 1 - - regular_output = array_stream.ArrayStream() - buildbot_output = array_stream.ArrayStream() - printer = printing.Printer(self._port, options, regular_output, - buildbot_output, single_threaded, - is_fully_parallel) - return printer, regular_output, buildbot_output - - def get_result(self, test, result_type=test_expectations.PASS, run_time=0): - failures = [] - if result_type == test_expectations.TIMEOUT: - failures = [test_failures.FailureTimeout()] - elif result_type == 
test_expectations.CRASH: - failures = [test_failures.FailureCrash()] - path = os.path.join(self._port.layout_tests_dir(), test) - return test_results.TestResult(path, failures, run_time, - total_time_for_all_diffs=0, - time_for_diffs=0) - - def get_result_summary(self, tests, expectations_str): - test_paths = [os.path.join(self._port.layout_tests_dir(), test) for - test in tests] - expectations = test_expectations.TestExpectations( - self._port, test_paths, expectations_str, - self._port.test_platform_name(), is_debug_mode=False, - is_lint_mode=False) - - rs = run_webkit_tests.ResultSummary(expectations, test_paths) - return test_paths, rs, expectations - - def test_help_printer(self): - # Here and below we'll call the "regular" printer err and the - # buildbot printer out; this corresponds to how things run on the - # bots with stderr and stdout. - printer, err, out = self.get_printer() - - # This routine should print something to stdout. testing what it is - # is kind of pointless. - printer.help_printing() - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - def do_switch_tests(self, method_name, switch, to_buildbot, - message='hello', exp_err=None, exp_bot=None): - def do_helper(method_name, switch, message, exp_err, exp_bot): - printer, err, bot = self.get_printer(['--print', switch]) - getattr(printer, method_name)(message) - self.assertEqual(err.get(), exp_err) - self.assertEqual(bot.get(), exp_bot) - - if to_buildbot: - if exp_err is None: - exp_err = [] - if exp_bot is None: - exp_bot = [message + "\n"] - else: - if exp_err is None: - exp_err = [message + "\n"] - if exp_bot is None: - exp_bot = [] - do_helper(method_name, 'nothing', 'hello', [], []) - do_helper(method_name, switch, 'hello', exp_err, exp_bot) - do_helper(method_name, 'everything', 'hello', exp_err, exp_bot) - - def test_configure_and_cleanup(self): - # This test verifies that calling cleanup repeatedly and deleting - # the object is safe. - printer, err, out = self.get_printer(['--print', 'everything']) - printer.cleanup() - printer.cleanup() - printer = None - - def test_print_actual(self): - # Actual results need to be logged to the buildbot's stream. - self.do_switch_tests('print_actual', 'actual', to_buildbot=True) - - def test_print_actual_buildbot(self): - # FIXME: Test that the format of the actual results matches what the - # buildbot is expecting. - pass - - def test_print_config(self): - self.do_switch_tests('print_config', 'config', to_buildbot=False) - - def test_print_expected(self): - self.do_switch_tests('print_expected', 'expected', to_buildbot=False) - - def test_print_timing(self): - self.do_switch_tests('print_timing', 'timing', to_buildbot=False) - - def test_print_update(self): - # Note that there shouldn't be a carriage return here; updates() - # are meant to be overwritten. 
- self.do_switch_tests('print_update', 'updates', to_buildbot=False, - message='hello', exp_err=['hello']) - - def test_print_one_line_summary(self): - printer, err, out = self.get_printer(['--print', 'nothing']) - printer.print_one_line_summary(1, 1, 0) - self.assertTrue(err.empty()) - - printer, err, out = self.get_printer(['--print', 'one-line-summary']) - printer.print_one_line_summary(1, 1, 0) - self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"]) - - printer, err, out = self.get_printer(['--print', 'everything']) - printer.print_one_line_summary(1, 1, 0) - self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"]) - - err.reset() - printer.print_one_line_summary(2, 1, 1) - self.assertEquals(err.get(), - ["1 test ran as expected, 1 didn't:\n", "\n"]) - - err.reset() - printer.print_one_line_summary(3, 2, 1) - self.assertEquals(err.get(), - ["2 tests ran as expected, 1 didn't:\n", "\n"]) - - err.reset() - printer.print_one_line_summary(3, 2, 0) - self.assertEquals(err.get(), - ['\n', "2 tests ran as expected (1 didn't run).\n", - '\n']) - - - def test_print_test_result(self): - # Note here that we don't use meaningful exp_str and got_str values; - # the actual contents of the string are treated opaquely by - # print_test_result() when tracing, and usually we don't want - # to test what exactly is printed, just that something - # was printed (or that nothing was printed). - # - # FIXME: this is actually some goofy layering; it would be nice - # we could refactor it so that the args weren't redundant. Maybe - # the TestResult should contain what was expected, and the - # strings could be derived from the TestResult? - printer, err, out = self.get_printer(['--print', 'nothing']) - result = self.get_result('passes/image.html') - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertTrue(err.empty()) - - printer, err, out = self.get_printer(['--print', 'unexpected']) - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - self.assertTrue(err.empty()) - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertEquals(err.get(), - [' passes/image.html -> unexpected pass\n']) - - printer, err, out = self.get_printer(['--print', 'everything']) - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - self.assertTrue(err.empty()) - - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertEquals(err.get(), - [' passes/image.html -> unexpected pass\n']) - - printer, err, out = self.get_printer(['--print', 'nothing']) - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertTrue(err.empty()) - - printer, err, out = self.get_printer(['--print', - 'trace-unexpected']) - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - self.assertTrue(err.empty()) - - printer, err, out = self.get_printer(['--print', - 'trace-unexpected']) - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertFalse(err.empty()) - - printer, err, out = self.get_printer(['--print', - 'trace-unexpected']) - result = self.get_result("passes/text.html") - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertFalse(err.empty()) - - err.reset() - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - self.assertFalse(err.empty()) - - printer, err, out = self.get_printer(['--print', 'trace-everything']) - result = 
self.get_result('passes/image.html') - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - result = self.get_result('failures/expected/missing_text.html') - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - result = self.get_result('failures/expected/missing_check.html') - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - result = self.get_result('failures/expected/missing_image.html') - printer.print_test_result(result, expected=True, exp_str='', - got_str='') - self.assertFalse(err.empty()) - - err.reset() - printer.print_test_result(result, expected=False, exp_str='', - got_str='') - - def test_print_progress(self): - expectations = '' - - # test that we print nothing - printer, err, out = self.get_printer(['--print', 'nothing']) - tests = ['passes/text.html', 'failures/expected/timeout.html', - 'failures/expected/crash.html'] - paths, rs, exp = self.get_result_summary(tests, expectations) - - printer.print_progress(rs, False, paths) - self.assertTrue(out.empty()) - self.assertTrue(err.empty()) - - printer.print_progress(rs, True, paths) - self.assertTrue(out.empty()) - self.assertTrue(err.empty()) - - # test regular functionality - printer, err, out = self.get_printer(['--print', - 'one-line-progress']) - printer.print_progress(rs, False, paths) - self.assertTrue(out.empty()) - self.assertFalse(err.empty()) - - err.reset() - out.reset() - printer.print_progress(rs, True, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - def test_print_progress__detailed(self): - tests = ['passes/text.html', 'failures/expected/timeout.html', - 'failures/expected/crash.html'] - expectations = 'failures/expected/timeout.html = TIMEOUT' - - # first, test that it is disabled properly - # should still print one-line-progress - printer, err, out = self.get_printer( - ['--print', 'detailed-progress'], single_threaded=False) - paths, rs, exp = self.get_result_summary(tests, expectations) - printer.print_progress(rs, False, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - # now test the enabled paths - printer, err, out = self.get_printer( - ['--print', 'detailed-progress'], single_threaded=True) - paths, rs, exp = self.get_result_summary(tests, expectations) - printer.print_progress(rs, False, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - err.reset() - out.reset() - printer.print_progress(rs, True, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False) - rs.add(self.get_result('failures/expected/timeout.html'), True) - rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True) - err.reset() - out.reset() - printer.print_progress(rs, False, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - # We only clear the meter when retrying w/ detailed-progress. 
- err.reset() - out.reset() - printer.print_progress(rs, True, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - printer, err, out = self.get_printer( - ['--print', 'detailed-progress,unexpected'], single_threaded=True) - paths, rs, exp = self.get_result_summary(tests, expectations) - printer.print_progress(rs, False, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - err.reset() - out.reset() - printer.print_progress(rs, True, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False) - rs.add(self.get_result('failures/expected/timeout.html'), True) - rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True) - err.reset() - out.reset() - printer.print_progress(rs, False, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - # We only clear the meter when retrying w/ detailed-progress. - err.reset() - out.reset() - printer.print_progress(rs, True, paths) - self.assertFalse(err.empty()) - self.assertTrue(out.empty()) - - def test_write_nothing(self): - printer, err, out = self.get_printer(['--print', 'nothing']) - printer.write("foo") - self.assertTrue(err.empty()) - - def test_write_misc(self): - printer, err, out = self.get_printer(['--print', 'misc']) - printer.write("foo") - self.assertFalse(err.empty()) - err.reset() - printer.write("foo", "config") - self.assertTrue(err.empty()) - - def test_write_everything(self): - printer, err, out = self.get_printer(['--print', 'everything']) - printer.write("foo") - self.assertFalse(err.empty()) - err.reset() - printer.write("foo", "config") - self.assertFalse(err.empty()) - - def test_write_verbose(self): - printer, err, out = self.get_printer(['--verbose']) - printer.write("foo") - self.assertTrue(not err.empty() and "foo" in err.get()[0]) - self.assertTrue(out.empty()) - - def test_print_unexpected_results(self): - # This routine is the only one that prints stuff that the bots - # care about. - # - # FIXME: there's some weird layering going on here. It seems - # like we shouldn't be both using an expectations string and - # having to specify whether or not the result was expected. - # This whole set of tests should probably be rewritten. - # - # FIXME: Plus, the fact that we're having to call into - # run_webkit_tests is clearly a layering inversion. - def get_unexpected_results(expected, passing, flaky): - """Return an unexpected results summary matching the input description. - - There are a lot of different combinations of test results that - can be tested; this routine produces various combinations based - on the values of the input flags. - - Args - expected: whether the tests ran as expected - passing: whether the tests should all pass - flaky: whether the tests should be flaky (if False, they - produce the same results on both runs; if True, they - all pass on the second run). 
- - """ - paths, rs, exp = self.get_result_summary(tests, expectations) - if expected: - rs.add(self.get_result('passes/text.html', test_expectations.PASS), - expected) - rs.add(self.get_result('failures/expected/timeout.html', - test_expectations.TIMEOUT), expected) - rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), - expected) - elif passing: - rs.add(self.get_result('passes/text.html'), expected) - rs.add(self.get_result('failures/expected/timeout.html'), expected) - rs.add(self.get_result('failures/expected/crash.html'), expected) - else: - rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), - expected) - rs.add(self.get_result('failures/expected/timeout.html', - test_expectations.CRASH), expected) - rs.add(self.get_result('failures/expected/crash.html', - test_expectations.TIMEOUT), - expected) - retry = rs - if flaky: - paths, retry, exp = self.get_result_summary(tests, - expectations) - retry.add(self.get_result('passes/text.html'), True) - retry.add(self.get_result('failures/expected/timeout.html'), True) - retry.add(self.get_result('failures/expected/crash.html'), True) - unexpected_results = run_webkit_tests.summarize_unexpected_results( - self._port, exp, rs, retry) - return unexpected_results - - tests = ['passes/text.html', 'failures/expected/timeout.html', - 'failures/expected/crash.html'] - expectations = '' - - printer, err, out = self.get_printer(['--print', 'nothing']) - ur = get_unexpected_results(expected=False, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertTrue(out.empty()) - - printer, err, out = self.get_printer(['--print', - 'unexpected-results']) - - # test everything running as expected - ur = get_unexpected_results(expected=True, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertTrue(out.empty()) - - # test failures - err.reset() - out.reset() - ur = get_unexpected_results(expected=False, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - # test unexpected flaky results - err.reset() - out.reset() - ur = get_unexpected_results(expected=False, passing=True, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - # test unexpected passes - err.reset() - out.reset() - ur = get_unexpected_results(expected=False, passing=False, flaky=True) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - err.reset() - out.reset() - printer, err, out = self.get_printer(['--print', 'everything']) - ur = get_unexpected_results(expected=False, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - expectations = """ -failures/expected/crash.html = CRASH -failures/expected/timeout.html = TIMEOUT -""" - err.reset() - out.reset() - ur = get_unexpected_results(expected=False, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - err.reset() - out.reset() - ur = get_unexpected_results(expected=False, passing=True, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - # Test handling of --verbose as well. 
- err.reset() - out.reset() - printer, err, out = self.get_printer(['--verbose']) - ur = get_unexpected_results(expected=False, passing=False, flaky=False) - printer.print_unexpected_results(ur) - self.assertTrue(err.empty()) - self.assertFalse(out.empty()) - - def test_print_unexpected_results_buildbot(self): - # FIXME: Test that print_unexpected_results() produces the printer the - # buildbot is expecting. - pass - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py deleted file mode 100644 index 67873a8..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py +++ /dev/null @@ -1,843 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A helper class for reading in and dealing with tests expectations -for layout tests. -""" - -import logging -import os -import re -import sys - -import webkitpy.thirdparty.simplejson as simplejson - -_log = logging.getLogger("webkitpy.layout_tests.layout_package." - "test_expectations") - -# Test expectation and modifier constants. -(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, - SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15) - -# Test expectation file update action constants -(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) - - -def result_was_expected(result, expected_results, test_needs_rebaselining, - test_is_skipped): - """Returns whether we got a result we were expecting. 
- Args: - result: actual result of a test execution - expected_results: set of results listed in test_expectations - test_needs_rebaselining: whether test was marked as REBASELINE - test_is_skipped: whether test was marked as SKIP""" - if result in expected_results: - return True - if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected_results: - return True - if result == MISSING and test_needs_rebaselining: - return True - if result == SKIP and test_is_skipped: - return True - return False - - -def remove_pixel_failures(expected_results): - """Returns a copy of the expected results for a test, except that we - drop any pixel failures and return the remaining expectations. For example, - if we're not running pixel tests, then tests expected to fail as IMAGE - will PASS.""" - expected_results = expected_results.copy() - if IMAGE in expected_results: - expected_results.remove(IMAGE) - expected_results.add(PASS) - if IMAGE_PLUS_TEXT in expected_results: - expected_results.remove(IMAGE_PLUS_TEXT) - expected_results.add(TEXT) - return expected_results - - -class TestExpectations: - TEST_LIST = "test_expectations.txt" - - def __init__(self, port, tests, expectations, test_platform_name, - is_debug_mode, is_lint_mode, overrides=None): - """Loads and parses the test expectations given in the string. - Args: - port: handle to object containing platform-specific functionality - test: list of all of the test files - expectations: test expectations as a string - test_platform_name: name of the platform to match expectations - against. Note that this may be different than - port.test_platform_name() when is_lint_mode is True. - is_debug_mode: whether to use the DEBUG or RELEASE modifiers - in the expectations - is_lint_mode: If True, just parse the expectations string - looking for errors. - overrides: test expectations that are allowed to override any - entries in |expectations|. This is used by callers - that need to manage two sets of expectations (e.g., upstream - and downstream expectations). - """ - self._expected_failures = TestExpectationsFile(port, expectations, - tests, test_platform_name, is_debug_mode, is_lint_mode, - overrides=overrides) - - # TODO(ojan): Allow for removing skipped tests when getting the list of - # tests to run, but not when getting metrics. - # TODO(ojan): Replace the Get* calls here with the more sane API exposed - # by TestExpectationsFile below. Maybe merge the two classes entirely? - - def get_expectations_json_for_all_platforms(self): - return ( - self._expected_failures.get_expectations_json_for_all_platforms()) - - def get_rebaselining_failures(self): - return (self._expected_failures.get_test_set(REBASELINE, FAIL) | - self._expected_failures.get_test_set(REBASELINE, IMAGE) | - self._expected_failures.get_test_set(REBASELINE, TEXT) | - self._expected_failures.get_test_set(REBASELINE, - IMAGE_PLUS_TEXT)) - - def get_options(self, test): - return self._expected_failures.get_options(test) - - def get_expectations(self, test): - return self._expected_failures.get_expectations(test) - - def get_expectations_string(self, test): - """Returns the expectatons for the given test as an uppercase string. 
- If there are no expectations for the test, then "PASS" is returned.""" - expectations = self.get_expectations(test) - retval = [] - - for expectation in expectations: - retval.append(self.expectation_to_string(expectation)) - - return " ".join(retval) - - def expectation_to_string(self, expectation): - """Return the uppercased string equivalent of a given expectation.""" - for item in TestExpectationsFile.EXPECTATIONS.items(): - if item[1] == expectation: - return item[0].upper() - raise ValueError(expectation) - - def get_tests_with_result_type(self, result_type): - return self._expected_failures.get_tests_with_result_type(result_type) - - def get_tests_with_timeline(self, timeline): - return self._expected_failures.get_tests_with_timeline(timeline) - - def matches_an_expected_result(self, test, result, - pixel_tests_are_enabled): - expected_results = self._expected_failures.get_expectations(test) - if not pixel_tests_are_enabled: - expected_results = remove_pixel_failures(expected_results) - return result_was_expected(result, expected_results, - self.is_rebaselining(test), self.has_modifier(test, SKIP)) - - def is_rebaselining(self, test): - return self._expected_failures.has_modifier(test, REBASELINE) - - def has_modifier(self, test, modifier): - return self._expected_failures.has_modifier(test, modifier) - - def remove_platform_from_expectations(self, tests, platform): - return self._expected_failures.remove_platform_from_expectations( - tests, platform) - - -def strip_comments(line): - """Strips comments from a line and return None if the line is empty - or else the contents of line with leading and trailing spaces removed - and all other whitespace collapsed""" - - commentIndex = line.find('//') - if commentIndex is -1: - commentIndex = len(line) - - line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) - if line == '': - return None - else: - return line - - -class ModifiersAndExpectations: - """A holder for modifiers and expectations on a test that serializes to - JSON.""" - - def __init__(self, modifiers, expectations): - self.modifiers = modifiers - self.expectations = expectations - - -class ExpectationsJsonEncoder(simplejson.JSONEncoder): - """JSON encoder that can handle ModifiersAndExpectations objects.""" - def default(self, obj): - # A ModifiersAndExpectations object has two fields, each of which - # is a dict. Since JSONEncoders handle all the builtin types directly, - # the only time this routine should be called is on the top level - # object (i.e., the encoder shouldn't recurse). - assert isinstance(obj, ModifiersAndExpectations) - return {"modifiers": obj.modifiers, - "expectations": obj.expectations} - - -class TestExpectationsFile: - """Test expectation files consist of lines with specifications of what - to expect from layout test cases. The test cases can be directories - in which case the expectations apply to all test cases in that - directory and any subdirectory. The format of the file is along the - lines of: - - LayoutTests/fast/js/fixme.js = FAIL - LayoutTests/fast/js/flaky.js = FAIL PASS - LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS - ... - - To add other options: - SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS - - SKIP: Doesn't run the test. 
- SLOW: The test takes a long time to run, but does not timeout indefinitely. - WONTFIX: For tests that we never intend to pass on a given platform. - DEBUG: Expectations apply only to the debug build. - RELEASE: Expectations apply only to release build. - LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these - platforms. - - Notes: - -A test cannot be both SLOW and TIMEOUT - -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is - a migratory state that currently means either IMAGE, TEXT, or - IMAGE+TEXT. Once we have finished migrating the expectations, we will - change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT - identifier. - -A test can be included twice, but not via the same path. - -If a test is included twice, then the more precise path wins. - -CRASH tests cannot be WONTFIX - """ - - EXPECTATIONS = {'pass': PASS, - 'fail': FAIL, - 'text': TEXT, - 'image': IMAGE, - 'image+text': IMAGE_PLUS_TEXT, - 'timeout': TIMEOUT, - 'crash': CRASH, - 'missing': MISSING} - - EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), - PASS: ('pass', 'passes'), - FAIL: ('failure', 'failures'), - TEXT: ('text diff mismatch', - 'text diff mismatch'), - IMAGE: ('image mismatch', 'image mismatch'), - IMAGE_PLUS_TEXT: ('image and text mismatch', - 'image and text mismatch'), - CRASH: ('DumpRenderTree crash', - 'DumpRenderTree crashes'), - TIMEOUT: ('test timed out', 'tests timed out'), - MISSING: ('no expected result found', - 'no expected results found')} - - EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, - TEXT, IMAGE, FAIL, SKIP) - - BUILD_TYPES = ('debug', 'release') - - MODIFIERS = {'skip': SKIP, - 'wontfix': WONTFIX, - 'slow': SLOW, - 'rebaseline': REBASELINE, - 'none': NONE} - - TIMELINES = {'wontfix': WONTFIX, - 'now': NOW} - - RESULT_TYPES = {'skip': SKIP, - 'pass': PASS, - 'fail': FAIL, - 'flaky': FLAKY} - - def __init__(self, port, expectations, full_test_list, test_platform_name, - is_debug_mode, is_lint_mode, suppress_errors=False, overrides=None): - """ - expectations: Contents of the expectations file - full_test_list: The list of all tests to be run pending processing of - the expections for those tests. - test_platform_name: name of the platform to match expectations - against. Note that this may be different than - port.test_platform_name() when is_lint_mode is True. - is_debug_mode: Whether we testing a test_shell built debug mode. - is_lint_mode: Whether this is just linting test_expecatations.txt. - suppress_errors: Whether to suppress lint errors. - overrides: test expectations that are allowed to override any - entries in |expectations|. This is used by callers - that need to manage two sets of expectations (e.g., upstream - and downstream expectations). - """ - - self._port = port - self._expectations = expectations - self._full_test_list = full_test_list - self._test_platform_name = test_platform_name - self._is_debug_mode = is_debug_mode - self._is_lint_mode = is_lint_mode - self._overrides = overrides - self._suppress_errors = suppress_errors - self._errors = [] - self._non_fatal_errors = [] - - # Maps relative test paths as listed in the expectations file to a - # list of maps containing modifiers and expectations for each time - # the test is listed in the expectations file. - self._all_expectations = {} - - # Maps a test to its list of expectations. 
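# For reference, concrete lines in the format described by the class docstring
# above (the test paths and bug identifiers here are hypothetical):
#
#   BUGWK12345 : fast/js/flaky-test.js = TEXT PASS
#   BUGWK12345 SKIP WONTFIX : fast/js/hangs.js = TIMEOUT
#   LINUX DEBUG : fast/js/debug-only.js = CRASH
#
# parse_expectations_line() below splits such a line on ':' and '=' and
# lowercases the option and expectation tokens, so the first line yields
# roughly ('fast/js/flaky-test.js', ['bugwk12345'], ['text', 'pass']) as its
# (test, options, expectations) tuple.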
- self._test_to_expectations = {} - - # Maps a test to its list of options (string values) - self._test_to_options = {} - - # Maps a test to its list of modifiers: the constants associated with - # the options minus any bug or platform strings - self._test_to_modifiers = {} - - # Maps a test to the base path that it was listed with in the list. - self._test_list_paths = {} - - self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS) - self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS) - self._timeline_to_tests = self._dict_of_sets(self.TIMELINES) - self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES) - - self._read(self._get_iterable_expectations(self._expectations), - overrides_allowed=False) - - # List of tests that are in the overrides file (used for checking for - # duplicates inside the overrides file itself). Note that just because - # a test is in this set doesn't mean it's necessarily overridding a - # expectation in the regular expectations; the test might not be - # mentioned in the regular expectations file at all. - self._overridding_tests = set() - - if overrides: - self._read(self._get_iterable_expectations(self._overrides), - overrides_allowed=True) - - self._handle_any_read_errors() - self._process_tests_without_expectations() - - def _handle_any_read_errors(self): - if not self._suppress_errors and ( - len(self._errors) or len(self._non_fatal_errors)): - if self._is_debug_mode: - build_type = 'DEBUG' - else: - build_type = 'RELEASE' - _log.error('') - _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" % - (self._test_platform_name.upper(), build_type)) - - for error in self._non_fatal_errors: - _log.error(error) - _log.error('') - - if len(self._errors): - raise SyntaxError('\n'.join(map(str, self._errors))) - - def _process_tests_without_expectations(self): - expectations = set([PASS]) - options = [] - modifiers = [] - if self._full_test_list: - for test in self._full_test_list: - if not test in self._test_list_paths: - self._add_test(test, modifiers, expectations, options, - overrides_allowed=False) - - def _dict_of_sets(self, strings_to_constants): - """Takes a dict of strings->constants and returns a dict mapping - each constant to an empty set.""" - d = {} - for c in strings_to_constants.values(): - d[c] = set() - return d - - def _get_iterable_expectations(self, expectations_str): - """Returns an object that can be iterated over. Allows for not caring - about whether we're iterating over a file or a new-line separated - string.""" - iterable = [x + "\n" for x in expectations_str.split("\n")] - # Strip final entry if it's empty to avoid added in an extra - # newline. - if iterable[-1] == "\n": - return iterable[:-1] - return iterable - - def get_test_set(self, modifier, expectation=None, include_skips=True): - if expectation is None: - tests = self._modifier_to_tests[modifier] - else: - tests = (self._expectation_to_tests[expectation] & - self._modifier_to_tests[modifier]) - - if not include_skips: - tests = tests - self.get_test_set(SKIP, expectation) - - return tests - - def get_tests_with_result_type(self, result_type): - return self._result_type_to_tests[result_type] - - def get_tests_with_timeline(self, timeline): - return self._timeline_to_tests[timeline] - - def get_options(self, test): - """This returns the entire set of options for the given test - (the modifiers plus the BUGXXXX identifier). 
This is used by the - LTTF dashboard.""" - return self._test_to_options[test] - - def has_modifier(self, test, modifier): - return test in self._modifier_to_tests[modifier] - - def get_expectations(self, test): - return self._test_to_expectations[test] - - def get_expectations_json_for_all_platforms(self): - # Specify separators in order to get compact encoding. - return ExpectationsJsonEncoder(separators=(',', ':')).encode( - self._all_expectations) - - def get_non_fatal_errors(self): - return self._non_fatal_errors - - def remove_platform_from_expectations(self, tests, platform): - """Returns a copy of the expectations with the tests matching the - platform removed. - - If a test is in the test list and has an option that matches the given - platform, remove the matching platform and save the updated test back - to the file. If no other platforms remaining after removal, delete the - test from the file. - - Args: - tests: list of tests that need to update.. - platform: which platform option to remove. - - Returns: - the updated string. - """ - - assert(platform) - f_orig = self._get_iterable_expectations(self._expectations) - f_new = [] - - tests_removed = 0 - tests_updated = 0 - lineno = 0 - for line in f_orig: - lineno += 1 - action = self._get_platform_update_action(line, lineno, tests, - platform) - assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, - ADD_PLATFORMS_EXCEPT_THIS)) - if action == NO_CHANGE: - # Save the original line back to the file - _log.debug('No change to test: %s', line) - f_new.append(line) - elif action == REMOVE_TEST: - tests_removed += 1 - _log.info('Test removed: %s', line) - elif action == REMOVE_PLATFORM: - parts = line.split(':') - new_options = parts[0].replace(platform.upper() + ' ', '', 1) - new_line = ('%s:%s' % (new_options, parts[1])) - f_new.append(new_line) - tests_updated += 1 - _log.info('Test updated: ') - _log.info(' old: %s', line) - _log.info(' new: %s', new_line) - elif action == ADD_PLATFORMS_EXCEPT_THIS: - parts = line.split(':') - new_options = parts[0] - for p in self._port.test_platform_names(): - p = p.upper() - # This is a temp solution for rebaselining tool. - # Do not add tags WIN-7 and WIN-VISTA to test expectations - # if the original line does not specify the platform - # option. - # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have - # reliable Win 7 and Win Vista buildbots setup. 
- if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'): - new_options += p + ' ' - new_line = ('%s:%s' % (new_options, parts[1])) - f_new.append(new_line) - tests_updated += 1 - _log.info('Test updated: ') - _log.info(' old: %s', line) - _log.info(' new: %s', new_line) - - _log.info('Total tests removed: %d', tests_removed) - _log.info('Total tests updated: %d', tests_updated) - - return "".join(f_new) - - def parse_expectations_line(self, line, lineno): - """Parses a line from test_expectations.txt and returns a tuple - with the test path, options as a list, expectations as a list.""" - line = strip_comments(line) - if not line: - return (None, None, None) - - options = [] - if line.find(":") is -1: - test_and_expectation = line.split("=") - else: - parts = line.split(":") - options = self._get_options_list(parts[0]) - test_and_expectation = parts[1].split('=') - - test = test_and_expectation[0].strip() - if (len(test_and_expectation) is not 2): - self._add_error(lineno, "Missing expectations.", - test_and_expectation) - expectations = None - else: - expectations = self._get_options_list(test_and_expectation[1]) - - return (test, options, expectations) - - def _get_platform_update_action(self, line, lineno, tests, platform): - """Check the platform option and return the action needs to be taken. - - Args: - line: current line in test expectations file. - lineno: current line number of line - tests: list of tests that need to update.. - platform: which platform option to remove. - - Returns: - NO_CHANGE: no change to the line (comments, test not in the list etc) - REMOVE_TEST: remove the test from file. - REMOVE_PLATFORM: remove this platform option from the test. - ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. - """ - test, options, expectations = self.parse_expectations_line(line, - lineno) - if not test or test not in tests: - return NO_CHANGE - - has_any_platform = False - for option in options: - if option in self._port.test_platform_names(): - has_any_platform = True - if not option == platform: - return REMOVE_PLATFORM - - # If there is no platform specified, then it means apply to all - # platforms. Return the action to add all the platforms except this - # one. - if not has_any_platform: - return ADD_PLATFORMS_EXCEPT_THIS - - return REMOVE_TEST - - def _has_valid_modifiers_for_current_platform(self, options, lineno, - test_and_expectations, modifiers): - """Returns true if the current platform is in the options list or if - no platforms are listed and if there are no fatal errors in the - options list. - - Args: - options: List of lowercase options. - lineno: The line in the file where the test is listed. - test_and_expectations: The path and expectations for the test. - modifiers: The set to populate with modifiers. - """ - has_any_platform = False - has_bug_id = False - for option in options: - if option in self.MODIFIERS: - modifiers.add(option) - elif option in self._port.test_platform_names(): - has_any_platform = True - elif option.startswith('bug'): - has_bug_id = True - elif option not in self.BUILD_TYPES: - self._add_error(lineno, 'Invalid modifier for test: %s' % - option, test_and_expectations) - - if has_any_platform and not self._match_platform(options): - return False - - if not has_bug_id and 'wontfix' not in options: - # TODO(ojan): Turn this into an AddError call once all the - # tests have BUG identifiers. 
- self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.', - test_and_expectations) - - if 'release' in options or 'debug' in options: - if self._is_debug_mode and 'debug' not in options: - return False - if not self._is_debug_mode and 'release' not in options: - return False - - if self._is_lint_mode and 'rebaseline' in options: - self._add_error(lineno, - 'REBASELINE should only be used for running rebaseline.py. ' - 'Cannot be checked in.', test_and_expectations) - - return True - - def _match_platform(self, options): - """Match the list of options against our specified platform. If any - of the options prefix-match self._platform, return True. This handles - the case where a test is marked WIN and the platform is WIN-VISTA. - - Args: - options: list of options - """ - for opt in options: - if self._test_platform_name.startswith(opt): - return True - return False - - def _add_to_all_expectations(self, test, options, expectations): - # Make all paths unix-style so the dashboard doesn't need to. - test = test.replace('\\', '/') - if not test in self._all_expectations: - self._all_expectations[test] = [] - self._all_expectations[test].append( - ModifiersAndExpectations(options, expectations)) - - def _read(self, expectations, overrides_allowed): - """For each test in an expectations iterable, generate the - expectations for it.""" - lineno = 0 - for line in expectations: - lineno += 1 - - test_list_path, options, expectations = \ - self.parse_expectations_line(line, lineno) - if not expectations: - continue - - self._add_to_all_expectations(test_list_path, - " ".join(options).upper(), - " ".join(expectations).upper()) - - modifiers = set() - if options and not self._has_valid_modifiers_for_current_platform( - options, lineno, test_list_path, modifiers): - continue - - expectations = self._parse_expectations(expectations, lineno, - test_list_path) - - if 'slow' in options and TIMEOUT in expectations: - self._add_error(lineno, - 'A test can not be both slow and timeout. If it times out ' - 'indefinitely, then it should be just timeout.', - test_list_path) - - full_path = os.path.join(self._port.layout_tests_dir(), - test_list_path) - full_path = os.path.normpath(full_path) - # WebKit's way of skipping tests is to add a -disabled suffix. - # So we should consider the path existing if the path or the - # -disabled version exists. 
- if (not self._port.path_exists(full_path) - and not self._port.path_exists(full_path + '-disabled')): - # Log a non fatal error here since you hit this case any - # time you update test_expectations.txt without syncing - # the LayoutTests directory - self._log_non_fatal_error(lineno, 'Path does not exist.', - test_list_path) - continue - - if not self._full_test_list: - tests = [test_list_path] - else: - tests = self._expand_tests(test_list_path) - - self._add_tests(tests, expectations, test_list_path, lineno, - modifiers, options, overrides_allowed) - - def _get_options_list(self, listString): - return [part.strip().lower() for part in listString.strip().split(' ')] - - def _parse_expectations(self, expectations, lineno, test_list_path): - result = set() - for part in expectations: - if not part in self.EXPECTATIONS: - self._add_error(lineno, 'Unsupported expectation: %s' % part, - test_list_path) - continue - expectation = self.EXPECTATIONS[part] - result.add(expectation) - return result - - def _expand_tests(self, test_list_path): - """Convert the test specification to an absolute, normalized - path and make sure directories end with the OS path separator.""" - path = os.path.join(self._port.layout_tests_dir(), test_list_path) - path = os.path.normpath(path) - if self._port.path_isdir(path): - path = os.path.join(path, '') - - result = [] - for test in self._full_test_list: - if test.startswith(path): - result.append(test) - return result - - def _add_tests(self, tests, expectations, test_list_path, lineno, - modifiers, options, overrides_allowed): - for test in tests: - if self._already_seen_test(test, test_list_path, lineno, - overrides_allowed): - continue - - self._clear_expectations_for_test(test, test_list_path) - self._add_test(test, modifiers, expectations, options, - overrides_allowed) - - def _add_test(self, test, modifiers, expectations, options, - overrides_allowed): - """Sets the expected state for a given test. - - This routine assumes the test has not been added before. If it has, - use _ClearExpectationsForTest() to reset the state prior to - calling this. - - Args: - test: test to add - modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) - expectations: sequence of expectations (PASS, IMAGE, etc.) - options: sequence of keywords and bug identifiers. - overrides_allowed: whether we're parsing the regular expectations - or the overridding expectations""" - self._test_to_expectations[test] = expectations - for expectation in expectations: - self._expectation_to_tests[expectation].add(test) - - self._test_to_options[test] = options - self._test_to_modifiers[test] = set() - for modifier in modifiers: - mod_value = self.MODIFIERS[modifier] - self._modifier_to_tests[mod_value].add(test) - self._test_to_modifiers[test].add(mod_value) - - if 'wontfix' in modifiers: - self._timeline_to_tests[WONTFIX].add(test) - else: - self._timeline_to_tests[NOW].add(test) - - if 'skip' in modifiers: - self._result_type_to_tests[SKIP].add(test) - elif expectations == set([PASS]): - self._result_type_to_tests[PASS].add(test) - elif len(expectations) > 1: - self._result_type_to_tests[FLAKY].add(test) - else: - self._result_type_to_tests[FAIL].add(test) - - if overrides_allowed: - self._overridding_tests.add(test) - - def _clear_expectations_for_test(self, test, test_list_path): - """Remove prexisting expectations for this test. - This happens if we are seeing a more precise path - than a previous listing. 
- """ - if test in self._test_list_paths: - self._test_to_expectations.pop(test, '') - self._remove_from_sets(test, self._expectation_to_tests) - self._remove_from_sets(test, self._modifier_to_tests) - self._remove_from_sets(test, self._timeline_to_tests) - self._remove_from_sets(test, self._result_type_to_tests) - - self._test_list_paths[test] = os.path.normpath(test_list_path) - - def _remove_from_sets(self, test, dict): - """Removes the given test from the sets in the dictionary. - - Args: - test: test to look for - dict: dict of sets of files""" - for set_of_tests in dict.itervalues(): - if test in set_of_tests: - set_of_tests.remove(test) - - def _already_seen_test(self, test, test_list_path, lineno, - allow_overrides): - """Returns true if we've already seen a more precise path for this test - than the test_list_path. - """ - if not test in self._test_list_paths: - return False - - prev_base_path = self._test_list_paths[test] - if (prev_base_path == os.path.normpath(test_list_path)): - if (not allow_overrides or test in self._overridding_tests): - if allow_overrides: - expectation_source = "override" - else: - expectation_source = "expectation" - self._add_error(lineno, 'Duplicate %s.' % expectation_source, - test) - return True - else: - # We have seen this path, but that's okay because its - # in the overrides and the earlier path was in the - # expectations. - return False - - # Check if we've already seen a more precise path. - return prev_base_path.startswith(os.path.normpath(test_list_path)) - - def _add_error(self, lineno, msg, path): - """Reports an error that will prevent running the tests. Does not - immediately raise an exception because we'd like to aggregate all the - errors so they can all be printed out.""" - self._errors.append('\nLine:%s %s %s' % (lineno, msg, path)) - - def _log_non_fatal_error(self, lineno, msg, path): - """Reports an error that will not prevent running the tests. These are - still errors, but not bad enough to warrant breaking test running.""" - self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py deleted file mode 100644 index 55eaf99..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit tests for test_expectations.py.""" - -import os -import sys -import unittest - -from webkitpy.layout_tests import port -from webkitpy.layout_tests.layout_package.test_expectations import * - -class FunctionsTest(unittest.TestCase): - def test_result_was_expected(self): - # test basics - self.assertEquals(result_was_expected(PASS, set([PASS]), - False, False), True) - self.assertEquals(result_was_expected(TEXT, set([PASS]), - False, False), False) - - # test handling of FAIL expectations - self.assertEquals(result_was_expected(IMAGE_PLUS_TEXT, set([FAIL]), - False, False), True) - self.assertEquals(result_was_expected(IMAGE, set([FAIL]), - False, False), True) - self.assertEquals(result_was_expected(TEXT, set([FAIL]), - False, False), True) - self.assertEquals(result_was_expected(CRASH, set([FAIL]), - False, False), False) - - # test handling of SKIPped tests and results - self.assertEquals(result_was_expected(SKIP, set([CRASH]), - False, True), True) - self.assertEquals(result_was_expected(SKIP, set([CRASH]), - False, False), False) - - # test handling of MISSING results and the REBASELINE modifier - self.assertEquals(result_was_expected(MISSING, set([PASS]), - True, False), True) - self.assertEquals(result_was_expected(MISSING, set([PASS]), - False, False), False) - - def test_remove_pixel_failures(self): - self.assertEquals(remove_pixel_failures(set([TEXT])), - set([TEXT])) - self.assertEquals(remove_pixel_failures(set([PASS])), - set([PASS])) - self.assertEquals(remove_pixel_failures(set([IMAGE])), - set([PASS])) - self.assertEquals(remove_pixel_failures(set([IMAGE_PLUS_TEXT])), - set([TEXT])) - self.assertEquals(remove_pixel_failures(set([PASS, IMAGE, CRASH])), - set([PASS, CRASH])) - - -class Base(unittest.TestCase): - def __init__(self, testFunc, setUp=None, tearDown=None, description=None): - self._port = port.get('test', None) - self._exp = None - unittest.TestCase.__init__(self, testFunc) - - def get_test(self, test_name): - return os.path.join(self._port.layout_tests_dir(), test_name) - - def get_basic_tests(self): - return [self.get_test('failures/expected/text.html'), - self.get_test('failures/expected/image_checksum.html'), - self.get_test('failures/expected/crash.html'), - self.get_test('failures/expected/missing_text.html'), - self.get_test('failures/expected/image.html'), - self.get_test('passes/text.html')] - - def get_basic_expectations(self): - return """ -BUG_TEST : failures/expected/text.html = TEXT -BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH -BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING -BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE -BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE -""" - - def parse_exp(self, expectations, overrides=None, is_lint_mode=False, - is_debug_mode=False): - self._exp = TestExpectations(self._port, - tests=self.get_basic_tests(), - expectations=expectations, - test_platform_name=self._port.test_platform_name(), - 
is_debug_mode=is_debug_mode, - is_lint_mode=is_lint_mode, - overrides=overrides) - - def assert_exp(self, test, result): - self.assertEquals(self._exp.get_expectations(self.get_test(test)), - set([result])) - - -class TestExpectationsTest(Base): - def test_basic(self): - self.parse_exp(self.get_basic_expectations()) - self.assert_exp('failures/expected/text.html', TEXT) - self.assert_exp('failures/expected/image_checksum.html', IMAGE) - self.assert_exp('passes/text.html', PASS) - self.assert_exp('failures/expected/image.html', PASS) - - def test_multiple_results(self): - self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH') - self.assertEqual(self._exp.get_expectations( - self.get_test('failures/expected/text.html')), - set([TEXT, CRASH])) - - def test_precedence(self): - # This tests handling precedence of specific lines over directories - # and tests expectations covering entire directories. - exp_str = """ -BUGX : failures/expected/text.html = TEXT -BUGX WONTFIX : failures/expected = IMAGE -""" - self.parse_exp(exp_str) - self.assert_exp('failures/expected/text.html', TEXT) - self.assert_exp('failures/expected/crash.html', IMAGE) - - def test_release_mode(self): - self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT', - is_debug_mode=True) - self.assert_exp('failures/expected/text.html', TEXT) - self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT', - is_debug_mode=True) - self.assert_exp('failures/expected/text.html', PASS) - self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT', - is_debug_mode=False) - self.assert_exp('failures/expected/text.html', PASS) - self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT', - is_debug_mode=False) - self.assert_exp('failures/expected/text.html', TEXT) - - def test_get_options(self): - self.parse_exp(self.get_basic_expectations()) - self.assertEqual(self._exp.get_options( - self.get_test('passes/text.html')), []) - - def test_expectations_json_for_all_platforms(self): - self.parse_exp(self.get_basic_expectations()) - json_str = self._exp.get_expectations_json_for_all_platforms() - # FIXME: test actual content? - self.assertTrue(json_str) - - def test_get_expectations_string(self): - self.parse_exp(self.get_basic_expectations()) - self.assertEquals(self._exp.get_expectations_string( - self.get_test('failures/expected/text.html')), - 'TEXT') - - def test_expectation_to_string(self): - # Normal cases are handled by other tests. - self.parse_exp(self.get_basic_expectations()) - self.assertRaises(ValueError, self._exp.expectation_to_string, - -1) - - def test_get_test_set(self): - # Handle some corner cases for this routine not covered by other tests. - self.parse_exp(self.get_basic_expectations()) - s = self._exp._expected_failures.get_test_set(WONTFIX) - self.assertEqual(s, - set([self.get_test('failures/expected/crash.html'), - self.get_test('failures/expected/image_checksum.html')])) - s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH) - self.assertEqual(s, - set([self.get_test('failures/expected/crash.html')])) - s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH, - include_skips=False) - self.assertEqual(s, set([])) - - def test_syntax_missing_expectation(self): - # This is missing the expectation. 
- self.assertRaises(SyntaxError, self.parse_exp, - 'BUG_TEST: failures/expected/text.html', - is_debug_mode=True) - - def test_syntax_invalid_option(self): - self.assertRaises(SyntaxError, self.parse_exp, - 'BUG_TEST FOO: failures/expected/text.html = PASS') - - def test_syntax_invalid_expectation(self): - # This is missing the expectation. - self.assertRaises(SyntaxError, self.parse_exp, - 'BUG_TEST: failures/expected/text.html = FOO') - - def test_syntax_missing_bugid(self): - # This should log a non-fatal error. - self.parse_exp('SLOW : failures/expected/text.html = TEXT') - self.assertEqual( - len(self._exp._expected_failures.get_non_fatal_errors()), 1) - - def test_semantic_slow_and_timeout(self): - # A test cannot be SLOW and expected to TIMEOUT. - self.assertRaises(SyntaxError, self.parse_exp, - 'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT') - - def test_semantic_rebaseline(self): - # Can't lint a file w/ 'REBASELINE' in it. - self.assertRaises(SyntaxError, self.parse_exp, - 'BUG_TEST REBASELINE : failures/expected/text.html = TEXT', - is_lint_mode=True) - - def test_semantic_duplicates(self): - self.assertRaises(SyntaxError, self.parse_exp, """ -BUG_TEST : failures/expected/text.html = TEXT -BUG_TEST : failures/expected/text.html = IMAGE""") - - self.assertRaises(SyntaxError, self.parse_exp, - self.get_basic_expectations(), """ -BUG_TEST : failures/expected/text.html = TEXT -BUG_TEST : failures/expected/text.html = IMAGE""") - - def test_semantic_missing_file(self): - # This should log a non-fatal error. - self.parse_exp('BUG_TEST : missing_file.html = TEXT') - self.assertEqual( - len(self._exp._expected_failures.get_non_fatal_errors()), 1) - - - def test_overrides(self): - self.parse_exp(self.get_basic_expectations(), """ -BUG_OVERRIDE : failures/expected/text.html = IMAGE""") - self.assert_exp('failures/expected/text.html', IMAGE) - - def test_matches_an_expected_result(self): - - def match(test, result, pixel_tests_enabled): - return self._exp.matches_an_expected_result( - self.get_test(test), result, pixel_tests_enabled) - - self.parse_exp(self.get_basic_expectations()) - self.assertTrue(match('failures/expected/text.html', TEXT, True)) - self.assertTrue(match('failures/expected/text.html', TEXT, False)) - self.assertFalse(match('failures/expected/text.html', CRASH, True)) - self.assertFalse(match('failures/expected/text.html', CRASH, False)) - self.assertTrue(match('failures/expected/image_checksum.html', IMAGE, - True)) - self.assertTrue(match('failures/expected/image_checksum.html', PASS, - False)) - self.assertTrue(match('failures/expected/crash.html', SKIP, False)) - self.assertTrue(match('passes/text.html', PASS, False)) - - -class RebaseliningTest(Base): - """Test rebaselining-specific functionality.""" - def assertRemove(self, platform, input_expectations, expected_expectations): - self.parse_exp(input_expectations) - test = self.get_test('failures/expected/text.html') - actual_expectations = self._exp.remove_platform_from_expectations( - test, platform) - self.assertEqual(expected_expectations, actual_expectations) - - def test_no_get_rebaselining_failures(self): - self.parse_exp(self.get_basic_expectations()) - self.assertEqual(len(self._exp.get_rebaselining_failures()), 0) - - def test_get_rebaselining_failures_expand(self): - self.parse_exp(""" -BUG_TEST REBASELINE : failures/expected/text.html = TEXT -""") - self.assertEqual(len(self._exp.get_rebaselining_failures()), 1) - - def test_remove_expand(self): - self.assertRemove('mac', - 'BUGX REBASELINE : 
failures/expected/text.html = TEXT\n', - 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n') - - def test_remove_mac_win(self): - self.assertRemove('mac', - 'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n', - 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n') - - def test_remove_mac_mac(self): - self.assertRemove('mac', - 'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n', - '') - - def test_remove_nothing(self): - self.assertRemove('mac', - '\n\n', - '\n\n') - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py deleted file mode 100644 index 6d55761..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Classes for failures that occur during tests.""" - -import os -import test_expectations - -import cPickle - - -def determine_result_type(failure_list): - """Takes a set of test_failures and returns which result type best fits - the list of failures. "Best fits" means we use the worst type of failure. 
- - Returns: - one of the test_expectations result types - PASS, TEXT, CRASH, etc.""" - - if not failure_list or len(failure_list) == 0: - return test_expectations.PASS - - failure_types = [type(f) for f in failure_list] - if FailureCrash in failure_types: - return test_expectations.CRASH - elif FailureTimeout in failure_types: - return test_expectations.TIMEOUT - elif (FailureMissingResult in failure_types or - FailureMissingImage in failure_types or - FailureMissingImageHash in failure_types): - return test_expectations.MISSING - else: - is_text_failure = FailureTextMismatch in failure_types - is_image_failure = (FailureImageHashIncorrect in failure_types or - FailureImageHashMismatch in failure_types) - if is_text_failure and is_image_failure: - return test_expectations.IMAGE_PLUS_TEXT - elif is_text_failure: - return test_expectations.TEXT - elif is_image_failure: - return test_expectations.IMAGE - else: - raise ValueError("unclassifiable set of failures: " - + str(failure_types)) - - -class TestFailure(object): - """Abstract base class that defines the failure interface.""" - - @staticmethod - def loads(s): - """Creates a TestFailure object from the specified string.""" - return cPickle.loads(s) - - @staticmethod - def message(): - """Returns a string describing the failure in more detail.""" - raise NotImplementedError - - def __eq__(self, other): - return self.__class__.__name__ == other.__class__.__name__ - - def __ne__(self, other): - return self.__class__.__name__ != other.__class__.__name__ - - def dumps(self): - """Returns the string/JSON representation of a TestFailure.""" - return cPickle.dumps(self) - - def result_html_output(self, filename): - """Returns an HTML string to be included on the results.html page.""" - raise NotImplementedError - - def should_kill_dump_render_tree(self): - """Returns True if we should kill DumpRenderTree before the next - test.""" - return False - - def relative_output_filename(self, filename, modifier): - """Returns a relative filename inside the output dir that contains - modifier. - - For example, if filename is fast\dom\foo.html and modifier is - "-expected.txt", the return value is fast\dom\foo-expected.txt - - Args: - filename: relative filename to test file - modifier: a string to replace the extension of filename with - - Return: - The relative windows path to the output filename - """ - return os.path.splitext(filename)[0] + modifier - - -class FailureWithType(TestFailure): - """Base class that produces standard HTML output based on the test type. - - Subclasses may commonly choose to override the ResultHtmlOutput, but still - use the standard OutputLinks. - """ - - def __init__(self): - TestFailure.__init__(self) - - # Filename suffixes used by ResultHtmlOutput. - OUT_FILENAMES = () - - def output_links(self, filename, out_names): - """Returns a string holding all applicable output file links. - - Args: - filename: the test filename, used to construct the result file names - out_names: list of filename suffixes for the files. If three or more - suffixes are in the list, they should be [actual, expected, diff, - wdiff]. Two suffixes should be [actual, expected], and a - single item is the [actual] filename suffix. - If out_names is empty, returns the empty string. - """ - # FIXME: Seems like a bad idea to separate the display name data - # from the path data by hard-coding the display name here - # and passing in the path information via out_names. 
- # - # FIXME: Also, we don't know for sure that these files exist, - # and we shouldn't be creating links to files that don't exist - # (for example, if we don't actually have wdiff output). - links = [''] - uris = [self.relative_output_filename(filename, fn) for - fn in out_names] - if len(uris) > 1: - links.append("<a href='%s'>expected</a>" % uris[1]) - if len(uris) > 0: - links.append("<a href='%s'>actual</a>" % uris[0]) - if len(uris) > 2: - links.append("<a href='%s'>diff</a>" % uris[2]) - if len(uris) > 3: - links.append("<a href='%s'>wdiff</a>" % uris[3]) - if len(uris) > 4: - links.append("<a href='%s'>pretty diff</a>" % uris[4]) - return ' '.join(links) - - def result_html_output(self, filename): - return self.message() + self.output_links(filename, self.OUT_FILENAMES) - - -class FailureTimeout(TestFailure): - """Test timed out. We also want to restart DumpRenderTree if this - happens.""" - - @staticmethod - def message(): - return "Test timed out" - - def result_html_output(self, filename): - return "<strong>%s</strong>" % self.message() - - def should_kill_dump_render_tree(self): - return True - - -class FailureCrash(TestFailure): - """Test shell crashed.""" - - @staticmethod - def message(): - return "Test shell crashed" - - def result_html_output(self, filename): - # FIXME: create a link to the minidump file - stack = self.relative_output_filename(filename, "-stack.txt") - return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(), - stack) - - def should_kill_dump_render_tree(self): - return True - - -class FailureMissingResult(FailureWithType): - """Expected result was missing.""" - OUT_FILENAMES = ("-actual.txt",) - - @staticmethod - def message(): - return "No expected results found" - - def result_html_output(self, filename): - return ("<strong>%s</strong>" % self.message() + - self.output_links(filename, self.OUT_FILENAMES)) - - -class FailureTextMismatch(FailureWithType): - """Text diff output failed.""" - # Filename suffixes used by ResultHtmlOutput. - # FIXME: Why don't we use the constants from TestTypeBase here? - OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt", - "-wdiff.html", "-pretty-diff.html") - - @staticmethod - def message(): - return "Text diff mismatch" - - -class FailureMissingImageHash(FailureWithType): - """Actual result hash was missing.""" - # Chrome doesn't know to display a .checksum file as text, so don't bother - # putting in a link to the actual result. - - @staticmethod - def message(): - return "No expected image hash found" - - def result_html_output(self, filename): - return "<strong>%s</strong>" % self.message() - - -class FailureMissingImage(FailureWithType): - """Actual result image was missing.""" - OUT_FILENAMES = ("-actual.png",) - - @staticmethod - def message(): - return "No expected image found" - - def result_html_output(self, filename): - return ("<strong>%s</strong>" % self.message() + - self.output_links(filename, self.OUT_FILENAMES)) - - -class FailureImageHashMismatch(FailureWithType): - """Image hashes didn't match.""" - OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png") - - @staticmethod - def message(): - # We call this a simple image mismatch to avoid confusion, since - # we link to the PNGs rather than the checksums. - return "Image mismatch" - - -class FailureImageHashIncorrect(FailureWithType): - """Actual result hash is incorrect.""" - # Chrome doesn't know to display a .checksum file as text, so don't bother - # putting in a link to the actual result. 
- - @staticmethod - def message(): - return "Images match, expected image hash incorrect. " - - def result_html_output(self, filename): - return "<strong>%s</strong>" % self.message() - -# Convenient collection of all failure classes for anything that might -# need to enumerate over them all. -ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult, - FailureTextMismatch, FailureMissingImageHash, - FailureMissingImage, FailureImageHashMismatch, - FailureImageHashIncorrect) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py deleted file mode 100644 index 3e3528d..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""""Tests code paths not covered by the regular unit tests.""" - -import unittest - -from webkitpy.layout_tests.layout_package.test_failures import * - - -class Test(unittest.TestCase): - def assertResultHtml(self, failure_obj): - self.assertNotEqual(failure_obj.result_html_output('foo'), None) - - def assert_loads(self, cls): - failure_obj = cls() - s = failure_obj.dumps() - new_failure_obj = TestFailure.loads(s) - self.assertTrue(isinstance(new_failure_obj, cls)) - - self.assertEqual(failure_obj, new_failure_obj) - - # Also test that != is implemented. 
- self.assertFalse(failure_obj != new_failure_obj) - - def test_crash(self): - self.assertResultHtml(FailureCrash()) - - def test_hash_incorrect(self): - self.assertResultHtml(FailureImageHashIncorrect()) - - def test_missing(self): - self.assertResultHtml(FailureMissingResult()) - - def test_missing_image(self): - self.assertResultHtml(FailureMissingImage()) - - def test_missing_image_hash(self): - self.assertResultHtml(FailureMissingImageHash()) - - def test_timeout(self): - self.assertResultHtml(FailureTimeout()) - - def test_unknown_failure_type(self): - class UnknownFailure(TestFailure): - pass - - failure_obj = UnknownFailure() - self.assertRaises(ValueError, determine_result_type, [failure_obj]) - self.assertRaises(NotImplementedError, failure_obj.message) - self.assertRaises(NotImplementedError, failure_obj.result_html_output, - "foo.txt") - - def test_loads(self): - for c in ALL_FAILURE_CLASSES: - self.assert_loads(c) - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py deleted file mode 100644 index e809be6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_output.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -class TestOutput(object): - """Groups information about a test output for easy passing of data. - - This is used not only for a actual test output, but also for grouping - expected test output. - """ - - def __init__(self, text, image, image_hash, - crash=None, test_time=None, timeout=None, error=None): - """Initializes a TestOutput object. 
- - Args: - text: a text output - image: an image output - image_hash: a string containing the checksum of the image - crash: a boolean indicating whether the driver crashed on the test - test_time: a time which the test has taken - timeout: a boolean indicating whehter the test timed out - error: any unexpected or additional (or error) text output - """ - self.text = text - self.image = image - self.image_hash = image_hash - self.crash = crash - self.test_time = test_time - self.timeout = timeout - self.error = error diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py deleted file mode 100644 index 2417fb7..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
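The deleted failure and result classes in this part of the tree (TestFailure above, TestResult just below) share one serialization idiom: an instance method dumps() that pickles the object, a static loads() that rebuilds it, and __eq__/__ne__ so the unit tests can verify the round-trip. The following is a minimal, self-contained sketch of that idiom, not part of the deleted files; the Serializable and ExampleFailure names are invented for illustration, and it targets Python 2 like the code it mirrors.

    # Illustrative sketch of the dumps()/loads() round-trip pattern used by
    # the deleted TestFailure and TestResult classes (Python 2, cPickle).
    import cPickle


    class Serializable(object):
        """Minimal stand-in for the serialization idiom in these modules."""

        @staticmethod
        def loads(s):
            # Rebuilds whichever subclass was pickled.
            return cPickle.loads(s)

        def dumps(self):
            # Returns the pickled string representation of this object.
            return cPickle.dumps(self)

        def __eq__(self, other):
            # TestFailure compares only the class name; the failure's type
            # is treated as its identity.
            return self.__class__.__name__ == other.__class__.__name__

        def __ne__(self, other):
            return not (self == other)


    class ExampleFailure(Serializable):
        pass


    if __name__ == '__main__':
        original = ExampleFailure()
        restored = Serializable.loads(original.dumps())
        assert isinstance(restored, ExampleFailure)
        assert restored == original and not (restored != original)

Comparing by class name alone, as the deleted TestFailure does, is sufficient for these value-like objects; TestResult, by contrast, compares its individual fields because two results of the same type can still differ.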
- -import cPickle - -import test_failures - - -class TestResult(object): - """Data object containing the results of a single test.""" - - @staticmethod - def loads(str): - return cPickle.loads(str) - - def __init__(self, filename, failures, test_run_time, - total_time_for_all_diffs, time_for_diffs): - self.failures = failures - self.filename = filename - self.test_run_time = test_run_time - self.time_for_diffs = time_for_diffs - self.total_time_for_all_diffs = total_time_for_all_diffs - self.type = test_failures.determine_result_type(failures) - - def __eq__(self, other): - return (self.filename == other.filename and - self.failures == other.failures and - self.test_run_time == other.test_run_time and - self.time_for_diffs == other.time_for_diffs and - self.total_time_for_all_diffs == other.total_time_for_all_diffs) - - def __ne__(self, other): - return not (self == other) - - def dumps(self): - return cPickle.dumps(self) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py deleted file mode 100644 index 5921666..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import unittest - -from test_results import TestResult - - -class Test(unittest.TestCase): - def test_loads(self): - result = TestResult(filename='foo', - failures=[], - test_run_time=1.1, - total_time_for_all_diffs=0.5, - time_for_diffs=0.5) - s = result.dumps() - new_result = TestResult.loads(s) - self.assertTrue(isinstance(new_result, TestResult)) - - self.assertEqual(new_result, result) - - # Also check that != is implemented. 
- self.assertFalse(new_result != result) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py deleted file mode 100644 index 033c8c6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py +++ /dev/null @@ -1,107 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import with_statement - -import codecs -import mimetypes -import socket -import urllib2 - -from webkitpy.common.net.networktransaction import NetworkTransaction - -def get_mime_type(filename): - return mimetypes.guess_type(filename)[0] or 'application/octet-stream' - - -def _encode_multipart_form_data(fields, files): - """Encode form fields for multipart/form-data. - - Args: - fields: A sequence of (name, value) elements for regular form fields. - files: A sequence of (name, filename, value) elements for data to be - uploaded as files. - Returns: - (content_type, body) ready for httplib.HTTP instance. 
- - Source: - http://code.google.com/p/rietveld/source/browse/trunk/upload.py - """ - BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' - CRLF = '\r\n' - lines = [] - - for key, value in fields: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"' % key) - lines.append('') - if isinstance(value, unicode): - value = value.encode('utf-8') - lines.append(value) - - for key, filename, value in files: - lines.append('--' + BOUNDARY) - lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) - lines.append('Content-Type: %s' % get_mime_type(filename)) - lines.append('') - if isinstance(value, unicode): - value = value.encode('utf-8') - lines.append(value) - - lines.append('--' + BOUNDARY + '--') - lines.append('') - body = CRLF.join(lines) - content_type = 'multipart/form-data; boundary=%s' % BOUNDARY - return content_type, body - - -class TestResultsUploader: - def __init__(self, host): - self._host = host - - def _upload_files(self, attrs, file_objs): - url = "http://%s/testfile/upload" % self._host - content_type, data = _encode_multipart_form_data(attrs, file_objs) - headers = {"Content-Type": content_type} - request = urllib2.Request(url, data, headers) - urllib2.urlopen(request) - - def upload(self, params, files, timeout_seconds): - file_objs = [] - for filename, path in files: - with codecs.open(path, "rb") as file: - file_objs.append(('file', filename, file.read())) - - orig_timeout = socket.getdefaulttimeout() - try: - socket.setdefaulttimeout(timeout_seconds) - NetworkTransaction(timeout_seconds=timeout_seconds).run( - lambda: self._upload_files(params, file_objs)) - finally: - socket.setdefaulttimeout(orig_timeout) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py deleted file mode 100644 index e3ad6f4..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Port-specific entrypoints for the layout tests test infrastructure.""" - -from factory import get diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py deleted file mode 100644 index 46617f6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A class to start/stop the apache http server used by layout tests.""" - - -from __future__ import with_statement - -import codecs -import logging -import optparse -import os -import re -import subprocess -import sys - -import http_server_base - -_log = logging.getLogger("webkitpy.layout_tests.port.apache_http_server") - - -class LayoutTestApacheHttpd(http_server_base.HttpServerBase): - - def __init__(self, port_obj, output_dir): - """Args: - port_obj: handle to the platform-specific routines - output_dir: the absolute path to the layout test result directory - """ - http_server_base.HttpServerBase.__init__(self, port_obj) - self._output_dir = output_dir - self._httpd_proc = None - port_obj.maybe_make_directory(output_dir) - - self.mappings = [{'port': 8000}, - {'port': 8080}, - {'port': 8081}, - {'port': 8443, 'sslcert': True}] - - # The upstream .conf file assumed the existence of /tmp/WebKit for - # placing apache files like the lock file there. 
- self._runtime_path = os.path.join("/tmp", "WebKit") - port_obj.maybe_make_directory(self._runtime_path) - - # The PID returned when Apache is started goes away (due to dropping - # privileges?). The proper controlling PID is written to a file in the - # apache runtime directory. - self._pid_file = os.path.join(self._runtime_path, 'httpd.pid') - - test_dir = self._port_obj.layout_tests_dir() - js_test_resources_dir = self._cygwin_safe_join(test_dir, "fast", "js", - "resources") - mime_types_path = self._cygwin_safe_join(test_dir, "http", "conf", - "mime.types") - cert_file = self._cygwin_safe_join(test_dir, "http", "conf", - "webkit-httpd.pem") - access_log = self._cygwin_safe_join(output_dir, "access_log.txt") - error_log = self._cygwin_safe_join(output_dir, "error_log.txt") - document_root = self._cygwin_safe_join(test_dir, "http", "tests") - - # FIXME: We shouldn't be calling a protected method of _port_obj! - executable = self._port_obj._path_to_apache() - if self._is_cygwin(): - executable = self._get_cygwin_path(executable) - - cmd = [executable, - '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir), - '-C', "\'DocumentRoot \"%s\"\'" % document_root, - '-c', "\'Alias /js-test-resources \"%s\"'" % js_test_resources_dir, - '-C', "\'Listen %s\'" % "127.0.0.1:8000", - '-C', "\'Listen %s\'" % "127.0.0.1:8081", - '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, - '-c', "\'CustomLog \"%s\" common\'" % access_log, - '-c', "\'ErrorLog \"%s\"\'" % error_log, - '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", - os.environ.get("USER", ""))] - - if self._is_cygwin(): - cygbin = self._port_obj._path_from_base('third_party', 'cygwin', - 'bin') - # Not entirely sure why, but from cygwin we need to run the - # httpd command through bash. - self._start_cmd = [ - os.path.join(cygbin, 'bash.exe'), - '-c', - 'PATH=%s %s' % (self._get_cygwin_path(cygbin), " ".join(cmd)), - ] - else: - # TODO(ojan): When we get cygwin using Apache 2, use set the - # cert file for cygwin as well. - cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) - # Join the string here so that Cygwin/Windows and Mac/Linux - # can use the same code. Otherwise, we could remove the single - # quotes above and keep cmd as a sequence. - self._start_cmd = " ".join(cmd) - - def _is_cygwin(self): - return sys.platform in ("win32", "cygwin") - - def _cygwin_safe_join(self, *parts): - """Returns a platform appropriate path.""" - path = os.path.join(*parts) - if self._is_cygwin(): - return self._get_cygwin_path(path) - return path - - def _get_cygwin_path(self, path): - """Convert a Windows path to a cygwin path. - - The cygpath utility insists on converting paths that it thinks are - Cygwin root paths to what it thinks the correct roots are. So paths - such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin" - are converted to plain "/usr/bin". To avoid this, we - do the conversion manually. - - The path is expected to be an absolute path, on any drive. - """ - drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE) - - def lower_drive(matchobj): - return '/cygdrive/%s/' % matchobj.group(1).lower() - path = drive_regexp.sub(lower_drive, path) - return path.replace('\\', '/') - - def _get_apache_config_file_path(self, test_dir, output_dir): - """Returns the path to the apache config file to use. - Args: - test_dir: absolute path to the LayoutTests directory. - output_dir: absolute path to the layout test results directory. 
- """ - httpd_config = self._port_obj._path_to_apache_config_file() - httpd_config_copy = os.path.join(output_dir, "httpd.conf") - # httpd.conf is always utf-8 according to http://archive.apache.org/gnats/10125 - with codecs.open(httpd_config, "r", "utf-8") as httpd_config_file: - httpd_conf = httpd_config_file.read() - if self._is_cygwin(): - # This is a gross hack, but it lets us use the upstream .conf file - # and our checked in cygwin. This tells the server the root - # directory to look in for .so modules. It will use this path - # plus the relative paths to the .so files listed in the .conf - # file. We have apache/cygwin checked into our tree so - # people don't have to install it into their cygwin. - cygusr = self._port_obj._path_from_base('third_party', 'cygwin', - 'usr') - httpd_conf = httpd_conf.replace('ServerRoot "/usr"', - 'ServerRoot "%s"' % self._get_cygwin_path(cygusr)) - - with codecs.open(httpd_config_copy, "w", "utf-8") as file: - file.write(httpd_conf) - - if self._is_cygwin(): - return self._get_cygwin_path(httpd_config_copy) - return httpd_config_copy - - def _get_virtual_host_config(self, document_root, port, ssl=False): - """Returns a <VirtualHost> directive block for an httpd.conf file. - It will listen to 127.0.0.1 on each of the given port. - """ - return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port, - 'DocumentRoot "%s"' % document_root, - ssl and 'SSLEngine On' or '', - '</VirtualHost>', '')) - - def _start_httpd_process(self): - """Starts the httpd process and returns whether there were errors.""" - # Use shell=True because we join the arguments into a string for - # the sake of Window/Cygwin and it needs quoting that breaks - # shell=False. - # FIXME: We should not need to be joining shell arguments into strings. - # shell=True is a trail of tears. - # Note: Not thread safe: http://bugs.python.org/issue2320 - self._httpd_proc = subprocess.Popen(self._start_cmd, - stderr=subprocess.PIPE, - shell=True) - err = self._httpd_proc.stderr.read() - if len(err): - _log.debug(err) - return False - return True - - def start(self): - """Starts the apache http server.""" - # Stop any currently running servers. - self.stop() - - _log.debug("Starting apache http server") - server_started = self.wait_for_action(self._start_httpd_process) - if server_started: - _log.debug("Apache started. Testing ports") - server_started = self.wait_for_action( - self.is_server_running_on_all_ports) - - if server_started: - _log.debug("Server successfully started") - else: - raise Exception('Failed to start http server') - - def stop(self): - """Stops the apache http server.""" - _log.debug("Shutting down any running http servers") - httpd_pid = None - if os.path.exists(self._pid_file): - httpd_pid = int(open(self._pid_file).readline()) - # FIXME: We shouldn't be calling a protected method of _port_obj! - self._port_obj._shut_down_http_server(httpd_pid) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py deleted file mode 100644 index 757318d..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py +++ /dev/null @@ -1,861 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Abstract base class of Port-specific entrypoints for the layout tests -test infrastructure (the Port and Driver classes).""" - -import cgi -import difflib -import errno -import os -import shlex -import sys -import time - -import apache_http_server -import config as port_config -import http_lock -import http_server -import test_files -import websocket_server - -from webkitpy.common import system -from webkitpy.common.system import filesystem -from webkitpy.common.system import logutils -from webkitpy.common.system import path -from webkitpy.common.system.executive import Executive, ScriptError -from webkitpy.common.system.user import User - - -_log = logutils.get_logger(__file__) - - -class DummyOptions(object): - """Fake implementation of optparse.Values. Cloned from - webkitpy.tool.mocktool.MockOptions. - - """ - - def __init__(self, **kwargs): - # The caller can set option values using keyword arguments. We don't - # set any values by default because we don't know how this - # object will be used. Generally speaking unit tests should - # subclass this or provider wrapper functions that set a common - # set of options. - for key, value in kwargs.items(): - self.__dict__[key] = value - - -# FIXME: This class should merge with webkitpy.webkit_port at some point. -class Port(object): - """Abstract class for Port-specific hooks for the layout_test package.""" - - def __init__(self, port_name=None, options=None, - executive=None, - user=None, - filesystem=None, - config=None, - **kwargs): - self._name = port_name - self._options = options - if self._options is None: - # FIXME: Ideally we'd have a package-wide way to get a - # well-formed options object that had all of the necessary - # options defined on it. - self._options = DummyOptions() - self._executive = executive or Executive() - self._user = user or User() - self._filesystem = filesystem or system.filesystem.FileSystem() - self._config = config or port_config.Config(self._executive, - self._filesystem) - self._helper = None - self._http_server = None - self._webkit_base_dir = None - self._websocket_server = None - self._http_lock = None - - # Python's Popen has a bug that causes any pipes opened to a - # process that can't be executed to be leaked. 
Since this - # code is specifically designed to tolerate exec failures - # to gracefully handle cases where wdiff is not installed, - # the bug results in a massive file descriptor leak. As a - # workaround, if an exec failure is ever experienced for - # wdiff, assume it's not available. This will leak one - # file descriptor but that's better than leaking each time - # wdiff would be run. - # - # http://mail.python.org/pipermail/python-list/ - # 2008-August/505753.html - # http://bugs.python.org/issue3210 - self._wdiff_available = True - - self._pretty_patch_path = self.path_from_webkit_base("BugsSite", - "PrettyPatch", "prettify.rb") - self._pretty_patch_available = True - self.set_option_default('configuration', None) - if self._options.configuration is None: - self._options.configuration = self.default_configuration() - - def default_child_processes(self): - """Return the number of DumpRenderTree instances to use for this - port.""" - return self._executive.cpu_count() - - def baseline_path(self): - """Return the absolute path to the directory to store new baselines - in for this port.""" - raise NotImplementedError('Port.baseline_path') - - def baseline_search_path(self): - """Return a list of absolute paths to directories to search under for - baselines. The directories are searched in order.""" - raise NotImplementedError('Port.baseline_search_path') - - def check_build(self, needs_http): - """This routine is used to ensure that the build is up to date - and all the needed binaries are present.""" - raise NotImplementedError('Port.check_build') - - def check_sys_deps(self, needs_http): - """If the port needs to do some runtime checks to ensure that the - tests can be run successfully, it should override this routine. - This step can be skipped with --nocheck-sys-deps. - - Returns whether the system is properly configured.""" - return True - - def check_image_diff(self, override_step=None, logging=True): - """This routine is used to check whether image_diff binary exists.""" - raise NotImplementedError('Port.check_image_diff') - - def check_pretty_patch(self): - """Checks whether we can use the PrettyPatch ruby script.""" - - # check if Ruby is installed - try: - result = self._executive.run_command(['ruby', '--version']) - except OSError, e: - if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]: - _log.error("Ruby is not installed; " - "can't generate pretty patches.") - _log.error('') - return False - - if not self.path_exists(self._pretty_patch_path): - _log.error('Unable to find %s .' % self._pretty_patch_path) - _log.error("Can't generate pretty patches.") - _log.error('') - return False - - return True - - def compare_text(self, expected_text, actual_text): - """Return whether or not the two strings are *not* equal. This - routine is used to diff text output. - - While this is a generic routine, we include it in the Port - interface so that it can be overriden for testing purposes.""" - return expected_text != actual_text - - def diff_image(self, expected_contents, actual_contents, - diff_filename=None, tolerance=0): - """Compare two images and produce a delta image file. - - Return True if the two images are different, False if they are the same. - Also produce a delta image of the two images and write that into - |diff_filename| if it is not None. - - |tolerance| should be a percentage value (0.0 - 100.0). - If it is omitted, the port default tolerance value is used. 
- - """ - raise NotImplementedError('Port.diff_image') - - - def diff_text(self, expected_text, actual_text, - expected_filename, actual_filename): - """Returns a string containing the diff of the two text strings - in 'unified diff' format. - - While this is a generic routine, we include it in the Port - interface so that it can be overriden for testing purposes.""" - - # The filenames show up in the diff output, make sure they're - # raw bytes and not unicode, so that they don't trigger join() - # trying to decode the input. - def to_raw_bytes(str): - if isinstance(str, unicode): - return str.encode('utf-8') - return str - expected_filename = to_raw_bytes(expected_filename) - actual_filename = to_raw_bytes(actual_filename) - diff = difflib.unified_diff(expected_text.splitlines(True), - actual_text.splitlines(True), - expected_filename, - actual_filename) - return ''.join(diff) - - def driver_name(self): - """Returns the name of the actual binary that is performing the test, - so that it can be referred to in log messages. In most cases this - will be DumpRenderTree, but if a port uses a binary with a different - name, it can be overridden here.""" - return "DumpRenderTree" - - def expected_baselines(self, filename, suffix, all_baselines=False): - """Given a test name, finds where the baseline results are located. - - Args: - filename: absolute filename to test file - suffix: file suffix of the expected results, including dot; e.g. - '.txt' or '.png'. This should not be None, but may be an empty - string. - all_baselines: If True, return an ordered list of all baseline paths - for the given platform. If False, return only the first one. - Returns - a list of ( platform_dir, results_filename ), where - platform_dir - abs path to the top of the results tree (or test - tree) - results_filename - relative path from top of tree to the results - file - (os.path.join of the two gives you the full path to the file, - unless None was returned.) - Return values will be in the format appropriate for the current - platform (e.g., "\\" for path separators on Windows). If the results - file is not found, then None will be returned for the directory, - but the expected relative pathname will still be returned. - - This routine is generic but lives here since it is used in - conjunction with the other baseline and filename routines that are - platform specific. - """ - testname = os.path.splitext(self.relative_test_filename(filename))[0] - - baseline_filename = testname + '-expected' + suffix - - baseline_search_path = self.baseline_search_path() - - baselines = [] - for platform_dir in baseline_search_path: - if self.path_exists(self._filesystem.join(platform_dir, - baseline_filename)): - baselines.append((platform_dir, baseline_filename)) - - if not all_baselines and baselines: - return baselines - - # If it wasn't found in a platform directory, return the expected - # result in the test directory, even if no such file actually exists. - platform_dir = self.layout_tests_dir() - if self.path_exists(self._filesystem.join(platform_dir, - baseline_filename)): - baselines.append((platform_dir, baseline_filename)) - - if baselines: - return baselines - - return [(None, baseline_filename)] - - def expected_filename(self, filename, suffix): - """Given a test name, returns an absolute path to its expected results. - - If no expected results are found in any of the searched directories, - the directory in which the test itself is located will be returned. 
- The return value is in the format appropriate for the platform - (e.g., "\\" for path separators on windows). - - Args: - filename: absolute filename to test file - suffix: file suffix of the expected results, including dot; e.g. '.txt' - or '.png'. This should not be None, but may be an empty string. - platform: the most-specific directory name to use to build the - search list of directories, e.g., 'chromium-win', or - 'chromium-mac-leopard' (we follow the WebKit format) - - This routine is generic but is implemented here to live alongside - the other baseline and filename manipulation routines. - """ - platform_dir, baseline_filename = self.expected_baselines( - filename, suffix)[0] - if platform_dir: - return self._filesystem.join(platform_dir, baseline_filename) - return self._filesystem.join(self.layout_tests_dir(), baseline_filename) - - def expected_checksum(self, test): - """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test.""" - path = self.expected_filename(test, '.checksum') - if not self.path_exists(path): - return None - return self._filesystem.read_text_file(path) - - def expected_image(self, test): - """Returns the image we expect the test to produce.""" - path = self.expected_filename(test, '.png') - if not self.path_exists(path): - return None - return self._filesystem.read_binary_file(path) - - def expected_text(self, test): - """Returns the text output we expect the test to produce.""" - # FIXME: DRT output is actually utf-8, but since we don't decode the - # output from DRT (instead treating it as a binary string), we read the - # baselines as a binary string, too. - path = self.expected_filename(test, '.txt') - if not self.path_exists(path): - return '' - text = self._filesystem.read_binary_file(path) - return text.strip("\r\n").replace("\r\n", "\n") + "\n" - - def filename_to_uri(self, filename): - """Convert a test file (which is an absolute path) to a URI.""" - LAYOUTTEST_HTTP_DIR = "http/tests/" - LAYOUTTEST_WEBSOCKET_DIR = "http/tests/websocket/tests/" - - relative_path = self.relative_test_filename(filename) - port = None - use_ssl = False - - if (relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR) - or relative_path.startswith(LAYOUTTEST_HTTP_DIR)): - relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] - port = 8000 - - # Make http/tests/local run as local files. This is to mimic the - # logic in run-webkit-tests. - # - # TODO(dpranke): remove the media reference and the SSL reference? - if (port and not relative_path.startswith("local/") and - not relative_path.startswith("media/")): - if relative_path.startswith("ssl/"): - port += 443 - protocol = "https" - else: - protocol = "http" - return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) - - return path.abspath_to_uri(os.path.abspath(filename)) - - def tests(self, paths): - """Return the list of tests found (relative to layout_tests_dir().""" - return test_files.find(self, paths) - - def test_dirs(self): - """Returns the list of top-level test directories. - - Used by --clobber-old-results.""" - layout_tests_dir = self.layout_tests_dir() - return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)), - self._filesystem.listdir(layout_tests_dir)) - - def path_isdir(self, path): - """Return True if the path refers to a directory of tests.""" - # Used by test_expectations.py to apply rules to whole directories. 
- return self._filesystem.isdir(path) - - def path_exists(self, path): - """Return True if the path refers to an existing test or baseline.""" - # Used by test_expectations.py to determine if an entry refers to a - # valid test and by printing.py to determine if baselines exist. - return self._filesystem.exists(path) - - def driver_cmd_line(self): - """Prints the DRT command line that will be used.""" - driver = self.create_driver(0) - return driver.cmd_line() - - def update_baseline(self, path, data, encoding): - """Updates the baseline for a test. - - Args: - path: the actual path to use for baseline, not the path to - the test. This function is used to update either generic or - platform-specific baselines, but we can't infer which here. - data: contents of the baseline. - encoding: file encoding to use for the baseline. - """ - # FIXME: remove the encoding parameter in favor of text/binary - # functions. - if encoding is None: - self._filesystem.write_binary_file(path, data) - else: - self._filesystem.write_text_file(path, data) - - def uri_to_test_name(self, uri): - """Return the base layout test name for a given URI. - - This returns the test name for a given URI, e.g., if you passed in - "file:///src/LayoutTests/fast/html/keygen.html" it would return - "fast/html/keygen.html". - - """ - test = uri - if uri.startswith("file:///"): - prefix = path.abspath_to_uri(self.layout_tests_dir()) + "/" - return test[len(prefix):] - - if uri.startswith("http://127.0.0.1:8880/"): - # websocket tests - return test.replace('http://127.0.0.1:8880/', '') - - if uri.startswith("http://"): - # regular HTTP test - return test.replace('http://127.0.0.1:8000/', 'http/tests/') - - if uri.startswith("https://"): - return test.replace('https://127.0.0.1:8443/', 'http/tests/') - - raise NotImplementedError('unknown url type: %s' % uri) - - def layout_tests_dir(self): - """Return the absolute path to the top of the LayoutTests directory.""" - return self.path_from_webkit_base('LayoutTests') - - def skips_layout_test(self, test_name): - """Figures out if the givent test is being skipped or not. - - Test categories are handled as well.""" - for test_or_category in self.skipped_layout_tests(): - if test_or_category == test_name: - return True - category = self._filesystem.join(self.layout_tests_dir(), - test_or_category) - if (self._filesystem.isdir(category) and - test_name.startswith(test_or_category)): - return True - return False - - def maybe_make_directory(self, *path): - """Creates the specified directory if it doesn't already exist.""" - self._filesystem.maybe_make_directory(*path) - - def name(self): - """Return the name of the port (e.g., 'mac', 'chromium-win-xp'). - - Note that this is different from the test_platform_name(), which - may be different (e.g., 'win-xp' instead of 'chromium-win-xp'.""" - return self._name - - def get_option(self, name, default_value=None): - # FIXME: Eventually we should not have to do a test for - # hasattr(), and we should be able to just do - # self.options.value. See additional FIXME in the constructor. 
- if hasattr(self._options, name): - return getattr(self._options, name) - return default_value - - def set_option_default(self, name, default_value): - if not hasattr(self._options, name): - return setattr(self._options, name, default_value) - - def path_from_webkit_base(self, *comps): - """Returns the full path to path made by joining the top of the - WebKit source tree and the list of path components in |*comps|.""" - return self._config.path_from_webkit_base(*comps) - - def script_path(self, script_name): - return self._config.script_path(script_name) - - def path_to_test_expectations_file(self): - """Update the test expectations to the passed-in string. - - This is used by the rebaselining tool. Raises NotImplementedError - if the port does not use expectations files.""" - raise NotImplementedError('Port.path_to_test_expectations_file') - - def relative_test_filename(self, filename): - """Relative unix-style path for a filename under the LayoutTests - directory. Filenames outside the LayoutTests directory should raise - an error.""" - #assert(filename.startswith(self.layout_tests_dir())) - return filename[len(self.layout_tests_dir()) + 1:] - - def results_directory(self): - """Absolute path to the place to store the test results.""" - raise NotImplementedError('Port.results_directory') - - def setup_test_run(self): - """Perform port-specific work at the beginning of a test run.""" - pass - - def setup_environ_for_server(self): - """Perform port-specific work at the beginning of a server launch. - - Returns: - Operating-system's environment. - """ - return os.environ.copy() - - def show_results_html_file(self, results_filename): - """This routine should display the HTML file pointed at by - results_filename in a users' browser.""" - return self._user.open_url(results_filename) - - def create_driver(self, worker_number): - """Return a newly created base.Driver subclass for starting/stopping - the test driver.""" - raise NotImplementedError('Port.create_driver') - - def start_helper(self): - """If a port needs to reconfigure graphics settings or do other - things to ensure a known test configuration, it should override this - method.""" - pass - - def start_http_server(self): - """Start a web server if it is available. Do nothing if - it isn't. This routine is allowed to (and may) fail if a server - is already running.""" - if self.get_option('use_apache'): - self._http_server = apache_http_server.LayoutTestApacheHttpd(self, - self.get_option('results_directory')) - else: - self._http_server = http_server.Lighttpd(self, - self.get_option('results_directory')) - self._http_server.start() - - def start_websocket_server(self): - """Start a websocket server if it is available. Do nothing if - it isn't. This routine is allowed to (and may) fail if a server - is already running.""" - self._websocket_server = websocket_server.PyWebSocket(self, - self.get_option('results_directory')) - self._websocket_server.start() - - def acquire_http_lock(self): - self._http_lock = http_lock.HttpLock(None) - self._http_lock.wait_for_httpd_lock() - - def stop_helper(self): - """Shut down the test helper if it is running. Do nothing if - it isn't, or it isn't available. If a port overrides start_helper() - it must override this routine as well.""" - pass - - def stop_http_server(self): - """Shut down the http server if it is running. 
Do nothing if - it isn't, or it isn't available.""" - if self._http_server: - self._http_server.stop() - - def stop_websocket_server(self): - """Shut down the websocket server if it is running. Do nothing if - it isn't, or it isn't available.""" - if self._websocket_server: - self._websocket_server.stop() - - def release_http_lock(self): - if self._http_lock: - self._http_lock.cleanup_http_lock() - - def test_expectations(self): - """Returns the test expectations for this port. - - Basically this string should contain the equivalent of a - test_expectations file. See test_expectations.py for more details.""" - raise NotImplementedError('Port.test_expectations') - - def test_expectations_overrides(self): - """Returns an optional set of overrides for the test_expectations. - - This is used by ports that have code in two repositories, and where - it is possible that you might need "downstream" expectations that - temporarily override the "upstream" expectations until the port can - sync up the two repos.""" - return None - - def test_base_platform_names(self): - """Return a list of the 'base' platforms on your port. The base - platforms represent different architectures, operating systems, - or implementations (as opposed to different versions of a single - platform). For example, 'mac' and 'win' might be different base - platforms, wherease 'mac-tiger' and 'mac-leopard' might be - different platforms. This routine is used by the rebaselining tool - and the dashboards, and the strings correspond to the identifiers - in your test expectations (*not* necessarily the platform names - themselves).""" - raise NotImplementedError('Port.base_test_platforms') - - def test_platform_name(self): - """Returns the string that corresponds to the given platform name - in the test expectations. This may be the same as name(), or it - may be different. For example, chromium returns 'mac' for - 'chromium-mac'.""" - raise NotImplementedError('Port.test_platform_name') - - def test_platforms(self): - """Returns the list of test platform identifiers as used in the - test_expectations and on dashboards, the rebaselining tool, etc. - - Note that this is not necessarily the same as the list of ports, - which must be globally unique (e.g., both 'chromium-mac' and 'mac' - might return 'mac' as a test_platform name'.""" - raise NotImplementedError('Port.platforms') - - def test_platform_name_to_name(self, test_platform_name): - """Returns the Port platform name that corresponds to the name as - referenced in the expectations file. E.g., "mac" returns - "chromium-mac" on the Chromium ports.""" - raise NotImplementedError('Port.test_platform_name_to_name') - - def version(self): - """Returns a string indicating the version of a given platform, e.g. - '-leopard' or '-xp'. - - This is used to help identify the exact port when parsing test - expectations, determining search paths, and logging information.""" - raise NotImplementedError('Port.version') - - def test_repository_paths(self): - """Returns a list of (repository_name, repository_path) tuples - of its depending code base. By default it returns a list that only - contains a ('webkit', <webkitRepossitoryPath>) tuple. 
- """ - return [('webkit', self.layout_tests_dir())] - - - _WDIFF_DEL = '##WDIFF_DEL##' - _WDIFF_ADD = '##WDIFF_ADD##' - _WDIFF_END = '##WDIFF_END##' - - def _format_wdiff_output_as_html(self, wdiff): - wdiff = cgi.escape(wdiff) - wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>") - wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>") - wdiff = wdiff.replace(self._WDIFF_END, "</span>") - html = "<head><style>.del { background: #faa; } " - html += ".add { background: #afa; }</style></head>" - html += "<pre>%s</pre>" % wdiff - return html - - def _wdiff_command(self, actual_filename, expected_filename): - executable = self._path_to_wdiff() - return [executable, - "--start-delete=%s" % self._WDIFF_DEL, - "--end-delete=%s" % self._WDIFF_END, - "--start-insert=%s" % self._WDIFF_ADD, - "--end-insert=%s" % self._WDIFF_END, - actual_filename, - expected_filename] - - @staticmethod - def _handle_wdiff_error(script_error): - # Exit 1 means the files differed, any other exit code is an error. - if script_error.exit_code != 1: - raise script_error - - def _run_wdiff(self, actual_filename, expected_filename): - """Runs wdiff and may throw exceptions. - This is mostly a hook for unit testing.""" - # Diffs are treated as binary as they may include multiple files - # with conflicting encodings. Thus we do not decode the output. - command = self._wdiff_command(actual_filename, expected_filename) - wdiff = self._executive.run_command(command, decode_output=False, - error_handler=self._handle_wdiff_error) - return self._format_wdiff_output_as_html(wdiff) - - def wdiff_text(self, actual_filename, expected_filename): - """Returns a string of HTML indicating the word-level diff of the - contents of the two filenames. Returns an empty string if word-level - diffing isn't available.""" - if not self._wdiff_available: - return "" - try: - # It's possible to raise a ScriptError we pass wdiff invalid paths. - return self._run_wdiff(actual_filename, expected_filename) - except OSError, e: - if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]: - # Silently ignore cases where wdiff is missing. - self._wdiff_available = False - return "" - raise - - # This is a class variable so we can test error output easily. - _pretty_patch_error_html = "Failed to run PrettyPatch, see error log." - - def pretty_patch_text(self, diff_path): - if not self._pretty_patch_available: - return self._pretty_patch_error_html - command = ("ruby", "-I", os.path.dirname(self._pretty_patch_path), - self._pretty_patch_path, diff_path) - try: - # Diffs are treated as binary (we pass decode_output=False) as they - # may contain multiple files of conflicting encodings. - return self._executive.run_command(command, decode_output=False) - except OSError, e: - # If the system is missing ruby log the error and stop trying. - self._pretty_patch_available = False - _log.error("Failed to run PrettyPatch (%s): %s" % (command, e)) - return self._pretty_patch_error_html - except ScriptError, e: - # If ruby failed to run for some reason, log the command - # output and stop trying. - self._pretty_patch_available = False - _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, - e.message_with_output())) - return self._pretty_patch_error_html - - def default_configuration(self): - return self._config.default_configuration() - - # - # PROTECTED ROUTINES - # - # The routines below should only be called by routines in this class - # or any of its subclasses. 
- # - def _webkit_build_directory(self, args): - return self._config.build_directory(args[0]) - - def _path_to_apache(self): - """Returns the full path to the apache binary. - - This is needed only by ports that use the apache_http_server module.""" - raise NotImplementedError('Port.path_to_apache') - - def _path_to_apache_config_file(self): - """Returns the full path to the apache binary. - - This is needed only by ports that use the apache_http_server module.""" - raise NotImplementedError('Port.path_to_apache_config_file') - - def _path_to_driver(self, configuration=None): - """Returns the full path to the test driver (DumpRenderTree).""" - raise NotImplementedError('Port._path_to_driver') - - def _path_to_webcore_library(self): - """Returns the full path to a built copy of WebCore.""" - raise NotImplementedError('Port.path_to_webcore_library') - - def _path_to_helper(self): - """Returns the full path to the layout_test_helper binary, which - is used to help configure the system for the test run, or None - if no helper is needed. - - This is likely only used by start/stop_helper().""" - raise NotImplementedError('Port._path_to_helper') - - def _path_to_image_diff(self): - """Returns the full path to the image_diff binary, or None if it - is not available. - - This is likely used only by diff_image()""" - raise NotImplementedError('Port.path_to_image_diff') - - def _path_to_lighttpd(self): - """Returns the path to the LigHTTPd binary. - - This is needed only by ports that use the http_server.py module.""" - raise NotImplementedError('Port._path_to_lighttpd') - - def _path_to_lighttpd_modules(self): - """Returns the path to the LigHTTPd modules directory. - - This is needed only by ports that use the http_server.py module.""" - raise NotImplementedError('Port._path_to_lighttpd_modules') - - def _path_to_lighttpd_php(self): - """Returns the path to the LigHTTPd PHP executable. - - This is needed only by ports that use the http_server.py module.""" - raise NotImplementedError('Port._path_to_lighttpd_php') - - def _path_to_wdiff(self): - """Returns the full path to the wdiff binary, or None if it is - not available. - - This is likely used only by wdiff_text()""" - raise NotImplementedError('Port._path_to_wdiff') - - def _shut_down_http_server(self, pid): - """Forcefully and synchronously kills the web server. - - This routine should only be called from http_server.py or its - subclasses.""" - raise NotImplementedError('Port._shut_down_http_server') - - def _webkit_baseline_path(self, platform): - """Return the full path to the top of the baseline tree for a - given platform.""" - return self._filesystem.join(self.layout_tests_dir(), 'platform', - platform) - - -class Driver: - """Abstract interface for the DumpRenderTree interface.""" - - def __init__(self, port, worker_number): - """Initialize a Driver to subsequently run tests. - - Typically this routine will spawn DumpRenderTree in a config - ready for subsequent input. - - port - reference back to the port object. - worker_number - identifier for a particular worker/driver instance - """ - raise NotImplementedError('Driver.__init__') - - def run_test(self, test_input): - """Run a single test and return the results. - - Note that it is okay if a test times out or crashes and leaves - the driver in an indeterminate state. The upper layers of the program - are responsible for cleaning up and ensuring things are okay. - - Args: - test_input: a TestInput object - - Returns a TestOutput object. 
- """ - raise NotImplementedError('Driver.run_test') - - # FIXME: This is static so we can test it w/o creating a Base instance. - @classmethod - def _command_wrapper(cls, wrapper_option): - # Hook for injecting valgrind or other runtime instrumentation, - # used by e.g. tools/valgrind/valgrind_tests.py. - wrapper = [] - browser_wrapper = os.environ.get("BROWSER_WRAPPER", None) - if browser_wrapper: - # FIXME: There seems to be no reason to use BROWSER_WRAPPER over --wrapper. - # Remove this code any time after the date listed below. - _log.error("BROWSER_WRAPPER is deprecated, please use --wrapper instead.") - _log.error("BROWSER_WRAPPER will be removed any time after June 1st 2010 and your scripts will break.") - wrapper += [browser_wrapper] - - if wrapper_option: - wrapper += shlex.split(wrapper_option) - return wrapper - - def poll(self): - """Returns None if the Driver is still running. Returns the returncode - if it has exited.""" - raise NotImplementedError('Driver.poll') - - def stop(self): - raise NotImplementedError('Driver.stop') diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py deleted file mode 100644 index 8d586e3..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base_unittest.py +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import optparse -import os -import sys -import tempfile -import unittest - -from webkitpy.common.system.executive import Executive, ScriptError -from webkitpy.common.system import executive_mock -from webkitpy.common.system import filesystem -from webkitpy.common.system import outputcapture -from webkitpy.common.system.path import abspath_to_uri -from webkitpy.thirdparty.mock import Mock -from webkitpy.tool import mocktool - -import base -import config -import config_mock - - -class PortTest(unittest.TestCase): - def test_format_wdiff_output_as_html(self): - output = "OUTPUT %s %s %s" % (base.Port._WDIFF_DEL, base.Port._WDIFF_ADD, base.Port._WDIFF_END) - html = base.Port()._format_wdiff_output_as_html(output) - expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>" - self.assertEqual(html, expected_html) - - def test_wdiff_command(self): - port = base.Port() - port._path_to_wdiff = lambda: "/path/to/wdiff" - command = port._wdiff_command("/actual/path", "/expected/path") - expected_command = [ - "/path/to/wdiff", - "--start-delete=##WDIFF_DEL##", - "--end-delete=##WDIFF_END##", - "--start-insert=##WDIFF_ADD##", - "--end-insert=##WDIFF_END##", - "/actual/path", - "/expected/path", - ] - self.assertEqual(command, expected_command) - - def _file_with_contents(self, contents, encoding="utf-8"): - new_file = tempfile.NamedTemporaryFile() - new_file.write(contents.encode(encoding)) - new_file.flush() - return new_file - - def test_pretty_patch_os_error(self): - port = base.Port(executive=executive_mock.MockExecutive2(exception=OSError)) - oc = outputcapture.OutputCapture() - oc.capture_output() - self.assertEqual(port.pretty_patch_text("patch.txt"), - port._pretty_patch_error_html) - - # This tests repeated calls to make sure we cache the result. - self.assertEqual(port.pretty_patch_text("patch.txt"), - port._pretty_patch_error_html) - oc.restore_output() - - def test_pretty_patch_script_error(self): - # FIXME: This is some ugly white-box test hacking ... - base._pretty_patch_available = True - port = base.Port(executive=executive_mock.MockExecutive2(exception=ScriptError)) - self.assertEqual(port.pretty_patch_text("patch.txt"), - port._pretty_patch_error_html) - - # This tests repeated calls to make sure we cache the result. - self.assertEqual(port.pretty_patch_text("patch.txt"), - port._pretty_patch_error_html) - - def test_run_wdiff(self): - executive = Executive() - # This may fail on some systems. We could ask the port - # object for the wdiff path, but since we don't know what - # port object to use, this is sufficient for now. - try: - wdiff_path = executive.run_command(["which", "wdiff"]).rstrip() - except Exception, e: - wdiff_path = None - - port = base.Port() - port._path_to_wdiff = lambda: wdiff_path - - if wdiff_path: - # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5 - actual = self._file_with_contents(u"foo") - expected = self._file_with_contents(u"bar") - wdiff = port._run_wdiff(actual.name, expected.name) - expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>" - self.assertEqual(wdiff, expected_wdiff) - # Running the full wdiff_text method should give the same result. - port._wdiff_available = True # In case it's somehow already disabled. 
- wdiff = port.wdiff_text(actual.name, expected.name) - self.assertEqual(wdiff, expected_wdiff) - # wdiff should still be available after running wdiff_text with a valid diff. - self.assertTrue(port._wdiff_available) - actual.close() - expected.close() - - # Bogus paths should raise a script error. - self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2") - self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2") - # wdiff will still be available after running wdiff_text with invalid paths. - self.assertTrue(port._wdiff_available) - base._wdiff_available = True - - # If wdiff does not exist _run_wdiff should throw an OSError. - port._path_to_wdiff = lambda: "/invalid/path/to/wdiff" - self.assertRaises(OSError, port._run_wdiff, "foo", "bar") - - # wdiff_text should not throw an error if wdiff does not exist. - self.assertEqual(port.wdiff_text("foo", "bar"), "") - # However wdiff should not be available after running wdiff_text if wdiff is missing. - self.assertFalse(port._wdiff_available) - - def test_diff_text(self): - port = base.Port() - # Make sure that we don't run into decoding exceptions when the - # filenames are unicode, with regular or malformed input (expected or - # actual input is always raw bytes, not unicode). - port.diff_text('exp', 'act', 'exp.txt', 'act.txt') - port.diff_text('exp', 'act', u'exp.txt', 'act.txt') - port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt') - - port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt') - port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt') - - # Though expected and actual files should always be read in with no - # encoding (and be stored as str objects), test unicode inputs just to - # be safe. - port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt') - port.diff_text( - u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt') - - # And make sure we actually get diff output. - diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt') - self.assertTrue('foo' in diff) - self.assertTrue('bar' in diff) - self.assertTrue('exp.txt' in diff) - self.assertTrue('act.txt' in diff) - self.assertFalse('nosuchthing' in diff) - - def test_default_configuration_notfound(self): - # Test that we delegate to the config object properly. - port = base.Port(config=config_mock.MockConfig(default_configuration='default')) - self.assertEqual(port.default_configuration(), 'default') - - def test_layout_tests_skipping(self): - port = base.Port() - port.skipped_layout_tests = lambda: ['foo/bar.html', 'media'] - self.assertTrue(port.skips_layout_test('foo/bar.html')) - self.assertTrue(port.skips_layout_test('media/video-zoom.html')) - self.assertFalse(port.skips_layout_test('foo/foo.html')) - - def test_setup_test_run(self): - port = base.Port() - # This routine is a no-op. We just test it for coverage. - port.setup_test_run() - - def test_test_dirs(self): - port = base.Port() - dirs = port.test_dirs() - self.assertTrue('canvas' in dirs) - self.assertTrue('css2.1' in dirs) - - def test_filename_to_uri(self): - port = base.Port() - layout_test_dir = port.layout_tests_dir() - test_file = os.path.join(layout_test_dir, "foo", "bar.html") - - # On Windows, absolute paths are of the form "c:\foo.txt". However, - # all current browsers (except for Opera) normalize file URLs by - # prepending an additional "/" as if the absolute path was - # "/c:/foo.txt". This means that all file URLs end up with "file:///" - # at the beginning. 
- if sys.platform == 'win32': - prefix = "file:///" - path = test_file.replace("\\", "/") - else: - prefix = "file://" - path = test_file - - self.assertEqual(port.filename_to_uri(test_file), - abspath_to_uri(test_file)) - - def test_get_option__set(self): - options, args = optparse.OptionParser().parse_args([]) - options.foo = 'bar' - port = base.Port(options=options) - self.assertEqual(port.get_option('foo'), 'bar') - - def test_get_option__unset(self): - port = base.Port() - self.assertEqual(port.get_option('foo'), None) - - def test_get_option__default(self): - port = base.Port() - self.assertEqual(port.get_option('foo', 'bar'), 'bar') - - def test_set_option_default__unset(self): - port = base.Port() - port.set_option_default('foo', 'bar') - self.assertEqual(port.get_option('foo'), 'bar') - - def test_set_option_default__set(self): - options, args = optparse.OptionParser().parse_args([]) - options.foo = 'bar' - port = base.Port(options=options) - # This call should have no effect. - port.set_option_default('foo', 'new_bar') - self.assertEqual(port.get_option('foo'), 'bar') - - def test_name__unset(self): - port = base.Port() - self.assertEqual(port.name(), None) - - def test_name__set(self): - port = base.Port(port_name='foo') - self.assertEqual(port.name(), 'foo') - - -class VirtualTest(unittest.TestCase): - """Tests that various methods expected to be virtual are.""" - def assertVirtual(self, method, *args, **kwargs): - self.assertRaises(NotImplementedError, method, *args, **kwargs) - - def test_virtual_methods(self): - port = base.Port() - self.assertVirtual(port.baseline_path) - self.assertVirtual(port.baseline_search_path) - self.assertVirtual(port.check_build, None) - self.assertVirtual(port.check_image_diff) - self.assertVirtual(port.create_driver, 0) - self.assertVirtual(port.diff_image, None, None) - self.assertVirtual(port.path_to_test_expectations_file) - self.assertVirtual(port.test_platform_name) - self.assertVirtual(port.results_directory) - self.assertVirtual(port.test_expectations) - self.assertVirtual(port.test_base_platform_names) - self.assertVirtual(port.test_platform_name) - self.assertVirtual(port.test_platforms) - self.assertVirtual(port.test_platform_name_to_name, None) - self.assertVirtual(port.version) - self.assertVirtual(port._path_to_apache) - self.assertVirtual(port._path_to_apache_config_file) - self.assertVirtual(port._path_to_driver) - self.assertVirtual(port._path_to_helper) - self.assertVirtual(port._path_to_image_diff) - self.assertVirtual(port._path_to_lighttpd) - self.assertVirtual(port._path_to_lighttpd_modules) - self.assertVirtual(port._path_to_lighttpd_php) - self.assertVirtual(port._path_to_wdiff) - self.assertVirtual(port._shut_down_http_server, None) - - def test_virtual_driver_method(self): - self.assertRaises(NotImplementedError, base.Driver, base.Port(), - 0) - - def test_virtual_driver_methods(self): - class VirtualDriver(base.Driver): - def __init__(self): - pass - - driver = VirtualDriver() - self.assertVirtual(driver.run_test, None) - self.assertVirtual(driver.poll) - self.assertVirtual(driver.stop) - - -class DriverTest(unittest.TestCase): - - def _assert_wrapper(self, wrapper_string, expected_wrapper): - wrapper = base.Driver._command_wrapper(wrapper_string) - self.assertEqual(wrapper, expected_wrapper) - - def test_command_wrapper(self): - self._assert_wrapper(None, []) - self._assert_wrapper("valgrind", ["valgrind"]) - - # Validate that shlex works as expected. 
- command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo" - expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"] - self._assert_wrapper(command_with_spaces, expected_parse) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py deleted file mode 100644 index 8fe685a..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py +++ /dev/null @@ -1,555 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Chromium implementations of the Port interface.""" - -from __future__ import with_statement - -import codecs -import errno -import logging -import os -import re -import shutil -import signal -import subprocess -import sys -import tempfile -import time -import webbrowser - -from webkitpy.common.system.path import cygpath -from webkitpy.layout_tests.layout_package import test_expectations -from webkitpy.layout_tests.layout_package import test_output - -import base -import http_server - -# Chromium DRT on OSX uses WebKitDriver. -if sys.platform == 'darwin': - import webkit - -import websocket_server - -_log = logging.getLogger("webkitpy.layout_tests.port.chromium") - - -# FIXME: This function doesn't belong in this package. -def check_file_exists(path_to_file, file_description, override_step=None, - logging=True): - """Verify the file is present where expected or log an error. - - Args: - file_name: The (human friendly) name or description of the file - you're looking for (e.g., "HTTP Server"). Used for error logging. - override_step: An optional string to be logged if the check fails. 
- logging: Whether or not log the error messages.""" - if not os.path.exists(path_to_file): - if logging: - _log.error('Unable to find %s' % file_description) - _log.error(' at %s' % path_to_file) - if override_step: - _log.error(' %s' % override_step) - _log.error('') - return False - return True - - -class ChromiumPort(base.Port): - """Abstract base class for Chromium implementations of the Port class.""" - - def __init__(self, **kwargs): - base.Port.__init__(self, **kwargs) - self._chromium_base_dir = None - - def baseline_path(self): - return self._webkit_baseline_path(self._name) - - def check_build(self, needs_http): - result = True - - dump_render_tree_binary_path = self._path_to_driver() - result = check_file_exists(dump_render_tree_binary_path, - 'test driver') and result - if result and self.get_option('build'): - result = self._check_driver_build_up_to_date( - self.get_option('configuration')) - else: - _log.error('') - - helper_path = self._path_to_helper() - if helper_path: - result = check_file_exists(helper_path, - 'layout test helper') and result - - if self.get_option('pixel_tests'): - result = self.check_image_diff( - 'To override, invoke with --no-pixel-tests') and result - - # It's okay if pretty patch isn't available, but we will at - # least log a message. - self.check_pretty_patch() - - return result - - def check_sys_deps(self, needs_http): - cmd = [self._path_to_driver(), '--check-layout-test-sys-deps'] - if self._executive.run_command(cmd, return_exit_code=True): - _log.error('System dependencies check failed.') - _log.error('To override, invoke with --nocheck-sys-deps') - _log.error('') - return False - return True - - def check_image_diff(self, override_step=None, logging=True): - image_diff_path = self._path_to_image_diff() - return check_file_exists(image_diff_path, 'image diff exe', - override_step, logging) - - def diff_image(self, expected_contents, actual_contents, - diff_filename=None): - executable = self._path_to_image_diff() - - tempdir = tempfile.mkdtemp() - expected_filename = os.path.join(tempdir, "expected.png") - with open(expected_filename, 'w+b') as file: - file.write(expected_contents) - actual_filename = os.path.join(tempdir, "actual.png") - with open(actual_filename, 'w+b') as file: - file.write(actual_contents) - - if diff_filename: - cmd = [executable, '--diff', expected_filename, - actual_filename, diff_filename] - else: - cmd = [executable, expected_filename, actual_filename] - - result = True - try: - exit_code = self._executive.run_command(cmd, return_exit_code=True) - if exit_code == 0: - # The images are the same. - result = False - elif exit_code != 1: - _log.error("image diff returned an exit code of " - + str(exit_code)) - # Returning False here causes the script to think that we - # successfully created the diff even though we didn't. If - # we return True, we think that the images match but the hashes - # don't match. - # FIXME: Figure out why image_diff returns other values. 
- result = False - except OSError, e: - if e.errno == errno.ENOENT or e.errno == errno.EACCES: - _compare_available = False - else: - raise e - finally: - shutil.rmtree(tempdir, ignore_errors=True) - return result - - def driver_name(self): - if self._options.use_drt: - return "DumpRenderTree" - return "test_shell" - - def path_from_chromium_base(self, *comps): - """Returns the full path to path made by joining the top of the - Chromium source tree and the list of path components in |*comps|.""" - if not self._chromium_base_dir: - abspath = os.path.abspath(__file__) - offset = abspath.find('third_party') - if offset == -1: - self._chromium_base_dir = os.path.join( - abspath[0:abspath.find('WebKitTools')], - 'WebKit', 'chromium') - else: - self._chromium_base_dir = abspath[0:offset] - return os.path.join(self._chromium_base_dir, *comps) - - def path_to_test_expectations_file(self): - return self.path_from_webkit_base('LayoutTests', 'platform', - 'chromium', 'test_expectations.txt') - - def results_directory(self): - try: - return self.path_from_chromium_base('webkit', - self.get_option('configuration'), - self.get_option('results_directory')) - except AssertionError: - return self._build_path(self.get_option('configuration'), - self.get_option('results_directory')) - - def setup_test_run(self): - # Delete the disk cache if any to ensure a clean test run. - dump_render_tree_binary_path = self._path_to_driver() - cachedir = os.path.split(dump_render_tree_binary_path)[0] - cachedir = os.path.join(cachedir, "cache") - if os.path.exists(cachedir): - shutil.rmtree(cachedir) - - def create_driver(self, worker_number): - """Starts a new Driver and returns a handle to it.""" - if self.get_option('use_drt') and sys.platform == 'darwin': - return webkit.WebKitDriver(self, worker_number) - return ChromiumDriver(self, worker_number) - - def start_helper(self): - helper_path = self._path_to_helper() - if helper_path: - _log.debug("Starting layout helper %s" % helper_path) - # Note: Not thread safe: http://bugs.python.org/issue2320 - self._helper = subprocess.Popen([helper_path], - stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None) - is_ready = self._helper.stdout.readline() - if not is_ready.startswith('ready'): - _log.error("layout_test_helper failed to be ready") - - def stop_helper(self): - if self._helper: - _log.debug("Stopping layout test helper") - self._helper.stdin.write("x\n") - self._helper.stdin.close() - # wait() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - self._helper.wait() - - def test_base_platform_names(self): - return ('linux', 'mac', 'win') - - def test_expectations(self): - """Returns the test expectations for this port. - - Basically this string should contain the equivalent of a - test_expectations file. See test_expectations.py for more details.""" - expectations_path = self.path_to_test_expectations_file() - with codecs.open(expectations_path, "r", "utf-8") as file: - return file.read() - - def test_expectations_overrides(self): - # FIXME: This drt_overrides handling should be removed when we switch - # from tes_shell to DRT. 
- drt_overrides = '' - if self.get_option('use_drt'): - drt_overrides_path = self.path_from_webkit_base('LayoutTests', - 'platform', 'chromium', 'drt_expectations.txt') - if os.path.exists(drt_overrides_path): - with codecs.open(drt_overrides_path, "r", "utf-8") as file: - drt_overrides = file.read() - - try: - overrides_path = self.path_from_chromium_base('webkit', 'tools', - 'layout_tests', 'test_expectations.txt') - except AssertionError: - return None - if not os.path.exists(overrides_path): - return None - with codecs.open(overrides_path, "r", "utf-8") as file: - return file.read() + drt_overrides - - def skipped_layout_tests(self, extra_test_files=None): - expectations_str = self.test_expectations() - overrides_str = self.test_expectations_overrides() - test_platform_name = self.test_platform_name() - is_debug_mode = False - - all_test_files = self.tests([]) - if extra_test_files: - all_test_files.update(extra_test_files) - - expectations = test_expectations.TestExpectations( - self, all_test_files, expectations_str, test_platform_name, - is_debug_mode, is_lint_mode=True, overrides=overrides_str) - tests_dir = self.layout_tests_dir() - return [self.relative_test_filename(test) - for test in expectations.get_tests_with_result_type(test_expectations.SKIP)] - - def test_platform_names(self): - return self.test_base_platform_names() + ('win-xp', - 'win-vista', 'win-7') - - def test_platform_name_to_name(self, test_platform_name): - if test_platform_name in self.test_platform_names(): - return 'chromium-' + test_platform_name - raise ValueError('Unsupported test_platform_name: %s' % - test_platform_name) - - def test_repository_paths(self): - # Note: for JSON file's backward-compatibility we use 'chrome' rather - # than 'chromium' here. - repos = super(ChromiumPort, self).test_repository_paths() - repos.append(('chrome', self.path_from_chromium_base())) - return repos - - # - # PROTECTED METHODS - # - # These routines should only be called by other methods in this file - # or any subclasses. - # - - def _check_driver_build_up_to_date(self, configuration): - if configuration in ('Debug', 'Release'): - try: - debug_path = self._path_to_driver('Debug') - release_path = self._path_to_driver('Release') - - debug_mtime = os.stat(debug_path).st_mtime - release_mtime = os.stat(release_path).st_mtime - - if (debug_mtime > release_mtime and configuration == 'Release' or - release_mtime > debug_mtime and configuration == 'Debug'): - _log.warning('You are not running the most ' - 'recent DumpRenderTree binary. You need to ' - 'pass --debug or not to select between ' - 'Debug and Release.') - _log.warning('') - # This will fail if we don't have both a debug and release binary. - # That's fine because, in this case, we must already be running the - # most up-to-date one. - except OSError: - pass - return True - - def _chromium_baseline_path(self, platform): - if platform is None: - platform = self.name() - return self.path_from_webkit_base('LayoutTests', 'platform', platform) - - def _convert_path(self, path): - """Handles filename conversion for subprocess command line args.""" - # See note above in diff_image() for why we need this. 
- if sys.platform == 'cygwin': - return cygpath(path) - return path - - def _path_to_image_diff(self): - binary_name = 'image_diff' - if self.get_option('use_drt'): - binary_name = 'ImageDiff' - return self._build_path(self.get_option('configuration'), binary_name) - - -class ChromiumDriver(base.Driver): - """Abstract interface for test_shell.""" - - def __init__(self, port, worker_number): - self._port = port - self._worker_number = worker_number - self._image_path = None - if self._port.get_option('pixel_tests'): - self._image_path = os.path.join( - self._port.get_option('results_directory'), - 'png_result%s.png' % self._worker_number) - - def cmd_line(self): - cmd = self._command_wrapper(self._port.get_option('wrapper')) - cmd.append(self._port._path_to_driver()) - if self._port.get_option('pixel_tests'): - # See note above in diff_image() for why we need _convert_path(). - cmd.append("--pixel-tests=" + - self._port._convert_path(self._image_path)) - - if self._port.get_option('use_drt'): - cmd.append('--test-shell') - else: - cmd.append('--layout-tests') - - if self._port.get_option('startup_dialog'): - cmd.append('--testshell-startup-dialog') - - if self._port.get_option('gp_fault_error_box'): - cmd.append('--gp-fault-error-box') - - if self._port.get_option('js_flags') is not None: - cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"') - - if self._port.get_option('multiple_loads') > 0: - cmd.append('--multiple-loads=' + str(self._port.get_option('multiple_loads'))) - - if self._port.get_option('accelerated_compositing'): - cmd.append('--enable-accelerated-compositing') - - if self._port.get_option('accelerated_2d_canvas'): - cmd.append('--enable-accelerated-2d-canvas') - return cmd - - def start(self): - # FIXME: Should be an error to call this method twice. - cmd = self.cmd_line() - - # We need to pass close_fds=True to work around Python bug #2320 - # (otherwise we can hang when we kill DumpRenderTree when we are running - # multiple threads). See http://bugs.python.org/issue2320 . - # Note that close_fds isn't supported on Windows, but this bug only - # shows up on Mac and Linux. - close_flag = sys.platform not in ('win32', 'cygwin') - self._proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - close_fds=close_flag) - - def poll(self): - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - return self._proc.poll() - - def _write_command_and_read_line(self, input=None): - """Returns a tuple: (line, did_crash)""" - try: - if input: - if isinstance(input, unicode): - # TestShell expects utf-8 - input = input.encode("utf-8") - self._proc.stdin.write(input) - # DumpRenderTree text output is always UTF-8. However some tests - # (e.g. webarchive) may spit out binary data instead of text so we - # don't bother to decode the output (for either DRT or test_shell). - line = self._proc.stdout.readline() - # We could assert() here that line correctly decodes as UTF-8. 
- return (line, False) - except IOError, e: - _log.error("IOError communicating w/ test_shell: " + str(e)) - return (None, True) - - def _test_shell_command(self, uri, timeoutms, checksum): - cmd = uri - if timeoutms: - cmd += ' ' + str(timeoutms) - if checksum: - cmd += ' ' + checksum - cmd += "\n" - return cmd - - def _output_image(self): - """Returns the image output which driver generated.""" - png_path = self._image_path - if png_path and os.path.isfile(png_path): - with open(png_path, 'rb') as image_file: - return image_file.read() - else: - return None - - def _output_image_with_retry(self): - # Retry a few more times because open() sometimes fails on Windows, - # raising "IOError: [Errno 13] Permission denied:" - retry_num = 50 - timeout_seconds = 5.0 - for i in range(retry_num): - try: - return self._output_image() - except IOError, e: - if e.errno == errno.EACCES: - time.sleep(timeout_seconds / retry_num) - else: - raise e - return self._output_image() - - def run_test(self, test_input): - output = [] - error = [] - crash = False - timeout = False - actual_uri = None - actual_checksum = None - - start_time = time.time() - - uri = self._port.filename_to_uri(test_input.filename) - cmd = self._test_shell_command(uri, test_input.timeout, - test_input.image_hash) - (line, crash) = self._write_command_and_read_line(input=cmd) - - while not crash and line.rstrip() != "#EOF": - # Make sure we haven't crashed. - if line == '' and self.poll() is not None: - # This is hex code 0xc000001d, which is used for abrupt - # termination. This happens if we hit ctrl+c from the prompt - # and we happen to be waiting on test_shell. - # sdoyon: Not sure for which OS and in what circumstances the - # above code is valid. What works for me under Linux to detect - # ctrl+c is for the subprocess returncode to be negative - # SIGINT. And that agrees with the subprocess documentation. - if (-1073741510 == self._proc.returncode or - - signal.SIGINT == self._proc.returncode): - raise KeyboardInterrupt - crash = True - break - - # Don't include #URL lines in our output - if line.startswith("#URL:"): - actual_uri = line.rstrip()[5:] - if uri != actual_uri: - # GURL capitalizes the drive letter of a file URL. - if (not re.search("^file:///[a-z]:", uri) or - uri.lower() != actual_uri.lower()): - _log.fatal("Test got out of sync:\n|%s|\n|%s|" % - (uri, actual_uri)) - raise AssertionError("test out of sync") - elif line.startswith("#MD5:"): - actual_checksum = line.rstrip()[5:] - elif line.startswith("#TEST_TIMED_OUT"): - timeout = True - # Test timed out, but we still need to read until #EOF. - elif actual_uri: - output.append(line) - else: - error.append(line) - - (line, crash) = self._write_command_and_read_line(input=None) - - run_time = time.time() - start_time - return test_output.TestOutput( - ''.join(output), self._output_image_with_retry(), actual_checksum, - crash, run_time, timeout, ''.join(error)) - - def stop(self): - if self._proc: - self._proc.stdin.close() - self._proc.stdout.close() - if self._proc.stderr: - self._proc.stderr.close() - if sys.platform not in ('win32', 'cygwin'): - # Closing stdin/stdout/stderr hangs sometimes on OS X, - # (see __init__(), above), and anyway we don't want to hang - # the harness if test_shell is buggy, so we wait a couple - # seconds to give test_shell a chance to clean up, but then - # force-kill the process if necessary. 
- KILL_TIMEOUT = 3.0 - timeout = time.time() + KILL_TIMEOUT - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - while self._proc.poll() is None and time.time() < timeout: - time.sleep(0.1) - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - if self._proc.poll() is None: - _log.warning('stopping test driver timed out, ' - 'killing it') - self._port._executive.kill_process(self._proc.pid) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py deleted file mode 100644 index 54a0fee..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import with_statement - -import codecs -import os -import sys - -import chromium_linux -import chromium_mac -import chromium_win - - -def get(**kwargs): - """Some tests have slightly different results when run while using - hardware acceleration. 
In those cases, we prepend an additional directory - to the baseline paths.""" - port_name = kwargs.get('port_name', None) - if port_name == 'chromium-gpu': - if sys.platform in ('cygwin', 'win32'): - port_name = 'chromium-gpu-win' - elif sys.platform == 'linux2': - port_name = 'chromium-gpu-linux' - elif sys.platform == 'darwin': - port_name = 'chromium-gpu-mac' - else: - raise NotImplementedError('unsupported platform: %s' % - sys.platform) - - if port_name == 'chromium-gpu-linux': - return ChromiumGpuLinuxPort(**kwargs) - - if port_name.startswith('chromium-gpu-mac'): - return ChromiumGpuMacPort(**kwargs) - - if port_name.startswith('chromium-gpu-win'): - return ChromiumGpuWinPort(**kwargs) - - raise NotImplementedError('unsupported port: %s' % port_name) - - -def _set_gpu_options(options): - if options: - if options.accelerated_compositing is None: - options.accelerated_compositing = True - if options.accelerated_2d_canvas is None: - options.accelerated_2d_canvas = True - if options.use_drt is None: - options.use_drt = True - - # FIXME: Remove this after http://codereview.chromium.org/5133001/ is enabled - # on the bots. - if options.builder_name is not None and not ' - GPU' in options.builder_name: - options.builder_name = options.builder_name + ' - GPU' - - -def _gpu_overrides(port): - try: - overrides_path = port.path_from_chromium_base('webkit', 'tools', - 'layout_tests', 'test_expectations_gpu.txt') - except AssertionError: - return None - if not os.path.exists(overrides_path): - return None - with codecs.open(overrides_path, "r", "utf-8") as file: - return file.read() - - -class ChromiumGpuLinuxPort(chromium_linux.ChromiumLinuxPort): - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-gpu-linux') - _set_gpu_options(kwargs.get('options')) - chromium_linux.ChromiumLinuxPort.__init__(self, **kwargs) - - def baseline_search_path(self): - # Mimic the Linux -> Win expectations fallback in the ordinary Chromium port. 
- return (map(self._webkit_baseline_path, ['chromium-gpu-linux', 'chromium-gpu-win', 'chromium-gpu']) + - chromium_linux.ChromiumLinuxPort.baseline_search_path(self)) - - def default_child_processes(self): - return 1 - - def path_to_test_expectations_file(self): - return self.path_from_webkit_base('LayoutTests', 'platform', - 'chromium-gpu', 'test_expectations.txt') - - def test_expectations_overrides(self): - return _gpu_overrides(self) - - -class ChromiumGpuMacPort(chromium_mac.ChromiumMacPort): - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-gpu-mac') - _set_gpu_options(kwargs.get('options')) - chromium_mac.ChromiumMacPort.__init__(self, **kwargs) - - def baseline_search_path(self): - return (map(self._webkit_baseline_path, ['chromium-gpu-mac', 'chromium-gpu']) + - chromium_mac.ChromiumMacPort.baseline_search_path(self)) - - def default_child_processes(self): - return 1 - - def path_to_test_expectations_file(self): - return self.path_from_webkit_base('LayoutTests', 'platform', - 'chromium-gpu', 'test_expectations.txt') - - def test_expectations_overrides(self): - return _gpu_overrides(self) - - -class ChromiumGpuWinPort(chromium_win.ChromiumWinPort): - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-gpu-win' + self.version()) - _set_gpu_options(kwargs.get('options')) - chromium_win.ChromiumWinPort.__init__(self, **kwargs) - - def baseline_search_path(self): - return (map(self._webkit_baseline_path, ['chromium-gpu-win', 'chromium-gpu']) + - chromium_win.ChromiumWinPort.baseline_search_path(self)) - - def default_child_processes(self): - return 1 - - def path_to_test_expectations_file(self): - return self.path_from_webkit_base('LayoutTests', 'platform', - 'chromium-gpu', 'test_expectations.txt') - - def test_expectations_overrides(self): - return _gpu_overrides(self) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py deleted file mode 100644 index 03bc98a..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
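The GPU port classes removed above all follow the same pattern: prepend the chromium-gpu-* platform directories to the underlying Chromium port's ordinary baseline search path. A minimal sketch of that layering, with webkit_baseline_path as an illustrative stand-in for Port._webkit_baseline_path():

    def webkit_baseline_path(platform):
        # Stand-in for Port._webkit_baseline_path(): each platform's
        # baselines live under LayoutTests/platform/<platform>.
        return 'LayoutTests/platform/' + platform

    def gpu_linux_baseline_search_path():
        # Mirrors ChromiumGpuLinuxPort.baseline_search_path(): GPU Linux
        # falls back to GPU Win and the generic GPU directory before the
        # regular chromium-linux fallback chain takes over.
        gpu_dirs = ['chromium-gpu-linux', 'chromium-gpu-win', 'chromium-gpu']
        regular_dirs = ['chromium-linux', 'chromium-win', 'chromium', 'win', 'mac']
        return [webkit_baseline_path(p) for p in gpu_dirs + regular_dirs]

    for path in gpu_linux_baseline_search_path():
        print(path)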
- -import os -import unittest - -from webkitpy.tool import mocktool -import chromium_gpu - - -class ChromiumGpuTest(unittest.TestCase): - def test_get_chromium_gpu_linux(self): - self.assertOverridesWorked('chromium-gpu-linux') - - def test_get_chromium_gpu_mac(self): - self.assertOverridesWorked('chromium-gpu-mac') - - def test_get_chromium_gpu_win(self): - self.assertOverridesWorked('chromium-gpu-win') - - def assertOverridesWorked(self, port_name): - # test that we got the right port - mock_options = mocktool.MockOptions(accelerated_compositing=None, - accelerated_2d_canvas=None, - builder_name='foo', - use_drt=None, - child_processes=None) - port = chromium_gpu.get(port_name=port_name, options=mock_options) - self.assertTrue(port._options.accelerated_compositing) - self.assertTrue(port._options.accelerated_2d_canvas) - self.assertTrue(port._options.use_drt) - self.assertEqual(port.default_child_processes(), 1) - self.assertEqual(port._options.builder_name, 'foo - GPU') - - # we use startswith() instead of Equal to gloss over platform versions. - self.assertTrue(port.name().startswith(port_name)) - - # test that it has the right directories in front of the search path. - paths = port.baseline_search_path() - self.assertEqual(port._webkit_baseline_path(port_name), paths[0]) - if port_name == 'chromium-gpu-linux': - self.assertEqual(port._webkit_baseline_path('chromium-gpu-win'), paths[1]) - self.assertEqual(port._webkit_baseline_path('chromium-gpu'), paths[2]) - else: - self.assertEqual(port._webkit_baseline_path('chromium-gpu'), paths[1]) - - # Test that we have the right expectations file. - self.assertTrue('chromium-gpu' in - port.path_to_test_expectations_file()) - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py deleted file mode 100644 index b26a6b5..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
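The ordering asserted by this unit test matters because the test runner walks baseline_search_path() front to back and uses the first directory that holds an expected result. A minimal sketch of that lookup, under the LayoutTests/platform layout shown elsewhere in this diff (find_baseline() is an illustrative helper, not webkitpy code):

import os

def find_baseline(test_name, search_path, suffix='-expected.txt'):
    """Return the first expected-result file found along the search path."""
    base = os.path.splitext(test_name)[0]
    for platform_dir in search_path:
        candidate = os.path.join(platform_dir, base + suffix)
        if os.path.exists(candidate):
            return candidate
    return None  # no platform-specific result; use the one next to the test

# Using the chromium-gpu-linux ordering verified above:
search_path = [os.path.join('LayoutTests', 'platform', name)
               for name in ('chromium-gpu-linux', 'chromium-gpu-win', 'chromium-gpu',
                            'chromium-linux', 'chromium-win', 'chromium', 'win', 'mac')]
print(find_baseline('fast/canvas/example.html', search_path))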
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Chromium Linux implementation of the Port interface.""" - -import logging -import os -import signal - -import chromium - -_log = logging.getLogger("webkitpy.layout_tests.port.chromium_linux") - - -class ChromiumLinuxPort(chromium.ChromiumPort): - """Chromium Linux implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-linux') - chromium.ChromiumPort.__init__(self, **kwargs) - - def baseline_search_path(self): - port_names = ["chromium-linux", "chromium-win", "chromium", "win", "mac"] - return map(self._webkit_baseline_path, port_names) - - def check_build(self, needs_http): - result = chromium.ChromiumPort.check_build(self, needs_http) - if needs_http: - if self.get_option('use_apache'): - result = self._check_apache_install() and result - else: - result = self._check_lighttpd_install() and result - result = self._check_wdiff_install() and result - - if not result: - _log.error('For complete Linux build requirements, please see:') - _log.error('') - _log.error(' http://code.google.com/p/chromium/wiki/' - 'LinuxBuildInstructions') - return result - - def test_platform_name(self): - # We use 'linux' instead of 'chromium-linux' in test_expectations.txt. - return 'linux' - - def version(self): - # We don't have different versions on linux. - return '' - - # - # PROTECTED METHODS - # - - def _build_path(self, *comps): - base = self.path_from_chromium_base() - if os.path.exists(os.path.join(base, 'sconsbuild')): - return os.path.join(base, 'sconsbuild', *comps) - if os.path.exists(os.path.join(base, 'out', *comps)) or not self.get_option('use_drt'): - return os.path.join(base, 'out', *comps) - base = self.path_from_webkit_base() - if os.path.exists(os.path.join(base, 'sconsbuild')): - return os.path.join(base, 'sconsbuild', *comps) - return os.path.join(base, 'out', *comps) - - def _check_apache_install(self): - result = chromium.check_file_exists(self._path_to_apache(), - "apache2") - result = chromium.check_file_exists(self._path_to_apache_config_file(), - "apache2 config file") and result - if not result: - _log.error(' Please install using: "sudo apt-get install ' - 'apache2 libapache2-mod-php5"') - _log.error('') - return result - - def _check_lighttpd_install(self): - result = chromium.check_file_exists( - self._path_to_lighttpd(), "LigHTTPd executable") - result = chromium.check_file_exists(self._path_to_lighttpd_php(), - "PHP CGI executable") and result - result = chromium.check_file_exists(self._path_to_lighttpd_modules(), - "LigHTTPd modules") and result - if not result: - _log.error(' Please install using: "sudo apt-get install ' - 'lighttpd php5-cgi"') - _log.error('') - return result - - def _check_wdiff_install(self): - result = chromium.check_file_exists(self._path_to_wdiff(), 'wdiff') - if not result: - _log.error(' Please install using: "sudo apt-get install ' - 'wdiff"') - _log.error('') - # FIXME: The ChromiumMac port always returns True. 
- return result - - def _path_to_apache(self): - if self._is_redhat_based(): - return '/usr/sbin/httpd' - else: - return '/usr/sbin/apache2' - - def _path_to_apache_config_file(self): - if self._is_redhat_based(): - config_name = 'fedora-httpd.conf' - else: - config_name = 'apache2-debian-httpd.conf' - - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - config_name) - - def _path_to_lighttpd(self): - return "/usr/sbin/lighttpd" - - def _path_to_lighttpd_modules(self): - return "/usr/lib/lighttpd" - - def _path_to_lighttpd_php(self): - return "/usr/bin/php-cgi" - - def _path_to_driver(self, configuration=None): - if not configuration: - configuration = self.get_option('configuration') - binary_name = 'test_shell' - if self.get_option('use_drt'): - binary_name = 'DumpRenderTree' - return self._build_path(configuration, binary_name) - - def _path_to_helper(self): - return None - - def _path_to_wdiff(self): - if self._is_redhat_based(): - return '/usr/bin/dwdiff' - else: - return '/usr/bin/wdiff' - - def _is_redhat_based(self): - return os.path.exists(os.path.join('/etc', 'redhat-release')) - - def _shut_down_http_server(self, server_pid): - """Shut down the lighttpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # TODO(mmoss) This isn't ideal, since it could conflict with - # lighttpd processes not started by http_server.py, - # but good enough for now. - self._executive.kill_all("lighttpd") - self._executive.kill_all("apache2") - else: - try: - os.kill(server_pid, signal.SIGTERM) - # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid - # file), so if kill fails on the given PID, just try to - # 'killall' web servers. - self._shut_down_http_server(None) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py deleted file mode 100644 index d1c383c..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py +++ /dev/null @@ -1,176 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
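The shutdown logic above follows a simple "kill by PID, fall back to kill-by-name" pattern. A minimal standalone sketch of the same idea (stop_server() is an illustrative helper and shells out to the external killall command rather than using webkitpy's Executive):

import os
import signal
import subprocess

def stop_server(server_pid, process_names=('lighttpd', 'apache2')):
    if server_pid is None:
        # No PID recorded (e.g. the server was started by hand), so kill by name.
        for name in process_names:
            subprocess.call(['killall', name])
        return
    try:
        os.kill(server_pid, signal.SIGTERM)
    except OSError:
        # Stale or bad PID (e.g. from an old httpd.pid file); fall back to killall.
        stop_server(None, process_names)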
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Chromium Mac implementation of the Port interface.""" - -import logging -import os -import platform -import signal - -import chromium - -from webkitpy.common.system.executive import Executive - -_log = logging.getLogger("webkitpy.layout_tests.port.chromium_mac") - - -class ChromiumMacPort(chromium.ChromiumPort): - """Chromium Mac implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-mac') - chromium.ChromiumPort.__init__(self, **kwargs) - - def baseline_search_path(self): - port_names = ["chromium-mac", "chromium", "mac" + self.version(), "mac"] - return map(self._webkit_baseline_path, port_names) - - def check_build(self, needs_http): - result = chromium.ChromiumPort.check_build(self, needs_http) - result = self._check_wdiff_install() and result - if not result: - _log.error('For complete Mac build requirements, please see:') - _log.error('') - _log.error(' http://code.google.com/p/chromium/wiki/' - 'MacBuildInstructions') - return result - - def default_child_processes(self): - # FIXME: we need to run single-threaded for now. See - # https://bugs.webkit.org/show_bug.cgi?id=38553. Unfortunately this - # routine is called right before the logger is configured, so if we - # try to _log.warning(), it gets thrown away. - import sys - sys.stderr.write("Defaulting to one child - see https://bugs.webkit.org/show_bug.cgi?id=38553\n") - return 1 - - def driver_name(self): - """name for this port's equivalent of DumpRenderTree.""" - if self.get_option('use_drt'): - return "DumpRenderTree" - return "TestShell" - - def test_platform_name(self): - # We use 'mac' instead of 'chromium-mac' - return 'mac' - - def version(self): - # FIXME: It's strange that this string is -version, not just version. - os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" - if not os_version_string: - return '-leopard' - release_version = int(os_version_string.split('.')[1]) - # we don't support 'tiger' or earlier releases - if release_version == 5: - return '-leopard' - elif release_version == 6: - return '-snowleopard' - return '' - - # - # PROTECTED METHODS - # - - def _build_path(self, *comps): - path = self.path_from_chromium_base('xcodebuild', *comps) - if os.path.exists(path) or not self.get_option('use_drt'): - return path - return self.path_from_webkit_base('WebKit', 'chromium', 'xcodebuild', - *comps) - - def _check_wdiff_install(self): - try: - # We're ignoring the return and always returning True - self._executive.run_command([self._path_to_wdiff()], error_handler=Executive.ignore_error) - except OSError: - _log.warning('wdiff not found. 
Install using MacPorts or some ' - 'other means') - return True - - def _lighttpd_path(self, *comps): - return self.path_from_chromium_base('third_party', 'lighttpd', - 'mac', *comps) - - def _path_to_apache(self): - return '/usr/sbin/httpd' - - def _path_to_apache_config_file(self): - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'apache2-httpd.conf') - - def _path_to_lighttpd(self): - return self._lighttpd_path('bin', 'lighttpd') - - def _path_to_lighttpd_modules(self): - return self._lighttpd_path('lib') - - def _path_to_lighttpd_php(self): - return self._lighttpd_path('bin', 'php-cgi') - - def _path_to_driver(self, configuration=None): - # FIXME: make |configuration| happy with case-sensitive file - # systems. - if not configuration: - configuration = self.get_option('configuration') - return self._build_path(configuration, self.driver_name() + '.app', - 'Contents', 'MacOS', self.driver_name()) - - def _path_to_helper(self): - binary_name = 'layout_test_helper' - if self.get_option('use_drt'): - binary_name = 'LayoutTestHelper' - return self._build_path(self.get_option('configuration'), binary_name) - - def _path_to_wdiff(self): - return 'wdiff' - - def _shut_down_http_server(self, server_pid): - """Shut down the lighttpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # TODO(mmoss) This isn't ideal, since it could conflict with - # lighttpd processes not started by http_server.py, - # but good enough for now. - self._executive.kill_all('lighttpd') - self._executive.kill_all('httpd') - else: - try: - os.kill(server_pid, signal.SIGTERM) - # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid - # file), so if kill fails on the given PID, just try to - # 'killall' web servers. - self._shut_down_http_server(None) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py deleted file mode 100644 index d63faa0..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import chromium_mac -import unittest - -from webkitpy.thirdparty.mock import Mock - - -class ChromiumMacPortTest(unittest.TestCase): - - def test_check_wdiff_install(self): - port = chromium_mac.ChromiumMacPort() - # Currently is always true, just logs if missing. - self.assertTrue(port._check_wdiff_install()) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py deleted file mode 100644 index 5396522..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
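As a quick reference for the ChromiumMacPort.version() logic a little earlier in this diff, the mapping from a platform.mac_ver() string to a baseline-directory suffix can be written as the following sketch (baseline_suffix_for_mac() is an illustrative name, not webkitpy API):

def baseline_suffix_for_mac(os_version_string):
    """Map a version string such as '10.5.6' to the port's baseline suffix."""
    if not os_version_string:
        return '-leopard'          # no version string available; default to Leopard
    release = int(os_version_string.split('.')[1])
    if release == 5:
        return '-leopard'
    if release == 6:
        return '-snowleopard'
    return ''                      # anything else gets no version-specific suffix

assert baseline_suffix_for_mac('10.5.6') == '-leopard'
assert baseline_suffix_for_mac('10.6.4') == '-snowleopard'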
- -import os -import unittest -import StringIO - -from webkitpy.tool import mocktool -from webkitpy.thirdparty.mock import Mock - -import chromium -import chromium_linux -import chromium_mac -import chromium_win - -class ChromiumDriverTest(unittest.TestCase): - - def setUp(self): - mock_port = Mock() - mock_port.get_option = lambda option_name: '' - self.driver = chromium.ChromiumDriver(mock_port, worker_number=0) - - def test_test_shell_command(self): - expected_command = "test.html 2 checksum\n" - self.assertEqual(self.driver._test_shell_command("test.html", 2, "checksum"), expected_command) - - def _assert_write_command_and_read_line(self, input=None, expected_line=None, expected_stdin=None, expected_crash=False): - if not expected_stdin: - if input: - expected_stdin = input - else: - # We reset stdin, so we should expect stdin.getValue = "" - expected_stdin = "" - self.driver._proc.stdin = StringIO.StringIO() - line, did_crash = self.driver._write_command_and_read_line(input) - self.assertEqual(self.driver._proc.stdin.getvalue(), expected_stdin) - self.assertEqual(line, expected_line) - self.assertEqual(did_crash, expected_crash) - - def test_write_command_and_read_line(self): - self.driver._proc = Mock() - # Set up to read 3 lines before we get an IOError - self.driver._proc.stdout = StringIO.StringIO("first\nsecond\nthird\n") - - unicode_input = u"I \u2661 Unicode" - utf8_input = unicode_input.encode("utf-8") - # Test unicode input conversion to utf-8 - self._assert_write_command_and_read_line(input=unicode_input, expected_stdin=utf8_input, expected_line="first\n") - # Test str() input. - self._assert_write_command_and_read_line(input="foo", expected_line="second\n") - # Test input=None - self._assert_write_command_and_read_line(expected_line="third\n") - # Test reading from a closed/empty stream. - # reading from a StringIO does not raise IOError like a real file would, so raise IOError manually. - def mock_readline(): - raise IOError - self.driver._proc.stdout.readline = mock_readline - self._assert_write_command_and_read_line(expected_crash=True) - - -class ChromiumPortTest(unittest.TestCase): - class TestMacPort(chromium_mac.ChromiumMacPort): - def __init__(self, options): - chromium_mac.ChromiumMacPort.__init__(self, - port_name='test-port', - options=options) - - def default_configuration(self): - self.default_configuration_called = True - return 'default' - - class TestLinuxPort(chromium_linux.ChromiumLinuxPort): - def __init__(self, options): - chromium_linux.ChromiumLinuxPort.__init__(self, - port_name='test-port', - options=options) - - def default_configuration(self): - self.default_configuration_called = True - return 'default' - - def test_path_to_image_diff(self): - mock_options = mocktool.MockOptions(use_drt=True) - port = ChromiumPortTest.TestLinuxPort(options=mock_options) - self.assertTrue(port._path_to_image_diff().endswith( - '/out/default/ImageDiff'), msg=port._path_to_image_diff()) - port = ChromiumPortTest.TestMacPort(options=mock_options) - self.assertTrue(port._path_to_image_diff().endswith( - '/xcodebuild/default/ImageDiff')) - # FIXME: Figure out how this is going to work on Windows. 
- #port = chromium_win.ChromiumWinPort('test-port', options=MockOptions()) - - def test_skipped_layout_tests(self): - mock_options = mocktool.MockOptions(use_drt=True) - port = ChromiumPortTest.TestLinuxPort(options=mock_options) - - fake_test = os.path.join(port.layout_tests_dir(), "fast/js/not-good.js") - - port.test_expectations = lambda: """BUG_TEST SKIP : fast/js/not-good.js = TEXT -LINUX WIN : fast/js/very-good.js = TIMEOUT PASS""" - port.test_expectations_overrides = lambda: '' - port.tests = lambda paths: set() - port.path_exists = lambda test: True - - skipped_tests = port.skipped_layout_tests(extra_test_files=[fake_test, ]) - self.assertTrue("fast/js/not-good.js" in skipped_tests) - - def test_default_configuration(self): - mock_options = mocktool.MockOptions() - port = ChromiumPortTest.TestLinuxPort(options=mock_options) - self.assertEquals(mock_options.configuration, 'default') - self.assertTrue(port.default_configuration_called) - - mock_options = mocktool.MockOptions(configuration=None) - port = ChromiumPortTest.TestLinuxPort(mock_options) - self.assertEquals(mock_options.configuration, 'default') - self.assertTrue(port.default_configuration_called) - - def test_diff_image(self): - class TestPort(ChromiumPortTest.TestLinuxPort): - def _path_to_image_diff(self): - return "/path/to/image_diff" - - class MockExecute: - def __init__(self, result): - self._result = result - - def run_command(self, - args, - cwd=None, - input=None, - error_handler=None, - return_exit_code=False, - return_stderr=True, - decode_output=False): - if return_exit_code: - return self._result - return '' - - mock_options = mocktool.MockOptions(use_drt=False) - port = ChromiumPortTest.TestLinuxPort(mock_options) - - # Images are different. - port._executive = MockExecute(0) - self.assertEquals(False, port.diff_image("EXPECTED", "ACTUAL")) - - # Images are the same. - port._executive = MockExecute(1) - self.assertEquals(True, port.diff_image("EXPECTED", "ACTUAL")) - - # There was some error running image_diff. - port._executive = MockExecute(2) - exception_raised = False - try: - port.diff_image("EXPECTED", "ACTUAL") - except ValueError, e: - exception_raised = True - self.assertFalse(exception_raised) - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py deleted file mode 100644 index 69b529a..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py +++ /dev/null @@ -1,174 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
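The driver tests above pin down a small line-oriented protocol: a command is written to the child process's stdin as "<test> <timeout> <checksum>\n" (unicode is encoded to UTF-8 first) and one reply line is read back, with an IOError on the read treated as a crash. A rough sketch of that exchange, assuming Python 2 as in the rest of this code (build_command() and send_command() are illustrative helpers, not the real ChromiumDriver methods):

def build_command(test_name, timeout_ms=None, checksum=None):
    command = test_name
    if timeout_ms is not None:
        command += ' %s' % timeout_ms
    if checksum:
        command += ' %s' % checksum
    return command + '\n'

def send_command(stdin, stdout, command):
    if isinstance(command, unicode):
        command = command.encode('utf-8')   # the pipe only accepts byte strings
    stdin.write(command)
    try:
        return stdout.readline(), False     # (reply line, crashed?)
    except IOError:
        return None, True                   # closed/broken pipe counts as a crash

assert build_command('test.html', 2, 'checksum') == 'test.html 2 checksum\n'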
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Chromium Win implementation of the Port interface.""" - -import logging -import os -import sys - -import chromium - -_log = logging.getLogger("webkitpy.layout_tests.port.chromium_win") - - -class ChromiumWinPort(chromium.ChromiumPort): - """Chromium Win implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'chromium-win' + self.version()) - chromium.ChromiumPort.__init__(self, **kwargs) - - def setup_environ_for_server(self): - env = chromium.ChromiumPort.setup_environ_for_server(self) - # Put the cygwin directory first in the path to find cygwin1.dll. - env["PATH"] = "%s;%s" % ( - self.path_from_chromium_base("third_party", "cygwin", "bin"), - env["PATH"]) - # Configure the cygwin directory so that pywebsocket finds proper - # python executable to run cgi program. - env["CYGWIN_PATH"] = self.path_from_chromium_base( - "third_party", "cygwin", "bin") - if (sys.platform == "win32" and self.get_option('register_cygwin')): - setup_mount = self.path_from_chromium_base("third_party", - "cygwin", - "setup_mount.bat") - self._executive.run_command([setup_mount]) - return env - - def baseline_search_path(self): - port_names = [] - if self._name.endswith('-win-xp'): - port_names.append("chromium-win-xp") - if self._name.endswith('-win-xp') or self._name.endswith('-win-vista'): - port_names.append("chromium-win-vista") - # FIXME: This may need to include mac-snowleopard like win.py. - port_names.extend(["chromium-win", "chromium", "win", "mac"]) - return map(self._webkit_baseline_path, port_names) - - def check_build(self, needs_http): - result = chromium.ChromiumPort.check_build(self, needs_http) - if not result: - _log.error('For complete Windows build requirements, please ' - 'see:') - _log.error('') - _log.error(' http://dev.chromium.org/developers/how-tos/' - 'build-instructions-windows') - return result - - def relative_test_filename(self, filename): - path = filename[len(self.layout_tests_dir()) + 1:] - return path.replace('\\', '/') - - def test_platform_name(self): - # We return 'win-xp', not 'chromium-win-xp' here, for convenience. 
- return 'win' + self.version() - - def version(self): - if not hasattr(sys, 'getwindowsversion'): - return '' - winver = sys.getwindowsversion() - if winver[0] == 6 and (winver[1] == 1): - return '-7' - if winver[0] == 6 and (winver[1] == 0): - return '-vista' - if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2): - return '-xp' - return '' - - # - # PROTECTED ROUTINES - # - - def _build_path(self, *comps): - p = self.path_from_chromium_base('webkit', *comps) - if os.path.exists(p): - return p - p = self.path_from_chromium_base('chrome', *comps) - if os.path.exists(p) or not self.get_option('use_drt'): - return p - return os.path.join(self.path_from_webkit_base(), 'WebKit', 'chromium', - *comps) - - def _lighttpd_path(self, *comps): - return self.path_from_chromium_base('third_party', 'lighttpd', 'win', - *comps) - - def _path_to_apache(self): - return self.path_from_chromium_base('third_party', 'cygwin', 'usr', - 'sbin', 'httpd') - - def _path_to_apache_config_file(self): - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'cygwin-httpd.conf') - - def _path_to_lighttpd(self): - return self._lighttpd_path('LightTPD.exe') - - def _path_to_lighttpd_modules(self): - return self._lighttpd_path('lib') - - def _path_to_lighttpd_php(self): - return self._lighttpd_path('php5', 'php-cgi.exe') - - def _path_to_driver(self, configuration=None): - if not configuration: - configuration = self.get_option('configuration') - binary_name = 'test_shell.exe' - if self.get_option('use_drt'): - binary_name = 'DumpRenderTree.exe' - return self._build_path(configuration, binary_name) - - def _path_to_helper(self): - binary_name = 'layout_test_helper.exe' - if self.get_option('use_drt'): - binary_name = 'LayoutTestHelper.exe' - return self._build_path(self.get_option('configuration'), binary_name) - - def _path_to_image_diff(self): - binary_name = 'image_diff.exe' - if self.get_option('use_drt'): - binary_name = 'ImageDiff.exe' - return self._build_path(self.get_option('configuration'), binary_name) - - def _path_to_wdiff(self): - return self.path_from_chromium_base('third_party', 'cygwin', 'bin', - 'wdiff.exe') - - def _shut_down_http_server(self, server_pid): - """Shut down the lighttpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # FIXME: Why are we ignoring server_pid and calling - # _kill_all instead of Executive.kill_process(pid)? - self._executive.kill_all("LightTPD.exe") - self._executive.kill_all("httpd.exe") diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py deleted file mode 100644 index 36f3c6b..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os -import sys -import unittest -import chromium_win -from webkitpy.common.system import outputcapture -from webkitpy.tool import mocktool - - -class ChromiumWinTest(unittest.TestCase): - - class RegisterCygwinOption(object): - def __init__(self): - self.register_cygwin = True - - def setUp(self): - self.orig_platform = sys.platform - - def tearDown(self): - sys.platform = self.orig_platform - - def _mock_path_from_chromium_base(self, *comps): - return os.path.join("/chromium/src", *comps) - - def test_setup_environ_for_server(self): - port = chromium_win.ChromiumWinPort() - port._executive = mocktool.MockExecutive(should_log=True) - port.path_from_chromium_base = self._mock_path_from_chromium_base - output = outputcapture.OutputCapture() - orig_environ = os.environ.copy() - env = output.assert_outputs(self, port.setup_environ_for_server) - self.assertEqual(orig_environ["PATH"], os.environ["PATH"]) - self.assertNotEqual(env["PATH"], os.environ["PATH"]) - - def test_setup_environ_for_server_register_cygwin(self): - sys.platform = "win32" - port = chromium_win.ChromiumWinPort( - options=ChromiumWinTest.RegisterCygwinOption()) - port._executive = mocktool.MockExecutive(should_log=True) - port.path_from_chromium_base = self._mock_path_from_chromium_base - setup_mount = self._mock_path_from_chromium_base("third_party", - "cygwin", - "setup_mount.bat") - expected_stderr = "MOCK run_command: %s\n" % [setup_mount] - output = outputcapture.OutputCapture() - output.assert_outputs(self, port.setup_environ_for_server, - expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/config.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/config.py deleted file mode 100644 index 9aec637..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/config.py +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Wrapper objects for WebKit-specific utility routines.""" - -# FIXME: This file needs to be unified with common/checkout/scm.py and -# common/config/ports.py . - -import os - -from webkitpy.common.system import logutils -from webkitpy.common.system import executive - - -_log = logutils.get_logger(__file__) - -# -# FIXME: This is used to record if we've already hit the filesystem to look -# for a default configuration. We cache this to speed up the unit tests, -# but this can be reset with clear_cached_configuration(). This should be -# replaced with us consistently using MockConfigs() for tests that don't -# hit the filesystem at all and provide a reliable value. -# -_have_determined_configuration = False -_configuration = "Release" - - -def clear_cached_configuration(): - global _have_determined_configuration, _configuration - _have_determined_configuration = False - _configuration = "Release" - - -class Config(object): - _FLAGS_FROM_CONFIGURATIONS = { - "Debug": "--debug", - "Release": "--release", - } - - def __init__(self, executive, filesystem): - self._executive = executive - self._filesystem = filesystem - self._webkit_base_dir = None - self._default_configuration = None - self._build_directories = {} - - def build_directory(self, configuration): - """Returns the path to the build directory for the configuration.""" - if configuration: - flags = ["--configuration", - self._FLAGS_FROM_CONFIGURATIONS[configuration]] - else: - configuration = "" - flags = ["--top-level"] - - if not self._build_directories.get(configuration): - args = ["perl", self._script_path("webkit-build-directory")] + flags - self._build_directories[configuration] = ( - self._executive.run_command(args).rstrip()) - - return self._build_directories[configuration] - - def build_dumprendertree(self, configuration): - """Builds DRT in the given configuration. - - Returns True if the build was successful and up-to-date.""" - flag = self._FLAGS_FROM_CONFIGURATIONS[configuration] - exit_code = self._executive.run_command([ - self._script_path("build-dumprendertree"), flag], - return_exit_code=True) - if exit_code != 0: - _log.error("Failed to build DumpRenderTree") - return False - return True - - def default_configuration(self): - """Returns the default configuration for the user. - - Returns the value set by 'set-webkit-configuration', or "Release" - if that has not been set. 
This mirrors the logic in webkitdirs.pm.""" - if not self._default_configuration: - self._default_configuration = self._determine_configuration() - if not self._default_configuration: - self._default_configuration = 'Release' - if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS: - _log.warn("Configuration \"%s\" is not a recognized value.\n" % - self._default_configuration) - _log.warn("Scripts may fail. " - "See 'set-webkit-configuration --help'.") - return self._default_configuration - - def path_from_webkit_base(self, *comps): - return self._filesystem.join(self.webkit_base_dir(), *comps) - - def webkit_base_dir(self): - """Returns the absolute path to the top of the WebKit tree. - - Raises an AssertionError if the top dir can't be determined.""" - # Note: this code somewhat duplicates the code in - # scm.find_checkout_root(). However, that code only works if the top - # of the SCM repository also matches the top of the WebKit tree. The - # Chromium ports, for example, only check out subdirectories like - # WebKitTools/Scripts, and so we still have to do additional work - # to find the top of the tree. - # - # This code will also work if there is no SCM system at all. - if not self._webkit_base_dir: - abspath = os.path.abspath(__file__) - self._webkit_base_dir = abspath[0:abspath.find('WebKitTools')] - return self._webkit_base_dir - - def _script_path(self, script_name): - return self._filesystem.join(self.webkit_base_dir(), "WebKitTools", - "Scripts", script_name) - - def _determine_configuration(self): - # This mirrors the logic in webkitdirs.pm:determineConfiguration(). - # - # FIXME: See the comment at the top of the file regarding unit tests - # and our use of global mutable static variables. - global _have_determined_configuration, _configuration - if not _have_determined_configuration: - contents = self._read_configuration() - if not contents: - contents = "Release" - if contents == "Deployment": - contents = "Release" - if contents == "Development": - contents = "Debug" - _configuration = contents - _have_determined_configuration = True - return _configuration - - def _read_configuration(self): - try: - configuration_path = self._filesystem.join(self.build_directory(None), - "Configuration") - if not self._filesystem.exists(configuration_path): - return None - except (OSError, executive.ScriptError): - return None - - return self._filesystem.read_text_file(configuration_path).rstrip() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_mock.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/config_mock.py deleted file mode 100644 index af71fa3..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_mock.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
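The normalization in _determine_configuration() above is small but easy to misread in diff form: an empty or missing Configuration file means "Release", the legacy "Deployment" and "Development" spellings are mapped onto "Release" and "Debug", and anything else passes through unchanged (default_configuration() warns about unrecognized values). A one-function sketch of that mapping (normalize_configuration() is an illustrative name):

def normalize_configuration(contents):
    """Map the raw Configuration-file contents to a build configuration."""
    if not contents:
        return 'Release'             # no file, or an empty one, means the default
    if contents == 'Deployment':
        return 'Release'             # legacy spellings map onto the current names
    if contents == 'Development':
        return 'Debug'
    return contents

assert normalize_configuration(None) == 'Release'
assert normalize_configuration('Deployment') == 'Release'
assert normalize_configuration('Development') == 'Debug'
assert normalize_configuration('Debug') == 'Debug'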
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Wrapper objects for WebKit-specific utility routines.""" - - -class MockConfig(object): - def __init__(self, default_configuration='Release'): - self._default_configuration = default_configuration - - def build_directory(self, configuration): - return "/build" - - def build_dumprendertree(self, configuration): - return True - - def default_configuration(self): - return self._default_configuration - - def path_from_webkit_base(self, *comps): - return "/" + "/".join(list(comps)) - - def webkit_base_dir(self): - return "/" diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_standalone.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/config_standalone.py deleted file mode 100644 index 3dec3b9..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_standalone.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""FIXME: This script is used by -config_unittest.test_default_configuration__standalone() to read the -default configuration to work around any possible caching / reset bugs. See -https://bugs.webkit.org/show_bug?id=49360 for the motivation. 
We can remove -this test when we remove the global configuration cache in config.py.""" - -import os -import unittest -import sys - - -# Ensure that webkitpy is in PYTHONPATH. -this_dir = os.path.abspath(sys.path[0]) -up = os.path.dirname -script_dir = up(up(up(this_dir))) -if script_dir not in sys.path: - sys.path.append(script_dir) - -from webkitpy.common.system import executive -from webkitpy.common.system import executive_mock -from webkitpy.common.system import filesystem -from webkitpy.common.system import filesystem_mock - -import config - - -def main(argv=None): - if not argv: - argv = sys.argv - - if len(argv) == 3 and argv[1] == '--mock': - e = executive_mock.MockExecutive2(output='foo') - fs = filesystem_mock.MockFileSystem({'foo/Configuration': argv[2]}) - else: - e = executive.Executive() - fs = filesystem.FileSystem() - - c = config.Config(e, fs) - print c.default_configuration() - -if __name__ == '__main__': - main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/config_unittest.py deleted file mode 100644 index 2d23691..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/config_unittest.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os -import sys -import unittest - -from webkitpy.common.system import executive -from webkitpy.common.system import executive_mock -from webkitpy.common.system import filesystem -from webkitpy.common.system import filesystem_mock -from webkitpy.common.system import outputcapture - -import config - - -def mock_run_command(arg_list): - # Set this to True to test actual output (where possible). 
- integration_test = False - if integration_test: - return executive.Executive().run_command(arg_list) - - if 'webkit-build-directory' in arg_list[1]: - return mock_webkit_build_directory(arg_list[2:]) - return 'Error' - - -def mock_webkit_build_directory(arg_list): - if arg_list == ['--top-level']: - return '/WebKitBuild' - elif arg_list == ['--configuration', '--debug']: - return '/WebKitBuild/Debug' - elif arg_list == ['--configuration', '--release']: - return '/WebKitBuild/Release' - return 'Error' - - -class ConfigTest(unittest.TestCase): - def tearDown(self): - config.clear_cached_configuration() - - def make_config(self, output='', files={}, exit_code=0, exception=None, - run_command_fn=None): - e = executive_mock.MockExecutive2(output=output, exit_code=exit_code, - exception=exception, - run_command_fn=run_command_fn) - fs = filesystem_mock.MockFileSystem(files) - return config.Config(e, fs) - - def assert_configuration(self, contents, expected): - # This tests that a configuration file containing - # _contents_ ends up being interpreted as _expected_. - c = self.make_config('foo', {'foo/Configuration': contents}) - self.assertEqual(c.default_configuration(), expected) - - def test_build_directory(self): - # --top-level - c = self.make_config(run_command_fn=mock_run_command) - self.assertTrue(c.build_directory(None).endswith('WebKitBuild')) - - # Test again to check caching - self.assertTrue(c.build_directory(None).endswith('WebKitBuild')) - - # Test other values - self.assertTrue(c.build_directory('Release').endswith('/Release')) - self.assertTrue(c.build_directory('Debug').endswith('/Debug')) - self.assertRaises(KeyError, c.build_directory, 'Unknown') - - def test_build_dumprendertree__success(self): - c = self.make_config(exit_code=0) - self.assertTrue(c.build_dumprendertree("Debug")) - self.assertTrue(c.build_dumprendertree("Release")) - self.assertRaises(KeyError, c.build_dumprendertree, "Unknown") - - def test_build_dumprendertree__failure(self): - c = self.make_config(exit_code=-1) - - # FIXME: Build failures should log errors. However, the message we - # get depends on how we're being called; as a standalone test, - # we'll get the "no handlers found" message. As part of - # test-webkitpy, we get the actual message. Really, we need - # outputcapture to install its own handler. - oc = outputcapture.OutputCapture() - oc.capture_output() - self.assertFalse(c.build_dumprendertree('Debug')) - oc.restore_output() - - oc.capture_output() - self.assertFalse(c.build_dumprendertree('Release')) - oc.restore_output() - - def test_default_configuration__release(self): - self.assert_configuration('Release', 'Release') - - def test_default_configuration__debug(self): - self.assert_configuration('Debug', 'Debug') - - def test_default_configuration__deployment(self): - self.assert_configuration('Deployment', 'Release') - - def test_default_configuration__development(self): - self.assert_configuration('Development', 'Debug') - - def test_default_configuration__notfound(self): - # This tests what happens if the default configuration file - # doesn't exist. - c = self.make_config(output='foo', files={'foo/Configuration': None}) - self.assertEqual(c.default_configuration(), "Release") - - def test_default_configuration__unknown(self): - # Ignore the warning about an unknown configuration value. 
- oc = outputcapture.OutputCapture() - oc.capture_output() - self.assert_configuration('Unknown', 'Unknown') - oc.restore_output() - - def test_default_configuration__standalone(self): - # FIXME: This test runs a standalone python script to test - # reading the default configuration to work around any possible - # caching / reset bugs. See https://bugs.webkit.org/show_bug?id=49360 - # for the motivation. We can remove this test when we remove the - # global configuration cache in config.py. - e = executive.Executive() - fs = filesystem.FileSystem() - c = config.Config(e, fs) - script = c.path_from_webkit_base('WebKitTools', 'Scripts', - 'webkitpy', 'layout_tests', 'port', 'config_standalone.py') - - # Note: don't use 'Release' here, since that's the normal default. - expected = 'Debug' - - args = [sys.executable, script, '--mock', expected] - actual = e.run_command(args).rstrip() - self.assertEqual(actual, expected) - - def test_default_configuration__no_perl(self): - # We need perl to run webkit-build-directory to find out where the - # default configuration file is. See what happens if perl isn't - # installed. (We should get the default value, 'Release'). - c = self.make_config(exception=OSError) - actual = c.default_configuration() - self.assertEqual(actual, 'Release') - - def test_default_configuration__scripterror(self): - # We run webkit-build-directory to find out where the default - # configuration file is. See what happens if that script fails. - # (We should get the default value, 'Release'). - c = self.make_config(exception=executive.ScriptError()) - actual = c.default_configuration() - self.assertEqual(actual, 'Release') - - def test_path_from_webkit_base(self): - # FIXME: We use a real filesystem here. Should this move to a - # mocked one? - c = config.Config(executive.Executive(), filesystem.FileSystem()) - self.assertTrue(c.path_from_webkit_base('foo')) - - def test_webkit_base_dir(self): - # FIXME: We use a real filesystem here. Should this move to a - # mocked one? - c = config.Config(executive.Executive(), filesystem.FileSystem()) - base_dir = c.webkit_base_dir() - self.assertTrue(base_dir) - - orig_cwd = os.getcwd() - os.chdir(os.environ['HOME']) - c = config.Config(executive.Executive(), filesystem.FileSystem()) - try: - base_dir_2 = c.webkit_base_dir() - self.assertEqual(base_dir, base_dir_2) - finally: - os.chdir(orig_cwd) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py deleted file mode 100644 index 4ed34e6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""This is a test implementation of the Port interface that generates the - correct output for every test. It can be used for perf testing, because - it is pretty much a lower limit on how fast a port can possibly run. - - This implementation acts as a wrapper around a real port (the real port - is held as a delegate object). To specify which port, use the port name - 'dryrun-XXX' (e.g., 'dryrun-chromium-mac-leopard'). If you use just - 'dryrun', it uses the default port. - - Note that because this is really acting as a wrapper around the underlying - port, you must be able to run the underlying port as well - (check_build() and check_sys_deps() must pass and auxiliary binaries - like layout_test_helper and httpd must work). - - This implementation also modifies the test expectations so that all - tests are either SKIPPED or expected to PASS.""" - -from __future__ import with_statement - -import os -import sys -import time - -from webkitpy.layout_tests.layout_package import test_output - -import base -import factory - - -class DryRunPort(object): - """DryRun implementation of the Port interface.""" - - def __init__(self, **kwargs): - pfx = 'dryrun-' - if 'port_name' in kwargs: - if kwargs['port_name'].startswith(pfx): - kwargs['port_name'] = kwargs['port_name'][len(pfx):] - else: - kwargs['port_name'] = None - self.__delegate = factory.get(**kwargs) - - def __getattr__(self, name): - return getattr(self.__delegate, name) - - def check_build(self, needs_http): - return True - - def check_sys_deps(self, needs_http): - return True - - def start_helper(self): - pass - - def start_http_server(self): - pass - - def start_websocket_server(self): - pass - - def stop_helper(self): - pass - - def stop_http_server(self): - pass - - def stop_websocket_server(self): - pass - - def create_driver(self, worker_number): - return DryrunDriver(self, worker_number) - - -class DryrunDriver(base.Driver): - """Dryrun implementation of the DumpRenderTree / Driver interface.""" - - def __init__(self, port, worker_number): - self._port = port - self._worker_number = worker_number - - def cmd_line(self): - return ['None'] - - def poll(self): - return None - - def run_test(self, test_input): - start_time = time.time() - text_output = self._port.expected_text(test_input.filename) - - if test_input.image_hash is not None: - image = self._port.expected_image(test_input.filename) - hash = self._port.expected_checksum(test_input.filename) - else: - image = None - hash = None - return test_output.TestOutput(text_output, image, hash, False, - time.time() - start_time, False, None) - - def start(self): - pass - - def stop(self): - pass diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py 
b/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py deleted file mode 100644 index 6935744..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Factory method to retrieve the appropriate port implementation.""" - - -import sys - -ALL_PORT_NAMES = ['test', 'dryrun', 'mac', 'win', 'gtk', 'qt', 'chromium-mac', - 'chromium-linux', 'chromium-win', 'google-chrome-win', - 'google-chrome-mac', 'google-chrome-linux32', 'google-chrome-linux64'] - - -def get(port_name=None, options=None, **kwargs): - """Returns an object implementing the Port interface. 
If - port_name is None, this routine attempts to guess at the most - appropriate port on this platform.""" - # Wrapped for backwards-compatibility - if port_name: - kwargs['port_name'] = port_name - if options: - kwargs['options'] = options - return _get_kwargs(**kwargs) - - -def _get_kwargs(**kwargs): - port_to_use = kwargs.get('port_name', None) - options = kwargs.get('options', None) - if port_to_use is None: - if sys.platform == 'win32' or sys.platform == 'cygwin': - if options and hasattr(options, 'chromium') and options.chromium: - port_to_use = 'chromium-win' - else: - port_to_use = 'win' - elif sys.platform == 'linux2': - port_to_use = 'chromium-linux' - elif sys.platform == 'darwin': - if options and hasattr(options, 'chromium') and options.chromium: - port_to_use = 'chromium-mac' - else: - port_to_use = 'mac' - - if port_to_use is None: - raise NotImplementedError('unknown port; sys.platform = "%s"' % - sys.platform) - - if port_to_use == 'test': - import test - maker = test.TestPort - elif port_to_use.startswith('dryrun'): - import dryrun - maker = dryrun.DryRunPort - elif port_to_use.startswith('mac'): - import mac - maker = mac.MacPort - elif port_to_use.startswith('win'): - import win - maker = win.WinPort - elif port_to_use.startswith('gtk'): - import gtk - maker = gtk.GtkPort - elif port_to_use.startswith('qt'): - import qt - maker = qt.QtPort - elif port_to_use.startswith('chromium-gpu'): - import chromium_gpu - maker = chromium_gpu.get - elif port_to_use.startswith('chromium-mac'): - import chromium_mac - maker = chromium_mac.ChromiumMacPort - elif port_to_use.startswith('chromium-linux'): - import chromium_linux - maker = chromium_linux.ChromiumLinuxPort - elif port_to_use.startswith('chromium-win'): - import chromium_win - maker = chromium_win.ChromiumWinPort - elif port_to_use.startswith('google-chrome'): - import google_chrome - maker = google_chrome.GetGoogleChromePort - else: - raise NotImplementedError('unsupported port: %s' % port_to_use) - return maker(**kwargs) - -def get_all(options=None): - """Returns all the objects implementing the Port interface.""" - return dict([(port_name, get(port_name, options=options)) - for port_name in ALL_PORT_NAMES]) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/factory_unittest.py deleted file mode 100644 index 978a557..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/factory_unittest.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import sys -import unittest - -from webkitpy.tool import mocktool - -import chromium_gpu -import chromium_linux -import chromium_mac -import chromium_win -import dryrun -import factory -import google_chrome -import gtk -import mac -import qt -import test -import win - - -class FactoryTest(unittest.TestCase): - """Test factory creates proper port object for the target. - - Target is specified by port_name, sys.platform and options. - - """ - # FIXME: The ports themselves should expose what options they require, - # instead of passing generic "options". - - def setUp(self): - self.real_sys_platform = sys.platform - self.webkit_options = mocktool.MockOptions(pixel_tests=False) - self.chromium_options = mocktool.MockOptions(pixel_tests=False, - chromium=True) - - def tearDown(self): - sys.platform = self.real_sys_platform - - def assert_port(self, port_name, expected_port, port_obj=None): - """Helper assert for port_name. - - Args: - port_name: port name to get port object. - expected_port: class of expected port object. - port_obj: optional port object - """ - port_obj = port_obj or factory.get(port_name=port_name) - self.assertTrue(isinstance(port_obj, expected_port)) - - def assert_platform_port(self, platform, options, expected_port): - """Helper assert for platform and options. - - Args: - platform: sys.platform. - options: options to get port object. - expected_port: class of expected port object. - - """ - orig_platform = sys.platform - sys.platform = platform - self.assertTrue(isinstance(factory.get(options=options), - expected_port)) - sys.platform = orig_platform - - def test_test(self): - self.assert_port("test", test.TestPort) - - def test_dryrun(self): - self.assert_port("dryrun-test", dryrun.DryRunPort) - self.assert_port("dryrun-mac", dryrun.DryRunPort) - - def test_mac(self): - self.assert_port("mac", mac.MacPort) - self.assert_platform_port("darwin", None, mac.MacPort) - self.assert_platform_port("darwin", self.webkit_options, mac.MacPort) - - def test_win(self): - self.assert_port("win", win.WinPort) - self.assert_platform_port("win32", None, win.WinPort) - self.assert_platform_port("win32", self.webkit_options, win.WinPort) - self.assert_platform_port("cygwin", None, win.WinPort) - self.assert_platform_port("cygwin", self.webkit_options, win.WinPort) - - def test_google_chrome(self): - # The actual Chrome class names aren't available so we test that the - # objects we get are at least subclasses of the Chromium versions. 
- self.assert_port("google-chrome-linux32", - chromium_linux.ChromiumLinuxPort) - self.assert_port("google-chrome-linux64", - chromium_linux.ChromiumLinuxPort) - self.assert_port("google-chrome-win", - chromium_win.ChromiumWinPort) - self.assert_port("google-chrome-mac", - chromium_mac.ChromiumMacPort) - - def test_gtk(self): - self.assert_port("gtk", gtk.GtkPort) - - def test_qt(self): - self.assert_port("qt", qt.QtPort) - - def test_chromium_gpu_linux(self): - self.assert_port("chromium-gpu-linux", chromium_gpu.ChromiumGpuLinuxPort) - - def test_chromium_gpu_mac(self): - self.assert_port("chromium-gpu-mac", chromium_gpu.ChromiumGpuMacPort) - - def test_chromium_gpu_win(self): - self.assert_port("chromium-gpu-win", chromium_gpu.ChromiumGpuWinPort) - - def test_chromium_mac(self): - self.assert_port("chromium-mac", chromium_mac.ChromiumMacPort) - self.assert_platform_port("darwin", self.chromium_options, - chromium_mac.ChromiumMacPort) - - def test_chromium_linux(self): - self.assert_port("chromium-linux", chromium_linux.ChromiumLinuxPort) - self.assert_platform_port("linux2", self.chromium_options, - chromium_linux.ChromiumLinuxPort) - - def test_chromium_win(self): - self.assert_port("chromium-win", chromium_win.ChromiumWinPort) - self.assert_platform_port("win32", self.chromium_options, - chromium_win.ChromiumWinPort) - self.assert_platform_port("cygwin", self.chromium_options, - chromium_win.ChromiumWinPort) - - def test_get_all_ports(self): - ports = factory.get_all() - for name in factory.ALL_PORT_NAMES: - self.assertTrue(name in ports.keys()) - self.assert_port("test", test.TestPort, ports["test"]) - self.assert_port("dryrun-test", dryrun.DryRunPort, ports["dryrun"]) - self.assert_port("dryrun-mac", dryrun.DryRunPort, ports["dryrun"]) - self.assert_port("mac", mac.MacPort, ports["mac"]) - self.assert_port("win", win.WinPort, ports["win"]) - self.assert_port("gtk", gtk.GtkPort, ports["gtk"]) - self.assert_port("qt", qt.QtPort, ports["qt"]) - self.assert_port("chromium-mac", chromium_mac.ChromiumMacPort, - ports["chromium-mac"]) - self.assert_port("chromium-linux", chromium_linux.ChromiumLinuxPort, - ports["chromium-linux"]) - self.assert_port("chromium-win", chromium_win.ChromiumWinPort, - ports["chromium-win"]) - - def test_unknown_specified(self): - # Test what happens when you specify an unknown port. - orig_platform = sys.platform - self.assertRaises(NotImplementedError, factory.get, - port_name='unknown') - - def test_unknown_default(self): - # Test what happens when you're running on an unknown platform. - orig_platform = sys.platform - sys.platform = 'unknown' - self.assertRaises(NotImplementedError, factory.get) - sys.platform = orig_platform - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome.py deleted file mode 100644 index 8d94bb5..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome.py +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import with_statement - -import codecs -import os - - -def _test_expectations_overrides(port, super): - # The chrome ports use the regular overrides plus anything in the - # official test_expectations as well. Hopefully we don't get collisions. - chromium_overrides = super.test_expectations_overrides(port) - - # FIXME: It used to be that AssertionError would get raised by - # path_from_chromium_base() if we weren't in a Chromium checkout, but - # this changed in r60427. This should probably be changed back. - overrides_path = port.path_from_chromium_base('webkit', 'tools', - 'layout_tests', 'test_expectations_chrome.txt') - if not os.path.exists(overrides_path): - return chromium_overrides - - with codecs.open(overrides_path, "r", "utf-8") as file: - if chromium_overrides: - return chromium_overrides + file.read() - else: - return file.read() - -def GetGoogleChromePort(**kwargs): - """Some tests have slightly different results when compiled as Google - Chrome vs Chromium. 
In those cases, we prepend an additional directory to - to the baseline paths.""" - port_name = kwargs['port_name'] - del kwargs['port_name'] - if port_name == 'google-chrome-linux32': - import chromium_linux - - class GoogleChromeLinux32Port(chromium_linux.ChromiumLinuxPort): - def baseline_search_path(self): - paths = chromium_linux.ChromiumLinuxPort.baseline_search_path( - self) - paths.insert(0, self._webkit_baseline_path( - 'google-chrome-linux32')) - return paths - - def test_expectations_overrides(self): - return _test_expectations_overrides(self, - chromium_linux.ChromiumLinuxPort) - - return GoogleChromeLinux32Port(**kwargs) - elif port_name == 'google-chrome-linux64': - import chromium_linux - - class GoogleChromeLinux64Port(chromium_linux.ChromiumLinuxPort): - def baseline_search_path(self): - paths = chromium_linux.ChromiumLinuxPort.baseline_search_path( - self) - paths.insert(0, self._webkit_baseline_path( - 'google-chrome-linux64')) - return paths - - def test_expectations_overrides(self): - return _test_expectations_overrides(self, - chromium_linux.ChromiumLinuxPort) - - return GoogleChromeLinux64Port(**kwargs) - elif port_name.startswith('google-chrome-mac'): - import chromium_mac - - class GoogleChromeMacPort(chromium_mac.ChromiumMacPort): - def baseline_search_path(self): - paths = chromium_mac.ChromiumMacPort.baseline_search_path( - self) - paths.insert(0, self._webkit_baseline_path( - 'google-chrome-mac')) - return paths - - def test_expectations_overrides(self): - return _test_expectations_overrides(self, - chromium_mac.ChromiumMacPort) - - return GoogleChromeMacPort(**kwargs) - elif port_name.startswith('google-chrome-win'): - import chromium_win - - class GoogleChromeWinPort(chromium_win.ChromiumWinPort): - def baseline_search_path(self): - paths = chromium_win.ChromiumWinPort.baseline_search_path( - self) - paths.insert(0, self._webkit_baseline_path( - 'google-chrome-win')) - return paths - - def test_expectations_overrides(self): - return _test_expectations_overrides(self, - chromium_win.ChromiumWinPort) - - return GoogleChromeWinPort(**kwargs) - raise NotImplementedError('unsupported port: %s' % port_name) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py deleted file mode 100644 index e60c274..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py +++ /dev/null @@ -1,103 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import codecs -import os -import unittest - -from webkitpy.common import newstringio - -import factory -import google_chrome - - -class GetGoogleChromePortTest(unittest.TestCase): - def test_get_google_chrome_port(self): - test_ports = ('google-chrome-linux32', 'google-chrome-linux64', - 'google-chrome-mac', 'google-chrome-win') - for port in test_ports: - self._verify_baseline_path(port, port) - self._verify_expectations_overrides(port) - - self._verify_baseline_path('google-chrome-mac', 'google-chrome-mac-leopard') - self._verify_baseline_path('google-chrome-win', 'google-chrome-win-xp') - self._verify_baseline_path('google-chrome-win', 'google-chrome-win-vista') - - def _verify_baseline_path(self, expected_path, port_name): - port = google_chrome.GetGoogleChromePort(port_name=port_name, - options=None) - path = port.baseline_search_path()[0] - self.assertEqual(expected_path, os.path.split(path)[1]) - - def _verify_expectations_overrides(self, port_name): - # FIXME: make this more robust when we have the Tree() abstraction. - # we should be able to test for the files existing or not, and - # be able to control the contents better. - - chromium_port = factory.get("chromium-mac") - chromium_overrides = chromium_port.test_expectations_overrides() - port = google_chrome.GetGoogleChromePort(port_name=port_name, - options=None) - - orig_exists = os.path.exists - orig_open = codecs.open - expected_string = "// hello, world\n" - - def mock_exists_chrome_not_found(path): - if 'test_expectations_chrome.txt' in path: - return False - return orig_exists(path) - - def mock_exists_chrome_found(path): - if 'test_expectations_chrome.txt' in path: - return True - return orig_exists(path) - - def mock_open(path, mode, encoding): - if 'test_expectations_chrome.txt' in path: - return newstringio.StringIO(expected_string) - return orig_open(path, mode, encoding) - - try: - os.path.exists = mock_exists_chrome_not_found - chrome_overrides = port.test_expectations_overrides() - self.assertEqual(chromium_overrides, chrome_overrides) - - os.path.exists = mock_exists_chrome_found - codecs.open = mock_open - chrome_overrides = port.test_expectations_overrides() - if chromium_overrides: - self.assertEqual(chrome_overrides, - chromium_overrides + expected_string) - else: - self.assertEqual(chrome_overrides, expected_string) - finally: - os.path.exists = orig_exists - codecs.open = orig_open - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py deleted file mode 100644 index c60909e..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""WebKit Gtk implementation of the Port interface.""" - -import logging -import os - -from webkitpy.layout_tests.port.webkit import WebKitPort - -_log = logging.getLogger("webkitpy.layout_tests.port.gtk") - - -class GtkPort(WebKitPort): - """WebKit Gtk implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'gtk') - WebKitPort.__init__(self, **kwargs) - - def _tests_for_other_platforms(self): - # FIXME: This list could be dynamic based on platform name and - # pushed into base.Port. - # This really need to be automated. - return [ - "platform/chromium", - "platform/win", - "platform/qt", - "platform/mac", - ] - - def _path_to_apache_config_file(self): - # FIXME: This needs to detect the distribution and change config files. - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'apache2-debian-httpd.conf') - - def _shut_down_http_server(self, server_pid): - """Shut down the httpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # FIXME: This isn't ideal, since it could conflict with - # lighttpd processes not started by http_server.py, - # but good enough for now. - self._executive.kill_all('apache2') - else: - try: - os.kill(server_pid, signal.SIGTERM) - # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid - # file), so if kill fails on the given PID, just try to - # 'killall' web servers. 
- self._shut_down_http_server(None) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock.py deleted file mode 100644 index 8995b21..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock.py +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""This class helps to block NRWT threads when more NRWTs run -http and websocket tests in a same time.""" - -import glob -import logging -import os -import sys -import tempfile -import time - -from webkitpy.common.system.executive import Executive -from webkitpy.common.system.file_lock import FileLock - - -_log = logging.getLogger("webkitpy.layout_tests.port.http_lock") - - -class HttpLock(object): - - def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", - guard_lock="WebKit.lock"): - self._lock_path = lock_path - if not self._lock_path: - self._lock_path = tempfile.gettempdir() - self._lock_file_prefix = lock_file_prefix - self._lock_file_path_prefix = os.path.join(self._lock_path, - self._lock_file_prefix) - self._guard_lock_file = os.path.join(self._lock_path, guard_lock) - self._guard_lock = FileLock(self._guard_lock_file) - self._process_lock_file_name = "" - self._executive = Executive() - - def cleanup_http_lock(self): - """Delete the lock file if exists.""" - if os.path.exists(self._process_lock_file_name): - _log.debug("Removing lock file: %s" % self._process_lock_file_name) - os.unlink(self._process_lock_file_name) - - def _extract_lock_number(self, lock_file_name): - """Return the lock number from lock file.""" - prefix_length = len(self._lock_file_path_prefix) - return int(lock_file_name[prefix_length:]) - - def _lock_file_list(self): - """Return the list of lock files sequentially.""" - lock_list = glob.glob(self._lock_file_path_prefix + '*') - lock_list.sort(key=self._extract_lock_number) - return lock_list - - def _next_lock_number(self): - """Return the next available lock number.""" - lock_list = self._lock_file_list() - if not lock_list: - return 0 - return self._extract_lock_number(lock_list[-1]) + 1 - - def _curent_lock_pid(self): - """Return with the current lock pid. If the lock is not valid - it deletes the lock file.""" - lock_list = self._lock_file_list() - if not lock_list: - return - try: - current_lock_file = open(lock_list[0], 'r') - current_pid = current_lock_file.readline() - current_lock_file.close() - if not (current_pid and self._executive.check_running_pid(int(current_pid))): - _log.debug("Removing stuck lock file: %s" % lock_list[0]) - os.unlink(lock_list[0]) - return - except (IOError, OSError): - return - return int(current_pid) - - def _create_lock_file(self): - """The lock files are used to schedule the running test sessions in first - come first served order. The guard lock ensures that the lock numbers are - sequential.""" - if not os.path.exists(self._lock_path): - _log.debug("Lock directory does not exist: %s" % self._lock_path) - return False - - if not self._guard_lock.acquire_lock(): - _log.debug("Guard lock timed out!") - return False - - self._process_lock_file_name = (self._lock_file_path_prefix + - str(self._next_lock_number())) - _log.debug("Creating lock file: %s" % self._process_lock_file_name) - lock_file = open(self._process_lock_file_name, 'w') - lock_file.write(str(os.getpid())) - lock_file.close() - self._guard_lock.release_lock() - return True - - - def wait_for_httpd_lock(self): - """Create a lock file and wait until it's turn comes. 
If something goes wrong - it wont do any locking.""" - if not self._create_lock_file(): - _log.debug("Warning, http locking failed!") - return - - while self._curent_lock_pid() != os.getpid(): - time.sleep(1) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py deleted file mode 100644 index 85c760a..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py +++ /dev/null @@ -1,111 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
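For reference, the HttpLock class deleted above serializes concurrent new-run-webkit-tests runs around the shared HTTP/websocket servers: wait_for_httpd_lock() queues the calling process behind numbered lock files in the lock directory, and cleanup_http_lock() releases its slot. A minimal caller sketch, using only the methods shown above (the wrapper function and its run_tests callable are hypothetical, not part of the deleted tree):

from webkitpy.layout_tests.port.http_lock import HttpLock

def run_http_tests_exclusively(run_tests):
    # Passing None for lock_path makes HttpLock fall back to the system
    # temp directory, as the deleted implementation above does.
    lock = HttpLock(None)
    lock.wait_for_httpd_lock()    # blocks until this process's lock file is first in line
    try:
        run_tests()               # caller-supplied callable that actually drives the tests
    finally:
        lock.cleanup_http_lock()  # delete our lock file so waiting runs can proceed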
- -import glob -import http_lock -import os -import unittest - - -class HttpLockTest(unittest.TestCase): - - def __init__(self, testFunc): - self.http_lock_obj = http_lock.HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock") - self.lock_file_path_prefix = os.path.join(self.http_lock_obj._lock_path, - self.http_lock_obj._lock_file_prefix) - self.lock_file_name = self.lock_file_path_prefix + "0" - self.guard_lock_file = self.http_lock_obj._guard_lock_file - self.clean_all_lockfile() - unittest.TestCase.__init__(self, testFunc) - - def clean_all_lockfile(self): - if os.path.exists(self.guard_lock_file): - os.unlink(self.guard_lock_file) - lock_list = glob.glob(self.lock_file_path_prefix + '*') - for file_name in lock_list: - os.unlink(file_name) - - def assertEqual(self, first, second): - if first != second: - self.clean_all_lockfile() - unittest.TestCase.assertEqual(self, first, second) - - def _check_lock_file(self): - if os.path.exists(self.lock_file_name): - pid = os.getpid() - lock_file = open(self.lock_file_name, 'r') - lock_file_pid = lock_file.readline() - lock_file.close() - self.assertEqual(pid, int(lock_file_pid)) - return True - return False - - def test_lock_lifecycle(self): - self.http_lock_obj._create_lock_file() - - self.assertEqual(True, self._check_lock_file()) - self.assertEqual(1, self.http_lock_obj._next_lock_number()) - - self.http_lock_obj.cleanup_http_lock() - - self.assertEqual(False, self._check_lock_file()) - self.assertEqual(0, self.http_lock_obj._next_lock_number()) - - def test_extract_lock_number(self,): - lock_file_list = ( - self.lock_file_path_prefix + "00", - self.lock_file_path_prefix + "9", - self.lock_file_path_prefix + "001", - self.lock_file_path_prefix + "021", - ) - - expected_number_list = (0, 9, 1, 21) - - for lock_file, expected in zip(lock_file_list, expected_number_list): - self.assertEqual(self.http_lock_obj._extract_lock_number(lock_file), expected) - - def test_lock_file_list(self): - lock_file_list = [ - self.lock_file_path_prefix + "6", - self.lock_file_path_prefix + "1", - self.lock_file_path_prefix + "4", - self.lock_file_path_prefix + "3", - ] - - expected_file_list = [ - self.lock_file_path_prefix + "1", - self.lock_file_path_prefix + "3", - self.lock_file_path_prefix + "4", - self.lock_file_path_prefix + "6", - ] - - for file_name in lock_file_list: - open(file_name, 'w') - - self.assertEqual(self.http_lock_obj._lock_file_list(), expected_file_list) - - for file_name in lock_file_list: - os.unlink(file_name) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py deleted file mode 100755 index 0f8a21e..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py +++ /dev/null @@ -1,233 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A class to help start/stop the lighttpd server used by layout tests.""" - -from __future__ import with_statement - -import codecs -import logging -import optparse -import os -import shutil -import subprocess -import sys -import tempfile -import time -import urllib - -import factory -import http_server_base - -_log = logging.getLogger("webkitpy.layout_tests.port.http_server") - - -class HttpdNotStarted(Exception): - pass - - -class Lighttpd(http_server_base.HttpServerBase): - - def __init__(self, port_obj, output_dir, background=False, port=None, - root=None, run_background=None): - """Args: - output_dir: the absolute path to the layout test result directory - """ - # Webkit tests - http_server_base.HttpServerBase.__init__(self, port_obj) - self._output_dir = output_dir - self._process = None - self._port = port - self._root = root - self._run_background = run_background - if self._port: - self._port = int(self._port) - - try: - self._webkit_tests = os.path.join( - self._port_obj.layout_tests_dir(), 'http', 'tests') - self._js_test_resource = os.path.join( - self._port_obj.layout_tests_dir(), 'fast', 'js', 'resources') - except: - self._webkit_tests = None - self._js_test_resource = None - - # Self generated certificate for SSL server (for client cert get - # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt) - self._pem_file = os.path.join( - os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem') - - # One mapping where we can get to everything - self.VIRTUALCONFIG = [] - - if self._webkit_tests: - self.VIRTUALCONFIG.extend( - # Three mappings (one with SSL) for LayoutTests http tests - [{'port': 8000, 'docroot': self._webkit_tests}, - {'port': 8080, 'docroot': self._webkit_tests}, - {'port': 8443, 'docroot': self._webkit_tests, - 'sslcert': self._pem_file}]) - - def is_running(self): - return self._process != None - - def start(self): - if self.is_running(): - raise 'Lighttpd already running' - - base_conf_file = self._port_obj.path_from_webkit_base('WebKitTools', - 'Scripts', 'webkitpy', 'layout_tests', 'port', 'lighttpd.conf') - out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf') - time_str = time.strftime("%d%b%Y-%H%M%S") - access_file_name = "access.log-" + time_str + ".txt" - access_log = os.path.join(self._output_dir, access_file_name) - log_file_name = "error.log-" + time_str + ".txt" - error_log = os.path.join(self._output_dir, log_file_name) - - # Remove old log files. We only need to keep the last ones. 
- self.remove_log_files(self._output_dir, "access.log-") - self.remove_log_files(self._output_dir, "error.log-") - - # Write out the config - with codecs.open(base_conf_file, "r", "utf-8") as file: - base_conf = file.read() - - # FIXME: This should be re-worked so that this block can - # use with open() instead of a manual file.close() call. - # lighttpd.conf files seem to be UTF-8 without BOM: - # http://redmine.lighttpd.net/issues/992 - f = codecs.open(out_conf_file, "w", "utf-8") - f.write(base_conf) - - # Write out our cgi handlers. Run perl through env so that it - # processes the #! line and runs perl with the proper command - # line arguments. Emulate apache's mod_asis with a cat cgi handler. - f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n' - ' ".pl" => "/usr/bin/env",\n' - ' ".asis" => "/bin/cat",\n' - ' ".php" => "%s" )\n\n') % - self._port_obj._path_to_lighttpd_php()) - - # Setup log files - f.write(('server.errorlog = "%s"\n' - 'accesslog.filename = "%s"\n\n') % (error_log, access_log)) - - # Setup upload folders. Upload folder is to hold temporary upload files - # and also POST data. This is used to support XHR layout tests that - # does POST. - f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir)) - - # Setup a link to where the js test templates are stored - f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') % - (self._js_test_resource)) - - # dump out of virtual host config at the bottom. - if self._root: - if self._port: - # Have both port and root dir. - mappings = [{'port': self._port, 'docroot': self._root}] - else: - # Have only a root dir - set the ports as for LayoutTests. - # This is used in ui_tests to run http tests against a browser. - - # default set of ports as for LayoutTests but with a - # specified root. - mappings = [{'port': 8000, 'docroot': self._root}, - {'port': 8080, 'docroot': self._root}, - {'port': 8443, 'docroot': self._root, - 'sslcert': self._pem_file}] - else: - mappings = self.VIRTUALCONFIG - for mapping in mappings: - ssl_setup = '' - if 'sslcert' in mapping: - ssl_setup = (' ssl.engine = "enable"\n' - ' ssl.pemfile = "%s"\n' % mapping['sslcert']) - - f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n' - ' server.document-root = "%s"\n' + - ssl_setup + - '}\n\n') % (mapping['port'], mapping['docroot'])) - f.close() - - executable = self._port_obj._path_to_lighttpd() - module_path = self._port_obj._path_to_lighttpd_modules() - start_cmd = [executable, - # Newly written config file - '-f', os.path.join(self._output_dir, 'lighttpd.conf'), - # Where it can find its module dynamic libraries - '-m', module_path] - - if not self._run_background: - start_cmd.append(# Don't background - '-D') - - # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the - # bug that mod_alias.so loads it from the hard coded path. - if sys.platform == 'darwin': - tmp_module_path = '/tmp/lighttpd/lib' - if not os.path.exists(tmp_module_path): - os.makedirs(tmp_module_path) - lib_file = 'liblightcomp.dylib' - shutil.copyfile(os.path.join(module_path, lib_file), - os.path.join(tmp_module_path, lib_file)) - - env = self._port_obj.setup_environ_for_server() - _log.debug('Starting http server') - # FIXME: Should use Executive.run_command - self._process = subprocess.Popen(start_cmd, env=env) - - # Wait for server to start. 
- self.mappings = mappings - server_started = self.wait_for_action( - self.is_server_running_on_all_ports) - - # Our process terminated already - if not server_started or self._process.returncode != None: - raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.') - - _log.debug("Server successfully started") - - # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are - # probably not being flushed, etc... why doesn't our python have os.kill ? - - def stop(self, force=False): - if not force and not self.is_running(): - return - - httpd_pid = None - if self._process: - httpd_pid = self._process.pid - self._port_obj._shut_down_http_server(httpd_pid) - - if self._process: - # wait() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - self._process.wait() - self._process = None diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py deleted file mode 100644 index 2745cce..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Base class with common routines between the Apache and Lighttpd servers.""" - -import logging -import os -import time -import urllib - -_log = logging.getLogger("webkitpy.layout_tests.port.http_server_base") - - -class HttpServerBase(object): - - def __init__(self, port_obj): - self._port_obj = port_obj - - def wait_for_action(self, action): - """Repeat the action for 20 seconds or until it succeeds. 
Returns - whether it succeeded.""" - start_time = time.time() - while time.time() - start_time < 20: - if action(): - return True - _log.debug("Waiting for action: %s" % action) - time.sleep(1) - - return False - - def is_server_running_on_all_ports(self): - """Returns whether the server is running on all the desired ports.""" - for mapping in self.mappings: - if 'sslcert' in mapping: - http_suffix = 's' - else: - http_suffix = '' - - url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port']) - - try: - response = urllib.urlopen(url) - _log.debug("Server running at %s" % url) - except IOError, e: - _log.debug("Server NOT running at %s: %s" % (url, e)) - return False - - return True - - def remove_log_files(self, folder, starts_with): - files = os.listdir(folder) - for file in files: - if file.startswith(starts_with): - full_path = os.path.join(folder, file) - os.remove(full_path) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/httpd2.pem b/WebKitTools/Scripts/webkitpy/layout_tests/port/httpd2.pem deleted file mode 100644 index 6349b78..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/httpd2.pem +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEZDCCAkygAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMRAwDgYDVQQDEwdUZXN0 -IENBMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMN -TW91bnRhaW4gVmlldzESMBAGA1UEChMJQ2VydCBUZXN0MB4XDTA4MDcyODIyMzIy -OFoXDTEzMDcyNzIyMzIyOFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm -b3JuaWExEjAQBgNVBAoTCUNlcnQgVGVzdDESMBAGA1UEAxMJMTI3LjAuMC4xMIGf -MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU3 -3BdRCd67DFM44GRrsjDSH4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYG -qgNiV2ywxTlMj7NlN2C7SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a -3JPJe1TaIab5GwIDAQABo4HCMIG/MAkGA1UdEwQCMAAwHQYDVR0OBBYEFCYLBv5K -x5sLNVlpLh5FwTwhdDl7MIGSBgNVHSMEgYowgYeAFF3Of5nj1BlBMU/Gz7El9Vqv -45cxoWSkYjBgMRAwDgYDVQQDEwdUZXN0IENBMQswCQYDVQQGEwJVUzETMBEGA1UE -CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzESMBAGA1UEChMJ -Q2VydCBUZXN0ggkA1FGT1D/e2U4wDQYJKoZIhvcNAQEFBQADggIBAEtkVmLObUgk -b2cIA2S+QDtifq1UgVfBbytvR2lFmnADOR55mo0gHQG3HHqq4g034LmoVXDHhUk8 -Gb6aFiv4QubmVhLXcUelTRXwiNvGzkW7pC6Jrq105hdPjzXMKTcmiLaopm5Fqfc7 -hj5Cn1Sjspc8pdeQjrbeMdvca7KlFrGP8YkwCU2xOOX9PiN9G0966BWfjnr/fZZp -+OQVuUFHdiAZwthEMuDpAAXHqYXIsermgdOpgJaA53cf8NqBV2QGhtFgtsJCRoiu -7DKqhyRWBGyz19VIH2b7y+6qvQVxuHk19kKRM0nftw/yNcJnm7gtttespMUPsOMa -a2SD1G0hm0TND6vxaBhgR3cVqpl/qIpAdFi00Tm7hTyYE7I43zPW03t+/DpCt3Um -EMRZsQ90co5q+bcx/vQ7YAtwUh30uMb0wpibeyCwDp8cqNmSiRkEuc/FjTYes5t8 -5gR//WX1l0+qjrjusO9NmoLnq2Yk6UcioX+z+q6Z/dudGfqhLfeWD2Q0LWYA242C -d7km5Y3KAt1PJdVsof/aiVhVdddY/OIEKTRQhWEdDbosy2eh16BCKXT2FFvhNDg1 -AYFvn6I8nj9IldMJiIc3DdhacEAEzRMeRgPdzAa1griKUGknxsyTyRii8ru0WS6w -DCNrlDOVXdzYGEZooBI76BDVY0W0akjV ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIICXQIBAAKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU33BdRCd67DFM44GRrsjDS -H4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYGqgNiV2ywxTlMj7NlN2C7 -SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a3JPJe1TaIab5GwIDAQAB -AoGANHXu8z2YIzlhE+bwhGm8MGBpKL3qhRuKjeriqMA36tWezOw8lY4ymEAU+Ulv -BsCdaxqydQoTYou57m4TyUHEcxq9pq3H0zB0qL709DdHi/t4zbV9XIoAzC5v0/hG -9+Ca29TwC02FCw+qLkNrtwCpwOcQmc+bPxqvFu1iMiahURECQQD2I/Hi2413CMZz -TBjl8fMiVO9GhA2J0sc8Qi+YcgJakaLD9xcbaiLkTzPZDlA389C1b6Ia+poAr4YA -Ve0FFbxpAkEA2OobayyHE/QtPEqoy6NLR57jirmVBNmSWWd4lAyL5UIHIYVttJZg -8CLvbzaU/iDGwR+wKsM664rKPHEmtlyo4wJBAMeSqYO5ZOCJGu9NWjrHjM3fdAsG -8zs2zhiLya+fcU0iHIksBW5TBmt71Jw/wMc9R5J1K0kYvFml98653O5si1ECQBCk -RV4/mE1rmlzZzYFyEcB47DQkcM5ictvxGEsje0gnfKyRtAz6zI0f4QbDRUMJ+LWw 
-XK+rMsYHa+SfOb0b9skCQQCLdeonsIpFDv/Uv+flHISy0WA+AFkLXrRkBKh6G/OD -dMHaNevkJgUnpceVEnkrdenp5CcEoFTI17pd+nBgDm/B ------END RSA PRIVATE KEY----- diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf b/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf deleted file mode 100644 index 26ca22f..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf +++ /dev/null @@ -1,90 +0,0 @@ -server.tag = "LightTPD/1.4.19 (Win32)" -server.modules = ( "mod_accesslog", - "mod_alias", - "mod_cgi", - "mod_rewrite" ) - -# default document root required -server.document-root = "." - -# files to check for if .../ is requested -index-file.names = ( "index.php", "index.pl", "index.cgi", - "index.html", "index.htm", "default.htm" ) -# mimetype mapping -mimetype.assign = ( - ".gif" => "image/gif", - ".jpg" => "image/jpeg", - ".jpeg" => "image/jpeg", - ".png" => "image/png", - ".svg" => "image/svg+xml", - ".css" => "text/css", - ".html" => "text/html", - ".htm" => "text/html", - ".xhtml" => "application/xhtml+xml", - ".xhtmlmp" => "application/vnd.wap.xhtml+xml", - ".js" => "application/x-javascript", - ".log" => "text/plain", - ".conf" => "text/plain", - ".text" => "text/plain", - ".txt" => "text/plain", - ".dtd" => "text/xml", - ".xml" => "text/xml", - ".manifest" => "text/cache-manifest", - ) - -# Use the "Content-Type" extended attribute to obtain mime type if possible -mimetype.use-xattr = "enable" - -## -# which extensions should not be handle via static-file transfer -# -# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi -static-file.exclude-extensions = ( ".php", ".pl", ".cgi" ) - -server.bind = "localhost" -server.port = 8001 - -## virtual directory listings -dir-listing.activate = "enable" -#dir-listing.encoding = "iso-8859-2" -#dir-listing.external-css = "style/oldstyle.css" - -## enable debugging -#debug.log-request-header = "enable" -#debug.log-response-header = "enable" -#debug.log-request-handling = "enable" -#debug.log-file-not-found = "enable" - -#### SSL engine -#ssl.engine = "enable" -#ssl.pemfile = "server.pem" - -# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html) -# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess -# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html. -# See the apache rewrite rule at -# LayoutTests/http/tests/appcache/resources/intercept/.htaccess -url.rewrite-once = ( - "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php", - "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php" -) - -# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess -# to override charset for reply2.txt, reply2.xml, and reply4.txt. -$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" { - mimetype.assign = ( - ".txt" => "text/plain; charset=windows-1251", - ".xml" => "text/xml; charset=windows-1251" - ) -} -$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" { - mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" ) -} - -# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess -# to override mime type for wrong-content-type.manifest. -$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" { - mimetype.assign = ( ".manifest" => "text/plain" ) -} - -# Autogenerated test-specific config follows. 
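The "# Autogenerated test-specific config follows." marker above is where the Lighttpd wrapper in http_server.py (also deleted in this change) appends its per-run settings: the cgi.assign handlers, the error/access log paths, server.upload-dirs, the /js-test-resources alias, and one $SERVER["socket"] block per port mapping. For illustration only, with placeholder paths, the tail appended for the default LayoutTests mappings would look roughly like this:

$SERVER["socket"] == "127.0.0.1:8000" {
    server.document-root = "/path/to/LayoutTests/http/tests"
}

$SERVER["socket"] == "127.0.0.1:8080" {
    server.document-root = "/path/to/LayoutTests/http/tests"
}

$SERVER["socket"] == "127.0.0.1:8443" {
    server.document-root = "/path/to/LayoutTests/http/tests"
    ssl.engine = "enable"
    ssl.pemfile = "/path/to/webkitpy/layout_tests/port/httpd2.pem"
}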
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py deleted file mode 100644 index 696e339..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""WebKit Mac implementation of the Port interface.""" - -import logging -import os -import platform -import signal - -import webkitpy.common.system.ospath as ospath -import webkitpy.layout_tests.port.server_process as server_process -from webkitpy.layout_tests.port.webkit import WebKitPort, WebKitDriver - -_log = logging.getLogger("webkitpy.layout_tests.port.mac") - - -class MacPort(WebKitPort): - """WebKit Mac implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'mac' + self.version()) - WebKitPort.__init__(self, **kwargs) - - def default_child_processes(self): - # FIXME: new-run-webkit-tests is unstable on Mac running more than - # four threads in parallel. - # See https://bugs.webkit.org/show_bug.cgi?id=36622 - child_processes = WebKitPort.default_child_processes(self) - if child_processes > 4: - return 4 - return child_processes - - def baseline_search_path(self): - port_names = [] - if self._name == 'mac-tiger': - port_names.append("mac-tiger") - if self._name in ('mac-tiger', 'mac-leopard'): - port_names.append("mac-leopard") - if self._name in ('mac-tiger', 'mac-leopard', 'mac-snowleopard'): - port_names.append("mac-snowleopard") - port_names.append("mac") - return map(self._webkit_baseline_path, port_names) - - def path_to_test_expectations_file(self): - return self.path_from_webkit_base('LayoutTests', 'platform', - 'mac', 'test_expectations.txt') - - def _skipped_file_paths(self): - # FIXME: This method will need to be made work for non-mac - # platforms and moved into base.Port. 
- skipped_files = [] - if self._name in ('mac-tiger', 'mac-leopard', 'mac-snowleopard'): - skipped_files.append(os.path.join( - self._webkit_baseline_path(self._name), 'Skipped')) - skipped_files.append(os.path.join(self._webkit_baseline_path('mac'), - 'Skipped')) - return skipped_files - - def test_platform_name(self): - return 'mac' + self.version() - - def version(self): - os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" - if not os_version_string: - return '-leopard' - release_version = int(os_version_string.split('.')[1]) - if release_version == 4: - return '-tiger' - elif release_version == 5: - return '-leopard' - elif release_version == 6: - return '-snowleopard' - return '' - - def _build_java_test_support(self): - java_tests_path = os.path.join(self.layout_tests_dir(), "java") - build_java = ["/usr/bin/make", "-C", java_tests_path] - if self._executive.run_command(build_java, return_exit_code=True): - _log.error("Failed to build Java support files: %s" % build_java) - return False - return True - - def _check_port_build(self): - return self._build_java_test_support() - - def _tests_for_other_platforms(self): - # The original run-webkit-tests builds up a "whitelist" of tests to - # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes - # we run *all* tests and test_expectations.txt functions as a - # blacklist. - # FIXME: This list could be dynamic based on platform name and - # pushed into base.Port. - return [ - "platform/chromium", - "platform/gtk", - "platform/qt", - "platform/win", - ] - - def _path_to_apache_config_file(self): - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'apache2-httpd.conf') - - # FIXME: This doesn't have anything to do with WebKit. - def _shut_down_http_server(self, server_pid): - """Shut down the lighttpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # FIXME: This isn't ideal, since it could conflict with - # lighttpd processes not started by http_server.py, - # but good enough for now. - self._executive.kill_all('httpd') - else: - try: - os.kill(server_pid, signal.SIGTERM) - # FIXME: Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid - # file), so if kill fails on the given PID, just try to - # 'killall' web servers. - self._shut_down_http_server(None) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py deleted file mode 100644 index d383a4c..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac_unittest.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import StringIO -import sys -import unittest - -import mac -import port_testcase - - -class MacTest(port_testcase.PortTestCase): - def make_port(self, options=port_testcase.mock_options): - if sys.platform != 'darwin': - return None - port_obj = mac.MacPort(options=options) - port_obj._options.results_directory = port_obj.results_directory() - port_obj._options.configuration = 'Release' - return port_obj - - def test_skipped_file_paths(self): - port = self.make_port() - if not port: - return - skipped_paths = port._skipped_file_paths() - # FIXME: _skipped_file_paths should return WebKit-relative paths. - # So to make it unit testable, we strip the WebKit directory from the path. - relative_paths = [path[len(port.path_from_webkit_base()):] for path in skipped_paths] - self.assertEqual(relative_paths, ['LayoutTests/platform/mac-leopard/Skipped', 'LayoutTests/platform/mac/Skipped']) - - example_skipped_file = u""" -# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events -fast/events/mouseout-on-window.html - -# <rdar://problem/5643675> window.scrollTo scrolls a window with no scrollbars -fast/events/attempt-scroll-with-no-scrollbars.html - -# see bug <rdar://problem/5646437> REGRESSION (r28015): svg/batik/text/smallFonts fails -svg/batik/text/smallFonts.svg -""" - example_skipped_tests = [ - "fast/events/mouseout-on-window.html", - "fast/events/attempt-scroll-with-no-scrollbars.html", - "svg/batik/text/smallFonts.svg", - ] - - def test_skipped_file_paths(self): - port = self.make_port() - if not port: - return - skipped_file = StringIO.StringIO(self.example_skipped_file) - self.assertEqual(port._tests_from_skipped_file(skipped_file), self.example_skipped_tests) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/port_testcase.py deleted file mode 100644 index c4b36ac..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/port_testcase.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit testing base class for Port implementations.""" - -import os -import tempfile -import unittest - -from webkitpy.tool import mocktool -mock_options = mocktool.MockOptions(results_directory='layout-test-results', - use_apache=True, - configuration='Release') - -# FIXME: This should be used for all ports, not just WebKit Mac. See -# https://bugs.webkit.org/show_bug.cgi?id=50043 . - -class PortTestCase(unittest.TestCase): - """Tests the WebKit port implementation.""" - def make_port(self, options=mock_options): - """Override in subclass.""" - raise NotImplementedError() - - def test_driver_cmd_line(self): - port = self.make_port() - if not port: - return - self.assertTrue(len(port.driver_cmd_line())) - - def test_http_server(self): - port = self.make_port() - if not port: - return - port.start_http_server() - port.stop_http_server() - - def test_image_diff(self): - port = self.make_port() - if not port: - return - - # FIXME: not sure why this shouldn't always be True - #self.assertTrue(port.check_image_diff()) - if not port.check_image_diff(): - return - - dir = port.layout_tests_dir() - file1 = os.path.join(dir, 'fast', 'css', 'button_center.png') - fh1 = file(file1) - contents1 = fh1.read() - file2 = os.path.join(dir, 'fast', 'css', - 'remove-shorthand-expected.png') - fh2 = file(file2) - contents2 = fh2.read() - tmpfile = tempfile.mktemp() - - self.assertFalse(port.diff_image(contents1, contents1)) - self.assertTrue(port.diff_image(contents1, contents2)) - - self.assertTrue(port.diff_image(contents1, contents2, tmpfile)) - fh1.close() - fh2.close() - # FIXME: this may not be being written? - # self.assertTrue(os.path.exists(tmpfile)) - # os.remove(tmpfile) - - def test_websocket_server(self): - port = self.make_port() - if not port: - return - port.start_websocket_server() - port.stop_websocket_server() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py deleted file mode 100644 index af94acc..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
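# Illustrative, standalone analogue (hypothetical names) of the PortTestCase
# pattern shown above: a platform's unit test subclasses the shared test case
# and only overrides make_port(); every shared test first calls make_port()
# and quietly returns when the current platform cannot build that port, which
# is the None return seen in MacTest.make_port().
import sys
import unittest

class SharedPortTests(unittest.TestCase):
    def make_port(self):
        return None                      # overridden by each platform's subclass

    def test_port_has_name(self):
        port = self.make_port()
        if not port:
            return                       # wrong platform; skip silently
        self.assertTrue(port['name'])

class FakeMacTests(SharedPortTests):
    def make_port(self):
        if sys.platform != 'darwin':
            return None
        return {'name': 'mac'}           # stands in for mac.MacPort(options=...)

if __name__ == '__main__':
    unittest.main()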
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""QtWebKit implementation of the Port interface.""" - -import logging -import os -import signal -import sys - -import webkit - -from webkitpy.layout_tests.port.webkit import WebKitPort - -_log = logging.getLogger("webkitpy.layout_tests.port.qt") - - -class QtPort(WebKitPort): - """QtWebKit implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'qt') - WebKitPort.__init__(self, **kwargs) - - def baseline_search_path(self): - port_names = [] - if sys.platform == 'linux2': - port_names.append("qt-linux") - elif sys.platform in ('win32', 'cygwin'): - port_names.append("qt-win") - elif sys.platform == 'darwin': - port_names.append("qt-mac") - port_names.append("qt") - return map(self._webkit_baseline_path, port_names) - - def _tests_for_other_platforms(self): - # FIXME: This list could be dynamic based on platform name and - # pushed into base.Port. - # This really need to be automated. - return [ - "platform/chromium", - "platform/win", - "platform/gtk", - "platform/mac", - ] - - def _path_to_apache_config_file(self): - # FIXME: This needs to detect the distribution and change config files. - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'apache2-debian-httpd.conf') - - def _shut_down_http_server(self, server_pid): - """Shut down the httpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # server_pid is not set when "http_server.py stop" is run manually. - if server_pid is None: - # FIXME: This isn't ideal, since it could conflict with - # lighttpd processes not started by http_server.py, - # but good enough for now. - self._executive.kill_all('apache2') - else: - try: - os.kill(server_pid, signal.SIGTERM) - # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? - except OSError: - # Sometimes we get a bad PID (e.g. from a stale httpd.pid - # file), so if kill fails on the given PID, just try to - # 'killall' web servers. 
- self._shut_down_http_server(None) - - def _build_driver(self): - # The Qt port builds DRT as part of the main build step - return True - - def _path_to_driver(self): - return self._build_path('bin/DumpRenderTree') - - def _path_to_image_diff(self): - return self._build_path('bin/ImageDiff') - - def _path_to_webcore_library(self): - return self._build_path('lib/libQtWebKit.so') - - def _runtime_feature_list(self): - return None - - def setup_environ_for_server(self): - env = webkit.WebKitPort.setup_environ_for_server(self) - env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins') - return env diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py deleted file mode 100644 index 5a0a40c..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py +++ /dev/null @@ -1,225 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Package that implements the ServerProcess wrapper class""" - -import logging -import os -import select -import signal -import subprocess -import sys -import time -if sys.platform != 'win32': - import fcntl - -from webkitpy.common.system.executive import Executive - -_log = logging.getLogger("webkitpy.layout_tests.port.server_process") - - -class ServerProcess: - """This class provides a wrapper around a subprocess that - implements a simple request/response usage model. The primary benefit - is that reading responses takes a timeout, so that we don't ever block - indefinitely. 
The class also handles transparently restarting processes - as necessary to keep issuing commands.""" - - def __init__(self, port_obj, name, cmd, env=None, executive=Executive()): - self._port = port_obj - self._name = name - self._cmd = cmd - self._env = env - self._reset() - self._executive = executive - - def _reset(self): - self._proc = None - self._output = '' - self.crashed = False - self.timed_out = False - self.error = '' - - def _start(self): - if self._proc: - raise ValueError("%s already running" % self._name) - self._reset() - # close_fds is a workaround for http://bugs.python.org/issue2320 - close_fds = sys.platform not in ('win32', 'cygwin') - self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=close_fds, - env=self._env) - fd = self._proc.stdout.fileno() - fl = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) - fd = self._proc.stderr.fileno() - fl = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) - - def handle_interrupt(self): - """This routine checks to see if the process crashed or exited - because of a keyboard interrupt and raises KeyboardInterrupt - accordingly.""" - if self.crashed: - # This is hex code 0xc000001d, which is used for abrupt - # termination. This happens if we hit ctrl+c from the prompt - # and we happen to be waiting on the DumpRenderTree. - # sdoyon: Not sure for which OS and in what circumstances the - # above code is valid. What works for me under Linux to detect - # ctrl+c is for the subprocess returncode to be negative - # SIGINT. And that agrees with the subprocess documentation. - if (-1073741510 == self._proc.returncode or - - signal.SIGINT == self._proc.returncode): - raise KeyboardInterrupt - return - - def poll(self): - """Check to see if the underlying process is running; returns None - if it still is (wrapper around subprocess.poll).""" - if self._proc: - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - return self._proc.poll() - return None - - def write(self, input): - """Write a request to the subprocess. The subprocess is (re-)start()'ed - if is not already running.""" - if not self._proc: - self._start() - self._proc.stdin.write(input) - - def read_line(self, timeout): - """Read a single line from the subprocess, waiting until the deadline. - If the deadline passes, the call times out. Note that even if the - subprocess has crashed or the deadline has passed, if there is output - pending, it will be returned. - - Args: - timeout: floating-point number of seconds the call is allowed - to block for. A zero or negative number will attempt to read - any existing data, but will not block. There is no way to - block indefinitely. - Returns: - output: data returned, if any. If no data is available and the - call times out or crashes, an empty string is returned. Note - that the returned string includes the newline ('\n').""" - return self._read(timeout, size=0) - - def read(self, timeout, size): - """Attempts to read size characters from the subprocess, waiting until - the deadline passes. If the deadline passes, any available data will be - returned. Note that even if the deadline has passed or if the - subprocess has crashed, any available data will still be returned. - - Args: - timeout: floating-point number of seconds the call is allowed - to block for. A zero or negative number will attempt to read - any existing data, but will not block. 
There is no way to - block indefinitely. - size: amount of data to read. Must be a postive integer. - Returns: - output: data returned, if any. If no data is available, an empty - string is returned. - """ - if size <= 0: - raise ValueError('ServerProcess.read() called with a ' - 'non-positive size: %d ' % size) - return self._read(timeout, size) - - def _read(self, timeout, size): - """Internal routine that actually does the read.""" - index = -1 - out_fd = self._proc.stdout.fileno() - err_fd = self._proc.stderr.fileno() - select_fds = (out_fd, err_fd) - deadline = time.time() + timeout - while not self.timed_out and not self.crashed: - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - if self._proc.poll() != None: - self.crashed = True - self.handle_interrupt() - - now = time.time() - if now > deadline: - self.timed_out = True - - # Check to see if we have any output we can return. - if size and len(self._output) >= size: - index = size - elif size == 0: - index = self._output.find('\n') + 1 - - if index > 0 or self.crashed or self.timed_out: - output = self._output[0:index] - self._output = self._output[index:] - return output - - # Nope - wait for more data. - (read_fds, write_fds, err_fds) = select.select(select_fds, [], - select_fds, - deadline - now) - try: - if out_fd in read_fds: - self._output += self._proc.stdout.read() - if err_fd in read_fds: - self.error += self._proc.stderr.read() - except IOError, e: - pass - - def stop(self): - """Stop (shut down) the subprocess), if it is running.""" - pid = self._proc.pid - self._proc.stdin.close() - self._proc.stdout.close() - if self._proc.stderr: - self._proc.stderr.close() - if sys.platform not in ('win32', 'cygwin'): - # Closing stdin/stdout/stderr hangs sometimes on OS X, - # (see restart(), above), and anyway we don't want to hang - # the harness if DumpRenderTree is buggy, so we wait a couple - # seconds to give DumpRenderTree a chance to clean up, but then - # force-kill the process if necessary. - KILL_TIMEOUT = 3.0 - timeout = time.time() + KILL_TIMEOUT - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - while self._proc.poll() is None and time.time() < timeout: - time.sleep(0.1) - # poll() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - if self._proc.poll() is None: - _log.warning('stopping %s timed out, killing it' % - self._name) - self._executive.kill_process(self._proc.pid) - _log.warning('killed') - self._reset() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py deleted file mode 100644 index 8e27f35..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. 
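# Illustrative sketch of the request/response usage model ServerProcess wraps,
# as described in its docstrings above.  Assumes a POSIX host with /bin/cat
# and that the module is importable at the path shown in this diff; port_obj
# is only stored by the constructor in the code above, so None is passed.
import sys
from webkitpy.layout_tests.port.server_process import ServerProcess

if __name__ == '__main__' and sys.platform != 'win32':
    sp = ServerProcess(None, 'cat', ['/bin/cat'])
    sp.write('hello\n')                 # lazily (re)starts the subprocess
    echoed = sp.read_line(5.0)          # bounded wait; '' if it times out
    assert echoed == 'hello\n' or sp.timed_out
    sp.stop()                           # crashes/timeouts surface via sp.crashed / sp.timed_out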
-# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Dummy Port implementation used for testing.""" -from __future__ import with_statement - -import codecs -import fnmatch -import os -import sys -import time - -from webkitpy.layout_tests.layout_package import test_output - -import base - - -# This sets basic expectations for a test. Each individual expectation -# can be overridden by a keyword argument in TestList.add(). -class TestInstance: - def __init__(self, name): - self.name = name - self.base = name[(name.rfind("/") + 1):name.rfind(".html")] - self.crash = False - self.exception = False - self.hang = False - self.keyboard = False - self.error = '' - self.timeout = False - self.actual_text = self.base + '-txt\n' - self.actual_checksum = self.base + '-checksum\n' - self.actual_image = self.base + '-png\n' - self.expected_text = self.actual_text - self.expected_checksum = self.actual_checksum - self.expected_image = self.actual_image - - -# This is an in-memory list of tests, what we want them to produce, and -# what we want to claim are the expected results. 
-class TestList: - def __init__(self, port): - self.port = port - self.tests = {} - - def add(self, name, **kwargs): - test = TestInstance(name) - for key, value in kwargs.items(): - test.__dict__[key] = value - self.tests[name] = test - - def keys(self): - return self.tests.keys() - - def __contains__(self, item): - return item in self.tests - - def __getitem__(self, item): - return self.tests[item] - - -class TestPort(base.Port): - """Test implementation of the Port interface.""" - - def __init__(self, **kwargs): - base.Port.__init__(self, **kwargs) - tests = TestList(self) - tests.add('passes/image.html') - tests.add('passes/text.html') - tests.add('failures/expected/checksum.html', - actual_checksum='checksum_fail-checksum') - tests.add('failures/expected/crash.html', crash=True) - tests.add('failures/expected/exception.html', exception=True) - tests.add('failures/expected/timeout.html', timeout=True) - tests.add('failures/expected/hang.html', hang=True) - tests.add('failures/expected/missing_text.html', - expected_text=None) - tests.add('failures/expected/image.html', - actual_image='image_fail-png', - expected_image='image-png') - tests.add('failures/expected/image_checksum.html', - actual_checksum='image_checksum_fail-checksum', - actual_image='image_checksum_fail-png') - tests.add('failures/expected/keyboard.html', - keyboard=True) - tests.add('failures/expected/missing_check.html', - expected_checksum=None) - tests.add('failures/expected/missing_image.html', - expected_image=None) - tests.add('failures/expected/missing_text.html', - expected_text=None) - tests.add('failures/expected/text.html', - actual_text='text_fail-png') - tests.add('failures/unexpected/text-image-checksum.html', - actual_text='text-image-checksum_fail-txt', - actual_checksum='text-image-checksum_fail-checksum') - tests.add('http/tests/passes/text.html') - tests.add('http/tests/ssl/text.html') - tests.add('passes/error.html', error='stuff going to stderr') - tests.add('passes/image.html') - tests.add('passes/platform_image.html') - tests.add('passes/text.html') - tests.add('websocket/tests/passes/text.html') - self._tests = tests - - def baseline_path(self): - return os.path.join(self.layout_tests_dir(), 'platform', - self.name() + self.version()) - - def baseline_search_path(self): - return [self.baseline_path()] - - def check_build(self, needs_http): - return True - - def diff_image(self, expected_contents, actual_contents, - diff_filename=None): - diffed = actual_contents != expected_contents - if diffed and diff_filename: - with codecs.open(diff_filename, "w", "utf-8") as diff_fh: - diff_fh.write("< %s\n---\n> %s\n" % - (expected_contents, actual_contents)) - return diffed - - def expected_checksum(self, test): - test = self.relative_test_filename(test) - return self._tests[test].expected_checksum - - def expected_image(self, test): - test = self.relative_test_filename(test) - return self._tests[test].expected_image - - def expected_text(self, test): - test = self.relative_test_filename(test) - text = self._tests[test].expected_text - if not text: - text = '' - return text - - def tests(self, paths): - # Test the idea of port-specific overrides for test lists. Also - # keep in memory to speed up the test harness. 
- if not paths: - paths = ['*'] - - matched_tests = [] - for p in paths: - if self.path_isdir(p): - matched_tests.extend(fnmatch.filter(self._tests.keys(), p + '*')) - else: - matched_tests.extend(fnmatch.filter(self._tests.keys(), p)) - layout_tests_dir = self.layout_tests_dir() - return set([os.path.join(layout_tests_dir, p) for p in matched_tests]) - - def path_exists(self, path): - # used by test_expectations.py and printing.py - rpath = self.relative_test_filename(path) - if rpath in self._tests: - return True - if self.path_isdir(rpath): - return True - if rpath.endswith('-expected.txt'): - test = rpath.replace('-expected.txt', '.html') - return (test in self._tests and - self._tests[test].expected_text) - if rpath.endswith('-expected.checksum'): - test = rpath.replace('-expected.checksum', '.html') - return (test in self._tests and - self._tests[test].expected_checksum) - if rpath.endswith('-expected.png'): - test = rpath.replace('-expected.png', '.html') - return (test in self._tests and - self._tests[test].expected_image) - return False - - def layout_tests_dir(self): - return self.path_from_webkit_base('WebKitTools', 'Scripts', - 'webkitpy', 'layout_tests', 'data') - - def path_isdir(self, path): - # Used by test_expectations.py - # - # We assume that a path is a directory if we have any tests that - # whose prefix matches the path plus a directory modifier. - if path[-1] != '/': - path += '/' - return any([t.startswith(path) for t in self._tests.keys()]) - - def test_dirs(self): - return ['passes', 'failures'] - - def name(self): - return self._name - - def _path_to_wdiff(self): - return None - - def results_directory(self): - return '/tmp/' + self.get_option('results_directory') - - def setup_test_run(self): - pass - - def create_driver(self, worker_number): - return TestDriver(self, worker_number) - - def start_http_server(self): - pass - - def start_websocket_server(self): - pass - - def stop_http_server(self): - pass - - def stop_websocket_server(self): - pass - - def test_expectations(self): - """Returns the test expectations for this port. - - Basically this string should contain the equivalent of a - test_expectations file. See test_expectations.py for more details.""" - return """ -WONTFIX : failures/expected/checksum.html = IMAGE -WONTFIX : failures/expected/crash.html = CRASH -// This one actually passes because the checksums will match. 
-WONTFIX : failures/expected/image.html = PASS -WONTFIX : failures/expected/image_checksum.html = IMAGE -WONTFIX : failures/expected/missing_check.html = MISSING PASS -WONTFIX : failures/expected/missing_image.html = MISSING PASS -WONTFIX : failures/expected/missing_text.html = MISSING PASS -WONTFIX : failures/expected/text.html = TEXT -WONTFIX : failures/expected/timeout.html = TIMEOUT -WONTFIX SKIP : failures/expected/hang.html = TIMEOUT -WONTFIX SKIP : failures/expected/keyboard.html = CRASH -WONTFIX SKIP : failures/expected/exception.html = CRASH -""" - - def test_base_platform_names(self): - return ('mac', 'win') - - def test_platform_name(self): - return 'mac' - - def test_platform_names(self): - return self.test_base_platform_names() - - def test_platform_name_to_name(self, test_platform_name): - return test_platform_name - - def version(self): - return '' - - -class TestDriver(base.Driver): - """Test/Dummy implementation of the DumpRenderTree interface.""" - - def __init__(self, port, worker_number): - self._port = port - - def cmd_line(self): - return ['None'] - - def poll(self): - return True - - def run_test(self, test_input): - start_time = time.time() - test_name = self._port.relative_test_filename(test_input.filename) - test = self._port._tests[test_name] - if test.keyboard: - raise KeyboardInterrupt - if test.exception: - raise ValueError('exception from ' + test_name) - if test.hang: - time.sleep((float(test_input.timeout) * 4) / 1000.0) - return test_output.TestOutput(test.actual_text, test.actual_image, - test.actual_checksum, test.crash, - time.time() - start_time, test.timeout, - test.error) - - def start(self): - pass - - def stop(self): - pass diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files.py deleted file mode 100644 index 2c0a7b6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files.py +++ /dev/null @@ -1,128 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
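# Illustrative, standalone sketch (hypothetical class name) of the in-memory
# test registry TestPort uses above: defaults are derived from the test name
# and individual expectations are overridden per test via keyword arguments,
# exactly as TestList.add(name, **kwargs) does.
class _FakeTest(object):
    def __init__(self, name, **overrides):
        base = name[name.rfind('/') + 1:name.rfind('.html')]
        self.crash = False
        self.timeout = False
        self.actual_text = base + '-txt\n'
        self.expected_text = self.actual_text
        self.__dict__.update(overrides)

tests = {}
for name, kwargs in [('passes/text.html', {}),
                     ('failures/expected/crash.html', {'crash': True})]:
    tests[name] = _FakeTest(name, **kwargs)

assert tests['failures/expected/crash.html'].crash
assert tests['passes/text.html'].actual_text == 'text-txt\n'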
- -"""This module is used to find all of the layout test files used by -run-webkit-tests. It exposes one public function - find() - -which takes an optional list of paths. If a list is passed in, the returned -list of test files is constrained to those found under the paths passed in, -i.e. calling find(["LayoutTests/fast"]) will only return files -under that directory.""" - -import glob -import os -import time - -from webkitpy.common.system import logutils - - -_log = logutils.get_logger(__file__) - - -# When collecting test cases, we include any file with these extensions. -_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.xhtmlmp', '.pl', - '.php', '.svg']) -# When collecting test cases, skip these directories -_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) - - -def find(port, paths): - """Finds the set of tests under port.layout_tests_dir(). - - Args: - paths: a list of command line paths relative to the layout_tests_dir() - to limit the search to. glob patterns are ok. - """ - gather_start_time = time.time() - paths_to_walk = set() - # if paths is empty, provide a pre-defined list. - if paths: - _log.debug("Gathering tests from: %s relative to %s" % (paths, port.layout_tests_dir())) - for path in paths: - # If there's an * in the name, assume it's a glob pattern. - path = os.path.join(port.layout_tests_dir(), path) - if path.find('*') > -1: - filenames = glob.glob(path) - paths_to_walk.update(filenames) - else: - paths_to_walk.add(path) - else: - _log.debug("Gathering tests from: %s" % port.layout_tests_dir()) - paths_to_walk.add(port.layout_tests_dir()) - - # Now walk all the paths passed in on the command line and get filenames - test_files = set() - for path in paths_to_walk: - if os.path.isfile(path) and _is_test_file(path): - test_files.add(os.path.normpath(path)) - continue - - for root, dirs, files in os.walk(path): - # Don't walk skipped directories or their sub-directories. - if os.path.basename(root) in _skipped_directories: - del dirs[:] - continue - # This copy and for-in is slightly inefficient, but - # the extra walk avoidance consistently shaves .5 seconds - # off of total walk() time on my MacBook Pro. 
- for directory in dirs[:]: - if directory in _skipped_directories: - dirs.remove(directory) - - for filename in files: - if _is_test_file(filename): - filename = os.path.join(root, filename) - filename = os.path.normpath(filename) - test_files.add(filename) - - gather_time = time.time() - gather_start_time - _log.debug("Test gathering took %f seconds" % gather_time) - - return test_files - - -def _has_supported_extension(filename): - """Return true if filename is one of the file extensions we want to run a - test on.""" - extension = os.path.splitext(filename)[1] - return extension in _supported_file_extensions - - -def _is_reference_html_file(filename): - """Return true if the filename points to a reference HTML file.""" - if (filename.endswith('-expected.html') or - filename.endswith('-expected-mismatch.html')): - _log.warn("Reftests are not supported - ignoring %s" % filename) - return True - return False - - -def _is_test_file(filename): - """Return true if the filename points to a test file.""" - return (_has_supported_extension(filename) and - not _is_reference_html_file(filename)) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py deleted file mode 100644 index 83525c8..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os -import unittest - -import base -import test_files - - -class TestFilesTest(unittest.TestCase): - def test_find_no_paths_specified(self): - port = base.Port() - layout_tests_dir = port.layout_tests_dir() - port.layout_tests_dir = lambda: os.path.join(layout_tests_dir, - 'fast', 'html') - tests = test_files.find(port, []) - self.assertNotEqual(tests, 0) - - def test_find_one_test(self): - port = base.Port() - # This is just a test picked at random but known to exist. 
- tests = test_files.find(port, ['fast/html/keygen.html']) - self.assertEqual(len(tests), 1) - - def test_find_glob(self): - port = base.Port() - tests = test_files.find(port, ['fast/html/key*']) - self.assertEqual(len(tests), 1) - - def test_find_with_skipped_directories(self): - port = base.Port() - tests = port.tests('userscripts') - self.assertTrue('userscripts/resources/frame1.html' not in tests) - - def test_find_with_skipped_directories_2(self): - port = base.Port() - tests = test_files.find(port, ['userscripts/resources']) - self.assertEqual(tests, set([])) - - def test_is_test_file(self): - self.assertTrue(test_files._is_test_file('foo.html')) - self.assertTrue(test_files._is_test_file('foo.shtml')) - self.assertFalse(test_files._is_test_file('foo.png')) - self.assertFalse(test_files._is_test_file('foo-expected.html')) - self.assertFalse(test_files._is_test_file('foo-expected-mismatch.html')) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py deleted file mode 100644 index 09be833..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py +++ /dev/null @@ -1,504 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
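# Illustrative, standalone sketch of the filtering rules test_files.py applies
# above: a file counts as a test when its extension is supported and it is not
# a reftest reference file, and the directory walk never descends into the
# skipped resource directories.
import os

_EXTENSIONS = set(['.html', '.shtml', '.xml', '.xhtml', '.xhtmlmp', '.pl', '.php', '.svg'])
_SKIPPED_DIRS = set(['.svn', '_svn', 'resources', 'script-tests'])

def is_test_file(filename):
    if filename.endswith('-expected.html') or filename.endswith('-expected-mismatch.html'):
        return False                      # reference file for a reftest
    return os.path.splitext(filename)[1] in _EXTENSIONS

assert is_test_file('fast/html/keygen.html')
assert not is_test_file('foo-expected.html')
assert not is_test_file('foo.png')
assert 'resources' in _SKIPPED_DIRS       # pruned from os.walk() above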
- -"""WebKit implementations of the Port interface.""" - - -from __future__ import with_statement - -import codecs -import logging -import os -import re -import shutil -import signal -import sys -import time -import webbrowser -import operator -import tempfile -import shutil - -import webkitpy.common.system.ospath as ospath -import webkitpy.layout_tests.layout_package.test_output as test_output -import webkitpy.layout_tests.port.base as base -import webkitpy.layout_tests.port.server_process as server_process - -_log = logging.getLogger("webkitpy.layout_tests.port.webkit") - - -class WebKitPort(base.Port): - """WebKit implementation of the Port class.""" - - def __init__(self, **kwargs): - base.Port.__init__(self, **kwargs) - self._cached_apache_path = None - - # FIXME: disable pixel tests until they are run by default on the - # build machines. - self.set_option_default('pixel_tests', False) - - def baseline_path(self): - return self._webkit_baseline_path(self._name) - - def baseline_search_path(self): - return [self._webkit_baseline_path(self._name)] - - def path_to_test_expectations_file(self): - return os.path.join(self._webkit_baseline_path(self._name), - 'test_expectations.txt') - - # Only needed by ports which maintain versioned test expectations (like mac-tiger vs. mac-leopard) - def version(self): - return '' - - def _build_driver(self): - configuration = self.get_option('configuration') - return self._config.build_dumprendertree(configuration) - - def _check_driver(self): - driver_path = self._path_to_driver() - if not os.path.exists(driver_path): - _log.error("DumpRenderTree was not found at %s" % driver_path) - return False - return True - - def check_build(self, needs_http): - if self.get_option('build') and not self._build_driver(): - return False - if not self._check_driver(): - return False - if self.get_option('pixel_tests'): - if not self.check_image_diff(): - return False - if not self._check_port_build(): - return False - return True - - def _check_port_build(self): - # Ports can override this method to do additional checks. - return True - - def check_image_diff(self, override_step=None, logging=True): - image_diff_path = self._path_to_image_diff() - if not os.path.exists(image_diff_path): - _log.error("ImageDiff was not found at %s" % image_diff_path) - return False - return True - - def diff_image(self, expected_contents, actual_contents, - diff_filename=None): - """Return True if the two files are different. Also write a delta - image of the two images into |diff_filename| if it is not None.""" - - # Handle the case where the test didn't actually generate an image. - if not actual_contents: - return True - - sp = self._diff_image_request(expected_contents, actual_contents) - return self._diff_image_reply(sp, diff_filename) - - def _diff_image_request(self, expected_contents, actual_contents): - # FIXME: use self.get_option('tolerance') and - # self.set_option_default('tolerance', 0.1) once that behaves correctly - # with default values. 
- if self.get_option('tolerance') is not None: - tolerance = self.get_option('tolerance') - else: - tolerance = 0.1 - command = [self._path_to_image_diff(), '--tolerance', str(tolerance)] - sp = server_process.ServerProcess(self, 'ImageDiff', command) - - sp.write('Content-Length: %d\n%sContent-Length: %d\n%s' % - (len(actual_contents), actual_contents, - len(expected_contents), expected_contents)) - - return sp - - def _diff_image_reply(self, sp, diff_filename): - timeout = 2.0 - deadline = time.time() + timeout - output = sp.read_line(timeout) - while not sp.timed_out and not sp.crashed and output: - if output.startswith('Content-Length'): - m = re.match('Content-Length: (\d+)', output) - content_length = int(m.group(1)) - timeout = deadline - time.time() - output = sp.read(timeout, content_length) - break - elif output.startswith('diff'): - break - else: - timeout = deadline - time.time() - output = sp.read_line(deadline) - - result = True - if output.startswith('diff'): - m = re.match('diff: (.+)% (passed|failed)', output) - if m.group(2) == 'passed': - result = False - elif output and diff_filename: - with open(diff_filename, 'w') as file: - file.write(output) - elif sp.timed_out: - _log.error("ImageDiff timed out") - elif sp.crashed: - _log.error("ImageDiff crashed") - sp.stop() - return result - - def results_directory(self): - # Results are store relative to the built products to make it easy - # to have multiple copies of webkit checked out and built. - return self._build_path(self.get_option('results_directory')) - - def setup_test_run(self): - # This port doesn't require any specific configuration. - pass - - def create_driver(self, worker_number): - return WebKitDriver(self, worker_number) - - def test_base_platform_names(self): - # At the moment we don't use test platform names, but we have - # to return something. - return ('mac', 'win') - - def _tests_for_other_platforms(self): - raise NotImplementedError('WebKitPort._tests_for_other_platforms') - # The original run-webkit-tests builds up a "whitelist" of tests to - # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes - # we run *all* tests and test_expectations.txt functions as a - # blacklist. - # FIXME: This list could be dynamic based on platform name and - # pushed into base.Port. - return [ - "platform/chromium", - "platform/gtk", - "platform/qt", - "platform/win", - ] - - def _runtime_feature_list(self): - """Return the supported features of DRT. If a port doesn't support - this DRT switch, it has to override this method to return None""" - driver_path = self._path_to_driver() - feature_list = ' '.join(os.popen(driver_path + " --print-supported-features 2>&1").readlines()) - if "SupportedFeatures:" in feature_list: - return feature_list - return None - - def _supported_symbol_list(self): - """Return the supported symbols of WebCore.""" - webcore_library_path = self._path_to_webcore_library() - if not webcore_library_path: - return None - symbol_list = ' '.join(os.popen("nm " + webcore_library_path).readlines()) - return symbol_list - - def _directories_for_features(self): - """Return the supported feature dictionary. The keys are the - features and the values are the directories in lists.""" - directories_for_features = { - "Accelerated Compositing": ["compositing"], - "3D Rendering": ["animations/3d", "transforms/3d"], - } - return directories_for_features - - def _directories_for_symbols(self): - """Return the supported feature dictionary. 
The keys are the - symbols and the values are the directories in lists.""" - directories_for_symbol = { - "MathMLElement": ["mathml"], - "GraphicsLayer": ["compositing"], - "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"], - "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"], - "WMLElement": ["http/tests/wml", "fast/wml", "wml"], - "parseWCSSInputProperty": ["fast/wcss"], - "isXHTMLMPDocument": ["fast/xhtmlmp"], - } - return directories_for_symbol - - def _skipped_tests_for_unsupported_features(self): - """Return the directories of unsupported tests. Search for the - symbols in the symbol_list, if found add the corresponding - directories to the skipped directory list.""" - feature_list = self._runtime_feature_list() - directories = self._directories_for_features() - - # if DRT feature detection not supported - if not feature_list: - feature_list = self._supported_symbol_list() - directories = self._directories_for_symbols() - - if not feature_list: - return [] - - skipped_directories = [directories[feature] - for feature in directories.keys() - if feature not in feature_list] - return reduce(operator.add, skipped_directories) - - def _tests_for_disabled_features(self): - # FIXME: This should use the feature detection from - # webkitperl/features.pm to match run-webkit-tests. - # For now we hard-code a list of features known to be disabled on - # the Mac platform. - disabled_feature_tests = [ - "fast/xhtmlmp", - "http/tests/wml", - "mathml", - "wml", - ] - # FIXME: webarchive tests expect to read-write from - # -expected.webarchive files instead of .txt files. - # This script doesn't know how to do that yet, so pretend they're - # just "disabled". - webarchive_tests = [ - "webarchive", - "svg/webarchive", - "http/tests/webarchive", - "svg/custom/image-with-prefix-in-webarchive.svg", - ] - unsupported_feature_tests = self._skipped_tests_for_unsupported_features() - return disabled_feature_tests + webarchive_tests + unsupported_feature_tests - - def _tests_from_skipped_file(self, skipped_file): - tests_to_skip = [] - for line in skipped_file.readlines(): - line = line.strip() - if line.startswith('#') or not len(line): - continue - tests_to_skip.append(line) - return tests_to_skip - - def _skipped_file_paths(self): - return [os.path.join(self._webkit_baseline_path(self._name), - 'Skipped')] - - def _expectations_from_skipped_files(self): - tests_to_skip = [] - for filename in self._skipped_file_paths(): - if not os.path.exists(filename): - _log.warn("Failed to open Skipped file: %s" % filename) - continue - with codecs.open(filename, "r", "utf-8") as skipped_file: - tests_to_skip.extend(self._tests_from_skipped_file(skipped_file)) - return tests_to_skip - - def test_expectations(self): - # The WebKit mac port uses a combination of a test_expectations file - # and 'Skipped' files. - expectations_path = self.path_to_test_expectations_file() - with codecs.open(expectations_path, "r", "utf-8") as file: - return file.read() + self._skips() - - def _skips(self): - # Each Skipped file contains a list of files - # or directories to be skipped during the test run. The total list - # of tests to skipped is given by the contents of the generic - # Skipped file found in platform/X plus a version-specific file - # found in platform/X-version. Duplicate entries are allowed. - # This routine reads those files and turns contents into the - # format expected by test_expectations. 
- - tests_to_skip = self.skipped_layout_tests() - skip_lines = map(lambda test_path: "BUG_SKIPPED SKIP : %s = FAIL" % - test_path, tests_to_skip) - return "\n".join(skip_lines) - - def skipped_layout_tests(self): - # Use a set to allow duplicates - tests_to_skip = set(self._expectations_from_skipped_files()) - tests_to_skip.update(self._tests_for_other_platforms()) - tests_to_skip.update(self._tests_for_disabled_features()) - return tests_to_skip - - def test_platform_name(self): - return self._name + self.version() - - def test_platform_names(self): - return self.test_base_platform_names() + ( - 'mac-tiger', 'mac-leopard', 'mac-snowleopard') - - def _build_path(self, *comps): - return self._filesystem.join(self._config.build_directory( - self.get_option('configuration')), *comps) - - def _path_to_driver(self): - return self._build_path('DumpRenderTree') - - def _path_to_webcore_library(self): - return None - - def _path_to_helper(self): - return None - - def _path_to_image_diff(self): - return self._build_path('ImageDiff') - - def _path_to_wdiff(self): - # FIXME: This does not exist on a default Mac OS X Leopard install. - return 'wdiff' - - def _path_to_apache(self): - if not self._cached_apache_path: - # The Apache binary path can vary depending on OS and distribution - # See http://wiki.apache.org/httpd/DistrosDefaultLayout - for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]: - if os.path.exists(path): - self._cached_apache_path = path - break - - if not self._cached_apache_path: - _log.error("Could not find apache. Not installed or unknown path.") - - return self._cached_apache_path - - -class WebKitDriver(base.Driver): - """WebKit implementation of the DumpRenderTree interface.""" - - def __init__(self, port, worker_number): - self._worker_number = worker_number - self._port = port - self._driver_tempdir = tempfile.mkdtemp(prefix='DumpRenderTree-') - - def __del__(self): - shutil.rmtree(self._driver_tempdir) - - def cmd_line(self): - cmd = self._command_wrapper(self._port.get_option('wrapper')) - cmd += [self._port._path_to_driver(), '-'] - - if self._port.get_option('pixel_tests'): - cmd.append('--pixel-tests') - - if self._port.get_option('use_drt'): - if self._port.get_option('accelerated_compositing'): - cmd.append('--enable-accelerated-compositing') - - if self._port.get_option('accelerated_2d_canvas'): - cmd.append('--enable-accelerated-2d-canvas') - - return cmd - - def start(self): - environment = self._port.setup_environ_for_server() - environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path() - environment['DUMPRENDERTREE_TEMP'] = self._driver_tempdir - self._server_process = server_process.ServerProcess(self._port, - "DumpRenderTree", self.cmd_line(), environment) - - def poll(self): - return self._server_process.poll() - - def restart(self): - self._server_process.stop() - self._server_process.start() - return - - # FIXME: This function is huge. - def run_test(self, test_input): - uri = self._port.filename_to_uri(test_input.filename) - if uri.startswith("file:///"): - command = uri[7:] - else: - command = uri - - if test_input.image_hash: - command += "'" + test_input.image_hash - command += "\n" - - start_time = time.time() - self._server_process.write(command) - - have_seen_content_type = False - actual_image_hash = None - output = str() # Use a byte array for output, even though it should be UTF-8. 
- image = str() - - timeout = int(test_input.timeout) / 1000.0 - deadline = time.time() + timeout - line = self._server_process.read_line(timeout) - while (not self._server_process.timed_out - and not self._server_process.crashed - and line.rstrip() != "#EOF"): - if (line.startswith('Content-Type:') and not - have_seen_content_type): - have_seen_content_type = True - else: - # Note: Text output from DumpRenderTree is always UTF-8. - # However, some tests (e.g. webarchives) spit out binary - # data instead of text. So to make things simple, we - # always treat the output as binary. - output += line - line = self._server_process.read_line(timeout) - timeout = deadline - time.time() - - # Now read a second block of text for the optional image data - remaining_length = -1 - HASH_HEADER = 'ActualHash: ' - LENGTH_HEADER = 'Content-Length: ' - line = self._server_process.read_line(timeout) - while (not self._server_process.timed_out - and not self._server_process.crashed - and line.rstrip() != "#EOF"): - if line.startswith(HASH_HEADER): - actual_image_hash = line[len(HASH_HEADER):].strip() - elif line.startswith('Content-Type:'): - pass - elif line.startswith(LENGTH_HEADER): - timeout = deadline - time.time() - content_length = int(line[len(LENGTH_HEADER):]) - image = self._server_process.read(timeout, content_length) - timeout = deadline - time.time() - line = self._server_process.read_line(timeout) - - error_lines = self._server_process.error.splitlines() - # FIXME: This is a hack. It is unclear why sometimes - # we do not get any error lines from the server_process - # probably we are not flushing stderr. - if error_lines and error_lines[-1] == "#EOF": - error_lines.pop() # Remove the expected "#EOF" - error = "\n".join(error_lines) - # FIXME: This seems like the wrong section of code to be doing - # this reset in. - self._server_process.error = "" - return test_output.TestOutput(output, image, actual_image_hash, - self._server_process.crashed, - time.time() - start_time, - self._server_process.timed_out, - error) - - def stop(self): - if self._server_process: - self._server_process.stop() - self._server_process = None diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py deleted file mode 100644 index 7b68310..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. 
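# Illustrative sketch of the per-test command WebKitDriver.run_test() above
# writes to DumpRenderTree's stdin: the test path (or URI), optionally followed
# by ' and the expected pixel hash, terminated by a newline; DRT then answers
# with a text block and an optional image block, each terminated by "#EOF".
def drt_command(uri, image_hash=None):
    command = uri[7:] if uri.startswith('file:///') else uri
    if image_hash:
        command += "'" + image_hash
    return command + '\n'

assert drt_command('file:///tests/fast/t.html') == '/tests/fast/t.html\n'
assert drt_command('http://127.0.0.1:8000/t.html', 'abc123') == "http://127.0.0.1:8000/t.html'abc123\n"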
IN NO EVENT SHALL UNIVERSITY OF SZEGED OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import unittest - -from webkitpy.layout_tests.port.webkit import WebKitPort - - -class TestWebKitPort(WebKitPort): - def __init__(self, symbol_list=None, feature_list=None): - self.symbol_list = symbol_list - self.feature_list = feature_list - - def _runtime_feature_list(self): - return self.feature_list - - def _supported_symbol_list(self): - return self.symbol_list - - def _tests_for_other_platforms(self): - return ["media", ] - - def _tests_for_disabled_features(self): - return ["accessibility", ] - - def _skipped_file_paths(self): - return [] - -class WebKitPortTest(unittest.TestCase): - - def test_skipped_directories_for_symbols(self): - supported_symbols = ["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"] - expected_directories = set(["mathml", "fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl", "http/tests/wml", "fast/wml", "wml", "fast/wcss"]) - result_directories = set(TestWebKitPort(supported_symbols, None)._skipped_tests_for_unsupported_features()) - self.assertEqual(result_directories, expected_directories) - - def test_skipped_directories_for_features(self): - supported_features = ["Accelerated Compositing", "Foo Feature"] - expected_directories = set(["animations/3d", "transforms/3d"]) - result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features()) - self.assertEqual(result_directories, expected_directories) - - def test_skipped_layout_tests(self): - self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(), - set(["media", "accessibility"])) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py deleted file mode 100644 index 926bc04..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""A class to help start/stop the PyWebSocket server used by layout tests.""" - - -from __future__ import with_statement - -import codecs -import logging -import optparse -import os -import subprocess -import sys -import tempfile -import time -import urllib - -import factory -import http_server - -from webkitpy.common.system.executive import Executive -from webkitpy.thirdparty.autoinstalled.pywebsocket import mod_pywebsocket - - -_log = logging.getLogger("webkitpy.layout_tests.port.websocket_server") - -_WS_LOG_PREFIX = 'pywebsocket.ws.log-' -_WSS_LOG_PREFIX = 'pywebsocket.wss.log-' - -_DEFAULT_WS_PORT = 8880 -_DEFAULT_WSS_PORT = 9323 - - -def url_is_alive(url): - """Checks to see if we get an http response from |url|. - We poll the url 20 times with a 0.5 second delay. If we don't - get a reply in that time, we give up and assume the httpd - didn't start properly. - - Args: - url: The URL to check. - Return: - True if the url is alive. - """ - sleep_time = 0.5 - wait_time = 10 - while wait_time > 0: - try: - response = urllib.urlopen(url) - # Server is up and responding. - return True - except IOError: - pass - # Wait for sleep_time before trying again. - wait_time -= sleep_time - time.sleep(sleep_time) - - return False - - -class PyWebSocketNotStarted(Exception): - pass - - -class PyWebSocketNotFound(Exception): - pass - - -class PyWebSocket(http_server.Lighttpd): - - def __init__(self, port_obj, output_dir, port=_DEFAULT_WS_PORT, - root=None, use_tls=False, - pidfile=None): - """Args: - output_dir: the absolute path to the layout test result directory - """ - http_server.Lighttpd.__init__(self, port_obj, output_dir, - port=_DEFAULT_WS_PORT, - root=root) - self._output_dir = output_dir - self._process = None - self._port = port - self._root = root - self._use_tls = use_tls - self._private_key = self._pem_file - self._certificate = self._pem_file - if self._port: - self._port = int(self._port) - if self._use_tls: - self._server_name = 'PyWebSocket(Secure)' - else: - self._server_name = 'PyWebSocket' - self._pidfile = pidfile - self._wsout = None - - # Webkit tests - if self._root: - self._layout_tests = os.path.abspath(self._root) - self._web_socket_tests = os.path.abspath( - os.path.join(self._root, 'http', 'tests', - 'websocket', 'tests')) - else: - try: - self._layout_tests = self._port_obj.layout_tests_dir() - self._web_socket_tests = os.path.join(self._layout_tests, - 'http', 'tests', 'websocket', 'tests') - except: - self._web_socket_tests = None - - def start(self): - if not self._web_socket_tests: - _log.info('No need to start %s server.' % self._server_name) - return - if self.is_running(): - raise PyWebSocketNotStarted('%s is already running.' % - self._server_name) - - time_str = time.strftime('%d%b%Y-%H%M%S') - if self._use_tls: - log_prefix = _WSS_LOG_PREFIX - else: - log_prefix = _WS_LOG_PREFIX - log_file_name = log_prefix + time_str - - # Remove old log files. We only need to keep the last ones. 
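The url_is_alive() helper above amounts to "poll the URL on a fixed interval until a time budget runs out". A compact, standalone version of that pattern (the wait_for_url name and the default budget are chosen here only for illustration; Python 3 would use urllib.request.urlopen):

import time
import urllib


def wait_for_url(url, budget_seconds=10, sleep_seconds=0.5):
    # Poll |url| until it responds or the time budget is exhausted.
    deadline = time.time() + budget_seconds
    while time.time() < deadline:
        try:
            urllib.urlopen(url)
            return True          # the server is up and answering requests
        except IOError:
            time.sleep(sleep_seconds)
    return False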
- self.remove_log_files(self._output_dir, log_prefix) - - error_log = os.path.join(self._output_dir, log_file_name + "-err.txt") - - output_log = os.path.join(self._output_dir, log_file_name + "-out.txt") - self._wsout = codecs.open(output_log, "w", "utf-8") - - python_interp = sys.executable - pywebsocket_base = os.path.join( - os.path.dirname(os.path.dirname(os.path.dirname( - os.path.abspath(__file__)))), 'thirdparty', - 'autoinstalled', 'pywebsocket') - pywebsocket_script = os.path.join(pywebsocket_base, 'mod_pywebsocket', - 'standalone.py') - start_cmd = [ - python_interp, '-u', pywebsocket_script, - '--server-host', '127.0.0.1', - '--port', str(self._port), - '--document-root', os.path.join(self._layout_tests, 'http', 'tests'), - '--scan-dir', self._web_socket_tests, - '--cgi-paths', '/websocket/tests', - '--log-file', error_log, - ] - - handler_map_file = os.path.join(self._web_socket_tests, - 'handler_map.txt') - if os.path.exists(handler_map_file): - _log.debug('Using handler_map_file: %s' % handler_map_file) - start_cmd.append('--websock-handlers-map-file') - start_cmd.append(handler_map_file) - else: - _log.warning('No handler_map_file found') - - if self._use_tls: - start_cmd.extend(['-t', '-k', self._private_key, - '-c', self._certificate]) - - env = self._port_obj.setup_environ_for_server() - env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + - env.get('PYTHONPATH', '')) - - _log.debug('Starting %s server on %d.' % ( - self._server_name, self._port)) - _log.debug('cmdline: %s' % ' '.join(start_cmd)) - # FIXME: We should direct this call through Executive for testing. - # Note: Not thread safe: http://bugs.python.org/issue2320 - self._process = subprocess.Popen(start_cmd, - stdin=open(os.devnull, 'r'), - stdout=self._wsout, - stderr=subprocess.STDOUT, - env=env) - - if self._use_tls: - url = 'https' - else: - url = 'http' - url = url + '://127.0.0.1:%d/' % self._port - if not url_is_alive(url): - if self._process.returncode == None: - # FIXME: We should use a non-static Executive for easier - # testing. - Executive().kill_process(self._process.pid) - with codecs.open(output_log, "r", "utf-8") as fp: - for line in fp: - _log.error(line) - raise PyWebSocketNotStarted( - 'Failed to start %s server on port %s.' % - (self._server_name, self._port)) - - # Our process terminated already - if self._process.returncode != None: - raise PyWebSocketNotStarted( - 'Failed to start %s server.' % self._server_name) - if self._pidfile: - with codecs.open(self._pidfile, "w", "ascii") as file: - file.write("%d" % self._process.pid) - - def stop(self, force=False): - if not force and not self.is_running(): - return - - pid = None - if self._process: - pid = self._process.pid - elif self._pidfile: - with codecs.open(self._pidfile, "r", "ascii") as file: - pid = int(file.read().strip()) - - if not pid: - raise PyWebSocketNotFound( - 'Failed to find %s server pid.' % self._server_name) - - _log.debug('Shutting down %s server %d.' % (self._server_name, pid)) - # FIXME: We should use a non-static Executive for easier testing. 
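The start/stop sequence above boils down to: build the command line, extend PYTHONPATH, spawn the server with its output redirected to a log, record the child pid in a pidfile, and later read that pidfile back to kill the process. A condensed sketch of that handshake follows; the standalone.py script name, port number and paths are illustrative stand-ins, and the real code routes the kill through Executive and polls the server before trusting it:

import os
import signal
import subprocess
import sys


def start_server(pidfile, logfile, port=8880):
    # Sketch: spawn a hypothetical server, capture its output, record its pid.
    log = open(logfile, 'w')
    env = os.environ.copy()
    env['PYTHONPATH'] = '/path/to/pywebsocket' + os.pathsep + env.get('PYTHONPATH', '')
    process = subprocess.Popen(
        [sys.executable, '-u', 'standalone.py', '--port', str(port)],
        stdin=open(os.devnull, 'r'), stdout=log, stderr=subprocess.STDOUT, env=env)
    with open(pidfile, 'w') as f:
        f.write('%d' % process.pid)
    return process


def stop_server(pidfile):
    # Sketch: recover the pid from the pidfile and terminate the server.
    with open(pidfile, 'r') as f:
        pid = int(f.read().strip())
    os.kill(pid, signal.SIGTERM)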
- Executive().kill_process(pid) - - if self._process: - # wait() is not threadsafe and can throw OSError due to: - # http://bugs.python.org/issue1731717 - self._process.wait() - self._process = None - - if self._wsout: - self._wsout.close() - self._wsout = None diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py deleted file mode 100644 index 9e30155..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the Google name nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""WebKit Win implementation of the Port interface.""" - -import logging -import os - -from webkitpy.layout_tests.port.webkit import WebKitPort - -_log = logging.getLogger("webkitpy.layout_tests.port.win") - - -class WinPort(WebKitPort): - """WebKit Win implementation of the Port class.""" - - def __init__(self, **kwargs): - kwargs.setdefault('port_name', 'win') - WebKitPort.__init__(self, **kwargs) - - def baseline_search_path(self): - # Based on code from old-run-webkit-tests expectedDirectoryForTest() - port_names = ["win", "mac-snowleopard", "mac"] - return map(self._webkit_baseline_path, port_names) - - def _tests_for_other_platforms(self): - # FIXME: This list could be dynamic based on platform name and - # pushed into base.Port. - # This really need to be automated. - return [ - "platform/chromium", - "platform/gtk", - "platform/qt", - "platform/mac", - ] - - def _path_to_apache_config_file(self): - return os.path.join(self.layout_tests_dir(), 'http', 'conf', - 'cygwin-httpd.conf') - - def _shut_down_http_server(self, server_pid): - """Shut down the httpd web server. Blocks until it's fully - shut down. - - Args: - server_pid: The process ID of the running server. - """ - # Looks like we ignore server_pid. - # Copy/pasted from chromium-win. 
- self._executive.kill_all("httpd.exe") diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py deleted file mode 100644 index 55c4558..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py +++ /dev/null @@ -1,966 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Rebaselining tool that automatically produces baselines for all platforms. - -The script does the following for each platform specified: - 1. Compile a list of tests that need rebaselining. - 2. Download test result archive from buildbot for the platform. - 3. Extract baselines from the archive file for all identified files. - 4. Add new baselines to SVN repository. - 5. For each test that has been rebaselined, remove this platform option from - the test in test_expectation.txt. If no other platforms remain after - removal, delete the rebaselined test from the file. - -At the end, the script generates a html that compares old and new baselines. -""" - -from __future__ import with_statement - -import codecs -import copy -import logging -import optparse -import os -import re -import shutil -import subprocess -import sys -import tempfile -import time -import urllib -import zipfile - -from webkitpy.common.system import path -from webkitpy.common.system import user -from webkitpy.common.system.executive import Executive, ScriptError -import webkitpy.common.checkout.scm as scm - -import port -from layout_package import test_expectations - -_log = logging.getLogger("webkitpy.layout_tests." 
- "rebaseline_chromium_webkit_tests") - -BASELINE_SUFFIXES = ['.txt', '.png', '.checksum'] -REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux'] -ARCHIVE_DIR_NAME_DICT = {'win': 'Webkit_Win', - 'win-vista': 'webkit-dbg-vista', - 'win-xp': 'Webkit_Win', - 'mac': 'Webkit_Mac10_5', - 'linux': 'webkit-rel-linux64', - 'win-canary': 'webkit-rel-webkit-org', - 'win-vista-canary': 'webkit-dbg-vista', - 'win-xp-canary': 'webkit-rel-webkit-org', - 'mac-canary': 'webkit-rel-mac-webkit-org', - 'linux-canary': 'webkit-rel-linux-webkit-org'} - - -def log_dashed_string(text, platform, logging_level=logging.INFO): - """Log text message with dashes on both sides.""" - - msg = text - if platform: - msg += ': ' + platform - if len(msg) < 78: - dashes = '-' * ((78 - len(msg)) / 2) - msg = '%s %s %s' % (dashes, msg, dashes) - - if logging_level == logging.ERROR: - _log.error(msg) - elif logging_level == logging.WARNING: - _log.warn(msg) - else: - _log.info(msg) - - -def setup_html_directory(html_directory): - """Setup the directory to store html results. - - All html related files are stored in the "rebaseline_html" subdirectory. - - Args: - html_directory: parent directory that stores the rebaselining results. - If None, a temp directory is created. - - Returns: - the directory that stores the html related rebaselining results. - """ - - if not html_directory: - html_directory = tempfile.mkdtemp() - elif not os.path.exists(html_directory): - os.mkdir(html_directory) - - html_directory = os.path.join(html_directory, 'rebaseline_html') - _log.info('Html directory: "%s"', html_directory) - - if os.path.exists(html_directory): - shutil.rmtree(html_directory, True) - _log.info('Deleted file at html directory: "%s"', html_directory) - - if not os.path.exists(html_directory): - os.mkdir(html_directory) - return html_directory - - -def get_result_file_fullpath(html_directory, baseline_filename, platform, - result_type): - """Get full path of the baseline result file. - - Args: - html_directory: directory that stores the html related files. - baseline_filename: name of the baseline file. - platform: win, linux or mac - result_type: type of the baseline result: '.txt', '.png'. - - Returns: - Full path of the baseline file for rebaselining result comparison. - """ - - base, ext = os.path.splitext(baseline_filename) - result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) - fullpath = os.path.join(html_directory, result_filename) - _log.debug(' Result file full path: "%s".', fullpath) - return fullpath - - -class Rebaseliner(object): - """Class to produce new baselines for a given platform.""" - - REVISION_REGEX = r'<a href=\"(\d+)/\">' - - def __init__(self, running_port, target_port, platform, options): - """ - Args: - running_port: the Port the script is running on. - target_port: the Port the script uses to find port-specific - configuration information like the test_expectations.txt - file location and the list of test platforms. - platform: the test platform to rebaseline - options: the command-line options object.""" - self._platform = platform - self._options = options - self._port = running_port - self._target_port = target_port - self._rebaseline_port = port.get( - self._target_port.test_platform_name_to_name(platform), options) - self._rebaselining_tests = [] - self._rebaselined_tests = [] - - # Create tests and expectations helper which is used to: - # -. compile list of tests that need rebaselining. - # -. 
update the tests in test_expectations file after rebaseline - # is done. - expectations_str = self._rebaseline_port.test_expectations() - self._test_expectations = \ - test_expectations.TestExpectations(self._rebaseline_port, - None, - expectations_str, - self._platform, - False, - False) - self._scm = scm.default_scm() - - def run(self, backup): - """Run rebaseline process.""" - - log_dashed_string('Compiling rebaselining tests', self._platform) - if not self._compile_rebaselining_tests(): - return True - - log_dashed_string('Downloading archive', self._platform) - archive_file = self._download_buildbot_archive() - _log.info('') - if not archive_file: - _log.error('No archive found.') - return False - - log_dashed_string('Extracting and adding new baselines', - self._platform) - if not self._extract_and_add_new_baselines(archive_file): - return False - - log_dashed_string('Updating rebaselined tests in file', - self._platform) - self._update_rebaselined_tests_in_file(backup) - _log.info('') - - if len(self._rebaselining_tests) != len(self._rebaselined_tests): - _log.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' - 'REBASELINED.') - _log.warning(' Total tests needing rebaselining: %d', - len(self._rebaselining_tests)) - _log.warning(' Total tests rebaselined: %d', - len(self._rebaselined_tests)) - return False - - _log.warning('All tests needing rebaselining were successfully ' - 'rebaselined.') - - return True - - def get_rebaselining_tests(self): - return self._rebaselining_tests - - def _compile_rebaselining_tests(self): - """Compile list of tests that need rebaselining for the platform. - - Returns: - List of tests that need rebaselining or - None if there is no such test. - """ - - self._rebaselining_tests = \ - self._test_expectations.get_rebaselining_failures() - if not self._rebaselining_tests: - _log.warn('No tests found that need rebaselining.') - return None - - _log.info('Total number of tests needing rebaselining ' - 'for "%s": "%d"', self._platform, - len(self._rebaselining_tests)) - - test_no = 1 - for test in self._rebaselining_tests: - _log.info(' %d: %s', test_no, test) - test_no += 1 - - return self._rebaselining_tests - - def _get_latest_revision(self, url): - """Get the latest layout test revision number from buildbot. - - Args: - url: Url to retrieve layout test revision numbers. - - Returns: - latest revision or - None on failure. - """ - - _log.debug('Url to retrieve revision: "%s"', url) - - f = urllib.urlopen(url) - content = f.read() - f.close() - - revisions = re.findall(self.REVISION_REGEX, content) - if not revisions: - _log.error('Failed to find revision, content: "%s"', content) - return None - - revisions.sort(key=int) - _log.info('Latest revision: "%s"', revisions[len(revisions) - 1]) - return revisions[len(revisions) - 1] - - def _get_archive_dir_name(self, platform, webkit_canary): - """Get name of the layout test archive directory. - - Returns: - Directory name or - None on failure - """ - - if webkit_canary: - platform += '-canary' - - if platform in ARCHIVE_DIR_NAME_DICT: - return ARCHIVE_DIR_NAME_DICT[platform] - else: - _log.error('Cannot find platform key %s in archive ' - 'directory name dictionary', platform) - return None - - def _get_archive_url(self): - """Generate the url to download latest layout test archive. 
- - Returns: - Url to download archive or - None on failure - """ - - if self._options.force_archive_url: - return self._options.force_archive_url - - dir_name = self._get_archive_dir_name(self._platform, - self._options.webkit_canary) - if not dir_name: - return None - - _log.debug('Buildbot platform dir name: "%s"', dir_name) - - url_base = '%s/%s/' % (self._options.archive_url, dir_name) - latest_revision = self._get_latest_revision(url_base) - if latest_revision is None or latest_revision <= 0: - return None - archive_url = ('%s%s/layout-test-results.zip' % (url_base, - latest_revision)) - _log.info('Archive url: "%s"', archive_url) - return archive_url - - def _download_buildbot_archive(self): - """Download layout test archive file from buildbot. - - Returns: - True if download succeeded or - False otherwise. - """ - - url = self._get_archive_url() - if url is None: - return None - - fn = urllib.urlretrieve(url)[0] - _log.info('Archive downloaded and saved to file: "%s"', fn) - return fn - - def _extract_and_add_new_baselines(self, archive_file): - """Extract new baselines from archive and add them to SVN repository. - - Args: - archive_file: full path to the archive file. - - Returns: - List of tests that have been rebaselined or - None on failure. - """ - - zip_file = zipfile.ZipFile(archive_file, 'r') - zip_namelist = zip_file.namelist() - - _log.debug('zip file namelist:') - for name in zip_namelist: - _log.debug(' ' + name) - - platform = self._rebaseline_port.test_platform_name_to_name( - self._platform) - _log.debug('Platform dir: "%s"', platform) - - test_no = 1 - self._rebaselined_tests = [] - for test in self._rebaselining_tests: - _log.info('Test %d: %s', test_no, test) - - found = False - scm_error = False - test_basename = os.path.splitext(test)[0] - for suffix in BASELINE_SUFFIXES: - archive_test_name = ('layout-test-results/%s-actual%s' % - (test_basename, suffix)) - _log.debug(' Archive test file name: "%s"', - archive_test_name) - if not archive_test_name in zip_namelist: - _log.info(' %s file not in archive.', suffix) - continue - - found = True - _log.info(' %s file found in archive.', suffix) - - # Extract new baseline from archive and save it to a temp file. - data = zip_file.read(archive_test_name) - temp_fd, temp_name = tempfile.mkstemp(suffix) - f = os.fdopen(temp_fd, 'wb') - f.write(data) - f.close() - - expected_filename = '%s-expected%s' % (test_basename, suffix) - expected_fullpath = os.path.join( - self._rebaseline_port.baseline_path(), expected_filename) - expected_fullpath = os.path.normpath(expected_fullpath) - _log.debug(' Expected file full path: "%s"', - expected_fullpath) - - # TODO(victorw): for now, the rebaselining tool checks whether - # or not THIS baseline is duplicate and should be skipped. - # We could improve the tool to check all baselines in upper - # and lower - # levels and remove all duplicated baselines. - if self._is_dup_baseline(temp_name, - expected_fullpath, - test, - suffix, - self._platform): - os.remove(temp_name) - self._delete_baseline(expected_fullpath) - continue - - # Create the new baseline directory if it doesn't already - # exist. 
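The download-and-extract steps above reduce to: fetch the results zip with urllib, read the member for the test out of the archive, and spill it into a temporary file so it can be compared or moved into place. A rough, self-contained sketch (fetch_archive_member is an invented name; the real script first scrapes the newest revision number out of the buildbot directory listing to build archive_url):

import os
import tempfile
import urllib
import zipfile


def fetch_archive_member(archive_url, member_name):
    # Download the archive to a local temporary path.
    local_zip = urllib.urlretrieve(archive_url)[0]
    zip_file = zipfile.ZipFile(local_zip, 'r')
    try:
        data = zip_file.read(member_name)
    finally:
        zip_file.close()
    # Spill the member into a temp file that keeps the original extension.
    fd, temp_name = tempfile.mkstemp(os.path.splitext(member_name)[1])
    with os.fdopen(fd, 'wb') as f:
        f.write(data)
    return temp_name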
- self._port.maybe_make_directory( - os.path.dirname(expected_fullpath)) - - shutil.move(temp_name, expected_fullpath) - - if 0 != self._scm.add(expected_fullpath, return_exit_code=True): - # FIXME: print detailed diagnose messages - scm_error = True - elif suffix != '.checksum': - self._create_html_baseline_files(expected_fullpath) - - if not found: - _log.warn(' No new baselines found in archive.') - else: - if scm_error: - _log.warn(' Failed to add baselines to your repository.') - else: - _log.info(' Rebaseline succeeded.') - self._rebaselined_tests.append(test) - - test_no += 1 - - zip_file.close() - os.remove(archive_file) - - return self._rebaselined_tests - - def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix, - platform): - """Check whether a baseline is duplicate and can fallback to same - baseline for another platform. For example, if a test has same - baseline on linux and windows, then we only store windows - baseline and linux baseline will fallback to the windows version. - - Args: - expected_filename: baseline expectation file name. - test: test name. - suffix: file suffix of the expected results, including dot; - e.g. '.txt' or '.png'. - platform: baseline platform 'mac', 'win' or 'linux'. - - Returns: - True if the baseline is unnecessary. - False otherwise. - """ - test_filepath = os.path.join(self._target_port.layout_tests_dir(), - test) - all_baselines = self._rebaseline_port.expected_baselines( - test_filepath, suffix, True) - for (fallback_dir, fallback_file) in all_baselines: - if fallback_dir and fallback_file: - fallback_fullpath = os.path.normpath( - os.path.join(fallback_dir, fallback_file)) - if fallback_fullpath.lower() != baseline_path.lower(): - with codecs.open(new_baseline, "r", - None) as file_handle1: - new_output = file_handle1.read() - with codecs.open(fallback_fullpath, "r", - None) as file_handle2: - fallback_output = file_handle2.read() - is_image = baseline_path.lower().endswith('.png') - if not self._diff_baselines(new_output, fallback_output, - is_image): - _log.info(' Found same baseline at %s', - fallback_fullpath) - return True - else: - return False - - return False - - def _diff_baselines(self, output1, output2, is_image): - """Check whether two baselines are different. - - Args: - output1, output2: contents of the baselines to compare. - - Returns: - True if two files are different or have different extensions. - False otherwise. - """ - - if is_image: - return self._port.diff_image(output1, output2, None) - else: - return self._port.compare_text(output1, output2) - - def _delete_baseline(self, filename): - """Remove the file from repository and delete it from disk. - - Args: - filename: full path of the file to delete. - """ - - if not filename or not os.path.isfile(filename): - return - self._scm.delete(filename) - - def _update_rebaselined_tests_in_file(self, backup): - """Update the rebaselined tests in test expectations file. - - Args: - backup: if True, backup the original test expectations file. 
- - Returns: - no - """ - - if self._rebaselined_tests: - new_expectations = ( - self._test_expectations.remove_platform_from_expectations( - self._rebaselined_tests, self._platform)) - path = self._target_port.path_to_test_expectations_file() - if backup: - date_suffix = time.strftime('%Y%m%d%H%M%S', - time.localtime(time.time())) - backup_file = ('%s.orig.%s' % (path, date_suffix)) - if os.path.exists(backup_file): - os.remove(backup_file) - _log.info('Saving original file to "%s"', backup_file) - os.rename(path, backup_file) - # FIXME: What encoding are these files? - # Or is new_expectations always a byte array? - with open(path, "w") as file: - file.write(new_expectations) - # self._scm.add(path) - else: - _log.info('No test was rebaselined so nothing to remove.') - - def _create_html_baseline_files(self, baseline_fullpath): - """Create baseline files (old, new and diff) in html directory. - - The files are used to compare the rebaselining results. - - Args: - baseline_fullpath: full path of the expected baseline file. - """ - - if not baseline_fullpath or not os.path.exists(baseline_fullpath): - return - - # Copy the new baseline to html directory for result comparison. - baseline_filename = os.path.basename(baseline_fullpath) - new_file = get_result_file_fullpath(self._options.html_directory, - baseline_filename, self._platform, - 'new') - shutil.copyfile(baseline_fullpath, new_file) - _log.info(' Html: copied new baseline file from "%s" to "%s".', - baseline_fullpath, new_file) - - # Get the old baseline from the repository and save to the html directory. - try: - output = self._scm.show_head(baseline_fullpath) - except ScriptError, e: - _log.info(e) - output = "" - - if (not output) or (output.upper().rstrip().endswith( - 'NO SUCH FILE OR DIRECTORY')): - _log.info(' No base file: "%s"', baseline_fullpath) - return - base_file = get_result_file_fullpath(self._options.html_directory, - baseline_filename, self._platform, - 'old') - # We should be using an explicit encoding here. - with open(base_file, "wb") as file: - file.write(output) - _log.info(' Html: created old baseline file: "%s".', - base_file) - - # Get the diff between old and new baselines and save to the html dir. 
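The backup handling above is a small, reusable pattern: move the current expectations file aside under a timestamped .orig name, then write the regenerated contents to the original path. Roughly (the helper name and the plain 'w' text mode are illustrative; as the FIXME above notes, the encoding question is left open):

import os
import time


def replace_file_with_backup(path, new_contents, backup=True):
    # Keep a timestamped copy of |path| aside before rewriting it.
    if backup and os.path.exists(path):
        suffix = time.strftime('%Y%m%d%H%M%S', time.localtime())
        backup_path = '%s.orig.%s' % (path, suffix)
        if os.path.exists(backup_path):
            os.remove(backup_path)
        os.rename(path, backup_path)
    with open(path, 'w') as f:
        f.write(new_contents)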
- if baseline_filename.upper().endswith('.TXT'): - output = self._scm.diff_for_file(baseline_fullpath, log=_log) - if output: - diff_file = get_result_file_fullpath( - self._options.html_directory, baseline_filename, - self._platform, 'diff') - with open(diff_file, 'wb') as file: - file.write(output) - _log.info(' Html: created baseline diff file: "%s".', - diff_file) - - -class HtmlGenerator(object): - """Class to generate rebaselining result comparison html.""" - - HTML_REBASELINE = ('<html>' - '<head>' - '<style>' - 'body {font-family: sans-serif;}' - '.mainTable {background: #666666;}' - '.mainTable td , .mainTable th {background: white;}' - '.detail {margin-left: 10px; margin-top: 3px;}' - '</style>' - '<title>Rebaselining Result Comparison (%(time)s)' - '</title>' - '</head>' - '<body>' - '<h2>Rebaselining Result Comparison (%(time)s)</h2>' - '%(body)s' - '</body>' - '</html>') - HTML_NO_REBASELINING_TESTS = ( - '<p>No tests found that need rebaselining.</p>') - HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>' - '%s</table><br>') - HTML_TR_TEST = ('<tr>' - '<th style="background-color: #CDECDE; border-bottom: ' - '1px solid black; font-size: 18pt; font-weight: bold" ' - 'colspan="5">' - '<a href="%s">%s</a>' - '</th>' - '</tr>') - HTML_TEST_DETAIL = ('<div class="detail">' - '<tr>' - '<th width="100">Baseline</th>' - '<th width="100">Platform</th>' - '<th width="200">Old</th>' - '<th width="200">New</th>' - '<th width="150">Difference</th>' - '</tr>' - '%s' - '</div>') - HTML_TD_NOLINK = '<td align=center><a>%s</a></td>' - HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>' - HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">' - '<img style="width: 200" src="%(uri)s" /></a></td>') - HTML_TR = '<tr>%s</tr>' - - def __init__(self, target_port, options, platforms, rebaselining_tests, - executive): - self._html_directory = options.html_directory - self._target_port = target_port - self._platforms = platforms - self._rebaselining_tests = rebaselining_tests - self._executive = executive - self._html_file = os.path.join(options.html_directory, - 'rebaseline.html') - - def abspath_to_uri(self, filename): - """Converts an absolute path to a file: URI.""" - return path.abspath_to_uri(filename, self._executive) - - def generate_html(self): - """Generate html file for rebaselining result comparison.""" - - _log.info('Generating html file') - - html_body = '' - if not self._rebaselining_tests: - html_body += self.HTML_NO_REBASELINING_TESTS - else: - tests = list(self._rebaselining_tests) - tests.sort() - - test_no = 1 - for test in tests: - _log.info('Test %d: %s', test_no, test) - html_body += self._generate_html_for_one_test(test) - - html = self.HTML_REBASELINE % ({'time': time.asctime(), - 'body': html_body}) - _log.debug(html) - - with codecs.open(self._html_file, "w", "utf-8") as file: - file.write(html) - - _log.info('Baseline comparison html generated at "%s"', - self._html_file) - - def show_html(self): - """Launch the rebaselining html in brwoser.""" - - _log.info('Launching html: "%s"', self._html_file) - user.User().open_url(self._html_file) - _log.info('Html launched.') - - def _generate_baseline_links(self, test_basename, suffix, platform): - """Generate links for baseline results (old, new and diff). 
- - Args: - test_basename: base filename of the test - suffix: baseline file suffixes: '.txt', '.png' - platform: win, linux or mac - - Returns: - html links for showing baseline results (old, new and diff) - """ - - baseline_filename = '%s-expected%s' % (test_basename, suffix) - _log.debug(' baseline filename: "%s"', baseline_filename) - - new_file = get_result_file_fullpath(self._html_directory, - baseline_filename, platform, 'new') - _log.info(' New baseline file: "%s"', new_file) - if not os.path.exists(new_file): - _log.info(' No new baseline file: "%s"', new_file) - return '' - - old_file = get_result_file_fullpath(self._html_directory, - baseline_filename, platform, 'old') - _log.info(' Old baseline file: "%s"', old_file) - if suffix == '.png': - html_td_link = self.HTML_TD_LINK_IMG - else: - html_td_link = self.HTML_TD_LINK - - links = '' - if os.path.exists(old_file): - links += html_td_link % { - 'uri': self.abspath_to_uri(old_file), - 'name': baseline_filename} - else: - _log.info(' No old baseline file: "%s"', old_file) - links += self.HTML_TD_NOLINK % '' - - links += html_td_link % {'uri': self.abspath_to_uri(new_file), - 'name': baseline_filename} - - diff_file = get_result_file_fullpath(self._html_directory, - baseline_filename, platform, - 'diff') - _log.info(' Baseline diff file: "%s"', diff_file) - if os.path.exists(diff_file): - links += html_td_link % {'uri': self.abspath_to_uri(diff_file), - 'name': 'Diff'} - else: - _log.info(' No baseline diff file: "%s"', diff_file) - links += self.HTML_TD_NOLINK % '' - - return links - - def _generate_html_for_one_test(self, test): - """Generate html for one rebaselining test. - - Args: - test: layout test name - - Returns: - html that compares baseline results for the test. - """ - - test_basename = os.path.basename(os.path.splitext(test)[0]) - _log.info(' basename: "%s"', test_basename) - rows = [] - for suffix in BASELINE_SUFFIXES: - if suffix == '.checksum': - continue - - _log.info(' Checking %s files', suffix) - for platform in self._platforms: - links = self._generate_baseline_links(test_basename, suffix, - platform) - if links: - row = self.HTML_TD_NOLINK % self._get_baseline_result_type( - suffix) - row += self.HTML_TD_NOLINK % platform - row += links - _log.debug(' html row: %s', row) - - rows.append(self.HTML_TR % row) - - if rows: - test_path = os.path.join(self._target_port.layout_tests_dir(), - test) - html = self.HTML_TR_TEST % (self.abspath_to_uri(test_path), test) - html += self.HTML_TEST_DETAIL % ' '.join(rows) - - _log.debug(' html for test: %s', html) - return self.HTML_TABLE_TEST % html - - return '' - - def _get_baseline_result_type(self, suffix): - """Name of the baseline result type.""" - - if suffix == '.png': - return 'Pixel' - elif suffix == '.txt': - return 'Render Tree' - else: - return 'Other' - - -def get_host_port_object(options): - """Return a port object for the platform we're running on.""" - # The only thing we really need on the host is a way to diff - # text files and image files, which means we need to check that some - # version of ImageDiff has been built. We will look for either Debug - # or Release versions of the default port on the platform. 
- options.configuration = "Release" - port_obj = port.get(None, options) - if not port_obj.check_image_diff(override_step=None, logging=False): - _log.debug('No release version of the image diff binary was found.') - options.configuration = "Debug" - port_obj = port.get(None, options) - if not port_obj.check_image_diff(override_step=None, logging=False): - _log.error('No version of image diff was found. Check your build.') - return None - else: - _log.debug('Found the debug version of the image diff binary.') - else: - _log.debug('Found the release version of the image diff binary.') - return port_obj - - -def parse_options(args): - """Parse options and return a pair of host options and target options.""" - option_parser = optparse.OptionParser() - option_parser.add_option('-v', '--verbose', - action='store_true', - default=False, - help='include debug-level logging.') - - option_parser.add_option('-q', '--quiet', - action='store_true', - help='Suppress result HTML viewing') - - option_parser.add_option('-p', '--platforms', - default='mac,win,win-xp,win-vista,linux', - help=('Comma delimited list of platforms ' - 'that need rebaselining.')) - - option_parser.add_option('-u', '--archive_url', - default=('http://build.chromium.org/buildbot/' - 'layout_test_results'), - help=('Url to find the layout test result archive' - ' file.')) - option_parser.add_option('-U', '--force_archive_url', - help=('Url of result zip file. This option is for debugging ' - 'purposes')) - - option_parser.add_option('-w', '--webkit_canary', - action='store_true', - default=False, - help=('If True, pull baselines from webkit.org ' - 'canary bot.')) - - option_parser.add_option('-b', '--backup', - action='store_true', - default=False, - help=('Whether or not to backup the original test' - ' expectations file after rebaseline.')) - - option_parser.add_option('-d', '--html_directory', - default='', - help=('The directory that stores the results for ' - 'rebaselining comparison.')) - - option_parser.add_option('', '--use_drt', - action='store_true', - default=False, - help=('Use ImageDiff from DumpRenderTree instead ' - 'of image_diff for pixel tests.')) - - option_parser.add_option('', '--target-platform', - default='chromium', - help=('The target platform to rebaseline ' - '("mac", "chromium", "qt", etc.). Defaults ' - 'to "chromium".')) - options = option_parser.parse_args(args)[0] - - target_options = copy.copy(options) - if options.target_platform == 'chromium': - target_options.chromium = True - options.tolerance = 0 - - return (options, target_options) - - -def main(executive=Executive()): - """Main function to produce new baselines.""" - - (options, target_options) = parse_options(sys.argv[1:]) - - # We need to create three different Port objects over the life of this - # script. |target_port_obj| is used to determine configuration information: - # location of the expectations file, names of ports to rebaseline, etc. - # |port_obj| is used for runtime functionality like actually diffing - # Then we create a rebaselining port to actual find and manage the - # baselines. - target_port_obj = port.get(None, target_options) - - # Set up our logging format. - log_level = logging.INFO - if options.verbose: - log_level = logging.DEBUG - logging.basicConfig(level=log_level, - format=('%(asctime)s %(filename)s:%(lineno)-3d ' - '%(levelname)s %(message)s'), - datefmt='%y%m%d %H:%M:%S') - - host_port_obj = get_host_port_object(options) - if not host_port_obj: - sys.exit(1) - - # Verify 'platforms' option is valid. 
- if not options.platforms: - _log.error('Invalid "platforms" option. --platforms must be ' - 'specified in order to rebaseline.') - sys.exit(1) - platforms = [p.strip().lower() for p in options.platforms.split(',')] - for platform in platforms: - if not platform in REBASELINE_PLATFORM_ORDER: - _log.error('Invalid platform: "%s"' % (platform)) - sys.exit(1) - - # Adjust the platform order so rebaseline tool is running at the order of - # 'mac', 'win' and 'linux'. This is in same order with layout test baseline - # search paths. It simplifies how the rebaseline tool detects duplicate - # baselines. Check _IsDupBaseline method for details. - rebaseline_platforms = [] - for platform in REBASELINE_PLATFORM_ORDER: - if platform in platforms: - rebaseline_platforms.append(platform) - - options.html_directory = setup_html_directory(options.html_directory) - - rebaselining_tests = set() - backup = options.backup - for platform in rebaseline_platforms: - rebaseliner = Rebaseliner(host_port_obj, target_port_obj, - platform, options) - - _log.info('') - log_dashed_string('Rebaseline started', platform) - if rebaseliner.run(backup): - # Only need to backup one original copy of test expectation file. - backup = False - log_dashed_string('Rebaseline done', platform) - else: - log_dashed_string('Rebaseline failed', platform, logging.ERROR) - - rebaselining_tests |= set(rebaseliner.get_rebaselining_tests()) - - _log.info('') - log_dashed_string('Rebaselining result comparison started', None) - html_generator = HtmlGenerator(target_port_obj, - options, - rebaseline_platforms, - rebaselining_tests, - executive=executive) - html_generator.generate_html() - if not options.quiet: - html_generator.show_html() - log_dashed_string('Rebaselining result comparison done', None) - - sys.exit(0) - -if '__main__' == __name__: - main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py deleted file mode 100644 index 7c55b94..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py +++ /dev/null @@ -1,157 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit tests for rebaseline_chromium_webkit_tests.py.""" - -import os -import sys -import unittest - -from webkitpy.tool import mocktool -from webkitpy.layout_tests import port -from webkitpy.layout_tests import rebaseline_chromium_webkit_tests -from webkitpy.common.system.executive import Executive, ScriptError - - -class MockPort(object): - def __init__(self, image_diff_exists): - self.image_diff_exists = image_diff_exists - - def check_image_diff(self, override_step, logging): - return self.image_diff_exists - - -def get_mock_get(config_expectations): - def mock_get(port_name, options): - return MockPort(config_expectations[options.configuration]) - return mock_get - - -class TestGetHostPortObject(unittest.TestCase): - def assert_result(self, release_present, debug_present, valid_port_obj): - # Tests whether we get a valid port object returned when we claim - # that Image diff is (or isn't) present in the two configs. - port.get = get_mock_get({'Release': release_present, - 'Debug': debug_present}) - options = mocktool.MockOptions(configuration=None, - html_directory=None) - port_obj = rebaseline_chromium_webkit_tests.get_host_port_object( - options) - if valid_port_obj: - self.assertNotEqual(port_obj, None) - else: - self.assertEqual(port_obj, None) - - def test_get_host_port_object(self): - # Save the normal port.get() function for future testing. - old_get = port.get - - # Test whether we get a valid port object back for the four - # possible cases of having ImageDiffs built. It should work when - # there is at least one binary present. - self.assert_result(False, False, False) - self.assert_result(True, False, True) - self.assert_result(False, True, True) - self.assert_result(True, True, True) - - # Restore the normal port.get() function. - port.get = old_get - - -class TestRebaseliner(unittest.TestCase): - def make_rebaseliner(self): - options = mocktool.MockOptions(configuration=None, - html_directory=None) - host_port_obj = port.get('test', options) - target_options = options - target_port_obj = port.get('test', target_options) - platform = 'test' - return rebaseline_chromium_webkit_tests.Rebaseliner( - host_port_obj, target_port_obj, platform, options) - - def test_parse_options(self): - (options, target_options) = rebaseline_chromium_webkit_tests.parse_options([]) - self.assertTrue(target_options.chromium) - self.assertEqual(options.tolerance, 0) - - (options, target_options) = rebaseline_chromium_webkit_tests.parse_options(['--target-platform', 'qt']) - self.assertFalse(hasattr(target_options, 'chromium')) - self.assertEqual(options.tolerance, 0) - - def test_noop(self): - # this method tests that was can at least instantiate an object, even - # if there is nothing to do. 
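The tests above patch port.get by hand and restore it when they are done; TestHtmlGenerator below does the same with sys.platform and os.path.exists. A generic version of that save/patch/restore pattern, using os.path.exists as the stubbed function and a finally block so a failing assertion cannot leak the stub:

import os
import unittest


class StubbedExistsTest(unittest.TestCase):
    def test_with_stubbed_exists(self):
        original_exists = os.path.exists          # save the real implementation
        os.path.exists = lambda path: True        # pretend every path exists
        try:
            self.assertTrue(os.path.exists('/definitely/not/there'))
        finally:
            os.path.exists = original_exists      # always restore, even on failure


if __name__ == '__main__':
    unittest.main()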
- rebaseliner = self.make_rebaseliner() - self.assertNotEqual(rebaseliner, None) - - def test_diff_baselines_txt(self): - rebaseliner = self.make_rebaseliner() - output = rebaseliner._port.expected_text( - os.path.join(rebaseliner._port.layout_tests_dir(), - 'passes/text.html')) - self.assertFalse(rebaseliner._diff_baselines(output, output, - is_image=False)) - - def test_diff_baselines_png(self): - rebaseliner = self.make_rebaseliner() - image = rebaseliner._port.expected_image( - os.path.join(rebaseliner._port.layout_tests_dir(), - 'passes/image.html')) - self.assertFalse(rebaseliner._diff_baselines(image, image, - is_image=True)) - - -class TestHtmlGenerator(unittest.TestCase): - def make_generator(self, tests): - return rebaseline_chromium_webkit_tests.HtmlGenerator( - target_port=None, - options=mocktool.MockOptions(configuration=None, - html_directory='/tmp'), - platforms=['mac'], - rebaselining_tests=tests, - executive=Executive()) - - def test_generate_baseline_links(self): - orig_platform = sys.platform - orig_exists = os.path.exists - - try: - sys.platform = 'darwin' - os.path.exists = lambda x: True - generator = self.make_generator(["foo.txt"]) - links = generator._generate_baseline_links("foo", ".txt", "mac") - expected_links = '<td align=center><a href="file:///tmp/foo-expected-mac-old.txt">foo-expected.txt</a></td><td align=center><a href="file:///tmp/foo-expected-mac-new.txt">foo-expected.txt</a></td><td align=center><a href="file:///tmp/foo-expected-mac-diff.txt">Diff</a></td>' - self.assertEqual(links, expected_links) - finally: - sys.platform = orig_platform - os.path.exists = orig_exists - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py deleted file mode 100755 index 643e204..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py +++ /dev/null @@ -1,1634 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Run layout tests. - -This is a port of the existing webkit test script run-webkit-tests. - -The TestRunner class runs a series of tests (TestType interface) against a set -of test files. If a test file fails a TestType, it returns a list TestFailure -objects to the TestRunner. The TestRunner then aggregates the TestFailures to -create a final report. - -This script reads several files, if they exist in the test_lists subdirectory -next to this script itself. Each should contain a list of paths to individual -tests or entire subdirectories of tests, relative to the outermost test -directory. Entire lines starting with '//' (comments) will be ignored. - -For details of the files' contents and purposes, see test_lists/README. -""" - -from __future__ import with_statement - -import codecs -import errno -import glob -import logging -import math -import optparse -import os -import platform -import Queue -import random -import re -import shutil -import signal -import sys -import time -import traceback - -from layout_package import dump_render_tree_thread -from layout_package import json_layout_results_generator -from layout_package import message_broker -from layout_package import printing -from layout_package import test_expectations -from layout_package import test_failures -from layout_package import test_results -from layout_package import test_results_uploader - -from webkitpy.common.system import user -from webkitpy.thirdparty import simplejson -from webkitpy.tool import grammar - -import port - -_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests") - -# Builder base URL where we have the archived test results. -BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" - -LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep - -TestExpectationsFile = test_expectations.TestExpectationsFile - - -class TestInput: - """Groups information about a test for easy passing of data.""" - - def __init__(self, filename, timeout): - """Holds the input parameters for a test. - Args: - filename: Full path to the test. - timeout: Timeout in msecs the driver should use while running the test - """ - # FIXME: filename should really be test_name as a relative path. - self.filename = filename - self.timeout = timeout - # The image_hash is used to avoid doing an image dump if the - # checksums match. The image_hash is set later, and only if it is needed - # for the test. - self.image_hash = None - - -class ResultSummary(object): - """A class for partitioning the test results we get into buckets. 
- - This class is basically a glorified struct and it's private to this file - so we don't bother with any information hiding.""" - - def __init__(self, expectations, test_files): - self.total = len(test_files) - self.remaining = self.total - self.expectations = expectations - self.expected = 0 - self.unexpected = 0 - self.tests_by_expectation = {} - self.tests_by_timeline = {} - self.results = {} - self.unexpected_results = {} - self.failures = {} - self.tests_by_expectation[test_expectations.SKIP] = set() - for expectation in TestExpectationsFile.EXPECTATIONS.values(): - self.tests_by_expectation[expectation] = set() - for timeline in TestExpectationsFile.TIMELINES.values(): - self.tests_by_timeline[timeline] = ( - expectations.get_tests_with_timeline(timeline)) - - def add(self, result, expected): - """Add a TestResult into the appropriate bin. - - Args: - result: TestResult from dump_render_tree_thread. - expected: whether the result was what we expected it to be. - """ - - self.tests_by_expectation[result.type].add(result.filename) - self.results[result.filename] = result - self.remaining -= 1 - if len(result.failures): - self.failures[result.filename] = result.failures - if expected: - self.expected += 1 - else: - self.unexpected_results[result.filename] = result.type - self.unexpected += 1 - - -def summarize_unexpected_results(port_obj, expectations, result_summary, - retry_summary): - """Summarize any unexpected results as a dict. - - FIXME: split this data structure into a separate class? - - Args: - port_obj: interface to port-specific hooks - expectations: test_expectations.TestExpectations object - result_summary: summary object from initial test runs - retry_summary: summary object from final test run of retried tests - Returns: - A dictionary containing a summary of the unexpected results from the - run, with the following fields: - 'version': a version indicator (1 in this version) - 'fixable': # of fixable tests (NOW - PASS) - 'skipped': # of skipped tests (NOW & SKIPPED) - 'num_regressions': # of non-flaky failures - 'num_flaky': # of flaky failures - 'num_passes': # of unexpected passes - 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} - """ - results = {} - results['version'] = 1 - - tbe = result_summary.tests_by_expectation - tbt = result_summary.tests_by_timeline - results['fixable'] = len(tbt[test_expectations.NOW] - - tbe[test_expectations.PASS]) - results['skipped'] = len(tbt[test_expectations.NOW] & - tbe[test_expectations.SKIP]) - - num_passes = 0 - num_flaky = 0 - num_regressions = 0 - keywords = {} - for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): - keywords[v] = k.upper() - - tests = {} - for filename, result in result_summary.unexpected_results.iteritems(): - # Note that if a test crashed in the original run, we ignore - # whether or not it crashed when we retried it (if we retried it), - # and always consider the result not flaky. 
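Because flakiness is decided by comparing the first run against the retry, the bucketing can be summarized in a few lines. A simplified sketch (plain strings stand in for the test_expectations constants, and retry_results stands for retry_summary.unexpected_results):

def classify_result(first_result, retry_results, test):
    # Bucket one unexpected result as a pass, a flake, or a regression.
    if first_result == 'PASS':
        return 'unexpected pass'
    if first_result == 'CRASH':
        return 'regression'            # crashes are never treated as flaky
    if test not in retry_results:
        return 'flaky'                 # it behaved as expected on the retry
    if retry_results[test] != first_result:
        return 'flaky'                 # it failed differently the second time
    return 'regression'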
- test = port_obj.relative_test_filename(filename) - expected = expectations.get_expectations_string(filename) - actual = [keywords[result]] - - if result == test_expectations.PASS: - num_passes += 1 - elif result == test_expectations.CRASH: - num_regressions += 1 - else: - if filename not in retry_summary.unexpected_results: - actual.extend(expectations.get_expectations_string( - filename).split(" ")) - num_flaky += 1 - else: - retry_result = retry_summary.unexpected_results[filename] - if result != retry_result: - actual.append(keywords[retry_result]) - num_flaky += 1 - else: - num_regressions += 1 - - tests[test] = {} - tests[test]['expected'] = expected - tests[test]['actual'] = " ".join(actual) - - results['tests'] = tests - results['num_passes'] = num_passes - results['num_flaky'] = num_flaky - results['num_regressions'] = num_regressions - - return results - - -class TestRunner: - """A class for managing running a series of tests on a series of layout - test files.""" - - HTTP_SUBDIR = os.sep.join(['', 'http', '']) - WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) - - # The per-test timeout in milliseconds, if no --time-out-ms option was - # given to run_webkit_tests. This should correspond to the default timeout - # in DumpRenderTree. - DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 - - def __init__(self, port, options, printer, message_broker): - """Initialize test runner data structures. - - Args: - port: an object implementing port-specific - options: a dictionary of command line options - printer: a Printer object to record updates to. - message_broker: object used to communicate with workers. - """ - self._port = port - self._options = options - self._printer = printer - self._message_broker = message_broker - - # disable wss server. need to install pyOpenSSL on buildbots. - # self._websocket_secure_server = websocket_server.PyWebSocket( - # options.results_directory, use_tls=True, port=9323) - - # a set of test files, and the same tests as a list - self._test_files = set() - self._test_files_list = None - self._result_queue = Queue.Queue() - self._retrying = False - - def collect_tests(self, args, last_unexpected_results): - """Find all the files to test. - - Args: - args: list of test arguments from the command line - last_unexpected_results: list of unexpected results to retest, if any - - """ - paths = [self._strip_test_dir_prefix(arg) for arg in args if arg and arg != ''] - paths += last_unexpected_results - if self._options.test_list: - paths += read_test_files(self._options.test_list) - self._test_files = self._port.tests(paths) - - def _strip_test_dir_prefix(self, path): - if path.startswith(LAYOUT_TESTS_DIRECTORY): - return path[len(LAYOUT_TESTS_DIRECTORY):] - return path - - def lint(self): - # Creating the expecations for each platform/configuration pair does - # all the test list parsing and ensures it's correct syntax (e.g. no - # dupes). - for platform_name in self._port.test_platform_names(): - self.parse_expectations(platform_name, is_debug_mode=True) - self.parse_expectations(platform_name, is_debug_mode=False) - self._printer.write("") - _log.info("If there are no fail messages, errors or exceptions, " - "then the lint succeeded.") - return 0 - - def parse_expectations(self, test_platform_name, is_debug_mode): - """Parse the expectations from the test_list files and return a data - structure holding them. 
Throws an error if the test_list files have - invalid syntax.""" - if self._options.lint_test_files: - test_files = None - else: - test_files = self._test_files - - try: - expectations_str = self._port.test_expectations() - overrides_str = self._port.test_expectations_overrides() - self._expectations = test_expectations.TestExpectations( - self._port, test_files, expectations_str, test_platform_name, - is_debug_mode, self._options.lint_test_files, - overrides=overrides_str) - return self._expectations - except SyntaxError, err: - if self._options.lint_test_files: - print str(err) - else: - raise err - - def prepare_lists_and_print_output(self): - """Create appropriate subsets of test lists and returns a - ResultSummary object. Also prints expected test counts. - """ - - # Remove skipped - both fixable and ignored - files from the - # top-level list of files to test. - num_all_test_files = len(self._test_files) - self._printer.print_expected("Found: %d tests" % - (len(self._test_files))) - if not num_all_test_files: - _log.critical('No tests to run.') - return None - - skipped = set() - if num_all_test_files > 1 and not self._options.force: - skipped = self._expectations.get_tests_with_result_type( - test_expectations.SKIP) - self._test_files -= skipped - - # Create a sorted list of test files so the subset chunk, - # if used, contains alphabetically consecutive tests. - self._test_files_list = list(self._test_files) - if self._options.randomize_order: - random.shuffle(self._test_files_list) - else: - self._test_files_list.sort() - - # If the user specifies they just want to run a subset of the tests, - # just grab a subset of the non-skipped tests. - if self._options.run_chunk or self._options.run_part: - chunk_value = self._options.run_chunk or self._options.run_part - test_files = self._test_files_list - try: - (chunk_num, chunk_len) = chunk_value.split(":") - chunk_num = int(chunk_num) - assert(chunk_num >= 0) - test_size = int(chunk_len) - assert(test_size > 0) - except: - _log.critical("invalid chunk '%s'" % chunk_value) - return None - - # Get the number of tests - num_tests = len(test_files) - - # Get the start offset of the slice. - if self._options.run_chunk: - chunk_len = test_size - # In this case chunk_num can be really large. We need - # to make the slave fit in the current number of tests. - slice_start = (chunk_num * chunk_len) % num_tests - else: - # Validate the data. - assert(test_size <= num_tests) - assert(chunk_num <= test_size) - - # To count the chunk_len, and make sure we don't skip - # some tests, we round to the next value that fits exactly - # all the parts. - rounded_tests = num_tests - if rounded_tests % test_size != 0: - rounded_tests = (num_tests + test_size - - (num_tests % test_size)) - - chunk_len = rounded_tests / test_size - slice_start = chunk_len * (chunk_num - 1) - # It does not mind if we go over test_size. - - # Get the end offset of the slice. - slice_end = min(num_tests, slice_start + chunk_len) - - files = test_files[slice_start:slice_end] - - tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( - (slice_end - slice_start), slice_start, slice_end, num_tests) - self._printer.print_expected(tests_run_msg) - - # If we reached the end and we don't have enough tests, we run some - # from the beginning. 
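The slicing arithmetic above (and the wrap-around handling just below) can be condensed into a standalone sketch; the function name and the sample values are illustrative only:

    def compute_slice(spec, num_tests, is_chunk):
        """Return the (start, end) indices picked by --run-chunk n:l or --run-part n:m."""
        first, second = [int(v) for v in spec.split(":")]
        if is_chunk:
            # --run-chunk n:l -> the nth slice of length l; the start wraps modulo num_tests.
            length = second
            start = (first * length) % num_tests
        else:
            # --run-part n:m -> m parts of equal (rounded-up) length; take the nth, 1-based.
            length = -(-num_tests // second)   # ceiling division
            start = length * (first - 1)
        return start, min(num_tests, start + length)

    # With 100 tests: compute_slice('1:4', 100, True)  -> (4, 8)
    #                 compute_slice('1:3', 100, False) -> (0, 34)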
- if slice_end - slice_start < chunk_len: - extra = chunk_len - (slice_end - slice_start) - extra_msg = (' last chunk is partial, appending [0:%d]' % - extra) - self._printer.print_expected(extra_msg) - tests_run_msg += "\n" + extra_msg - files.extend(test_files[0:extra]) - tests_run_filename = os.path.join(self._options.results_directory, - "tests_run.txt") - with codecs.open(tests_run_filename, "w", "utf-8") as file: - file.write(tests_run_msg + "\n") - - len_skip_chunk = int(len(files) * len(skipped) / - float(len(self._test_files))) - skip_chunk_list = list(skipped)[0:len_skip_chunk] - skip_chunk = set(skip_chunk_list) - - # Update expectations so that the stats are calculated correctly. - # We need to pass a list that includes the right # of skipped files - # to ParseExpectations so that ResultSummary() will get the correct - # stats. So, we add in the subset of skipped files, and then - # subtract them back out. - self._test_files_list = files + skip_chunk_list - self._test_files = set(self._test_files_list) - - self._expectations = self.parse_expectations( - self._port.test_platform_name(), - self._options.configuration == 'Debug') - - self._test_files = set(files) - self._test_files_list = files - else: - skip_chunk = skipped - - result_summary = ResultSummary(self._expectations, - self._test_files | skip_chunk) - self._print_expected_results_of_type(result_summary, - test_expectations.PASS, "passes") - self._print_expected_results_of_type(result_summary, - test_expectations.FAIL, "failures") - self._print_expected_results_of_type(result_summary, - test_expectations.FLAKY, "flaky") - self._print_expected_results_of_type(result_summary, - test_expectations.SKIP, "skipped") - - if self._options.force: - self._printer.print_expected('Running all tests, including ' - 'skips (--force)') - else: - # Note that we don't actually run the skipped tests (they were - # subtracted out of self._test_files, above), but we stub out the - # results here so the statistics can remain accurate. - for test in skip_chunk: - result = test_results.TestResult(test, - failures=[], test_run_time=0, total_time_for_all_diffs=0, - time_for_diffs=0) - result.type = test_expectations.SKIP - result_summary.add(result, expected=True) - self._printer.print_expected('') - - return result_summary - - def _get_dir_for_test_file(self, test_file): - """Returns the highest-level directory by which to shard the given - test file.""" - index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY) - - test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):] - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - test_file = test_file_parts[1] - - # The http tests are very stable on mac/linux. - # TODO(ojan): Make the http server on Windows be apache so we can - # turn shard the http tests there as well. Switching to apache is - # what made them stable on linux/mac. - return_value = directory - while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) - and test_file.find(os.sep) >= 0): - test_file_parts = test_file.split(os.sep, 1) - directory = test_file_parts[0] - return_value = os.path.join(return_value, directory) - test_file = test_file_parts[1] - - return return_value - - def _get_test_input_for_file(self, test_file): - """Returns the appropriate TestInput object for the file. 
Mostly this - is used for looking up the timeout value (in ms) to use for the given - test.""" - if self._test_is_slow(test_file): - return TestInput(test_file, self._options.slow_time_out_ms) - return TestInput(test_file, self._options.time_out_ms) - - def _test_requires_lock(self, test_file): - """Return True if the test needs to be locked when - running multiple copies of NRWTs.""" - split_path = test_file.split(os.sep) - return 'http' in split_path or 'websocket' in split_path - - def _test_is_slow(self, test_file): - return self._expectations.has_modifier(test_file, - test_expectations.SLOW) - - def _shard_tests(self, test_files, use_real_shards): - """Groups tests into batches. - This helps ensure that tests that depend on each other (aka bad tests!) - continue to run together as most cross-tests dependencies tend to - occur within the same directory. If use_real_shards is false, we - put each (non-HTTP/websocket) test into its own shard for maximum - concurrency instead of trying to do any sort of real sharding. - - Return: - A list of lists of TestInput objects. - """ - # FIXME: when we added http locking, we changed how this works such - # that we always lump all of the HTTP threads into a single shard. - # That will slow down experimental-fully-parallel, but it's unclear - # what the best alternative is completely revamping how we track - # when to grab the lock. - - test_lists = [] - tests_to_http_lock = [] - if not use_real_shards: - for test_file in test_files: - test_input = self._get_test_input_for_file(test_file) - if self._test_requires_lock(test_file): - tests_to_http_lock.append(test_input) - else: - test_lists.append((".", [test_input])) - else: - tests_by_dir = {} - for test_file in test_files: - directory = self._get_dir_for_test_file(test_file) - test_input = self._get_test_input_for_file(test_file) - if self._test_requires_lock(test_file): - tests_to_http_lock.append(test_input) - else: - tests_by_dir.setdefault(directory, []) - tests_by_dir[directory].append(test_input) - # Sort by the number of tests in the dir so that the ones with the - # most tests get run first in order to maximize parallelization. - # Number of tests is a good enough, but not perfect, approximation - # of how long that set of tests will take to run. We can't just use - # a PriorityQueue until we move to Python 2.6. - for directory in tests_by_dir: - test_list = tests_by_dir[directory] - # Keep the tests in alphabetical order. - # FIXME: Remove once tests are fixed so they can be run in any - # order. - test_list.reverse() - test_list_tuple = (directory, test_list) - test_lists.append(test_list_tuple) - test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) - - # Put the http tests first. There are only a couple hundred of them, - # but each http test takes a very long time to run, so sorting by the - # number of tests doesn't accurately capture how long they take to run. - if tests_to_http_lock: - tests_to_http_lock.reverse() - test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock)) - - return test_lists - - def _contains_tests(self, subdir): - for test_file in self._test_files: - if test_file.find(subdir) >= 0: - return True - return False - - def _num_workers(self): - return int(self._options.child_processes) - - def _run_tests(self, file_list, result_summary): - """Runs the tests in the file_list. 
- - Return: A tuple (keyboard_interrupted, thread_timings, test_timings, - individual_test_timings) - keyboard_interrupted is whether someone typed Ctrl^C - thread_timings is a list of dicts with the total runtime - of each thread with 'name', 'num_tests', 'total_time' properties - test_timings is a list of timings for each sharded subdirectory - of the form [time, directory_name, num_tests] - individual_test_timings is a list of run times for each test - in the form {filename:filename, test_run_time:test_run_time} - result_summary: summary object to populate with the results - """ - - self._printer.print_update('Sharding tests ...') - num_workers = self._num_workers() - test_lists = self._shard_tests(file_list, - num_workers > 1 and not self._options.experimental_fully_parallel) - filename_queue = Queue.Queue() - for item in test_lists: - filename_queue.put(item) - - self._printer.print_update('Starting %s ...' % - grammar.pluralize('worker', num_workers)) - message_broker = self._message_broker - self._current_filename_queue = filename_queue - self._current_result_summary = result_summary - threads = message_broker.start_workers(self) - - self._printer.print_update("Starting testing ...") - keyboard_interrupted = False - try: - message_broker.run_message_loop() - except KeyboardInterrupt: - _log.info("Interrupted, exiting") - message_broker.cancel_workers() - keyboard_interrupted = True - except: - # Unexpected exception; don't try to clean up workers. - _log.info("Exception raised, exiting") - raise - - thread_timings, test_timings, individual_test_timings = \ - self._collect_timing_info(threads) - - return (keyboard_interrupted, thread_timings, test_timings, - individual_test_timings) - - def update(self): - self.update_summary(self._current_result_summary) - - def _collect_timing_info(self, threads): - test_timings = {} - individual_test_timings = [] - thread_timings = [] - - for thread in threads: - thread_timings.append({'name': thread.getName(), - 'num_tests': thread.get_num_tests(), - 'total_time': thread.get_total_time()}) - test_timings.update(thread.get_test_group_timing_stats()) - individual_test_timings.extend(thread.get_test_results()) - - return (thread_timings, test_timings, individual_test_timings) - - def needs_http(self): - """Returns whether the test runner needs an HTTP server.""" - return self._contains_tests(self.HTTP_SUBDIR) - - def needs_websocket(self): - """Returns whether the test runner needs a WEBSOCKET server.""" - return self._contains_tests(self.WEBSOCKET_SUBDIR) - - def set_up_run(self): - """Configures the system to be ready to run tests. - - Returns a ResultSummary object if we should continue to run tests, - or None if we should abort. - - """ - # This must be started before we check the system dependencies, - # since the helper may do things to make the setup correct. - self._printer.print_update("Starting helper ...") - self._port.start_helper() - - # Check that the system dependencies (themes, fonts, ...) are correct. - if not self._options.nocheck_sys_deps: - self._printer.print_update("Checking system dependencies ...") - if not self._port.check_sys_deps(self.needs_http()): - self._port.stop_helper() - return None - - if self._options.clobber_old_results: - self._clobber_old_results() - - # Create the output directory if it doesn't already exist. 
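The needs_http() and needs_websocket() checks above work because HTTP_SUBDIR and WEBSOCKET_SUBDIR are wrapped in path separators, so a plain substring test only matches a path component actually named 'http' or 'websocket'. A minimal illustration (the file name is hypothetical):

    import os

    HTTP_SUBDIR = os.sep.join(['', 'http', ''])   # '/http/' on POSIX, '\\http\\' on Windows
    test_file = os.path.join('LayoutTests', 'http', 'tests', 'misc', 'example.html')
    print(test_file.find(HTTP_SUBDIR) >= 0)       # True -> this run needs the HTTP server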
- self._port.maybe_make_directory(self._options.results_directory) - - self._port.setup_test_run() - - self._printer.print_update("Preparing tests ...") - result_summary = self.prepare_lists_and_print_output() - if not result_summary: - return None - - return result_summary - - def run(self, result_summary): - """Run all our tests on all our test files. - - For each test file, we run each test type. If there are any failures, - we collect them for reporting. - - Args: - result_summary: a summary object tracking the test results. - - Return: - The number of unexpected results (0 == success) - """ - # gather_test_files() must have been called first to initialize us. - # If we didn't find any files to test, we've errored out already in - # prepare_lists_and_print_output(). - assert(len(self._test_files)) - - start_time = time.time() - - keyboard_interrupted, thread_timings, test_timings, \ - individual_test_timings = ( - self._run_tests(self._test_files_list, result_summary)) - - # We exclude the crashes from the list of results to retry, because - # we want to treat even a potentially flaky crash as an error. - failures = self._get_failures(result_summary, include_crashes=False) - retry_summary = result_summary - while (len(failures) and self._options.retry_failures and - not self._retrying and not keyboard_interrupted): - _log.info('') - _log.info("Retrying %d unexpected failure(s) ..." % len(failures)) - _log.info('') - self._retrying = True - retry_summary = ResultSummary(self._expectations, failures.keys()) - # Note that we intentionally ignore the return value here. - self._run_tests(failures.keys(), retry_summary) - failures = self._get_failures(retry_summary, include_crashes=True) - - end_time = time.time() - - self._print_timing_statistics(end_time - start_time, - thread_timings, test_timings, - individual_test_timings, - result_summary) - - self._print_result_summary(result_summary) - - sys.stdout.flush() - sys.stderr.flush() - - self._printer.print_one_line_summary(result_summary.total, - result_summary.expected, - result_summary.unexpected) - - unexpected_results = summarize_unexpected_results(self._port, - self._expectations, result_summary, retry_summary) - self._printer.print_unexpected_results(unexpected_results) - - if self._options.record_results: - # Write the same data to log files and upload generated JSON files - # to appengine server. - self._upload_json_files(unexpected_results, result_summary, - individual_test_timings) - - # Write the summary to disk (results.html) and display it if requested. - wrote_results = self._write_results_html_file(result_summary) - if self._options.show_results and wrote_results: - self._show_results_html_file() - - # Now that we've completed all the processing we can, we re-raise - # a KeyboardInterrupt if necessary so the caller can handle it. - if keyboard_interrupted: - raise KeyboardInterrupt - - # Ignore flaky failures and unexpected passes so we don't turn the - # bot red for those. 
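The retry pass above feeds summarize_unexpected_results(), and the comments spell out the bucketing rules; condensed into a sketch (string labels stand in for the test_expectations constants):

    def classify(first_result, retry_result):
        """retry_result is None when the test no longer failed unexpectedly on the retry."""
        if first_result == 'PASS':
            return 'unexpected pass'   # reported, but never turns the bot red
        if first_result == 'CRASH':
            return 'regression'        # a crash is never treated as flaky
        if retry_result is None or retry_result != first_result:
            return 'flaky'             # the failure went away or changed on the retry
        return 'regression'            # failed the same way twice

Only the regression bucket feeds the return value below, so flaky failures and unexpected passes do not affect the exit status.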
- return unexpected_results['num_regressions'] - - def clean_up_run(self): - """Restores the system after we're done running tests.""" - - _log.debug("flushing stdout") - sys.stdout.flush() - _log.debug("flushing stderr") - sys.stderr.flush() - _log.debug("stopping helper") - self._port.stop_helper() - - def update_summary(self, result_summary): - """Update the summary and print results with any completed tests.""" - while True: - try: - result = test_results.TestResult.loads(self._result_queue.get_nowait()) - except Queue.Empty: - return - - expected = self._expectations.matches_an_expected_result( - result.filename, result.type, self._options.pixel_tests) - result_summary.add(result, expected) - exp_str = self._expectations.get_expectations_string( - result.filename) - got_str = self._expectations.expectation_to_string(result.type) - self._printer.print_test_result(result, expected, exp_str, got_str) - self._printer.print_progress(result_summary, self._retrying, - self._test_files_list) - - def _clobber_old_results(self): - # Just clobber the actual test results directories since the other - # files in the results directory are explicitly used for cross-run - # tracking. - self._printer.print_update("Clobbering old results in %s" % - self._options.results_directory) - layout_tests_dir = self._port.layout_tests_dir() - possible_dirs = self._port.test_dirs() - for dirname in possible_dirs: - if os.path.isdir(os.path.join(layout_tests_dir, dirname)): - shutil.rmtree(os.path.join(self._options.results_directory, - dirname), - ignore_errors=True) - - def _get_failures(self, result_summary, include_crashes): - """Filters a dict of results and returns only the failures. - - Args: - result_summary: the results of the test run - include_crashes: whether crashes are included in the output. - We use False when finding the list of failures to retry - to see if the results were flaky. Although the crashes may also be - flaky, we treat them as if they aren't so that they're not ignored. - Returns: - a dict of files -> results - """ - failed_results = {} - for test, result in result_summary.unexpected_results.iteritems(): - if (result == test_expectations.PASS or - result == test_expectations.CRASH and not include_crashes): - continue - failed_results[test] = result - - return failed_results - - def _upload_json_files(self, unexpected_results, result_summary, - individual_test_timings): - """Writes the results of the test run as JSON files into the results - dir and upload the files to the appengine server. - - There are three different files written into the results dir: - unexpected_results.json: A short list of any unexpected results. - This is used by the buildbots to display results. - expectations.json: This is used by the flakiness dashboard. - results.json: A full list of the results - used by the flakiness - dashboard and the aggregate results dashboard. - - Args: - unexpected_results: dict of unexpected results - result_summary: full summary object - individual_test_timings: list of test times (used by the flakiness - dashboard). - """ - results_directory = self._options.results_directory - _log.debug("Writing JSON files in %s." % results_directory) - unexpected_json_path = os.path.join(results_directory, "unexpected_results.json") - with codecs.open(unexpected_json_path, "w", "utf-8") as file: - simplejson.dump(unexpected_results, file, sort_keys=True, indent=2) - - # Write a json file of the test_expectations.txt file for the layout - # tests dashboard. 
- expectations_path = os.path.join(results_directory, "expectations.json") - expectations_json = \ - self._expectations.get_expectations_json_for_all_platforms() - with codecs.open(expectations_path, "w", "utf-8") as file: - file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json) - - generator = json_layout_results_generator.JSONLayoutResultsGenerator( - self._port, self._options.builder_name, self._options.build_name, - self._options.build_number, self._options.results_directory, - BUILDER_BASE_URL, individual_test_timings, - self._expectations, result_summary, self._test_files_list, - not self._options.upload_full_results, - self._options.test_results_server, - "layout-tests", - self._options.master_name) - - _log.debug("Finished writing JSON files.") - - json_files = ["expectations.json"] - if self._options.upload_full_results: - json_files.append("results.json") - else: - json_files.append("incremental_results.json") - - generator.upload_json_files(json_files) - - def _print_config(self): - """Prints the configuration for the test run.""" - p = self._printer - p.print_config("Using port '%s'" % self._port.name()) - p.print_config("Placing test results in %s" % - self._options.results_directory) - if self._options.new_baseline: - p.print_config("Placing new baselines in %s" % - self._port.baseline_path()) - p.print_config("Using %s build" % self._options.configuration) - if self._options.pixel_tests: - p.print_config("Pixel tests enabled") - else: - p.print_config("Pixel tests disabled") - - p.print_config("Regular timeout: %s, slow test timeout: %s" % - (self._options.time_out_ms, - self._options.slow_time_out_ms)) - - if self._num_workers() == 1: - p.print_config("Running one %s" % self._port.driver_name()) - else: - p.print_config("Running %s %ss in parallel" % - (self._options.child_processes, - self._port.driver_name())) - p.print_config('Command line: ' + - ' '.join(self._port.driver_cmd_line())) - p.print_config("Worker model: %s" % self._options.worker_model) - p.print_config("") - - def _print_expected_results_of_type(self, result_summary, - result_type, result_type_str): - """Print the number of the tests in a given result class. - - Args: - result_summary - the object containing all the results to report on - result_type - the particular result type to report in the summary. - result_type_str - a string description of the result_type. - """ - tests = self._expectations.get_tests_with_result_type(result_type) - now = result_summary.tests_by_timeline[test_expectations.NOW] - wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] - - # We use a fancy format string in order to print the data out in a - # nicely-aligned table. - fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" - % (self._num_digits(now), self._num_digits(wontfix))) - self._printer.print_expected(fmtstr % - (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) - - def _num_digits(self, num): - """Returns the number of digits needed to represent the length of a - sequence.""" - ndigits = 1 - if len(num): - ndigits = int(math.log10(len(num))) + 1 - return ndigits - - def _print_timing_statistics(self, total_time, thread_timings, - directory_test_timings, individual_test_timings, - result_summary): - """Record timing-specific information for the test run. 
- - Args: - total_time: total elapsed time (in seconds) for the test run - thread_timings: wall clock time each thread ran for - directory_test_timings: timing by directory - individual_test_timings: timing by file - result_summary: summary object for the test run - """ - self._printer.print_timing("Test timing:") - self._printer.print_timing(" %6.2f total testing time" % total_time) - self._printer.print_timing("") - self._printer.print_timing("Thread timing:") - cuml_time = 0 - for t in thread_timings: - self._printer.print_timing(" %10s: %5d tests, %6.2f secs" % - (t['name'], t['num_tests'], t['total_time'])) - cuml_time += t['total_time'] - self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" % - (cuml_time, cuml_time / int(self._options.child_processes))) - self._printer.print_timing("") - - self._print_aggregate_test_statistics(individual_test_timings) - self._print_individual_test_times(individual_test_timings, - result_summary) - self._print_directory_timings(directory_test_timings) - - def _print_aggregate_test_statistics(self, individual_test_timings): - """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. - Args: - individual_test_timings: List of dump_render_tree_thread.TestStats - for all tests. - """ - test_types = [] # Unit tests don't actually produce any timings. - if individual_test_timings: - test_types = individual_test_timings[0].time_for_diffs.keys() - times_for_dump_render_tree = [] - times_for_diff_processing = [] - times_per_test_type = {} - for test_type in test_types: - times_per_test_type[test_type] = [] - - for test_stats in individual_test_timings: - times_for_dump_render_tree.append(test_stats.test_run_time) - times_for_diff_processing.append( - test_stats.total_time_for_all_diffs) - time_for_diffs = test_stats.time_for_diffs - for test_type in test_types: - times_per_test_type[test_type].append( - time_for_diffs[test_type]) - - self._print_statistics_for_test_timings( - "PER TEST TIME IN TESTSHELL (seconds):", - times_for_dump_render_tree) - self._print_statistics_for_test_timings( - "PER TEST DIFF PROCESSING TIMES (seconds):", - times_for_diff_processing) - for test_type in test_types: - self._print_statistics_for_test_timings( - "PER TEST TIMES BY TEST TYPE: %s" % test_type, - times_per_test_type[test_type]) - - def _print_individual_test_times(self, individual_test_timings, - result_summary): - """Prints the run times for slow, timeout and crash tests. - Args: - individual_test_timings: List of dump_render_tree_thread.TestStats - for all tests. - result_summary: summary object for test run - """ - # Reverse-sort by the time spent in DumpRenderTree. 
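To make the "cumulative" versus "optimal" line in the thread-timing report above concrete (the numbers are made up): optimal is simply the cumulative thread time divided by the number of workers.

    thread_times = [31.0, 49.0]        # per-thread wall-clock seconds for two workers
    cumulative = sum(thread_times)     # 80.0 seconds of total work
    optimal = cumulative / 2           # 40.0 seconds if the work had split evenly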
- individual_test_timings.sort(lambda a, b: - cmp(b.test_run_time, a.test_run_time)) - - num_printed = 0 - slow_tests = [] - timeout_or_crash_tests = [] - unexpected_slow_tests = [] - for test_tuple in individual_test_timings: - filename = test_tuple.filename - is_timeout_crash_or_slow = False - if self._test_is_slow(filename): - is_timeout_crash_or_slow = True - slow_tests.append(test_tuple) - - if filename in result_summary.failures: - result = result_summary.results[filename].type - if (result == test_expectations.TIMEOUT or - result == test_expectations.CRASH): - is_timeout_crash_or_slow = True - timeout_or_crash_tests.append(test_tuple) - - if (not is_timeout_crash_or_slow and - num_printed < printing.NUM_SLOW_TESTS_TO_LOG): - num_printed = num_printed + 1 - unexpected_slow_tests.append(test_tuple) - - self._printer.print_timing("") - self._print_test_list_timing("%s slowest tests that are not " - "marked as SLOW and did not timeout/crash:" % - printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests) - self._printer.print_timing("") - self._print_test_list_timing("Tests marked as SLOW:", slow_tests) - self._printer.print_timing("") - self._print_test_list_timing("Tests that timed out or crashed:", - timeout_or_crash_tests) - self._printer.print_timing("") - - def _print_test_list_timing(self, title, test_list): - """Print timing info for each test. - - Args: - title: section heading - test_list: tests that fall in this section - """ - if self._printer.disabled('slowest'): - return - - self._printer.print_timing(title) - for test_tuple in test_list: - filename = test_tuple.filename[len( - self._port.layout_tests_dir()) + 1:] - filename = filename.replace('\\', '/') - test_run_time = round(test_tuple.test_run_time, 1) - self._printer.print_timing(" %s took %s seconds" % - (filename, test_run_time)) - - def _print_directory_timings(self, directory_test_timings): - """Print timing info by directory for any directories that - take > 10 seconds to run. - - Args: - directory_test_timing: time info for each directory - """ - timings = [] - for directory in directory_test_timings: - num_tests, time_for_directory = directory_test_timings[directory] - timings.append((round(time_for_directory, 1), directory, - num_tests)) - timings.sort() - - self._printer.print_timing("Time to process slowest subdirectories:") - min_seconds_to_print = 10 - for timing in timings: - if timing[0] > min_seconds_to_print: - self._printer.print_timing( - " %s took %s seconds to run %s tests." % (timing[1], - timing[0], timing[2])) - self._printer.print_timing("") - - def _print_statistics_for_test_timings(self, title, timings): - """Prints the median, mean and standard deviation of the values in - timings. - - Args: - title: Title for these timings. - timings: A list of floats representing times. 
- """ - self._printer.print_timing(title) - timings.sort() - - num_tests = len(timings) - if not num_tests: - return - percentile90 = timings[int(.9 * num_tests)] - percentile99 = timings[int(.99 * num_tests)] - - if num_tests % 2 == 1: - median = timings[((num_tests - 1) / 2) - 1] - else: - lower = timings[num_tests / 2 - 1] - upper = timings[num_tests / 2] - median = (float(lower + upper)) / 2 - - mean = sum(timings) / num_tests - - for time in timings: - sum_of_deviations = math.pow(time - mean, 2) - - std_deviation = math.sqrt(sum_of_deviations / num_tests) - self._printer.print_timing(" Median: %6.3f" % median) - self._printer.print_timing(" Mean: %6.3f" % mean) - self._printer.print_timing(" 90th percentile: %6.3f" % percentile90) - self._printer.print_timing(" 99th percentile: %6.3f" % percentile99) - self._printer.print_timing(" Standard dev: %6.3f" % std_deviation) - self._printer.print_timing("") - - def _print_result_summary(self, result_summary): - """Print a short summary about how many tests passed. - - Args: - result_summary: information to log - """ - failed = len(result_summary.failures) - skipped = len( - result_summary.tests_by_expectation[test_expectations.SKIP]) - total = result_summary.total - passed = total - failed - skipped - pct_passed = 0.0 - if total > 0: - pct_passed = float(passed) * 100 / total - - self._printer.print_actual("") - self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" % - (passed, total, pct_passed)) - self._printer.print_actual("") - self._print_result_summary_entry(result_summary, - test_expectations.NOW, "Tests to be fixed") - - self._printer.print_actual("") - self._print_result_summary_entry(result_summary, - test_expectations.WONTFIX, - "Tests that will only be fixed if they crash (WONTFIX)") - self._printer.print_actual("") - - def _print_result_summary_entry(self, result_summary, timeline, - heading): - """Print a summary block of results for a particular timeline of test. - - Args: - result_summary: summary to print results for - timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
- heading: a textual description of the timeline - """ - total = len(result_summary.tests_by_timeline[timeline]) - not_passing = (total - - len(result_summary.tests_by_expectation[test_expectations.PASS] & - result_summary.tests_by_timeline[timeline])) - self._printer.print_actual("=> %s (%d):" % (heading, not_passing)) - - for result in TestExpectationsFile.EXPECTATION_ORDER: - if result == test_expectations.PASS: - continue - results = (result_summary.tests_by_expectation[result] & - result_summary.tests_by_timeline[timeline]) - desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] - if not_passing and len(results): - pct = len(results) * 100.0 / not_passing - self._printer.print_actual(" %5d %-24s (%4.1f%%)" % - (len(results), desc[len(results) != 1], pct)) - - def _results_html(self, test_files, failures, title="Test Failures", override_time=None): - """ - test_files = a list of file paths - failures = dictionary mapping test paths to failure objects - title = title printed at top of test - override_time = current time (used by unit tests) - """ - page = """<html> - <head> - <title>Layout Test Results (%(time)s)</title> - </head> - <body> - <h2>%(title)s (%(time)s)</h2> - """ % {'title': title, 'time': override_time or time.asctime()} - - for test_file in sorted(test_files): - test_name = self._port.relative_test_filename(test_file) - test_url = self._port.filename_to_uri(test_file) - page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name) - test_failures = failures.get(test_file, []) - for failure in test_failures: - page += (u" %s<br/>" % - failure.result_html_output(test_name)) - page += "</p>\n" - page += "</body></html>\n" - return page - - def _write_results_html_file(self, result_summary): - """Write results.html which is a summary of tests that failed. - - Args: - result_summary: a summary of the results :) - - Returns: - True if any results were written (since expected failures may be - omitted) - """ - # test failures - if self._options.full_results_html: - results_title = "Test Failures" - test_files = result_summary.failures.keys() - else: - results_title = "Unexpected Test Failures" - unexpected_failures = self._get_failures(result_summary, - include_crashes=True) - test_files = unexpected_failures.keys() - if not len(test_files): - return False - - out_filename = os.path.join(self._options.results_directory, - "results.html") - with codecs.open(out_filename, "w", "utf-8") as results_file: - html = self._results_html(test_files, result_summary.failures, results_title) - results_file.write(html) - - return True - - def _show_results_html_file(self): - """Shows the results.html page.""" - results_filename = os.path.join(self._options.results_directory, - "results.html") - self._port.show_results_html_file(results_filename) - - -def read_test_files(files): - tests = [] - for file in files: - try: - with codecs.open(file, 'r', 'utf-8') as file_contents: - # FIXME: This could be cleaner using a list comprehension. - for line in file_contents: - line = test_expectations.strip_comments(line) - if line: - tests.append(line) - except IOError, e: - if e.errno == errno.ENOENT: - _log.critical('') - _log.critical('--test-list file "%s" not found' % file) - raise - return tests - - -def run(port, options, args, regular_output=sys.stderr, - buildbot_output=sys.stdout): - """Run the tests. 
- - Args: - port: Port object for port-specific behavior - options: a dictionary of command line options - args: a list of sub directories or files to test - regular_output: a stream-like object that we can send logging/debug - output to - buildbot_output: a stream-like object that we can write all output that - is intended to be parsed by the buildbot to - Returns: - the number of unexpected results that occurred, or -1 if there is an - error. - - """ - _set_up_derived_options(port, options) - - printer = printing.Printer(port, options, regular_output, buildbot_output, - int(options.child_processes), options.experimental_fully_parallel) - if options.help_printing: - printer.help_printing() - printer.cleanup() - return 0 - - last_unexpected_results = _gather_unexpected_results(options) - if options.print_last_failures: - printer.write("\n".join(last_unexpected_results) + "\n") - printer.cleanup() - return 0 - - broker = message_broker.get(port, options) - - # We wrap any parts of the run that are slow or likely to raise exceptions - # in a try/finally to ensure that we clean up the logging configuration. - num_unexpected_results = -1 - try: - test_runner = TestRunner(port, options, printer, broker) - test_runner._print_config() - - printer.print_update("Collecting tests ...") - try: - test_runner.collect_tests(args, last_unexpected_results) - except IOError, e: - if e.errno == errno.ENOENT: - return -1 - raise - - printer.print_update("Parsing expectations ...") - if options.lint_test_files: - return test_runner.lint() - test_runner.parse_expectations(port.test_platform_name(), - options.configuration == 'Debug') - - printer.print_update("Checking build ...") - if not port.check_build(test_runner.needs_http()): - _log.error("Build check failed") - return -1 - - result_summary = test_runner.set_up_run() - if result_summary: - num_unexpected_results = test_runner.run(result_summary) - test_runner.clean_up_run() - _log.debug("Testing completed, Exit status: %d" % - num_unexpected_results) - finally: - broker.cleanup() - printer.cleanup() - - return num_unexpected_results - - -def _set_up_derived_options(port_obj, options): - """Sets the options values that depend on other options values.""" - - if options.worker_model == 'inline': - if options.child_processes and int(options.child_processes) > 1: - _log.warning("--worker-model=inline overrides --child-processes") - options.child_processes = "1" - if not options.child_processes: - options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", - str(port_obj.default_child_processes())) - - if not options.configuration: - options.configuration = port_obj.default_configuration() - - if options.pixel_tests is None: - options.pixel_tests = True - - if not options.use_apache: - options.use_apache = sys.platform in ('darwin', 'linux2') - - if not os.path.isabs(options.results_directory): - # This normalizes the path to the build dir. - # FIXME: how this happens is not at all obvious; this is a dumb - # interface and should be cleaned up. 
- options.results_directory = port_obj.results_directory() - - if not options.time_out_ms: - if options.configuration == "Debug": - options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) - else: - options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) - - options.slow_time_out_ms = str(5 * int(options.time_out_ms)) - - -def _gather_unexpected_results(options): - """Returns the unexpected results from the previous run, if any.""" - last_unexpected_results = [] - if options.print_last_failures or options.retest_last_failures: - unexpected_results_filename = os.path.join( - options.results_directory, "unexpected_results.json") - with codecs.open(unexpected_results_filename, "r", "utf-8") as file: - results = simplejson.load(file) - last_unexpected_results = results['tests'].keys() - return last_unexpected_results - - -def _compat_shim_callback(option, opt_str, value, parser): - print "Ignoring unsupported option: %s" % opt_str - - -def _compat_shim_option(option_name, **kwargs): - return optparse.make_option(option_name, action="callback", - callback=_compat_shim_callback, - help="Ignored, for old-run-webkit-tests compat only.", **kwargs) - - -def parse_args(args=None): - """Provides a default set of command line args. - - Returns a tuple of options, args from optparse""" - - # FIXME: All of these options should be stored closer to the code which - # FIXME: actually uses them. configuration_options should move - # FIXME: to WebKitPort and be shared across all scripts. - configuration_options = [ - optparse.make_option("-t", "--target", dest="configuration", - help="(DEPRECATED)"), - # FIXME: --help should display which configuration is default. - optparse.make_option('--debug', action='store_const', const='Debug', - dest="configuration", - help='Set the configuration to Debug'), - optparse.make_option('--release', action='store_const', - const='Release', dest="configuration", - help='Set the configuration to Release'), - # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION. - ] - - print_options = printing.print_options() - - # FIXME: These options should move onto the ChromiumPort. 
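The _compat_shim_option() helper above leans on optparse's callback actions: the parser accepts the legacy flag, invokes the callback, and stores nothing. A self-contained illustration, using one of the shimmed flags:

    import optparse

    def _ignore(option, opt_str, value, parser):
        print("Ignoring unsupported option: %s" % opt_str)

    parser = optparse.OptionParser(option_list=[
        optparse.make_option("--use-remote-links-to-tests",
                             action="callback", callback=_ignore,
                             help="Ignored, for old-run-webkit-tests compat only."),
    ])
    options, args = parser.parse_args(["--use-remote-links-to-tests", "foo.html"])
    # Prints the "Ignoring ..." message and leaves args == ["foo.html"].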
-    chromium_options = [
-        optparse.make_option("--chromium", action="store_true", default=False,
-            help="use the Chromium port"),
-        optparse.make_option("--startup-dialog", action="store_true",
-            default=False, help="create a dialog on DumpRenderTree startup"),
-        optparse.make_option("--gp-fault-error-box", action="store_true",
-            default=False, help="enable Windows GP fault error box"),
-        optparse.make_option("--multiple-loads",
-            type="int", help="turn on multiple loads of each test"),
-        optparse.make_option("--js-flags",
-            type="string", help="JavaScript flags to pass to tests"),
-        optparse.make_option("--nocheck-sys-deps", action="store_true",
-            default=False,
-            help="Don't check the system dependencies (themes)"),
-        optparse.make_option("--use-drt", action="store_true",
-            default=None,
-            help="Use DumpRenderTree instead of test_shell"),
-        optparse.make_option("--accelerated-compositing",
-            action="store_true",
-            help="Use hardware-accelerated compositing for rendering"),
-        optparse.make_option("--no-accelerated-compositing",
-            action="store_false",
-            dest="accelerated_compositing",
-            help="Don't use hardware-accelerated compositing for rendering"),
-        optparse.make_option("--accelerated-2d-canvas",
-            action="store_true",
-            help="Use hardware-accelerated 2D Canvas calls"),
-        optparse.make_option("--no-accelerated-2d-canvas",
-            action="store_false",
-            dest="accelerated_2d_canvas",
-            help="Don't use hardware-accelerated 2D Canvas calls"),
-    ]
-
-    # Missing Mac-specific old-run-webkit-tests options:
-    # FIXME: Need: -g, --guard for guard malloc support on Mac.
-    # FIXME: Need: -l --leaks Enable leaks checking.
-    # FIXME: Need: --sample-on-timeout Run sample on timeout
-
-    old_run_webkit_tests_compat = [
-        # NRWT doesn't generate results by default anyway.
-        _compat_shim_option("--no-new-test-results"),
-        # NRWT doesn't sample on timeout yet anyway.
-        _compat_shim_option("--no-sample-on-timeout"),
-        # FIXME: NRWT needs to support remote links eventually.
-        _compat_shim_option("--use-remote-links-to-tests"),
-        # FIXME: NRWT doesn't need this option as much since failures are
-        # designed to be cheap. We eventually plan to add this support.
-        _compat_shim_option("--exit-after-n-failures", nargs=1, type="int"),
-    ]
-
-    results_options = [
-        # NEED for bots: --use-remote-links-to-tests Link to test files
-        # within the SVN repository in the results.
- optparse.make_option("-p", "--pixel-tests", action="store_true", - dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"), - optparse.make_option("--no-pixel-tests", action="store_false", - dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"), - optparse.make_option("--tolerance", - help="Ignore image differences less than this percentage (some " - "ports may ignore this option)", type="float"), - optparse.make_option("--results-directory", - default="layout-test-results", - help="Output results directory source dir, relative to Debug or " - "Release"), - optparse.make_option("--new-baseline", action="store_true", - default=False, help="Save all generated results as new baselines " - "into the platform directory, overwriting whatever's " - "already there."), - optparse.make_option("--reset-results", action="store_true", - default=False, help="Reset any existing baselines to the " - "generated results"), - optparse.make_option("--no-show-results", action="store_false", - default=True, dest="show_results", - help="Don't launch a browser with results after the tests " - "are done"), - # FIXME: We should have a helper function to do this sort of - # deprectated mapping and automatically log, etc. - optparse.make_option("--noshow-results", action="store_false", - dest="show_results", - help="Deprecated, same as --no-show-results."), - optparse.make_option("--no-launch-safari", action="store_false", - dest="show_results", - help="old-run-webkit-tests compat, same as --noshow-results."), - # old-run-webkit-tests: - # --[no-]launch-safari Launch (or do not launch) Safari to display - # test results (default: launch) - optparse.make_option("--full-results-html", action="store_true", - default=False, - help="Show all failures in results.html, rather than only " - "regressions"), - optparse.make_option("--clobber-old-results", action="store_true", - default=False, help="Clobbers test results from previous runs."), - optparse.make_option("--platform", - help="Override the platform for expected results"), - optparse.make_option("--no-record-results", action="store_false", - default=True, dest="record_results", - help="Don't record the results."), - # old-run-webkit-tests also has HTTP toggle options: - # --[no-]http Run (or do not run) http tests - # (default: run) - ] - - test_options = [ - optparse.make_option("--build", dest="build", - action="store_true", default=True, - help="Check to ensure the DumpRenderTree build is up-to-date " - "(default)."), - optparse.make_option("--no-build", dest="build", - action="store_false", help="Don't check to see if the " - "DumpRenderTree build is up-to-date."), - # old-run-webkit-tests has --valgrind instead of wrapper. - optparse.make_option("--wrapper", - help="wrapper command to insert before invocations of " - "DumpRenderTree; option is split on whitespace before " - "running. 
(Example: --wrapper='valgrind --smc-check=all')"), - # old-run-webkit-tests: - # -i|--ignore-tests Comma-separated list of directories - # or tests to ignore - optparse.make_option("--test-list", action="append", - help="read list of tests to run from file", metavar="FILE"), - # old-run-webkit-tests uses --skipped==[default|ignore|only] - # instead of --force: - optparse.make_option("--force", action="store_true", default=False, - help="Run all tests, even those marked SKIP in the test list"), - optparse.make_option("--use-apache", action="store_true", - default=False, help="Whether to use apache instead of lighttpd."), - optparse.make_option("--time-out-ms", - help="Set the timeout for each test"), - # old-run-webkit-tests calls --randomize-order --random: - optparse.make_option("--randomize-order", action="store_true", - default=False, help=("Run tests in random order (useful " - "for tracking down corruption)")), - optparse.make_option("--run-chunk", - help=("Run a specified chunk (n:l), the nth of len l, " - "of the layout tests")), - optparse.make_option("--run-part", help=("Run a specified part (n:m), " - "the nth of m parts, of the layout tests")), - # old-run-webkit-tests calls --batch-size: --nthly n - # Restart DumpRenderTree every n tests (default: 1000) - optparse.make_option("--batch-size", - help=("Run a the tests in batches (n), after every n tests, " - "DumpRenderTree is relaunched."), type="int", default=0), - # old-run-webkit-tests calls --run-singly: -1|--singly - # Isolate each test case run (implies --nthly 1 --verbose) - optparse.make_option("--run-singly", action="store_true", - default=False, help="run a separate DumpRenderTree for each test"), - optparse.make_option("--child-processes", - help="Number of DumpRenderTrees to run in parallel."), - # FIXME: Display default number of child processes that will run. - optparse.make_option("--worker-model", action="store", - default="threads", help=("controls worker model. Valid values are " - "'inline' and 'threads' (default).")), - optparse.make_option("--experimental-fully-parallel", - action="store_true", default=False, - help="run all tests in parallel"), - # FIXME: Need --exit-after-n-failures N - # Exit after the first N failures instead of running all tests - # FIXME: Need --exit-after-n-crashes N - # Exit after the first N crashes instead of running all tests - # FIXME: consider: --iterations n - # Number of times to run the set of tests (e.g. ABCABCABC) - optparse.make_option("--print-last-failures", action="store_true", - default=False, help="Print the tests in the last run that " - "had unexpected failures (or passes) and then exit."), - optparse.make_option("--retest-last-failures", action="store_true", - default=False, help="re-test the tests in the last run that " - "had unexpected failures (or passes)."), - optparse.make_option("--retry-failures", action="store_true", - default=True, - help="Re-try any tests that produce unexpected results (default)"), - optparse.make_option("--no-retry-failures", action="store_false", - dest="retry_failures", - help="Don't re-try any tests that produce unexpected results."), - ] - - misc_options = [ - optparse.make_option("--lint-test-files", action="store_true", - default=False, help=("Makes sure the test files parse for all " - "configurations. 
Does not run any tests.")), - ] - - # FIXME: Move these into json_results_generator.py - results_json_options = [ - optparse.make_option("--master-name", help="The name of the buildbot master."), - optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME", - help=("The name of the builder shown on the waterfall running " - "this script e.g. WebKit.")), - optparse.make_option("--build-name", default="DUMMY_BUILD_NAME", - help=("The name of the builder used in its path, e.g. " - "webkit-rel.")), - optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER", - help=("The build number of the builder running this script.")), - optparse.make_option("--test-results-server", default="", - help=("If specified, upload results json files to this appengine " - "server.")), - optparse.make_option("--upload-full-results", - action="store_true", - default=False, - help="If true, upload full json results to server."), - ] - - option_list = (configuration_options + print_options + - chromium_options + results_options + test_options + - misc_options + results_json_options + - old_run_webkit_tests_compat) - option_parser = optparse.OptionParser(option_list=option_list) - - return option_parser.parse_args(args) - - -def main(): - options, args = parse_args() - port_obj = port.get(options.platform, options) - return run(port_obj, options, args) - - -if '__main__' == __name__: - try: - sys.exit(main()) - except KeyboardInterrupt: - # this mirrors what the shell normally does - sys.exit(signal.SIGINT + 128) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py deleted file mode 100644 index 20a4ac0..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py +++ /dev/null @@ -1,540 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -"""Unit tests for run_webkit_tests.""" - -import codecs -import itertools -import logging -import os -import Queue -import shutil -import sys -import tempfile -import thread -import time -import threading -import unittest - -from webkitpy.common import array_stream -from webkitpy.common.system import outputcapture -from webkitpy.common.system import user -from webkitpy.layout_tests import port -from webkitpy.layout_tests import run_webkit_tests -from webkitpy.layout_tests.layout_package import dump_render_tree_thread -from webkitpy.layout_tests.port.test import TestPort, TestDriver -from webkitpy.python24.versioning import compare_version -from webkitpy.test.skip import skip_if - -from webkitpy.thirdparty.mock import Mock - - -class MockUser(): - def __init__(self): - self.url = None - - def open_url(self, url): - self.url = url - - -def passing_run(extra_args=None, port_obj=None, record_results=False, - tests_included=False): - extra_args = extra_args or [] - args = ['--print', 'nothing'] - if not '--platform' in extra_args: - args.extend(['--platform', 'test']) - if not record_results: - args.append('--no-record-results') - if not '--child-processes' in extra_args: - args.extend(['--worker-model', 'inline']) - args.extend(extra_args) - if not tests_included: - # We use the glob to test that globbing works. - args.extend(['passes', - 'http/tests', - 'websocket/tests', - 'failures/expected/*']) - options, parsed_args = run_webkit_tests.parse_args(args) - if not port_obj: - port_obj = port.get(port_name=options.platform, options=options, - user=MockUser()) - res = run_webkit_tests.run(port_obj, options, parsed_args) - return res == 0 - - -def logging_run(extra_args=None, port_obj=None, tests_included=False): - extra_args = extra_args or [] - args = ['--no-record-results'] - if not '--platform' in extra_args: - args.extend(['--platform', 'test']) - if not '--child-processes' in extra_args: - args.extend(['--worker-model', 'inline']) - args.extend(extra_args) - if not tests_included: - args.extend(['passes', - 'http/tests', - 'websocket/tests', - 'failures/expected/*']) - - oc = outputcapture.OutputCapture() - try: - oc.capture_output() - options, parsed_args = run_webkit_tests.parse_args(args) - user = MockUser() - if not port_obj: - port_obj = port.get(port_name=options.platform, options=options, - user=user) - buildbot_output = array_stream.ArrayStream() - regular_output = array_stream.ArrayStream() - res = run_webkit_tests.run(port_obj, options, parsed_args, - buildbot_output=buildbot_output, - regular_output=regular_output) - finally: - oc.restore_output() - return (res, buildbot_output, regular_output, user) - - -def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False): - extra_args = extra_args or [] - args = [ - '--print', 'nothing', - '--platform', 'test', - '--no-record-results', - '--worker-model', 'inline'] - args.extend(extra_args) - if not tests_included: - # Not including http tests since they get run out of order (that - # behavior has its own test, see test_get_test_file_queue) - args.extend(['passes', 'failures']) - options, parsed_args = run_webkit_tests.parse_args(args) - user = MockUser() - - test_batches = [] - - class RecordingTestDriver(TestDriver): - def __init__(self, port, worker_number): - TestDriver.__init__(self, port, worker_number) - self._current_test_batch = None - - def poll(self): - # So that we don't create a new driver for every test - return None - - def stop(self): - self._current_test_batch = None - - def run_test(self, 
test_input): - if self._current_test_batch is None: - self._current_test_batch = [] - test_batches.append(self._current_test_batch) - test_name = self._port.relative_test_filename(test_input.filename) - self._current_test_batch.append(test_name) - return TestDriver.run_test(self, test_input) - - class RecordingTestPort(TestPort): - def create_driver(self, worker_number): - return RecordingTestDriver(self, worker_number) - - recording_port = RecordingTestPort(options=options, user=user) - logging_run(extra_args=args, port_obj=recording_port, tests_included=True) - - if flatten_batches: - return list(itertools.chain(*test_batches)) - - return test_batches - -class MainTest(unittest.TestCase): - def test_accelerated_compositing(self): - # This just tests that we recognize the command line args - self.assertTrue(passing_run(['--accelerated-compositing'])) - self.assertTrue(passing_run(['--no-accelerated-compositing'])) - - def test_accelerated_2d_canvas(self): - # This just tests that we recognize the command line args - self.assertTrue(passing_run(['--accelerated-2d-canvas'])) - self.assertTrue(passing_run(['--no-accelerated-2d-canvas'])) - - def test_basic(self): - self.assertTrue(passing_run()) - - def test_batch_size(self): - batch_tests_run = get_tests_run(['--batch-size', '2']) - for batch in batch_tests_run: - self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch)) - - def test_child_process_1(self): - (res, buildbot_output, regular_output, user) = logging_run( - ['--print', 'config', '--child-processes', '1']) - self.assertTrue('Running one DumpRenderTree\n' - in regular_output.get()) - - def test_child_processes_2(self): - (res, buildbot_output, regular_output, user) = logging_run( - ['--print', 'config', '--child-processes', '2']) - self.assertTrue('Running 2 DumpRenderTrees in parallel\n' - in regular_output.get()) - - def test_exception_raised(self): - self.assertRaises(ValueError, logging_run, - ['failures/expected/exception.html'], tests_included=True) - - def test_full_results_html(self): - # FIXME: verify html? - self.assertTrue(passing_run(['--full-results-html'])) - - def test_help_printing(self): - res, out, err, user = logging_run(['--help-printing']) - self.assertEqual(res, 0) - self.assertTrue(out.empty()) - self.assertFalse(err.empty()) - - def test_hung_thread(self): - res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50', - 'failures/expected/hang.html'], - tests_included=True) - self.assertEqual(res, 0) - self.assertFalse(out.empty()) - self.assertFalse(err.empty()) - - def test_keyboard_interrupt(self): - # Note that this also tests running a test marked as SKIP if - # you specify it explicitly. - self.assertRaises(KeyboardInterrupt, logging_run, - ['failures/expected/keyboard.html'], tests_included=True) - - def test_last_results(self): - passing_run(['--clobber-old-results'], record_results=True) - (res, buildbot_output, regular_output, user) = logging_run( - ['--print-last-failures']) - self.assertEqual(regular_output.get(), ['\n\n']) - self.assertEqual(buildbot_output.get(), []) - - def test_lint_test_files(self): - # FIXME: add errors? 
- res, out, err, user = logging_run(['--lint-test-files'], - tests_included=True) - self.assertEqual(res, 0) - self.assertTrue(out.empty()) - self.assertTrue(any(['lint succeeded' in msg for msg in err.get()])) - - def test_no_tests_found(self): - res, out, err, user = logging_run(['resources'], tests_included=True) - self.assertEqual(res, -1) - self.assertTrue(out.empty()) - self.assertTrue('No tests to run.\n' in err.get()) - - def test_no_tests_found_2(self): - res, out, err, user = logging_run(['foo'], tests_included=True) - self.assertEqual(res, -1) - self.assertTrue(out.empty()) - self.assertTrue('No tests to run.\n' in err.get()) - - def test_randomize_order(self): - # FIXME: verify order was shuffled - self.assertTrue(passing_run(['--randomize-order'])) - - def test_run_chunk(self): - # Test that we actually select the right chunk - all_tests_run = get_tests_run(flatten_batches=True) - chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True) - self.assertEquals(all_tests_run[4:8], chunk_tests_run) - - # Test that we wrap around if the number of tests is not evenly divisible by the chunk size - tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html'] - chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True) - self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run) - - def test_run_force(self): - # This raises an exception because we run - # failures/expected/exception.html, which is normally SKIPped. - self.assertRaises(ValueError, logging_run, ['--force']) - - def test_run_part(self): - # Test that we actually select the right part - tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html'] - tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True) - self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run) - - # Test that we wrap around if the number of tests is not evenly divisible by the chunk size - # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the - # last part repeats the first two tests). - chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True) - self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run) - - def test_run_singly(self): - batch_tests_run = get_tests_run(['--run-singly']) - for batch in batch_tests_run: - self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch)) - - def test_single_file(self): - tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True) - self.assertEquals(['passes/text.html'], tests_run) - - def test_test_list(self): - filename = tempfile.mktemp() - tmpfile = file(filename, mode='w+') - tmpfile.write('passes/text.html') - tmpfile.close() - tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True) - self.assertEquals(['passes/text.html'], tests_run) - os.remove(filename) - res, out, err, user = logging_run(['--test-list=%s' % filename], - tests_included=True) - self.assertEqual(res, -1) - self.assertFalse(err.empty()) - - def test_unexpected_failures(self): - # Run tests including the unexpected failures. 
- self._url_opened = None - res, out, err, user = logging_run(tests_included=True) - self.assertEqual(res, 1) - self.assertFalse(out.empty()) - self.assertFalse(err.empty()) - self.assertEqual(user.url, '/tmp/layout-test-results/results.html') - - def test_results_directory_absolute(self): - # We run a configuration that should fail, to generate output, then - # look for what the output results url was. - - tmpdir = tempfile.mkdtemp() - res, out, err, user = logging_run(['--results-directory=' + tmpdir], - tests_included=True) - self.assertEqual(user.url, os.path.join(tmpdir, 'results.html')) - shutil.rmtree(tmpdir, ignore_errors=True) - - def test_results_directory_default(self): - # We run a configuration that should fail, to generate output, then - # look for what the output results url was. - - # This is the default location. - res, out, err, user = logging_run(tests_included=True) - self.assertEqual(user.url, '/tmp/layout-test-results/results.html') - - def test_results_directory_relative(self): - # We run a configuration that should fail, to generate output, then - # look for what the output results url was. - - res, out, err, user = logging_run(['--results-directory=foo'], - tests_included=True) - self.assertEqual(user.url, '/tmp/foo/results.html') - - def test_tolerance(self): - class ImageDiffTestPort(TestPort): - def diff_image(self, expected_contents, actual_contents, - diff_filename=None): - self.tolerance_used_for_diff_image = self._options.tolerance - return True - - def get_port_for_run(args): - options, parsed_args = run_webkit_tests.parse_args(args) - test_port = ImageDiffTestPort(options=options, user=MockUser()) - passing_run(args, port_obj=test_port, tests_included=True) - return test_port - - base_args = ['--pixel-tests', 'failures/expected/*'] - - # If we pass in an explicit tolerance argument, then that will be used. - test_port = get_port_for_run(base_args + ['--tolerance', '.1']) - self.assertEqual(0.1, test_port.tolerance_used_for_diff_image) - test_port = get_port_for_run(base_args + ['--tolerance', '0']) - self.assertEqual(0, test_port.tolerance_used_for_diff_image) - - # Otherwise the port's default tolerance behavior (including ignoring it) - # should be used. 
- test_port = get_port_for_run(base_args) - self.assertEqual(None, test_port.tolerance_used_for_diff_image) - - def test_worker_model__inline(self): - self.assertTrue(passing_run(['--worker-model', 'inline'])) - - def test_worker_model__threads(self): - self.assertTrue(passing_run(['--worker-model', 'threads'])) - - def test_worker_model__processes(self): - self.assertRaises(ValueError, logging_run, - ['--worker-model', 'processes']) - - def test_worker_model__unknown(self): - self.assertRaises(ValueError, logging_run, - ['--worker-model', 'unknown']) - -MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2') - - - -def _mocked_open(original_open, file_list): - def _wrapper(name, mode, encoding): - if name.find("-expected.") != -1 and mode.find("w") != -1: - # we don't want to actually write new baselines, so stub these out - name.replace('\\', '/') - file_list.append(name) - return original_open(os.devnull, mode, encoding) - return original_open(name, mode, encoding) - return _wrapper - - -class RebaselineTest(unittest.TestCase): - def assertBaselines(self, file_list, file): - "assert that the file_list contains the baselines.""" - for ext in [".txt", ".png", ".checksum"]: - baseline = file + "-expected" + ext - self.assertTrue(any(f.find(baseline) != -1 for f in file_list)) - - # FIXME: Add tests to ensure that we're *not* writing baselines when we're not - # supposed to be. - - def disabled_test_reset_results(self): - # FIXME: This test is disabled until we can rewrite it to use a - # mock filesystem. - # - # Test that we update expectations in place. If the expectation - # is missing, update the expected generic location. - file_list = [] - passing_run(['--pixel-tests', - '--reset-results', - 'passes/image.html', - 'failures/expected/missing_image.html'], - tests_included=True) - self.assertEqual(len(file_list), 6) - self.assertBaselines(file_list, - "data/passes/image") - self.assertBaselines(file_list, - "data/failures/expected/missing_image") - - def disabled_test_new_baseline(self): - # FIXME: This test is disabled until we can rewrite it to use a - # mock filesystem. - # - # Test that we update the platform expectations. If the expectation - # is mssing, then create a new expectation in the platform dir. - file_list = [] - original_open = codecs.open - try: - # Test that we update the platform expectations. If the expectation - # is mssing, then create a new expectation in the platform dir. 
- file_list = [] - codecs.open = _mocked_open(original_open, file_list) - passing_run(['--pixel-tests', - '--new-baseline', - 'passes/image.html', - 'failures/expected/missing_image.html'], - tests_included=True) - self.assertEqual(len(file_list), 6) - self.assertBaselines(file_list, - "data/platform/test/passes/image") - self.assertBaselines(file_list, - "data/platform/test/failures/expected/missing_image") - finally: - codecs.open = original_open - - -class TestRunnerWrapper(run_webkit_tests.TestRunner): - def _get_test_input_for_file(self, test_file): - return test_file - - -class TestRunnerTest(unittest.TestCase): - def test_results_html(self): - mock_port = Mock() - mock_port.relative_test_filename = lambda name: name - mock_port.filename_to_uri = lambda name: name - - runner = run_webkit_tests.TestRunner(port=mock_port, options=Mock(), - printer=Mock(), message_broker=Mock()) - expected_html = u"""<html> - <head> - <title>Layout Test Results (time)</title> - </head> - <body> - <h2>Title (time)</h2> - <p><a href='test_path'>test_path</a><br /> -</p> -</body></html> -""" - html = runner._results_html(["test_path"], {}, "Title", override_time="time") - self.assertEqual(html, expected_html) - - def test_shard_tests(self): - # Test that _shard_tests in run_webkit_tests.TestRunner really - # put the http tests first in the queue. - runner = TestRunnerWrapper(port=Mock(), options=Mock(), - printer=Mock(), message_broker=Mock()) - - test_list = [ - "LayoutTests/websocket/tests/unicode.htm", - "LayoutTests/animations/keyframes.html", - "LayoutTests/http/tests/security/view-source-no-refresh.html", - "LayoutTests/websocket/tests/websocket-protocol-ignored.html", - "LayoutTests/fast/css/display-none-inline-style-change-crash.html", - "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html", - "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html", - "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html", - "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html", - ] - - expected_tests_to_http_lock = set([ - 'LayoutTests/websocket/tests/unicode.htm', - 'LayoutTests/http/tests/security/view-source-no-refresh.html', - 'LayoutTests/websocket/tests/websocket-protocol-ignored.html', - 'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html', - ]) - - # FIXME: Ideally the HTTP tests don't have to all be in one shard. - single_thread_results = runner._shard_tests(test_list, False) - multi_thread_results = runner._shard_tests(test_list, True) - - self.assertEqual("tests_to_http_lock", single_thread_results[0][0]) - self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1])) - self.assertEqual("tests_to_http_lock", multi_thread_results[0][0]) - self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1])) - - -class DryrunTest(unittest.TestCase): - # FIXME: it's hard to know which platforms are safe to test; the - # chromium platforms require a chromium checkout, and the mac platform - # requires fcntl, so it can't be tested on win32, etc. There is - # probably a better way of handling this. 
- def test_darwin(self): - if sys.platform != "darwin": - return - - self.assertTrue(passing_run(['--platform', 'test'])) - self.assertTrue(passing_run(['--platform', 'dryrun', - 'fast/html'])) - self.assertTrue(passing_run(['--platform', 'dryrun-mac', - 'fast/html'])) - - def test_test(self): - self.assertTrue(passing_run(['--platform', 'dryrun-test', - '--pixel-tests'])) - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py deleted file mode 100644 index e69de29..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py +++ /dev/null diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py deleted file mode 100644 index da466c8..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Compares the image output of a test to the expected image output. - -Compares hashes for the generated and expected images. If the output doesn't -match, returns FailureImageHashMismatch and outputs both hashes into the layout -test results directory. -""" - -from __future__ import with_statement - -import codecs -import errno -import logging -import os -import shutil - -from webkitpy.layout_tests.layout_package import test_failures -from webkitpy.layout_tests.test_types import test_type_base - -# Cache whether we have the image_diff executable available. -_compare_available = True -_compare_msg_printed = False - -_log = logging.getLogger("webkitpy.layout_tests.test_types.image_diff") - - -class ImageDiff(test_type_base.TestTypeBase): - - def _save_baseline_files(self, filename, image, image_hash, - generate_new_baseline): - """Saves new baselines for the PNG and checksum. 
- - Args: - filename: test filename - image: a image output - image_hash: a checksum of the image - generate_new_baseline: whether to generate a new, platform-specific - baseline, or update the existing one - """ - self._save_baseline_data(filename, image, ".png", encoding=None, - generate_new_baseline=generate_new_baseline) - self._save_baseline_data(filename, image_hash, ".checksum", - encoding="ascii", - generate_new_baseline=generate_new_baseline) - - def _copy_image(self, filename, actual_image, expected_image): - self.write_output_files(filename, '.png', - output=actual_image, expected=expected_image, - encoding=None, print_text_diffs=False) - - def _copy_image_hash(self, filename, actual_image_hash, expected_image_hash): - self.write_output_files(filename, '.checksum', - actual_image_hash, expected_image_hash, - encoding="ascii", print_text_diffs=False) - - def _create_diff_image(self, port, filename, actual_image, expected_image): - """Creates the visual diff of the expected/actual PNGs. - - Returns True if the images are different. - """ - diff_filename = self.output_filename(filename, - self.FILENAME_SUFFIX_COMPARE) - return port.diff_image(actual_image, expected_image, diff_filename) - - def compare_output(self, port, filename, test_args, actual_test_output, - expected_test_output): - """Implementation of CompareOutput that checks the output image and - checksum against the expected files from the LayoutTest directory. - """ - failures = [] - - # If we didn't produce a hash file, this test must be text-only. - if actual_test_output.image_hash is None: - return failures - - # If we're generating a new baseline, we pass. - if test_args.new_baseline or test_args.reset_results: - self._save_baseline_files(filename, actual_test_output.image, - actual_test_output.image_hash, - test_args.new_baseline) - return failures - - if not expected_test_output.image: - # Report a missing expected PNG file. - self._copy_image(filename, actual_test_output.image, expected_image=None) - self._copy_image_hash(filename, actual_test_output.image_hash, - expected_test_output.image_hash) - failures.append(test_failures.FailureMissingImage()) - return failures - if not expected_test_output.image_hash: - # Report a missing expected checksum file. - self._copy_image(filename, actual_test_output.image, - expected_test_output.image) - self._copy_image_hash(filename, actual_test_output.image_hash, - expected_image_hash=None) - failures.append(test_failures.FailureMissingImageHash()) - return failures - - if actual_test_output.image_hash == expected_test_output.image_hash: - # Hash matched (no diff needed, okay to return). - return failures - - self._copy_image(filename, actual_test_output.image, - expected_test_output.image) - self._copy_image_hash(filename, actual_test_output.image_hash, - expected_test_output.image_hash) - - # Even though we only use the result in one codepath below but we - # still need to call CreateImageDiff for other codepaths. 
- images_are_different = self._create_diff_image(port, filename, - actual_test_output.image, - expected_test_output.image) - if not images_are_different: - failures.append(test_failures.FailureImageHashIncorrect()) - else: - failures.append(test_failures.FailureImageHashMismatch()) - - return failures diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py deleted file mode 100644 index 4b96b3a..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py +++ /dev/null @@ -1,223 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Defines the interface TestTypeBase which other test types inherit from. - -Also defines the TestArguments "struct" to pass them additional arguments. -""" - -from __future__ import with_statement - -import codecs -import cgi -import errno -import logging -import os.path - -_log = logging.getLogger("webkitpy.layout_tests.test_types.test_type_base") - - -class TestArguments(object): - """Struct-like wrapper for additional arguments needed by - specific tests.""" - # Whether to save new baseline results. - new_baseline = False - - # Path to the actual PNG file generated by pixel tests - png_path = None - - # Value of checksum generated by pixel tests. - hash = None - - # Whether to use wdiff to generate by-word diffs. - wdiff = False - -# Python bug workaround. See the wdiff code in WriteOutputFiles for an -# explanation. -_wdiff_available = True - - -class TestTypeBase(object): - - # Filename pieces when writing failures to the test results directory. - FILENAME_SUFFIX_ACTUAL = "-actual" - FILENAME_SUFFIX_EXPECTED = "-expected" - FILENAME_SUFFIX_DIFF = "-diff" - FILENAME_SUFFIX_WDIFF = "-wdiff.html" - FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html" - FILENAME_SUFFIX_COMPARE = "-diff.png" - - def __init__(self, port, root_output_dir): - """Initialize a TestTypeBase object. 
- - Args: - port: object implementing port-specific information and methods - root_output_dir: The unix style path to the output dir. - """ - self._root_output_dir = root_output_dir - self._port = port - - def _make_output_directory(self, filename): - """Creates the output directory (if needed) for a given test - filename.""" - output_filename = os.path.join(self._root_output_dir, - self._port.relative_test_filename(filename)) - self._port.maybe_make_directory(os.path.split(output_filename)[0]) - - def _save_baseline_data(self, filename, data, modifier, encoding, - generate_new_baseline=True): - """Saves a new baseline file into the port's baseline directory. - - The file will be named simply "<test>-expected<modifier>", suitable for - use as the expected results in a later run. - - Args: - filename: path to the test file - data: result to be saved as the new baseline - modifier: type of the result file, e.g. ".txt" or ".png" - encoding: file encoding (none, "utf-8", etc.) - generate_new_baseline: whether to enerate a new, platform-specific - baseline, or update the existing one - """ - - if generate_new_baseline: - relative_dir = os.path.dirname( - self._port.relative_test_filename(filename)) - baseline_path = self._port.baseline_path() - output_dir = os.path.join(baseline_path, relative_dir) - output_file = os.path.basename(os.path.splitext(filename)[0] + - self.FILENAME_SUFFIX_EXPECTED + modifier) - self._port.maybe_make_directory(output_dir) - output_path = os.path.join(output_dir, output_file) - _log.debug('writing new baseline result "%s"' % (output_path)) - else: - output_path = self._port.expected_filename(filename, modifier) - _log.debug('resetting baseline result "%s"' % output_path) - - self._port.update_baseline(output_path, data, encoding) - - def output_filename(self, filename, modifier): - """Returns a filename inside the output dir that contains modifier. - - For example, if filename is c:/.../fast/dom/foo.html and modifier is - "-expected.txt", the return value is - c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt - - Args: - filename: absolute filename to test file - modifier: a string to replace the extension of filename with - - Return: - The absolute windows path to the output filename - """ - output_filename = os.path.join(self._root_output_dir, - self._port.relative_test_filename(filename)) - return os.path.splitext(output_filename)[0] + modifier - - def compare_output(self, port, filename, test_args, actual_test_output, - expected_test_output): - """Method that compares the output from the test with the - expected value. - - This is an abstract method to be implemented by all sub classes. 
- - Args: - port: object implementing port-specific information and methods - filename: absolute filename to test file - test_args: a TestArguments object holding optional additional - arguments - actual_test_output: a TestOutput object which represents actual test - output - expected_test_output: a TestOutput object which represents a expected - test output - - Return: - a list of TestFailure objects, empty if the test passes - """ - raise NotImplementedError - - def _write_into_file_at_path(self, file_path, contents, encoding): - """This method assumes that byte_array is already encoded - into the right format.""" - open_mode = 'w' - if encoding is None: - open_mode = 'w+b' - with codecs.open(file_path, open_mode, encoding=encoding) as file: - file.write(contents) - - def write_output_files(self, filename, file_type, - output, expected, encoding, - print_text_diffs=False): - """Writes the test output, the expected output and optionally the diff - between the two to files in the results directory. - - The full output filename of the actual, for example, will be - <filename>-actual<file_type> - For instance, - my_test-actual.txt - - Args: - filename: The test filename - file_type: A string describing the test output file type, e.g. ".txt" - output: A string containing the test output - expected: A string containing the expected test output - print_text_diffs: True for text diffs. (FIXME: We should be able to get this from the file type?) - """ - self._make_output_directory(filename) - actual_filename = self.output_filename(filename, self.FILENAME_SUFFIX_ACTUAL + file_type) - expected_filename = self.output_filename(filename, self.FILENAME_SUFFIX_EXPECTED + file_type) - # FIXME: This function is poorly designed. We should be passing in some sort of - # encoding information from the callers. - if output: - self._write_into_file_at_path(actual_filename, output, encoding) - if expected: - self._write_into_file_at_path(expected_filename, expected, encoding) - - if not output or not expected: - return - - if not print_text_diffs: - return - - # Note: We pass encoding=None for all diff writes, as we treat diff - # output as binary. Diff output may contain multiple files in - # conflicting encodings. - diff = self._port.diff_text(expected, output, expected_filename, actual_filename) - diff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_DIFF + file_type) - self._write_into_file_at_path(diff_filename, diff, encoding=None) - - # Shell out to wdiff to get colored inline diffs. - wdiff = self._port.wdiff_text(expected_filename, actual_filename) - wdiff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_WDIFF) - self._write_into_file_at_path(wdiff_filename, wdiff, encoding=None) - - # Use WebKit's PrettyPatch.rb to get an HTML diff. - pretty_patch = self._port.pretty_patch_text(diff_filename) - pretty_patch_filename = self.output_filename(filename, self.FILENAME_SUFFIX_PRETTY_PATCH) - self._write_into_file_at_path(pretty_patch_filename, pretty_patch, encoding=None) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py deleted file mode 100644 index 5dbfcb6..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -""""Tests stray tests not covered by regular code paths.""" - -import test_type_base -import unittest - -from webkitpy.thirdparty.mock import Mock - - -class Test(unittest.TestCase): - - def test_compare_output_notimplemented(self): - test_type = test_type_base.TestTypeBase(None, None) - self.assertRaises(NotImplementedError, test_type.compare_output, - None, "foo.txt", '', - test_type_base.TestArguments(), 'Debug') - - -if __name__ == '__main__': - unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py deleted file mode 100644 index ca4b17d..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Compares the text output of a test to the expected text output. - -If the output doesn't match, returns FailureTextMismatch and outputs the diff -files into the layout test results directory. -""" - -from __future__ import with_statement - -import codecs -import errno -import logging -import os.path - -from webkitpy.layout_tests.layout_package import test_failures -from webkitpy.layout_tests.test_types import test_type_base - -_log = logging.getLogger("webkitpy.layout_tests.test_types.text_diff") - - -class TestTextDiff(test_type_base.TestTypeBase): - - def _get_normalized_output_text(self, output): - # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin) - # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n". - norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace( - "\r\n", "\n") - return norm + "\n" - - def compare_output(self, port, filename, test_args, actual_test_output, - expected_test_output): - """Implementation of CompareOutput that checks the output text against - the expected text from the LayoutTest directory.""" - failures = [] - - # If we're generating a new baseline, we pass. - if test_args.new_baseline or test_args.reset_results: - # Although all test_shell/DumpRenderTree output should be utf-8, - # we do not ever decode it inside run-webkit-tests. For some tests - # DumpRenderTree may not output utf-8 text (e.g. webarchives). - self._save_baseline_data(filename, actual_test_output.text, - ".txt", encoding=None, - generate_new_baseline=test_args.new_baseline) - return failures - - # Normalize text to diff - actual_text = self._get_normalized_output_text(actual_test_output.text) - # Assuming expected_text is already normalized. - expected_text = expected_test_output.text - - # Write output files for new tests, too. - if port.compare_text(actual_text, expected_text): - # Text doesn't match, write output files. - self.write_output_files(filename, ".txt", actual_text, - expected_text, encoding=None, - print_text_diffs=True) - - if expected_text == '': - failures.append(test_failures.FailureMissingResult()) - else: - failures.append(test_failures.FailureTextMismatch()) - - return failures diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py deleted file mode 100755 index f4c8098..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. 
Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from __future__ import with_statement - -import glob -import logging -import optparse -import os -import re -import sys -import webkitpy.common.checkout.scm as scm - -_log = logging.getLogger("webkitpy.layout_tests." - "update-webgl-conformance-tests") - - -def remove_first_line_comment(text): - return re.compile(r'^<!--.*?-->\s*', re.DOTALL).sub('', text) - - -def translate_includes(text): - # Mapping of single filename to relative path under WebKit root. - # Assumption: these filenames are globally unique. - include_mapping = { - "js-test-style.css": "../../js/resources", - "js-test-pre.js": "../../js/resources", - "js-test-post.js": "../../js/resources", - "desktop-gl-constants.js": "resources", - } - - for filename, path in include_mapping.items(): - search = r'(?:[^"\'= ]*/)?' + re.escape(filename) - replace = os.path.join(path, filename) - text = re.sub(search, replace, text) - - return text - - -def translate_khronos_test(text): - """ - This method translates the contents of a Khronos test to a WebKit test. 
- """ - - translateFuncs = [ - remove_first_line_comment, - translate_includes, - ] - - for f in translateFuncs: - text = f(text) - - return text - - -def update_file(in_filename, out_dir): - # check in_filename exists - # check out_dir exists - out_filename = os.path.join(out_dir, os.path.basename(in_filename)) - - _log.debug("Processing " + in_filename) - with open(in_filename, 'r') as in_file: - with open(out_filename, 'w') as out_file: - out_file.write(translate_khronos_test(in_file.read())) - - -def update_directory(in_dir, out_dir): - for filename in glob.glob(os.path.join(in_dir, '*.html')): - update_file(os.path.join(in_dir, filename), out_dir) - - -def default_out_dir(): - current_scm = scm.detect_scm_system(os.path.dirname(sys.argv[0])) - if not current_scm: - return os.getcwd() - root_dir = current_scm.checkout_root - if not root_dir: - return os.getcwd() - out_dir = os.path.join(root_dir, "LayoutTests/fast/canvas/webgl") - if os.path.isdir(out_dir): - return out_dir - return os.getcwd() - - -def configure_logging(options): - """Configures the logging system.""" - log_fmt = '%(levelname)s: %(message)s' - log_datefmt = '%y%m%d %H:%M:%S' - log_level = logging.INFO - if options.verbose: - log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' - '%(message)s') - log_level = logging.DEBUG - logging.basicConfig(level=log_level, format=log_fmt, - datefmt=log_datefmt) - - -def option_parser(): - usage = "usage: %prog [options] (input file or directory)" - parser = optparse.OptionParser(usage=usage) - parser.add_option('-v', '--verbose', - action='store_true', - default=False, - help='include debug-level logging') - parser.add_option('-o', '--output', - action='store', - type='string', - default=default_out_dir(), - metavar='DIR', - help='specify an output directory to place files ' - 'in [default: %default]') - return parser - - -def main(): - parser = option_parser() - (options, args) = parser.parse_args() - configure_logging(options) - - if len(args) == 0: - _log.error("Must specify an input directory or filename.") - parser.print_help() - return 1 - - in_name = args[0] - if os.path.isfile(in_name): - update_file(in_name, options.output) - elif os.path.isdir(in_name): - update_directory(in_name, options.output) - else: - _log.error("'%s' is not a directory or a file.", in_name) - return 2 - - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py deleted file mode 100644 index 7393b70..0000000 --- a/WebKitTools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/python -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Unit tests for update_webgl_conformance_tests.""" - -import unittest -from webkitpy.layout_tests import update_webgl_conformance_tests as webgl - - -def construct_script(name): - return "<script src=\"" + name + "\"></script>\n" - - -def construct_style(name): - return "<link rel=\"stylesheet\" href=\"" + name + "\">" - - -class TestTranslation(unittest.TestCase): - def assert_unchanged(self, text): - self.assertEqual(text, webgl.translate_khronos_test(text)) - - def assert_translate(self, input, output): - self.assertEqual(output, webgl.translate_khronos_test(input)) - - def test_simple_unchanged(self): - self.assert_unchanged("") - self.assert_unchanged("<html></html>") - - def test_header_strip(self): - single_line_header = "<!-- single line header. -->" - multi_line_header = """<!-- this is a multi-line - header. it should all be removed too. - -->""" - text = "<html></html>" - self.assert_translate(single_line_header, "") - self.assert_translate(single_line_header + text, text) - self.assert_translate(multi_line_header + text, text) - - def dont_strip_other_headers(self): - self.assert_unchanged("<html>\n<!-- don't remove comments on other lines. -->\n</html>") - - def test_include_rewriting(self): - # Mappings to None are unchanged - styles = { - "../resources/js-test-style.css": "../../js/resources/js-test-style.css", - "fail.css": None, - "resources/stylesheet.css": None, - "../resources/style.css": None, - } - scripts = { - "../resources/js-test-pre.js": "../../js/resources/js-test-pre.js", - "../resources/js-test-post.js": "../../js/resources/js-test-post.js", - "../resources/desktop-gl-constants.js": "resources/desktop-gl-constants.js", - - "resources/shadow-offset.js": None, - "../resources/js-test-post-async.js": None, - } - - input_text = "" - output_text = "" - for input, output in styles.items(): - input_text += construct_style(input) - output_text += construct_style(output if output else input) - for input, output in scripts.items(): - input_text += construct_script(input) - output_text += construct_script(output if output else input) - - head = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n<html>\n<head>\n' - foot = '</head>\n<body>\n</body>\n</html>' - input_text = head + input_text + foot - output_text = head + output_text + foot - self.assert_translate(input_text, output_text) - - -if __name__ == '__main__': - unittest.main() |
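As a rough illustration of what the deleted update_webgl_conformance_tests.py did, here is a minimal usage sketch. It assumes the module were still importable as webkitpy.layout_tests.update_webgl_conformance_tests (the import path used by its own unit test above); the sample input is invented, but the expected rewrites mirror the include mapping and the unit-test expectations shown in the diff.

    # Minimal sketch: exercise translate_khronos_test() on a hand-written
    # Khronos-style snippet. The input HTML below is illustrative only.
    from webkitpy.layout_tests import update_webgl_conformance_tests as webgl

    khronos_source = (
        '<!-- Khronos header comment: stripped by the translation. -->\n'
        '<script src="../resources/js-test-pre.js"></script>\n'
        '<script src="resources/shadow-offset.js"></script>\n'
    )

    translated = webgl.translate_khronos_test(khronos_source)

    # The leading <!-- ... --> comment is removed, and includes listed in the
    # module's mapping are redirected under LayoutTests/fast/js resources:
    #   ../resources/js-test-pre.js  -> ../../js/resources/js-test-pre.js
    #   resources/shadow-offset.js   -> unchanged (not in the mapping)
    assert '<!--' not in translated
    assert '../../js/resources/js-test-pre.js' in translated
    assert 'resources/shadow-offset.js' in translated

This is the same behavior the deleted update_webgl_conformance_tests_unittest.py verifies with its styles/scripts dictionaries; the sketch is only a compact restatement of that expectation, not a replacement for the removed tests.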