author     Steve Block <steveblock@google.com>   2010-02-15 12:23:52 +0000
committer  Steve Block <steveblock@google.com>   2010-02-16 11:48:32 +0000
commit     8a0914b749bbe7da7768e07a7db5c6d4bb09472b (patch)
tree       73f9065f370435d6fde32ae129d458a8c77c8dff /WebKitTools/Scripts/webkitpy/layout_tests/layout_package
parent     bf14be70295513b8076f3fa47a268a7e42b2c478 (diff)
Merge webkit.org at r54731 : Initial merge by git
Change-Id: Ia79977b6cf3b0b00c06ef39419989b28e57e4f4a
Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests/layout_package')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py |   8
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py        |  56
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py             | 112
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py                    |   7
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py             | 219
5 files changed, 166 insertions, 236 deletions
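
The common thread across all five files is that the module-level helpers from port.path_utils are replaced by methods on a port object that each class now receives through its constructor. The stub below is a minimal sketch, outside the patch itself, of what such an object might expose; both the class name and the method bodies are assumptions inferred only from the calls visible in this diff (layout_tests_dir, relative_test_filename, maybe_make_directory, test_platform_names, path_from_webkit_base), while the real webkitpy port classes live elsewhere and provide much more.

import os

class StubPort(object):
    """Hypothetical stand-in for the port objects threaded through this diff."""

    def __init__(self, webkit_base):
        self._webkit_base = webkit_base

    def layout_tests_dir(self):
        # Root of the LayoutTests tree; used to resolve relative test paths.
        return os.path.join(self._webkit_base, 'LayoutTests')

    def relative_test_filename(self, filename):
        # Path of a test relative to the LayoutTests root.
        return os.path.relpath(filename, self.layout_tests_dir())

    def maybe_make_directory(self, *path):
        # Create the directory if it does not already exist.
        directory = os.path.join(*path)
        if not os.path.exists(directory):
            os.makedirs(directory)

    def test_platform_names(self):
        # Platform tokens recognized in test_expectations.txt (assumed set).
        return ('linux', 'mac', 'win', 'win-xp', 'win-vista', 'win-7')

    def path_from_webkit_base(self, *comps):
        # Absolute path built from the WebKit checkout root.
        return os.path.join(self._webkit_base, *comps)
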
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index f38a7ab..520ab1f 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -29,9 +29,9 @@
import logging
import os
+import simplejson
from layout_package import json_results_generator
-from port import path_utils
from layout_package import test_expectations
from layout_package import test_failures
@@ -45,7 +45,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
WONTFIX = "wontfixCounts"
DEFERRED = "deferredCounts"
- def __init__(self, builder_name, build_name, build_number,
+ def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_timings, expectations, result_summary, all_tests):
"""Modifies the results.json file. Grabs it off the archive directory
@@ -56,7 +56,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
results.
(see the comment of JSONResultsGenerator.__init__ for other Args)
"""
-
+ self._port = port
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
@@ -153,7 +153,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
test, test_name, tests)
# Remove tests that don't exist anymore.
- full_path = os.path.join(path_utils.layout_tests_dir(), test_name)
+ full_path = os.path.join(self._port.layout_tests_dir(), test_name)
full_path = os.path.normpath(full_path)
if not os.path.exists(full_path):
del tests[test_name]
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index dc24ade..84be0e1 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -29,19 +29,15 @@
import logging
import os
+import simplejson
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
-from port import path_utils
from layout_package import test_expectations
-sys.path.append(path_utils.path_from_base('third_party', 'WebKit',
- 'WebKitTools'))
-import simplejson
-
class JSONResultsGenerator(object):
@@ -80,7 +76,7 @@ class JSONResultsGenerator(object):
RESULTS_FILENAME = "results.json"
- def __init__(self, builder_name, build_name, build_number,
+ def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_timings, failures, passed_tests, skipped_tests, all_tests):
"""Modifies the results.json file. Grabs it off the archive directory
@@ -100,6 +96,7 @@ class JSONResultsGenerator(object):
all_tests: List of all the tests that were run. This should not
include skipped tests.
"""
+ self._port = port
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
@@ -122,22 +119,24 @@ class JSONResultsGenerator(object):
results_file.write(json)
results_file.close()
- def _get_svn_revision(self, in_directory=None):
+ def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
Args:
in_directory: The directory where svn is to be run.
"""
- output = subprocess.Popen(["svn", "info", "--xml"],
- cwd=in_directory,
- shell=(sys.platform == 'win32'),
- stdout=subprocess.PIPE).communicate()[0]
- try:
- dom = xml.dom.minidom.parseString(output)
- return dom.getElementsByTagName('entry')[0].getAttribute(
- 'revision')
- except xml.parsers.expat.ExpatError:
- return ""
+ if os.path.exists(os.path.join(in_directory, '.svn')):
+ output = subprocess.Popen(["svn", "info", "--xml"],
+ cwd=in_directory,
+ shell=(sys.platform == 'win32'),
+ stdout=subprocess.PIPE).communicate()[0]
+ try:
+ dom = xml.dom.minidom.parseString(output)
+ return dom.getElementsByTagName('entry')[0].getAttribute(
+ 'revision')
+ except xml.parsers.expat.ExpatError:
+ return ""
+ return ""
def _get_archived_json_results(self):
"""Reads old results JSON file if it exists.
@@ -305,16 +304,19 @@ class JSONResultsGenerator(object):
self._insert_item_into_raw_list(results_for_builder,
self._build_number, self.BUILD_NUMBERS)
- path_to_webkit = path_utils.path_from_base('third_party', 'WebKit',
- 'WebCore')
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path_to_webkit),
- self.WEBKIT_SVN)
-
- path_to_chrome_base = path_utils.path_from_base()
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path_to_chrome_base),
- self.CHROME_SVN)
+ # These next two branches test to see which source repos we can
+ # pull revisions from.
+ if hasattr(self._port, 'path_from_webkit_base'):
+ path_to_webkit = self._port.path_from_webkit_base()
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path_to_webkit),
+ self.WEBKIT_SVN)
+
+ if hasattr(self._port, 'path_from_chromium_base'):
+ path_to_chrome = self._port.path_from_chromium_base()
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path_to_chrome),
+ self.CHROME_SVN)
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
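
_get_svn_revision now takes an explicit directory and only shells out to svn when that directory is actually an SVN checkout. The standalone function below restates that guarded lookup under an assumed name (svn_revision_or_empty); it mirrors the logic added above and is not an existing webkitpy helper.

import os
import subprocess
import sys
import xml.dom.minidom
import xml.parsers.expat

def svn_revision_or_empty(in_directory):
    """Return the SVN revision of in_directory, or '' if it is not a checkout."""
    if not os.path.exists(os.path.join(in_directory, '.svn')):
        return ""
    output = subprocess.Popen(["svn", "info", "--xml"],
                              cwd=in_directory,
                              shell=(sys.platform == 'win32'),
                              stdout=subprocess.PIPE).communicate()[0]
    try:
        dom = xml.dom.minidom.parseString(output)
        return dom.getElementsByTagName('entry')[0].getAttribute('revision')
    except xml.parsers.expat.ExpatError:
        # Malformed or empty XML output; treat as "no revision available".
        return ""
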
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index 5b0d186..a3650ed 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -36,10 +36,7 @@ import os
import re
import sys
import time
-from port import path_utils
-sys.path.append(path_utils.path_from_base('third_party', 'WebKit',
- 'WebKitTools'))
import simplejson
# Test expectation and modifier constants.
@@ -53,12 +50,28 @@ import simplejson
class TestExpectations:
TEST_LIST = "test_expectations.txt"
- def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode,
- tests_are_present=True):
- """Reads the test expectations files from the given directory."""
- path = os.path.join(directory, self.TEST_LIST)
- self._expected_failures = TestExpectationsFile(path, tests, platform,
- is_debug_mode, is_lint_mode, tests_are_present=tests_are_present)
+ def __init__(self, port, tests, expectations, test_platform_name,
+ is_debug_mode, is_lint_mode, tests_are_present=True):
+ """Loads and parses the test expectations given in the string.
+ Args:
+ port: handle to object containing platform-specific functionality
+ tests: list of all of the test files
+ expectations: test expectations as a string
+ test_platform_name: name of the platform to match expectations
+ against. Note that this may be different than
+ port.test_platform_name() when is_lint_mode is True.
+ is_debug_mode: whether to use the DEBUG or RELEASE modifiers
+ in the expectations
+ is_lint_mode: If True, just parse the expectations string
+ looking for errors.
+ tests_are_present: whether the test files exist in the file
+ system and can be probed for. This is useful for distinguishing
+ test files from directories, and is needed by the LTTF
+ dashboard, where the files aren't actually locally present.
+ """
+ self._expected_failures = TestExpectationsFile(port, expectations,
+ tests, test_platform_name, is_debug_mode, is_lint_mode,
+ tests_are_present=tests_are_present)
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
@@ -230,9 +243,6 @@ class TestExpectationsFile:
EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
TEXT, IMAGE, FAIL, SKIP)
- BASE_PLATFORMS = ('linux', 'mac', 'win')
- PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7')
-
BUILD_TYPES = ('debug', 'release')
MODIFIERS = {'skip': SKIP,
@@ -251,37 +261,34 @@ class TestExpectationsFile:
'fail': FAIL,
'flaky': FLAKY}
- def __init__(self, path, full_test_list, platform, is_debug_mode,
- is_lint_mode, expectations_as_str=None, suppress_errors=False,
+ def __init__(self, port, expectations, full_test_list, test_platform_name,
+ is_debug_mode, is_lint_mode, suppress_errors=False,
tests_are_present=True):
"""
- path: The path to the expectation file. An error is thrown if a test is
- listed more than once.
+ expectations: Contents of the expectations file
full_test_list: The list of all tests to be run pending processing of
the expections for those tests.
- platform: Which platform from self.PLATFORMS to filter tests for.
+ test_platform_name: name of the platform to match expectations
+ against. Note that this may be different than
+ port.test_platform_name() when is_lint_mode is True.
is_debug_mode: Whether we testing a test_shell built debug mode.
is_lint_mode: Whether this is just linting test_expecatations.txt.
- expectations_as_str: Contents of the expectations file. Used instead of
- the path. This makes unittesting sane.
suppress_errors: Whether to suppress lint errors.
tests_are_present: Whether the test files are present in the local
filesystem. The LTTF Dashboard uses False here to avoid having to
keep a local copy of the tree.
"""
- self._path = path
- self._expectations_as_str = expectations_as_str
+ self._port = port
+ self._expectations = expectations
+ self._full_test_list = full_test_list
+ self._test_platform_name = test_platform_name
+ self._is_debug_mode = is_debug_mode
self._is_lint_mode = is_lint_mode
self._tests_are_present = tests_are_present
- self._full_test_list = full_test_list
self._suppress_errors = suppress_errors
self._errors = []
self._non_fatal_errors = []
- self._platform = self.to_test_platform_name(platform)
- if self._platform is None:
- raise Exception("Unknown platform '%s'" % (platform))
- self._is_debug_mode = is_debug_mode
# Maps relative test paths as listed in the expectations file to a
# list of maps containing modifiers and expectations for each time
@@ -320,27 +327,13 @@ class TestExpectationsFile:
"""Returns an object that can be iterated over. Allows for not caring
about whether we're iterating over a file or a new-line separated
string."""
- if self._expectations_as_str:
- iterable = [x + "\n" for x in
- self._expectations_as_str.split("\n")]
- # Strip final entry if it's empty to avoid added in an extra
- # newline.
- if iterable[len(iterable) - 1] == "\n":
- return iterable[:len(iterable) - 1]
- return iterable
- else:
- return open(self._path)
-
- def to_test_platform_name(self, name):
- """Returns the test expectation platform that will be used for a
- given platform name, or None if there is no match."""
- chromium_prefix = 'chromium-'
- name = name.lower()
- if name.startswith(chromium_prefix):
- name = name[len(chromium_prefix):]
- if name in self.PLATFORMS:
- return name
- return None
+ iterable = [x + "\n" for x in
+ self._expectations.split("\n")]
+ # Strip final entry if it's empty to avoid adding an extra
+ # newline.
+ if iterable[-1] == "\n":
+ return iterable[:-1]
+ return iterable
def get_test_set(self, modifier, expectation=None, include_skips=True):
if expectation is None:
@@ -398,6 +391,12 @@ class TestExpectationsFile:
no
"""
+ # FIXME - remove_platform_from_file worked by writing a new
+ # test_expectations.txt file over the old one. Now that we're just
+ # parsing strings, we need to change this to return the new
+ # expectations string.
+ raise NotImplementedError('remove_platform_from_file')
+
new_file = self._path + '.new'
logging.debug('Original file: "%s"', self._path)
logging.debug('New file: "%s"', new_file)
@@ -430,11 +429,12 @@ class TestExpectationsFile:
elif action == ADD_PLATFORMS_EXCEPT_THIS:
parts = line.split(':')
new_options = parts[0]
- for p in self.PLATFORMS:
- p = p.upper();
+ for p in self._port.test_platform_names():
+ p = p.upper()
# This is a temp solution for rebaselining tool.
# Do not add tags WIN-7 and WIN-VISTA to test expectations
- # if the original line does not specify the platform option.
+ # if the original line does not specify the platform
+ # option.
# TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
# reliable Win 7 and Win Vista buildbots setup.
if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
@@ -517,7 +517,7 @@ class TestExpectationsFile:
has_any_platform = False
for option in options:
- if option in self.PLATFORMS:
+ if option in self._port.test_platform_names():
has_any_platform = True
if not option == platform:
return REMOVE_PLATFORM
@@ -547,7 +547,7 @@ class TestExpectationsFile:
for option in options:
if option in self.MODIFIERS:
modifiers.add(option)
- elif option in self.PLATFORMS:
+ elif option in self._port.test_platform_names():
has_any_platform = True
elif option.startswith('bug'):
has_bug_id = True
@@ -590,7 +590,7 @@ class TestExpectationsFile:
options: list of options
"""
for opt in options:
- if self._platform.startswith(opt):
+ if self._test_platform_name.startswith(opt):
return True
return False
@@ -632,7 +632,7 @@ class TestExpectationsFile:
'indefinitely, then it should be just timeout.',
test_list_path)
- full_path = os.path.join(path_utils.layout_tests_dir(),
+ full_path = os.path.join(self._port.layout_tests_dir(),
test_list_path)
full_path = os.path.normpath(full_path)
# WebKit's way of skipping tests is to add a -disabled suffix.
@@ -662,7 +662,7 @@ class TestExpectationsFile:
else:
build_type = 'RELEASE'
print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
- % (self._platform.upper(), build_type)
+ % (self._test_platform_name.upper(), build_type)
for error in self._non_fatal_errors:
logging.error(error)
@@ -695,7 +695,7 @@ class TestExpectationsFile:
def _expand_tests(self, test_list_path):
"""Convert the test specification to an absolute, normalized
path and make sure directories end with the OS path separator."""
- path = os.path.join(path_utils.layout_tests_dir(), test_list_path)
+ path = os.path.join(self._port.layout_tests_dir(), test_list_path)
path = os.path.normpath(path)
path = self._fix_dir(path)
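
With the expectations always supplied as a string, _get_iterable_expectations no longer needs a file-versus-string branch. The helper below, under an assumed name, restates the splitting rule in isolation: every line keeps its trailing newline and the empty final entry produced by a trailing newline in the input is dropped.

def expectations_lines(expectations):
    """Split an expectations string into newline-terminated lines."""
    lines = [x + "\n" for x in expectations.split("\n")]
    # A string ending in "\n" yields a final empty entry; drop it so we do
    # not append a spurious blank line.
    if lines[-1] == "\n":
        return lines[:-1]
    return lines

# For example, expectations_lines("BUG1 : fast/js/a.html = TEXT\n")
# returns ["BUG1 : fast/js/a.html = TEXT\n"].
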
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
index b7e620d..3c087c0 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
@@ -36,7 +36,6 @@ under that directory."""
import glob
import os
-from port import path_utils
# When collecting test cases, we include any file with these extensions.
_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
@@ -45,7 +44,7 @@ _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])
-def gather_test_files(paths):
+def gather_test_files(port, paths):
"""Generate a set of test files and return them.
Args:
@@ -57,14 +56,14 @@ def gather_test_files(paths):
if paths:
for path in paths:
# If there's an * in the name, assume it's a glob pattern.
- path = os.path.join(path_utils.layout_tests_dir(), path)
+ path = os.path.join(port.layout_tests_dir(), path)
if path.find('*') > -1:
filenames = glob.glob(path)
paths_to_walk.update(filenames)
else:
paths_to_walk.add(path)
else:
- paths_to_walk.add(path_utils.layout_tests_dir())
+ paths_to_walk.add(port.layout_tests_dir())
# Now walk all the paths passed in on the command line and get filenames
test_files = set()
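
gather_test_files now resolves every path against port.layout_tests_dir() instead of the global path_utils helper. The function below is a condensed sketch, under an assumed name, of just the path-resolution step shown in this hunk; the directory walk performed by the real function afterwards is omitted.

import glob
import os

def resolve_test_paths(port, paths):
    """Expand command-line test paths against the port's LayoutTests root."""
    paths_to_walk = set()
    if paths:
        for path in paths:
            path = os.path.join(port.layout_tests_dir(), path)
            if '*' in path:
                # Treat anything containing '*' as a glob pattern.
                paths_to_walk.update(glob.glob(path))
            else:
                paths_to_walk.add(path)
    else:
        # No paths given: walk the whole layout test tree.
        paths_to_walk.add(port.layout_tests_dir())
    return paths_to_walk
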
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
index 9f52686..3452035 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
@@ -40,21 +40,22 @@ import logging
import os
import Queue
import signal
-import subprocess
import sys
import thread
import threading
import time
-from port import path_utils
import test_failures
-def process_output(proc, test_info, test_types, test_args, target, output_dir):
+def process_output(port, test_info, test_types, test_args, target, output_dir,
+ crash, timeout, test_run_time, actual_checksum,
+ output, error):
"""Receives the output from a test_shell process, subjects it to a number
of tests, and returns a list of failure types the test produced.
Args:
+ port: port-specific hooks
proc: an active test_shell process
test_info: Object containing the test filename, uri and timeout
test_types: list of test types to subject the output to
@@ -64,84 +65,39 @@ def process_output(proc, test_info, test_types, test_args, target, output_dir):
Returns: a list of failure objects and times for the test being processed
"""
- outlines = []
- extra_lines = []
failures = []
- crash = False
# Some test args, such as the image hash, may be added or changed on a
# test-by-test basis.
local_test_args = copy.copy(test_args)
- start_time = time.time()
-
- line = proc.stdout.readline()
-
- # Only start saving output lines once we've loaded the URL for the test.
- url = None
- test_string = test_info.uri.strip()
-
- while line.rstrip() != "#EOF":
- # Make sure we haven't crashed.
- if line == '' and proc.poll() is not None:
- failures.append(test_failures.FailureCrash())
-
- # This is hex code 0xc000001d, which is used for abrupt
- # termination. This happens if we hit ctrl+c from the prompt and
- # we happen to be waiting on the test_shell.
- # sdoyon: Not sure for which OS and in what circumstances the
- # above code is valid. What works for me under Linux to detect
- # ctrl+c is for the subprocess returncode to be negative SIGINT.
- # And that agrees with the subprocess documentation.
- if (-1073741510 == proc.returncode or
- - signal.SIGINT == proc.returncode):
- raise KeyboardInterrupt
- crash = True
- break
-
- # Don't include #URL lines in our output
- if line.startswith("#URL:"):
- url = line.rstrip()[5:]
- if url != test_string:
- logging.fatal("Test got out of sync:\n|%s|\n|%s|" %
- (url, test_string))
- raise AssertionError("test out of sync")
- elif line.startswith("#MD5:"):
- local_test_args.hash = line.rstrip()[5:]
- elif line.startswith("#TEST_TIMED_OUT"):
- # Test timed out, but we still need to read until #EOF.
- failures.append(test_failures.FailureTimeout())
- elif url:
- outlines.append(line)
- else:
- extra_lines.append(line)
-
- line = proc.stdout.readline()
-
- end_test_time = time.time()
-
- if len(extra_lines):
- extra = "".join(extra_lines)
- if crash:
- logging.debug("Stacktrace for %s:\n%s" % (test_string, extra))
- # Strip off "file://" since RelativeTestFilename expects
- # filesystem paths.
- filename = os.path.join(output_dir,
- path_utils.relative_test_filename(test_string[7:]))
- filename = os.path.splitext(filename)[0] + "-stack.txt"
- path_utils.maybe_make_directory(os.path.split(filename)[0])
- open(filename, "wb").write(extra)
- else:
- logging.debug("Previous test output extra lines after dump:\n%s" %
- extra)
+ local_test_args.hash = actual_checksum
+
+ if crash:
+ failures.append(test_failures.FailureCrash())
+ if timeout:
+ failures.append(test_failures.FailureTimeout())
+
+ if crash:
+ logging.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
+ # Strip off "file://" since RelativeTestFilename expects
+ # filesystem paths.
+ filename = os.path.join(output_dir, test_info.filename)
+ filename = os.path.splitext(filename)[0] + "-stack.txt"
+ port.maybe_make_directory(os.path.split(filename)[0])
+ open(filename, "wb").write(error)
+ elif error:
+ logging.debug("Previous test output extra lines after dump:\n%s" %
+ error)
# Check the output and save the results.
+ start_time = time.time()
time_for_diffs = {}
for test_type in test_types:
start_diff_time = time.time()
- new_failures = test_type.compare_output(test_info.filename,
- proc, ''.join(outlines),
- local_test_args, target)
+ new_failures = test_type.compare_output(port, test_info.filename,
+ output, local_test_args,
+ target)
# Don't add any more failures if we already have a crash, so we don't
# double-report those tests. We do double-report for timeouts since
# we still want to see the text and image output.
@@ -150,28 +106,11 @@ def process_output(proc, test_info, test_types, test_args, target, output_dir):
time_for_diffs[test_type.__class__.__name__] = (
time.time() - start_diff_time)
- total_time_for_all_diffs = time.time() - end_test_time
- test_run_time = end_test_time - start_time
+ total_time_for_all_diffs = time.time() - start_time
return TestStats(test_info.filename, failures, test_run_time,
total_time_for_all_diffs, time_for_diffs)
-def start_test_shell(command, args):
- """Returns the process for a new test_shell started in layout-tests mode.
- """
- cmd = []
- # Hook for injecting valgrind or other runtime instrumentation,
- # used by e.g. tools/valgrind/valgrind_tests.py.
- wrapper = os.environ.get("BROWSER_WRAPPER", None)
- if wrapper != None:
- cmd += [wrapper]
- cmd += command + ['--layout-tests'] + args
- return subprocess.Popen(cmd,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
-
class TestStats:
def __init__(self, filename, failures, test_run_time,
@@ -186,17 +125,19 @@ class TestStats:
class SingleTestThread(threading.Thread):
"""Thread wrapper for running a single test file."""
- def __init__(self, test_shell_command, shell_args, test_info, test_types,
- test_args, target, output_dir):
+ def __init__(self, port, image_path, shell_args, test_info,
+ test_types, test_args, target, output_dir):
"""
Args:
+ port: object implementing port-specific hooks
test_info: Object containing the test filename, uri and timeout
output_dir: Directory to put crash stacks into.
See TestShellThread for documentation of the remaining arguments.
"""
threading.Thread.__init__(self)
- self._command = test_shell_command
+ self._port = port
+ self._image_path = image_path
self._shell_args = shell_args
self._test_info = test_info
self._test_types = test_types
@@ -205,10 +146,18 @@ class SingleTestThread(threading.Thread):
self._output_dir = output_dir
def run(self):
- proc = start_test_shell(self._command, self._shell_args +
- ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri])
- self._test_stats = process_output(proc, self._test_info,
- self._test_types, self._test_args, self._target, self._output_dir)
+ driver = self._port.start_test_driver(self._image_path,
+ self._shell_args)
+ start = time.time()
+ crash, timeout, actual_checksum, output, error = \
+ driver.run_test(self._test_info.uri.strip(), self._test_info.timeout,
+ self._test_info.image_hash)
+ end = time.time()
+ self._test_stats = process_output(self._port,
+ self._test_info, self._test_types, self._test_args,
+ self._target, self._output_dir, crash, timeout, end - start,
+ actual_checksum, output, error)
+ driver.stop()
def get_test_stats(self):
return self._test_stats
@@ -216,17 +165,16 @@ class SingleTestThread(threading.Thread):
class TestShellThread(threading.Thread):
- def __init__(self, filename_list_queue, result_queue, test_shell_command,
- test_types, test_args, shell_args, options):
+ def __init__(self, port, filename_list_queue, result_queue,
+ test_types, test_args, image_path, shell_args, options):
"""Initialize all the local state for this test shell thread.
Args:
+ port: interface to port-specific hooks
filename_list_queue: A thread safe Queue class that contains lists
of tuples of (filename, uri) pairs.
result_queue: A thread safe Queue class that will contain tuples of
(test, failure lists) for the test results.
- test_shell_command: A list specifying the command+args for
- test_shell
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
@@ -236,13 +184,14 @@ class TestShellThread(threading.Thread):
run_webkit_tests; they are typically passed via the
run_webkit_tests.TestRunner class."""
threading.Thread.__init__(self)
+ self._port = port
self._filename_list_queue = filename_list_queue
self._result_queue = result_queue
self._filename_list = []
- self._test_shell_command = test_shell_command
self._test_types = test_types
self._test_args = test_args
- self._test_shell_proc = None
+ self._driver = None
+ self._image_path = image_path
self._shell_args = shell_args
self._options = options
self._canceled = False
@@ -379,11 +328,11 @@ class TestShellThread(threading.Thread):
# Print the error message(s).
error_str = '\n'.join([' ' + f.message() for f in failures])
logging.debug("%s %s failed:\n%s" % (self.getName(),
- path_utils.relative_test_filename(filename),
+ self._port.relative_test_filename(filename),
error_str))
else:
logging.debug("%s %s passed" % (self.getName(),
- path_utils.relative_test_filename(filename)))
+ self._port.relative_test_filename(filename)))
self._result_queue.put((filename, failures))
if batch_size > 0 and batch_count > batch_size:
@@ -407,7 +356,7 @@ class TestShellThread(threading.Thread):
Return:
A list of TestFailure objects describing the error.
"""
- worker = SingleTestThread(self._test_shell_command,
+ worker = SingleTestThread(self._port, self._image_path,
self._shell_args,
test_info,
self._test_types,
@@ -431,7 +380,7 @@ class TestShellThread(threading.Thread):
# tradeoff in order to avoid losing the rest of this thread's
# results.
logging.error('Test thread hung: killing all test_shells')
- path_utils.kill_all_test_shells()
+ worker._driver.stop()
try:
stats = worker.get_test_stats()
@@ -454,32 +403,23 @@ class TestShellThread(threading.Thread):
A list of TestFailure objects describing the error.
"""
self._ensure_test_shell_is_running()
- # Args to test_shell is a space-separated list of
- # "uri timeout pixel_hash"
- # The timeout and pixel_hash are optional. The timeout is used if this
- # test has a custom timeout. The pixel_hash is used to avoid doing an
- # image dump if the checksums match, so it should be set to a blank
- # value if we are generating a new baseline.
- # (Otherwise, an image from a previous run will be copied into
- # the baseline.)
+ # The pixel_hash is used to avoid doing an image dump if the
+ # checksums match, so it should be set to a blank value if we
+ # are generating a new baseline. (Otherwise, an image from a
+ # previous run will be copied into the baseline.)
image_hash = test_info.image_hash
if image_hash and self._test_args.new_baseline:
image_hash = ""
- self._test_shell_proc.stdin.write(("%s %s %s\n" %
- (test_info.uri, test_info.timeout, image_hash)))
-
- # If the test shell is dead, the above may cause an IOError as we
- # try to write onto the broken pipe. If this is the first test for
- # this test shell process, than the test shell did not
- # successfully start. If this is not the first test, then the
- # previous tests have caused some kind of delayed crash. We don't
- # try to recover here.
- self._test_shell_proc.stdin.flush()
-
- stats = process_output(self._test_shell_proc, test_info,
- self._test_types, self._test_args,
- self._options.target,
- self._options.results_directory)
+ start = time.time()
+ crash, timeout, actual_checksum, output, error = \
+ self._driver.run_test(test_info.uri, test_info.timeout, image_hash)
+ end = time.time()
+
+ stats = process_output(self._port, test_info, self._test_types,
+ self._test_args, self._options.target,
+ self._options.results_directory, crash,
+ timeout, end - start, actual_checksum,
+ output, error)
self._test_stats.append(stats)
return stats.failures
@@ -489,23 +429,12 @@ class TestShellThread(threading.Thread):
running tests singly, since those each start a separate test shell in
their own thread.
"""
- if (not self._test_shell_proc or
- self._test_shell_proc.poll() is not None):
- self._test_shell_proc = start_test_shell(self._test_shell_command,
- self._shell_args)
+ if (not self._driver or self._driver.poll() is not None):
+ self._driver = self._port.start_driver(
+ self._image_path, self._shell_args)
def _kill_test_shell(self):
"""Kill the test shell process if it's running."""
- if self._test_shell_proc:
- self._test_shell_proc.stdin.close()
- self._test_shell_proc.stdout.close()
- if self._test_shell_proc.stderr:
- self._test_shell_proc.stderr.close()
- if (sys.platform not in ('win32', 'cygwin') and
- not self._test_shell_proc.poll()):
- # Closing stdin/stdout/stderr hangs sometimes on OS X.
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._test_shell_proc.pid)], stderr=null)
- null.close()
- self._test_shell_proc = None
+ if self._driver:
+ self._driver.stop()
+ self._driver = None
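
The thread code no longer manages a raw test_shell subprocess; it asks the port for a driver object and feeds the tuple returned by run_test() straight into process_output(). The function below is a minimal sketch of that round trip, assuming only the driver methods exercised in this diff (start_driver, run_test, stop); the function name itself is illustrative, not a webkitpy API.

import time

def run_single_test(port, image_path, shell_args, test_info, image_hash):
    """Run one test through a port-provided driver and collect its results."""
    driver = port.start_driver(image_path, shell_args)
    start = time.time()
    # run_test() returns everything process_output() now needs.
    crash, timeout, actual_checksum, output, error = \
        driver.run_test(test_info.uri, test_info.timeout, image_hash)
    elapsed = time.time() - start
    driver.stop()
    return crash, timeout, actual_checksum, output, error, elapsed
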