author    Steve Block <steveblock@google.com>    2010-04-27 16:31:00 +0100
committer Steve Block <steveblock@google.com>    2010-05-11 14:42:12 +0100
commit    dcc8cf2e65d1aa555cce12431a16547e66b469ee (patch)
tree      92a8d65cd5383bca9749f5327fb5e440563926e6 /WebKitTools/Scripts/webkitpy/layout_tests
parent    ccac38a6b48843126402088a309597e682f40fe6 (diff)
download  external_webkit-dcc8cf2e65d1aa555cce12431a16547e66b469ee.zip
          external_webkit-dcc8cf2e65d1aa555cce12431a16547e66b469ee.tar.gz
          external_webkit-dcc8cf2e65d1aa555cce12431a16547e66b469ee.tar.bz2
Merge webkit.org at r58033 : Initial merge by git
Change-Id: If006c38561af287c50cd578d251629b51e4d8cd1
Diffstat (limited to 'WebKitTools/Scripts/webkitpy/layout_tests')
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py  24
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py (renamed from WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py)  206
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py  9
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py  37
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py  50
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py  227
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py  169
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py  20
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py  21
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py  35
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py  22
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/base.py  155
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py  239
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py  111
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py  82
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py  60
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py  189
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py  87
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py  91
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py  46
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py  7
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf  1
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py  400
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py  99
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py  223
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/test.py  25
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py  448
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py  110
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/port/win.py  75
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py  285
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py (renamed from WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py)  785
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py  74
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py  12
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py  50
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py  81
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py  14
36 files changed, 3169 insertions, 1400 deletions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py b/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
index 6e4ba99..231ed70 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/driver_test.py
@@ -61,17 +61,19 @@ def run_tests(port, options, tests):
if __name__ == '__main__':
- optparser = optparse.OptionParser()
- optparser.add_option('-p', '--platform', action='store', default='mac',
- help='Platform to test (e.g., "mac", "chromium-mac", etc.')
- optparser.add_option('-t', '--target', action='store', default='Release',
- help='build type ("Debug" or "Release")')
- optparser.add_option('', '--timeout', action='store', default='2000',
- help='test timeout in milliseconds (2000 by default)')
- optparser.add_option('', '--wrapper', action='store')
- optparser.add_option('', '--no-pixel-tests', action='store_true',
- default=False,
- help='disable pixel-to-pixel PNG comparisons')
+ # FIXME: configuration_options belong in a shared location.
+ configuration_options = [
+ optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration", help='Set the configuration to Debug'),
+ optparse.make_option('--release', action='store_const', const='Release', dest="configuration", help='Set the configuration to Release'),
+ ]
+ misc_options = [
+ optparse.make_option('-p', '--platform', action='store', default='mac', help='Platform to test (e.g., "mac", "chromium-mac", etc.)'),
+ optparse.make_option('--timeout', action='store', default='2000', help='test timeout in milliseconds (2000 by default)'),
+ optparse.make_option('--wrapper', action='store'),
+ optparse.make_option('--no-pixel-tests', action='store_true', default=False, help='disable pixel-to-pixel PNG comparisons'),
+ ]
+ option_list = configuration_options + misc_options
+ optparser = optparse.OptionParser(option_list=option_list)
options, args = optparser.parse_args()
p = port.get(options.platform, options)
run_tests(p, options, args)
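
The rewritten parser above builds its options from composable lists instead of repeated add_option() calls. A minimal, self-contained sketch of that optparse pattern (the option names mirror the hunk; the parse_args() input is an example):

    # Sketch: assembling an OptionParser from shared option lists.
    import optparse

    configuration_options = [
        optparse.make_option('--debug', action='store_const', const='Debug',
                             dest='configuration'),
        optparse.make_option('--release', action='store_const', const='Release',
                             dest='configuration'),
    ]
    misc_options = [
        optparse.make_option('--timeout', action='store', default='2000'),
    ]

    parser = optparse.OptionParser(
        option_list=configuration_options + misc_options)
    options, args = parser.parse_args(['--debug', '--timeout', '5000'])
    assert options.configuration == 'Debug'
    assert options.timeout == '5000'

Because option_list entries are plain objects, the configuration_options list can be shared across scripts, which is what the FIXME in the hunk is pointing at.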
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
index 3452035..e61d11f 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py
@@ -27,10 +27,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""A Thread object for running the test shell and processing URLs from a
+"""A Thread object for running DumpRenderTree and processing URLs from a
shared queue.
-Each thread runs a separate instance of the test_shell binary and validates
+Each thread runs a separate instance of the DumpRenderTree binary and validates
the output. When there are no more URLs to process in the shared queue, the
thread exits.
"""
@@ -47,23 +47,26 @@ import time
import test_failures
+_log = logging.getLogger("webkitpy.layout_tests.layout_package."
+ "dump_render_tree_thread")
-def process_output(port, test_info, test_types, test_args, target, output_dir,
- crash, timeout, test_run_time, actual_checksum,
+
+def process_output(port, test_info, test_types, test_args, configuration,
+ output_dir, crash, timeout, test_run_time, actual_checksum,
output, error):
- """Receives the output from a test_shell process, subjects it to a number
- of tests, and returns a list of failure types the test produced.
+ """Receives the output from a DumpRenderTree process, subjects it to a
+ number of tests, and returns a list of failure types the test produced.
Args:
port: port-specific hooks
- proc: an active test_shell process
+ proc: an active DumpRenderTree process
test_info: Object containing the test filename, uri and timeout
test_types: list of test types to subject the output to
test_args: arguments to be passed to each test
- target: Debug or Release
+ configuration: Debug or Release
output_dir: directory to put crash stack traces into
- Returns: a list of failure objects and times for the test being processed
+ Returns: a TestResult object
"""
failures = []
@@ -79,16 +82,17 @@ def process_output(port, test_info, test_types, test_args, target, output_dir,
failures.append(test_failures.FailureTimeout())
if crash:
- logging.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
+ _log.debug("Stacktrace for %s:\n%s" % (test_info.filename, error))
# Strip off "file://" since RelativeTestFilename expects
# filesystem paths.
- filename = os.path.join(output_dir, test_info.filename)
+ filename = os.path.join(output_dir, port.relative_test_filename(
+ test_info.filename))
filename = os.path.splitext(filename)[0] + "-stack.txt"
port.maybe_make_directory(os.path.split(filename)[0])
- open(filename, "wb").write(error)
+ open(filename, "wb").write(error) # FIXME: This leaks a file handle.
elif error:
- logging.debug("Previous test output extra lines after dump:\n%s" %
- error)
+ _log.debug("Previous test output extra lines after dump:\n%s" %
+ error)
# Check the output and save the results.
start_time = time.time()
@@ -97,7 +101,7 @@ def process_output(port, test_info, test_types, test_args, target, output_dir,
start_diff_time = time.time()
new_failures = test_type.compare_output(port, test_info.filename,
output, local_test_args,
- target)
+ configuration)
# Don't add any more failures if we already have a crash, so we don't
# double-report those tests. We do double-report for timeouts since
# we still want to see the text and image output.
@@ -107,26 +111,27 @@ def process_output(port, test_info, test_types, test_args, target, output_dir,
time.time() - start_diff_time)
total_time_for_all_diffs = time.time() - start_diff_time
- return TestStats(test_info.filename, failures, test_run_time,
- total_time_for_all_diffs, time_for_diffs)
+ return TestResult(test_info.filename, failures, test_run_time,
+ total_time_for_all_diffs, time_for_diffs)
-class TestStats:
+class TestResult(object):
def __init__(self, filename, failures, test_run_time,
total_time_for_all_diffs, time_for_diffs):
- self.filename = filename
self.failures = failures
+ self.filename = filename
self.test_run_time = test_run_time
- self.total_time_for_all_diffs = total_time_for_all_diffs
self.time_for_diffs = time_for_diffs
+ self.total_time_for_all_diffs = total_time_for_all_diffs
+ self.type = test_failures.determine_result_type(failures)
class SingleTestThread(threading.Thread):
"""Thread wrapper for running a single test file."""
def __init__(self, port, image_path, shell_args, test_info,
- test_types, test_args, target, output_dir):
+ test_types, test_args, configuration, output_dir):
"""
Args:
port: object implementing port-specific hooks
@@ -142,32 +147,32 @@ class SingleTestThread(threading.Thread):
self._test_info = test_info
self._test_types = test_types
self._test_args = test_args
- self._target = target
+ self._configuration = configuration
self._output_dir = output_dir
def run(self):
- driver = self._port.start_test_driver(self._image_path,
- self._shell_args)
+ test_info = self._test_info
+ driver = self._port.start_driver(self._image_path, self._shell_args)
start = time.time()
crash, timeout, actual_checksum, output, error = \
driver.run_test(test_info.uri.strip(), test_info.timeout,
- test_info.image_hash)
+ test_info.image_hash())
end = time.time()
- self._test_stats = process_output(self._port,
- self._test_info, self._test_types, self._test_args,
- self._target, self._output_dir, crash, timeout, end - start,
+ self._test_result = process_output(self._port,
+ test_info, self._test_types, self._test_args,
+ self._configuration, self._output_dir, crash, timeout, end - start,
actual_checksum, output, error)
driver.stop()
- def get_test_stats(self):
- return self._test_stats
+ def get_test_result(self):
+ return self._test_result
class TestShellThread(threading.Thread):
def __init__(self, port, filename_list_queue, result_queue,
test_types, test_args, image_path, shell_args, options):
- """Initialize all the local state for this test shell thread.
+ """Initialize all the local state for this DumpRenderTree thread.
Args:
port: interface to port-specific hooks
@@ -178,7 +183,7 @@ class TestShellThread(threading.Thread):
test_types: A list of TestType objects to run the test output
against.
test_args: A TestArguments object to pass to each TestType.
- shell_args: Any extra arguments to be passed to test_shell.exe.
+ shell_args: Any extra arguments to be passed to DumpRenderTree.
options: A property dictionary as produced by optparse. The
command-line options should match those expected by
run_webkit_tests; they are typically passed via the
@@ -197,7 +202,7 @@ class TestShellThread(threading.Thread):
self._canceled = False
self._exception_info = None
self._directory_timing_stats = {}
- self._test_stats = []
+ self._test_results = []
self._num_tests = 0
self._start_time = 0
self._stop_time = 0
@@ -214,10 +219,13 @@ class TestShellThread(threading.Thread):
(number of tests in that directory, time to run the tests)"""
return self._directory_timing_stats
- def get_individual_test_stats(self):
- """Returns a list of (test_filename, time_to_run_test,
- total_time_for_all_diffs, time_for_diffs) tuples."""
- return self._test_stats
+ def get_test_results(self):
+ """Return the list of all tests run on this thread.
+
+ This is used to calculate per-thread statistics.
+
+ """
+ return self._test_results
def cancel(self):
"""Set a flag telling this thread to quit."""
@@ -242,17 +250,17 @@ class TestShellThread(threading.Thread):
self._start_time = time.time()
self._num_tests = 0
try:
- logging.debug('%s starting' % (self.getName()))
+ _log.debug('%s starting' % (self.getName()))
self._run(test_runner=None, result_summary=None)
- logging.debug('%s done (%d tests)' % (self.getName(),
- self.get_num_tests()))
+ _log.debug('%s done (%d tests)' % (self.getName(),
+ self.get_num_tests()))
except:
# Save the exception for our caller to see.
self._exception_info = sys.exc_info()
self._stop_time = time.time()
# Re-raise it and die.
- logging.error('%s dying: %s' % (self.getName(),
- self._exception_info))
+ _log.error('%s dying: %s' % (self.getName(),
+ self._exception_info))
raise
self._stop_time = time.time()
@@ -275,8 +283,8 @@ class TestShellThread(threading.Thread):
try:
batch_size = int(self._options.batch_size)
except:
- logging.info("Ignoring invalid batch size '%s'" %
- self._options.batch_size)
+ _log.info("Ignoring invalid batch size '%s'" %
+ self._options.batch_size)
# Append tests we're running to the existing tests_run.txt file.
# This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
@@ -286,7 +294,7 @@ class TestShellThread(threading.Thread):
while True:
if self._canceled:
- logging.info('Testing canceled')
+ _log.info('Testing canceled')
tests_run_file.close()
return
@@ -300,7 +308,7 @@ class TestShellThread(threading.Thread):
self._current_dir, self._filename_list = \
self._filename_list_queue.get_nowait()
except Queue.Empty:
- self._kill_test_shell()
+ self._kill_dump_render_tree()
tests_run_file.close()
return
@@ -313,31 +321,33 @@ class TestShellThread(threading.Thread):
batch_count += 1
self._num_tests += 1
if self._options.run_singly:
- failures = self._run_test_singly(test_info)
+ result = self._run_test_singly(test_info)
else:
- failures = self._run_test(test_info)
+ result = self._run_test(test_info)
filename = test_info.filename
tests_run_file.write(filename + "\n")
- if failures:
- # Check and kill test shell if we need too.
- if len([1 for f in failures if f.should_kill_test_shell()]):
- self._kill_test_shell()
+ if result.failures:
+ # Check and kill DumpRenderTree if we need to.
+ if len([1 for f in result.failures
+ if f.should_kill_dump_render_tree()]):
+ self._kill_dump_render_tree()
# Reset the batch count since the shell just bounced.
batch_count = 0
# Print the error message(s).
- error_str = '\n'.join([' ' + f.message() for f in failures])
- logging.debug("%s %s failed:\n%s" % (self.getName(),
- self._port.relative_test_filename(filename),
- error_str))
+ error_str = '\n'.join([' ' + f.message() for
+ f in result.failures])
+ _log.debug("%s %s failed:\n%s" % (self.getName(),
+ self._port.relative_test_filename(filename),
+ error_str))
else:
- logging.debug("%s %s passed" % (self.getName(),
- self._port.relative_test_filename(filename)))
- self._result_queue.put((filename, failures))
+ _log.debug("%s %s passed" % (self.getName(),
+ self._port.relative_test_filename(filename)))
+ self._result_queue.put(result)
if batch_size > 0 and batch_count > batch_size:
# Bounce the shell and reset count.
- self._kill_test_shell()
+ self._kill_dump_render_tree()
batch_count = 0
if test_runner:
@@ -353,61 +363,64 @@ class TestShellThread(threading.Thread):
Args:
test_info: Object containing the test filename, uri and timeout
- Return:
- A list of TestFailure objects describing the error.
+ Returns:
+ A TestResult
+
"""
worker = SingleTestThread(self._port, self._image_path,
self._shell_args,
test_info,
self._test_types,
self._test_args,
- self._options.target,
+ self._options.configuration,
self._options.results_directory)
worker.start()
- # When we're running one test per test_shell process, we can enforce
- # a hard timeout. the test_shell watchdog uses 2.5x the timeout
- # We want to be larger than that.
+ # When we're running one test per DumpRenderTree process, we can
+ # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x
+ # the timeout; we want to be larger than that.
worker.join(int(test_info.timeout) * 3.0 / 1000.0)
if worker.isAlive():
# If join() returned with the thread still running, the
- # test_shell.exe is completely hung and there's nothing
+ # DumpRenderTree is completely hung and there's nothing
# more we can do with it. We have to kill all the
- # test_shells to free it up. If we're running more than
- # one test_shell thread, we'll end up killing the other
- # test_shells too, introducing spurious crashes. We accept that
- # tradeoff in order to avoid losing the rest of this thread's
- # results.
- logging.error('Test thread hung: killing all test_shells')
+ # DumpRenderTrees to free it up. If we're running more than
+ # one DumpRenderTree thread, we'll end up killing the other
+ # DumpRenderTrees too, introducing spurious crashes. We accept
+ # that tradeoff in order to avoid losing the rest of this
+ # thread's results.
+ _log.error('Test thread hung: killing all DumpRenderTrees')
worker._driver.stop()
try:
- stats = worker.get_test_stats()
- self._test_stats.append(stats)
- failures = stats.failures
+ result = worker.get_test_result()
except AttributeError, e:
failures = []
- logging.error('Cannot get results of test: %s' %
- test_info.filename)
+ _log.error('Cannot get results of test: %s' %
+ test_info.filename)
+ result = TestResult(test_info.filename, failures=[],
+ test_run_time=0, total_time_for_all_diffs=0,
+ time_for_diffs=0)
- return failures
+ return result
def _run_test(self, test_info):
- """Run a single test file using a shared test_shell process.
+ """Run a single test file using a shared DumpRenderTree process.
Args:
test_info: Object containing the test filename, uri and timeout
- Return:
+ Returns:
A list of TestFailure objects describing the error.
+
"""
- self._ensure_test_shell_is_running()
+ self._ensure_dump_render_tree_is_running()
# The pixel_hash is used to avoid doing an image dump if the
# checksums match, so it should be set to a blank value if we
# are generating a new baseline. (Otherwise, an image from a
# previous run will be copied into the baseline.)
- image_hash = test_info.image_hash
+ image_hash = test_info.image_hash()
if image_hash and self._test_args.new_baseline:
image_hash = ""
start = time.time()
@@ -415,26 +428,27 @@ class TestShellThread(threading.Thread):
self._driver.run_test(test_info.uri, test_info.timeout, image_hash)
end = time.time()
- stats = process_output(self._port, test_info, self._test_types,
- self._test_args, self._options.target,
- self._options.results_directory, crash,
- timeout, end - start, actual_checksum,
- output, error)
+ result = process_output(self._port, test_info, self._test_types,
+ self._test_args, self._options.configuration,
+ self._options.results_directory, crash,
+ timeout, end - start, actual_checksum,
+ output, error)
+ self._test_results.append(result)
+ return result
+
+ def _ensure_dump_render_tree_is_running(self):
+ """Start the shared DumpRenderTree, if it's not running.
- self._test_stats.append(stats)
- return stats.failures
+ This is not for use when running tests singly, since those each start
+ a separate DumpRenderTree in their own thread.
- def _ensure_test_shell_is_running(self):
- """Start the shared test shell, if it's not running. Not for use when
- running tests singly, since those each start a separate test shell in
- their own thread.
"""
if (not self._driver or self._driver.poll() is not None):
self._driver = self._port.start_driver(
self._image_path, self._shell_args)
- def _kill_test_shell(self):
- """Kill the test shell process if it's running."""
+ def _kill_dump_render_tree(self):
+ """Kill the DumpRenderTree process if it's running."""
if self._driver:
self._driver.stop()
self._driver = None
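
The single-test path above gets its hard timeout from a thread join rather than from DumpRenderTree itself. A stripped-down sketch of that pattern; the sleeping function stands in for a hung DumpRenderTree and the numbers are examples:

    # Sketch: join() the worker for 3x the per-test timeout, comfortably
    # above DumpRenderTree's own 2.5x watchdog, then treat a still-alive
    # thread as a hang.
    import threading
    import time

    timeout_ms = 2000

    def run_one_test():
        time.sleep(30)  # stands in for a completely hung DumpRenderTree

    worker = threading.Thread(target=run_one_test)
    worker.daemon = True  # let this sketch exit without waiting 30 seconds
    worker.start()
    worker.join(int(timeout_ms) * 3.0 / 1000.0)  # join() takes seconds
    if worker.is_alive():
        # The real code kills every DumpRenderTree here, accepting spurious
        # crashes on other threads to keep this thread's results.
        print('Test thread hung: killing all DumpRenderTrees')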
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index 520ab1f..cee44ad 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -29,12 +29,11 @@
import logging
import os
-import simplejson
-
-from layout_package import json_results_generator
-from layout_package import test_expectations
-from layout_package import test_failures
+from webkitpy.layout_tests.layout_package import json_results_generator
+from webkitpy.layout_tests.layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_failures
+import webkitpy.thirdparty.simplejson as simplejson
class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
"""A JSON results generator for layout tests."""
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 84be0e1..6263540 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -29,14 +29,17 @@
import logging
import os
-import simplejson
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
-from layout_package import test_expectations
+from webkitpy.layout_tests.layout_package import test_expectations
+import webkitpy.thirdparty.simplejson as simplejson
+
+_log = logging.getLogger("webkitpy.layout_tests.layout_package."
+ "json_results_generator")
class JSONResultsGenerator(object):
@@ -154,8 +157,8 @@ class JSONResultsGenerator(object):
# Check if we have the archived JSON file on the buildbot server.
results_file_url = (self._builder_base_url +
self._build_name + "/" + self.RESULTS_FILENAME)
- logging.error("Local results.json file does not exist. Grabbing "
- "it off the archive at " + results_file_url)
+ _log.error("Local results.json file does not exist. Grabbing "
+ "it off the archive at " + results_file_url)
try:
results_file = urllib2.urlopen(results_file_url)
@@ -177,11 +180,11 @@ class JSONResultsGenerator(object):
try:
results_json = simplejson.loads(old_results)
except:
- logging.debug("results.json was not valid JSON. Clobbering.")
+ _log.debug("results.json was not valid JSON. Clobbering.")
# The JSON file is not valid JSON. Just clobber the results.
results_json = {}
else:
- logging.debug('Old JSON results do not exist. Starting fresh.')
+ _log.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
@@ -192,14 +195,14 @@ class JSONResultsGenerator(object):
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the bot.
- logging.error("Archive directory is inaccessible. Not modifying "
- "or clobbering the results.json file: " + str(error))
+ _log.error("Archive directory is inaccessible. Not modifying "
+ "or clobbering the results.json file: " + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
- logging.debug("Builder name (%s) is not in the results.json file."
- % builder_name)
+ _log.debug("Builder name (%s) is not in the results.json file."
+ % builder_name)
self._convert_json_to_current_version(results_json)
@@ -307,16 +310,20 @@ class JSONResultsGenerator(object):
# These next two branches test to see which source repos we can
# pull revisions from.
if hasattr(self._port, 'path_from_webkit_base'):
- path_to_webkit = self._port.path_from_webkit_base()
+ path_to_webkit = self._port.path_from_webkit_base('WebCore')
self._insert_item_into_raw_list(results_for_builder,
self._get_svn_revision(path_to_webkit),
self.WEBKIT_SVN)
if hasattr(self._port, 'path_from_chromium_base'):
- path_to_chrome = self._port.path_from_chromium_base()
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path_to_chrome),
- self.CHROME_SVN)
+ try:
+ path_to_chrome = self._port.path_from_chromium_base()
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path_to_chrome),
+ self.CHROME_SVN)
+ except AssertionError:
+ # We're not in a Chromium checkout, that's ok.
+ pass
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
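
The Chromium-revision hunk above stops assuming a Chromium checkout and instead probes for one. A tiny sketch of that guard; path_from_chromium_base is the method named in the hunk, but the fake port and helper below are hypothetical:

    # Sketch: treat an AssertionError from the path helper as "no Chromium
    # checkout" instead of letting it abort results generation.
    class FakePort(object):
        def path_from_chromium_base(self):
            # The real port asserts the Chromium root exists first.
            raise AssertionError('not in a Chromium checkout')

    def chromium_base_or_none(port):
        try:
            return port.path_from_chromium_base()
        except AssertionError:
            return None  # We're not in a Chromium checkout; that's OK.

    assert chromium_base_or_none(FakePort()) is None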
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
index 72b30a1..930b9e4 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
@@ -34,6 +34,10 @@ and rewritten repeatedly, without producing multiple lines of output. It
can be used to produce effects like progress bars.
"""
+import logging
+
+_log = logging.getLogger("webkitpy.layout_tests.metered_stream")
+
class MeteredStream:
"""This class is a wrapper around a stream that allows you to implement
@@ -57,8 +61,7 @@ class MeteredStream:
self._last_update = ""
def write(self, txt):
- """Write text directly to the stream, overwriting and resetting the
- meter."""
+ """Write to the stream, overwriting and resetting the meter."""
if self._dirty:
self.update("")
self._dirty = False
@@ -68,22 +71,43 @@ class MeteredStream:
"""Flush any buffered output."""
self._stream.flush()
- def update(self, str):
- """Write an update to the stream that will get overwritten by the next
- update() or by a write().
+ def progress(self, str):
+ """
+ Write a message to the stream that will get overwritten.
This is used for progress updates that don't need to be preserved in
- the log. Note that verbose disables this routine; we have this in
- case we are logging lots of output and the update()s will get lost
- or won't work properly (typically because verbose streams are
- redirected to files.
-
- TODO(dpranke): figure out if there is a way to detect if we're writing
- to a stream that handles CRs correctly (e.g., terminals). That might
- be a cleaner way of handling this.
+ the log. If the MeteredStream was initialized with verbose==True,
+ then this output is discarded. We have this in case we are logging
+ lots of output and the update()s will get lost or won't work
+ properly (typically because verbose streams are redirected to files).
+
"""
if self._verbose:
return
+ self._write(str)
+
+ def update(self, str):
+ """
+ Write a message that is also included when logging verbosely.
+
+ This routine preserves the same console logging behavior as progress(),
+ but will also log the message if verbose() was true.
+
+ """
+ # Note this is a separate routine that calls either into the logger
+ # or the metering stream. We have to be careful to avoid a layering
+ # inversion (stream calling back into the logger).
+ if self._verbose:
+ _log.info(str)
+ else:
+ self._write(str)
+
+ def _write(self, str):
+ """Actually write the message to the stream."""
+
+ # FIXME: Figure out if there is a way to detect if we're writing
+ # to a stream that handles CRs correctly (e.g., terminals). That might
+ # be a cleaner way of handling this.
# Print the necessary number of backspaces to erase the previous
# message.
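
To make the new progress()/update() split concrete, here is a much-reduced model of the routing the docstrings describe; it is not the MeteredStream implementation, and the backspace handling is simplified:

    # Sketch: progress() output is dropped entirely when verbose, while
    # update() is rerouted to the logger so the message is preserved.
    import sys

    class TinyMeter(object):
        def __init__(self, stream, verbose):
            self._stream = stream
            self._verbose = verbose
            self._last = ''

        def progress(self, msg):
            if self._verbose:
                return  # would be lost in a redirected stream anyway
            self._write(msg)

        def update(self, msg):
            if self._verbose:
                sys.stderr.write(msg + '\n')  # stand-in for _log.info(msg)
            else:
                self._write(msg)

        def _write(self, msg):
            # Backspace over the previous message so the new one overwrites it.
            self._stream.write('\b' * len(self._last) + msg)
            self._last = msg

    meter = TinyMeter(sys.stdout, verbose=False)
    meter.update('running 1/100')
    meter.update('running 2/100')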
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
index 01add62..38223dd 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -36,7 +36,10 @@ import os
import re
import sys
-import simplejson
+import webkitpy.thirdparty.simplejson as simplejson
+
+_log = logging.getLogger("webkitpy.layout_tests.layout_package."
+ "test_expectations")
# Test expectation and modifier constants.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
@@ -46,11 +49,46 @@ import simplejson
(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
+def result_was_expected(result, expected_results, test_needs_rebaselining,
+ test_is_skipped):
+ """Returns whether we got a result we were expecting.
+ Args:
+ result: actual result of a test execution
+ expected_results: set of results listed in test_expectations
+ test_needs_rebaselining: whether test was marked as REBASELINE
+ test_is_skipped: whether test was marked as SKIP"""
+ if result in expected_results:
+ return True
+ if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected_results:
+ return True
+ if result == MISSING and test_needs_rebaselining:
+ return True
+ if result == SKIP and test_is_skipped:
+ return True
+ return False
+
+
+def remove_pixel_failures(expected_results):
+ """Returns a copy of the expected results for a test, except that we
+ drop any pixel failures and return the remaining expectations. For example,
+ if we're not running pixel tests, then tests expected to fail as IMAGE
+ will PASS."""
+ expected_results = expected_results.copy()
+ if IMAGE in expected_results:
+ expected_results.remove(IMAGE)
+ expected_results.add(PASS)
+ if IMAGE_PLUS_TEXT in expected_results:
+ expected_results.remove(IMAGE_PLUS_TEXT)
+ expected_results.add(TEXT)
+ return expected_results
+
+
class TestExpectations:
TEST_LIST = "test_expectations.txt"
def __init__(self, port, tests, expectations, test_platform_name,
- is_debug_mode, is_lint_mode, tests_are_present=True):
+ is_debug_mode, is_lint_mode, tests_are_present=True,
+ overrides=None):
"""Loads and parses the test expectations given in the string.
Args:
port: handle to object containing platform-specific functionality
@@ -67,10 +105,14 @@ class TestExpectations:
system and can be probed for. This is useful for distinguishing
test files from directories, and is needed by the LTTF
dashboard, where the files aren't actually locally present.
+ overrides: test expectations that are allowed to override any
+ entries in |expectations|. This is used by callers
+ that need to manage two sets of expectations (e.g., upstream
+ and downstream expectations).
"""
self._expected_failures = TestExpectationsFile(port, expectations,
tests, test_platform_name, is_debug_mode, is_lint_mode,
- tests_are_present=tests_are_present)
+ tests_are_present=tests_are_present, overrides=overrides)
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
@@ -101,12 +143,16 @@ class TestExpectations:
retval = []
for expectation in expectations:
- for item in TestExpectationsFile.EXPECTATIONS.items():
- if item[1] == expectation:
- retval.append(item[0])
- break
+ retval.append(self.expectation_to_string(expectation))
- return " ".join(retval).upper()
+ return " ".join(retval)
+
+ def expectation_to_string(self, expectation):
+ """Return the uppercased string equivalent of a given expectation."""
+ for item in TestExpectationsFile.EXPECTATIONS.items():
+ if item[1] == expectation:
+ return item[0].upper()
+ return ""
def get_timeline_for_test(self, test):
return self._expected_failures.get_timeline_for_test(test)
@@ -117,14 +163,13 @@ class TestExpectations:
def get_tests_with_timeline(self, timeline):
return self._expected_failures.get_tests_with_timeline(timeline)
- def matches_an_expected_result(self, test, result):
- """Returns whether we got one of the expected results for this test."""
- return (result in self._expected_failures.get_expectations(test) or
- (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and
- FAIL in self._expected_failures.get_expectations(test)) or
- result == MISSING and self.is_rebaselining(test) or
- result == SKIP and self._expected_failures.has_modifier(test,
- SKIP))
+ def matches_an_expected_result(self, test, result,
+ pixel_tests_are_enabled):
+ expected_results = self._expected_failures.get_expectations(test)
+ if not pixel_tests_are_enabled:
+ expected_results = remove_pixel_failures(expected_results)
+ return result_was_expected(result, expected_results,
+ self.is_rebaselining(test), self.has_modifier(test, SKIP))
def is_rebaselining(self, test):
return self._expected_failures.has_modifier(test, REBASELINE)
@@ -232,8 +277,8 @@ class TestExpectationsFile:
IMAGE: ('image mismatch', 'image mismatch'),
IMAGE_PLUS_TEXT: ('image and text mismatch',
'image and text mismatch'),
- CRASH: ('test shell crash',
- 'test shell crashes'),
+ CRASH: ('DumpRenderTree crash',
+ 'DumpRenderTree crashes'),
TIMEOUT: ('test timed out', 'tests timed out'),
MISSING: ('no expected result found',
'no expected results found')}
@@ -261,7 +306,7 @@ class TestExpectationsFile:
def __init__(self, port, expectations, full_test_list, test_platform_name,
is_debug_mode, is_lint_mode, suppress_errors=False,
- tests_are_present=True):
+ tests_are_present=True, overrides=None):
"""
expectations: Contents of the expectations file
full_test_list: The list of all tests to be run pending processing of
@@ -275,6 +320,10 @@ class TestExpectationsFile:
tests_are_present: Whether the test files are present in the local
filesystem. The LTTF Dashboard uses False here to avoid having to
keep a local copy of the tree.
+ overrides: test expectations that are allowed to override any
+ entries in |expectations|. This is used by callers
+ that need to manage two sets of expectations (e.g., upstream
+ and downstream expectations).
"""
self._port = port
@@ -284,6 +333,7 @@ class TestExpectationsFile:
self._is_debug_mode = is_debug_mode
self._is_lint_mode = is_lint_mode
self._tests_are_present = tests_are_present
+ self._overrides = overrides
self._suppress_errors = suppress_errors
self._errors = []
self._non_fatal_errors = []
@@ -311,7 +361,50 @@ class TestExpectationsFile:
self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)
- self._read(self._get_iterable_expectations())
+ self._read(self._get_iterable_expectations(self._expectations),
+ overrides_allowed=False)
+
+ # List of tests that are in the overrides file (used for checking for
+ # duplicates inside the overrides file itself). Note that just because
+ # a test is in this set doesn't mean it's necessarily overridding a
+ # expectation in the regular expectations; the test might not be
+ # mentioned in the regular expectations file at all.
+ self._overridding_tests = set()
+
+ if overrides:
+ self._read(self._get_iterable_expectations(self._overrides),
+ overrides_allowed=True)
+
+ self._handle_any_read_errors()
+ self._process_tests_without_expectations()
+
+ def _handle_any_read_errors(self):
+ if not self._suppress_errors and (
+ len(self._errors) or len(self._non_fatal_errors)):
+ if self._is_debug_mode:
+ build_type = 'DEBUG'
+ else:
+ build_type = 'RELEASE'
+ _log.error('')
+ _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" %
+ (self._test_platform_name.upper(), build_type))
+
+ for error in self._non_fatal_errors:
+ _log.error(error)
+ _log.error('')
+
+ if len(self._errors):
+ raise SyntaxError('\n'.join(map(str, self._errors)))
+
+ def _process_tests_without_expectations(self):
+ expectations = set([PASS])
+ options = []
+ modifiers = []
+ if self._full_test_list:
+ for test in self._full_test_list:
+ if not test in self._test_list_paths:
+ self._add_test(test, modifiers, expectations, options,
+ overrides_allowed=False)
def _dict_of_sets(self, strings_to_constants):
"""Takes a dict of strings->constants and returns a dict mapping
@@ -321,12 +414,11 @@ class TestExpectationsFile:
d[c] = set()
return d
- def _get_iterable_expectations(self):
+ def _get_iterable_expectations(self, expectations_str):
"""Returns an object that can be iterated over. Allows for not caring
about whether we're iterating over a file or a new-line separated
string."""
- iterable = [x + "\n" for x in
- self._expectations.split("\n")]
+ iterable = [x + "\n" for x in expectations_str.split("\n")]
# Strip final entry if it's empty to avoid adding an extra
# newline.
if iterable[-1] == "\n":
@@ -388,7 +480,7 @@ class TestExpectationsFile:
the updated string.
"""
- f_orig = self._get_iterable_expectations()
+ f_orig = self._get_iterable_expectations(self._expectations)
f_new = []
tests_removed = 0
@@ -400,20 +492,20 @@ class TestExpectationsFile:
platform)
if action == NO_CHANGE:
# Save the original line back to the file
- logging.debug('No change to test: %s', line)
+ _log.debug('No change to test: %s', line)
f_new.append(line)
elif action == REMOVE_TEST:
tests_removed += 1
- logging.info('Test removed: %s', line)
+ _log.info('Test removed: %s', line)
elif action == REMOVE_PLATFORM:
parts = line.split(':')
new_options = parts[0].replace(platform.upper() + ' ', '', 1)
new_line = ('%s:%s' % (new_options, parts[1]))
f_new.append(new_line)
tests_updated += 1
- logging.info('Test updated: ')
- logging.info(' old: %s', line)
- logging.info(' new: %s', new_line)
+ _log.info('Test updated: ')
+ _log.info(' old: %s', line)
+ _log.info(' new: %s', new_line)
elif action == ADD_PLATFORMS_EXCEPT_THIS:
parts = line.split(':')
new_options = parts[0]
@@ -430,15 +522,15 @@ class TestExpectationsFile:
new_line = ('%s:%s' % (new_options, parts[1]))
f_new.append(new_line)
tests_updated += 1
- logging.info('Test updated: ')
- logging.info(' old: %s', line)
- logging.info(' new: %s', new_line)
+ _log.info('Test updated: ')
+ _log.info(' old: %s', line)
+ _log.info(' new: %s', new_line)
else:
- logging.error('Unknown update action: %d; line: %s',
- action, line)
+ _log.error('Unknown update action: %d; line: %s',
+ action, line)
- logging.info('Total tests removed: %d', tests_removed)
- logging.info('Total tests updated: %d', tests_updated)
+ _log.info('Total tests removed: %d', tests_removed)
+ _log.info('Total tests updated: %d', tests_updated)
return "".join(f_new)
@@ -574,7 +666,7 @@ class TestExpectationsFile:
self._all_expectations[test].append(
ModifiersAndExpectations(options, expectations))
- def _read(self, expectations):
+ def _read(self, expectations, overrides_allowed):
"""For each test in an expectations iterable, generate the
expectations for it."""
lineno = 0
@@ -625,30 +717,7 @@ class TestExpectationsFile:
tests = self._expand_tests(test_list_path)
self._add_tests(tests, expectations, test_list_path, lineno,
- modifiers, options)
-
- if not self._suppress_errors and (
- len(self._errors) or len(self._non_fatal_errors)):
- if self._is_debug_mode:
- build_type = 'DEBUG'
- else:
- build_type = 'RELEASE'
- print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
- % (self._test_platform_name.upper(), build_type)
-
- for error in self._non_fatal_errors:
- logging.error(error)
- if len(self._errors):
- raise SyntaxError('\n'.join(map(str, self._errors)))
-
- # Now add in the tests that weren't present in the expectations file
- expectations = set([PASS])
- options = []
- modifiers = []
- if self._full_test_list:
- for test in self._full_test_list:
- if not test in self._test_list_paths:
- self._add_test(test, modifiers, expectations, options)
+ modifiers, options, overrides_allowed)
def _get_options_list(self, listString):
return [part.strip().lower() for part in listString.strip().split(' ')]
@@ -692,15 +761,18 @@ class TestExpectationsFile:
return path
def _add_tests(self, tests, expectations, test_list_path, lineno,
- modifiers, options):
+ modifiers, options, overrides_allowed):
for test in tests:
- if self._already_seen_test(test, test_list_path, lineno):
+ if self._already_seen_test(test, test_list_path, lineno,
+ overrides_allowed):
continue
self._clear_expectations_for_test(test, test_list_path)
- self._add_test(test, modifiers, expectations, options)
+ self._add_test(test, modifiers, expectations, options,
+ overrides_allowed)
- def _add_test(self, test, modifiers, expectations, options):
+ def _add_test(self, test, modifiers, expectations, options,
+ overrides_allowed):
"""Sets the expected state for a given test.
This routine assumes the test has not been added before. If it has,
@@ -711,7 +783,9 @@ class TestExpectationsFile:
test: test to add
modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
expectations: sequence of expectations (PASS, IMAGE, etc.)
- options: sequence of keywords and bug identifiers."""
+ options: sequence of keywords and bug identifiers.
+ overrides_allowed: whether we're parsing the regular expectations
+ or the overriding expectations"""
self._test_to_expectations[test] = expectations
for expectation in expectations:
self._expectation_to_tests[expectation].add(test)
@@ -739,6 +813,9 @@ class TestExpectationsFile:
else:
self._result_type_to_tests[FAIL].add(test)
+ if overrides_allowed:
+ self._overridding_tests.add(test)
+
def _clear_expectations_for_test(self, test, test_list_path):
"""Remove prexisting expectations for this test.
This happens if we are seeing a more precise path
@@ -763,7 +840,8 @@ class TestExpectationsFile:
if test in set_of_tests:
set_of_tests.remove(test)
- def _already_seen_test(self, test, test_list_path, lineno):
+ def _already_seen_test(self, test, test_list_path, lineno,
+ allow_overrides):
"""Returns true if we've already seen a more precise path for this test
than the test_list_path.
"""
@@ -772,8 +850,19 @@ class TestExpectationsFile:
prev_base_path = self._test_list_paths[test]
if (prev_base_path == os.path.normpath(test_list_path)):
- self._add_error(lineno, 'Duplicate expectations.', test)
- return True
+ if (not allow_overrides or test in self._overridding_tests):
+ if allow_overrides:
+ expectation_source = "override"
+ else:
+ expectation_source = "expectation"
+ self._add_error(lineno, 'Duplicate %s.' % expectation_source,
+ test)
+ return True
+ else:
+ # We have seen this path, but that's okay because it's
+ # in the overrides and the earlier path was in the
+ # expectations.
+ return False
# Check if we've already seen a more precise path.
return prev_base_path.startswith(os.path.normpath(test_list_path))
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
new file mode 100644
index 0000000..d11f3e2
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for test_expectations.py."""
+
+import os
+import sys
+import unittest
+
+try:
+ d = os.path.dirname(__file__)
+except NameError:
+ d = os.path.dirname(sys.argv[0])
+
+sys.path.append(os.path.abspath(os.path.join(d, '..')))
+sys.path.append(os.path.abspath(os.path.join(d, '../../thirdparty')))
+
+import port
+from test_expectations import *
+
+class FunctionsTest(unittest.TestCase):
+ def test_result_was_expected(self):
+ # test basics
+ self.assertEquals(result_was_expected(PASS, set([PASS]),
+ False, False), True)
+ self.assertEquals(result_was_expected(TEXT, set([PASS]),
+ False, False), False)
+
+ # test handling of FAIL expectations
+ self.assertEquals(result_was_expected(IMAGE_PLUS_TEXT, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(IMAGE, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(TEXT, set([FAIL]),
+ False, False), True)
+ self.assertEquals(result_was_expected(CRASH, set([FAIL]),
+ False, False), False)
+
+ # test handling of SKIPped tests and results
+ self.assertEquals(result_was_expected(SKIP, set([CRASH]),
+ False, True), True)
+ self.assertEquals(result_was_expected(SKIP, set([CRASH]),
+ False, False), False)
+
+ # test handling of MISSING results and the REBASELINE modifier
+ self.assertEquals(result_was_expected(MISSING, set([PASS]),
+ True, False), True)
+ self.assertEquals(result_was_expected(MISSING, set([PASS]),
+ False, False), False)
+
+ def test_remove_pixel_failures(self):
+ self.assertEquals(remove_pixel_failures(set([TEXT])),
+ set([TEXT]))
+ self.assertEquals(remove_pixel_failures(set([PASS])),
+ set([PASS]))
+ self.assertEquals(remove_pixel_failures(set([IMAGE])),
+ set([PASS]))
+ self.assertEquals(remove_pixel_failures(set([IMAGE_PLUS_TEXT])),
+ set([TEXT]))
+ self.assertEquals(remove_pixel_failures(set([PASS, IMAGE, CRASH])),
+ set([PASS, CRASH]))
+
+
+class TestExpectationsTest(unittest.TestCase):
+
+ def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
+ self._port = port.get('test', None)
+ self._exp = None
+ unittest.TestCase.__init__(self, testFunc)
+
+ def get_test(self, test_name):
+ return os.path.join(self._port.layout_tests_dir(), test_name)
+
+ def get_basic_tests(self):
+ return [self.get_test('fast/html/article-element.html'),
+ self.get_test('fast/html/header-element.html'),
+ self.get_test('fast/html/keygen.html'),
+ self.get_test('fast/html/tab-order.html'),
+ self.get_test('fast/events/space-scroll-event.html'),
+ self.get_test('fast/events/tab-imagemap.html')]
+
+ def get_basic_expectations(self):
+ return """
+BUG_TEST : fast/html/article-element.html = TEXT
+BUG_TEST SKIP : fast/html/keygen.html = CRASH
+BUG_TEST REBASELINE : fast/htmltab-order.html = MISSING
+BUG_TEST : fast/events = IMAGE
+"""
+
+ def parse_exp(self, expectations, overrides=None):
+ self._exp = TestExpectations(self._port,
+ tests=self.get_basic_tests(),
+ expectations=expectations,
+ test_platform_name=self._port.test_platform_name(),
+ is_debug_mode=False,
+ is_lint_mode=False,
+ tests_are_present=True,
+ overrides=overrides)
+
+ def assert_exp(self, test, result):
+ self.assertEquals(self._exp.get_expectations(self.get_test(test)),
+ set([result]))
+
+ def test_basic(self):
+ self.parse_exp(self.get_basic_expectations())
+ self.assert_exp('fast/html/article-element.html', TEXT)
+ self.assert_exp('fast/events/tab-imagemap.html', IMAGE)
+ self.assert_exp('fast/html/header-element.html', PASS)
+
+ def test_duplicates(self):
+ self.assertRaises(SyntaxError, self.parse_exp, """
+BUG_TEST : fast/html/article-element.html = TEXT
+BUG_TEST : fast/html/article-element.html = IMAGE""")
+ self.assertRaises(SyntaxError, self.parse_exp,
+ self.get_basic_expectations(), """
+BUG_TEST : fast/html/article-element.html = TEXT
+BUG_TEST : fast/html/article-element.html = IMAGE""")
+
+ def test_overrides(self):
+ self.parse_exp(self.get_basic_expectations(), """
+BUG_OVERRIDE : fast/html/article-element.html = IMAGE""")
+ self.assert_exp('fast/html/article-element.html', IMAGE)
+
+ def test_matches_an_expected_result(self):
+
+ def match(test, result, pixel_tests_enabled):
+ return self._exp.matches_an_expected_result(
+ self.get_test(test), result, pixel_tests_enabled)
+
+ self.parse_exp(self.get_basic_expectations())
+ self.assertTrue(match('fast/html/article-element.html', TEXT, True))
+ self.assertTrue(match('fast/html/article-element.html', TEXT, False))
+ self.assertFalse(match('fast/html/article-element.html', CRASH, True))
+ self.assertFalse(match('fast/html/article-element.html', CRASH, False))
+
+ self.assertTrue(match('fast/events/tab-imagemap.html', IMAGE, True))
+ self.assertTrue(match('fast/events/tab-imagemap.html', PASS, False))
+
+ self.assertTrue(match('fast/html/keygen.html', SKIP, False))
+ self.assertTrue(match('fast/html/tab-order.html', PASS, False))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
index 56d7b5a..60bdbca 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
@@ -79,8 +79,8 @@ class TestFailure(object):
"""Returns an HTML string to be included on the results.html page."""
raise NotImplementedError
- def should_kill_test_shell(self):
- """Returns True if we should kill the test shell before the next
+ def should_kill_dump_render_tree(self):
+ """Returns True if we should kill DumpRenderTree before the next
test."""
return False
@@ -110,7 +110,7 @@ class FailureWithType(TestFailure):
def __init__(self, test_type):
TestFailure.__init__(self)
- # TODO(ojan): This class no longer needs to know the test_type.
+ # FIXME: This class no longer needs to know the test_type.
self._test_type = test_type
# Filename suffixes used by ResultHtmlOutput.
@@ -127,6 +127,9 @@ class FailureWithType(TestFailure):
single item is the [actual] filename suffix.
If out_names is empty, returns the empty string.
"""
+ # FIXME: Seems like a bad idea to separate the display name data
+ # from the path data by hard-coding the display name here
+ # and passing in the path information via out_names.
links = ['']
uris = [self.relative_output_filename(filename, fn) for
fn in out_names]
@@ -138,6 +141,8 @@ class FailureWithType(TestFailure):
links.append("<a href='%s'>diff</a>" % uris[2])
if len(uris) > 3:
links.append("<a href='%s'>wdiff</a>" % uris[3])
+ if len(uris) > 4:
+ links.append("<a href='%s'>pretty diff</a>" % uris[4])
return ' '.join(links)
def result_html_output(self, filename):
@@ -145,7 +150,7 @@ class FailureWithType(TestFailure):
class FailureTimeout(TestFailure):
- """Test timed out. We also want to restart the test shell if this
+ """Test timed out. We also want to restart DumpRenderTree if this
happens."""
@staticmethod
@@ -155,7 +160,7 @@ class FailureTimeout(TestFailure):
def result_html_output(self, filename):
return "<strong>%s</strong>" % self.message()
- def should_kill_test_shell(self):
+ def should_kill_dump_render_tree(self):
return True
@@ -172,7 +177,7 @@ class FailureCrash(TestFailure):
return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
stack)
- def should_kill_test_shell(self):
+ def should_kill_dump_render_tree(self):
return True
@@ -192,9 +197,10 @@ class FailureMissingResult(FailureWithType):
class FailureTextMismatch(FailureWithType):
"""Text diff output failed."""
# Filename suffixes used by ResultHtmlOutput.
+ # FIXME: Why don't we use the constants from TestTypeBase here?
OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"]
OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt",
- "-wdiff.html"]
+ "-wdiff.html", "-pretty-diff.html"]
def __init__(self, test_type, has_wdiff):
FailureWithType.__init__(self, test_type)
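
The renames above preserve the failure classes' key piece of polymorphism: each failure type decides whether the shared DumpRenderTree must be restarted. A minimal model (the class names match the patch; the any() driver loop is illustrative):

    # Sketch: timeouts and crashes request a DumpRenderTree restart;
    # ordinary mismatches do not.
    class TestFailure(object):
        def should_kill_dump_render_tree(self):
            return False

    class FailureTimeout(TestFailure):
        def should_kill_dump_render_tree(self):
            return True

    class FailureCrash(TestFailure):
        def should_kill_dump_render_tree(self):
            return True

    failures = [TestFailure(), FailureTimeout()]
    if any(f.should_kill_dump_render_tree() for f in failures):
        print('restarting DumpRenderTree before the next test')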
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
index 3c087c0..6754fa6 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
@@ -36,9 +36,16 @@ under that directory."""
import glob
import os
+import time
+
+from webkitpy.common.system import logutils
+
+
+_log = logutils.get_logger(__file__)
+
# When collecting test cases, we include any file with these extensions.
-_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
+_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.xhtmlmp', '.pl',
'.php', '.svg'])
# When collecting test cases, skip these directories
_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])
@@ -51,6 +58,7 @@ def gather_test_files(port, paths):
paths: a list of command line paths relative to the webkit/tests
directory. glob patterns are ok.
"""
+ gather_start_time = time.time()
paths_to_walk = set()
# if paths is empty, provide a pre-defined list.
if paths:
@@ -73,10 +81,16 @@ def gather_test_files(port, paths):
continue
for root, dirs, files in os.walk(path):
- # don't walk skipped directories and sub directories
+ # Don't walk skipped directories or their sub-directories.
if os.path.basename(root) in _skipped_directories:
del dirs[:]
continue
+ # This copy and for-in is slightly inefficient, but
+ # the extra walk avoidance consistently shaves .5 seconds
+ # off of total walk() time on my MacBook Pro.
+ for directory in dirs[:]:
+ if directory in _skipped_directories:
+ dirs.remove(directory)
for filename in files:
if _has_supported_extension(filename):
@@ -84,6 +98,9 @@ def gather_test_files(port, paths):
filename = os.path.normpath(filename)
test_files.add(filename)
+ gather_time = time.time() - gather_start_time
+ _log.debug("Test gathering took %f seconds" % gather_time)
+
return test_files
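
The walk-pruning change above relies on a documented property of os.walk(): in top-down traversal, mutating the dirs list in place controls which subdirectories get visited. A standalone sketch (the skip list is the one from the file; the walk root is an example):

    # Sketch: prune skipped directories both when we are already inside one
    # (del dirs[:]) and before walk() descends into one (dirs.remove(d)).
    import os

    _skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])

    for root, dirs, files in os.walk('.'):
        if os.path.basename(root) in _skipped_directories:
            del dirs[:]  # already in a skipped directory: go no deeper
            continue
        # Iterate over a copy (dirs[:]) because we mutate the live list.
        for d in dirs[:]:
            if d in _skipped_directories:
                dirs.remove(d)
        # ... collect test files from `files` here ...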
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py
index 3509675..e3ad6f4 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/__init__.py
@@ -29,37 +29,4 @@
"""Port-specific entrypoints for the layout tests test infrastructure."""
-
-import sys
-
-
-def get(port_name=None, options=None):
- """Returns an object implementing the Port interface. If
- port_name is None, this routine attempts to guess at the most
- appropriate port on this platform."""
- port_to_use = port_name
- if port_to_use is None:
- if sys.platform == 'win32':
- port_to_use = 'chromium-win'
- elif sys.platform == 'linux2':
- port_to_use = 'chromium-linux'
- elif sys.platform == 'darwin':
- port_to_use = 'chromium-mac'
-
- if port_to_use == 'test':
- import test
- return test.TestPort(port_name, options)
- elif port_to_use.startswith('mac'):
- import mac
- return mac.MacPort(port_name, options)
- elif port_to_use.startswith('chromium-mac'):
- import chromium_mac
- return chromium_mac.ChromiumMacPort(port_name, options)
- elif port_to_use.startswith('chromium-linux'):
- import chromium_linux
- return chromium_linux.ChromiumLinuxPort(port_name, options)
- elif port_to_use.startswith('chromium-win'):
- import chromium_win
- return chromium_win.ChromiumWinPort(port_name, options)
-
- raise NotImplementedError('unsupported port: %s' % port_to_use)
+from factory import get
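
The new factory module's contents are not shown in this hunk, so here is an illustrative reduction of the dispatch it replaces; the table-driven shape and the stub class are hypothetical, reconstructed from the deleted code above:

    # Sketch: guess a default port name from sys.platform, then dispatch on
    # the name prefix, as the removed get() did.
    import sys

    class ChromiumMacPort(object):  # stub standing in for the real port
        def __init__(self, name, options):
            self.name, self.options = name, options

    PORTS = {'chromium-mac': ChromiumMacPort}  # one entry per port family

    def _default_port_name():
        return {'win32': 'chromium-win',
                'linux2': 'chromium-linux',
                'darwin': 'chromium-mac'}.get(sys.platform)

    def get(port_name=None, options=None):
        name = port_name or _default_port_name()
        for prefix, port_class in PORTS.items():
            if name and name.startswith(prefix):
                return port_class(name, options)
        raise NotImplementedError('unsupported port: %s' % name)

    print(get('chromium-mac-leopard').name)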
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
index 9ff3671..1dd5b93 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/apache_http_server.py
@@ -38,6 +38,8 @@ import sys
import http_server_base
+_log = logging.getLogger("webkitpy.layout_tests.port.apache_http_server")
+
class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
@@ -77,14 +79,15 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
error_log = self._cygwin_safe_join(output_dir, "error_log.txt")
document_root = self._cygwin_safe_join(test_dir, "http", "tests")
+ # FIXME: We shouldn't be calling a protected method of _port_obj!
executable = self._port_obj._path_to_apache()
if self._is_cygwin():
executable = self._get_cygwin_path(executable)
cmd = [executable,
- '-f', self._get_apache_config_file_path(test_dir, output_dir),
- '-C', "\'DocumentRoot %s\'" % document_root,
- '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir,
+ '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir),
+ '-C', "\'DocumentRoot \"%s\"\'" % document_root,
+               '-c', "\'Alias /js-test-resources \"%s\"\'" % js_test_resources_dir,
'-C', "\'Listen %s\'" % "127.0.0.1:8000",
'-C', "\'Listen %s\'" % "127.0.0.1:8081",
'-c', "\'TypesConfig \"%s\"\'" % mime_types_path,
@@ -174,7 +177,7 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
         It will listen to 127.0.0.1 on each of the given ports.
"""
return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port,
- 'DocumentRoot %s' % document_root,
+ 'DocumentRoot "%s"' % document_root,
ssl and 'SSLEngine On' or '',
'</VirtualHost>', ''))
@@ -188,7 +191,7 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
shell=True)
err = self._httpd_proc.stderr.read()
if len(err):
- logging.debug(err)
+ _log.debug(err)
return False
return True
@@ -197,22 +200,23 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
# Stop any currently running servers.
self.stop()
- logging.debug("Starting apache http server")
+ _log.debug("Starting apache http server")
server_started = self.wait_for_action(self._start_httpd_process)
if server_started:
- logging.debug("Apache started. Testing ports")
+ _log.debug("Apache started. Testing ports")
server_started = self.wait_for_action(
self.is_server_running_on_all_ports)
if server_started:
- logging.debug("Server successfully started")
+ _log.debug("Server successfully started")
else:
raise Exception('Failed to start http server')
def stop(self):
"""Stops the apache http server."""
- logging.debug("Shutting down any running http servers")
+ _log.debug("Shutting down any running http servers")
httpd_pid = None
if os.path.exists(self._pid_file):
httpd_pid = int(open(self._pid_file).readline())
+ # FIXME: We shouldn't be calling a protected method of _port_obj!
self._port_obj._shut_down_http_server(httpd_pid)
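
The quoting changes in the hunks above matter because httpd re-parses each -C/-c directive string itself: without inner double quotes, a DocumentRoot containing spaces would be split into several tokens. A sketch of the pattern, with a hypothetical path:

    # Hypothetical path with a space in it; the escaped inner quotes keep
    # it a single token when Apache parses the directive.
    document_root = '/tmp/layout tests/http/tests'
    directives = [
        '-C', "'DocumentRoot \"%s\"'" % document_root,
        '-C', "'Listen 127.0.0.1:8000'",
    ]
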
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
index 2b25e29..fb6fddf 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/base.py
@@ -36,27 +36,49 @@ import errno
import os
import subprocess
import sys
+import time
import apache_http_server
import http_server
import websocket_server
+from webkitpy.common.system import logutils
+from webkitpy.common.system.executive import Executive, ScriptError
+
+
+_log = logutils.get_logger(__file__)
+
+
# Python bug workaround. See Port.wdiff_text() for an explanation.
_wdiff_available = True
-
+_pretty_patch_available = True
# FIXME: This class should merge with webkitpy.webkit_port at some point.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package.
"""
- def __init__(self, port_name=None, options=None):
+ @staticmethod
+ def flag_from_configuration(configuration):
+ flags_by_configuration = {
+ "Debug": "--debug",
+ "Release": "--release",
+ }
+ return flags_by_configuration[configuration]
+
+ def __init__(self, port_name=None, options=None, executive=Executive()):
self._name = port_name
self._options = options
self._helper = None
self._http_server = None
self._webkit_base_dir = None
self._websocket_server = None
+ self._executive = executive
+
+ def default_child_processes(self):
+ """Return the number of DumpRenderTree instances to use for this
+ port."""
+ return self._executive.cpu_count()
def baseline_path(self):
"""Return the absolute path to the directory to store new baselines
@@ -68,38 +90,53 @@ class Port(object):
baselines. The directories are searched in order."""
raise NotImplementedError('Port.baseline_search_path')
- def check_sys_deps(self):
+ def check_build(self, needs_http):
+ """This routine is used to ensure that the build is up to date
+ and all the needed binaries are present."""
+ raise NotImplementedError('Port.check_build')
+
+ def check_sys_deps(self, needs_http):
"""If the port needs to do some runtime checks to ensure that the
- tests can be run successfully, they should be done here.
+ tests can be run successfully, it should override this routine.
+ This step can be skipped with --nocheck-sys-deps.
Returns whether the system is properly configured."""
- raise NotImplementedError('Port.check_sys_deps')
+ return True
+
+    def check_image_diff(self, override_step=None, logging=True):
+        """This routine is used to check whether the image_diff binary
+        exists."""
+        raise NotImplementedError('Port.check_image_diff')
- def compare_text(self, actual_text, expected_text):
+ def compare_text(self, expected_text, actual_text):
"""Return whether or not the two strings are *not* equal. This
routine is used to diff text output.
While this is a generic routine, we include it in the Port
interface so that it can be overriden for testing purposes."""
- return actual_text != expected_text
+ return expected_text != actual_text
- def diff_image(self, actual_filename, expected_filename,
+ def diff_image(self, expected_filename, actual_filename,
diff_filename=None):
"""Compare two image files and produce a delta image file.
- Return 1 if the two files are different, 0 if they are the same.
+ Return True if the two files are different, False if they are the same.
Also produce a delta image of the two images and write that into
|diff_filename| if it is not None.
While this is a generic routine, we include it in the Port
interface so that it can be overriden for testing purposes."""
executable = self._path_to_image_diff()
- cmd = [executable, '--diff', actual_filename, expected_filename]
+
if diff_filename:
- cmd.append(diff_filename)
- result = 1
+ cmd = [executable, '--diff', expected_filename, actual_filename,
+ diff_filename]
+ else:
+ cmd = [executable, expected_filename, actual_filename]
+
+ result = True
try:
- result = subprocess.call(cmd)
+ if subprocess.call(cmd) == 0:
+ return False
except OSError, e:
if e.errno == errno.ENOENT or e.errno == errno.EACCES:
_compare_available = False
@@ -111,8 +148,8 @@ class Port(object):
pass
return result
- def diff_text(self, actual_text, expected_text,
- actual_filename, expected_filename):
+ def diff_text(self, expected_text, actual_text,
+ expected_filename, actual_filename):
"""Returns a string containing the diff of the two text strings
in 'unified diff' format.
@@ -124,6 +161,13 @@ class Port(object):
actual_filename)
return ''.join(diff)
+ def driver_name(self):
+ """Returns the name of the actual binary that is performing the test,
+ so that it can be referred to in log messages. In most cases this
+ will be DumpRenderTree, but if a port uses a binary with a different
+ name, it can be overridden here."""
+ return "DumpRenderTree"
+
def expected_baselines(self, filename, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
@@ -262,14 +306,7 @@ class Port(object):
         may be different (e.g., 'win-xp' instead of 'chromium-win-xp').
return self._name
- def num_cores(self):
- """Return the number of cores/cpus available on this machine.
-
- This routine is used to determine the default amount of parallelism
- used by run-chromium-webkit-tests."""
- raise NotImplementedError('Port.num_cores')
-
- # FIXME: This could be replaced by functions in webkitpy.scm.
+ # FIXME: This could be replaced by functions in webkitpy.common.checkout.scm.
def path_from_webkit_base(self, *comps):
"""Returns the full path to path made by joining the top of the
WebKit source tree and the list of path components in |*comps|."""
@@ -288,7 +325,7 @@ class Port(object):
This is used by the rebaselining tool. Raises NotImplementedError
if the port does not use expectations files."""
raise NotImplementedError('Port.path_to_test_expectations_file')
-
+
def remove_directory(self, *path):
"""Recursively removes a directory, even if it's marked read-only.
@@ -321,7 +358,7 @@ class Port(object):
win32 = False
def remove_with_retry(rmfunc, path):
                     os.chmod(path, stat.S_IWRITE)
if win32:
win32api.SetFileAttributes(path,
win32con.FILE_ATTRIBUTE_NORMAL)
@@ -381,10 +418,10 @@ class Port(object):
raise NotImplementedError('Port.start_driver')
def start_helper(self):
- """Start a layout test helper if needed on this port. The test helper
- is used to reconfigure graphics settings and do other things that
- may be necessary to ensure a known test configuration."""
- raise NotImplementedError('Port.start_helper')
+ """If a port needs to reconfigure graphics settings or do other
+ things to ensure a known test configuration, it should override this
+ method."""
+ pass
def start_http_server(self):
"""Start a web server if it is available. Do nothing if
@@ -408,8 +445,9 @@ class Port(object):
def stop_helper(self):
"""Shut down the test helper if it is running. Do nothing if
- it isn't, or it isn't available."""
- raise NotImplementedError('Port.stop_helper')
+ it isn't, or it isn't available. If a port overrides start_helper()
+ it must override this routine as well."""
+ pass
def stop_http_server(self):
"""Shut down the http server if it is running. Do nothing if
@@ -430,6 +468,15 @@ class Port(object):
test_expectations file. See test_expectations.py for more details."""
raise NotImplementedError('Port.test_expectations')
+ def test_expectations_overrides(self):
+ """Returns an optional set of overrides for the test_expectations.
+
+ This is used by ports that have code in two repositories, and where
+ it is possible that you might need "downstream" expectations that
+ temporarily override the "upstream" expectations until the port can
+ sync up the two repos."""
+ return None
+
def test_base_platform_names(self):
"""Return a list of the 'base' platforms on your port. The base
platforms represent different architectures, operating systems,
@@ -458,6 +505,12 @@ class Port(object):
         might return 'mac' as a test_platform name.
raise NotImplementedError('Port.platforms')
+ def test_platform_name_to_name(self, test_platform_name):
+ """Returns the Port platform name that corresponds to the name as
+ referenced in the expectations file. E.g., "mac" returns
+ "chromium-mac" on the Chromium ports."""
+ raise NotImplementedError('Port.test_platform_name_to_name')
+
def version(self):
"""Returns a string indicating the version of a given platform, e.g.
'-leopard' or '-xp'.
@@ -476,8 +529,9 @@ class Port(object):
'--end-delete=##WDIFF_END##',
'--start-insert=##WDIFF_ADD##',
'--end-insert=##WDIFF_END##',
- expected_filename,
- actual_filename]
+ actual_filename,
+ expected_filename]
+        # FIXME: Why not just check os.path.exists(executable) once?
global _wdiff_available
result = ''
try:
@@ -500,6 +554,7 @@ class Port(object):
# http://bugs.python.org/issue1236
if _wdiff_available:
try:
+ # FIXME: Use Executive() here.
wdiff = subprocess.Popen(cmd,
stdout=subprocess.PIPE).communicate()[0]
except ValueError, e:
@@ -521,6 +576,31 @@ class Port(object):
raise e
return result
+ _pretty_patch_error_html = "Failed to run PrettyPatch, see error console."
+
+ def pretty_patch_text(self, diff_path):
+ global _pretty_patch_available
+ if not _pretty_patch_available:
+ return self._pretty_patch_error_html
+ pretty_patch_path = self.path_from_webkit_base("BugsSite", "PrettyPatch")
+ prettify_path = os.path.join(pretty_patch_path, "prettify.rb")
+ command = ["ruby", "-I", pretty_patch_path, prettify_path, diff_path]
+ try:
+ return self._executive.run_command(command)
+ except OSError, e:
+            # If the system is missing ruby, log the error and stop trying.
+ _pretty_patch_available = False
+ _log.error("Failed to run PrettyPatch (%s): %s" % (command, e))
+ return self._pretty_patch_error_html
+ except ScriptError, e:
+ # If ruby failed to run for some reason, log the command output and stop trying.
+ _pretty_patch_available = False
+ _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, e.message_with_output()))
+ return self._pretty_patch_error_html
+
+ def default_configuration(self):
+ return "Release"
+
#
# PROTECTED ROUTINES
#
@@ -528,13 +608,6 @@ class Port(object):
# or any of its subclasses.
#
- def _kill_process(self, pid):
- """Forcefully kill a process.
-
- This routine should not be used or needed generically, but can be
- used in helper files like http_server.py."""
- raise NotImplementedError('Port.kill_process')
-
def _path_to_apache(self):
"""Returns the full path to the apache binary.
@@ -547,7 +620,7 @@ class Port(object):
This is needed only by ports that use the apache_http_server module."""
raise NotImplementedError('Port.path_to_apache_config_file')
- def _path_to_driver(self):
+ def _path_to_driver(self, configuration=None):
"""Returns the full path to the test driver (DumpRenderTree)."""
raise NotImplementedError('Port.path_to_driver')
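
The reworked diff_image() above assumes the usual convention for comparison tools: the image_diff binary exits 0 when the images match and non-zero when they differ. A self-contained sketch of that convention (images_differ is a hypothetical wrapper, not the Port method):

    import subprocess

    def images_differ(image_diff_path, expected_png, actual_png):
        # Exit status 0 means the images match; anything else, including
        # a failure to launch the binary, is reported as a difference.
        try:
            return subprocess.call([image_diff_path, expected_png,
                                    actual_png]) != 0
        except OSError:
            return True
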
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
index 1123376..8bae2a9 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -36,11 +36,43 @@ import signal
import subprocess
import sys
import time
+import webbrowser
import base
import http_server
+
+# FIXME: To use the DRT-based version of this file, we need to be able to
+# run the webkit code, which uses server_process, which requires UNIX-style
+# non-blocking I/O with selects(), which requires fcntl() which doesn't exist
+# on Windows.
+if sys.platform not in ('win32', 'cygwin'):
+ import webkit
+
import websocket_server
+_log = logging.getLogger("webkitpy.layout_tests.port.chromium")
+
+
+# FIXME: This function doesn't belong in this package.
+def check_file_exists(path_to_file, file_description, override_step=None,
+ logging=True):
+ """Verify the file is present where expected or log an error.
+
+ Args:
+      path_to_file: The full path to the file whose existence is checked.
+      file_description: The (human friendly) name or description of the file
+          you're looking for (e.g., "HTTP Server"). Used for error logging.
+      override_step: An optional string to be logged if the check fails.
+      logging: Whether or not to log the error messages."""
+ if not os.path.exists(path_to_file):
+ if logging:
+ _log.error('Unable to find %s' % file_description)
+ _log.error(' at %s' % path_to_file)
+ if override_step:
+ _log.error(' %s' % override_step)
+ _log.error('')
+ return False
+ return True
+
class ChromiumPort(base.Port):
"""Abstract base class for Chromium implementations of the Port class."""
@@ -50,81 +82,116 @@ class ChromiumPort(base.Port):
self._chromium_base_dir = None
def baseline_path(self):
- return self._chromium_baseline_path(self._name)
+ return self._webkit_baseline_path(self._name)
- def check_sys_deps(self):
+ def check_build(self, needs_http):
result = True
- test_shell_binary_path = self._path_to_driver()
- if os.path.exists(test_shell_binary_path):
- proc = subprocess.Popen([test_shell_binary_path,
- '--check-layout-test-sys-deps'])
- if proc.wait() != 0:
- logging.error("Aborting because system dependencies check "
- "failed.")
- logging.error("To override, invoke with --nocheck-sys-deps")
- result = False
- else:
- logging.error('test driver is not found at %s' %
- test_shell_binary_path)
- result = False
- image_diff_path = self._path_to_image_diff()
- if (not os.path.exists(image_diff_path) and not
- self._options.no_pixel_tests):
- logging.error('image diff not found at %s' % image_diff_path)
- logging.error("To override, invoke with --no-pixel-tests")
+ # FIXME: see comment above re: import webkit
+ if (sys.platform in ('win32', 'cygwin') and self._options and
+ hasattr(self._options, 'use_drt') and self._options.use_drt):
+ _log.error('--use-drt is not supported on Windows yet')
+ _log.error('')
result = False
+ dump_render_tree_binary_path = self._path_to_driver()
+ result = check_file_exists(dump_render_tree_binary_path,
+ 'test driver') and result
+ if result and self._options.build:
+ result = self._check_driver_build_up_to_date(
+ self._options.configuration)
+ else:
+ _log.error('')
+
+ helper_path = self._path_to_helper()
+ if helper_path:
+ result = check_file_exists(helper_path,
+ 'layout test helper') and result
+
+ if self._options.pixel_tests:
+ result = self.check_image_diff(
+ 'To override, invoke with --no-pixel-tests') and result
+
return result
- def compare_text(self, actual_text, expected_text):
- return actual_text != expected_text
+ def check_sys_deps(self, needs_http):
+ dump_render_tree_binary_path = self._path_to_driver()
+ proc = subprocess.Popen([dump_render_tree_binary_path,
+ '--check-layout-test-sys-deps'])
+ if proc.wait():
+ _log.error('System dependencies check failed.')
+ _log.error('To override, invoke with --nocheck-sys-deps')
+ _log.error('')
+ return False
+ return True
+
+ def check_image_diff(self, override_step=None, logging=True):
+ image_diff_path = self._path_to_image_diff()
+ return check_file_exists(image_diff_path, 'image diff exe',
+ override_step, logging)
+
+ def driver_name(self):
+ return "test_shell"
def path_from_chromium_base(self, *comps):
"""Returns the full path to path made by joining the top of the
Chromium source tree and the list of path components in |*comps|."""
if not self._chromium_base_dir:
abspath = os.path.abspath(__file__)
- self._chromium_base_dir = abspath[0:abspath.find('third_party')]
+ offset = abspath.find('third_party')
+ if offset == -1:
+ raise AssertionError('could not find Chromium base dir from ' +
+ abspath)
+ self._chromium_base_dir = abspath[0:offset]
return os.path.join(self._chromium_base_dir, *comps)
def path_to_test_expectations_file(self):
- return self.path_from_chromium_base('webkit', 'tools', 'layout_tests',
- 'test_expectations.txt')
+ return self.path_from_webkit_base('LayoutTests', 'platform',
+ 'chromium', 'test_expectations.txt')
def results_directory(self):
- return self.path_from_chromium_base('webkit', self._options.target,
- self._options.results_directory)
+ try:
+ return self.path_from_chromium_base('webkit',
+ self._options.configuration, self._options.results_directory)
+ except AssertionError:
+ return self.path_from_webkit_base('WebKit', 'chromium',
+ 'xcodebuild', self._options.configuration,
+ self._options.results_directory)
def setup_test_run(self):
# Delete the disk cache if any to ensure a clean test run.
- test_shell_binary_path = self._path_to_driver()
- cachedir = os.path.split(test_shell_binary_path)[0]
+ dump_render_tree_binary_path = self._path_to_driver()
+ cachedir = os.path.split(dump_render_tree_binary_path)[0]
cachedir = os.path.join(cachedir, "cache")
if os.path.exists(cachedir):
shutil.rmtree(cachedir)
def show_results_html_file(self, results_filename):
- subprocess.Popen([self._path_to_driver(),
- self.filename_to_uri(results_filename)])
+ uri = self.filename_to_uri(results_filename)
+ if self._options.use_drt:
+ webbrowser.open(uri, new=1)
+ else:
+ subprocess.Popen([self._path_to_driver(), uri])
def start_driver(self, image_path, options):
"""Starts a new Driver and returns a handle to it."""
+ if self._options.use_drt:
+ return webkit.WebKitDriver(self, image_path, options)
return ChromiumDriver(self, image_path, options)
def start_helper(self):
helper_path = self._path_to_helper()
if helper_path:
- logging.debug("Starting layout helper %s" % helper_path)
+ _log.debug("Starting layout helper %s" % helper_path)
self._helper = subprocess.Popen([helper_path],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
if not is_ready.startswith('ready'):
- logging.error("layout_test_helper failed to be ready")
+ _log.error("layout_test_helper failed to be ready")
def stop_helper(self):
if self._helper:
- logging.debug("Stopping layout test helper")
+ _log.debug("Stopping layout test helper")
self._helper.stdin.write("x\n")
self._helper.stdin.close()
self._helper.wait()
@@ -140,10 +207,27 @@ class ChromiumPort(base.Port):
expectations_file = self.path_to_test_expectations_file()
return file(expectations_file, "r").read()
+ def test_expectations_overrides(self):
+ try:
+ overrides_file = self.path_from_chromium_base('webkit', 'tools',
+ 'layout_tests', 'test_expectations.txt')
+ except AssertionError:
+ return None
+ if os.path.exists(overrides_file):
+ return file(overrides_file, "r").read()
+ else:
+ return None
+
def test_platform_names(self):
return self.test_base_platform_names() + ('win-xp',
'win-vista', 'win-7')
+ def test_platform_name_to_name(self, test_platform_name):
+ if test_platform_name in self.test_platform_names():
+ return 'chromium-' + test_platform_name
+ raise ValueError('Unsupported test_platform_name: %s' %
+ test_platform_name)
+
#
# PROTECTED METHODS
#
@@ -151,11 +235,34 @@ class ChromiumPort(base.Port):
# or any subclasses.
#
+ def _check_driver_build_up_to_date(self, configuration):
+ if configuration in ('Debug', 'Release'):
+ try:
+ debug_path = self._path_to_driver('Debug')
+ release_path = self._path_to_driver('Release')
+
+ debug_mtime = os.stat(debug_path).st_mtime
+ release_mtime = os.stat(release_path).st_mtime
+
+ if (debug_mtime > release_mtime and configuration == 'Release' or
+ release_mtime > debug_mtime and configuration == 'Debug'):
+                    _log.warning('You are not running the most '
+                        'recent DumpRenderTree binary. Pass --debug to '
+                        'select the Debug build, or omit it to run the '
+                        'Release build.')
+ _log.warning('')
+ # This will fail if we don't have both a debug and release binary.
+ # That's fine because, in this case, we must already be running the
+ # most up-to-date one.
+ except OSError:
+ pass
+ return True
+
def _chromium_baseline_path(self, platform):
if platform is None:
platform = self.name()
- return self.path_from_chromium_base('webkit', 'data', 'layout_tests',
- 'platform', platform, 'LayoutTests')
+ return self.path_from_webkit_base('LayoutTests', 'platform', platform)
+
class ChromiumDriver(base.Driver):
"""Abstract interface for the DumpRenderTree interface."""
@@ -163,7 +270,7 @@ class ChromiumDriver(base.Driver):
def __init__(self, port, image_path, options):
self._port = port
self._options = options
- self._target = port._options.target
+ self._configuration = port._options.configuration
self._image_path = image_path
cmd = []
@@ -181,10 +288,17 @@ class ChromiumDriver(base.Driver):
cmd += [port._path_to_driver(), '--layout-tests']
if options:
cmd += options
+
+ # We need to pass close_fds=True to work around Python bug #2320
+ # (otherwise we can hang when we kill DumpRenderTree when we are running
+ # multiple threads). See http://bugs.python.org/issue2320 .
+ # Note that close_fds isn't supported on Windows, but this bug only
+ # shows up on Mac and Linux.
+ close_flag = sys.platform not in ('win32', 'cygwin')
self._proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
-
+ stderr=subprocess.STDOUT,
+ close_fds=close_flag)
def poll(self):
return self._proc.poll()
@@ -207,14 +321,19 @@ class ChromiumDriver(base.Driver):
cmd += ' ' + checksum
cmd += "\n"
- self._proc.stdin.write(cmd)
- line = self._proc.stdout.readline()
- while line.rstrip() != "#EOF":
+ try:
+ self._proc.stdin.write(cmd)
+ line = self._proc.stdout.readline()
+ except IOError, e:
+            _log.error("IOError communicating with test_shell: " + str(e))
+ crash = True
+
+ while not crash and line.rstrip() != "#EOF":
# Make sure we haven't crashed.
if line == '' and self.poll() is not None:
# This is hex code 0xc000001d, which is used for abrupt
# termination. This happens if we hit ctrl+c from the prompt
- # and we happen to be waiting on the test_shell.
+            # and we happen to be waiting on DumpRenderTree.
# sdoyon: Not sure for which OS and in what circumstances the
# above code is valid. What works for me under Linux to detect
# ctrl+c is for the subprocess returncode to be negative
@@ -229,8 +348,8 @@ class ChromiumDriver(base.Driver):
if line.startswith("#URL:"):
actual_uri = line.rstrip()[5:]
if uri != actual_uri:
- logging.fatal("Test got out of sync:\n|%s|\n|%s|" %
- (uri, actual_uri))
+ _log.fatal("Test got out of sync:\n|%s|\n|%s|" %
+ (uri, actual_uri))
raise AssertionError("test out of sync")
elif line.startswith("#MD5:"):
actual_checksum = line.rstrip()[5:]
@@ -242,7 +361,11 @@ class ChromiumDriver(base.Driver):
else:
error.append(line)
- line = self._proc.stdout.readline()
+ try:
+ line = self._proc.stdout.readline()
+ except IOError, e:
+ _log.error("IOError while reading: " + str(e))
+ crash = True
return (crash, timeout, actual_checksum, ''.join(output),
''.join(error))
@@ -253,10 +376,20 @@ class ChromiumDriver(base.Driver):
self._proc.stdout.close()
if self._proc.stderr:
self._proc.stderr.close()
- if (sys.platform not in ('win32', 'cygwin') and
- not self._proc.poll()):
- # Closing stdin/stdout/stderr hangs sometimes on OS X.
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._proc.pid)], stderr=null)
- null.close()
+ if sys.platform not in ('win32', 'cygwin'):
+ # Closing stdin/stdout/stderr hangs sometimes on OS X,
+ # (see __init__(), above), and anyway we don't want to hang
+ # the harness if DumpRenderTree is buggy, so we wait a couple
+ # seconds to give DumpRenderTree a chance to clean up, but then
+ # force-kill the process if necessary.
+ KILL_TIMEOUT = 3.0
+ timeout = time.time() + KILL_TIMEOUT
+ while self._proc.poll() is None and time.time() < timeout:
+ time.sleep(0.1)
+ if self._proc.poll() is None:
+ _log.warning('stopping test driver timed out, '
+ 'killing it')
+ null = open(os.devnull, "w")
+ subprocess.Popen(["kill", "-9",
+ str(self._proc.pid)], stderr=null)
+ null.close()
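
The stop() rewrite above trades an unconditional kill -9 for a bounded grace period. The same pattern in isolation, as a hedged POSIX-only sketch (using os.kill rather than shelling out, which is equivalent on Mac and Linux):

    import os
    import signal
    import time

    def stop_with_grace_period(proc, grace_seconds=3.0):
        # Give the child a chance to exit cleanly, then force-kill it.
        deadline = time.time() + grace_seconds
        while proc.poll() is None and time.time() < deadline:
            time.sleep(0.1)
        if proc.poll() is None:
            os.kill(proc.pid, signal.SIGKILL)
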
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
index b817251..9a595f2 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
@@ -27,8 +27,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Chromium Mac implementation of the Port interface."""
+"""Chromium Linux implementation of the Port interface."""
+import logging
import os
import platform
import signal
@@ -36,6 +37,8 @@ import subprocess
import chromium
+_log = logging.getLogger("webkitpy.layout_tests.port.chromium_linux")
+
class ChromiumLinuxPort(chromium.ChromiumPort):
"""Chromium Linux implementation of the Port class."""
@@ -43,25 +46,32 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
def __init__(self, port_name=None, options=None):
if port_name is None:
port_name = 'chromium-linux'
- if options and not hasattr(options, 'target'):
- options.target = 'Release'
+ if options and not hasattr(options, 'configuration'):
+ options.configuration = 'Release'
chromium.ChromiumPort.__init__(self, port_name, options)
def baseline_search_path(self):
- return [self.baseline_path(),
- self._chromium_baseline_path('chromium-win'),
+ return [self._webkit_baseline_path('chromium-linux'),
+ self._webkit_baseline_path('chromium-win'),
+ self._webkit_baseline_path('chromium'),
self._webkit_baseline_path('win'),
self._webkit_baseline_path('mac')]
- def check_sys_deps(self):
- # We have no platform-specific dependencies to check.
- return True
-
- def num_cores(self):
- num_cores = os.sysconf("SC_NPROCESSORS_ONLN")
- if isinstance(num_cores, int) and num_cores > 0:
- return num_cores
- return 1
+ def check_build(self, needs_http):
+ result = chromium.ChromiumPort.check_build(self, needs_http)
+ if needs_http:
+ if self._options.use_apache:
+ result = self._check_apache_install() and result
+ else:
+ result = self._check_lighttpd_install() and result
+ result = self._check_wdiff_install() and result
+
+ if not result:
+ _log.error('For complete Linux build requirements, please see:')
+ _log.error('')
+ _log.error(' http://code.google.com/p/chromium/wiki/'
+ 'LinuxBuildInstructions')
+ return result
def test_platform_name(self):
# We use 'linux' instead of 'chromium-linux' in test_expectations.txt.
@@ -78,19 +88,42 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
def _build_path(self, *comps):
base = self.path_from_chromium_base()
if os.path.exists(os.path.join(base, 'sconsbuild')):
- return self.path_from_chromium_base('sconsbuild',
- self._options.target, *comps)
+ return self.path_from_chromium_base('sconsbuild', *comps)
else:
- return self.path_from_chromium_base('out',
- self._options.target, *comps)
-
- def _kill_process(self, pid):
- """Forcefully kill the process.
+ return self.path_from_chromium_base('out', *comps)
+
+ def _check_apache_install(self):
+ result = chromium.check_file_exists(self._path_to_apache(),
+ "apache2")
+ result = chromium.check_file_exists(self._path_to_apache_config_file(),
+ "apache2 config file") and result
+ if not result:
+ _log.error(' Please install using: "sudo apt-get install '
+ 'apache2 libapache2-mod-php5"')
+ _log.error('')
+ return result
+
+ def _check_lighttpd_install(self):
+ result = chromium.check_file_exists(
+ self._path_to_lighttpd(), "LigHTTPd executable")
+ result = chromium.check_file_exists(self._path_to_lighttpd_php(),
+ "PHP CGI executable") and result
+ result = chromium.check_file_exists(self._path_to_lighttpd_modules(),
+ "LigHTTPd modules") and result
+ if not result:
+ _log.error(' Please install using: "sudo apt-get install '
+ 'lighttpd php5-cgi"')
+ _log.error('')
+ return result
+
+ def _check_wdiff_install(self):
+ result = chromium.check_file_exists(self._path_to_wdiff(), 'wdiff')
+ if not result:
+ _log.error(' Please install using: "sudo apt-get install '
+ 'wdiff"')
+ _log.error('')
+ return result
- Args:
- pid: The id of the process to be killed.
- """
- os.kill(pid, signal.SIGKILL)
def _kill_all_process(self, process_name):
null = open(os.devnull)
@@ -99,11 +132,19 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
null.close()
def _path_to_apache(self):
- return '/usr/sbin/apache2'
+ if self._is_redhat_based():
+ return '/usr/sbin/httpd'
+ else:
+ return '/usr/sbin/apache2'
def _path_to_apache_config_file(self):
+ if self._is_redhat_based():
+ config_name = 'fedora-httpd.conf'
+ else:
+ config_name = 'apache2-debian-httpd.conf'
+
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
- 'apache2-debian-httpd.conf')
+ config_name)
def _path_to_lighttpd(self):
return "/usr/sbin/lighttpd"
@@ -114,17 +155,25 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
def _path_to_lighttpd_php(self):
return "/usr/bin/php-cgi"
- def _path_to_driver(self):
- return self._build_path('test_shell')
+ def _path_to_driver(self, configuration=None):
+ if not configuration:
+ configuration = self._options.configuration
+ return self._build_path(configuration, 'test_shell')
def _path_to_helper(self):
return None
def _path_to_image_diff(self):
- return self._build_path('image_diff')
+ return self._build_path(self._options.configuration, 'image_diff')
def _path_to_wdiff(self):
- return 'wdiff'
+ if self._is_redhat_based():
+ return '/usr/bin/dwdiff'
+ else:
+ return '/usr/bin/wdiff'
+
+ def _is_redhat_based(self):
+ return os.path.exists(os.path.join('/etc', 'redhat-release'))
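
Taken together, _path_to_apache(), _path_to_apache_config_file(), and the _is_redhat_based() probe above implement a small distro-family switch, keyed on the presence of /etc/redhat-release. The decision distilled into one table-driven sketch (apache_paths is a hypothetical helper combining the two methods):

    import os

    def apache_paths():
        # Returns (httpd binary, config file name) for this distro family.
        if os.path.exists('/etc/redhat-release'):
            return ('/usr/sbin/httpd', 'fedora-httpd.conf')
        return ('/usr/sbin/apache2', 'apache2-debian-httpd.conf')
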
def _shut_down_http_server(self, server_pid):
"""Shut down the lighttpd web server. Blocks until it's fully
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
index bcffcf8..d5e1757 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
@@ -29,6 +29,7 @@
"""Chromium Mac implementation of the Port interface."""
+import logging
import os
import platform
import signal
@@ -36,6 +37,8 @@ import subprocess
import chromium
+_log = logging.getLogger("webkitpy.layout_tests.port.chromium_mac")
+
class ChromiumMacPort(chromium.ChromiumPort):
"""Chromium Mac implementation of the Port class."""
@@ -43,22 +46,31 @@ class ChromiumMacPort(chromium.ChromiumPort):
def __init__(self, port_name=None, options=None):
if port_name is None:
port_name = 'chromium-mac'
- if options and not hasattr(options, 'target'):
- options.target = 'Release'
+ if options and not hasattr(options, 'configuration'):
+ options.configuration = 'Release'
chromium.ChromiumPort.__init__(self, port_name, options)
def baseline_search_path(self):
- return [self.baseline_path(),
+ return [self._webkit_baseline_path('chromium-mac'),
+ self._webkit_baseline_path('chromium'),
self._webkit_baseline_path('mac' + self.version()),
self._webkit_baseline_path('mac')]
- def check_sys_deps(self):
- # We have no specific platform dependencies.
- return True
-
- def num_cores(self):
- return int(subprocess.Popen(['sysctl','-n','hw.ncpu'],
- stdout=subprocess.PIPE).stdout.read())
+ def check_build(self, needs_http):
+ result = chromium.ChromiumPort.check_build(self, needs_http)
+ result = self._check_wdiff_install() and result
+ if not result:
+ _log.error('For complete Mac build requirements, please see:')
+ _log.error('')
+ _log.error(' http://code.google.com/p/chromium/wiki/'
+ 'MacBuildInstructions')
+ return result
+
+ def driver_name(self):
+        """Name for this port's equivalent of DumpRenderTree."""
+ if self._options.use_drt:
+ return "DumpRenderTree"
+ return "TestShell"
def test_platform_name(self):
# We use 'mac' instead of 'chromium-mac'
@@ -81,21 +93,27 @@ class ChromiumMacPort(chromium.ChromiumPort):
#
def _build_path(self, *comps):
- return self.path_from_chromium_base('xcodebuild', self._options.target,
- *comps)
+ if self._options.use_drt:
+ return self.path_from_webkit_base('WebKit', 'chromium',
+ 'xcodebuild', *comps)
+ return self.path_from_chromium_base('xcodebuild', *comps)
+
+ def _check_wdiff_install(self):
+ f = open(os.devnull, 'w')
+ rcode = 0
+ try:
+ rcode = subprocess.call(['wdiff'], stderr=f)
+ except OSError:
+ _log.warning('wdiff not found. Install using MacPorts or some '
+ 'other means')
+ f.close()
+ return True
def _lighttpd_path(self, *comps):
return self.path_from_chromium_base('third_party', 'lighttpd',
'mac', *comps)
- def _kill_process(self, pid):
- """Forcefully kill the process.
-
- Args:
- pid: The id of the process to be killed.
- """
- os.kill(pid, signal.SIGKILL)
-
def _kill_all_process(self, process_name):
"""Kill any processes running under this name."""
# On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
@@ -116,25 +134,33 @@ class ChromiumMacPort(chromium.ChromiumPort):
'apache2-httpd.conf')
def _path_to_lighttpd(self):
- return self._lighttp_path('bin', 'lighttp')
+ return self._lighttpd_path('bin', 'lighttpd')
def _path_to_lighttpd_modules(self):
- return self._lighttp_path('lib')
+ return self._lighttpd_path('lib')
def _path_to_lighttpd_php(self):
return self._lighttpd_path('bin', 'php-cgi')
- def _path_to_driver(self):
- # TODO(pinkerton): make |target| happy with case-sensitive file
+ def _path_to_driver(self, configuration=None):
+ # FIXME: make |configuration| happy with case-sensitive file
# systems.
- return self._build_path('TestShell.app', 'Contents', 'MacOS',
- 'TestShell')
+ if not configuration:
+ configuration = self._options.configuration
+ return self._build_path(configuration, self.driver_name() + '.app',
+ 'Contents', 'MacOS', self.driver_name())
def _path_to_helper(self):
- return self._build_path('layout_test_helper')
+ binary_name = 'layout_test_helper'
+ if self._options.use_drt:
+ binary_name = 'LayoutTestHelper'
+ return self._build_path(self._options.configuration, binary_name)
def _path_to_image_diff(self):
- return self._build_path('image_diff')
+ binary_name = 'image_diff'
+ if self._options.use_drt:
+ binary_name = 'ImageDiff'
+ return self._build_path(self._options.configuration, binary_name)
def _path_to_wdiff(self):
return 'wdiff'
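
_check_wdiff_install() above probes for wdiff by simply trying to run it and catching OSError. As a reusable sketch (can_run is hypothetical, and assumes the probed binary is harmless to invoke with no arguments):

    import os
    import subprocess

    def can_run(binary_name):
        devnull = open(os.devnull, 'w')
        try:
            # A non-zero exit status is fine; only OSError means the
            # binary could not be found or launched at all.
            subprocess.call([binary_name], stdout=devnull, stderr=devnull)
            return True
        except OSError:
            return False
        finally:
            devnull.close()
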
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
index 5eb0ba1..2e3de85 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/chromium_win.py
@@ -29,6 +29,7 @@
"""Chromium Win implementation of the Port interface."""
+import logging
import os
import platform
import signal
@@ -37,6 +38,8 @@ import sys
import chromium
+_log = logging.getLogger("webkitpy.layout_tests.port.chromium_win")
+
class ChromiumWinPort(chromium.ChromiumPort):
"""Chromium Win implementation of the Port class."""
@@ -44,33 +47,37 @@ class ChromiumWinPort(chromium.ChromiumPort):
def __init__(self, port_name=None, options=None):
if port_name is None:
port_name = 'chromium-win' + self.version()
- if options and not hasattr(options, 'target'):
- options.target = 'Release'
+ if options and not hasattr(options, 'configuration'):
+ options.configuration = 'Release'
chromium.ChromiumPort.__init__(self, port_name, options)
def baseline_search_path(self):
dirs = []
if self._name == 'chromium-win-xp':
- dirs.append(self._chromium_baseline_path(self._name))
+ dirs.append(self._webkit_baseline_path('chromium-win-xp'))
if self._name in ('chromium-win-xp', 'chromium-win-vista'):
- dirs.append(self._chromium_baseline_path('chromium-win-vista'))
- dirs.append(self._chromium_baseline_path('chromium-win'))
+ dirs.append(self._webkit_baseline_path('chromium-win-vista'))
+ dirs.append(self._webkit_baseline_path('chromium-win'))
+ dirs.append(self._webkit_baseline_path('chromium'))
dirs.append(self._webkit_baseline_path('win'))
dirs.append(self._webkit_baseline_path('mac'))
return dirs
- def check_sys_deps(self):
- # TODO(dpranke): implement this
- return True
+ def check_build(self, needs_http):
+ result = chromium.ChromiumPort.check_build(self, needs_http)
+ if not result:
+ _log.error('For complete Windows build requirements, please '
+ 'see:')
+ _log.error('')
+ _log.error(' http://dev.chromium.org/developers/how-tos/'
+ 'build-instructions-windows')
+ return result
def get_absolute_path(self, filename):
"""Return the absolute path in unix format for the given filename."""
abspath = os.path.abspath(filename)
return abspath.replace('\\', '/')
- def num_cores(self):
- return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
-
def relative_test_filename(self, filename):
path = filename[len(self.layout_tests_dir()) + 1:]
return path.replace('\\', '/')
@@ -80,6 +87,8 @@ class ChromiumWinPort(chromium.ChromiumPort):
return 'win' + self.version()
def version(self):
+ if not hasattr(sys, 'getwindowsversion'):
+ return ''
winver = sys.getwindowsversion()
if winver[0] == 6 and (winver[1] == 1):
return '-7'
@@ -94,24 +103,15 @@ class ChromiumWinPort(chromium.ChromiumPort):
#
def _build_path(self, *comps):
- # FIXME(dpranke): allow for builds under 'chrome' as well.
- return self.path_from_chromium_base('webkit', self._options.target,
- *comps)
+ p = self.path_from_chromium_base('webkit', *comps)
+ if os.path.exists(p):
+ return p
+ return self.path_from_chromium_base('chrome', *comps)
def _lighttpd_path(self, *comps):
return self.path_from_chromium_base('third_party', 'lighttpd', 'win',
*comps)
- def _kill_process(self, pid):
- """Forcefully kill the process.
-
- Args:
- pid: The id of the process to be killed.
- """
- subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)),
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
-
def _path_to_apache(self):
return self.path_from_chromium_base('third_party', 'cygwin', 'usr',
'sbin', 'httpd')
@@ -129,14 +129,16 @@ class ChromiumWinPort(chromium.ChromiumPort):
def _path_to_lighttpd_php(self):
return self._lighttpd_path('php5', 'php-cgi.exe')
- def _path_to_driver(self):
- return self._build_path('test_shell.exe')
+ def _path_to_driver(self, configuration=None):
+ if not configuration:
+ configuration = self._options.configuration
+ return self._build_path(configuration, 'test_shell.exe')
def _path_to_helper(self):
- return self._build_path('layout_test_helper.exe')
+ return self._build_path(self._options.configuration, 'layout_test_helper.exe')
def _path_to_image_diff(self):
- return self._build_path('image_diff.exe')
+ return self._build_path(self._options.configuration, 'image_diff.exe')
def _path_to_wdiff(self):
return self.path_from_chromium_base('third_party', 'cygwin', 'bin',
@@ -150,8 +152,10 @@ class ChromiumWinPort(chromium.ChromiumPort):
server_pid: The process ID of the running server.
"""
subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'),
+ stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
+ stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).wait()
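
version() above guards sys.getwindowsversion() so non-Windows callers get ''. Only the Windows 7 branch is visible in this hunk; the '-vista' and '-xp' cases below are assumptions inferred from the baseline directory names used in baseline_search_path():

    import sys

    def windows_version_suffix():
        if not hasattr(sys, 'getwindowsversion'):
            return ''  # Not running on Windows.
        major, minor = sys.getwindowsversion()[:2]
        if (major, minor) == (6, 1):
            return '-7'
        if (major, minor) == (6, 0):
            return '-vista'  # assumed mapping
        if (major, minor) == (5, 1):
            return '-xp'     # assumed mapping
        return ''
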
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py
new file mode 100644
index 0000000..7a6717f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/dryrun.py
@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This is a test implementation of the Port interface that generates the
+ correct output for every test. It can be used for perf testing, because
+ it is pretty much a lower limit on how fast a port can possibly run.
+
+ This implementation acts as a wrapper around a real port (the real port
+ is held as a delegate object). To specify which port, use the port name
+ 'dryrun-XXX' (e.g., 'dryrun-chromium-mac-leopard'). If you use just
+ 'dryrun', it uses the default port.
+
+ Note that because this is really acting as a wrapper around the underlying
+ port, you must be able to run the underlying port as well
+ (check_build() and check_sys_deps() must pass and auxiliary binaries
+ like layout_test_helper and httpd must work).
+
+ This implementation also modifies the test expectations so that all
+ tests are either SKIPPED or expected to PASS."""
+
+from __future__ import with_statement
+
+import base
+import factory
+
+
+def _read_file(path, mode='r'):
+ """Return the contents of a file as a string.
+
+ Returns '' if anything goes wrong, instead of throwing an IOError.
+
+ """
+ contents = ''
+ try:
+ with open(path, mode) as f:
+ contents = f.read()
+ except IOError:
+ pass
+ return contents
+
+
+def _write_file(path, contents, mode='w'):
+ """Write the string to the specified path.
+
+ Returns nothing if the write fails, instead of raising an IOError.
+
+ """
+ try:
+ with open(path, mode) as f:
+ f.write(contents)
+ except IOError:
+ pass
+
+
+class DryRunPort(object):
+ """DryRun implementation of the Port interface."""
+
+ def __init__(self, port_name=None, options=None):
+ pfx = 'dryrun-'
+ if port_name.startswith(pfx):
+ port_name = port_name[len(pfx):]
+ else:
+ port_name = None
+ self.__delegate = factory.get(port_name, options)
+
+ def __getattr__(self, name):
+ return getattr(self.__delegate, name)
+
+ def check_build(self, needs_http):
+ return True
+
+ def check_sys_deps(self, needs_http):
+ return True
+
+ def start_helper(self):
+ pass
+
+ def start_http_server(self):
+ pass
+
+ def start_websocket_server(self):
+ pass
+
+ def stop_helper(self):
+ pass
+
+ def stop_http_server(self):
+ pass
+
+ def stop_websocket_server(self):
+ pass
+
+ def start_driver(self, image_path, options):
+ return DryrunDriver(self, image_path, options)
+
+
+class DryrunDriver(base.Driver):
+ """Dryrun implementation of the DumpRenderTree / Driver interface."""
+
+ def __init__(self, port, image_path, test_driver_options):
+ self._port = port
+ self._driver_options = test_driver_options
+ self._image_path = image_path
+ self._layout_tests_dir = None
+
+ def poll(self):
+ return None
+
+ def returncode(self):
+ return 0
+
+ def run_test(self, uri, timeoutms, image_hash):
+ test_name = self._uri_to_test(uri)
+
+ text_filename = self._port.expected_filename(test_name, '.txt')
+ text_output = _read_file(text_filename)
+
+ if image_hash:
+ image_filename = self._port.expected_filename(test_name, '.png')
+ image = _read_file(image_filename, 'rb')
+ if self._image_path:
+ _write_file(self._image_path, image)
+ hash_filename = self._port.expected_filename(test_name,
+ '.checksum')
+ hash = _read_file(hash_filename)
+ else:
+ hash = None
+ return (False, False, hash, text_output, None)
+
+ def stop(self):
+ pass
+
+ def _uri_to_test(self, uri):
+        """Return the layout test path for a given URI.
+
+        This returns the test path for a given URI, e.g., if you passed in
+        "file:///src/LayoutTests/fast/html/keygen.html" it would return
+        "/src/LayoutTests/fast/html/keygen.html".
+
+ """
+ if not self._layout_tests_dir:
+ self._layout_tests_dir = self._port.layout_tests_dir()
+ test = uri
+
+ if uri.startswith("file:///"):
+ test = test.replace('file://', '')
+ return test
+ elif uri.startswith("http://127.0.0.1:8880/"):
+ # websocket tests
+ test = test.replace('http://127.0.0.1:8880/',
+ self._layout_tests_dir + '/')
+ return test
+ elif uri.startswith("http://"):
+ # regular HTTP test
+ test = test.replace('http://127.0.0.1:8000/',
+ self._layout_tests_dir + '/http/tests/')
+ return test
+ elif uri.startswith("https://"):
+ test = test.replace('https://127.0.0.1:8443/',
+ self._layout_tests_dir + '/http/tests/')
+ return test
+ else:
+ raise NotImplementedError('unknown url type: %s' % uri)
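
DryRunPort forwards everything it does not explicitly override to a real port via __getattr__, which Python consults only after normal attribute lookup fails, so the overriding no-op methods shadow the delegate's. The wrapper idiom in isolation (Wrapper is a hypothetical name):

    class Wrapper(object):
        """Forwards unknown attribute lookups to a wrapped delegate."""

        def __init__(self, delegate):
            self.__delegate = delegate

        def __getattr__(self, name):
            # Only reached for attributes not defined on Wrapper itself.
            return getattr(self.__delegate, name)
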
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py
new file mode 100644
index 0000000..95b90da
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/factory.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Factory method to retrieve the appropriate port implementation."""
+
+
+import sys
+
+
+def get(port_name=None, options=None):
+ """Returns an object implementing the Port interface. If
+ port_name is None, this routine attempts to guess at the most
+ appropriate port on this platform."""
+ port_to_use = port_name
+ if port_to_use is None:
+ if sys.platform == 'win32' or sys.platform == 'cygwin':
+ if options and hasattr(options, 'chromium') and options.chromium:
+ port_to_use = 'chromium-win'
+ else:
+ port_to_use = 'win'
+ elif sys.platform == 'linux2':
+ port_to_use = 'chromium-linux'
+ elif sys.platform == 'darwin':
+ if options and hasattr(options, 'chromium') and options.chromium:
+ port_to_use = 'chromium-mac'
+ else:
+ port_to_use = 'mac'
+
+ if port_to_use is None:
+ raise NotImplementedError('unknown port; sys.platform = "%s"' %
+ sys.platform)
+
+ if port_to_use == 'test':
+ import test
+ return test.TestPort(port_name, options)
+ elif port_to_use.startswith('dryrun'):
+ import dryrun
+ return dryrun.DryRunPort(port_name, options)
+ elif port_to_use.startswith('mac'):
+ import mac
+ return mac.MacPort(port_name, options)
+ elif port_to_use.startswith('win'):
+ import win
+ return win.WinPort(port_name, options)
+ elif port_to_use.startswith('gtk'):
+ import gtk
+ return gtk.GtkPort(port_name, options)
+ elif port_to_use.startswith('qt'):
+ import qt
+ return qt.QtPort(port_name, options)
+ elif port_to_use.startswith('chromium-mac'):
+ import chromium_mac
+ return chromium_mac.ChromiumMacPort(port_name, options)
+ elif port_to_use.startswith('chromium-linux'):
+ import chromium_linux
+ return chromium_linux.ChromiumLinuxPort(port_name, options)
+ elif port_to_use.startswith('chromium-win'):
+ import chromium_win
+ return chromium_win.ChromiumWinPort(port_name, options)
+
+ raise NotImplementedError('unsupported port: %s' % port_to_use)
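
With port/__init__.py now re-exporting factory.get() (see the earlier hunk), callers obtain a port in a single call. A hedged usage sketch (options=None is accepted here, though real callers pass their parsed optparse values):

    from webkitpy.layout_tests import port

    # port_name=None asks the factory to guess from sys.platform,
    # e.g. 'mac' on darwin unless options.chromium is set.
    p = port.get(port_name=None, options=None)
    print p.name()
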
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py
new file mode 100644
index 0000000..de5e28a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -0,0 +1,91 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit Gtk implementation of the Port interface."""
+
+import logging
+import os
+import signal
+import subprocess
+
+from webkitpy.layout_tests.port.webkit import WebKitPort
+
+_log = logging.getLogger("webkitpy.layout_tests.port.gtk")
+
+
+class GtkPort(WebKitPort):
+ """WebKit Gtk implementation of the Port class."""
+
+ def __init__(self, port_name=None, options=None):
+ if port_name is None:
+ port_name = 'gtk'
+ WebKitPort.__init__(self, port_name, options)
+
+ def _tests_for_other_platforms(self):
+ # FIXME: This list could be dynamic based on platform name and
+ # pushed into base.Port.
+ # This really need to be automated.
+ return [
+ "platform/chromium",
+ "platform/win",
+ "platform/qt",
+ "platform/mac",
+ ]
+
+ def _path_to_apache_config_file(self):
+ # FIXME: This needs to detect the distribution and change config files.
+ return os.path.join(self.layout_tests_dir(), 'http', 'conf',
+ 'apache2-debian-httpd.conf')
+
+ def _kill_all_process(self, process_name):
+ null = open(os.devnull)
+ subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
+ process_name], stderr=null)
+ null.close()
+
+ def _shut_down_http_server(self, server_pid):
+ """Shut down the httpd web server. Blocks until it's fully
+ shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ """
+ # server_pid is not set when "http_server.py stop" is run manually.
+ if server_pid is None:
+ # FIXME: This isn't ideal, since it could conflict with
+            # apache2 processes not started by http_server.py,
+ # but good enough for now.
+ self._kill_all_process('apache2')
+ else:
+ try:
+ os.kill(server_pid, signal.SIGTERM)
+                # FIXME: Maybe throw in a SIGKILL just to be sure?
+ except OSError:
+ # Sometimes we get a bad PID (e.g. from a stale httpd.pid
+ # file), so if kill fails on the given PID, just try to
+ # 'killall' web servers.
+ self._shut_down_http_server(None)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
index 0315704..cc434bc 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server.py
@@ -40,8 +40,11 @@ import tempfile
import time
import urllib
+import factory
import http_server_base
+_log = logging.getLogger("webkitpy.layout_tests.port.http_server")
+
class HttpdNotStarted(Exception):
pass
@@ -200,11 +203,11 @@ class Lighttpd(http_server_base.HttpServerBase):
env['PATH'])
if sys.platform == 'win32' and self._register_cygwin:
- setup_mount = port.path_from_chromium_base('third_party',
+ setup_mount = self._port_obj.path_from_chromium_base('third_party',
'cygwin', 'setup_mount.bat')
subprocess.Popen(setup_mount).wait()
- logging.debug('Starting http server')
+ _log.debug('Starting http server')
self._process = subprocess.Popen(start_cmd, env=env)
# Wait for server to start.
@@ -216,7 +219,7 @@ class Lighttpd(http_server_base.HttpServerBase):
if not server_started or self._process.returncode != None:
raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.')
- logging.debug("Server successfully started")
+ _log.debug("Server successfully started")
# TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are
# probably not being flushed, etc... why doesn't our python have os.kill ?
@@ -233,40 +236,3 @@ class Lighttpd(http_server_base.HttpServerBase):
if self._process:
self._process.wait()
self._process = None
-
-if '__main__' == __name__:
- # Provide some command line params for starting/stopping the http server
- # manually. Also used in ui_tests to run http layout tests in a browser.
- option_parser = optparse.OptionParser()
- option_parser.add_option('-k', '--server',
- help='Server action (start|stop)')
- option_parser.add_option('-p', '--port',
- help='Port to listen on (overrides layout test ports)')
- option_parser.add_option('-r', '--root',
- help='Absolute path to DocumentRoot (overrides layout test roots)')
- option_parser.add_option('--register_cygwin', action="store_true",
- dest="register_cygwin", help='Register Cygwin paths (on Win try bots)')
- option_parser.add_option('--run_background', action="store_true",
- dest="run_background",
- help='Run on background (for running as UI test)')
- options, args = option_parser.parse_args()
-
- if not options.server:
- print ('Usage: %s --server {start|stop} [--root=root_dir]'
- ' [--port=port_number]' % sys.argv[0])
- else:
- if (options.root is None) and (options.port is not None):
- # specifying root but not port means we want httpd on default
- # set of ports that LayoutTest use, but pointing to a different
- # source of tests. Specifying port but no root does not seem
- # meaningful.
- raise 'Specifying port requires also a root.'
- httpd = Lighttpd(tempfile.gettempdir(),
- port=options.port,
- root=options.root,
- register_cygwin=options.register_cygwin,
- run_background=options.run_background)
- if 'start' == options.server:
- httpd.start()
- else:
- httpd.stop(force=True)
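A recurring change throughout this patch is the move from the root logger (logging.debug(...)) to a module-level named logger. A minimal sketch of what that enables, using only the standard library:

    import logging

    _log = logging.getLogger("webkitpy.layout_tests.port.http_server")

    logging.basicConfig(level=logging.INFO)
    # Verbosity can now be raised for this one module without turning on
    # debug output from every other webkitpy module.
    _log.setLevel(logging.DEBUG)
    _log.debug('Starting http server')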
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py
index e82943e..c9805d6 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/http_server_base.py
@@ -34,6 +34,8 @@ import os
import time
import urllib
+_log = logging.getLogger("webkitpy.layout_tests.port.http_server_base")
+
class HttpServerBase(object):
@@ -47,6 +49,7 @@ class HttpServerBase(object):
while time.time() - start_time < 20:
if action():
return True
+ _log.debug("Waiting for action: %s" % action)
time.sleep(1)
return False
@@ -63,9 +66,9 @@ class HttpServerBase(object):
try:
response = urllib.urlopen(url)
- logging.debug("Server running at %s" % url)
+ _log.debug("Server running at %s" % url)
except IOError:
- logging.debug("Server NOT running at %s" % url)
+ _log.debug("Server NOT running at %s" % url)
return False
return True
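The loop above is a generic wait-for-condition helper: poll for up to 20 seconds, sleeping a second between attempts. The same pattern in isolation (the helper name and the pid-file example are illustrative):

    import os
    import time

    def wait_for(action, timeout_sec=20, interval_sec=1):
        # Poll action() until it returns True or the timeout elapses.
        start_time = time.time()
        while time.time() - start_time < timeout_sec:
            if action():
                return True
            time.sleep(interval_sec)
        return False

    # e.g. block until the server has written its pid file:
    # wait_for(lambda: os.path.exists('/tmp/httpd.pid'))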
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf b/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf
index d3150dd..2e9c82e 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/lighttpd.conf
@@ -21,6 +21,7 @@ mimetype.assign = (
".html" => "text/html",
".htm" => "text/html",
".xhtml" => "application/xhtml+xml",
+ ".xhtmlmp" => "application/vnd.wap.xhtml+xml",
".js" => "text/javascript",
".log" => "text/plain",
".conf" => "text/plain",
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
index d355f62..cf4daa8 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,31 +28,41 @@
"""WebKit Mac implementation of the Port interface."""
-import fcntl
import logging
import os
import pdb
import platform
-import select
+import re
+import shutil
import signal
import subprocess
import sys
import time
import webbrowser
-import base
+import webkitpy.common.system.ospath as ospath
+import webkitpy.layout_tests.port.server_process as server_process
+from webkitpy.layout_tests.port.webkit import WebKitPort, WebKitDriver
-import webkitpy
-from webkitpy import executive
+_log = logging.getLogger("webkitpy.layout_tests.port.mac")
-class MacPort(base.Port):
+
+class MacPort(WebKitPort):
"""WebKit Mac implementation of the Port class."""
def __init__(self, port_name=None, options=None):
if port_name is None:
port_name = 'mac' + self.version()
- base.Port.__init__(self, port_name, options)
- self._cached_build_root = None
+ WebKitPort.__init__(self, port_name, options)
+
+ def default_child_processes(self):
+ # FIXME: new-run-webkit-tests is unstable on Mac running more than
+ # four threads in parallel.
+ # See https://bugs.webkit.org/show_bug.cgi?id=36622
+ child_processes = WebKitPort.default_child_processes(self)
+ if child_processes > 4:
+ return 4
+ return child_processes
def baseline_search_path(self):
dirs = []
@@ -66,53 +75,13 @@ class MacPort(base.Port):
dirs.append(self._webkit_baseline_path('mac'))
return dirs
- def check_sys_deps(self):
- if executive.run_command([self.script_path("build-dumprendertree")], return_exit_code=True) != 0:
- return False
-
- driver_path = self._path_to_driver()
- if not os.path.exists(driver_path):
- logging.error("DumpRenderTree was not found at %s" % driver_path)
- return False
-
- # This should also validate that the ImageDiff path is valid (once this script knows how to use ImageDiff).
- # https://bugs.webkit.org/show_bug.cgi?id=34826
- return True
-
- def num_cores(self):
- return int(os.popen2("sysctl -n hw.ncpu")[1].read())
-
- def results_directory(self):
- return ('/tmp/run-chromium-webkit-tests-' +
- self._options.results_directory)
-
- def setup_test_run(self):
- # This port doesn't require any specific configuration.
- pass
-
- def show_results_html_file(self, results_filename):
- uri = self.filename_to_uri(results_filename)
- webbrowser.open(uri, new=1)
-
- def start_driver(self, image_path, options):
- """Starts a new Driver and returns a handle to it."""
- return MacDriver(self, image_path, options)
-
- def start_helper(self):
- # This port doesn't use a helper process.
- pass
-
- def stop_helper(self):
- # This port doesn't use a helper process.
- pass
-
- def test_base_platform_names(self):
- # At the moment we don't use test platform names, but we have
- # to return something.
- return ('mac',)
+ def path_to_test_expectations_file(self):
+ return self.path_from_webkit_base('LayoutTests', 'platform',
+ 'mac', 'test_expectations.txt')
def _skipped_file_paths(self):
- # FIXME: This method will need to be made work for non-mac platforms and moved into base.Port.
+ # FIXME: This method will need to be made work for non-mac
+ # platforms and moved into base.Port.
skipped_files = []
if self._name in ('mac-tiger', 'mac-leopard', 'mac-snowleopard'):
skipped_files.append(os.path.join(
@@ -121,79 +90,8 @@ class MacPort(base.Port):
'Skipped'))
return skipped_files
- def _tests_for_other_platforms(self):
- # The original run-webkit-tests builds up a "whitelist" of tests to run, and passes that to DumpRenderTree.
- # run-chromium-webkit-tests assumes we run *all* tests and test_expectations.txt functions as a blacklist.
- # FIXME: This list could be dynamic based on platform name and pushed into base.Port.
- return [
- "platform/chromium",
- "platform/gtk",
- "platform/qt",
- "platform/win",
- ]
-
- def _tests_for_disabled_features(self):
- # FIXME: This should use the feature detection from webkitperl/features.pm to match run-webkit-tests.
- # For now we hard-code a list of features known to be disabled on the Mac platform.
- disabled_feature_tests = [
- "fast/xhtmlmp",
- "http/tests/wml",
- "mathml",
- "wml",
- ]
- # FIXME: webarchive tests expect to read-write from -expected.webarchive files instead of .txt files.
- # This script doesn't know how to do that yet, so pretend they're just "disabled".
- webarchive_tests = [
- "webarchive",
- "svg/webarchive",
- "http/tests/webarchive",
- "svg/custom/image-with-prefix-in-webarchive.svg",
- ]
- return disabled_feature_tests + webarchive_tests
-
- def _tests_from_skipped_file(self, skipped_file):
- tests_to_skip = []
- for line in skipped_file.readlines():
- line = line.strip()
- if line.startswith('#') or not len(line):
- continue
- tests_to_skip.append(line)
- return tests_to_skip
-
- def _expectations_from_skipped_files(self):
- tests_to_skip = []
- for filename in self._skipped_file_paths():
- if not os.path.exists(filename):
- logging.warn("Failed to open Skipped file: %s" % filename)
- continue
- skipped_file = file(filename)
- tests_to_skip.extend(self._tests_from_skipped_file(skipped_file))
- skipped_file.close()
- return tests_to_skip
-
- def test_expectations(self):
- # The WebKit mac port uses 'Skipped' files at the moment. Each
- # file contains a list of files or directories to be skipped during
- # the test run. The total list of tests to skipped is given by the
- # contents of the generic Skipped file found in platform/X plus
- # a version-specific file found in platform/X-version. Duplicate
- # entries are allowed. This routine reads those files and turns
- # contents into the format expected by test_expectations.
- tests_to_skip = set(self._expectations_from_skipped_files()) # Use a set to allow duplicates
- tests_to_skip.update(self._tests_for_other_platforms())
- tests_to_skip.update(self._tests_for_disabled_features())
- expectations = map(lambda test_path: "BUG_SKIPPED SKIP : %s = FAIL" % test_path, tests_to_skip)
- return "\n".join(expectations)
-
def test_platform_name(self):
- # At the moment we don't use test platform names, but we have
- # to return something.
- return 'mac'
-
- def test_platform_names(self):
- # At the moment we don't use test platform names, but we have
- # to return something.
- return ('mac',)
+ return 'mac' + self.version()
def version(self):
os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
@@ -208,23 +106,32 @@ class MacPort(base.Port):
return '-snowleopard'
return ''
- #
- # PROTECTED METHODS
- #
-
- def _build_path(self, *comps):
- if not self._cached_build_root:
- self._cached_build_root = executive.run_command([self.script_path("webkit-build-directory"), "--top-level"]).rstrip()
- return os.path.join(self._cached_build_root, self._options.target, *comps)
+ def _build_java_test_support(self):
+ java_tests_path = os.path.join(self.layout_tests_dir(), "java")
+ build_java = ["/usr/bin/make", "-C", java_tests_path]
+ if self._executive.run_command(build_java, return_exit_code=True):
+ _log.error("Failed to build Java support files: %s" % build_java)
+ return False
+ return True
- def _kill_process(self, pid):
- """Forcefully kill the process.
+ def _check_port_build(self):
+ return self._build_java_test_support()
- Args:
- pid: The id of the process to be killed.
- """
- os.kill(pid, signal.SIGKILL)
+ def _tests_for_other_platforms(self):
+ # The original run-webkit-tests builds up a "whitelist" of tests to
+ # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes
+ # we run *all* tests and test_expectations.txt functions as a
+ # blacklist.
+ # FIXME: This list could be dynamic based on platform name and
+ # pushed into base.Port.
+ return [
+ "platform/chromium",
+ "platform/gtk",
+ "platform/qt",
+ "platform/win",
+ ]
+ # FIXME: This doesn't have anything to do with WebKit.
def _kill_all_process(self, process_name):
# On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
# -SIGNALNUMBER must come first. Example problem:
@@ -236,25 +143,11 @@ class MacPort(base.Port):
process_name], stderr=null)
null.close()
- def _path_to_apache(self):
- return '/usr/sbin/httpd'
-
def _path_to_apache_config_file(self):
return os.path.join(self.layout_tests_dir(), 'http', 'conf',
'apache2-httpd.conf')
- def _path_to_driver(self):
- return self._build_path('DumpRenderTree')
-
- def _path_to_helper(self):
- return None
-
- def _path_to_image_diff(self):
- return self._build_path('image_diff') # FIXME: This is wrong and should be "ImageDiff", but having the correct path causes other parts of the script to hang.
-
- def _path_to_wdiff(self):
- return 'wdiff' # FIXME: This does not exist on a default Mac OS X Leopard install.
-
+ # FIXME: This doesn't have anything to do with WebKit.
def _shut_down_http_server(self, server_pid):
"""Shut down the lighttpd web server. Blocks until it's fully
shut down.
@@ -264,209 +157,16 @@ class MacPort(base.Port):
"""
# server_pid is not set when "http_server.py stop" is run manually.
if server_pid is None:
- # TODO(mmoss) This isn't ideal, since it could conflict with
+ # FIXME: This isn't ideal, since it could conflict with
# lighttpd processes not started by http_server.py,
# but good enough for now.
self._kill_all_process('httpd')
else:
try:
os.kill(server_pid, signal.SIGTERM)
- # TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
+ # FIXME: Maybe throw in a SIGKILL just to be sure?
except OSError:
# Sometimes we get a bad PID (e.g. from a stale httpd.pid
# file), so if kill fails on the given PID, just try to
# 'killall' web servers.
self._shut_down_http_server(None)
-
-
-class MacDriver(base.Driver):
- """implementation of the DumpRenderTree interface."""
-
- def __init__(self, port, image_path, driver_options):
- self._port = port
- self._driver_options = driver_options
- self._target = port._options.target
- self._image_path = image_path
- self._stdout_fd = None
- self._cmd = None
- self._env = None
- self._proc = None
- self._read_buffer = ''
-
- cmd = []
- # Hook for injecting valgrind or other runtime instrumentation,
- # used by e.g. tools/valgrind/valgrind_tests.py.
- wrapper = os.environ.get("BROWSER_WRAPPER", None)
- if wrapper != None:
- cmd += [wrapper]
- if self._port._options.wrapper:
- # This split() isn't really what we want -- it incorrectly will
- # split quoted strings within the wrapper argument -- but in
- # practice it shouldn't come up and the --help output warns
- # about it anyway.
- cmd += self._options.wrapper.split()
- # FIXME: Using arch here masks any possible file-not-found errors from a non-existant driver executable.
- cmd += ['arch', '-i386', port._path_to_driver(), '-']
-
- # FIXME: This is a hack around our lack of ImageDiff support for now.
- if not self._port._options.no_pixel_tests:
- logging.warn("This port does not yet support pixel tests.")
- self._port._options.no_pixel_tests = True
- #cmd.append('--pixel-tests')
-
- #if driver_options:
- # cmd += driver_options
- env = os.environ
- env['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
- self._cmd = cmd
- self._env = env
- self.restart()
-
- def poll(self):
- return self._proc.poll()
-
- def restart(self):
- self.stop()
- self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- env=self._env)
-
- def returncode(self):
- return self._proc.returncode
-
- def run_test(self, uri, timeoutms, image_hash):
- output = []
- error = []
- image = ''
- crash = False
- timeout = False
- actual_uri = None
- actual_image_hash = None
-
- if uri.startswith("file:///"):
- cmd = uri[7:]
- else:
- cmd = uri
-
- if image_hash:
- cmd += "'" + image_hash
- cmd += "\n"
-
- self._proc.stdin.write(cmd)
- self._stdout_fd = self._proc.stdout.fileno()
- fl = fcntl.fcntl(self._stdout_fd, fcntl.F_GETFL)
- fcntl.fcntl(self._stdout_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
-
- stop_time = time.time() + (int(timeoutms) / 1000.0)
- resp = ''
- (timeout, line) = self._read_line(timeout, stop_time)
- resp += line
- have_seen_content_type = False
- while not timeout and line.rstrip() != "#EOF":
- # Make sure we haven't crashed.
- if line == '' and self.poll() is not None:
- # This is hex code 0xc000001d, which is used for abrupt
- # termination. This happens if we hit ctrl+c from the prompt
- # and we happen to be waiting on the test_shell.
- # sdoyon: Not sure for which OS and in what circumstances the
- # above code is valid. What works for me under Linux to detect
- # ctrl+c is for the subprocess returncode to be negative
- # SIGINT. And that agrees with the subprocess documentation.
- if (-1073741510 == self.returncode() or
- - signal.SIGINT == self.returncode()):
- raise KeyboardInterrupt
- crash = True
- break
-
- elif (line.startswith('Content-Type:') and not
- have_seen_content_type):
- have_seen_content_type = True
- pass
- else:
- output.append(line)
-
- (timeout, line) = self._read_line(timeout, stop_time)
- resp += line
-
- # Now read a second block of text for the optional image data
- image_length = 0
- (timeout, line) = self._read_line(timeout, stop_time)
- resp += line
- HASH_HEADER = 'ActualHash: '
- LENGTH_HEADER = 'Content-Length: '
- while not timeout and not crash and line.rstrip() != "#EOF":
- if line == '' and self.poll() is not None:
- if (-1073741510 == self.returncode() or
- - signal.SIGINT == self.returncode()):
- raise KeyboardInterrupt
- crash = True
- break
- elif line.startswith(HASH_HEADER):
- actual_image_hash = line[len(HASH_HEADER):].strip()
- elif line.startswith('Content-Type:'):
- pass
- elif line.startswith(LENGTH_HEADER):
- image_length = int(line[len(LENGTH_HEADER):])
- elif image_length:
- image += line
-
- (timeout, line) = self._read_line(timeout, stop_time, image_length)
- resp += line
-
- if timeout:
- self.restart()
-
- if self._image_path and len(self._image_path):
- image_file = file(self._image_path, "wb")
- image_file.write(image)
- image_file.close()
-
- return (crash, timeout, actual_image_hash,
- ''.join(output), ''.join(error))
-
- def stop(self):
- if self._proc:
- self._proc.stdin.close()
- self._proc.stdout.close()
- if self._proc.stderr:
- self._proc.stderr.close()
- if (sys.platform not in ('win32', 'cygwin') and
- not self._proc.poll()):
- # Closing stdin/stdout/stderr hangs sometimes on OS X.
- null = open(os.devnull, "w")
- subprocess.Popen(["kill", "-9",
- str(self._proc.pid)], stderr=null)
- null.close()
-
- def _read_line(self, timeout, stop_time, image_length=0):
- now = time.time()
- read_fds = []
-
- # first check to see if we have a line already read or if we've
- # read the entire image
- if image_length and len(self._read_buffer) >= image_length:
- out = self._read_buffer[0:image_length]
- self._read_buffer = self._read_buffer[image_length:]
- return (timeout, out)
-
- idx = self._read_buffer.find('\n')
- if not image_length and idx != -1:
- out = self._read_buffer[0:idx + 1]
- self._read_buffer = self._read_buffer[idx + 1:]
- return (timeout, out)
-
- # If we've timed out, return just what we have, if anything
- if timeout or now >= stop_time:
- out = self._read_buffer
- self._read_buffer = ''
- return (True, out)
-
- (read_fds, write_fds, err_fds) = select.select(
- [self._stdout_fd], [], [], stop_time - now)
- try:
- if timeout or len(read_fds) == 1:
- self._read_buffer += self._proc.stdout.read()
- except IOError, e:
- read = []
- return self._read_line(timeout, stop_time)
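For reference, the body of version() that the hunk above elides maps the Mac OS X release to the suffix used by the versioned baseline directories; a reconstruction under that assumption (the helper name is illustrative):

    import platform

    def mac_version_suffix():
        os_version_string = platform.mac_ver()[0]  # e.g. "10.5.6"
        if not os_version_string:
            return ''
        release = int(os_version_string.split('.')[1])
        if release == 4:
            return '-tiger'
        if release == 5:
            return '-leopard'
        if release == 6:
            return '-snowleopard'
        return ''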
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py
new file mode 100644
index 0000000..67cdefe
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""QtWebKit implementation of the Port interface."""
+
+import logging
+import os
+import subprocess
+import signal
+
+from webkitpy.layout_tests.port.webkit import WebKitPort
+
+_log = logging.getLogger("webkitpy.layout_tests.port.qt")
+
+
+class QtPort(WebKitPort):
+ """QtWebKit implementation of the Port class."""
+
+ def __init__(self, port_name=None, options=None):
+ if port_name is None:
+ port_name = 'qt'
+ WebKitPort.__init__(self, port_name, options)
+
+ def _tests_for_other_platforms(self):
+ # FIXME: This list could be dynamic based on platform name and
+ # pushed into base.Port.
+ # This really needs to be automated.
+ return [
+ "platform/chromium",
+ "platform/win",
+ "platform/gtk",
+ "platform/mac",
+ ]
+
+ def _path_to_apache_config_file(self):
+ # FIXME: This needs to detect the distribution and change config files.
+ return os.path.join(self.layout_tests_dir(), 'http', 'conf',
+ 'apache2-debian-httpd.conf')
+
+ def _kill_all_process(self, process_name):
+ null = open(os.devnull, 'w')
+ subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
+ process_name], stderr=null)
+ null.close()
+
+ def _shut_down_http_server(self, server_pid):
+ """Shut down the httpd web server. Blocks until it's fully
+ shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ """
+ # server_pid is not set when "http_server.py stop" is run manually.
+ if server_pid is None:
+ # FIXME: This isn't ideal, since it could conflict with
+ # lighttpd processes not started by http_server.py,
+ # but good enough for now.
+ self._kill_all_process('apache2')
+ else:
+ try:
+ os.kill(server_pid, signal.SIGTERM)
+ # TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
+ except OSError:
+ # Sometimes we get a bad PID (e.g. from a stale httpd.pid
+ # file), so if kill fails on the given PID, just try to
+ # 'killall' web servers.
+ self._shut_down_http_server(None)
+
+ def _build_driver(self):
+ # The Qt port builds DRT as part of the main build step
+ return True
+
+ def _path_to_driver(self):
+ return self._build_path('bin/DumpRenderTree')
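Taken together, gtk.py, qt.py, and win.py show how thin a port-specific subclass is. A hypothetical skeleton for a new port (ExamplePort and its config file name are invented for illustration):

    import os

    from webkitpy.layout_tests.port.webkit import WebKitPort

    class ExamplePort(WebKitPort):
        """Hypothetical minimal WebKitPort subclass."""

        def __init__(self, port_name=None, options=None):
            WebKitPort.__init__(self, port_name or 'example', options)

        def _tests_for_other_platforms(self):
            # Skip the other ports' platform-specific tests.
            return ["platform/chromium", "platform/gtk", "platform/mac",
                    "platform/qt", "platform/win"]

        def _path_to_apache_config_file(self):
            return os.path.join(self.layout_tests_dir(), 'http', 'conf',
                                'apache2-example-httpd.conf')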
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
new file mode 100644
index 0000000..f1c6d73
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Package that implements the ServerProcess wrapper class"""
+
+import fcntl
+import logging
+import os
+import select
+import signal
+import subprocess
+import sys
+import time
+
+_log = logging.getLogger("webkitpy.layout_tests.port.server_process")
+
+
+class ServerProcess(object):
+ """This class provides a wrapper around a subprocess that
+ implements a simple request/response usage model. The primary benefit
+ is that reading responses takes a timeout, so that we don't ever block
+ indefinitely. The class also transparently restarts the process as
+ necessary to keep issuing commands."""
+
+ def __init__(self, port_obj, name, cmd, env=None):
+ self._port = port_obj
+ self._name = name
+ self._cmd = cmd
+ self._env = env
+ self._reset()
+
+ def _reset(self):
+ self._proc = None
+ self._output = ''
+ self.crashed = False
+ self.timed_out = False
+ self.error = ''
+
+ def _start(self):
+ if self._proc:
+ raise ValueError("%s already running" % self._name)
+ self._reset()
+ close_fds = sys.platform not in ('win32', 'cygwin')
+ self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=close_fds,
+ env=self._env)
+ fd = self._proc.stdout.fileno()
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+ fd = self._proc.stderr.fileno()
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+
+ def handle_interrupt(self):
+ """This routine checks to see if the process crashed or exited
+ because of a keyboard interrupt and raises KeyboardInterrupt
+ accordingly."""
+ if self.crashed:
+ # This is hex code 0xc000001d, which is used for abrupt
+ # termination. This happens if we hit ctrl+c from the prompt
+ # and we happen to be waiting on the DumpRenderTree.
+ # sdoyon: Not sure for which OS and in what circumstances the
+ # above code is valid. What works for me under Linux to detect
+ # ctrl+c is for the subprocess returncode to be negative
+ # SIGINT. And that agrees with the subprocess documentation.
+ if (-1073741510 == self._proc.returncode or
+ -signal.SIGINT == self._proc.returncode):
+ raise KeyboardInterrupt
+ return
+
+ def poll(self):
+ """Check to see if the underlying process is running; returns None
+ if it still is (wrapper around subprocess.poll)."""
+ if self._proc:
+ return self._proc.poll()
+ return None
+
+ def returncode(self):
+ """Returns the exit code from the subprocess; returns None if the
+ process hasn't exited (this is a wrapper around subprocess.returncode).
+ """
+ if self._proc:
+ return self._proc.returncode
+ return None
+
+ def write(self, input):
+ """Write a request to the subprocess. The subprocess is (re-)start()'ed
+ if it is not already running."""
+ if not self._proc:
+ self._start()
+ self._proc.stdin.write(input)
+
+ def read_line(self, timeout):
+ """Read a single line from the subprocess, waiting until the deadline.
+ If the deadline passes, the call times out. Note that even if the
+ subprocess has crashed or the deadline has passed, if there is output
+ pending, it will be returned.
+
+ Args:
+ timeout: floating-point number of seconds the call is allowed
+ to block for. A zero or negative number will attempt to read
+ any existing data, but will not block. There is no way to
+ block indefinitely.
+ Returns:
+ output: data returned, if any. If no data is available and the
+ call times out or crashes, an empty string is returned. Note
+ that the returned string includes the newline ('\n')."""
+ return self._read(timeout, size=0)
+
+ def read(self, timeout, size):
+ """Attempts to read size characters from the subprocess, waiting until
+ the deadline passes. If the deadline passes, any available data will be
+ returned. Note that even if the deadline has passed or if the
+ subprocess has crashed, any available data will still be returned.
+
+ Args:
+ timeout: floating-point number of seconds the call is allowed
+ to block for. A zero or negative number will attempt to read
+ any existing data, but will not block. There is no way to
+ block indefinitely.
+ size: amount of data to read. Must be a positive integer.
+ Returns:
+ output: data returned, if any. If no data is available, an empty
+ string is returned.
+ """
+ if size <= 0:
+ raise ValueError('ServerProcess.read() called with a '
+ 'non-positive size: %d ' % size)
+ return self._read(timeout, size)
+
+ def _read(self, timeout, size):
+ """Internal routine that actually does the read."""
+ index = -1
+ out_fd = self._proc.stdout.fileno()
+ err_fd = self._proc.stderr.fileno()
+ select_fds = (out_fd, err_fd)
+ deadline = time.time() + timeout
+ while not self.timed_out and not self.crashed:
+ if self._proc.poll() != None:
+ self.crashed = True
+ self.handle_interrupt()
+
+ now = time.time()
+ if now > deadline:
+ self.timed_out = True
+
+ # Check to see if we have any output we can return.
+ if size and len(self._output) >= size:
+ index = size
+ elif size == 0:
+ index = self._output.find('\n') + 1
+
+ if index or self.crashed or self.timed_out:
+ output = self._output[0:index]
+ self._output = self._output[index:]
+ return output
+
+ # Nope - wait for more data.
+ (read_fds, write_fds, err_fds) = select.select(select_fds, [],
+ select_fds,
+ deadline - now)
+ try:
+ if out_fd in read_fds:
+ self._output += self._proc.stdout.read()
+ if err_fd in read_fds:
+ self.error += self._proc.stderr.read()
+ except IOError, e:
+ pass
+
+ def stop(self):
+ """Stop (shut down) the subprocess, if it is running."""
+ pid = self._proc.pid
+ self._proc.stdin.close()
+ self._proc.stdout.close()
+ if self._proc.stderr:
+ self._proc.stderr.close()
+ if sys.platform not in ('win32', 'cygwin'):
+ # Closing stdin/stdout/stderr sometimes hangs on OS X, and in
+ # any case we don't want to hang
+ # the harness if DumpRenderTree is buggy, so we wait a couple
+ # seconds to give DumpRenderTree a chance to clean up, but then
+ # force-kill the process if necessary.
+ KILL_TIMEOUT = 3.0
+ timeout = time.time() + KILL_TIMEOUT
+ while self._proc.poll() is None and time.time() < timeout:
+ time.sleep(0.1)
+ if self._proc.poll() is None:
+ _log.warning('stopping %s timed out, killing it' %
+ self._name)
+ null = open(os.devnull, "w")
+ subprocess.Popen(["kill", "-9",
+ str(self._proc.pid)], stderr=null)
+ null.close()
+ _log.warning('killed')
+ self._reset()
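A usage sketch for ServerProcess. The constructor only stores the port object, so None suffices for a smoke test, and /bin/cat stands in for DumpRenderTree (both assumptions for illustration):

    import webkitpy.layout_tests.port.server_process as server_process

    proc = server_process.ServerProcess(None, 'cat', ['/bin/cat'])
    proc.write('hello\n')       # the first write starts the process
    line = proc.read_line(5.0)  # 'hello\n', or '' on timeout
    if proc.crashed or proc.timed_out:
        proc.handle_interrupt() # turns a ctrl+c crash into KeyboardInterrupt
    proc.stop()                 # closes the pipes; kills the process if needed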
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
index c3e97be..edef485 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/test.py
@@ -52,31 +52,28 @@ class TestPort(base.Port):
def baseline_search_path(self):
return [self.baseline_path()]
- def check_sys_deps(self):
+ def check_build(self, needs_http):
return True
- def diff_image(self, actual_filename, expected_filename,
- diff_filename=None):
+ def compare_text(self, expected_text, actual_text):
return False
- def compare_text(self, actual_text, expected_text):
+ def diff_image(self, expected_filename, actual_filename,
+ diff_filename=None):
return False
- def diff_text(self, actual_text, expected_text,
- actual_filename, expected_filename):
+ def diff_text(self, expected_text, actual_text,
+ expected_filename, actual_filename):
return ''
def name(self):
return self._name
- def num_cores(self):
- return int(os.popen2("sysctl -n hw.ncpu")[1].read())
-
def options(self):
return self._options
def results_directory(self):
- return '/tmp' + self._options.results_directory
+ return '/tmp/' + self._options.results_directory
def setup_test_run(self):
pass
@@ -93,18 +90,12 @@ class TestPort(base.Port):
def start_websocket_server(self):
pass
- def start_helper(self):
- pass
-
def stop_http_server(self):
pass
def stop_websocket_server(self):
pass
- def stop_helper(self):
- pass
-
def test_expectations(self):
return ''
@@ -120,7 +111,7 @@ class TestPort(base.Port):
def version():
return ''
- def wdiff_text(self, actual_filename, expected_filename):
+ def wdiff_text(self, expected_filename, actual_filename):
return ''
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py
new file mode 100644
index 0000000..f2f5237
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -0,0 +1,448 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit implementations of the Port interface."""
+
+import logging
+import os
+import pdb
+import platform
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import time
+import webbrowser
+
+import webkitpy.common.system.ospath as ospath
+import webkitpy.layout_tests.port.base as base
+import webkitpy.layout_tests.port.server_process as server_process
+
+_log = logging.getLogger("webkitpy.layout_tests.port.webkit")
+
+
+class WebKitPort(base.Port):
+ """WebKit implementation of the Port class."""
+
+ def __init__(self, port_name=None, options=None):
+ base.Port.__init__(self, port_name, options)
+ self._cached_build_root = None
+ self._cached_apache_path = None
+
+ # FIXME: disable pixel tests until they are run by default on the
+ # build machines.
+ if options and (not hasattr(options, "pixel_tests") or
+ options.pixel_tests is None):
+ options.pixel_tests = False
+
+ def baseline_path(self):
+ return self._webkit_baseline_path(self._name)
+
+ def baseline_search_path(self):
+ return [self._webkit_baseline_path(self._name)]
+
+ def path_to_test_expectations_file(self):
+ return os.path.join(self._webkit_baseline_path(self._name),
+ 'test_expectations.txt')
+
+ # Only needed by ports which maintain versioned test expectations (like mac-tiger vs. mac-leopard)
+ def version(self):
+ return ''
+
+ def _build_driver(self):
+ return not self._executive.run_command([
+ self.script_path("build-dumprendertree"),
+ self.flag_from_configuration(self._options.configuration),
+ ], return_exit_code=True)
+
+ def _check_driver(self):
+ driver_path = self._path_to_driver()
+ if not os.path.exists(driver_path):
+ _log.error("DumpRenderTree was not found at %s" % driver_path)
+ return False
+ return True
+
+ def check_build(self, needs_http):
+ if self._options.build and not self._build_driver():
+ return False
+ if not self._check_driver():
+ return False
+ if self._options.pixel_tests:
+ if not self.check_image_diff():
+ return False
+ if not self._check_port_build():
+ return False
+ return True
+
+ def _check_port_build(self):
+ # Ports can override this method to do additional checks.
+ return True
+
+ def check_image_diff(self, override_step=None, logging=True):
+ image_diff_path = self._path_to_image_diff()
+ if not os.path.exists(image_diff_path):
+ _log.error("ImageDiff was not found at %s" % image_diff_path)
+ return False
+ return True
+
+ def diff_image(self, expected_filename, actual_filename,
+ diff_filename=None):
+ """Return True if the two files are different. Also write a delta
+ image of the two images into |diff_filename| if it is not None."""
+
+ # Handle the case where the test didn't actually generate an image.
+ actual_length = os.stat(actual_filename).st_size
+ if actual_length == 0:
+ if diff_filename:
+ shutil.copyfile(actual_filename, diff_filename)
+ return True
+
+ sp = self._diff_image_request(expected_filename, actual_filename)
+ return self._diff_image_reply(sp, expected_filename, diff_filename)
+
+ def _diff_image_request(self, expected_filename, actual_filename):
+ # FIXME: either expose the tolerance argument as a command-line
+ # parameter, or make it go away and always use exact matches.
+ command = [self._path_to_image_diff(), '--tolerance', '0.1']
+ sp = server_process.ServerProcess(self, 'ImageDiff', command)
+
+ actual_length = os.stat(actual_filename).st_size
+ actual_file = open(actual_filename, 'rb').read()
+ expected_length = os.stat(expected_filename).st_size
+ expected_file = open(expected_filename, 'rb').read()
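+ # The request is two images back to back, each preceded by its own
+ # Content-Length header, actual image first, e.g.:
+ # Content-Length: 8192\n<8192 bytes of PNG>Content-Length: ...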
+ sp.write('Content-Length: %d\n%sContent-Length: %d\n%s' %
+ (actual_length, actual_file, expected_length, expected_file))
+
+ return sp
+
+ def _diff_image_reply(self, sp, expected_filename, diff_filename):
+ timeout = 2.0
+ deadline = time.time() + timeout
+ output = sp.read_line(timeout)
+ while not sp.timed_out and not sp.crashed and output:
+ if output.startswith('Content-Length'):
+ m = re.match('Content-Length: (\d+)', output)
+ content_length = int(m.group(1))
+ timeout = deadline - time.time()
+ output = sp.read(timeout, content_length)
+ break
+ elif output.startswith('diff'):
+ break
+ else:
+ timeout = deadline - time.time()
+ output = sp.read_line(timeout)
+
+ result = True
+ if output.startswith('diff'):
+ m = re.match('diff: (.+)% (passed|failed)', output)
+ if m.group(2) == 'passed':
+ result = False
+ elif output and diff_filename:
+ diff_file = open(diff_filename, 'wb')
+ diff_file.write(output)
+ diff_file.close()
+ elif sp.timed_out:
+ _log.error("ImageDiff timed out on %s" % expected_filename)
+ elif sp.crashed:
+ _log.error("ImageDiff crashed")
+ sp.stop()
+ return result
+
+ def results_directory(self):
+ # Results are stored relative to the built products to make it easy
+ # to have multiple copies of webkit checked out and built.
+ return self._build_path(self._options.results_directory)
+
+ def setup_test_run(self):
+ # This port doesn't require any specific configuration.
+ pass
+
+ def show_results_html_file(self, results_filename):
+ uri = self.filename_to_uri(results_filename)
+ # FIXME: We should open results in the version of WebKit we built.
+ webbrowser.open(uri, new=1)
+
+ def start_driver(self, image_path, options):
+ return WebKitDriver(self, image_path, options)
+
+ def test_base_platform_names(self):
+ # At the moment we don't use test platform names, but we have
+ # to return something.
+ return ('mac', 'win')
+
+ def _tests_for_other_platforms(self):
+ raise NotImplementedError('WebKitPort._tests_for_other_platforms')
+ # The original run-webkit-tests builds up a "whitelist" of tests to
+ # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes
+ # we run *all* tests and test_expectations.txt functions as a
+ # blacklist.
+ # FIXME: This list could be dynamic based on platform name and
+ # pushed into base.Port.
+ return [
+ "platform/chromium",
+ "platform/gtk",
+ "platform/qt",
+ "platform/win",
+ ]
+
+ def _tests_for_disabled_features(self):
+ # FIXME: This should use the feature detection from
+ # webkitperl/features.pm to match run-webkit-tests.
+ # For now we hard-code a list of features known to be disabled on
+ # the Mac platform.
+ disabled_feature_tests = [
+ "fast/xhtmlmp",
+ "http/tests/wml",
+ "mathml",
+ "wml",
+ ]
+ # FIXME: webarchive tests expect to read-write from
+ # -expected.webarchive files instead of .txt files.
+ # This script doesn't know how to do that yet, so pretend they're
+ # just "disabled".
+ webarchive_tests = [
+ "webarchive",
+ "svg/webarchive",
+ "http/tests/webarchive",
+ "svg/custom/image-with-prefix-in-webarchive.svg",
+ ]
+ return disabled_feature_tests + webarchive_tests
+
+ def _tests_from_skipped_file(self, skipped_file):
+ tests_to_skip = []
+ for line in skipped_file.readlines():
+ line = line.strip()
+ if line.startswith('#') or not len(line):
+ continue
+ tests_to_skip.append(line)
+ return tests_to_skip
+
+ def _skipped_file_paths(self):
+ return [os.path.join(self._webkit_baseline_path(self._name),
+ 'Skipped')]
+
+ def _expectations_from_skipped_files(self):
+ tests_to_skip = []
+ for filename in self._skipped_file_paths():
+ if not os.path.exists(filename):
+ _log.warn("Failed to open Skipped file: %s" % filename)
+ continue
+ skipped_file = file(filename)
+ tests_to_skip.extend(self._tests_from_skipped_file(skipped_file))
+ skipped_file.close()
+ return tests_to_skip
+
+ def test_expectations(self):
+ # The WebKit mac port uses a combination of a test_expectations file
+ # and 'Skipped' files.
+ expectations_file = self.path_to_test_expectations_file()
+ expectations = file(expectations_file, "r").read()
+ return expectations + self._skips()
+
+ def _skips(self):
+ # Each Skipped file contains a list of files
+ # or directories to be skipped during the test run. The total list
+ # of tests to be skipped is given by the contents of the generic
+ # Skipped file found in platform/X plus a version-specific file
+ # found in platform/X-version. Duplicate entries are allowed.
+ # This routine reads those files and turns contents into the
+ # format expected by test_expectations.
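+ #
+ # For example, a Skipped entry "fast/events" becomes the line:
+ # BUG_SKIPPED SKIP : fast/events = FAIL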
+
+ # Use a set: entries may be duplicated across Skipped files.
+ tests_to_skip = set(self._expectations_from_skipped_files())
+
+ tests_to_skip.update(self._tests_for_other_platforms())
+ tests_to_skip.update(self._tests_for_disabled_features())
+ skip_lines = map(lambda test_path: "BUG_SKIPPED SKIP : %s = FAIL" %
+ test_path, tests_to_skip)
+ return "\n".join(skip_lines)
+
+ def test_platform_name(self):
+ return self._name + self.version()
+
+ def test_platform_names(self):
+ return self.test_base_platform_names() + (
+ 'mac-tiger', 'mac-leopard', 'mac-snowleopard')
+
+ def default_configuration(self):
+ # This is a bit of a hack. This state exists in a much nicer form in
+ # perl-land.
+ configuration = ospath.relpath(
+ self._webkit_build_directory(["--configuration"]),
+ self._webkit_build_directory(["--top-level"]))
+ assert(configuration == "Debug" or configuration == "Release")
+ return configuration
+
+ def _webkit_build_directory(self, args):
+ args = [self.script_path("webkit-build-directory")] + args
+ return self._executive.run_command(args).rstrip()
+
+ def _build_path(self, *comps):
+ if not self._cached_build_root:
+ self._cached_build_root = self._webkit_build_directory([
+ "--configuration",
+ self.flag_from_configuration(self._options.configuration),
+ ])
+ return os.path.join(self._cached_build_root, *comps)
+
+ def _path_to_driver(self):
+ return self._build_path('DumpRenderTree')
+
+ def _path_to_helper(self):
+ return None
+
+ def _path_to_image_diff(self):
+ return self._build_path('ImageDiff')
+
+ def _path_to_wdiff(self):
+ # FIXME: This does not exist on a default Mac OS X Leopard install.
+ return 'wdiff'
+
+ def _path_to_apache(self):
+ if not self._cached_apache_path:
+ # The Apache binary path can vary depending on OS and distribution
+ # See http://wiki.apache.org/httpd/DistrosDefaultLayout
+ for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]:
+ if os.path.exists(path):
+ self._cached_apache_path = path
+ break
+
+ if not self._cached_apache_path:
+ _log.error("Could not find apache. Not installed or unknown path.")
+
+ return self._cached_apache_path
+
+
+class WebKitDriver(base.Driver):
+ """WebKit implementation of the DumpRenderTree interface."""
+
+ def __init__(self, port, image_path, driver_options):
+ self._port = port
+ self._driver_options = driver_options
+ self._image_path = image_path
+
+ command = []
+ # Hook for injecting valgrind or other runtime instrumentation,
+ # used by e.g. tools/valgrind/valgrind_tests.py.
+ wrapper = os.environ.get("BROWSER_WRAPPER", None)
+ if wrapper != None:
+ command += [wrapper]
+ if self._port._options.wrapper:
+ # This split() isn't really what we want -- it incorrectly will
+ # split quoted strings within the wrapper argument -- but in
+ # practice it shouldn't come up and the --help output warns
+ # about it anyway.
+ # FIXME: Use a real shell parser.
+ command += self._options.wrapper.split()
+
+ command += [port._path_to_driver(), '-']
+
+ if image_path:
+ command.append('--pixel-tests')
+ environment = os.environ.copy()
+ environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
+ self._server_process = server_process.ServerProcess(self._port,
+ "DumpRenderTree", command, environment)
+
+ def poll(self):
+ return self._server_process.poll()
+
+ def restart(self):
+ self._server_process.stop()
+ self._server_process.start()
+ return
+
+ def returncode(self):
+ return self._server_process.returncode()
+
+ # FIXME: This function is huge.
+ def run_test(self, uri, timeoutms, image_hash):
+ if uri.startswith("file:///"):
+ command = uri[7:]
+ else:
+ command = uri
+
+ if image_hash:
+ command += "'" + image_hash
+ command += "\n"
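+ # DumpRenderTree reads one test per line on stdin; an expected pixel
+ # hash, when present, is appended after a "'" separator, e.g.:
+ # /path/to/LayoutTests/fast/example.html'<md5-hash>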
+
+ self._server_process.write(command)
+
+ have_seen_content_type = False
+ actual_image_hash = None
+ output = ''
+ image = ''
+
+ timeout = int(timeoutms) / 1000.0
+ deadline = time.time() + timeout
+ line = self._server_process.read_line(timeout)
+ while (not self._server_process.timed_out
+ and not self._server_process.crashed
+ and line.rstrip() != "#EOF"):
+ if (line.startswith('Content-Type:') and not
+ have_seen_content_type):
+ have_seen_content_type = True
+ else:
+ output += line
+ line = self._server_process.read_line(timeout)
+ timeout = deadline - time.time()
+
+ # Now read a second block of text for the optional image data
+ HASH_HEADER = 'ActualHash: '
+ LENGTH_HEADER = 'Content-Length: '
+ line = self._server_process.read_line(timeout)
+ while (not self._server_process.timed_out
+ and not self._server_process.crashed
+ and line.rstrip() != "#EOF"):
+ if line.startswith(HASH_HEADER):
+ actual_image_hash = line[len(HASH_HEADER):].strip()
+ elif line.startswith('Content-Type:'):
+ pass
+ elif line.startswith(LENGTH_HEADER):
+ timeout = deadline - time.time()
+ content_length = int(line[len(LENGTH_HEADER):])
+ image = self._server_process.read(timeout, content_length)
+ timeout = deadline - time.time()
+ line = self._server_process.read_line(timeout)
+
+ if self._image_path and len(self._image_path):
+ image_file = file(self._image_path, "wb")
+ image_file.write(image)
+ image_file.close()
+ return (self._server_process.crashed,
+ self._server_process.timed_out,
+ actual_image_hash,
+ output,
+ self._server_process.error)
+
+ def stop(self):
+ if self._server_process:
+ self._server_process.stop()
+ self._server_process = None
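A sketch of a driver session against the API above, assuming a port built by port/factory.py with the harness's parsed options (the test path and timeout are illustrative):

    from webkitpy.layout_tests.port import factory

    port = factory.get('mac', options)  # options: the parsed command-line flags
    driver = port.start_driver(image_path=None, options=None)
    crashed, timed_out, image_hash, text, error = driver.run_test(
        'file:///path/to/LayoutTests/fast/example.html',
        timeoutms=35000, image_hash=None)
    driver.stop()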
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
index 54c2f6f..a9ba160 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/websocket_server.py
@@ -39,8 +39,13 @@ import tempfile
import time
import urllib
+import factory
import http_server
+from webkitpy.common.system.executive import Executive
+
+_log = logging.getLogger("webkitpy.layout_tests.port.websocket_server")
+
_WS_LOG_PREFIX = 'pywebsocket.ws.log-'
_WSS_LOG_PREFIX = 'pywebsocket.wss.log-'
@@ -59,6 +64,7 @@ def url_is_alive(url):
Return:
True if the url is alive.
"""
+ sleep_time = 0.5
wait_time = 5
while wait_time > 0:
try:
@@ -67,9 +73,9 @@ def url_is_alive(url):
return True
except IOError:
pass
- wait_time -= 1
- # Wait a second and try again.
- time.sleep(1)
+ # Wait for sleep_time before trying again.
+ wait_time -= sleep_time
+ time.sleep(sleep_time)
return False
@@ -86,7 +92,7 @@ class PyWebSocket(http_server.Lighttpd):
def __init__(self, port_obj, output_dir, port=_DEFAULT_WS_PORT,
root=None, use_tls=False,
- register_cygwin=None,
+ register_cygwin=True,
pidfile=None):
"""Args:
output_dir: the absolute path to the layout test result directory
@@ -126,7 +132,7 @@ class PyWebSocket(http_server.Lighttpd):
def start(self):
if not self._web_socket_tests:
- logging.info('No need to start %s server.' % self._server_name)
+ _log.info('No need to start %s server.' % self._server_name)
return
if self.is_running():
raise PyWebSocketNotStarted('%s is already running.' %
@@ -150,27 +156,27 @@ class PyWebSocket(http_server.Lighttpd):
python_interp = sys.executable
pywebsocket_base = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
- os.path.dirname(os.path.dirname(
- os.path.abspath(__file__)))))), 'pywebsocket')
+ os.path.abspath(__file__)))), 'thirdparty', 'pywebsocket')
pywebsocket_script = os.path.join(pywebsocket_base, 'mod_pywebsocket',
'standalone.py')
start_cmd = [
python_interp, pywebsocket_script,
- '-p', str(self._port),
- '-d', self._layout_tests,
- '-s', self._web_socket_tests,
- '-x', '/websocket/tests/cookies',
- '-l', error_log,
+ '--server-host', '127.0.0.1',
+ '--port', str(self._port),
+ '--document-root', self._layout_tests,
+ '--scan-dir', self._web_socket_tests,
+ '--cgi-paths', '/websocket/tests',
+ '--log-file', error_log,
]
handler_map_file = os.path.join(self._web_socket_tests,
'handler_map.txt')
if os.path.exists(handler_map_file):
- logging.debug('Using handler_map_file: %s' % handler_map_file)
- start_cmd.append('-m')
+ _log.debug('Using handler_map_file: %s' % handler_map_file)
+ start_cmd.append('--websock-handlers-map-file')
start_cmd.append(handler_map_file)
else:
- logging.warning('No handler_map_file found')
+ _log.warning('No handler_map_file found')
if self._use_tls:
start_cmd.extend(['-t', '-k', self._private_key,
@@ -183,6 +189,8 @@ class PyWebSocket(http_server.Lighttpd):
self._port_obj.path_from_chromium_base('third_party',
'cygwin', 'bin'),
env['PATH'])
+ env['CYGWIN_PATH'] = self._port_obj.path_from_chromium_base(
+ 'third_party', 'cygwin', 'bin')
if sys.platform == 'win32' and self._register_cygwin:
setup_mount = self._port_obj.path_from_chromium_base(
@@ -192,16 +200,16 @@ class PyWebSocket(http_server.Lighttpd):
env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep +
env.get('PYTHONPATH', ''))
- logging.debug('Starting %s server on %d.' % (
- self._server_name, self._port))
- logging.debug('cmdline: %s' % ' '.join(start_cmd))
- self._process = subprocess.Popen(start_cmd, stdout=self._wsout,
+ _log.debug('Starting %s server on %d.' % (
+ self._server_name, self._port))
+ _log.debug('cmdline: %s' % ' '.join(start_cmd))
+ # FIXME: We should direct this call through Executive for testing.
+ self._process = subprocess.Popen(start_cmd,
+ stdin=open(os.devnull, 'r'),
+ stdout=self._wsout,
stderr=subprocess.STDOUT,
env=env)
- # Wait a bit before checking the liveness of the server.
- time.sleep(0.5)
-
if self._use_tls:
url = 'https'
else:
@@ -211,7 +219,7 @@ class PyWebSocket(http_server.Lighttpd):
fp = open(output_log)
try:
for line in fp:
- logging.error(line)
+ _log.error(line)
finally:
fp.close()
raise PyWebSocketNotStarted(
@@ -231,6 +239,7 @@ class PyWebSocket(http_server.Lighttpd):
if not force and not self.is_running():
return
+ pid = None
if self._process:
pid = self._process.pid
elif self._pidfile:
@@ -242,8 +251,9 @@ class PyWebSocket(http_server.Lighttpd):
raise PyWebSocketNotFound(
'Failed to find %s server pid.' % self._server_name)
- logging.debug('Shutting down %s server %d.' % (self._server_name, pid))
- self._port_obj._kill_process(pid)
+ _log.debug('Shutting down %s server %d.' % (self._server_name, pid))
+ # FIXME: We should use a non-static Executive for easier testing.
+ Executive().kill_process(pid)
if self._process:
self._process.wait()
@@ -252,53 +262,3 @@ class PyWebSocket(http_server.Lighttpd):
if self._wsout:
self._wsout.close()
self._wsout = None
-
-
-if '__main__' == __name__:
- # Provide some command line params for starting the PyWebSocket server
- # manually.
- option_parser = optparse.OptionParser()
- option_parser.add_option('--server', type='choice',
- choices=['start', 'stop'], default='start',
- help='Server action (start|stop)')
- option_parser.add_option('-p', '--port', dest='port',
- default=None, help='Port to listen on')
- option_parser.add_option('-r', '--root',
- help='Absolute path to DocumentRoot '
- '(overrides layout test roots)')
- option_parser.add_option('-t', '--tls', dest='use_tls',
- action='store_true',
- default=False, help='use TLS (wss://)')
- option_parser.add_option('-k', '--private_key', dest='private_key',
- default='', help='TLS private key file.')
- option_parser.add_option('-c', '--certificate', dest='certificate',
- default='', help='TLS certificate file.')
- option_parser.add_option('--register_cygwin', action="store_true",
- dest="register_cygwin",
- help='Register Cygwin paths (on Win try bots)')
- option_parser.add_option('--pidfile', help='path to pid file.')
- options, args = option_parser.parse_args()
-
- if not options.port:
- if options.use_tls:
- options.port = _DEFAULT_WSS_PORT
- else:
- options.port = _DEFAULT_WS_PORT
-
- kwds = {'port': options.port, 'use_tls': options.use_tls}
- if options.root:
- kwds['root'] = options.root
- if options.private_key:
- kwds['private_key'] = options.private_key
- if options.certificate:
- kwds['certificate'] = options.certificate
- kwds['register_cygwin'] = options.register_cygwin
- if options.pidfile:
- kwds['pidfile'] = options.pidfile
-
- pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds)
-
- if 'start' == options.server:
- pywebsocket.start()
- else:
- pywebsocket.stop(force=True)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py b/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py
new file mode 100644
index 0000000..2bf692b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/port/win.py
@@ -0,0 +1,75 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit Win implementation of the Port interface."""
+
+import logging
+import os
+import subprocess
+
+from webkitpy.layout_tests.port.webkit import WebKitPort
+
+_log = logging.getLogger("webkitpy.layout_tests.port.win")
+
+
+class WinPort(WebKitPort):
+ """WebKit Win implementation of the Port class."""
+
+ def __init__(self, port_name=None, options=None):
+ if port_name is None:
+ port_name = 'win'
+ WebKitPort.__init__(self, port_name, options)
+
+ def _tests_for_other_platforms(self):
+ # FIXME: This list could be dynamic based on platform name and
+ # pushed into base.Port.
+ # This really needs to be automated.
+ return [
+ "platform/chromium",
+ "platform/gtk",
+ "platform/qt",
+ "platform/mac",
+ ]
+
+ def _path_to_apache_config_file(self):
+ return os.path.join(self.layout_tests_dir(), 'http', 'conf',
+ 'cygwin-httpd.conf')
+
+ def _shut_down_http_server(self, server_pid):
+ """Shut down the httpd web server. Blocks until it's fully
+ shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ """
+ # Looks like we ignore server_pid.
+ # Copy/pasted from chromium-win.
+ subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
+ stdin=open(os.devnull, 'r'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).wait()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
index 4604a1a..b972154 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
@@ -41,6 +41,7 @@ The script does the following for each platform specified:
At the end, the script generates an html page that compares old and new baselines.
"""
+import copy
import logging
import optparse
import os
@@ -59,6 +60,9 @@ from layout_package import test_expectations
from test_types import image_diff
from test_types import text_diff
+_log = logging.getLogger("webkitpy.layout_tests."
+ "rebaseline_chromium_webkit_tests")
+
# Repository type constants.
REPO_SVN, REPO_UNKNOWN = range(2)
@@ -137,11 +141,11 @@ def log_dashed_string(text, platform, logging_level=logging.INFO):
msg = '%s %s %s' % (dashes, msg, dashes)
if logging_level == logging.ERROR:
- logging.error(msg)
+ _log.error(msg)
elif logging_level == logging.WARNING:
- logging.warn(msg)
+ _log.warn(msg)
else:
- logging.info(msg)
+ _log.info(msg)
def setup_html_directory(html_directory):
@@ -163,11 +167,11 @@ def setup_html_directory(html_directory):
os.mkdir(html_directory)
html_directory = os.path.join(html_directory, 'rebaseline_html')
- logging.info('Html directory: "%s"', html_directory)
+ _log.info('Html directory: "%s"', html_directory)
if os.path.exists(html_directory):
shutil.rmtree(html_directory, True)
- logging.info('Deleted file at html directory: "%s"', html_directory)
+ _log.info('Deleted file at html directory: "%s"', html_directory)
if not os.path.exists(html_directory):
os.mkdir(html_directory)
@@ -191,7 +195,7 @@ def get_result_file_fullpath(html_directory, baseline_filename, platform,
base, ext = os.path.splitext(baseline_filename)
result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext)
fullpath = os.path.join(html_directory, result_filename)
- logging.debug(' Result file full path: "%s".', fullpath)
+ _log.debug(' Result file full path: "%s".', fullpath)
return fullpath
@@ -200,12 +204,21 @@ class Rebaseliner(object):
REVISION_REGEX = r'<a href=\"(\d+)/\">'
- def __init__(self, port, platform, options):
- self._file_dir = port.path_from_chromium_base('webkit', 'tools',
- 'layout_tests')
- self._port = port
+ def __init__(self, running_port, target_port, platform, options):
+ """
+ Args:
+ running_port: the Port the script is running on.
+ target_port: the Port the script uses to find port-specific
+ configuration information like the test_expectations.txt
+ file location and the list of test platforms.
+ platform: the test platform to rebaseline
+ options: the command-line options object."""
self._platform = platform
self._options = options
+ self._port = running_port
+ self._target_port = target_port
+ self._rebaseline_port = port.get(
+ self._target_port.test_platform_name_to_name(platform), options)
self._rebaselining_tests = []
self._rebaselined_tests = []
@@ -213,9 +226,9 @@ class Rebaseliner(object):
# -. compile list of tests that need rebaselining.
# -. update the tests in test_expectations file after rebaseline
# is done.
- expectations_str = self._port.test_expectations()
+ expectations_str = self._rebaseline_port.test_expectations()
self._test_expectations = \
- test_expectations.TestExpectations(self._port,
+ test_expectations.TestExpectations(self._rebaseline_port,
None,
expectations_str,
self._platform,
@@ -233,9 +246,9 @@ class Rebaseliner(object):
log_dashed_string('Downloading archive', self._platform)
archive_file = self._download_buildbot_archive()
- logging.info('')
+ _log.info('')
if not archive_file:
- logging.error('No archive found.')
+ _log.error('No archive found.')
return False
log_dashed_string('Extracting and adding new baselines',
@@ -246,19 +259,19 @@ class Rebaseliner(object):
log_dashed_string('Updating rebaselined tests in file',
self._platform)
self._update_rebaselined_tests_in_file(backup)
- logging.info('')
+ _log.info('')
if len(self._rebaselining_tests) != len(self._rebaselined_tests):
- logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN '
- 'REBASELINED.')
- logging.warning(' Total tests needing rebaselining: %d',
- len(self._rebaselining_tests))
- logging.warning(' Total tests rebaselined: %d',
- len(self._rebaselined_tests))
+ _log.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN '
+ 'REBASELINED.')
+ _log.warning(' Total tests needing rebaselining: %d',
+ len(self._rebaselining_tests))
+ _log.warning(' Total tests rebaselined: %d',
+ len(self._rebaselined_tests))
return False
- logging.warning('All tests needing rebaselining were successfully '
- 'rebaselined.')
+ _log.warning('All tests needing rebaselining were successfully '
+ 'rebaselined.')
return True
@@ -285,16 +298,16 @@ class Rebaseliner(object):
self._rebaselining_tests = \
self._test_expectations.get_rebaselining_failures()
if not self._rebaselining_tests:
- logging.warn('No tests found that need rebaselining.')
+ _log.warn('No tests found that need rebaselining.')
return None
- logging.info('Total number of tests needing rebaselining '
- 'for "%s": "%d"', self._platform,
- len(self._rebaselining_tests))
+ _log.info('Total number of tests needing rebaselining '
+ 'for "%s": "%d"', self._platform,
+ len(self._rebaselining_tests))
test_no = 1
for test in self._rebaselining_tests:
- logging.info(' %d: %s', test_no, test)
+ _log.info(' %d: %s', test_no, test)
test_no += 1
return self._rebaselining_tests
@@ -310,7 +323,7 @@ class Rebaseliner(object):
None on failure.
"""
- logging.debug('Url to retrieve revision: "%s"', url)
+ _log.debug('Url to retrieve revision: "%s"', url)
f = urllib.urlopen(url)
content = f.read()
@@ -318,11 +331,11 @@ class Rebaseliner(object):
revisions = re.findall(self.REVISION_REGEX, content)
if not revisions:
- logging.error('Failed to find revision, content: "%s"', content)
+ _log.error('Failed to find revision, content: "%s"', content)
return None
revisions.sort(key=int)
- logging.info('Latest revision: "%s"', revisions[len(revisions) - 1])
+ _log.info('Latest revision: "%s"', revisions[len(revisions) - 1])
return revisions[len(revisions) - 1]
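
Taken by itself, the revision-scraping logic above is a regex scan over the
buildbot directory listing followed by a numeric sort; a minimal standalone
sketch (the listing string is made up for illustration):

    import re

    REVISION_REGEX = r'<a href=\"(\d+)/\">'

    def latest_revision_from_listing(content):
        # Find every revision directory linked from the index page.
        revisions = re.findall(REVISION_REGEX, content)
        if not revisions:
            return None
        # Sort numerically, not lexicographically, so "10000" beats "9999".
        revisions.sort(key=int)
        return revisions[-1]

    print latest_revision_from_listing(
        '<a href="9999/">r9999</a> <a href="10000/">r10000</a>')  # 10000
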
def _get_archive_dir_name(self, platform, webkit_canary):
@@ -339,8 +352,8 @@ class Rebaseliner(object):
if platform in ARCHIVE_DIR_NAME_DICT:
return ARCHIVE_DIR_NAME_DICT[platform]
else:
- logging.error('Cannot find platform key %s in archive '
- 'directory name dictionary', platform)
+ _log.error('Cannot find platform key %s in archive '
+ 'directory name dictionary', platform)
return None
def _get_archive_url(self):
@@ -356,7 +369,7 @@ class Rebaseliner(object):
if not dir_name:
return None
- logging.debug('Buildbot platform dir name: "%s"', dir_name)
+ _log.debug('Buildbot platform dir name: "%s"', dir_name)
url_base = '%s/%s/' % (self._options.archive_url, dir_name)
latest_revision = self._get_latest_revision(url_base)
@@ -364,7 +377,7 @@ class Rebaseliner(object):
return None
archive_url = ('%s%s/layout-test-results.zip' % (url_base,
latest_revision))
- logging.info('Archive url: "%s"', archive_url)
+ _log.info('Archive url: "%s"', archive_url)
return archive_url
def _download_buildbot_archive(self):
@@ -380,7 +393,7 @@ class Rebaseliner(object):
return None
fn = urllib.urlretrieve(url)[0]
- logging.info('Archive downloaded and saved to file: "%s"', fn)
+ _log.info('Archive downloaded and saved to file: "%s"', fn)
return fn
def _extract_and_add_new_baselines(self, archive_file):
@@ -397,17 +410,18 @@ class Rebaseliner(object):
zip_file = zipfile.ZipFile(archive_file, 'r')
zip_namelist = zip_file.namelist()
- logging.debug('zip file namelist:')
+ _log.debug('zip file namelist:')
for name in zip_namelist:
- logging.debug(' ' + name)
+ _log.debug(' ' + name)
- platform = self._port.name()
- logging.debug('Platform dir: "%s"', platform)
+ platform = self._rebaseline_port.test_platform_name_to_name(
+ self._platform)
+ _log.debug('Platform dir: "%s"', platform)
test_no = 1
self._rebaselined_tests = []
for test in self._rebaselining_tests:
- logging.info('Test %d: %s', test_no, test)
+ _log.info('Test %d: %s', test_no, test)
found = False
svn_error = False
@@ -415,14 +429,14 @@ class Rebaseliner(object):
for suffix in BASELINE_SUFFIXES:
archive_test_name = ('layout-test-results/%s-actual%s' %
(test_basename, suffix))
- logging.debug(' Archive test file name: "%s"',
- archive_test_name)
+ _log.debug(' Archive test file name: "%s"',
+ archive_test_name)
if not archive_test_name in zip_namelist:
- logging.info(' %s file not in archive.', suffix)
+ _log.info(' %s file not in archive.', suffix)
continue
found = True
- logging.info(' %s file found in archive.', suffix)
+ _log.info(' %s file found in archive.', suffix)
# Extract new baseline from archive and save it to a temp file.
data = zip_file.read(archive_test_name)
@@ -433,11 +447,10 @@ class Rebaseliner(object):
expected_filename = '%s-expected%s' % (test_basename, suffix)
expected_fullpath = os.path.join(
- self._port._chromium_baseline_path(platform),
- expected_filename)
+ self._rebaseline_port.baseline_path(), expected_filename)
expected_fullpath = os.path.normpath(expected_fullpath)
- logging.debug(' Expected file full path: "%s"',
- expected_fullpath)
+ _log.debug(' Expected file full path: "%s"',
+ expected_fullpath)
# TODO(victorw): for now, the rebaselining tool checks whether
# or not THIS baseline is duplicate and should be skipped.
@@ -466,12 +479,12 @@ class Rebaseliner(object):
self._create_html_baseline_files(expected_fullpath)
if not found:
- logging.warn(' No new baselines found in archive.')
+ _log.warn(' No new baselines found in archive.')
else:
if svn_error:
- logging.warn(' Failed to add baselines to SVN.')
+ _log.warn(' Failed to add baselines to SVN.')
else:
- logging.info(' Rebaseline succeeded.')
+ _log.info(' Rebaseline succeeded.')
self._rebaselined_tests.append(test)
test_no += 1
@@ -499,9 +512,10 @@ class Rebaseliner(object):
True if the baseline is unnecessary.
False otherwise.
"""
- test_filepath = os.path.join(self._port.layout_tests_dir(), test)
- all_baselines = self._port.expected_baselines(test_filepath,
- suffix, True)
+ test_filepath = os.path.join(self._target_port.layout_tests_dir(),
+ test)
+ all_baselines = self._rebaseline_port.expected_baselines(
+ test_filepath, suffix, True)
for (fallback_dir, fallback_file) in all_baselines:
if fallback_dir and fallback_file:
fallback_fullpath = os.path.normpath(
@@ -509,8 +523,8 @@ class Rebaseliner(object):
if fallback_fullpath.lower() != baseline_path.lower():
if not self._diff_baselines(new_baseline,
fallback_fullpath):
- logging.info(' Found same baseline at %s',
- fallback_fullpath)
+ _log.info(' Found same baseline at %s',
+ fallback_fullpath)
return True
else:
return False
@@ -531,15 +545,15 @@ class Rebaseliner(object):
ext1 = os.path.splitext(file1)[1].upper()
ext2 = os.path.splitext(file2)[1].upper()
if ext1 != ext2:
- logging.warn('Files to compare have different ext. '
- 'File1: %s; File2: %s', file1, file2)
+ _log.warn('Files to compare have different ext. '
+ 'File1: %s; File2: %s', file1, file2)
return True
if ext1 == '.PNG':
- return image_diff.ImageDiff(self._port, self._platform,
- '').diff_files(self._port, file1, file2)
+ return image_diff.ImageDiff(self._port,
+ '').diff_files(self._port, file1, file2)
else:
- return text_diff.TestTextDiff(self._port, self._platform,
+ return text_diff.TestTextDiff(self._port,
'').diff_files(self._port, file1, file2)
def _delete_baseline(self, filename):
@@ -575,20 +589,20 @@ class Rebaseliner(object):
new_expectations = (
self._test_expectations.remove_platform_from_expectations(
self._rebaselined_tests, self._platform))
- path = self._port.path_to_test_expectations_file()
+ path = self._target_port.path_to_test_expectations_file()
if backup:
date_suffix = time.strftime('%Y%m%d%H%M%S',
time.localtime(time.time()))
backup_file = ('%s.orig.%s' % (path, date_suffix))
if os.path.exists(backup_file):
os.remove(backup_file)
- logging.info('Saving original file to "%s"', backup_file)
+ _log.info('Saving original file to "%s"', backup_file)
os.rename(path, backup_file)
f = open(path, "w")
f.write(new_expectations)
f.close()
else:
- logging.info('No test was rebaselined so nothing to remove.')
+ _log.info('No test was rebaselined so nothing to remove.')
def _svn_add(self, filename):
"""Add the file to SVN repository.
@@ -607,7 +621,7 @@ class Rebaseliner(object):
parent_dir, basename = os.path.split(filename)
if self._repo_type != REPO_SVN or parent_dir == filename:
- logging.info("No svn checkout found, skip svn add.")
+ _log.info("No svn checkout found, skip svn add.")
return True
original_dir = os.getcwd()
@@ -616,12 +630,12 @@ class Rebaseliner(object):
os.chdir(original_dir)
output = status_output.upper()
if output.startswith('A') or output.startswith('M'):
- logging.info(' File already added to SVN: "%s"', filename)
+ _log.info(' File already added to SVN: "%s"', filename)
return True
if output.find('IS NOT A WORKING COPY') >= 0:
- logging.info(' File is not a working copy, add its parent: "%s"',
- parent_dir)
+ _log.info(' File is not a working copy, add its parent: "%s"',
+ parent_dir)
return self._svn_add(parent_dir)
os.chdir(parent_dir)
@@ -629,19 +643,19 @@ class Rebaseliner(object):
os.chdir(original_dir)
output = add_output.upper().rstrip()
if output.startswith('A') and output.find(basename.upper()) >= 0:
- logging.info(' Added new file: "%s"', filename)
+ _log.info(' Added new file: "%s"', filename)
self._svn_prop_set(filename)
return True
if (not status_output) and (add_output.upper().find(
'ALREADY UNDER VERSION CONTROL') >= 0):
- logging.info(' File already under SVN and has no change: "%s"',
- filename)
+ _log.info(' File already under SVN and has no change: "%s"',
+ filename)
return True
- logging.warn(' Failed to add file to SVN: "%s"', filename)
- logging.warn(' Svn status output: "%s"', status_output)
- logging.warn(' Svn add output: "%s"', add_output)
+ _log.warn(' Failed to add file to SVN: "%s"', filename)
+ _log.warn(' Svn status output: "%s"', status_output)
+ _log.warn(' Svn add output: "%s"', add_output)
return False
def _svn_prop_set(self, filename):
@@ -667,7 +681,7 @@ class Rebaseliner(object):
else:
cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename]
- logging.debug(' Set svn prop: %s', ' '.join(cmd))
+ _log.debug(' Set svn prop: %s', ' '.join(cmd))
run_shell(cmd, False)
os.chdir(original_dir)
@@ -689,14 +703,14 @@ class Rebaseliner(object):
baseline_filename, self._platform,
'new')
shutil.copyfile(baseline_fullpath, new_file)
- logging.info(' Html: copied new baseline file from "%s" to "%s".',
- baseline_fullpath, new_file)
+ _log.info(' Html: copied new baseline file from "%s" to "%s".',
+ baseline_fullpath, new_file)
# Get the old baseline from SVN and save to the html directory.
output = run_shell(['svn', 'cat', '-r', 'BASE', baseline_fullpath])
if (not output) or (output.upper().rstrip().endswith(
'NO SUCH FILE OR DIRECTORY')):
- logging.info(' No base file: "%s"', baseline_fullpath)
+ _log.info(' No base file: "%s"', baseline_fullpath)
return
base_file = get_result_file_fullpath(self._options.html_directory,
baseline_filename, self._platform,
@@ -704,8 +718,8 @@ class Rebaseliner(object):
f = open(base_file, 'wb')
f.write(output)
f.close()
- logging.info(' Html: created old baseline file: "%s".',
- base_file)
+ _log.info(' Html: created old baseline file: "%s".',
+ base_file)
# Get the diff between old and new baselines and save to the html dir.
if baseline_filename.upper().endswith('.TXT'):
@@ -721,7 +735,7 @@ class Rebaseliner(object):
else:
parent_dir = sys.path[0] # tempdir is not secure.
bogus_dir = os.path.join(parent_dir, "temp_svn_config")
- logging.debug(' Html: temp config dir: "%s".', bogus_dir)
+ _log.debug(' Html: temp config dir: "%s".', bogus_dir)
if not os.path.exists(bogus_dir):
os.mkdir(bogus_dir)
delete_bogus_dir = True
@@ -737,13 +751,13 @@ class Rebaseliner(object):
f = open(diff_file, 'wb')
f.write(output)
f.close()
- logging.info(' Html: created baseline diff file: "%s".',
- diff_file)
+ _log.info(' Html: created baseline diff file: "%s".',
+ diff_file)
if delete_bogus_dir:
shutil.rmtree(bogus_dir, True)
- logging.debug(' Html: removed temp config dir: "%s".',
- bogus_dir)
+ _log.debug(' Html: removed temp config dir: "%s".',
+ bogus_dir)
class HtmlGenerator(object):
@@ -792,9 +806,9 @@ class HtmlGenerator(object):
'<img style="width: 200" src="%(uri)s" /></a></td>')
HTML_TR = '<tr>%s</tr>'
- def __init__(self, port, options, platforms, rebaselining_tests):
+ def __init__(self, target_port, options, platforms, rebaselining_tests):
self._html_directory = options.html_directory
- self._port = port
+ self._target_port = target_port
self._platforms = platforms
self._rebaselining_tests = rebaselining_tests
self._html_file = os.path.join(options.html_directory,
@@ -803,7 +817,7 @@ class HtmlGenerator(object):
def generate_html(self):
"""Generate html file for rebaselining result comparison."""
- logging.info('Generating html file')
+ _log.info('Generating html file')
html_body = ''
if not self._rebaselining_tests:
@@ -814,29 +828,29 @@ class HtmlGenerator(object):
test_no = 1
for test in tests:
- logging.info('Test %d: %s', test_no, test)
+ _log.info('Test %d: %s', test_no, test)
html_body += self._generate_html_for_one_test(test)
html = self.HTML_REBASELINE % ({'time': time.asctime(),
'body': html_body})
- logging.debug(html)
+ _log.debug(html)
f = open(self._html_file, 'w')
f.write(html)
f.close()
- logging.info('Baseline comparison html generated at "%s"',
- self._html_file)
+ _log.info('Baseline comparison html generated at "%s"',
+ self._html_file)
def show_html(self):
"""Launch the rebaselining html in brwoser."""
- logging.info('Launching html: "%s"', self._html_file)
+ _log.info('Launching html: "%s"', self._html_file)
- html_uri = self._port.filename_to_uri(self._html_file)
+ html_uri = self._target_port.filename_to_uri(self._html_file)
webbrowser.open(html_uri, 1)
- logging.info('Html launched.')
+ _log.info('Html launched.')
def _generate_baseline_links(self, test_basename, suffix, platform):
"""Generate links for baseline results (old, new and diff).
@@ -851,18 +865,18 @@ class HtmlGenerator(object):
"""
baseline_filename = '%s-expected%s' % (test_basename, suffix)
- logging.debug(' baseline filename: "%s"', baseline_filename)
+ _log.debug(' baseline filename: "%s"', baseline_filename)
new_file = get_result_file_fullpath(self._html_directory,
baseline_filename, platform, 'new')
- logging.info(' New baseline file: "%s"', new_file)
+ _log.info(' New baseline file: "%s"', new_file)
if not os.path.exists(new_file):
- logging.info(' No new baseline file: "%s"', new_file)
+ _log.info(' No new baseline file: "%s"', new_file)
return ''
old_file = get_result_file_fullpath(self._html_directory,
baseline_filename, platform, 'old')
- logging.info(' Old baseline file: "%s"', old_file)
+ _log.info(' Old baseline file: "%s"', old_file)
if suffix == '.png':
html_td_link = self.HTML_TD_LINK_IMG
else:
@@ -871,24 +885,25 @@ class HtmlGenerator(object):
links = ''
if os.path.exists(old_file):
links += html_td_link % {
- 'uri': self._port.filename_to_uri(old_file),
+ 'uri': self._target_port.filename_to_uri(old_file),
'name': baseline_filename}
else:
- logging.info(' No old baseline file: "%s"', old_file)
+ _log.info(' No old baseline file: "%s"', old_file)
links += self.HTML_TD_NOLINK % ''
- links += html_td_link % {'uri': self._port.filename_to_uri(new_file),
+ links += html_td_link % {'uri': self._target_port.filename_to_uri(
+ new_file),
'name': baseline_filename}
diff_file = get_result_file_fullpath(self._html_directory,
baseline_filename, platform,
'diff')
- logging.info(' Baseline diff file: "%s"', diff_file)
+ _log.info(' Baseline diff file: "%s"', diff_file)
if os.path.exists(diff_file):
- links += html_td_link % {'uri': self._port.filename_to_uri(
+ links += html_td_link % {'uri': self._target_port.filename_to_uri(
diff_file), 'name': 'Diff'}
else:
- logging.info(' No baseline diff file: "%s"', diff_file)
+ _log.info(' No baseline diff file: "%s"', diff_file)
links += self.HTML_TD_NOLINK % ''
return links
@@ -904,13 +919,13 @@ class HtmlGenerator(object):
"""
test_basename = os.path.basename(os.path.splitext(test)[0])
- logging.info(' basename: "%s"', test_basename)
+ _log.info(' basename: "%s"', test_basename)
rows = []
for suffix in BASELINE_SUFFIXES:
if suffix == '.checksum':
continue
- logging.info(' Checking %s files', suffix)
+ _log.info(' Checking %s files', suffix)
for platform in self._platforms:
links = self._generate_baseline_links(test_basename, suffix,
platform)
@@ -919,17 +934,18 @@ class HtmlGenerator(object):
suffix)
row += self.HTML_TD_NOLINK % platform
row += links
- logging.debug(' html row: %s', row)
+ _log.debug(' html row: %s', row)
rows.append(self.HTML_TR % row)
if rows:
- test_path = os.path.join(self._port.layout_tests_dir(), test)
- html = self.HTML_TR_TEST % (self._port.filename_to_uri(test_path),
- test)
+ test_path = os.path.join(self._target_port.layout_tests_dir(),
+ test)
+ html = self.HTML_TR_TEST % (
+ self._target_port.filename_to_uri(test_path), test)
html += self.HTML_TEST_DETAIL % ' '.join(rows)
- logging.debug(' html for test: %s', html)
+ _log.debug(' html for test: %s', html)
return self.HTML_TABLE_TEST % html
return ''
@@ -982,8 +998,23 @@ def main():
help=('The directory that stores the results for'
' rebaselining comparison.'))
+ option_parser.add_option('', '--target-platform',
+ default='chromium',
+ help=('The target platform to rebaseline '
+ '("mac", "chromium", "qt", etc.). Defaults '
+ 'to "chromium".'))
options = option_parser.parse_args()[0]
- port_obj = port.get(None, options)
+
+    # We need to create three different Port objects over the life of this
+    # script. |target_port_obj| is used to determine configuration
+    # information: the location of the expectations file, the names of the
+    # ports to rebaseline, etc. |port_obj| is used for runtime functionality
+    # like actually diffing baselines. Then we create a rebaselining port to
+    # actually find and manage the baselines.
+ target_options = copy.copy(options)
+ if options.target_platform == 'chromium':
+ target_options.chromium = True
+ target_port_obj = port.get(None, target_options)
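
A sketch of the options-cloning pattern used here, with a hypothetical
Options class standing in for optparse's values object; mutating the shallow
copy leaves the original command-line options untouched:

    import copy

    class Options(object):  # hypothetical stand-in for optparse's values
        def __init__(self):
            self.target_platform = 'chromium'
            self.chromium = False

    options = Options()
    target_options = copy.copy(options)  # shallow copy of the attributes
    if options.target_platform == 'chromium':
        target_options.chromium = True   # flag only the copy
    print options.chromium, target_options.chromium  # False True
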
# Set up our logging format.
log_level = logging.INFO
@@ -994,15 +1025,27 @@ def main():
'%(levelname)s %(message)s'),
datefmt='%y%m%d %H:%M:%S')
+    # options.configuration is used by the port to locate the image_diff
+    # binary. Check for the release image_diff binary first; if it does
+    # not exist, fall back to the debug binary.
+ options.configuration = "Release"
+ port_obj = port.get(None, options)
+ if not port_obj.check_image_diff(override_step=None, logging=False):
+ _log.debug('No release version image diff binary found.')
+ options.configuration = "Debug"
+ port_obj = port.get(None, options)
+ else:
+ _log.debug('Found release version image diff binary.')
+
# Verify 'platforms' option is valid
if not options.platforms:
- logging.error('Invalid "platforms" option. --platforms must be '
- 'specified in order to rebaseline.')
+ _log.error('Invalid "platforms" option. --platforms must be '
+ 'specified in order to rebaseline.')
sys.exit(1)
platforms = [p.strip().lower() for p in options.platforms.split(',')]
for platform in platforms:
if not platform in REBASELINE_PLATFORM_ORDER:
- logging.error('Invalid platform: "%s"' % (platform))
+ _log.error('Invalid platform: "%s"' % (platform))
sys.exit(1)
# Adjust the platform order so the rebaseline tool runs in the order of
@@ -1019,9 +1062,9 @@ def main():
rebaselining_tests = set()
backup = options.backup
for platform in rebaseline_platforms:
- rebaseliner = Rebaseliner(port_obj, platform, options)
+ rebaseliner = Rebaseliner(port_obj, target_port_obj, platform, options)
- logging.info('')
+ _log.info('')
log_dashed_string('Rebaseline started', platform)
if rebaseliner.run(backup):
# Only need to backup one original copy of test expectation file.
@@ -1032,9 +1075,9 @@ def main():
rebaselining_tests |= set(rebaseliner.get_rebaselining_tests())
- logging.info('')
+ _log.info('')
log_dashed_string('Rebaselining result comparison started', None)
- html_generator = HtmlGenerator(port_obj,
+ html_generator = HtmlGenerator(target_port_obj,
options,
rebaseline_platforms,
rebaselining_tests)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index f0b68ee..73195b3 100755
--- a/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -27,7 +27,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Run layout tests using the test_shell.
+"""Run layout tests.
This is a port of the existing webkit test script run-webkit-tests.
@@ -44,12 +44,16 @@ directory. Entire lines starting with '//' (comments) will be ignored.
For details of the files' contents and purposes, see test_lists/README.
"""
+from __future__ import with_statement
+
+import codecs
import errno
import glob
import logging
import math
import optparse
import os
+import platform
import Queue
import random
import re
@@ -58,27 +62,50 @@ import sys
import time
import traceback
-import simplejson
-
from layout_package import test_expectations
from layout_package import json_layout_results_generator
from layout_package import metered_stream
from layout_package import test_failures
-from layout_package import test_shell_thread
+from layout_package import dump_render_tree_thread
from layout_package import test_files
from test_types import fuzzy_image_diff
from test_types import image_diff
from test_types import test_type_base
from test_types import text_diff
+from webkitpy.common.system.executive import Executive
+from webkitpy.thirdparty import simplejson
+
import port
+_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests")
+
+# Dummy value used for command-line explicitness to disable the defaults.
+LOG_NOTHING = 'nothing'
+
+# Display the one-line progress bar (% completed) while testing
+LOG_PROGRESS = 'progress'
+
# Indicates that we want detailed progress updates in the output (prints
# directory-by-directory feedback).
LOG_DETAILED_PROGRESS = 'detailed-progress'
+# Log the one-line summary at the end of the run
+LOG_SUMMARY = 'summary'
+
+# "Trace" the test - log the expected result, the actual result, and the
+# baselines used
+LOG_TRACE = 'trace'
+
# Log any unexpected results while running (instead of just at the end).
LOG_UNEXPECTED = 'unexpected'
+LOG_UNEXPECTED_RESULTS = 'unexpected-results'
+
+LOG_VALUES = ",".join(("actual", "config", LOG_DETAILED_PROGRESS, "expected",
+ LOG_NOTHING, LOG_PROGRESS, LOG_SUMMARY, "timing",
+ LOG_UNEXPECTED, LOG_UNEXPECTED_RESULTS))
+LOG_DEFAULT_VALUE = ",".join((LOG_DETAILED_PROGRESS, LOG_SUMMARY,
+ LOG_UNEXPECTED, LOG_UNEXPECTED_RESULTS))
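
The --log argument is a comma-separated subset of LOG_VALUES;
create_logging_writer() below reduces the check to a split plus a
membership test:

    log = "detailed-progress,summary,unexpected,unexpected-results"
    selected = log.split(",")
    print "summary" in selected   # True
    print "trace" in selected     # False
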
# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
@@ -98,13 +125,27 @@ class TestInfo:
self.filename = filename
self.uri = port.filename_to_uri(filename)
self.timeout = timeout
- expected_hash_file = port.expected_filename(filename, '.checksum')
+ # FIXME: Confusing that the file is .checksum and we call it "hash"
+ self._expected_hash_path = port.expected_filename(filename, '.checksum')
+ self._have_read_expected_hash = False
+ self._image_hash = None
+
+ def _read_image_hash(self):
try:
- self.image_hash = open(expected_hash_file, "r").read()
+ with codecs.open(self._expected_hash_path, "r", "ascii") as hash_file:
+ return hash_file.read()
except IOError, e:
if errno.ENOENT != e.errno:
raise
- self.image_hash = None
+
+ def image_hash(self):
+ # Read the image_hash lazily to reduce startup time.
+ # This class is accessed across threads, but only one thread should
+ # ever be dealing with any given TestInfo so no locking is needed.
+ if not self._have_read_expected_hash:
+ self._have_read_expected_hash = True
+ self._image_hash = self._read_image_hash()
+ return self._image_hash
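
The lazy, read-once pattern behind image_hash() in isolation; a sketch with
a hypothetical LazyHash class, not the TestInfo class above:

    from __future__ import with_statement

    import codecs
    import errno

    class LazyHash(object):
        """Reads a checksum file at most once, on first access."""
        def __init__(self, path):
            self._path = path
            self._have_read = False
            self._value = None

        def value(self):
            if not self._have_read:
                self._have_read = True
                try:
                    with codecs.open(self._path, "r", "ascii") as f:
                        self._value = f.read()
                except IOError, e:
                    # A missing file just means "no hash"; anything
                    # else is a real error.
                    if e.errno != errno.ENOENT:
                        raise
            return self._value
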
class ResultSummary(object):
@@ -131,25 +172,23 @@ class ResultSummary(object):
self.tests_by_timeline[timeline] = (
expectations.get_tests_with_timeline(timeline))
- def add(self, test, failures, result, expected):
- """Add a result into the appropriate bin.
+ def add(self, result, expected):
+ """Add a TestResult into the appropriate bin.
Args:
- test: test file name
- failures: list of failure objects from test execution
- result: result of test (PASS, IMAGE, etc.).
+ result: TestResult from dump_render_tree_thread.
expected: whether the result was what we expected it to be.
"""
- self.tests_by_expectation[result].add(test)
- self.results[test] = result
+ self.tests_by_expectation[result.type].add(result.filename)
+ self.results[result.filename] = result.type
self.remaining -= 1
- if len(failures):
- self.failures[test] = failures
+ if len(result.failures):
+ self.failures[result.filename] = result.failures
if expected:
self.expected += 1
else:
- self.unexpected_results[test] = result
+ self.unexpected_results[result.filename] = result.type
self.unexpected += 1
@@ -162,7 +201,7 @@ class TestRunner:
# The per-test timeout in milliseconds, if no --time-out-ms option was
# given to run_webkit_tests. This should correspond to the default timeout
- # in test_shell.exe.
+ # in DumpRenderTree.
DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
@@ -197,14 +236,16 @@ class TestRunner:
self._current_progress_str = ""
self._current_test_number = 0
+ self._retries = 0
+
def __del__(self):
- logging.debug("flushing stdout")
+ _log.debug("flushing stdout")
sys.stdout.flush()
- logging.debug("flushing stderr")
+ _log.debug("flushing stderr")
sys.stderr.flush()
- logging.debug("stopping http server")
+ _log.debug("stopping http server")
self._port.stop_http_server()
- logging.debug("stopping websocket server")
+ _log.debug("stopping websocket server")
self._port.stop_websocket_server()
def gather_file_paths(self, paths):
@@ -225,11 +266,13 @@ class TestRunner:
try:
expectations_str = self._port.test_expectations()
+ overrides_str = self._port.test_expectations_overrides()
self._expectations = test_expectations.TestExpectations(
self._port, test_files, expectations_str, test_platform_name,
- is_debug_mode, self._options.lint_test_files)
+ is_debug_mode, self._options.lint_test_files,
+ tests_are_present=True, overrides=overrides_str)
return self._expectations
- except Exception, err:
+ except SyntaxError, err:
if self._options.lint_test_files:
print str(err)
else:
@@ -274,7 +317,7 @@ class TestRunner:
test_size = int(chunk_len)
assert(test_size > 0)
except:
- logging.critical("invalid chunk '%s'" % chunk_value)
+ _log.critical("invalid chunk '%s'" % chunk_value)
sys.exit(1)
# Get the number of tests
@@ -343,7 +386,7 @@ class TestRunner:
self._expectations = self.parse_expectations(
self._port.test_platform_name(),
- self._options.target == 'Debug')
+ self._options.configuration == 'Debug')
self._test_files = set(files)
self._test_files_list = files
@@ -361,7 +404,6 @@ class TestRunner:
self._print_expected_results_of_type(write, result_summary,
test_expectations.SKIP, "skipped")
-
if self._options.force:
write('Running all tests, including skips (--force)')
else:
@@ -369,8 +411,11 @@ class TestRunner:
# subtracted out of self._test_files, above), but we stub out the
# results here so the statistics can remain accurate.
for test in skip_chunk:
- result_summary.add(test, [], test_expectations.SKIP,
- expected=True)
+ result = dump_render_tree_thread.TestResult(test,
+ failures=[], test_run_time=0, total_time_for_all_diffs=0,
+ time_for_diffs=0)
+ result.type = test_expectations.SKIP
+ result_summary.add(result, expected=True)
write("")
return result_summary
@@ -471,12 +516,12 @@ class TestRunner:
filename_queue.put(item)
return filename_queue
- def _get_test_shell_args(self, index):
- """Returns the tuple of arguments for tests and for test_shell."""
+ def _get_dump_render_tree_args(self, index):
+ """Returns the tuple of arguments for tests and for DumpRenderTree."""
shell_args = []
test_args = test_type_base.TestArguments()
png_path = None
- if not self._options.no_pixel_tests:
+ if self._options.pixel_tests:
png_path = os.path.join(self._options.results_directory,
"png_result%s.png" % index)
shell_args.append("--pixel-tests=" + png_path)
@@ -495,12 +540,13 @@ class TestRunner:
return test_args, png_path, shell_args
def _contains_tests(self, subdir):
- for test_file in self._test_files_list:
+ for test_file in self._test_files:
if test_file.find(subdir) >= 0:
return True
return False
- def _instantiate_test_shell_threads(self, test_files, result_summary):
+ def _instantiate_dump_render_tree_threads(self, test_files,
+ result_summary):
"""Instantitates and starts the TestShellThread(s).
Return:
@@ -510,22 +556,18 @@ class TestRunner:
# Instantiate TestShellThreads and start them.
threads = []
- for i in xrange(int(self._options.num_test_shells)):
+ for i in xrange(int(self._options.child_processes)):
# Create separate TestTypes instances for each thread.
test_types = []
- for t in self._test_types:
- test_types.append(t(self._port, self._options.platform,
+ for test_type in self._test_types:
+ test_types.append(test_type(self._port,
self._options.results_directory))
- test_args, png_path, shell_args = self._get_test_shell_args(i)
- thread = test_shell_thread.TestShellThread(self._port,
- filename_queue,
- self._result_queue,
- test_types,
- test_args,
- png_path,
- shell_args,
- self._options)
+ test_args, png_path, shell_args = \
+ self._get_dump_render_tree_args(i)
+ thread = dump_render_tree_thread.TestShellThread(self._port,
+ filename_queue, self._result_queue, test_types, test_args,
+ png_path, shell_args, self._options)
if self._is_single_threaded():
thread.run_in_main_thread(self, result_summary)
else:
@@ -536,7 +578,7 @@ class TestRunner:
def _is_single_threaded(self):
"""Returns whether we should run all the tests in the main thread."""
- return int(self._options.num_test_shells) == 1
+ return int(self._options.child_processes) == 1
def _run_tests(self, file_list, result_summary):
"""Runs the tests in the file_list.
@@ -552,8 +594,14 @@ class TestRunner:
in the form {filename:filename, test_run_time:test_run_time}
result_summary: summary object to populate with the results
"""
- threads = self._instantiate_test_shell_threads(file_list,
- result_summary)
+ plural = ""
+ if self._options.child_processes > 1:
+ plural = "s"
+ self._meter.update('Starting %s%s ...' %
+ (self._port.driver_name(), plural))
+ threads = self._instantiate_dump_render_tree_threads(file_list,
+ result_summary)
+ self._meter.update("Starting testing ...")
# Wait for the threads to finish and collect test failures.
failures = {}
@@ -575,11 +623,10 @@ class TestRunner:
'total_time': thread.get_total_time()})
test_timings.update(thread.get_directory_timing_stats())
individual_test_timings.extend(
- thread.get_individual_test_stats())
+ thread.get_test_results())
except KeyboardInterrupt:
for thread in threads:
thread.cancel()
- self._port.stop_helper()
raise
for thread in threads:
# Check whether a TestShellThread died before normal completion.
@@ -594,7 +641,11 @@ class TestRunner:
self.update_summary(result_summary)
return (thread_timings, test_timings, individual_test_timings)
- def run(self, result_summary):
+ def needs_http(self):
+ """Returns whether the test runner needs an HTTP server."""
+ return self._contains_tests(self.HTTP_SUBDIR)
+
+ def run(self, result_summary, print_results):
"""Run all our tests on all our test files.
For each test file, we run each test type. If there are any failures,
@@ -602,22 +653,21 @@ class TestRunner:
Args:
result_summary: a summary object tracking the test results.
+ print_results: whether or not to print the summary at the end
Return:
- We return nonzero if there are regressions compared to the last run.
+ The number of unexpected results (0 == success)
"""
if not self._test_files:
return 0
start_time = time.time()
- # Start up any helper needed
- if not self._options.no_pixel_tests:
- self._port.start_helper()
-
- if self._contains_tests(self.HTTP_SUBDIR):
+ if self.needs_http():
+ self._meter.update('Starting HTTP server ...')
self._port.start_http_server()
if self._contains_tests(self.WEBSOCKET_SUBDIR):
+ self._meter.update('Starting WebSocket server ...')
self._port.start_websocket_server()
# self._websocket_secure_server.Start()
@@ -627,17 +677,17 @@ class TestRunner:
# We exclude the crashes from the list of results to retry, because
# we want to treat even a potentially flaky crash as an error.
failures = self._get_failures(result_summary, include_crashes=False)
- retries = 0
retry_summary = result_summary
- while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
+ while (self._retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
len(failures)):
- logging.debug("Retrying %d unexpected failure(s)" % len(failures))
- retries += 1
+ _log.info('')
+ _log.info("Retrying %d unexpected failure(s)" % len(failures))
+ _log.info('')
+ self._retries += 1
retry_summary = ResultSummary(self._expectations, failures.keys())
self._run_tests(failures.keys(), retry_summary)
failures = self._get_failures(retry_summary, include_crashes=True)
- self._port.stop_helper()
end_time = time.time()
write = create_logging_writer(self._options, 'timing')
@@ -660,27 +710,29 @@ class TestRunner:
sys.stdout.flush()
sys.stderr.flush()
- if (LOG_DETAILED_PROGRESS in self._options.log or
- (LOG_UNEXPECTED in self._options.log and
- result_summary.total != result_summary.expected)):
- print
-
# This summary data gets written to stdout regardless of log level
- self._print_one_line_summary(result_summary.total,
- result_summary.expected)
+ # (unless of course we're printing nothing).
+ if print_results:
+ if (LOG_DETAILED_PROGRESS in self._options.log or
+ (LOG_UNEXPECTED in self._options.log and
+ result_summary.total != result_summary.expected)):
+ print
+ if LOG_SUMMARY in self._options.log:
+ self._print_one_line_summary(result_summary.total,
+ result_summary.expected)
unexpected_results = self._summarize_unexpected_results(result_summary,
retry_summary)
- self._print_unexpected_results(unexpected_results)
+ if LOG_UNEXPECTED_RESULTS in self._options.log:
+ self._print_unexpected_results(unexpected_results)
# Write the same data to log files.
self._write_json_files(unexpected_results, result_summary,
individual_test_timings)
- # Write the summary to disk (results.html) and maybe open the
- # test_shell to this file.
+ # Write the summary to disk (results.html) and display it if requested.
wrote_results = self._write_results_html_file(result_summary)
- if not self._options.noshow_results and wrote_results:
+ if self._options.show_results and wrote_results:
self._show_results_html_file()
# Ignore flaky failures and unexpected passes so we don't turn the
@@ -688,32 +740,69 @@ class TestRunner:
return unexpected_results['num_regressions']
def update_summary(self, result_summary):
- """Update the summary while running tests."""
+ """Update the summary and print results with any completed tests."""
while True:
try:
- (test, fail_list) = self._result_queue.get_nowait()
- result = test_failures.determine_result_type(fail_list)
- expected = self._expectations.matches_an_expected_result(test,
- result)
- result_summary.add(test, fail_list, result, expected)
- if (LOG_DETAILED_PROGRESS in self._options.log and
- (self._options.experimental_fully_parallel or
- self._is_single_threaded())):
- self._display_detailed_progress(result_summary)
- else:
- if not expected and LOG_UNEXPECTED in self._options.log:
- self._print_unexpected_test_result(test, result)
- self._display_one_line_progress(result_summary)
+ result = self._result_queue.get_nowait()
except Queue.Empty:
return
-
- def _display_one_line_progress(self, result_summary):
+ expected = self._expectations.matches_an_expected_result(
+ result.filename, result.type, self._options.pixel_tests)
+ result_summary.add(result, expected)
+ self._print_test_results(result, expected, result_summary)
+
+ def _print_test_results(self, result, expected, result_summary):
+ "Print the result of the test as determined by the --log switches."
+ if LOG_TRACE in self._options.log:
+ self._print_test_trace(result)
+ elif (LOG_DETAILED_PROGRESS in self._options.log and
+ (self._options.experimental_fully_parallel or
+ self._is_single_threaded())):
+ self._print_detailed_progress(result_summary)
+ else:
+ if (not expected and LOG_UNEXPECTED in self._options.log):
+ self._print_unexpected_test_result(result)
+ self._print_one_line_progress(result_summary)
+
+ def _print_test_trace(self, result):
+ """Print detailed results of a test (triggered by --log trace).
+ For each test, print:
+ - location of the expected baselines
+ - expected results
+ - actual result
+ - timing info
+ """
+ filename = result.filename
+ test_name = self._port.relative_test_filename(filename)
+ _log.info('trace: %s' % test_name)
+ _log.info(' txt: %s' %
+ self._port.relative_test_filename(
+ self._port.expected_filename(filename, '.txt')))
+ png_file = self._port.expected_filename(filename, '.png')
+ if os.path.exists(png_file):
+ _log.info(' png: %s' %
+                      self._port.relative_test_filename(png_file))
+ else:
+ _log.info(' png: <none>')
+ _log.info(' exp: %s' %
+ self._expectations.get_expectations_string(filename))
+ _log.info(' got: %s' %
+ self._expectations.expectation_to_string(result.type))
+ _log.info(' took: %-.3f' % result.test_run_time)
+ _log.info('')
+
+ def _print_one_line_progress(self, result_summary):
"""Displays the progress through the test run."""
- self._meter.update("Testing: %d ran as expected, %d didn't, %d left" %
- (result_summary.expected, result_summary.unexpected,
- result_summary.remaining))
-
- def _display_detailed_progress(self, result_summary):
+ percent_complete = 100 * (result_summary.expected +
+ result_summary.unexpected) / result_summary.total
+ action = "Testing"
+ if self._retries > 0:
+ action = "Retrying"
+ self._meter.progress("%s (%d%%): %d ran as expected, %d didn't,"
+ " %d left" % (action, percent_complete, result_summary.expected,
+ result_summary.unexpected, result_summary.remaining))
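
Worked through with concrete numbers (Python 2 integer division yields a
whole percent):

    expected, unexpected, remaining, total = 150, 10, 40, 200
    percent_complete = 100 * (expected + unexpected) / total  # 80
    print "Testing (%d%%): %d ran as expected, %d didn't, %d left" % (
        percent_complete, expected, unexpected, remaining)
    # Testing (80%): 150 ran as expected, 10 didn't, 40 left
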
+
+ def _print_detailed_progress(self, result_summary):
"""Display detailed progress output where we print the directory name
and one dot for each completed test. This is triggered by
"--log detailed-progress"."""
@@ -752,10 +841,17 @@ class TestRunner:
if result_summary.remaining:
remain_str = " (%d)" % (result_summary.remaining)
- self._meter.update("%s%s" %
- (self._current_progress_str, remain_str))
+ self._meter.progress("%s%s" %
+ (self._current_progress_str, remain_str))
else:
- self._meter.write("%s\n" % (self._current_progress_str))
+ self._meter.progress("%s\n" % (self._current_progress_str))
+
+ def _print_unexpected_test_result(self, result):
+ """Prints one unexpected test result line."""
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0]
+ self._meter.write(" %s -> unexpected %s\n" %
+ (self._port.relative_test_filename(result.filename),
+ desc))
def _get_failures(self, result_summary, include_crashes):
"""Filters a dict of results and returns only the failures.
@@ -870,8 +966,8 @@ class TestRunner:
individual_test_timings: list of test times (used by the flakiness
dashboard).
"""
- logging.debug("Writing JSON files in %s." %
- self._options.results_directory)
+ _log.debug("Writing JSON files in %s." %
+ self._options.results_directory)
unexpected_file = open(os.path.join(self._options.results_directory,
"unexpected_results.json"), "w")
unexpected_file.write(simplejson.dumps(unexpected_results,
@@ -893,7 +989,7 @@ class TestRunner:
BUILDER_BASE_URL, individual_test_timings,
self._expectations, result_summary, self._test_files_list)
- logging.debug("Finished writing JSON files.")
+ _log.debug("Finished writing JSON files.")
def _print_expected_results_of_type(self, write, result_summary,
result_type, result_type_str):
@@ -951,7 +1047,7 @@ class TestRunner:
(t['name'], t['num_tests'], t['total_time']))
cuml_time += t['total_time']
write(" %6.2f cumulative, %6.2f optimal" %
- (cuml_time, cuml_time / int(self._options.num_test_shells)))
+ (cuml_time, cuml_time / int(self._options.child_processes)))
write("")
self._print_aggregate_test_statistics(write, individual_test_timings)
@@ -964,18 +1060,20 @@ class TestRunner:
Args:
write: A callback to write info to (e.g., a LoggingWriter) or
sys.stdout.write.
- individual_test_timings: List of test_shell_thread.TestStats for all
- tests.
+ individual_test_timings: List of dump_render_tree_thread.TestStats
+ for all tests.
"""
- test_types = individual_test_timings[0].time_for_diffs.keys()
- times_for_test_shell = []
+ test_types = [] # Unit tests don't actually produce any timings.
+ if individual_test_timings:
+ test_types = individual_test_timings[0].time_for_diffs.keys()
+ times_for_dump_render_tree = []
times_for_diff_processing = []
times_per_test_type = {}
for test_type in test_types:
times_per_test_type[test_type] = []
for test_stats in individual_test_timings:
- times_for_test_shell.append(test_stats.test_run_time)
+ times_for_dump_render_tree.append(test_stats.test_run_time)
times_for_diff_processing.append(
test_stats.total_time_for_all_diffs)
time_for_diffs = test_stats.time_for_diffs
@@ -984,7 +1082,8 @@ class TestRunner:
time_for_diffs[test_type])
self._print_statistics_for_test_timings(write,
- "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell)
+ "PER TEST TIME IN TESTSHELL (seconds):",
+ times_for_dump_render_tree)
self._print_statistics_for_test_timings(write,
"PER TEST DIFF PROCESSING TIMES (seconds):",
times_for_diff_processing)
@@ -999,11 +1098,11 @@ class TestRunner:
Args:
write: A callback to write info to (e.g., a LoggingWriter) or
sys.stdout.write.
- individual_test_timings: List of test_shell_thread.TestStats for all
- tests.
+ individual_test_timings: List of dump_render_tree_thread.TestStats
+ for all tests.
result_summary: summary object for test run
"""
- # Reverse-sort by the time spent in test_shell.
+ # Reverse-sort by the time spent in DumpRenderTree.
individual_test_timings.sort(lambda a, b:
cmp(b.test_run_time, a.test_run_time))
@@ -1098,6 +1197,8 @@ class TestRunner:
timings.sort()
num_tests = len(timings)
+ if not num_tests:
+ return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
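
The percentile lookup above is the simple nearest-rank approximation; with
a small sorted sample:

    timings = [0.1, 0.2, 0.3, 0.5, 0.8, 1.0, 1.2, 1.5, 2.0, 4.0]
    timings.sort()
    num_tests = len(timings)                # 10
    print timings[int(.9 * num_tests)]      # timings[9] -> 4.0
    print timings[int(.99 * num_tests)]     # timings[9] -> 4.0
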
@@ -1269,12 +1370,6 @@ class TestRunner:
if len(unexpected_results['tests']) and self._options.verbose:
print "-" * 78
- def _print_unexpected_test_result(self, test, result):
- """Prints one unexpected test result line."""
- desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0]
- self._meter.write(" %s -> unexpected %s\n" %
- (self._port.relative_test_filename(test), desc))
-
def _write_results_html_file(self, result_summary):
"""Write results.html which is a summary of tests that failed.
@@ -1324,7 +1419,7 @@ class TestRunner:
return True
def _show_results_html_file(self):
- """Launches the test shell open to the results.html page."""
+ """Shows the results.html page."""
results_filename = os.path.join(self._options.results_directory,
"results.html")
self._port.show_results_html_file(results_filename)
@@ -1345,7 +1440,7 @@ def read_test_files(files):
def create_logging_writer(options, log_option):
- """Returns a write() function that will write the string to logging.info()
+ """Returns a write() function that will write the string to _log.info()
if log_option was specified in --log or if --verbose is true. Otherwise the
message is dropped.
@@ -1355,16 +1450,21 @@ def create_logging_writer(options, log_option):
to be logged (e.g., 'actual' or 'expected')
"""
if options.verbose or log_option in options.log.split(","):
- return logging.info
+ return _log.info
return lambda str: 1
-def main(options, args):
- """Run the tests. Will call sys.exit when complete.
+def main(options, args, print_results=True):
+ """Run the tests.
Args:
options: a dictionary of command line options
args: a list of sub directories or files to test
+ print_results: whether or not to log anything to stdout.
+            Set to False by the unit tests.
+ Returns:
+ the number of unexpected results that occurred, or -1 if there is an
+ error.
"""
if options.sources:
@@ -1382,13 +1482,14 @@ def main(options, args):
logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt,
stream=meter)
- if not options.target:
- if options.debug:
- options.target = "Debug"
- else:
- options.target = "Release"
-
port_obj = port.get(options.platform, options)
+ executive = Executive()
+
+ if not options.configuration:
+ options.configuration = port_obj.default_configuration()
+
+ if options.pixel_tests is None:
+ options.pixel_tests = True
if not options.use_apache:
options.use_apache = sys.platform in ('darwin', 'linux2')
@@ -1402,23 +1503,44 @@ def main(options, args):
# Debug or Release.
options.results_directory = port_obj.results_directory()
+ last_unexpected_results = []
+ if options.print_unexpected_results or options.retry_unexpected_results:
+ unexpected_results_filename = os.path.join(
+ options.results_directory, "unexpected_results.json")
+ f = file(unexpected_results_filename)
+ results = simplejson.load(f)
+ f.close()
+ last_unexpected_results = results['tests'].keys()
+ if options.print_unexpected_results:
+ print "\n".join(last_unexpected_results) + "\n"
+ return 0
+
if options.clobber_old_results:
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
# tracking.
- path = os.path.join(options.results_directory, 'LayoutTests')
- if os.path.exists(path):
- shutil.rmtree(path)
-
- if not options.num_test_shells:
- # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1.
- options.num_test_shells = port_obj.num_cores()
+ meter.update("Clobbering old results in %s" %
+ options.results_directory)
+ layout_tests_dir = port_obj.layout_tests_dir()
+ possible_dirs = os.listdir(layout_tests_dir)
+ for dirname in possible_dirs:
+ if os.path.isdir(os.path.join(layout_tests_dir, dirname)):
+ shutil.rmtree(os.path.join(options.results_directory, dirname),
+ ignore_errors=True)
+
+ if not options.child_processes:
+ # FIXME: Investigate perf/flakiness impact of using cpu_count + 1.
+ options.child_processes = port_obj.default_child_processes()
write = create_logging_writer(options, 'config')
- write("Running %s test_shells in parallel" % options.num_test_shells)
+ if options.child_processes == 1:
+ write("Running one %s" % port_obj.driver_name)
+ else:
+ write("Running %s %ss in parallel" % (
+ options.child_processes, port_obj.driver_name()))
if not options.time_out_ms:
- if options.target == "Debug":
+ if options.configuration == "Debug":
options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
else:
options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
@@ -1436,44 +1558,57 @@ def main(options, args):
paths = new_args
if not paths:
paths = []
+ paths += last_unexpected_results
if options.test_list:
paths += read_test_files(options.test_list)
# Create the output directory if it doesn't already exist.
port_obj.maybe_make_directory(options.results_directory)
- meter.update("Gathering files ...")
+ meter.update("Collecting tests ...")
test_runner = TestRunner(port_obj, options, meter)
test_runner.gather_file_paths(paths)
if options.lint_test_files:
- # Creating the expecations for each platform/target pair does all the
- # test list parsing and ensures it's correct syntax (e.g. no dupes).
- for platform in port_obj.test_platform_names():
- test_runner.parse_expectations(platform, is_debug_mode=True)
- test_runner.parse_expectations(platform, is_debug_mode=False)
+        # Creating the expectations for each platform/configuration pair
+        # does all the test list parsing and ensures the syntax is correct
+        # (e.g. no dupes).
+ for platform_name in port_obj.test_platform_names():
+ test_runner.parse_expectations(platform_name, is_debug_mode=True)
+ test_runner.parse_expectations(platform_name, is_debug_mode=False)
+ meter.update("")
print ("If there are no fail messages, errors or exceptions, then the "
"lint succeeded.")
- sys.exit(0)
-
- # Check that the system dependencies (themes, fonts, ...) are correct.
- if not options.nocheck_sys_deps:
- if not port_obj.check_sys_deps():
- sys.exit(1)
+ return 0
write = create_logging_writer(options, "config")
write("Using port '%s'" % port_obj.name())
write("Placing test results in %s" % options.results_directory)
if options.new_baseline:
write("Placing new baselines in %s" % port_obj.baseline_path())
- write("Using %s build" % options.target)
- if options.no_pixel_tests:
- write("Not running pixel tests")
+ write("Using %s build" % options.configuration)
+ if options.pixel_tests:
+ write("Pixel tests enabled")
+ else:
+ write("Pixel tests disabled")
write("")
meter.update("Parsing expectations ...")
test_runner.parse_expectations(port_obj.test_platform_name(),
- options.target == 'Debug')
+ options.configuration == 'Debug')
+
+ meter.update("Checking build ...")
+ if not port_obj.check_build(test_runner.needs_http()):
+ return -1
+
+ meter.update("Starting helper ...")
+ port_obj.start_helper()
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not options.nocheck_sys_deps:
+ meter.update("Checking system dependencies ...")
+ if not port_obj.check_sys_deps(test_runner.needs_http()):
+ return -1
meter.update("Preparing tests ...")
write = create_logging_writer(options, "expected")
@@ -1482,143 +1617,237 @@ def main(options, args):
port_obj.setup_test_run()
test_runner.add_test_type(text_diff.TestTextDiff)
- if not options.no_pixel_tests:
+ if options.pixel_tests:
test_runner.add_test_type(image_diff.ImageDiff)
if options.fuzzy_pixel_tests:
test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff)
- meter.update("Starting ...")
- has_new_failures = test_runner.run(result_summary)
+ num_unexpected_results = test_runner.run(result_summary, print_results)
- logging.debug("Exit status: %d" % has_new_failures)
- sys.exit(has_new_failures)
+ port_obj.stop_helper()
+
+ _log.debug("Exit status: %d" % num_unexpected_results)
+ return num_unexpected_results
+
+
+def _compat_shim_callback(option, opt_str, value, parser):
+ print "Ignoring unsupported option: %s" % opt_str
+
+
+def _compat_shim_option(option_name, nargs=0):
+    return optparse.make_option(option_name, action="callback",
+                                callback=_compat_shim_callback, nargs=nargs,
+                                help="Ignored, for old-run-webkit-tests "
+                                     "compat only.")
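
A usage sketch of the shim (the flag name below is illustrative):

    import optparse

    parser = optparse.OptionParser()
    parser.add_option(_compat_shim_option("--complex-text"))
    options, args = parser.parse_args(["--complex-text"])
    # prints: Ignoring unsupported option: --complex-text
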
def parse_args(args=None):
"""Provides a default set of command line args.
Returns a tuple of options, args from optparse"""
- option_parser = optparse.OptionParser()
- option_parser.add_option("", "--no-pixel-tests", action="store_true",
- default=False,
- help="disable pixel-to-pixel PNG comparisons")
- option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true",
- default=False,
- help="Also use fuzzy matching to compare pixel "
- "test outputs.")
- option_parser.add_option("", "--results-directory",
- default="layout-test-results",
- help="Output results directory source dir,"
- " relative to Debug or Release")
- option_parser.add_option("", "--new-baseline", action="store_true",
- default=False,
- help="save all generated results as new baselines"
- " into the platform directory, overwriting "
- "whatever's already there.")
- option_parser.add_option("", "--noshow-results", action="store_true",
- default=False, help="don't launch the test_shell"
- " with results after the tests are done")
- option_parser.add_option("", "--full-results-html", action="store_true",
- default=False, help="show all failures in "
- "results.html, rather than only regressions")
- option_parser.add_option("", "--clobber-old-results", action="store_true",
- default=False, help="Clobbers test results from "
- "previous runs.")
- option_parser.add_option("", "--lint-test-files", action="store_true",
- default=False, help="Makes sure the test files "
- "parse for all configurations. Does not run any "
- "tests.")
- option_parser.add_option("", "--force", action="store_true",
- default=False,
- help="Run all tests, even those marked SKIP "
- "in the test list")
- option_parser.add_option("", "--num-test-shells",
- help="Number of testshells to run in parallel.")
- option_parser.add_option("", "--use-apache", action="store_true",
- default=False,
- help="Whether to use apache instead of lighttpd.")
- option_parser.add_option("", "--time-out-ms", default=None,
- help="Set the timeout for each test")
- option_parser.add_option("", "--run-singly", action="store_true",
- default=False,
- help="run a separate test_shell for each test")
- option_parser.add_option("", "--debug", action="store_true", default=False,
- help="use the debug binary instead of the release"
- " binary")
- option_parser.add_option("", "--num-slow-tests-to-log", default=50,
- help="Number of slow tests whose timings "
- "to print.")
- option_parser.add_option("", "--platform",
- help="Override the platform for expected results")
- option_parser.add_option("", "--target", default="",
- help="Set the build target configuration "
- "(overrides --debug)")
- option_parser.add_option("", "--log", action="store",
- default="detailed-progress,unexpected",
- help="log various types of data. The param should"
- " be a comma-separated list of values from: "
- "actual,config," + LOG_DETAILED_PROGRESS +
- ",expected,timing," + LOG_UNEXPECTED + " "
- "(defaults to " +
- "--log detailed-progress,unexpected)")
- option_parser.add_option("-v", "--verbose", action="store_true",
- default=False, help="include debug-level logging")
- option_parser.add_option("", "--sources", action="store_true",
- help="show expected result file path for each "
- "test (implies --verbose)")
- option_parser.add_option("", "--startup-dialog", action="store_true",
- default=False,
- help="create a dialog on test_shell.exe startup")
- option_parser.add_option("", "--gp-fault-error-box", action="store_true",
- default=False,
- help="enable Windows GP fault error box")
- option_parser.add_option("", "--wrapper",
- help="wrapper command to insert before "
- "invocations of test_shell; option is split "
- "on whitespace before running. (Example: "
- "--wrapper='valgrind --smc-check=all')")
- option_parser.add_option("", "--test-list", action="append",
- help="read list of tests to run from file",
- metavar="FILE")
- option_parser.add_option("", "--nocheck-sys-deps", action="store_true",
- default=False,
- help="Don't check the system dependencies "
- "(themes)")
- option_parser.add_option("", "--randomize-order", action="store_true",
- default=False,
- help=("Run tests in random order (useful for "
- "tracking down corruption)"))
- option_parser.add_option("", "--run-chunk",
- default=None,
- help=("Run a specified chunk (n:l), the "
- "nth of len l, of the layout tests"))
- option_parser.add_option("", "--run-part",
- default=None,
- help=("Run a specified part (n:m), the nth of m"
- " parts, of the layout tests"))
- option_parser.add_option("", "--batch-size",
- default=None,
- help=("Run a the tests in batches (n), after "
- "every n tests, the test shell is "
- "relaunched."))
- option_parser.add_option("", "--builder-name",
- default="DUMMY_BUILDER_NAME",
- help=("The name of the builder shown on the "
- "waterfall running this script e.g. "
- "WebKit."))
- option_parser.add_option("", "--build-name",
- default="DUMMY_BUILD_NAME",
- help=("The name of the builder used in its path, "
- "e.g. webkit-rel."))
- option_parser.add_option("", "--build-number",
- default="DUMMY_BUILD_NUMBER",
- help=("The build number of the builder running"
- "this script."))
- option_parser.add_option("", "--experimental-fully-parallel",
- action="store_true", default=False,
- help="run all tests in parallel")
+
+ # FIXME: All of these options should be stored closer to the code which
+ # FIXME: actually uses them. configuration_options should move
+ # FIXME: to WebKitPort and be shared across all scripts.
+ configuration_options = [
+ optparse.make_option("-t", "--target", dest="configuration",
+ help="(DEPRECATED)"),
+ # FIXME: --help should display which configuration is default.
+ optparse.make_option('--debug', action='store_const', const='Debug',
+ dest="configuration",
+ help='Set the configuration to Debug'),
+ optparse.make_option('--release', action='store_const',
+ const='Release', dest="configuration",
+ help='Set the configuration to Release'),
+ # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
+ ]
+
+ logging_options = [
+ optparse.make_option("--log", action="store",
+ default=LOG_DEFAULT_VALUE,
+ help=("log various types of data. The argument value should be a "
+ "comma-separated list of values from: %s (defaults to "
+ "--log %s)" % (LOG_VALUES, LOG_DEFAULT_VALUE))),
+ optparse.make_option("-v", "--verbose", action="store_true",
+ default=False, help="include debug-level logging"),
+ optparse.make_option("--sources", action="store_true",
+ help="show expected result file path for each test " +
+ "(implies --verbose)"),
+ # old-run-webkit-tests has a --slowest option which just prints
+ # the slowest 10.
+ optparse.make_option("--num-slow-tests-to-log", default=50,
+ help="Number of slow tests whose timings to print."),
+ ]
+
+ # FIXME: These options should move onto the ChromiumPort.
+ chromium_options = [
+ optparse.make_option("--chromium", action="store_true", default=False,
+ help="use the Chromium port"),
+ optparse.make_option("--startup-dialog", action="store_true",
+ default=False, help="create a dialog on DumpRenderTree startup"),
+ optparse.make_option("--gp-fault-error-box", action="store_true",
+ default=False, help="enable Windows GP fault error box"),
+ optparse.make_option("--nocheck-sys-deps", action="store_true",
+ default=False,
+ help="Don't check the system dependencies (themes)"),
+ optparse.make_option("--use-drt", action="store_true",
+ default=False,
+ help="Use DumpRenderTree instead of test_shell"),
+ ]
+
+ # Missing Mac-specific old-run-webkit-tests options:
+ # FIXME: Need: -g, --guard for guard malloc support on Mac.
+ # FIXME: Need: -l --leaks Enable leaks checking.
+ # FIXME: Need: --sample-on-timeout Run sample on timeout
+
+ old_run_webkit_tests_compat = [
+ # NRWT doesn't generate results by default anyway.
+ _compat_shim_option("--no-new-test-results"),
+ # NRWT doesn't sample on timeout yet anyway.
+ _compat_shim_option("--no-sample-on-timeout"),
+ # FIXME: NRWT needs to support remote links eventually.
+ _compat_shim_option("--use-remote-links-to-tests"),
+ # FIXME: NRWT doesn't need this option as much since failures are
+ # designed to be cheap. We eventually plan to add this support.
+ _compat_shim_option("--exit-after-n-failures", nargs=1),
+ ]
+
+ results_options = [
+ # Needed for bots: --use-remote-links-to-tests Link to test files
+ # within the SVN repository in the results.
+ optparse.make_option("-p", "--pixel-tests", action="store_true",
+ dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
+ optparse.make_option("--no-pixel-tests", action="store_false",
+ dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
+ optparse.make_option("--fuzzy-pixel-tests", action="store_true",
+ default=False,
+ help="Also use fuzzy matching to compare pixel test outputs."),
+ # old-run-webkit-tests allows a specific tolerance: --tolerance t
+ # Ignore image differences less than this percentage (default: 0.1)
+ optparse.make_option("--results-directory",
+ default="layout-test-results",
+ help="Output results directory source dir, relative to Debug or "
+ "Release"),
+ optparse.make_option("--new-baseline", action="store_true",
+ default=False, help="Save all generated results as new baselines "
+ "into the platform directory, overwriting whatever's "
+ "already there."),
+ optparse.make_option("--no-show-results", action="store_false",
+ default=True, dest="show_results",
+ help="Don't launch a browser with results after the tests "
+ "are done"),
+ # FIXME: We should have a helper function to do this sort of
+ # deprecated mapping and automatically log, etc.
+ optparse.make_option("--noshow-results", action="store_false",
+ dest="show_results",
+ help="Deprecated, same as --no-show-results."),
+ optparse.make_option("--no-launch-safari", action="store_false",
+ dest="show_results",
+ help="old-run-webkit-tests compat, same as --noshow-results."),
+ # old-run-webkit-tests:
+ # --[no-]launch-safari Launch (or do not launch) Safari to display
+ # test results (default: launch)
+ optparse.make_option("--full-results-html", action="store_true",
+ default=False,
+ help="Show all failures in results.html, rather than only "
+ "regressions"),
+ optparse.make_option("--clobber-old-results", action="store_true",
+ default=False, help="Clobbers test results from previous runs."),
+ optparse.make_option("--platform",
+ help="Override the platform for expected results"),
+ # old-run-webkit-tests also has HTTP toggle options:
+ # --[no-]http Run (or do not run) http tests
+ # (default: run)
+ # --[no-]wait-for-httpd Wait for httpd if some other test
+ # session is using it already (same
+ # as WEBKIT_WAIT_FOR_HTTPD=1).
+ # (default: 0)
+ ]
+
+ test_options = [
+ optparse.make_option("--build", dest="build",
+ action="store_true", default=True,
+ help="Check to ensure the DumpRenderTree build is up-to-date "
+ "(default)."),
+ optparse.make_option("--no-build", dest="build",
+ action="store_false", help="Don't check to see if the "
+ "DumpRenderTree build is up-to-date."),
+ # old-run-webkit-tests has --valgrind instead of wrapper.
+ optparse.make_option("--wrapper",
+ help="wrapper command to insert before invocations of "
+ "DumpRenderTree; option is split on whitespace before "
+ "running. (Example: --wrapper='valgrind --smc-check=all')"),
+ # old-run-webkit-tests:
+ # -i|--ignore-tests Comma-separated list of directories
+ # or tests to ignore
+ optparse.make_option("--test-list", action="append",
+ help="read list of tests to run from file", metavar="FILE"),
+ # old-run-webkit-tests uses --skipped==[default|ignore|only]
+ # instead of --force:
+ optparse.make_option("--force", action="store_true", default=False,
+ help="Run all tests, even those marked SKIP in the test list"),
+ optparse.make_option("--use-apache", action="store_true",
+ default=False, help="Whether to use apache instead of lighttpd."),
+ optparse.make_option("--time-out-ms",
+ help="Set the timeout for each test"),
+ # old-run-webkit-tests calls --randomize-order --random:
+ optparse.make_option("--randomize-order", action="store_true",
+ default=False, help=("Run tests in random order (useful "
+ "for tracking down corruption)")),
+ optparse.make_option("--run-chunk",
+ help=("Run a specified chunk (n:l), the nth of len l, "
+ "of the layout tests")),
+ optparse.make_option("--run-part", help=("Run a specified part (n:m), "
+ "the nth of m parts, of the layout tests")),
+ # old-run-webkit-tests calls --batch-size: --nthly n
+ # Restart DumpRenderTree every n tests (default: 1000)
+ optparse.make_option("--batch-size",
+ help=("Run a the tests in batches (n), after every n tests, "
+ "DumpRenderTree is relaunched.")),
+ # old-run-webkit-tests calls --run-singly: -1|--singly
+ # Isolate each test case run (implies --nthly 1 --verbose)
+ optparse.make_option("--run-singly", action="store_true",
+ default=False, help="run a separate DumpRenderTree for each test"),
+ optparse.make_option("--child-processes",
+ help="Number of DumpRenderTrees to run in parallel."),
+ # FIXME: Display default number of child processes that will run.
+ optparse.make_option("--experimental-fully-parallel",
+ action="store_true", default=False,
+ help="run all tests in parallel"),
+ # FIXME: Need --exit-after-n-failures N
+ # Exit after the first N failures instead of running all tests
+ # FIXME: consider: --iterations n
+ # Number of times to run the set of tests (e.g. ABCABCABC)
+ optparse.make_option("--print-unexpected-results", action="store_true",
+ default=False, help="print the tests in the last run that "
+ "had unexpected results."),
+ optparse.make_option("--retry-unexpected-results", action="store_true",
+ default=False, help="re-try the tests in the last run that "
+ "had unexpected results."),
+ ]
+
+ misc_options = [
+ optparse.make_option("--lint-test-files", action="store_true",
+ default=False, help=("Makes sure the test files parse for all "
+ "configurations. Does not run any tests.")),
+ ]
+
+ # FIXME: Move these into json_results_generator.py
+ results_json_options = [
+ optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME",
+ help=("The name of the builder shown on the waterfall running "
+ "this script e.g. WebKit.")),
+ optparse.make_option("--build-name", default="DUMMY_BUILD_NAME",
+ help=("The name of the builder used in its path, e.g. "
+ "webkit-rel.")),
+ optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER",
+ help=("The build number of the builder running this script.")),
+ ]
+
+ option_list = (configuration_options + logging_options +
+ chromium_options + results_options + test_options +
+ misc_options + results_json_options +
+ old_run_webkit_tests_compat)
+ option_parser = optparse.OptionParser(option_list=option_list)
return option_parser.parse_args(args)
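
A usage sketch of the rebuilt parser, assuming webkitpy is on the import path (the new unit test below imports the module the same way): --debug and --release are store_const writes onto dest='configuration', and the paired pixel-test flags share dest='pixel_tests' with no default, so that attribute stays None until a flag makes the choice explicit:

    import webkitpy.layout_tests.run_webkit_tests as run_webkit_tests

    options, args = run_webkit_tests.parse_args(
        ["--debug", "--no-pixel-tests", "fast/html"])
    print(options.configuration)  # 'Debug'
    print(options.pixel_tests)    # False (None if neither pixel flag is given)
    print(args)                   # ['fast/html']
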
if '__main__' == __name__:
options, args = parse_args()
- main(options, args)
+ sys.exit(main(options, args))
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
new file mode 100644
index 0000000..9fe0e74
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for run_webkit_tests."""
+
+import os
+import sys
+import unittest
+
+import webkitpy.layout_tests.run_webkit_tests as run_webkit_tests
+
+
+def passing_run(args):
+ options, args = run_webkit_tests.parse_args(args)
+ res = run_webkit_tests.main(options, args, False)
+ return res == 0
+
+
+class MainTest(unittest.TestCase):
+ def test_fast(self):
+ self.assertTrue(passing_run(['--platform', 'test',
+ 'fast/html']))
+ self.assertTrue(passing_run(['--platform', 'test',
+ '--run-singly',
+ 'fast/html']))
+ self.assertTrue(passing_run(['--platform', 'test',
+ 'fast/html/article-element.html']))
+ self.assertTrue(passing_run(['--platform', 'test',
+ '--child-processes', '1',
+ '--log', 'unexpected',
+ 'fast/html']))
+
+
+class DryrunTest(unittest.TestCase):
+ def test_basics(self):
+ self.assertTrue(passing_run(['--platform', 'dryrun',
+ 'fast/html']))
+ #self.assertTrue(passing_run(['--platform', 'dryrun-mac',
+ # 'fast/html']))
+ #self.assertTrue(passing_run(['--platform', 'dryrun-chromium-mac',
+ # 'fast/html']))
+ #self.assertTrue(passing_run(['--platform', 'dryrun-chromium-win',
+ # 'fast/html']))
+ #self.assertTrue(passing_run(['--platform', 'dryrun-chromium-linux',
+ # 'fast/html']))
+
+if __name__ == '__main__':
+ unittest.main()
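
To exercise this suite outside of unittest.main(), from an interactive session or another script, the standard loader works; a sketch, again assuming webkitpy is importable:

    import unittest

    import webkitpy.layout_tests.run_webkit_tests_unittest as tests

    suite = unittest.defaultTestLoader.loadTestsFromModule(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
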
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
index 89dd192..64dfb20 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
@@ -36,13 +36,15 @@ import logging
import os
import shutil
-from layout_package import test_failures
-from test_types import test_type_base
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests.test_types import test_type_base
+
+_log = logging.getLogger("webkitpy.layout_tests.test_types.fuzzy_image_diff")
class FuzzyImageDiff(test_type_base.TestTypeBase):
- def compare_output(self, filename, output, test_args, target):
+ def compare_output(self, filename, output, test_args, configuration):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
@@ -55,14 +57,14 @@ class FuzzyImageDiff(test_type_base.TestTypeBase):
expected_png_file = self._port.expected_filename(filename, '.png')
if test_args.show_sources:
- logging.debug('Using %s' % expected_png_file)
+ _log.debug('Using %s' % expected_png_file)
# Also report a missing expected PNG file.
if not os.path.isfile(expected_png_file):
failures.append(test_failures.FailureMissingImage(self))
# Run the fuzzymatcher
- r = port.fuzzy_diff(test_args.png_path, expected_png_file)
+ r = self._port.fuzzy_diff(test_args.png_path, expected_png_file)
if r != 0:
failures.append(test_failures.FailureFuzzyFailure(self))
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
index 1df7ca3..b414358 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
@@ -39,13 +39,15 @@ import logging
import os
import shutil
-from layout_package import test_failures
-from test_types import test_type_base
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests.test_types import test_type_base
# Cache whether we have the image_diff executable available.
_compare_available = True
_compare_msg_printed = False
+_log = logging.getLogger("webkitpy.layout_tests.test_types.image_diff")
+
class ImageDiff(test_type_base.TestTypeBase):
@@ -82,12 +84,13 @@ class ImageDiff(test_type_base.TestTypeBase):
self._save_baseline_data(filename, png_data, ".png")
self._save_baseline_data(filename, checksum, ".checksum")
- def _create_image_diff(self, port, filename, target):
+ def _create_image_diff(self, port, filename, configuration):
"""Creates the visual diff of the expected/actual PNGs.
Args:
filename: the name of the test
- target: Debug or Release
+ configuration: Debug or Release
+ Returns True if the files are different, False if they match
"""
diff_filename = self.output_filename(filename,
self.FILENAME_SUFFIX_COMPARE)
@@ -96,9 +99,10 @@ class ImageDiff(test_type_base.TestTypeBase):
expected_filename = self.output_filename(filename,
self.FILENAME_SUFFIX_EXPECTED + '.png')
+ result = True
try:
_compare_available = True
- result = port.diff_image(actual_filename, expected_filename,
+ result = port.diff_image(expected_filename, actual_filename,
diff_filename)
except ValueError:
_compare_available = False
@@ -106,12 +110,12 @@ class ImageDiff(test_type_base.TestTypeBase):
global _compare_msg_printed
if not _compare_available and not _compare_msg_printed:
_compare_msg_printed = True
- print('image_diff not found. Make sure you have a ' + target +
- ' build of the image_diff executable.')
+ print('image_diff not found. Make sure you have a ' +
+ configuration + ' build of the image_diff executable.')
return result
- def compare_output(self, port, filename, output, test_args, target):
+ def compare_output(self, port, filename, output, test_args, configuration):
"""Implementation of CompareOutput that checks the output image and
checksum against the expected files from the LayoutTest directory.
"""
@@ -133,8 +137,8 @@ class ImageDiff(test_type_base.TestTypeBase):
expected_png_file = self._port.expected_filename(filename, '.png')
if test_args.show_sources:
- logging.debug('Using %s' % expected_hash_file)
- logging.debug('Using %s' % expected_png_file)
+ _log.debug('Using %s' % expected_hash_file)
+ _log.debug('Using %s' % expected_png_file)
try:
expected_hash = open(expected_hash_file, "r").read()
@@ -146,9 +150,9 @@ class ImageDiff(test_type_base.TestTypeBase):
if not os.path.isfile(expected_png_file):
# Report a missing expected PNG file.
- self.write_output_files(port, filename, '', '.checksum',
+ self.write_output_files(port, filename, '.checksum',
test_args.hash, expected_hash,
- diff=False, wdiff=False)
+ print_text_diffs=False)
self._copy_output_png(filename, test_args.png_path, '-actual.png')
failures.append(test_failures.FailureMissingImage(self))
return failures
@@ -156,25 +160,22 @@ class ImageDiff(test_type_base.TestTypeBase):
# Hash matched (no diff needed, okay to return).
return failures
-
- self.write_output_files(port, filename, '', '.checksum',
+ self.write_output_files(port, filename, '.checksum',
test_args.hash, expected_hash,
- diff=False, wdiff=False)
+ print_text_diffs=False)
self._copy_output_png(filename, test_args.png_path, '-actual.png')
self._copy_output_png(filename, expected_png_file, '-expected.png')
- # Even though we only use result in one codepath below but we
+ # Even though we only use the result in one codepath below, we
# still need to call CreateImageDiff for other codepaths.
- result = self._create_image_diff(port, filename, target)
+ images_are_different = self._create_image_diff(port, filename, configuration)
if expected_hash == '':
failures.append(test_failures.FailureMissingImageHash(self))
elif test_args.hash != expected_hash:
- # Hashes don't match, so see if the images match. If they do, then
- # the hash is wrong.
- if result == 0:
- failures.append(test_failures.FailureImageHashIncorrect(self))
- else:
+ if images_are_different:
failures.append(test_failures.FailureImageHashMismatch(self))
+ else:
+ failures.append(test_failures.FailureImageHashIncorrect(self))
return failures
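
The rewritten branch reads as a small decision table: an empty expected hash means the baseline checksum is missing; a hash mismatch is only a pixel regression when the images really differ, and matching pixels mean the stored checksum itself is stale. A sketch of just that classification, with strings standing in for the test_failures classes:

    def classify_hash_failure(expected_hash, actual_hash, images_are_different):
        # Mirrors the branch above; returns a failure name or None.
        if expected_hash == '':
            return 'FailureMissingImageHash'
        if actual_hash == expected_hash:
            return None  # hashes matched; compare_output returned earlier
        if images_are_different:
            return 'FailureImageHashMismatch'   # genuine pixel difference
        return 'FailureImageHashIncorrect'      # pixels match, checksum stale

    print(classify_hash_failure('abc', 'def', True))   # FailureImageHashMismatch
    print(classify_hash_failure('abc', 'def', False))  # FailureImageHashIncorrect
    print(classify_hash_failure('', 'def', True))      # FailureMissingImageHash
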
@@ -188,10 +189,7 @@ class ImageDiff(test_type_base.TestTypeBase):
True if two files are different.
False otherwise.
"""
-
try:
- result = port.diff_image(file1, file2)
+ return port.diff_image(file1, file2)
except ValueError, e:
return True
-
- return result == 1
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
index efa2e8c..4c99be0 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
@@ -37,6 +37,8 @@ import errno
import logging
import os.path
+_log = logging.getLogger("webkitpy.layout_tests.test_types.test_type_base")
+
class TestArguments(object):
"""Struct-like wrapper for additional arguments needed by
@@ -68,19 +70,18 @@ class TestTypeBase(object):
FILENAME_SUFFIX_EXPECTED = "-expected"
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
+ FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_COMPARE = "-diff.png"
- def __init__(self, port, platform, root_output_dir):
+ def __init__(self, port, root_output_dir):
"""Initialize a TestTypeBase object.
Args:
- platform: the platform (e.g., 'chromium-mac-leopard')
- identifying the platform-specific results to be used.
+ port: object implementing port-specific information and methods
root_output_dir: The unix style path to the output dir.
"""
self._root_output_dir = root_output_dir
self._port = port
- self._platform = platform
def _make_output_directory(self, filename):
"""Creates the output directory (if needed) for a given test
@@ -90,7 +91,7 @@ class TestTypeBase(object):
self._port.maybe_make_directory(os.path.split(output_filename)[0])
def _save_baseline_data(self, filename, data, modifier):
- """Saves a new baseline file into the platform directory.
+ """Saves a new baseline file into the port's baseline directory.
The file will be named simply "<test>-expected<modifier>", suitable for
use as the expected results in a later run.
@@ -102,15 +103,16 @@ class TestTypeBase(object):
"""
relative_dir = os.path.dirname(
self._port.relative_test_filename(filename))
- output_dir = os.path.join(
- self._port.chromium_baseline_path(self._platform), relative_dir)
+
+ baseline_path = self._port.baseline_path()
+ output_dir = os.path.join(baseline_path, relative_dir)
output_file = os.path.basename(os.path.splitext(filename)[0] +
self.FILENAME_SUFFIX_EXPECTED + modifier)
self._port.maybe_make_directory(output_dir)
output_path = os.path.join(output_dir, output_file)
- logging.debug('writing new baseline to "%s"' % (output_path))
- open(output_path, "wb").write(data)
+ _log.debug('writing new baseline to "%s"' % (output_path))
+ self._write_into_file_at_path(output_path, data)
def output_filename(self, filename, modifier):
"""Returns a filename inside the output dir that contains modifier.
@@ -130,7 +132,7 @@ class TestTypeBase(object):
self._port.relative_test_filename(filename))
return os.path.splitext(output_filename)[0] + modifier
- def compare_output(self, port, filename, output, test_args, target):
+ def compare_output(self, port, filename, output, test_args, configuration):
"""Method that compares the output from the test with the
expected value.
@@ -141,56 +143,59 @@ class TestTypeBase(object):
output: a string containing the output of the test
test_args: a TestArguments object holding optional additional
arguments
- target: Debug or Release
+ configuration: Debug or Release
Return:
a list of TestFailure objects, empty if the test passes
"""
raise NotImplemented
- def write_output_files(self, port, filename, test_type, file_type,
- output, expected, diff=True, wdiff=False):
+ def _write_into_file_at_path(self, file_path, contents):
+ file = open(file_path, "wb")
+ file.write(contents)
+ file.close()
+
+ def write_output_files(self, port, filename, file_type,
+ output, expected, print_text_diffs=False):
"""Writes the test output, the expected output and optionally the diff
between the two to files in the results directory.
The full output filename of the actual, for example, will be
- <filename><test_type>-actual<file_type>
+ <filename>-actual<file_type>
For instance,
- my_test-simp-actual.txt
+ my_test-actual.txt
Args:
filename: The test filename
- test_type: A string describing the test type, e.g. "simp"
file_type: A string describing the test output file type, e.g. ".txt"
output: A string containing the test output
expected: A string containing the expected test output
- diff: if True, write a file containing the diffs too. This should be
- False for results that are not text
- wdiff: if True, write an HTML file containing word-by-word diffs
+ print_text_diffs: True for text diffs. (FIXME: We should be able to get this from the file type?)
"""
self._make_output_directory(filename)
- actual_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_ACTUAL + file_type)
- expected_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_EXPECTED + file_type)
+ actual_filename = self.output_filename(filename, self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(filename, self.FILENAME_SUFFIX_EXPECTED + file_type)
if output:
- open(actual_filename, "wb").write(output)
+ self._write_into_file_at_path(actual_filename, output)
if expected:
- open(expected_filename, "wb").write(expected)
+ self._write_into_file_at_path(expected_filename, expected)
if not output or not expected:
return
- if diff:
- diff = port.diff_text(expected, output, expected_filename,
- actual_filename)
- diff_filename = self.output_filename(filename,
- test_type + self.FILENAME_SUFFIX_DIFF + file_type)
- open(diff_filename, "wb").write(diff)
-
- if wdiff:
- # Shell out to wdiff to get colored inline diffs.
- wdiff = port.wdiff_text(expected_filename, actual_filename)
- filename = self.output_filename(filename, test_type +
- self.FILENAME_SUFFIX_WDIFF)
- out = open(filename, 'wb').write(wdiff)
+ if not print_text_diffs:
+ return
+
+ diff = port.diff_text(expected, output, expected_filename, actual_filename)
+ diff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_DIFF + file_type)
+ self._write_into_file_at_path(diff_filename, diff)
+
+ # Shell out to wdiff to get colored inline diffs.
+ wdiff = port.wdiff_text(expected_filename, actual_filename)
+ wdiff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_WDIFF)
+ self._write_into_file_at_path(wdiff_filename, wdiff)
+
+ # Use WebKit's PrettyPatch.rb to get an HTML diff.
+ pretty_patch = port.pretty_patch_text(diff_filename)
+ pretty_patch_filename = self.output_filename(filename, self.FILENAME_SUFFIX_PRETTY_PATCH)
+ self._write_into_file_at_path(pretty_patch_filename, pretty_patch)
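
With the test_type prefix gone, every artifact for a test shares one basename plus a class-constant suffix. A naming-only sketch; FILENAME_SUFFIX_ACTUAL is assumed to be "-actual", consistent with the my_test-actual.txt example in the docstring above:

    FILENAME_SUFFIX_ACTUAL = "-actual"  # assumed; not shown in this hunk
    FILENAME_SUFFIX_EXPECTED = "-expected"
    FILENAME_SUFFIX_DIFF = "-diff"
    FILENAME_SUFFIX_WDIFF = "-wdiff.html"
    FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"

    def artifact_names(test_name, file_type, print_text_diffs):
        # Filenames write_output_files() composes for one test.
        names = [test_name + FILENAME_SUFFIX_ACTUAL + file_type,
                 test_name + FILENAME_SUFFIX_EXPECTED + file_type]
        if print_text_diffs:
            names += [test_name + FILENAME_SUFFIX_DIFF + file_type,
                      test_name + FILENAME_SUFFIX_WDIFF,
                      test_name + FILENAME_SUFFIX_PRETTY_PATCH]
        return names

    print(artifact_names("my_test", ".txt", True))
    # ['my_test-actual.txt', 'my_test-expected.txt', 'my_test-diff.txt',
    #  'my_test-wdiff.html', 'my_test-pretty-diff.html']
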
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
index 54b332b..8f7907c 100644
--- a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
@@ -37,8 +37,10 @@ import errno
import logging
import os.path
-from layout_package import test_failures
-from test_types import test_type_base
+from webkitpy.layout_tests.layout_package import test_failures
+from webkitpy.layout_tests.test_types import test_type_base
+
+_log = logging.getLogger("webkitpy.layout_tests.test_types.text_diff")
def is_render_tree_dump(data):
@@ -63,7 +65,7 @@ class TestTextDiff(test_type_base.TestTypeBase):
# Read the port-specific expected text.
expected_filename = self._port.expected_filename(filename, '.txt')
if show_sources:
- logging.debug('Using %s' % expected_filename)
+ _log.debug('Using %s' % expected_filename)
return self.get_normalized_text(expected_filename)
@@ -78,7 +80,7 @@ class TestTextDiff(test_type_base.TestTypeBase):
# Normalize line endings
return text.strip("\r\n").replace("\r\n", "\n") + "\n"
- def compare_output(self, port, filename, output, test_args, target):
+ def compare_output(self, port, filename, output, test_args, configuration):
"""Implementation of CompareOutput that checks the output text against
the expected text from the LayoutTest directory."""
failures = []
@@ -96,8 +98,8 @@ class TestTextDiff(test_type_base.TestTypeBase):
# Write output files for new tests, too.
if port.compare_text(output, expected):
# Text doesn't match, write output files.
- self.write_output_files(port, filename, "", ".txt", output,
- expected, diff=True, wdiff=True)
+ self.write_output_files(port, filename, ".txt", output,
+ expected, print_text_diffs=True)
if expected == '':
failures.append(test_failures.FailureMissingResult(self))