summaryrefslogtreecommitdiffstats
path: root/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
diff options
context:
space:
mode:
Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py')
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py41
1 files changed, 27 insertions, 14 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
index a8c716f..c38cb8f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py
@@ -82,7 +82,7 @@ class SingleTestRunner:
# For example, if 'foo.html' has two expectation files, 'foo-expected.html' and
# 'foo-expected.txt', we should warn users. One test file must be used exclusively
# in either layout tests or reftests, but not in both.
- for suffix in ['.txt', '.checksum', '.png']:
+ for suffix in ('.txt', '.checksum', '.png', '.wav'):
expected_filename = self._port.expected_filename(self._filename, suffix)
if fs.exists(expected_filename):
_log.error('The reftest (%s) can not have an expectation file (%s).'
@@ -91,7 +91,8 @@ class SingleTestRunner:
def _expected_driver_output(self):
return base.DriverOutput(self._port.expected_text(self._filename),
self._port.expected_image(self._filename),
- self._port.expected_checksum(self._filename))
+ self._port.expected_checksum(self._filename),
+ self._port.expected_audio(self._filename))
def _should_fetch_expected_checksum(self):
return (self._options.pixel_tests and
@@ -122,14 +123,14 @@ class SingleTestRunner:
driver_output = self._driver.run_test(self._driver_input())
expected_driver_output = self._expected_driver_output()
test_result = self._compare_output(driver_output, expected_driver_output)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output, expected_driver_output, test_result.failures)
return test_result
def _run_rebaseline(self):
driver_output = self._driver.run_test(self._driver_input())
failures = self._handle_error(driver_output)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output, None, failures)
# FIXME: If the test crashed or timed out, it might be better to avoid
# writing new baselines.
@@ -142,6 +143,9 @@ class SingleTestRunner:
# DumpRenderTree may not output utf-8 text (e.g. webarchives).
self._save_baseline_data(driver_output.text, ".txt",
generate_new_baseline=self._options.new_baseline)
+ if driver_output.audio:
+ self._save_baseline_data(driver_output.audio, '.wav',
+ generate_new_baseline=self._options.new_baseline)
if self._options.pixel_tests and driver_output.image_hash:
self._save_baseline_data(driver_output.image, ".png",
generate_new_baseline=self._options.new_baseline)
@@ -190,7 +194,7 @@ class SingleTestRunner:
failures = []
fs = self._port._filesystem
if driver_output.timeout:
- failures.append(test_failures.FailureTimeout(reference_filename))
+ failures.append(test_failures.FailureTimeout(bool(reference_filename)))
if reference_filename:
testname = self._port.relative_test_filename(reference_filename)
@@ -198,7 +202,7 @@ class SingleTestRunner:
testname = self._testname
if driver_output.crash:
- failures.append(test_failures.FailureCrash(reference_filename))
+ failures.append(test_failures.FailureCrash(bool(reference_filename)))
_log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, testname,
driver_output.error))
elif driver_output.error:
@@ -216,19 +220,28 @@ class SingleTestRunner:
return TestResult(self._filename, failures, driver_output.test_time)
failures.extend(self._compare_text(driver_output.text, expected_driver_output.text))
+ failures.extend(self._compare_audio(driver_output.audio, expected_driver_output.audio))
if self._options.pixel_tests:
failures.extend(self._compare_image(driver_output, expected_driver_output))
return TestResult(self._filename, failures, driver_output.test_time)
def _compare_text(self, actual_text, expected_text):
failures = []
- if self._port.compare_text(self._get_normalized_output_text(actual_text),
- # Assuming expected_text is already normalized.
- expected_text):
- if expected_text == '':
- failures.append(test_failures.FailureMissingResult())
- else:
- failures.append(test_failures.FailureTextMismatch())
+ if (expected_text and actual_text and
+ # Assuming expected_text is already normalized.
+ self._port.compare_text(self._get_normalized_output_text(actual_text), expected_text)):
+ failures.append(test_failures.FailureTextMismatch())
+ elif actual_text and not expected_text:
+ failures.append(test_failures.FailureMissingResult())
+ return failures
+
+ def _compare_audio(self, actual_audio, expected_audio):
+ failures = []
+ if (expected_audio and actual_audio and
+ self._port.compare_audio(actual_audio, expected_audio)):
+ failures.append(test_failures.FailureAudioMismatch())
+ elif actual_audio and not expected_audio:
+ failures.append(test_failures.FailureMissingAudio())
return failures
def _get_normalized_output_text(self, output):
@@ -259,7 +272,7 @@ class SingleTestRunner:
base.DriverInput(self._reference_filename, self._timeout, driver_output1.image_hash))
test_result = self._compare_output_with_reference(driver_output1, driver_output2)
- test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename,
+ test_result_writer.write_test_result(self._port, self._filename,
driver_output1, driver_output2, test_result.failures)
return test_result