Diffstat (limited to 'Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py')
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py  63
1 files changed, 6 insertions, 57 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
index ad65016..09bfc31 100644
--- a/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
@@ -28,8 +28,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Defines the interface TestTypeBase which other test types inherit from.
-
-Also defines the TestArguments "struct" to pass them additional arguments.
"""
import cgi
@@ -39,21 +37,6 @@ import logging
_log = logging.getLogger("webkitpy.layout_tests.test_types.test_type_base")
-class TestArguments(object):
- """Struct-like wrapper for additional arguments needed by
- specific tests."""
- # Whether to save new baseline results.
- new_baseline = False
-
- # Path to the actual PNG file generated by pixel tests
- png_path = None
-
- # Value of checksum generated by pixel tests.
- hash = None
-
- # Whether to use wdiff to generate by-word diffs.
- wdiff = False
-
# Python bug workaround. See the wdiff code in WriteOutputFiles for an
# explanation.
_wdiff_available = True
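
The hunk above deletes the TestArguments struct; callers now hand compare_output() the parsed optparse options object directly. A minimal sketch of the difference from a caller's point of view, assuming illustrative option names (the port, test_type, and output objects are placeholders, not part of this patch):

    import optparse

    # Option names are illustrative; the real parser lives in the
    # layout-test runner, not in this file.
    parser = optparse.OptionParser()
    parser.add_option('--new-baseline', action='store_true', default=False)
    parser.add_option('--wdiff', action='store_true', default=False)
    (options, args) = parser.parse_args([])

    # Old style (removed above): build a TestArguments struct by hand.
    #   test_args = TestArguments()
    #   test_args.new_baseline = options.new_baseline
    #   test_args.wdiff = options.wdiff
    #   failures = test_type.compare_output(port, filename, test_args,
    #                                       actual_output, expected_output)
    #
    # New style: pass the optparse options object straight through.
    #   failures = test_type.compare_output(port, filename, options,
    #                                       actual_driver_output,
    #                                       expected_driver_output)
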
@@ -87,39 +70,6 @@ class TestTypeBase(object):
self._port.relative_test_filename(filename))
fs.maybe_make_directory(fs.dirname(output_filename))
- def _save_baseline_data(self, filename, data, modifier, encoding,
- generate_new_baseline=True):
- """Saves a new baseline file into the port's baseline directory.
-
- The file will be named simply "<test>-expected<modifier>", suitable for
- use as the expected results in a later run.
-
- Args:
- filename: path to the test file
- data: result to be saved as the new baseline
- modifier: type of the result file, e.g. ".txt" or ".png"
- encoding: file encoding (none, "utf-8", etc.)
- generate_new_baseline: whether to generate a new, platform-specific
- baseline, or update the existing one
- """
-
- port = self._port
- fs = self._port._filesystem
- if generate_new_baseline:
- relative_dir = fs.dirname(port.relative_test_filename(filename))
- baseline_path = port.baseline_path()
- output_dir = fs.join(baseline_path, relative_dir)
- output_file = fs.basename(fs.splitext(filename)[0] +
- self.FILENAME_SUFFIX_EXPECTED + modifier)
- fs.maybe_make_directory(output_dir)
- output_path = fs.join(output_dir, output_file)
- _log.debug('writing new baseline result "%s"' % (output_path))
- else:
- output_path = port.expected_filename(filename, modifier)
- _log.debug('resetting baseline result "%s"' % output_path)
-
- port.update_baseline(output_path, data, encoding)
-
def output_filename(self, filename, modifier):
"""Returns a filename inside the output dir that contains modifier.
@@ -139,8 +89,8 @@ class TestTypeBase(object):
self._port.relative_test_filename(filename))
return fs.splitext(output_filename)[0] + modifier
- def compare_output(self, port, filename, test_args, actual_test_output,
- expected_test_output):
+ def compare_output(self, port, filename, options, actual_driver_output,
+ expected_driver_output):
"""Method that compares the output from the test with the
expected value.
@@ -149,12 +99,11 @@ class TestTypeBase(object):
Args:
port: object implementing port-specific information and methods
filename: absolute filename to test file
- test_args: a TestArguments object holding optional additional
- arguments
- actual_test_output: a TestOutput object which represents actual test
+ options: command line argument object from optparse
+ actual_driver_output: a DriverOutput object which represents actual test
output
- expected_test_output: a TestOutput object which represents a expected
- test output
+ expected_driver_output: an ExpectedDriverOutput object which represents an
+ expected test output
Return:
a list of TestFailure objects, empty if the test passes
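
To make the new contract concrete, a hedged sketch (not part of this patch) of how a test type subclass might implement compare_output() with the new signature; the class name and the .text attribute access are assumptions:

    from webkitpy.layout_tests.test_types.test_type_base import TestTypeBase

    class HypotheticalTextDiff(TestTypeBase):
        """Sketch only: compares plain-text driver output."""
        def compare_output(self, port, filename, options, actual_driver_output,
                           expected_driver_output):
            failures = []
            if actual_driver_output.text != expected_driver_output.text:
                # A real test type would append the appropriate TestFailure
                # subclass here rather than a bare string.
                failures.append('text mismatch in %s' % filename)
            return failures
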