path: root/WebKitTools/Scripts
author      Steve Block <steveblock@google.com>    2009-10-08 17:19:54 +0100
committer   Steve Block <steveblock@google.com>    2009-10-20 00:41:58 +0100
commit      231d4e3152a9c27a73b6ac7badbe6be673aa3ddf (patch)
tree        a6c7e2d6cd7bfa7011cc39abbb436142d7a4a7c8 /WebKitTools/Scripts
parent      e196732677050bd463301566a68a643b6d14b907 (diff)
download    external_webkit-231d4e3152a9c27a73b6ac7badbe6be673aa3ddf.zip
            external_webkit-231d4e3152a9c27a73b6ac7badbe6be673aa3ddf.tar.gz
            external_webkit-231d4e3152a9c27a73b6ac7badbe6be673aa3ddf.tar.bz2
Merge webkit.org at R49305 : Automatic merge by git.
Change-Id: I8968561bc1bfd72b8923b7118d3728579c6dbcc7
Diffstat (limited to 'WebKitTools/Scripts')
-rw-r--r--   WebKitTools/Scripts/VCSUtils.pm | 89
-rwxr-xr-x   WebKitTools/Scripts/bisect-builds | 2
-rwxr-xr-x   WebKitTools/Scripts/bugzilla-tool | 555
-rwxr-xr-x   WebKitTools/Scripts/build-webkit | 115
-rwxr-xr-x   WebKitTools/Scripts/check-for-global-initializers | 5
-rwxr-xr-x   WebKitTools/Scripts/check-webkit-style | 2
-rwxr-xr-x   WebKitTools/Scripts/commit-log-editor | 33
-rwxr-xr-x   WebKitTools/Scripts/make-script-test-wrappers (renamed from WebKitTools/Scripts/make-js-test-wrappers) | 62
-rwxr-xr-x   WebKitTools/Scripts/mark-bug-fixed | 141
-rw-r--r--   WebKitTools/Scripts/modules/bugzilla.py | 163
-rw-r--r--   WebKitTools/Scripts/modules/bugzilla_unittest.py | 12
-rw-r--r--   WebKitTools/Scripts/modules/buildbot.py | 102
-rw-r--r--   WebKitTools/Scripts/modules/buildbot_unittest.py | 118
-rw-r--r--   WebKitTools/Scripts/modules/changelogs.py | 92
-rw-r--r--   WebKitTools/Scripts/modules/changelogs_unittest.py | 145
-rwxr-xr-x   WebKitTools/Scripts/modules/comments.py | 39
-rw-r--r--   WebKitTools/Scripts/modules/committers.py | 46
-rw-r--r--   WebKitTools/Scripts/modules/committers_unittest.py (renamed from WebKitTools/Scripts/modules/commiters_unittest.py) | 6
-rw-r--r--   WebKitTools/Scripts/modules/cpp_style.py | 62
-rw-r--r--   WebKitTools/Scripts/modules/cpp_style_unittest.py | 104
-rw-r--r--   WebKitTools/Scripts/modules/logging.py | 11
-rw-r--r--   WebKitTools/Scripts/modules/logging_unittest.py | 61
-rw-r--r--   WebKitTools/Scripts/modules/scm.py | 211
-rw-r--r--   WebKitTools/Scripts/modules/scm_unittest.py | 248
-rw-r--r--   WebKitTools/Scripts/modules/statusbot.py | 66
-rwxr-xr-x   WebKitTools/Scripts/parse-malloc-history | 24
-rwxr-xr-x   WebKitTools/Scripts/pdevenv | 16
-rwxr-xr-x   WebKitTools/Scripts/prepare-ChangeLog | 164
-rwxr-xr-x   WebKitTools/Scripts/resolve-ChangeLogs | 16
-rwxr-xr-x   WebKitTools/Scripts/run-javascriptcore-tests | 2
-rwxr-xr-x   WebKitTools/Scripts/run-launcher | 9
-rwxr-xr-x   WebKitTools/Scripts/run-sunspider | 8
-rwxr-xr-x   WebKitTools/Scripts/run-webkit-httpd | 1
-rwxr-xr-x   WebKitTools/Scripts/run-webkit-tests | 665
-rwxr-xr-x   WebKitTools/Scripts/run-webkit-unittests | 7
-rwxr-xr-x   WebKitTools/Scripts/sunspider-compare-results | 4
-rwxr-xr-x   WebKitTools/Scripts/svn-apply | 70
-rwxr-xr-x   WebKitTools/Scripts/svn-create-patch | 16
-rwxr-xr-x   WebKitTools/Scripts/svn-unapply | 30
-rwxr-xr-x [-rw-r--r--]   WebKitTools/Scripts/update-sources-list.py | 2
-rwxr-xr-x   WebKitTools/Scripts/update-webkit | 14
-rw-r--r--   WebKitTools/Scripts/update-webkit-chromium | 51
-rwxr-xr-x   WebKitTools/Scripts/update-webkit-localizable-strings | 2
-rw-r--r--   WebKitTools/Scripts/webkitdirs.pm | 192
44 files changed, 3016 insertions, 767 deletions
diff --git a/WebKitTools/Scripts/VCSUtils.pm b/WebKitTools/Scripts/VCSUtils.pm
index 571487d..6ec12c9 100644
--- a/WebKitTools/Scripts/VCSUtils.pm
+++ b/WebKitTools/Scripts/VCSUtils.pm
@@ -25,6 +25,7 @@
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Module to share code to work with various version control systems.
+package VCSUtils;
use strict;
use warnings;
@@ -34,21 +35,37 @@ use File::Basename;
use File::Spec;
BEGIN {
- use Exporter ();
- our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
- $VERSION = 1.00;
- @ISA = qw(Exporter);
- @EXPORT = qw(&chdirReturningRelativePath &determineSVNRoot &determineVCSRoot &isGit &isGitDirectory &isSVN &isSVNDirectory &makeFilePathRelative);
- %EXPORT_TAGS = ( );
- @EXPORT_OK = ();
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+ @EXPORT = qw(
+ &chdirReturningRelativePath
+ &determineSVNRoot
+ &determineVCSRoot
+ &gitBranch
+ &isGit
+ &isGitBranchBuild
+ &isGitDirectory
+ &isSVN
+ &isSVNDirectory
+ &isSVNVersion16OrNewer
+ &makeFilePathRelative
+ &pathRelativeToSVNRepositoryRootForPath
+ &svnRevisionForDirectory
+ );
+ %EXPORT_TAGS = ( );
+ @EXPORT_OK = ();
}
our @EXPORT_OK;
-my $isGit;
-my $isSVN;
my $gitBranch;
+my $gitRoot;
+my $isGit;
my $isGitBranchBuild;
+my $isSVN;
+my $svnVersion;
sub isGitDirectory($)
{
@@ -68,7 +85,7 @@ sub gitBranch()
{
unless (defined $gitBranch) {
chomp($gitBranch = `git symbolic-ref -q HEAD`);
- $gitBranch = "" if exitStatus($?);
+ $gitBranch = "" if main::exitStatus($?); # FIXME: exitStatus is defined in webkitdirs.pm
$gitBranch =~ s#^refs/heads/##;
$gitBranch = "" if $gitBranch eq "master";
}
@@ -106,6 +123,24 @@ sub isSVN()
return $isSVN;
}
+sub svnVersion()
+{
+ return $svnVersion if defined $svnVersion;
+
+ if (!isSVN()) {
+ $svnVersion = 0;
+ } else {
+ chomp($svnVersion = `svn --version --quiet`);
+ }
+ return $svnVersion;
+}
+
+sub isSVNVersion16OrNewer()
+{
+ my $version = svnVersion();
+ return eval "v$version" ge v1.6;
+}
+
sub chdirReturningRelativePath($)
{
my ($directory) = @_;
@@ -128,28 +163,38 @@ sub determineSVNRoot()
my $last = '';
my $path = '.';
my $parent = '..';
+ my $repositoryRoot;
my $repositoryUUID;
while (1) {
+ my $thisRoot;
my $thisUUID;
# Ignore error messages in case we've run past the root of the checkout.
open INFO, "svn info '$path' 2> $devNull |" or die;
while (<INFO>) {
+ if (/^Repository Root: (.+)/) {
+ $thisRoot = $1;
+ }
if (/^Repository UUID: (.+)/) {
$thisUUID = $1;
- { local $/ = undef; <INFO>; } # Consume the rest of the input.
+ }
+ if ($thisRoot && $thisUUID) {
+ local $/ = undef;
+ <INFO>; # Consume the rest of the input.
}
}
close INFO;
# It's possible (e.g. for developers of some ports) to have a WebKit
# checkout in a subdirectory of another checkout. So abort if the
- # repository UUID suddenly changes.
+ # repository root or the repository UUID suddenly changes.
last if !$thisUUID;
- if (!$repositoryUUID) {
- $repositoryUUID = $thisUUID;
- }
+ $repositoryUUID = $thisUUID if !$repositoryUUID;
last if $thisUUID ne $repositoryUUID;
+ last if !$thisRoot;
+ $repositoryRoot = $thisRoot if !$repositoryRoot;
+ last if $thisRoot ne $repositoryRoot;
+
$last = $path;
$path = File::Spec->catdir($parent, $path);
}
@@ -162,10 +207,16 @@ sub determineVCSRoot()
if (isGit()) {
return determineGitRoot();
}
- if (isSVN()) {
- return determineSVNRoot();
+
+ if (!isSVN()) {
+ # Some users have a workflow where svn-create-patch, svn-apply and
+ # svn-unapply are used outside of multiple svn working directories,
+ # so warn the user and assume Subversion is being used in this case.
+ warn "Unable to determine VCS root; assuming Subversion";
+ $isSVN = 1;
}
- die "Unable to determine VCS root";
+
+ return determineSVNRoot();
}
sub svnRevisionForDirectory($)
@@ -206,8 +257,6 @@ sub pathRelativeToSVNRepositoryRootForPath($)
return $svnURL;
}
-
-my $gitRoot;
sub makeFilePathRelative($)
{
my ($path) = @_;
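
The VCSUtils.pm changes above add two related helpers: svnVersion() shells out to `svn --version --quiet` once and caches the result, and isSVNVersion16OrNewer() compares that string against 1.6 using Perl v-strings. A rough Python sketch of the same idea (the function names below are illustrative, not part of the patch):

```python
# Illustrative sketch only; the patch implements this in Perl (VCSUtils.pm).
import subprocess

_cached_svn_version = None

def svn_version():
    # `svn --version --quiet` prints just the version number, e.g. "1.6.5".
    global _cached_svn_version
    if _cached_svn_version is None:
        out = subprocess.Popen(["svn", "--version", "--quiet"],
                               stdout=subprocess.PIPE).communicate()[0]
        _cached_svn_version = out.decode("utf-8").strip()
    return _cached_svn_version

def is_svn_version_16_or_newer():
    # Numeric, component-wise comparison, mirroring Perl's
    # `eval "v$version" ge v1.6` v-string comparison.
    parts = [int(p) for p in svn_version().split(".") if p.isdigit()]
    return parts >= [1, 6]

# Example: a version string of "1.6.5" yields True, "1.5.4" yields False.
```
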
diff --git a/WebKitTools/Scripts/bisect-builds b/WebKitTools/Scripts/bisect-builds
index 34230a9..93e9223 100755
--- a/WebKitTools/Scripts/bisect-builds
+++ b/WebKitTools/Scripts/bisect-builds
@@ -209,7 +209,7 @@ sub createTempFile($)
my ($fh, $tempFile) = tempfile(
basename($0) . "-XXXXXXXX",
- DIR => File::Spec->tmpdir,
+ DIR => File::Spec->tmpdir(),
SUFFIX => ".html",
UNLINK => 0,
);
diff --git a/WebKitTools/Scripts/bugzilla-tool b/WebKitTools/Scripts/bugzilla-tool
index b3c0d67..ec5aa0d 100755
--- a/WebKitTools/Scripts/bugzilla-tool
+++ b/WebKitTools/Scripts/bugzilla-tool
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
@@ -30,19 +30,24 @@
#
# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
-import fileinput # inplace file editing for set_reviewer_in_changelog
import os
import re
import StringIO # for add_patch_to_bug file wrappers
import subprocess
import sys
+import time
+from datetime import datetime, timedelta
from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
# Import WebKit-specific modules.
-from modules.bugzilla import Bugzilla
-from modules.logging import error, log
-from modules.scm import CommitMessage, detect_scm_system, ScriptError
+from modules.bugzilla import Bugzilla, parse_bug_id
+from modules.changelogs import ChangeLog
+from modules.comments import bug_comment_from_commit_text
+from modules.logging import error, log, tee
+from modules.scm import CommitMessage, detect_scm_system, ScriptError, CheckoutNeedsUpdate
+from modules.buildbot import BuildBot
+from modules.statusbot import StatusBot
def plural(noun):
# This is a dumb plural() implementation which was just enough for our uses.
@@ -56,70 +61,21 @@ def pluralize(noun, count):
noun = plural(noun)
return "%d %s" % (count, noun)
-# These could be put in some sort of changelogs.py.
-def latest_changelog_entry(changelog_path):
- # e.g. 2009-06-03 Eric Seidel <eric@webkit.org>
- changelog_date_line_regexp = re.compile('^(\d{4}-\d{2}-\d{2})' # Consume the date.
- + '\s+(.+)\s+' # Consume the name.
- + '<([^<>]+)>$') # And finally the email address.
-
- entry_lines = []
- changelog = open(changelog_path)
- try:
- log("Parsing ChangeLog: " + changelog_path)
- # The first line should be a date line.
- first_line = changelog.readline()
- if not changelog_date_line_regexp.match(first_line):
- return None
- entry_lines.append(first_line)
-
- for line in changelog:
- # If we've hit the next entry, return.
- if changelog_date_line_regexp.match(line):
- return ''.join(entry_lines)
- entry_lines.append(line)
- finally:
- changelog.close()
- # We never found a date line!
- return None
-
-def set_reviewer_in_changelog(changelog_path, reviewer):
- # inplace=1 creates a backup file and re-directs stdout to the file
- for line in fileinput.FileInput(changelog_path, inplace=1):
- print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")), # Trailing comma suppresses printing newline
-
-def modified_changelogs(scm):
- changelog_paths = []
- paths = scm.changed_files()
- for path in paths:
- if os.path.basename(path) == "ChangeLog":
- changelog_paths.append(path)
- return changelog_paths
-
-def parse_bug_id(commit_message):
- message = commit_message.message()
- match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message)
- if match:
- return match.group('bug_id')
- match = re.search(Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", message)
- if match:
- return match.group('bug_id')
- return None
-
def commit_message_for_this_commit(scm):
- changelog_paths = modified_changelogs(scm)
+ changelog_paths = scm.modified_changelogs()
if not len(changelog_paths):
- raise ScriptError("Found no modified ChangeLogs, cannot create a commit message.\n"
+ raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
"All changes require a ChangeLog. See:\n"
"http://webkit.org/coding/contributing.html")
changelog_messages = []
- for path in changelog_paths:
- changelog_entry = latest_changelog_entry(path)
+ for changelog_path in changelog_paths:
+ log("Parsing ChangeLog: %s" % changelog_path)
+ changelog_entry = ChangeLog(changelog_path).latest_entry()
if not changelog_entry:
- error("Failed to parse ChangeLog: " + os.path.abspath(path))
+ error("Failed to parse ChangeLog: " + os.path.abspath(changelog_path))
changelog_messages.append(changelog_entry)
-
+
# FIXME: We should sort and label the ChangeLog messages like commit-log-editor does.
return CommitMessage(''.join(changelog_messages).splitlines())
@@ -183,10 +139,9 @@ class ApplyPatchesFromBug(Command):
def __init__(self):
options = [
make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory before applying patches"),
- make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)"),
- make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches"),
make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch"),
]
+ options += WebKitLandingScripts.cleaning_options()
Command.__init__(self, 'Applies all patches on a bug to the local working directory without committing.', 'BUGID', options=options)
@staticmethod
@@ -212,23 +167,110 @@ class ApplyPatchesFromBug(Command):
self.apply_patches(patches, tool.scm(), options.local_commit)
-def bug_comment_from_commit_text(scm, commit_text):
- match = re.search(scm.commit_success_regexp(), commit_text, re.MULTILINE)
- svn_revision = match.group('svn_revision')
- commit_text += ("\nhttp://trac.webkit.org/changeset/%s" % svn_revision)
- return commit_text
-
+class WebKitLandingScripts:
+ @staticmethod
+ def cleaning_options():
+ return [
+ make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)"),
+ make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches"),
+ ]
-class LandAndUpdateBug(Command):
- def __init__(self):
- options = [
- make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER."),
+ @staticmethod
+ def land_options():
+ return [
+ make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing."),
make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing."),
make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test."),
make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests."),
make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output."),
make_option("--commit-queue", action="store_true", dest="commit_queue", default=False, help="Run in commit queue mode (no user interaction)."),
]
+
+ @staticmethod
+ def run_command_with_teed_output(args, teed_output):
+ child_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ # Use our own custom wait loop because Popen ignores a tee'd stderr/stdout.
+ # FIXME: This could be improved not to flatten output to stdout.
+ while True:
+ output_line = child_process.stdout.readline()
+ if output_line == '' and child_process.poll() != None:
+ return child_process.poll()
+ teed_output.write(output_line)
+
+ @staticmethod
+ def run_and_throw_if_fail(args, quiet=False):
+ # Cache the child's output locally so it can be used for error reports.
+ child_out_file = StringIO.StringIO()
+ if quiet:
+ dev_null = open(os.devnull, "w")
+ child_stdout = tee(child_out_file, dev_null if quiet else sys.stdout)
+ exit_code = WebKitLandingScripts.run_command_with_teed_output(args, child_stdout)
+ if quiet:
+ dev_null.close()
+
+ child_output = child_out_file.getvalue()
+ child_out_file.close()
+
+ if exit_code:
+ raise ScriptError(script_args=args, exit_code=exit_code, output=child_output)
+
+ # We might need to pass scm into this function for scm.checkout_root
+ @staticmethod
+ def webkit_script_path(script_name):
+ return os.path.join("WebKitTools", "Scripts", script_name)
+
+ @classmethod
+ def run_webkit_script(cls, script_name, quiet=False):
+ log("Running %s" % script_name)
+ cls.run_and_throw_if_fail(cls.webkit_script_path(script_name), quiet)
+
+ @classmethod
+ def build_webkit(cls, quiet=False):
+ cls.run_webkit_script("build-webkit", quiet)
+
+ @staticmethod
+ def ensure_builders_are_green(buildbot, options):
+ if not options.check_builders or buildbot.core_builders_are_green():
+ return
+ error("Builders at %s are red, please do not commit. Pass --ignore-builders to bypass this check." % (buildbot.buildbot_host))
+
+ @classmethod
+ def run_webkit_tests(cls, launch_safari, fail_fast=False, quiet=False):
+ args = [cls.webkit_script_path("run-webkit-tests")]
+ if not launch_safari:
+ args.append("--no-launch-safari")
+ if quiet:
+ args.append("--quiet")
+ if fail_fast:
+ args.append("--exit-after-n-failures=1")
+ cls.run_and_throw_if_fail(args)
+
+ @staticmethod
+ def setup_for_landing(scm, options):
+ os.chdir(scm.checkout_root)
+ scm.ensure_no_local_commits(options.force_clean)
+ if options.clean:
+ scm.ensure_clean_working_directory(options.force_clean)
+
+ @classmethod
+ def build_and_commit(cls, scm, options):
+ if options.build:
+ cls.build_webkit(quiet=options.quiet)
+ if options.test:
+ # When running the commit-queue we don't want to launch Safari and we want to exit after the first failure.
+ cls.run_webkit_tests(launch_safari=not options.commit_queue, fail_fast=options.commit_queue, quiet=options.quiet)
+ commit_message = commit_message_for_this_commit(scm)
+ commit_log = scm.commit_with_message(commit_message.message())
+ return bug_comment_from_commit_text(scm, commit_log)
+
+
+class LandAndUpdateBug(Command):
+ def __init__(self):
+ options = [
+ make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER."),
+ ]
+ options += WebKitLandingScripts.land_options()
Command.__init__(self, 'Lands the current working directory diff and updates the bug if provided.', '[BUGID]', options=options)
def guess_reviewer_from_bug(self, bugs, bug_id):
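
The new WebKitLandingScripts.run_command_with_teed_output() above replaces the old quiet/PIPE handling: it reads the child's combined stdout and stderr one line at a time and hands each line to a tee object, so output can be cached for ScriptError reports while still reaching the console. A minimal sketch of that pattern, with a stand-in Tee class for modules.logging's tee (which this patch imports but does not show here):

```python
# Minimal sketch of the "teed output" pattern; Tee is a stand-in for modules.logging.tee.
import subprocess
import sys

class Tee(object):
    def __init__(self, *files):
        self.files = files

    def write(self, text):
        for f in self.files:
            f.write(text)

def run_with_teed_output(args, teed_output):
    child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    while True:
        line = child.stdout.readline()
        # An empty read plus a finished child means the output is drained.
        if not line and child.poll() is not None:
            return child.poll()
        teed_output.write(line.decode("utf-8", "replace"))

# Example: capture output in memory while echoing it to the console.
#   import io
#   buf = io.StringIO()
#   exit_code = run_with_teed_output(["echo", "hello"], Tee(buf, sys.stdout))
```
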
@@ -252,17 +294,18 @@ class LandAndUpdateBug(Command):
log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id)
return
- changelogs = modified_changelogs(tool.scm())
- for changelog in changelogs:
- set_reviewer_in_changelog(changelog, reviewer)
+ for changelog_path in tool.scm().modified_changelogs():
+ ChangeLog(changelog_path).set_reviewer(reviewer)
def execute(self, options, args, tool):
bug_id = args[0] if len(args) else None
os.chdir(tool.scm().checkout_root)
+ WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
+
self.update_changelogs_with_reviewer(options.reviewer, bug_id, tool)
- comment_text = LandPatchesFromBugs.build_and_commit(tool.scm(), options)
+ comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
if bug_id:
log("Updating bug %s" % bug_id)
if options.close_bug:
@@ -278,104 +321,66 @@ class LandAndUpdateBug(Command):
class LandPatchesFromBugs(Command):
def __init__(self):
- options = [
- make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)"),
- make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches"),
- make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing."),
- make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test."),
- make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests."),
- make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output."),
- make_option("--commit-queue", action="store_true", dest="commit_queue", default=False, help="Run in commit queue mode (no user interaction)."),
- ]
+ options = WebKitLandingScripts.cleaning_options()
+ options += WebKitLandingScripts.land_options()
Command.__init__(self, 'Lands all patches on a bug optionally testing them first', 'BUGID', options=options)
- @staticmethod
- def run_and_throw_if_fail(args, quiet=False):
- child_stdout = subprocess.PIPE if quiet else None
- child_process = subprocess.Popen(args, stdout=child_stdout)
- if child_process.stdout:
- child_process.communicate()
- return_code = child_process.wait()
- if return_code:
- raise ScriptError("%s failed with exit code %d" % (" ".join(args), return_code))
-
- # We might need to pass scm into this function for scm.checkout_root
- @staticmethod
- def webkit_script_path(script_name):
- return os.path.join("WebKitTools", "Scripts", script_name)
-
- @classmethod
- def run_webkit_script(cls, script_name, quiet=False):
- print "Running WebKit Script " + script_name
- cls.run_and_throw_if_fail(cls.webkit_script_path(script_name), quiet)
-
- @classmethod
- def build_webkit(cls, quiet=False):
- cls.run_webkit_script("build-webkit", quiet)
-
- @classmethod
- def run_webkit_tests(cls, launch_safari, quiet=False):
- args = [cls.webkit_script_path("run-webkit-tests")]
- if not launch_safari:
- args.append("--no-launch-safari")
- if quiet:
- args.append("--quiet")
- cls.run_and_throw_if_fail(args)
-
- @staticmethod
- def setup_for_landing(scm, options):
- os.chdir(scm.checkout_root)
- scm.ensure_no_local_commits(options.force_clean)
- if options.clean:
- scm.ensure_clean_working_directory(options.force_clean)
-
- @classmethod
- def build_and_commit(cls, scm, options):
- if options.build:
- cls.build_webkit(quiet=options.quiet)
- if options.test:
- cls.run_webkit_tests(launch_safari=not options.commit_queue, quiet=options.quiet)
- commit_message = commit_message_for_this_commit(scm)
- commit_log = scm.commit_with_message(commit_message.message())
- return bug_comment_from_commit_text(scm, commit_log)
-
@classmethod
def land_patches(cls, bug_id, patches, options, tool):
try:
comment_text = ""
for patch in patches:
tool.scm().update_webkit() # Update before every patch in case the tree has changed
+ log("Applying %s from bug %s." % (patch['id'], bug_id))
tool.scm().apply_patch(patch, force=options.commit_queue)
- comment_text = cls.build_and_commit(tool.scm(), options)
- tool.bugs.clear_attachment_review_flag(patch['id'], comment_text)
+ # Make sure the tree is still green after updating, before building this patch.
+ # The first patch ends up checking tree status twice, but that's OK.
+ WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
+ comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
+ tool.bugs.clear_attachment_flags(patch['id'], comment_text)
if options.close_bug:
tool.bugs.close_bug_as_fixed(bug_id, "All reviewed patches have been landed. Closing bug.")
+ except CheckoutNeedsUpdate, e:
+ log("Commit was rejected because the checkout is out of date. Please update and try again.")
+ log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.")
+ error(e)
except ScriptError, e:
- # We should add a comment to the bug, and r- the patch on failure
+ # Mark the patch as commit-queue- and comment in the bug.
+ tool.bugs.reject_patch_from_commit_queue(patch['id'], e.message_with_output())
error(e)
- def execute(self, options, args, tool):
- if not len(args):
- error("bug-id(s) required")
-
+ @staticmethod
+ def _fetch_list_of_patches_to_land(options, args, tool):
bugs_to_patches = {}
patch_count = 0
for bug_id in args:
patches = []
if options.commit_queue:
- patches = tool.bugs.fetch_commit_queue_patches_from_bug(bug_id)
+ patches = tool.bugs.fetch_commit_queue_patches_from_bug(bug_id, reject_invalid_patches=True)
else:
patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
- if not len(patches):
- log("No reviewed patches found on %s." % bug_id)
- continue
- patch_count += len(patches)
- bugs_to_patches[bug_id] = patches
+
+ patches_found = len(patches)
+ log("%s found on bug %s." % (pluralize("reviewed patch", patches_found), bug_id))
+
+ patch_count += patches_found
+ if patches_found:
+ bugs_to_patches[bug_id] = patches
log("Landing %s from %s." % (pluralize("patch", patch_count), pluralize("bug", len(args))))
+ return bugs_to_patches
+
+ def execute(self, options, args, tool):
+ if not len(args):
+ error("bug-id(s) required")
- self.setup_for_landing(tool.scm(), options)
+ # Check the tree status here so we can fail early
+ WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
+
+ bugs_to_patches = self._fetch_list_of_patches_to_land(options, args, tool)
+
+ WebKitLandingScripts.setup_for_landing(tool.scm(), options)
for bug_id in bugs_to_patches.keys():
self.land_patches(bug_id, bugs_to_patches[bug_id], options, tool)
@@ -405,11 +410,17 @@ class ObsoleteAttachmentsOnBug(Command):
class PostDiffAsPatchToBug(Command):
def __init__(self):
options = [
+ make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: 'patch')"),
+ ]
+ options += self.posting_options()
+ Command.__init__(self, 'Attaches the current working directory diff to a bug as a patch file.', '[BUGID]', options=options)
+
+ @staticmethod
+ def posting_options():
+ return [
make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one."),
make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
- make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: 'patch')"),
]
- Command.__init__(self, 'Attaches the current working directory diff to a bug as a patch file.', 'BUGID', options=options)
@staticmethod
def obsolete_patches_on_bug(bug_id, bugs):
@@ -420,7 +431,10 @@ class PostDiffAsPatchToBug(Command):
bugs.obsolete_attachment(patch['id'])
def execute(self, options, args, tool):
- bug_id = args[0]
+ # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
+ bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch())
+ if not bug_id:
+ error("No bug id passed and no bug url found in diff, can't post.")
if options.obsolete_patches:
self.obsolete_patches_on_bug(bug_id, tool.bugs)
@@ -436,49 +450,109 @@ class PostCommitsAsPatchesToBug(Command):
def __init__(self):
options = [
make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
- make_option("--no-comment", action="store_false", dest="comment", default=True, help="Do not use commit log message as a comment for the patch."),
- make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting new ones."),
- make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
- make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: 'patch')"),
+ make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
+ make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
]
+ options += PostDiffAsPatchToBug.posting_options()
Command.__init__(self, 'Attaches a range of local commits to bugs as patch files.', 'COMMITISH', options=options, requires_local_commits=True)
+ def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
+ comment_text = None
+ if (options.add_log_as_comment):
+ comment_text = commit_message.body(lstrip=True)
+ comment_text += "---\n"
+ comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+ return comment_text
+
+ def _diff_file_for_commit(self, tool, commit_id):
+ diff = tool.scm().create_patch_from_local_commit(commit_id)
+ return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+
def execute(self, options, args, tool):
if not args:
error("%s argument is required" % self.argument_names)
commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
- if len(commit_ids) > 10:
- error("Are you sure you want to attach %s patches?" % (pluralize('patch', len(commit_ids))))
- # Could add a --patches-limit option.
+ if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is.
+ error("bugzilla-tool does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize('patch', len(commit_ids))))
have_obsoleted_patches = set()
for commit_id in commit_ids:
- # FIXME: commit_message is the wrong place to look for the bug_id
- # the ChangeLogs should have the bug id, but the local commit message might not.
commit_message = tool.scm().commit_message_for_local_commit(commit_id)
- bug_id = options.bug_id or parse_bug_id(commit_message)
+ # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
+ bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id))
if not bug_id:
- log("Skipping %s: No bug id found in commit log or specified with --bug-id." % commit_id)
+ log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
continue
if options.obsolete_patches and bug_id not in have_obsoleted_patches:
PostDiffAsPatchToBug.obsolete_patches_on_bug(bug_id, tool.bugs)
have_obsoleted_patches.add(bug_id)
+ diff_file = self._diff_file_for_commit(tool, commit_id)
description = options.description or commit_message.description(lstrip=True, strip_url=True)
- comment_text = None
- if (options.comment):
- comment_text = commit_message.body(lstrip=True)
- comment_text += "---\n"
- comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
-
- diff = tool.scm().create_patch_from_local_commit(commit_id)
- diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+ comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review)
+class RolloutCommit(Command):
+ def __init__(self):
+ options = WebKitLandingScripts.land_options()
+ options += WebKitLandingScripts.cleaning_options()
+ options.append(make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Experimental support for complete unsupervised rollouts, including re-opening the bug. Not recommended."))
+ Command.__init__(self, 'Reverts the given revision and commits the revert and re-opens the original bug.', 'REVISION [BUGID]', options=options)
+
+ @staticmethod
+ def _create_changelogs_for_revert(scm, revision):
+ # First, discard the ChangeLog changes from the rollout.
+ changelog_paths = scm.modified_changelogs()
+ scm.revert_files(changelog_paths)
+
+ # Second, make new ChangeLog entries for this rollout.
+ # This could move to prepare-ChangeLog by adding a --revert= option.
+ WebKitLandingScripts.run_webkit_script("prepare-ChangeLog")
+ for changelog_path in changelog_paths:
+ ChangeLog(changelog_path).update_for_revert(revision)
+
+ @staticmethod
+ def _parse_bug_id_from_revision_diff(tool, revision):
+ original_diff = tool.scm().diff_for_revision(revision)
+ return parse_bug_id(original_diff)
+
+ @staticmethod
+ def _reopen_bug_after_rollout(tool, bug_id, comment_text):
+ if bug_id:
+ tool.bugs.reopen_bug(bug_id, comment_text)
+ else:
+ log(comment_text)
+ log("No bugs were updated or re-opened to reflect this rollout.")
+
+ def execute(self, options, args, tool):
+ if not args:
+ error("REVISION is required, see --help.")
+ revision = args[0]
+ bug_id = self._parse_bug_id_from_revision_diff(tool, revision)
+ if options.complete_rollout:
+ if bug_id:
+ log("Will re-open bug %s after rollout." % bug_id)
+ else:
+ log("Failed to parse bug number from diff. No bugs will be updated/reopened after the rollout.")
+
+ WebKitLandingScripts.setup_for_landing(tool.scm(), options)
+ tool.scm().update_webkit()
+ tool.scm().apply_reverse_diff(revision)
+ self._create_changelogs_for_revert(tool.scm(), revision)
+
+ # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout.
+ # Once we trust rollout we will remove this option.
+ if not options.complete_rollout:
+ log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use 'bugzilla-tool land-diff %s' to commit the rollout." % bug_id)
+ else:
+ comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
+ self._reopen_bug_after_rollout(tool, bug_id, comment_text)
+
+
class CreateBug(Command):
def __init__(self):
options = [
@@ -533,9 +607,18 @@ class CreateBug(Command):
def prompt_for_bug_title_and_comment(self):
bug_title = raw_input("Bug title: ")
- print("Bug comment (hit ^D on blank line to end):")
+ print "Bug comment (hit ^D on blank line to end):"
lines = sys.stdin.readlines()
- sys.stdin.seek(0, os.SEEK_END)
+ try:
+ sys.stdin.seek(0, os.SEEK_END)
+ except IOError:
+ # Cygwin raises an Illegal Seek (errno 29) exception when the above
+ # seek() call is made. Ignoring it seems to cause no harm.
+ # FIXME: Figure out a way to avoid the exception in the first
+ # place.
+ pass
+ else:
+ raise
comment_text = ''.join(lines)
return (bug_title, comment_text)
@@ -548,6 +631,126 @@ class CreateBug(Command):
self.create_bug_from_patch(options, args, tool)
+class CheckTreeStatus(Command):
+ def __init__(self):
+ Command.__init__(self, 'Print out the status of the webkit builders.')
+
+ def execute(self, options, args, tool):
+ for builder in tool.buildbot.builder_statuses():
+ status_string = "ok" if builder['is_green'] else 'FAIL'
+ print "%s : %s" % (status_string.ljust(4), builder['name'])
+
+
+class LandPatchesFromCommitQueue(Command):
+ def __init__(self):
+ options = [
+ make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
+ make_option("--status-host", action="store", type="string", dest="status_host", default=StatusBot.default_host, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
+ ]
+ Command.__init__(self, 'Run the commit queue.', options=options)
+ self._original_stdout = None
+ self._original_stderr = None
+ self._files_for_output = []
+
+ queue_log_path = 'commit_queue.log'
+ bug_logs_directory = 'commit_queue_logs'
+
+ log_date_format = "%Y-%m-%d %H:%M:%S"
+ sleep_duration_text = "5 mins"
+ seconds_to_sleep = 300
+
+ def _tee_outputs_to_files(self, files):
+ if not self._original_stdout:
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+ if files and len(files):
+ sys.stdout = tee(self._original_stdout, *files)
+ sys.stderr = tee(self._original_stderr, *files)
+ else:
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
+
+ @classmethod
+ def _sleep_message(cls, message):
+ wake_time = datetime.now() + timedelta(seconds=cls.seconds_to_sleep)
+ return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(cls.log_date_format), cls.sleep_duration_text)
+
+ @classmethod
+ def _sleep(cls, message):
+ log(cls._sleep_message(message))
+ time.sleep(cls.seconds_to_sleep)
+
+ def _update_status_and_sleep(self, message):
+ status_message = self._sleep_message(message)
+ self.status_bot.update_status(status_message)
+ log(status_message)
+ time.sleep(self.seconds_to_sleep)
+
+ @staticmethod
+ def _open_log_file(log_path):
+ (log_directory, log_name) = os.path.split(log_path)
+ if log_directory and not os.path.exists(log_directory):
+ os.makedirs(log_directory)
+ return open(log_path, 'a+')
+
+ def _add_log_to_output_tee(self, path):
+ log_file = self._open_log_file(path)
+ self._files_for_output.append(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ return log_file
+
+ def _remove_log_from_output_tee(self, log_file):
+ self._files_for_output.remove(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ log_file.close()
+
+ def execute(self, options, args, tool):
+ log("CAUTION: commit-queue will discard all local changes in %s" % tool.scm().checkout_root)
+ if options.confirm:
+ response = raw_input("Are you sure? Type 'yes' to continue: ")
+ if (response != 'yes'):
+ error("User declined.")
+
+ queue_log = self._add_log_to_output_tee(self.queue_log_path)
+ log("Running WebKit Commit Queue. %s" % datetime.now().strftime(self.log_date_format))
+
+ self.status_bot = StatusBot(host=options.status_host)
+
+ while (True):
+ # Either of these calls could throw URLError which shouldn't stop the queue.
+ # We catch all exceptions just in case.
+ try:
+ # Fetch patches instead of just bug ids so that we validate reviewer/committer flags on every patch.
+ patches = tool.bugs.fetch_patches_from_commit_queue(reject_invalid_patches=True)
+ if not len(patches):
+ self._update_status_and_sleep("Empty queue.")
+ continue
+ patch_ids = map(lambda patch: patch['id'], patches)
+ first_bug_id = patches[0]['bug_id']
+ log("%s in commit queue [%s]" % (pluralize('patch', len(patches)), ", ".join(patch_ids)))
+
+ if not tool.buildbot.core_builders_are_green():
+ self._update_status_and_sleep("Builders (http://build.webkit.org) are red.")
+ continue
+
+ self.status_bot.update_status("Landing patches from bug %s." % first_bug_id, bug_id=first_bug_id)
+ except Exception, e:
+ # Don't try to tell the status bot, in case telling it causes an exception.
+ self._sleep("Exception while checking queue and bots: %s." % e)
+ continue
+
+ # Try to land patches on the first bug in the queue before looping
+ bug_log_path = os.path.join(self.bug_logs_directory, "%s.log" % first_bug_id)
+ bug_log = self._add_log_to_output_tee(bug_log_path)
+ bugzilla_tool_path = __file__ # re-execute this script
+ bugzilla_tool_args = [bugzilla_tool_path, 'land-patches', '--force-clean', '--commit-queue', '--quiet', first_bug_id]
+ WebKitLandingScripts.run_command_with_teed_output(bugzilla_tool_args, sys.stdout)
+ self._remove_log_from_output_tee(bug_log)
+
+ log("Finished WebKit Commit Queue. %s" % datetime.now().strftime(self.log_date_format))
+ self._remove_log_from_output_tee(queue_log)
+
+
class NonWrappingEpilogIndentedHelpFormatter(IndentedHelpFormatter):
def __init__(self):
IndentedHelpFormatter.__init__(self)
@@ -571,6 +774,7 @@ class BugzillaTool:
def __init__(self):
self.cached_scm = None
self.bugs = Bugzilla()
+ self.buildbot = BuildBot()
self.commands = [
{ 'name' : 'bugs-to-commit', 'object' : BugsInCommitQueue() },
{ 'name' : 'patches-to-commit', 'object' : PatchesInCommitQueue() },
@@ -583,6 +787,9 @@ class BugzillaTool:
{ 'name' : 'obsolete-attachments', 'object' : ObsoleteAttachmentsOnBug() },
{ 'name' : 'post-diff', 'object' : PostDiffAsPatchToBug() },
{ 'name' : 'post-commits', 'object' : PostCommitsAsPatchesToBug() },
+ { 'name' : 'tree-status', 'object' : CheckTreeStatus() },
+ { 'name' : 'commit-queue', 'object' : LandPatchesFromCommitQueue() },
+ { 'name' : 'rollout', 'object' : RolloutCommit() },
]
self.global_option_parser = HelpPrintingOptionParser(usage=self.usage_line(), formatter=NonWrappingEpilogIndentedHelpFormatter(), epilog=self.commands_usage())
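
The bugzilla-tool changes above also add the commit-queue command, whose execute() is essentially a polling loop: fetch the commit-queue patches, sleep for five minutes if the queue is empty or the core builders are red, otherwise tell the status server which bug is being landed and re-invoke bugzilla-tool land-patches for it. A condensed sketch of one iteration (tool, status_bot, and land_patches_for_bug are hypothetical stand-ins for the objects used above):

```python
# Condensed sketch of one commit-queue iteration; tool, status_bot and
# land_patches_for_bug are hypothetical stand-ins, not the patch's own API.
import time
from datetime import datetime, timedelta

SECONDS_TO_SLEEP = 300
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

def sleep_message(message):
    wake_time = datetime.now() + timedelta(seconds=SECONDS_TO_SLEEP)
    return "%s Sleeping until %s (5 mins)." % (message, wake_time.strftime(LOG_DATE_FORMAT))

def run_queue_once(tool, status_bot, land_patches_for_bug):
    try:
        # Fetch full patches (not just bug ids) so reviewer/committer flags are validated.
        patches = tool.bugs.fetch_patches_from_commit_queue(reject_invalid_patches=True)
        if not patches:
            status_bot.update_status(sleep_message("Empty queue."))
            time.sleep(SECONDS_TO_SLEEP)
            return
        if not tool.buildbot.core_builders_are_green():
            status_bot.update_status(sleep_message("Builders (http://build.webkit.org) are red."))
            time.sleep(SECONDS_TO_SLEEP)
            return
        first_bug_id = patches[0]["bug_id"]
        status_bot.update_status("Landing patches from bug %s." % first_bug_id, bug_id=first_bug_id)
    except Exception:
        # Don't report this failure to the status bot; the report itself might throw.
        time.sleep(SECONDS_TO_SLEEP)
        return
    land_patches_for_bug(first_bug_id)
```
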
diff --git a/WebKitTools/Scripts/build-webkit b/WebKitTools/Scripts/build-webkit
index fe6d3c7..4f78eef 100755
--- a/WebKitTools/Scripts/build-webkit
+++ b/WebKitTools/Scripts/build-webkit
@@ -1,6 +1,7 @@
#!/usr/bin/perl -w
-# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved.
+# Copyright (C) 2005, 2006 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -37,6 +38,9 @@ use lib $FindBin::Bin;
use webkitdirs;
use POSIX;
+sub formatBuildTime($);
+sub writeCongrats();
+
my $originalWorkingDirectory = getcwd();
chdirWebKit();
@@ -44,17 +48,18 @@ my $showHelp = 0;
my $clean = 0;
my $minimal = 0;
my $makeArgs;
+my $startTime = time();
my ($threeDCanvasSupport, $threeDRenderingSupport, $channelMessagingSupport, $databaseSupport, $datagridSupport, $domStorageSupport,
- $filtersSupport, $geolocationSupport, $gnomeKeyringSupport, $iconDatabaseSupport,
- $javaScriptDebuggerSupport, $offlineWebApplicationSupport, $rubySupport, $sharedWorkersSupport,
+ $eventsourceSupport, $filtersSupport, $geolocationSupport, $iconDatabaseSupport,
+ $javaScriptDebuggerSupport, $mathmlSupport, $offlineWebApplicationSupport, $rubySupport, $sharedWorkersSupport,
$svgSupport, $svgAnimationSupport, $svgAsImageSupport, $svgDOMObjCBindingsSupport, $svgFontsSupport,
- $svgForeignObjectSupport, $svgUseSupport, $videoSupport, $webSocketsSupport, $wmlSupport, $workersSupport,
- $xpathSupport, $xsltSupport, $coverageSupport);
+ $svgForeignObjectSupport, $svgUseSupport, $videoSupport, $webSocketsSupport, $wmlSupport, $wcssSupport, $xhtmlmpSupport, $workersSupport,
+ $xpathSupport, $xsltSupport, $coverageSupport, $notificationsSupport);
my @features = (
{ option => "3d-canvas", desc => "Toggle 3D canvas support",
- define => "ENABLE_3D_CANVAS", default => 0, value => \$threeDCanvasSupport },
+ define => "ENABLE_3D_CANVAS", default => (isAppleMacWebKit() && !isTiger()), value => \$threeDCanvasSupport },
{ option => "3d-rendering", desc => "Toggle 3D rendering support",
define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() && !isTiger()), value => \$threeDRenderingSupport },
@@ -74,14 +79,14 @@ my @features = (
{ option => "dom-storage", desc => "Toggle DOM Storage Support",
define => "ENABLE_DOM_STORAGE", default => 1, value => \$domStorageSupport },
+ { option => "eventsource", desc => "Toggle server-sent events support",
+ define => "ENABLE_EVENTSOURCE", default => 1, value => \$eventsourceSupport },
+
{ option => "filters", desc => "Toggle Filters support",
define => "ENABLE_FILTERS", default => 0, value => \$filtersSupport },
{ option => "geolocation", desc => "Toggle Geolocation support",
- define => "ENABLE_GEOLOCATION", default => 0, value => \$geolocationSupport },
-
- { option => "gnomekeyring", desc => "Toggle GNOME Keyring Support (GTK+ port only)",
- define => "WTF_USE_GNOMEKEYRING", default => 0, value => \$gnomeKeyringSupport },
+ define => "ENABLE_GEOLOCATION", default => isGtk(), value => \$geolocationSupport },
{ option => "icon-database", desc => "Toggle Icon database support",
define => "ENABLE_ICONDATABASE", default => 1, value => \$iconDatabaseSupport },
@@ -89,6 +94,12 @@ my @features = (
{ option => "javascript-debugger", desc => "Toggle JavaScript Debugger/Profiler support",
define => "ENABLE_JAVASCRIPT_DEBUGGER", default => 1, value => \$javaScriptDebuggerSupport },
+ { option => "mathml", desc => "Toggle MathML support",
+ define => "ENABLE_MATHML", default => 0, value => \$mathmlSupport },
+
+ { option => "notifications", desc => "Toggle Desktop Notifications Support",
+ define => "ENABLE_NOTIFICATIONS", default => 0, value => \$notificationsSupport },
+
{ option => "offline-web-applications", desc => "Toggle Offline Web Application Support",
define => "ENABLE_OFFLINE_WEB_APPLICATIONS", default => 1, value => \$offlineWebApplicationSupport },
@@ -96,7 +107,7 @@ my @features = (
define => "ENABLE_RUBY", default => 1, value => \$rubySupport },
{ option => "shared-workers", desc => "Toggle SharedWorkers support",
- define => "ENABLE_SHARED_WORKERS", default => 0, value => \$sharedWorkersSupport },
+ define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk()), value => \$sharedWorkersSupport },
{ option => "svg", desc => "Toggle SVG support",
define => "ENABLE_SVG", default => 1, value => \$svgSupport },
@@ -128,6 +139,12 @@ my @features = (
{ option => "wml", desc => "Toggle WML support",
define => "ENABLE_WML", default => 0, value => \$wmlSupport },
+ { option => "xhtmlmp", desc => "Toggle XHTML-MP support",
+ define => "ENABLE_XHTMLMP", default => 0, value => \$xhtmlmpSupport },
+
+ { option => "wcss", desc => "Toggle WCSS support",
+ define => "ENABLE_WCSS", default => 0, value => \$wcssSupport },
+
{ option => "workers", desc => "Toggle Web Workers support",
define => "ENABLE_WORKERS", default => (isAppleWebKit() || isGtk()), value => \$workersSupport },
@@ -205,18 +222,6 @@ if ($showHelp) {
checkRequiredSystemConfig();
setConfiguration();
-if (isWx()) {
- $ENV{"WEBKITOUTPUTDIR"} = productDir();
-
- my @opts = getWxArgs();
-
- if ($clean) {
- push(@opts, "clean");
- }
- system "WebKitTools/wx/build-wxwebkit @opts";
- exit exitStatus($?);
-}
-
my $productDir = productDir();
# Check that all the project directories are there.
@@ -306,6 +311,19 @@ if (isGtk()) {
# Force re-link of existing libraries if different than expected
removeLibraryDependingOnSVG("WebCore", $svgSupport);
+if (isWx()) {
+ downloadWafIfNeeded();
+ push @projects, 'WebKitTools/wx/browser';
+ push @projects, 'WebKit/wx/bindings/python';
+}
+
+if (isChromium()) {
+ # Chromium doesn't build by project directories.
+ @projects = ();
+ my $result = buildChromium($clean, @options);
+ exit $result if $result;
+}
+
# Build, and abort if the build fails.
for my $dir (@projects) {
chdir $dir or die;
@@ -327,6 +345,16 @@ for my $dir (@projects) {
if ($dir eq "WebKit") {
$result = buildVisualStudioProject("win/WebKit.vcproj/WebKit.sln", $clean);
}
+ } elsif (isWx()) {
+ @options = ();
+ if (defined($makeArgs)) {
+ @options = split(/ /, $makeArgs);
+ }
+ if ($dir eq "WebKit" && isWx()) {
+ chdir 'wx' or die;
+ }
+
+ $result = buildWafProject($dir, $clean, @options);
}
if (exitStatus($result)) {
@@ -339,21 +367,44 @@ for my $dir (@projects) {
}
exit exitStatus($result);
}
- chdir ".." or die;
+ chdirWebKit();
}
# Don't report the "WebKit is now built" message after a clean operation.
exit if $clean;
# Write out congratulations message.
+writeCongrats();
+
+exit 0;
-my $launcherPath = launcherPath();
-my $launcherName = launcherName();
+sub formatBuildTime($)
+{
+ my ($buildTime) = @_;
-print "\n";
-print "===========================================================\n";
-print " WebKit is now built. To run $launcherName with this newly-built\n";
-print " code, use the \"$launcherPath\" script.\n";
-print "===========================================================\n";
+ my $buildHours = int($buildTime / 3600);
+ my $buildMins = int(($buildTime - $buildHours * 3600) / 60);
+ my $buildSecs = $buildTime - $buildHours * 3600 - $buildMins * 60;
-exit 0;
+ if ($buildHours) {
+ return sprintf("%dh:%02dm:%02ds", $buildHours, $buildMins, $buildSecs);
+ }
+ return sprintf("%02dm:%02ds", $buildMins, $buildSecs);
+}
+
+sub writeCongrats()
+{
+ my $launcherPath = launcherPath();
+ my $launcherName = launcherName();
+ my $endTime = time();
+ my $buildTime = formatBuildTime($endTime - $startTime);
+
+ print "\n";
+ print "===========================================================\n";
+ print " WebKit is now built ($buildTime). \n";
+ if (!isChromium()) {
+ print " To run $launcherName with this newly-built code, use the\n";
+ print " \"$launcherPath\" script.\n";
+ }
+ print "===========================================================\n";
+}
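
The new formatBuildTime() above turns the elapsed wall-clock seconds into an "Xh:MMm:SSs" string, dropping the hours field when it is zero. The same arithmetic in Python, for reference (the function name is illustrative):

```python
# Illustrative Python equivalent of build-webkit's formatBuildTime().
def format_build_time(build_seconds):
    hours = build_seconds // 3600
    minutes = (build_seconds - hours * 3600) // 60
    seconds = build_seconds - hours * 3600 - minutes * 60
    if hours:
        return "%dh:%02dm:%02ds" % (hours, minutes, seconds)
    return "%02dm:%02ds" % (minutes, seconds)

# Example: format_build_time(3725) -> "1h:02m:05s", format_build_time(125) -> "02m:05s"
```
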
diff --git a/WebKitTools/Scripts/check-for-global-initializers b/WebKitTools/Scripts/check-for-global-initializers
index e6c1a69..a74f57d 100755
--- a/WebKitTools/Scripts/check-for-global-initializers
+++ b/WebKitTools/Scripts/check-for-global-initializers
@@ -111,6 +111,11 @@ for my $file (sort @files) {
next if $shortName eq "RenderObject.o";
next if $shortName eq "SubresourceLoader.o";
next if $shortName eq "SVGElementInstance.o";
+ next if $shortName eq "XMLHttpRequest.o";
+ }
+ if ($target eq "WebKit") {
+ next if $shortName eq "HostedNetscapePluginStream.o";
+ next if $shortName eq "NetscapePluginInstanceProxy.o";
}
}
diff --git a/WebKitTools/Scripts/check-webkit-style b/WebKitTools/Scripts/check-webkit-style
index 14812a7..5709cf0 100755
--- a/WebKitTools/Scripts/check-webkit-style
+++ b/WebKitTools/Scripts/check-webkit-style
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
diff --git a/WebKitTools/Scripts/commit-log-editor b/WebKitTools/Scripts/commit-log-editor
index e2fc92d..e58b181 100755
--- a/WebKitTools/Scripts/commit-log-editor
+++ b/WebKitTools/Scripts/commit-log-editor
@@ -39,6 +39,7 @@ use VCSUtils;
use webkitdirs;
sub normalizeLineEndings($$);
+sub removeLongestCommonPrefixEndingInDoubleNewline(\%);
sub usage
{
@@ -216,6 +217,8 @@ for my $changeLog (@changeLogs) {
$changeLogContents{$label} = $contents;
}
+my $commonPrefix = removeLongestCommonPrefixEndingInDoubleNewline(%changeLogContents);
+
my $first = 1;
open NEWLOG, ">$log.edit" or die;
if (isGit() && scalar keys %changeLogSort == 0) {
@@ -233,6 +236,7 @@ if (isGit() && scalar keys %changeLogSort == 0) {
close CHANGELOG_ENTRIES;
}
} else {
+ print NEWLOG normalizeLineEndings($commonPrefix, $endl);
for my $sortKey (sort keys %changeLogSort) {
my $label = $changeLogSort{$sortKey};
if (keys %changeLogSort > 1) {
@@ -273,3 +277,32 @@ sub normalizeLineEndings($$)
$string =~ s/\r?\n/$endl/g;
return $string;
}
+
+sub removeLongestCommonPrefixEndingInDoubleNewline(\%)
+{
+ my ($hashOfStrings) = @_;
+
+ my @strings = values %{$hashOfStrings};
+ return "" unless @strings > 1;
+
+ my $prefix = shift @strings;
+ my $prefixLength = length $prefix;
+ foreach my $string (@strings) {
+ while ($prefixLength) {
+ last if substr($string, 0, $prefixLength) eq $prefix;
+ --$prefixLength;
+ $prefix = substr($prefix, 0, -1);
+ }
+ last unless $prefixLength;
+ }
+
+ return "" unless $prefixLength;
+
+ my $lastDoubleNewline = rindex($prefix, "\n\n");
+ return "" unless $lastDoubleNewline > 0;
+
+ foreach my $key (keys %{$hashOfStrings}) {
+ $hashOfStrings->{$key} = substr($hashOfStrings->{$key}, $lastDoubleNewline);
+ }
+ return substr($prefix, 0, $lastDoubleNewline + 2);
+}
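
removeLongestCommonPrefixEndingInDoubleNewline() above finds the longest prefix shared by every per-directory ChangeLog entry, trims it back to the last blank line, strips it from each entry in place, and returns it so it can be printed once at the top of the commit message. A hedged Python sketch of the same algorithm:

```python
# Python sketch of the Perl helper added to commit-log-editor above.
def remove_longest_common_prefix_ending_in_double_newline(entries):
    """entries maps a label to its ChangeLog text; entries are trimmed in place.
    Returns the shared prefix (ending in a blank line), or "" if there is none."""
    strings = list(entries.values())
    if len(strings) < 2:
        return ""

    prefix = strings[0]
    for string in strings[1:]:
        while prefix and not string.startswith(prefix):
            prefix = prefix[:-1]
        if not prefix:
            return ""

    last_double_newline = prefix.rfind("\n\n")
    if last_double_newline <= 0:
        return ""

    for key in entries:
        entries[key] = entries[key][last_double_newline:]
    return prefix[:last_double_newline + 2]
```
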
diff --git a/WebKitTools/Scripts/make-js-test-wrappers b/WebKitTools/Scripts/make-script-test-wrappers
index a030d3b..133476c 100755
--- a/WebKitTools/Scripts/make-js-test-wrappers
+++ b/WebKitTools/Scripts/make-script-test-wrappers
@@ -62,7 +62,15 @@ my @templates = findTemplateFiles(@ARGV);
for my $tfile (@templates) {
my $tpath = $tfile;
- $tpath =~ s:/resources/TEMPLATE.html$::;
+ my $templateDirectory;
+ my $templateRelativePath;
+ if ($tpath =~ s:/(script-tests)/TEMPLATE.html$::) {
+ $templateDirectory = $1;
+ $templateRelativePath = $1 . "/TEMPLATE.html";
+ } else {
+ print "Inappropriate position of a template: ${tpath}\n";
+ next;
+ }
print "${tpath}\n";
@@ -73,64 +81,22 @@ for my $tfile (@templates) {
my $fileFilter = sub {
push @files, $File::Find::name if substr($_, -3) eq ".js";
};
- find({ preprocess => \&directoryFilter, wanted => $fileFilter }, "resources");
+ find({ preprocess => \&directoryFilter, wanted => $fileFilter }, $templateDirectory);
- open TEMPLATE, "<resources/TEMPLATE.html";
+ open TEMPLATE, "<${templateRelativePath}";
my $template = do { local $/; <TEMPLATE> };
close TEMPLATE;
my $templateNegative = $template;
- if (-e "resources/TEMPLATE-n.html") {
- open TEMPLATE, "<resources/TEMPLATE-n.html";
+ if (-e "${templateDirectory}/TEMPLATE-n.html") {
+ open TEMPLATE, "<${templateDirectory}/TEMPLATE-n.html";
$templateNegative = do { local $/; <TEMPLATE> };
close TEMPLATE;
}
for my $file (@files) {
- next if $file =~ /js-test-.*\.js$/;
- next if $file =~ /cookies-test-(post|pre)\.js$/;
- next if $file =~ /standalone-.*\.js$/;
- next if $file =~ /SVGTestCase\.js/;
- next if $file =~ /WMLTestCase\.js/;
-
- next if $file =~ m:resources/bom-in-file-retains-correct-offset\.js$:; # has a custom template
- next if $file =~ m:resources/NSResolver-exceptions\.js$:;
- next if $file =~ m:resources/WindowProperties\.js$:;
- next if $file =~ m:resources/altGlyph-dom\.js$:;
- next if $file =~ m:resources/attr-case-sensitivity\.js$:;
- next if $file =~ m:resources/box-shadow-overflow-scroll\.js$:;
- next if $file =~ m:resources/codegen-temporaries-multiple-global-blocks-1\.js$:;
- next if $file =~ m:resources/codegen-temporaries-multiple-global-blocks-2\.js$:;
- next if $file =~ m:resources/constructors-cached-navigate\.js$:;
- next if $file =~ m:resources/frame-loading-via-document-write\.js$:;
- next if $file =~ m:resources/id-fastpath-almost-strict\.js$:;
- next if $file =~ m:resources/id-fastpath-strict\.js$:;
- next if $file =~ m:resources/intersectsNode\.js$:;
- next if $file =~ m:resources/p-in-scope\.js$:;
- next if $file =~ m:resources/paste-blockquote-before-blockquote\.js$:;
- next if $file =~ m:resources/reflection-overflow-scroll\.js$:;
- next if $file =~ m:resources/script-element-gc\.js$:;
- next if $file =~ m:resources/script-element-gc\.js$:;
- next if $file =~ m:resources/script3\.js$:;
- next if $file =~ m:resources/script4\.js$:;
- next if $file =~ m:resources/script5\.js$:;
- next if $file =~ m:resources/scripted-random\.js$:;
- next if $file =~ m:resources/select-options-remove\.js$:;
- next if $file =~ m:resources/shadow-offset\.js$:;
- next if $file =~ m:resources/tabindex-focus-blur-all\.js$:;
- next if $file =~ m:resources/use-instanceRoot-event-bubbling\.js$:;
- next if $file =~ m:resources/use-instanceRoot-event-listeners\.js$:;
- next if $file =~ m:resources/window-properties\.js$:;
- next if $file =~ m:resources/wrapper-identity-base\.js$:;
- next if $file =~ m:resources/xhtml-scripts\.js$:;
- next if $file =~ m:resources/instanceof-operator-dummy-worker\.js$:;
- next if $file =~ m:resources/json2-es5-compat\.js$:;
- next if $file =~ m:resources/JSON-stringify\.js$:;
- next if $file =~ m:resources/JSON-parse\.js$:;
- next if $file =~ m:resources/textarea-input-event\.js$:;
-
my $html = $file;
- $html =~ s:resources/(.*)\.js:$1.html:;
+ $html =~ s:${templateDirectory}/(.*)\.js:$1.html:;
next if -f "$html-disabled";
system("grep -q 'successfullyParsed =' $file");
diff --git a/WebKitTools/Scripts/mark-bug-fixed b/WebKitTools/Scripts/mark-bug-fixed
new file mode 100755
index 0000000..c7086c2
--- /dev/null
+++ b/WebKitTools/Scripts/mark-bug-fixed
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Mark a bug as fixed on bugs.webkit.org.
+
+import os
+import re
+import sys
+
+from optparse import OptionParser
+
+from modules.bugzilla import Bugzilla, parse_bug_id
+from modules.comments import bug_comment_from_svn_revision
+from modules.logging import error, log
+from modules.scm import SCM, detect_scm_system
+
+
+class MarkBugFixedTool:
+ def __init__(self):
+ self.bugs = Bugzilla()
+ self.cached_scm = None
+ self.option_parser = OptionParser(usage="usage: %prog [options] [rNNNNN]")
+ self.option_parser.add_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log.")
+ self.option_parser.add_option("-m", "--comment", action="store", type="string", dest="comment", help="Text to include in bug comment.")
+ self.option_parser.add_option("-o", "--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only).")
+ self.option_parser.add_option("-u", "--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it.")
+
+ def scm(self):
+ # Lazily initialize SCM so we don't error out before command-line parsing (or when running non-SCM commands).
+ if not self.cached_scm:
+ original_cwd = os.path.abspath('.')
+ self.cached_scm = detect_scm_system(original_cwd)
+ return self.cached_scm
+
+ def _fetch_commit_log(self, scm, svn_revision):
+ if not svn_revision:
+ return scm.last_svn_commit_log()
+ return scm.svn_commit_log(svn_revision)
+
+ def _determine_bug_id_and_svn_revision(self, bug_id, svn_revision):
+ commit_log = self._fetch_commit_log(self.scm(), svn_revision)
+
+ if not bug_id:
+ bug_id = parse_bug_id(commit_log)
+
+ if not svn_revision:
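+ # svn commit logs start with a header line like "rNNNNN | author | date | N lines",
+ # so pull the revision number out of it.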
+ match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
+ if match:
+ svn_revision = match.group('svn_revision')
+
+ if not bug_id or not svn_revision:
+ not_found = []
+ if not bug_id:
+ not_found.append("bug id")
+ if not svn_revision:
+ not_found.append("svn revision")
+ error("Could not find %s on command-line or in %s."
+ % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit"))
+
+ return (bug_id, svn_revision)
+
+ def _open_bug_in_web_browser(self, bug_id):
+ if sys.platform == "darwin":
+ SCM.run_command(["open", self.bugs.short_bug_url_for_bug_id(bug_id)])
+ return
+ log("WARNING: -o|--open is only supported on Mac OS X.")
+
+ def _prompt_user_for_correctness(self, bug_id, svn_revision):
+ answer = raw_input("Is this correct (y/N)? ")
+ if not re.match("^\s*y(es)?", answer, re.IGNORECASE):
+ exit(1)
+
+ def main(self):
+ (options, args) = self.option_parser.parse_args(sys.argv[1:])
+
+ if len(args) > 1:
+ error("Only one revision may be specified.")
+
+ bug_id = options.bug_id
+
+ svn_revision = args[0] if len(args) == 1 else None
+ if svn_revision:
+ if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE):
+ svn_revision = svn_revision[1:]
+ if not re.match("^[0-9]+$", svn_revision):
+ error("Invalid svn revision: '%s'" % svn_revision)
+
+ needs_prompt = False
+ if not bug_id or not svn_revision:
+ needs_prompt = True
+ (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(bug_id, svn_revision)
+
+ log("Bug: <%s> %s" % (self.bugs.short_bug_url_for_bug_id(bug_id), self.bugs.fetch_title_from_bug(bug_id)))
+ log("Revision: %s" % svn_revision)
+
+ if options.open_bug:
+ self._open_bug_in_web_browser(bug_id)
+
+ if needs_prompt:
+ self._prompt_user_for_correctness(bug_id, svn_revision)
+
+ bug_comment = bug_comment_from_svn_revision(svn_revision)
+ if options.comment:
+ bug_comment = "%s\n\n%s" % (options.comment, bug_comment)
+
+ if options.update_only:
+ log("Adding comment to Bug %s." % bug_id)
+ self.bugs.post_comment_to_bug(bug_id, bug_comment)
+ else:
+ log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id)
+ self.bugs.close_bug_as_fixed(bug_id, bug_comment)
+
+
+def main():
+ tool = MarkBugFixedTool()
+ return tool.main()
+
+if __name__ == "__main__":
+ main()
diff --git a/WebKitTools/Scripts/modules/bugzilla.py b/WebKitTools/Scripts/modules/bugzilla.py
index 1eebe9d..daf3f19 100644
--- a/WebKitTools/Scripts/modules/bugzilla.py
+++ b/WebKitTools/Scripts/modules/bugzilla.py
@@ -92,10 +92,19 @@ def credentials_from_keychain(username=None):
def is_mac_os_x():
return platform.mac_ver()[0]
+def parse_bug_id(message):
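+ """Returns the bug id from a bug URL in message, or None if no bug URL is found.
+ e.g. a message containing "http://webkit.org/b/12345" yields "12345".
+ """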
+ match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message)
+ if match:
+ return match.group('bug_id')
+ match = re.search(Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", message)
+ if match:
+ return match.group('bug_id')
+ return None
+
# FIXME: This should not depend on git for config storage
def read_config(key):
# Need a way to read from svn too
- config_process = subprocess.Popen("git config --get bugzilla." + key, stdout=subprocess.PIPE, shell=True)
+ config_process = subprocess.Popen("git config --get bugzilla.%s" % key, stdout=subprocess.PIPE, shell=True)
value = config_process.communicate()[0]
return_code = config_process.wait()
@@ -119,6 +128,11 @@ def read_credentials():
def timestamp():
return datetime.now().strftime("%Y%m%d%H%M%S")
+
+class BugzillaError(Exception):
+ pass
+
+
class Bugzilla:
def __init__(self, dryrun=False, committers=CommitterList()):
self.dryrun = dryrun
@@ -137,13 +151,21 @@ class Bugzilla:
def bug_url_for_bug_id(self, bug_id, xml=False):
content_type = "&ctype=xml" if xml else ""
return "%sshow_bug.cgi?id=%s%s" % (self.bug_server_url, bug_id, content_type)
-
+
+ def short_bug_url_for_bug_id(self, bug_id):
+ return "http://webkit.org/b/%s" % bug_id
+
def attachment_url_for_id(self, attachment_id, action="view"):
action_param = ""
if action and action != "view":
- action_param = "&action=" + action
+ action_param = "&action=%s" % action
return "%sattachment.cgi?id=%s%s" % (self.bug_server_url, attachment_id, action_param)
+ def _parse_attachment_flag(self, element, flag_name, attachment, result_key):
+ flag = element.find('flag', attrs={'name' : flag_name})
+ if flag and flag['status'] == '+':
+ attachment[result_key] = flag['setter']
+
def _parse_attachment_element(self, element, bug_id):
attachment = {}
attachment['bug_id'] = bug_id
@@ -153,24 +175,13 @@ class Bugzilla:
attachment['url'] = self.attachment_url_for_id(attachment['id'])
attachment['name'] = unicode(element.find('desc').string)
attachment['type'] = str(element.find('type').string)
-
- review_flag = element.find('flag', attrs={"name" : "review"})
- if review_flag and review_flag['status'] == '+':
- reviewer_email = review_flag['setter']
- reviewer = self.committers.reviewer_by_bugzilla_email(reviewer_email)
- attachment['reviewer'] = reviewer.full_name
-
- commit_queue_flag = element.find('flag', attrs={"name" : "commit-queue"})
- if commit_queue_flag and commit_queue_flag['status'] == '+':
- committer_email = commit_queue_flag['setter']
- committer = self.committers.committer_by_bugzilla_email(committer_email)
- attachment['commit-queue'] = committer.full_name
-
+ self._parse_attachment_flag(element, 'review', attachment, 'reviewer_email')
+ self._parse_attachment_flag(element, 'commit-queue', attachment, 'committer_email')
return attachment
def fetch_attachments_from_bug(self, bug_id):
bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
- log("Fetching: " + bug_url)
+ log("Fetching: %s" % bug_url)
page = urllib2.urlopen(bug_url)
soup = BeautifulSoup(page)
@@ -181,6 +192,12 @@ class Bugzilla:
attachments.append(attachment)
return attachments
+ def fetch_title_from_bug(self, bug_id):
+ bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
+ page = urllib2.urlopen(bug_url)
+ soup = BeautifulSoup(page)
+ return soup.find('short_desc').string
+
def fetch_patches_from_bug(self, bug_id):
patches = []
for attachment in self.fetch_attachments_from_bug(bug_id):
@@ -188,17 +205,45 @@ class Bugzilla:
patches.append(attachment)
return patches
- def fetch_reviewed_patches_from_bug(self, bug_id):
+ # _view_source_link belongs in some sort of webkit_config.py module.
+ def _view_source_link(self, local_path):
+ return "http://trac.webkit.org/browser/trunk/%s" % local_path
+
+ def _validate_setter_email(self, patch, result_key, lookup_function, rejection_function, reject_invalid_patches):
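+ # Looks up patch[result_key + '_email'] via lookup_function and, on success, stores the
+ # matching full name under patch[result_key]. Otherwise it either rejects the patch via
+ # rejection_function or just logs a warning, depending on reject_invalid_patches.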
+ setter_email = patch.get(result_key + '_email')
+ if not setter_email:
+ return None
+
+ committer = lookup_function(setter_email)
+ if committer:
+ patch[result_key] = committer.full_name
+ return patch[result_key]
+
+ if reject_invalid_patches:
+ committer_list = "WebKitTools/Scripts/modules/committers.py"
+ failure_message = "%s does not have %s permissions according to %s." % (setter_email, result_key, self._view_source_link(committer_list))
+ rejection_function(patch['id'], failure_message)
+ else:
+ log("Warning, attachment %s on bug %s has invalid %s (%s)", (patch['id'], patch['bug_id'], result_key, setter_email))
+ return None
+
+ def _validate_reviewer(self, patch, reject_invalid_patches):
+ return self._validate_setter_email(patch, 'reviewer', self.committers.reviewer_by_bugzilla_email, self.reject_patch_from_review_queue, reject_invalid_patches)
+
+ def _validate_committer(self, patch, reject_invalid_patches):
+ return self._validate_setter_email(patch, 'committer', self.committers.committer_by_bugzilla_email, self.reject_patch_from_commit_queue, reject_invalid_patches)
+
+ def fetch_reviewed_patches_from_bug(self, bug_id, reject_invalid_patches=False):
reviewed_patches = []
for attachment in self.fetch_attachments_from_bug(bug_id):
- if 'reviewer' in attachment and not attachment['is_obsolete']:
+ if self._validate_reviewer(attachment, reject_invalid_patches) and not attachment['is_obsolete']:
reviewed_patches.append(attachment)
return reviewed_patches
- def fetch_commit_queue_patches_from_bug(self, bug_id):
+ def fetch_commit_queue_patches_from_bug(self, bug_id, reject_invalid_patches=False):
commit_queue_patches = []
- for attachment in self.fetch_reviewed_patches_from_bug(bug_id):
- if 'commit-queue' in attachment and not attachment['is_obsolete']:
+ for attachment in self.fetch_reviewed_patches_from_bug(bug_id, reject_invalid_patches):
+ if self._validate_committer(attachment, reject_invalid_patches) and not attachment['is_obsolete']:
commit_queue_patches.append(attachment)
return commit_queue_patches
@@ -216,10 +261,10 @@ class Bugzilla:
return bug_ids
- def fetch_patches_from_commit_queue(self):
+ def fetch_patches_from_commit_queue(self, reject_invalid_patches=False):
patches_to_land = []
for bug_id in self.fetch_bug_ids_from_commit_queue():
- patches = self.fetch_commit_queue_patches_from_bug(bug_id)
+ patches = self.fetch_commit_queue_patches_from_bug(bug_id, reject_invalid_patches)
patches_to_land += patches
return patches_to_land
@@ -245,7 +290,7 @@ class Bugzilla:
# If the resulting page has a title, and it contains the word "invalid" assume it's the login failure page.
if match and re.search("Invalid", match.group(1), re.IGNORECASE):
# FIXME: We could add the ability to try again on failure.
- raise ScriptError("Bugzilla login failed: %s" % match.group(1))
+ raise BugzillaError("Bugzilla login failed: %s" % match.group(1))
self.authenticated = True
@@ -257,7 +302,7 @@ class Bugzilla:
log(comment_text)
return
- self.browser.open(self.bug_server_url + "attachment.cgi?action=enter&bugid=" + bug_id)
+ self.browser.open("%sattachment.cgi?action=enter&bugid=%s" % (self.bug_server_url, bug_id))
self.browser.select_form(name="entryform")
self.browser['description'] = description
self.browser['ispatch'] = ("1",)
@@ -287,7 +332,7 @@ class Bugzilla:
if match:
text_lines = BeautifulSoup(match.group('error_message')).findAll(text=True)
error_message = "\n" + '\n'.join([" " + line.strip() for line in text_lines if line.strip()])
- raise ScriptError("Bug not created: %s" % error_message)
+ raise BugzillaError("Bug not created: %s" % error_message)
def create_bug_with_patch(self, bug_title, bug_description, component, patch_file_object, patch_description, cc, mark_for_review=False):
self.authenticate()
@@ -304,7 +349,8 @@ class Bugzilla:
if not component or component not in component_names:
component = self.prompt_for_component(component_names)
self.browser['component'] = [component]
- self.browser['cc'] = cc
+ if cc:
+ self.browser['cc'] = cc
self.browser['short_desc'] = bug_title
if bug_description:
log(bug_description)
@@ -317,15 +363,23 @@ class Bugzilla:
bug_id = self._check_create_bug_response(response.read())
log("Bug %s created." % bug_id)
- log(self.bug_server_url + "show_bug.cgi?id=" + bug_id)
+ log("%sshow_bug.cgi?id=%s" % (self.bug_server_url, bug_id))
return bug_id
- def clear_attachment_review_flag(self, attachment_id, additional_comment_text=None):
+ def _find_select_element_for_flag(self, flag_name):
+ # FIXME: This will break if we ever re-order attachment flags
+ if flag_name == "review":
+ return self.browser.find_control(type='select', nr=0)
+ if flag_name == "commit-queue":
+ return self.browser.find_control(type='select', nr=1)
+ raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
+
+ def clear_attachment_flags(self, attachment_id, additional_comment_text=None):
self.authenticate()
- comment_text = "Clearing review flag on attachment: %s" % attachment_id
+ comment_text = "Clearing flags on attachment: %s" % attachment_id
if additional_comment_text:
- comment_text += "\n\n" + additional_comment_text
+ comment_text += "\n\n%s" % additional_comment_text
log(comment_text)
if self.dryrun:
@@ -334,9 +388,35 @@ class Bugzilla:
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
self.browser.set_value(comment_text, name='comment', nr=0)
- self.browser.find_control(type='select', nr=0).value = ("X",)
+ self._find_select_element_for_flag('review').value = ("X",)
+ self._find_select_element_for_flag('commit-queue').value = ("X",)
self.browser.submit()
+ # FIXME: We need a way to test this on a live bugzilla instance.
+ def _set_flag_on_attachment(self, attachment_id, flag_name, flag_value, comment_text, additional_comment_text):
+ self.authenticate()
+
+ if additional_comment_text:
+ comment_text += "\n\n%s" % additional_comment_text
+ log(comment_text)
+
+ if self.dryrun:
+ return
+
+ self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+ self.browser.select_form(nr=1)
+ self.browser.set_value(comment_text, name='comment', nr=0)
+ self._find_select_element_for_flag(flag_name).value = (flag_value,)
+ self.browser.submit()
+
+ def reject_patch_from_commit_queue(self, attachment_id, additional_comment_text=None):
+ comment_text = "Rejecting patch %s from commit-queue." % attachment_id
+ self._set_flag_on_attachment(attachment_id, 'commit-queue', '-', comment_text, additional_comment_text)
+
+ def reject_patch_from_review_queue(self, attachment_id, additional_comment_text=None):
+ comment_text = "Rejecting patch %s from review queue." % attachment_id
+ self._set_flag_on_attachment(attachment_id, 'review', '-', comment_text, additional_comment_text)
+
def obsolete_attachment(self, attachment_id, comment_text = None):
self.authenticate()
@@ -349,7 +429,8 @@ class Bugzilla:
self.browser.select_form(nr=1)
self.browser.find_control('isobsolete').items[0].selected = True
# Also clear any review flag (to remove it from review/commit queues)
- self.browser.find_control(type='select', nr=0).value = ("X",)
+ self._find_select_element_for_flag('review').value = ("X",)
+ self._find_select_element_for_flag('commit-queue').value = ("X",)
if comment_text:
log(comment_text)
# Bugzilla has two textareas named 'comment', one is somehow hidden. We want the first.
@@ -385,3 +466,17 @@ class Bugzilla:
self.browser['bug_status'] = ['RESOLVED']
self.browser['resolution'] = ['FIXED']
self.browser.submit()
+
+ def reopen_bug(self, bug_id, comment_text):
+ self.authenticate()
+
+ log("Re-opening bug %s" % bug_id)
+ log(comment_text) # Bugzilla requires a comment when re-opening a bug, so we know it will never be None.
+ if self.dryrun:
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ self.browser['bug_status'] = ['REOPENED']
+ self.browser['comment'] = comment_text
+ self.browser.submit()
diff --git a/WebKitTools/Scripts/modules/bugzilla_unittest.py b/WebKitTools/Scripts/modules/bugzilla_unittest.py
index 1e52140..f08031e 100644
--- a/WebKitTools/Scripts/modules/bugzilla_unittest.py
+++ b/WebKitTools/Scripts/modules/bugzilla_unittest.py
@@ -68,21 +68,21 @@ class BugzillaTest(unittest.TestCase):
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
- 'reviewer' : 'Test One',
- 'commit-queue' : 'Test Two'
+ 'reviewer_email' : 'one@test.com',
+ 'committer_email' : 'two@test.com'
}
def test_attachment_parsing(self):
- reviewer = Reviewer('Test One', 'one@test.com')
- committer = Committer('Test Two', 'two@test.com')
- committer_list = CommitterList(committers=[committer], reviewers=[reviewer])
- bugzilla = Bugzilla(committers=committer_list)
+ bugzilla = Bugzilla()
soup = BeautifulSoup(self._example_attachment)
attachment_element = soup.find("attachment")
attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
self.assertTrue(attachment)
+ # Make sure we aren't parsing more or less than we expect
+ self.assertEquals(attachment.keys(), self._expected_example_attachment_parsing.keys())
+
for key, expected_value in self._expected_example_attachment_parsing.items():
self.assertEquals(attachment[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, attachment[key], expected_value)))
diff --git a/WebKitTools/Scripts/modules/buildbot.py b/WebKitTools/Scripts/modules/buildbot.py
new file mode 100644
index 0000000..4478429
--- /dev/null
+++ b/WebKitTools/Scripts/modules/buildbot.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for interacting with WebKit's buildbot
+
+import re
+import urllib2
+
+# Import WebKit-specific modules.
+from modules.logging import log
+
+# WebKit includes a built copy of BeautifulSoup in Scripts/modules
+# so this import should always succeed.
+from .BeautifulSoup import BeautifulSoup
+
+class BuildBot:
+ def __init__(self, host="build.webkit.org"):
+ self.buildbot_host = host
+ self.buildbot_server_url = "http://%s/" % self.buildbot_host
+
+ # If any of the Leopard build/test bots or the Windows builders are red, we should not be landing patches.
+ # Other builders should be added to this list once they're known to be stable.
+ self.core_builder_names_regexps = [ 'Leopard', "Windows.*Build" ]
+
+ # If WebKit's buildbot had an XMLRPC interface we could use, we could do something more sophisticated here.
+ # For now we just parse out the basics, enough to support basic questions like "is the tree green?"
+ def _parse_builder_status_from_row(self, status_row):
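+ # Returns a dict with 'name', 'builder_url' and 'is_green', plus 'built_revision' and
+ # 'build_url' when the status cell links to a build.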
+ status_cells = status_row.findAll('td')
+ builder = {}
+
+ name_link = status_cells[0].find('a')
+ builder['name'] = name_link.string
+ # We could generate the builder_url from the name in a future version of this code.
+ builder['builder_url'] = self.buildbot_server_url + name_link['href']
+
+ status_link = status_cells[1].find('a')
+ if not status_link:
+ # We failed to find a link in the first cell, so just give up.
+ # This can happen when a builder was just added; its first cell will just say "no build".
+ builder['is_green'] = False # Other parts of the code depend on is_green being present.
+ return builder
+ revision_string = status_link.string # Will be either a revision number or a build number
+ # If revision_string doesn't start with a digit, assume it's not a revision number.
+ builder['built_revision'] = int(revision_string) if not re.match('\D', revision_string) else None
+ builder['is_green'] = not re.search('fail', status_cells[1].renderContents())
+ # We could parse out the build number instead, but for now just store the URL.
+ builder['build_url'] = self.buildbot_server_url + status_link['href']
+
+ # We could parse out the current activity too.
+
+ return builder
+
+ def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps):
+ builders = []
+ for builder in builder_statuses:
+ for name_regexp in name_regexps:
+ if re.match(name_regexp, builder['name']):
+ builders.append(builder)
+ return builders
+
+ def core_builders_are_green(self):
+ for builder in self._builder_statuses_with_names_matching_regexps(self.builder_statuses(), self.core_builder_names_regexps):
+ if not builder['is_green']:
+ return False
+ return True
+
+ def builder_statuses(self):
+ build_status_url = self.buildbot_server_url + 'one_box_per_builder'
+ page = urllib2.urlopen(build_status_url)
+ soup = BeautifulSoup(page)
+
+ builders = []
+ status_table = soup.find('table')
+ for status_row in status_table.findAll('tr'):
+ builder = self._parse_builder_status_from_row(status_row)
+ builders.append(builder)
+ return builders
diff --git a/WebKitTools/Scripts/modules/buildbot_unittest.py b/WebKitTools/Scripts/modules/buildbot_unittest.py
new file mode 100644
index 0000000..461e5a2
--- /dev/null
+++ b/WebKitTools/Scripts/modules/buildbot_unittest.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.buildbot import BuildBot
+
+from modules.BeautifulSoup import BeautifulSoup
+
+class BuildBotTest(unittest.TestCase):
+
+ _example_one_box_status = '''
+ <table>
+ <tr>
+ <td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
+ <td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
+ <td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
+ <tr>
+ <td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
+ <td class="LastBuild box" >no build</td>
+ <td align="center" class="Activity building">building<br />< 1 min</td>
+ <tr>
+ <td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
+ <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
+ <td align="center" class="Activity idle">idle</td>
+ </table>
+'''
+ _expected_example_one_box_parsings = [
+ {
+ 'builder_url': u'http://build.webkit.org/builders/Windows%20Debug%20%28Tests%29',
+ 'build_url': u'http://build.webkit.org/builders/Windows%20Debug%20%28Tests%29/builds/3693',
+ 'is_green': True,
+ 'name': u'Windows Debug (Tests)',
+ 'built_revision': 47380
+ },
+ {
+ 'builder_url': u'http://build.webkit.org/builders/SnowLeopard%20Intel%20Release',
+ 'is_green': False,
+ 'name': u'SnowLeopard Intel Release',
+ },
+ {
+ 'builder_url': u'http://build.webkit.org/builders/Qt%20Linux%20Release',
+ 'build_url': u'http://build.webkit.org/builders/Qt%20Linux%20Release/builds/654',
+ 'is_green': False,
+ 'name': u'Qt Linux Release',
+ 'built_revision': 47383
+ },
+ ]
+
+ def test_status_parsing(self):
+ buildbot = BuildBot()
+
+ soup = BeautifulSoup(self._example_one_box_status)
+ status_table = soup.find("table")
+ input_rows = status_table.findAll('tr')
+
+ for x in range(len(input_rows)):
+ status_row = input_rows[x]
+ expected_parsing = self._expected_example_one_box_parsings[x]
+
+ builder = buildbot._parse_builder_status_from_row(status_row)
+
+ # Make sure we aren't parsing more or less than we expect
+ self.assertEquals(builder.keys(), expected_parsing.keys())
+
+ for key, expected_value in expected_parsing.items():
+ self.assertEquals(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
+
+ def test_builder_name_regexps(self):
+ buildbot = BuildBot()
+
+ example_builders = [
+ { 'name': u'Leopard Debug (Build)', },
+ { 'name': u'Leopard Debug (Tests)', },
+ { 'name': u'Windows Release (Build)', },
+ { 'name': u'Windows Debug (Tests)', },
+ { 'name': u'Qt Linux Release', },
+ ]
+ name_regexps = [ 'Leopard', "Windows.*Build" ]
+ expected_builders = [
+ { 'name': u'Leopard Debug (Build)', },
+ { 'name': u'Leopard Debug (Tests)', },
+ { 'name': u'Windows Release (Build)', },
+ ]
+
+ # This test should probably be updated if the default regexp list changes
+ self.assertEquals(buildbot.core_builder_names_regexps, name_regexps)
+
+ builders = buildbot._builder_statuses_with_names_matching_regexps(example_builders, name_regexps)
+ self.assertEquals(builders, expected_builders)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/modules/changelogs.py b/WebKitTools/Scripts/modules/changelogs.py
new file mode 100644
index 0000000..a407d23
--- /dev/null
+++ b/WebKitTools/Scripts/modules/changelogs.py
@@ -0,0 +1,92 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for parsing and modifying ChangeLog files
+
+import fileinput # inplace file editing for set_reviewer and update_for_revert
+import re
+
+# FIXME: This doesn't really belong in this file, but we don't have a better home for it yet.
+# Maybe eventually a webkit_config.py?
+def view_source_url(revision_number):
+ return "http://trac.webkit.org/changeset/%s" % revision_number
+
+
+class ChangeLog:
+ def __init__(self, path):
+ self.path = path
+
+ # e.g. 2009-06-03 Eric Seidel <eric@webkit.org>
+ date_line_regexp = re.compile('^(\d{4}-\d{2}-\d{2})' # Consume the date.
+ + '\s+(.+)\s+' # Consume the name.
+ + '<([^<>]+)>$') # And finally the email address.
+
+ @staticmethod
+ def _parse_latest_entry_from_file(changelog_file):
+ entry_lines = []
+ # The first line should be a date line.
+ first_line = changelog_file.readline()
+ if not ChangeLog.date_line_regexp.match(first_line):
+ return None
+ entry_lines.append(first_line)
+
+ for line in changelog_file:
+ # If we've hit the next entry, return.
+ if ChangeLog.date_line_regexp.match(line):
+ return ''.join(entry_lines[:-1]) # Remove the extra newline at the end
+ entry_lines.append(line)
+ return None # We never found a date line!
+
+ def latest_entry(self):
+ changelog_file = open(self.path)
+ try:
+ return self._parse_latest_entry_from_file(changelog_file)
+ finally:
+ changelog_file.close()
+
+ def update_for_revert(self, revision):
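+ # Rewrites the latest entry in place: the "Reviewed by NOBODY (OOPS!)." line becomes
+ # "No review, rolling out rNNNNN." plus a link to the changeset, and the boilerplate
+ # before the first "*"-prefixed file line is dropped.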
+ reviewed_by_regexp = re.compile('Reviewed by NOBODY \(OOPS!\)\.')
+ removing_boilerplate = False
+ # inplace=1 creates a backup file and re-directs stdout to the file
+ for line in fileinput.FileInput(self.path, inplace=1):
+ if reviewed_by_regexp.search(line):
+ print reviewed_by_regexp.sub("No review, rolling out r%s." % revision, line),
+ print " %s\n" % view_source_url(revision)
+ # Remove all the ChangeLog boilerplate between the Reviewed by line and the first changed file.
+ removing_boilerplate = True
+ elif removing_boilerplate:
+ if line.find('*') >= 0: # each changed file is preceded by a *
+ removing_boilerplate = False
+
+ if not removing_boilerplate:
+ print line,
+
+ def set_reviewer(self, reviewer):
+ # inplace=1 creates a backup file and re-directs stdout to the file
+ for line in fileinput.FileInput(self.path, inplace=1):
+ print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")), # Trailing comma suppresses printing newline
diff --git a/WebKitTools/Scripts/modules/changelogs_unittest.py b/WebKitTools/Scripts/modules/changelogs_unittest.py
new file mode 100644
index 0000000..dd14cb7
--- /dev/null
+++ b/WebKitTools/Scripts/modules/changelogs_unittest.py
@@ -0,0 +1,145 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+from changelogs import *
+
+import os
+import tempfile
+from StringIO import StringIO
+
+class ChangeLogsTest(unittest.TestCase):
+
+ _example_entry = '''2009-08-17 Peter Kasting <pkasting@google.com>
+
+ Reviewed by Steve Falkenburg.
+
+ https://bugs.webkit.org/show_bug.cgi?id=27323
+ Only add Cygwin to the path when it isn't already there. This avoids
+ causing problems for people who purposefully have non-Cygwin versions of
+ executables like svn in front of the Cygwin ones in their paths.
+
+ * DumpRenderTree/win/DumpRenderTree.vcproj:
+ * DumpRenderTree/win/ImageDiff.vcproj:
+ * DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
+'''
+
+ # More example text than we need. Eventually we will need to support parsing all of this and write tests for that parsing.
+ _example_changelog = '''2009-08-17 David Kilzer <ddkilzer@apple.com>
+
+ <http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
+
+ Reviewed by David Levin.
+
+ * Scripts/modules/cpp_style.py:
+ (_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
+ (check_max_min_macros): Added. Returns level 4 error when MAX()
+ and MIN() macros are used in header files and C++ source files.
+ (check_style): Added call to check_max_min_macros().
+ * Scripts/modules/cpp_style_unittest.py: Added unit tests.
+ (test_max_macro): Added.
+ (test_min_macro): Added.
+
+2009-08-16 David Kilzer <ddkilzer@apple.com>
+
+ Backed out r47343 which was mistakenly committed
+
+ * Scripts/bugzilla-tool:
+ * Scripts/modules/scm.py:
+
+2009-06-18 Darin Adler <darin@apple.com>
+
+ Rubber stamped by Mark Rowe.
+
+ * DumpRenderTree/mac/DumpRenderTreeWindow.mm:
+ (-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
+ tests. The close method can be called on a window that's already closed
+ so we can't assert here.
+
+== Rolled over to ChangeLog-2009-06-16 ==
+'''
+
+ def test_latest_entry_parse(self):
+ changelog_contents = "%s\n%s" % (self._example_entry, self._example_changelog)
+ changelog_file = StringIO(changelog_contents)
+ latest_entry = ChangeLog._parse_latest_entry_from_file(changelog_file)
+ self.assertEquals(self._example_entry, latest_entry)
+
+ @staticmethod
+ def _write_tmp_file_with_contents(contents):
+ (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
+ file = os.fdopen(file_descriptor, 'w')
+ file.write(contents)
+ file.close()
+ return file_path
+
+ @staticmethod
+ def _read_file_contents(file_path):
+ file = open(file_path)
+ contents = file.read()
+ file.close()
+ return contents
+
+ _new_entry_boilerplate = '''2009-08-19 Eric Seidel <eric@webkit.org>
+
+ Reviewed by NOBODY (OOPS!).
+
+ Need a short description and bug URL (OOPS!)
+
+ * Scripts/bugzilla-tool:
+'''
+
+ def test_set_reviewer(self):
+ changelog_contents = "%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+ changelog_path = self._write_tmp_file_with_contents(changelog_contents)
+ reviewer_name = 'Test Reviewer'
+ ChangeLog(changelog_path).set_reviewer(reviewer_name)
+ actual_contents = self._read_file_contents(changelog_path)
+ expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
+ os.remove(changelog_path)
+ self.assertEquals(actual_contents, expected_contents)
+
+ _expected_revert_entry = '''2009-08-19 Eric Seidel <eric@webkit.org>
+
+ No review, rolling out r12345.
+ http://trac.webkit.org/changeset/12345
+
+ * Scripts/bugzilla-tool:
+'''
+
+ def test_update_for_revert(self):
+ changelog_contents = "%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+ changelog_path = self._write_tmp_file_with_contents(changelog_contents)
+ changelog = ChangeLog(changelog_path)
+ changelog.update_for_revert(12345)
+ actual_entry = changelog.latest_entry()
+ os.remove(changelog_path)
+ self.assertEquals(actual_entry, self._expected_revert_entry)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/modules/comments.py b/WebKitTools/Scripts/modules/comments.py
new file mode 100755
index 0000000..eeee655
--- /dev/null
+++ b/WebKitTools/Scripts/modules/comments.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Helper functions for generating Bugzilla comments about committed patches.
+
+from modules.changelogs import view_source_url
+
+def bug_comment_from_svn_revision(svn_revision):
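+ # e.g. returns "Committed r12345: <http://trac.webkit.org/changeset/12345>" for revision 12345.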
+ return "Committed r%s: <%s>" % (svn_revision, view_source_url(svn_revision))
+
+def bug_comment_from_commit_text(scm, commit_text):
+ svn_revision = scm.svn_revision_from_commit_text(commit_text)
+ return bug_comment_from_svn_revision(svn_revision)
diff --git a/WebKitTools/Scripts/modules/committers.py b/WebKitTools/Scripts/modules/committers.py
index 6a7f436..e157fb4 100644
--- a/WebKitTools/Scripts/modules/committers.py
+++ b/WebKitTools/Scripts/modules/committers.py
@@ -45,13 +45,43 @@ class Reviewer(Committer):
# All reviewers are committers, so this list is only of committers
# who are not reviewers.
committers_unable_to_review = [
+ Committer("Aaron Boodman", "aa@chromium.org"),
+ Committer("Adam Langley", "agl@chromium.org"),
Committer("Albert J. Wong", "ajwong@chromium.org"),
+ Committer("Antonio Gomes", "tonikitoo@webkit.org"),
+ Committer("Anthony Ricaud", "rik@webkit.org"),
Committer("Ben Murdoch", "benm@google.com"),
+ Committer("Brent Fulgham", "bfulgham@webkit.org"),
+ Committer("Brian Weinstein", "bweinstein@apple.com"),
+ Committer("Cameron McCormack", "cam@webkit.org"),
+ Committer("Daniel Bates", "dbates@webkit.org"),
+ Committer("Drew Wilson", "atwilson@chromium.org"),
+ Committer("Dirk Schulze", "krit@webkit.org"),
+ Committer("Dmitry Titov", "dimich@chromium.org"),
+ Committer("Eli Fidler", "eli@staikos.net"),
+ Committer("Eric Roman", "eroman@chromium.org"),
+ Committer("Fumitoshi Ukai", "ukai@chromium.org"),
+ Committer("Greg Bolsinga", "bolsinga@apple.com"),
+ Committer("Jeremy Moskovich", "playmobil@google.com"),
Committer("Jeremy Orlow", "jorlow@chromium.org"),
+ Committer("Jian Li", "jianli@chromium.org"),
+ Committer("John Abd-El-Malek", "jam@chromium.org"),
+ Committer("Joseph Pecoraro", "joepeck@webkit.org"),
+ Committer("Julie Parent", "jparent@google.com"),
+ Committer("Kenneth Rohde Christiansen", "kenneth@webkit.org"),
+ Committer("Laszlo Gombos", "laszlo.1.gombos@nokia.com"),
+ Committer("Nate Chapin", "japhet@chromium.org"),
+ Committer("Ojan Vafai", "ojan@chromium.org"),
+ Committer("Pam Greene", "pam@chromium.org"),
Committer("Peter Kasting", "pkasting@google.com"),
Committer("Pierre d'Herbemont", "pdherbemont@free.fr"),
- Committer("Shinichiro Hamaji", "hamaji@google.com"),
- Committer("Zoltan Horvath", "hzoltan@inf.u-szeged.hu"),
+ Committer("Ryosuke Niwa", "rniwa@webkit.org"),
+ Committer("Scott Violet", "sky@chromium.org"),
+ Committer("Shinichiro Hamaji", "hamaji@chromium.org"),
+ Committer("Tony Chang", "tony@chromium.org"),
+ Committer("Yael Aharon", "yael.aharon@nokia.com"),
+ Committer("Yong Li", "yong.li@torchmobile.com"),
+ Committer("Zoltan Horvath", "zoltan@webkit.org"),
]
reviewers_list = [
@@ -71,12 +101,15 @@ reviewers_list = [
Reviewer("David Kilzer", "ddkilzer@webkit.org"),
Reviewer("David Levin", "levin@chromium.org"),
Reviewer("Dimitri Glazkov", "dglazkov@chromium.org"),
+ Reviewer("Eric Carlson", "eric.carlson@apple.com"),
Reviewer("Eric Seidel", "eric@webkit.org"),
Reviewer("Gavin Barraclough", "barraclough@apple.com"),
+ Reviewer("Geoffrey Garen", "ggaren@apple.com"),
Reviewer("George Staikos", "staikos@kde.org"),
Reviewer("Gustavo Noronha", "gns@gnome.org"),
Reviewer("Holger Freyther", "zecke@selfish.org"),
Reviewer("Jan Alonzo", "jmalonzo@gmail.com"),
+ Reviewer("John Sullivan", "sullivan@apple.com"),
Reviewer("Justin Garcia", "justin.garcia@apple.com"),
Reviewer("Kevin McCullough", "kmccullough@apple.com"),
Reviewer("Kevin Ollivier", "kevino@theolliviers.com"),
@@ -111,13 +144,10 @@ class CommitterList:
return self._committers_by_email
def committer_by_bugzilla_email(self, bugzilla_email):
- committer = self._email_to_committer_map().get(bugzilla_email)
- if not committer:
- raise Exception("Unknown committer: %s" % bugzilla_email)
- return committer
+ return self._email_to_committer_map().get(bugzilla_email)
def reviewer_by_bugzilla_email(self, bugzilla_email):
committer = self.committer_by_bugzilla_email(bugzilla_email)
- if not committer.can_review:
- raise Exception("Committer %s does not have review rights." % committer)
+ if committer and not committer.can_review:
+ return None
return committer
diff --git a/WebKitTools/Scripts/modules/commiters_unittest.py b/WebKitTools/Scripts/modules/committers_unittest.py
index d221c8b..045e20e 100644
--- a/WebKitTools/Scripts/modules/commiters_unittest.py
+++ b/WebKitTools/Scripts/modules/committers_unittest.py
@@ -42,11 +42,11 @@ class CommittersTest(unittest.TestCase):
self.assertEqual(committer_list.committer_by_bugzilla_email('two@test.com'), reviewer)
# Test that a known committer is not returned during reviewer lookup
- self.assertRaises(Exception, committer_list.reviewer_by_bugzilla_email, 'one@test.com')
+ self.assertEqual(committer_list.reviewer_by_bugzilla_email('one@test.com'), None)
# Test that unknown email address fail both committer and reviewer lookup
- self.assertRaises(Exception, committer_list.committer_by_bugzilla_email, 'bar@bar.com')
- self.assertRaises(Exception, committer_list.reviewer_by_bugzilla_email, 'bar@bar.com')
+ self.assertEqual(committer_list.committer_by_bugzilla_email('bar@bar.com'), None)
+ self.assertEqual(committer_list.reviewer_by_bugzilla_email('bar@bar.com'), None)
if __name__ == '__main__':
unittest.main()
diff --git a/WebKitTools/Scripts/modules/cpp_style.py b/WebKitTools/Scripts/modules/cpp_style.py
index 86c0401..0c9dfa0 100644
--- a/WebKitTools/Scripts/modules/cpp_style.py
+++ b/WebKitTools/Scripts/modules/cpp_style.py
@@ -3,6 +3,7 @@
#
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -117,6 +118,7 @@ _ERROR_CATEGORIES = '''\
build/namespaces
build/printf_format
build/storage_class
+ build/using_std
legal/copyright
readability/braces
readability/casting
@@ -138,6 +140,7 @@ _ERROR_CATEGORIES = '''\
runtime/int
runtime/init
runtime/invalid_increment
+ runtime/max_min_macros
runtime/memset
runtime/printf
runtime/printf_format
@@ -1545,7 +1548,7 @@ def check_spacing(filename, clean_lines, line_number, error):
# Alas, we can't test < or > because they're legitimately used sans spaces
# (a->b, vector<int> a). The only time we can tell is a < with no >, and
# only if it's not template params list spilling into the next line.
- matched = search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+ matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line)
if not matched:
# Note that while it seems that the '<[^<]*' term in the following
# regexp could be simplified to '<.*', which would indeed match
@@ -1558,7 +1561,7 @@ def check_spacing(filename, clean_lines, line_number, error):
'Missing spaces around %s' % matched.group(1))
# We allow no-spaces around << and >> when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
- matched = search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
+ matched = search(r'[^0-9\s](<<|>>)[^0-9\s=]', line)
if matched:
error(filename, line_number, 'whitespace/operators', 3,
'Missing spaces around %s' % matched.group(1))
@@ -1741,6 +1744,58 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi
break
+def check_using_std(filename, clean_lines, line_number, error):
+ """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if filename.endswith('.c') or filename.endswith('.m'):
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
+ if not using_std_match:
+ return
+
+ method_name = using_std_match.group('method_name')
+ error(filename, line_number, 'build/using_std', 4,
+ "Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+
+
+def check_max_min_macros(filename, clean_lines, line_number, error):
+ """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if filename.endswith('.c') or filename.endswith('.m'):
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
+ if not max_min_macros_search:
+ return
+
+ max_min_macro = max_min_macros_search.group('max_min_macro')
+ max_min_macro_lower = max_min_macro.lower()
+ error(filename, line_number, 'runtime/max_min_macros', 4,
+ 'Use std::%s() or std::%s<type>() instead of the %s() macro.'
+ % (max_min_macro_lower, max_min_macro_lower, max_min_macro))
+
+
def check_switch_indentation(filename, clean_lines, line_number, error):
"""Looks for indentation errors inside of switch statements.
@@ -2174,6 +2229,8 @@ def check_style(filename, clean_lines, line_number, file_extension, error):
# Some more style checks
check_namespace_indentation(filename, clean_lines, line_number, file_extension, error)
+ check_using_std(filename, clean_lines, line_number, error)
+ check_max_min_macros(filename, clean_lines, line_number, error)
check_switch_indentation(filename, clean_lines, line_number, error)
check_braces(filename, clean_lines, line_number, error)
check_exit_statement_simplifications(filename, clean_lines, line_number, error)
@@ -3087,6 +3144,7 @@ def use_webkit_styles():
# modify the implementation and enable them.
global _DEFAULT_FILTERS
_DEFAULT_FILTERS = [
+ '-whitespace/end_of_line',
'-whitespace/comments',
'-whitespace/blank_line',
'-runtime/explicit', # explicit
diff --git a/WebKitTools/Scripts/modules/cpp_style_unittest.py b/WebKitTools/Scripts/modules/cpp_style_unittest.py
index ad01fc3..322356e 100644
--- a/WebKitTools/Scripts/modules/cpp_style_unittest.py
+++ b/WebKitTools/Scripts/modules/cpp_style_unittest.py
@@ -3,6 +3,7 @@
#
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -950,7 +951,7 @@ class CppStyleTest(CppStyleTestBase):
self.assert_lint('int a[sizeof(struct Foo)];', '')
self.assert_lint('int a[128 - sizeof(const bar)];', '')
self.assert_lint('int a[(sizeof(foo) * 4)];', '')
- self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', '')
+ self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around / [whitespace/operators] [3]')
self.assert_lint('delete a[some_var];', '')
self.assert_lint('return a[some_var];', '')
@@ -1208,6 +1209,62 @@ class CppStyleTest(CppStyleTestBase):
self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
' [whitespace/operators] [3]')
self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
+ self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t -= b;', '')
+ self.assert_lint('a<Foo> t += b;', '')
+ self.assert_lint('a<Foo*> t *= b;', '')
+ self.assert_lint('a<Foo*> t /= b;', '')
+ self.assert_lint('a<Foo*> t |= b;', '')
+ self.assert_lint('a<Foo*> t &= b;', '')
+ self.assert_lint('a<Foo*> t <<= b;', '')
+ self.assert_lint('a<Foo*> t >>= b;', '')
+ self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b/c; //Test', ['At least two spaces'
+ ' is best between code and comments [whitespace/'
+ 'comments] [2]', 'Should have a space between // '
+ 'and comment [whitespace/comments] [4]', 'Missing'
+ ' spaces around / [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b||c; //Test', ['Should have a space'
+ ' between // and comment [whitespace/comments] [4]',
+ 'Missing spaces around || [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line '
+ '/*...*/-style comment found. Lint may give bogus '
+ 'warnings. Consider replacing these with //-style'
+ ' comments, with #if 0...#endif, or with more clearly'
+ ' structured multi-line comments. [readability/multiline_comment] [5]')
+ self.assert_lint('a<Foo&> t <<= &b | &c;', '')
+ self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
def test_spacing_before_last_semicolon(self):
self.assert_lint('call_function() ;',
@@ -2959,7 +3016,7 @@ class WebKitStyleTest(CppStyleTestBase):
'Missing space after , [whitespace/comma] [3]')
self.assert_multi_line_lint(
'c = a|b;',
- '')
+ 'Missing spaces around | [whitespace/operators] [3]')
# FIXME: We cannot catch this lint error.
# self.assert_multi_line_lint(
# 'return condition ? 1:0;',
@@ -3425,6 +3482,49 @@ class WebKitStyleTest(CppStyleTestBase):
'if (othertrue == fontType)',
'')
+ def test_using_std(self):
+ self.assert_lint(
+ 'using std::min;',
+ "Use 'using namespace std;' instead of 'using std::min;'."
+ " [build/using_std] [4]",
+ 'foo.cpp')
+
+ def test_max_macro(self):
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MAX(0, 1); }',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
+ def test_min_macro(self):
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MIN(0, 1); }',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
def test_names(self):
# FIXME: Implement this.
pass
diff --git a/WebKitTools/Scripts/modules/logging.py b/WebKitTools/Scripts/modules/logging.py
index ea03a48..cbccacf 100644
--- a/WebKitTools/Scripts/modules/logging.py
+++ b/WebKitTools/Scripts/modules/logging.py
@@ -35,5 +35,14 @@ def log(string):
print >> sys.stderr, string
def error(string):
- log("ERROR: " + string)
+ log("ERROR: %s" % string)
exit(1)
+
+# Simple class to split output between multiple destinations
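+# e.g. sys.stderr = tee(sys.stderr, open("tool.log", "w")) sends subsequent writes to both.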
+class tee:
+ def __init__(self, *files):
+ self.files = files
+
+ def write(self, string):
+ for file in self.files:
+ file.write(string)
diff --git a/WebKitTools/Scripts/modules/logging_unittest.py b/WebKitTools/Scripts/modules/logging_unittest.py
new file mode 100644
index 0000000..7d41e56
--- /dev/null
+++ b/WebKitTools/Scripts/modules/logging_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import subprocess
+import StringIO
+import tempfile
+import unittest
+
+from modules.logging import *
+from modules.scm import ScriptError
+
+class LoggingTest(unittest.TestCase):
+
+ def assert_log_equals(self, log_input, expected_output):
+ original_stderr = sys.stderr
+ test_stderr = StringIO.StringIO()
+ sys.stderr = test_stderr
+
+ try:
+ log(log_input)
+ actual_output = test_stderr.getvalue()
+ finally:
+ sys.stderr = original_stderr
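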
+
+ self.assertEquals(actual_output, expected_output, "log(\"%s\") expected: %s actual: %s" % (log_input, expected_output, actual_output))
+
+ def test_log(self):
+ self.assert_log_equals("test", "test\n")
+
+ # Test that log() does not throw an exception when passed an object instead of a string.
+ self.assert_log_equals(ScriptError(message="ScriptError"), "ScriptError\n")
+
+
+if __name__ == '__main__':
+ unittest.main()
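
The redirect/restore dance in assert_log_equals can also be written as a small context manager; this is just an equivalent standalone sketch of the same pattern, not part of the patch:

import sys
import StringIO
from contextlib import contextmanager

@contextmanager
def captured_stderr():
    original_stderr = sys.stderr
    sys.stderr = StringIO.StringIO()
    try:
        yield sys.stderr
    finally:
        sys.stderr = original_stderr

# with captured_stderr() as stream:
#     log("test")
# assert stream.getvalue() == "test\n"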
diff --git a/WebKitTools/Scripts/modules/scm.py b/WebKitTools/Scripts/modules/scm.py
index ec1f362..3daecbc 100644
--- a/WebKitTools/Scripts/modules/scm.py
+++ b/WebKitTools/Scripts/modules/scm.py
@@ -78,8 +78,43 @@ class CommitMessage:
class ScriptError(Exception):
- pass
+ def __init__(self, message=None, script_args=None, exit_code=None, output=None, cwd=None):
+ if not message:
+ message = 'Failed to run "%s"' % script_args
+ if exit_code:
+ message += " exit_code: %d" % exit_code
+ if cwd:
+ message += " cwd: %s" % cwd
+
+ Exception.__init__(self, message)
+ self.script_args = script_args # 'args' is already used by Exception
+ self.exit_code = exit_code
+ self.output = output
+ self.cwd = cwd
+
+ def message_with_output(self, output_limit=500):
+ if self.output:
+ if len(self.output) > output_limit:
+ return "%s\nLast %s characters of output:\n%s" % (self, output_limit, self.output[-output_limit:])
+ return "%s\n%s" % (self, self.output)
+ return str(self)
+
+class CheckoutNeedsUpdate(ScriptError):
+ def __init__(self, script_args, exit_code, output, cwd):
+ ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd)
+
+
+def default_error_handler(error):
+ raise error
+
+def commit_error_handler(error):
+ if re.search("resource out of date", error.output):
+ raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd)
+ default_error_handler(error)
+
+def ignore_error(error):
+ pass
class SCM:
def __init__(self, cwd, dryrun=False):
@@ -88,24 +123,28 @@ class SCM:
self.dryrun = dryrun
@staticmethod
- def run_command(args, cwd=None, input=None, raise_on_failure=True, return_exit_code=False):
+ def run_command(args, cwd=None, input=None, error_handler=default_error_handler, return_exit_code=False):
stdin = subprocess.PIPE if input else None
- process = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=stdin, cwd=cwd)
+ process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
output = process.communicate(input)[0].rstrip()
exit_code = process.wait()
- if raise_on_failure and exit_code:
- raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
+ if exit_code:
+ script_error = ScriptError(script_args=args, exit_code=exit_code, output=output, cwd=cwd)
+ error_handler(script_error)
if return_exit_code:
return exit_code
return output
+ def scripts_directory(self):
+ return os.path.join(self.checkout_root, "WebKitTools", "Scripts")
+
def script_path(self, script_name):
- return os.path.join(self.checkout_root, "WebKitTools", "Scripts", script_name)
+ return os.path.join(self.scripts_directory(), script_name)
def ensure_clean_working_directory(self, force):
if not force and not self.working_directory_is_clean():
- print self.run_command(self.status_command(), raise_on_failure=False)
- raise ScriptError("Working directory has modifications, pass --force-clean or --no-clean to continue.")
+ print self.run_command(self.status_command(), error_handler=ignore_error)
+ raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
log("Cleaning working directory")
self.clean_working_directory()
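
A hedged usage sketch of the reworked error handling: the default handler raises a ScriptError carrying the merged stdout/stderr, while ignore_error suppresses the failure (the svn command here is purely illustrative):

from modules.scm import SCM, ScriptError, ignore_error

try:
    SCM.run_command(['svn', 'info', '/no/such/path'])
except ScriptError, e:
    print e.message_with_output()  # includes up to the last 500 characters of output

# Swallow the failure and keep whatever output the command produced.
output = SCM.run_command(['svn', 'info', '/no/such/path'], error_handler=ignore_error)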
@@ -123,7 +162,7 @@ class SCM:
def apply_patch(self, patch, force=False):
# It's possible that the patch was not made from the root directory.
# We should detect and handle that case.
- curl_process = subprocess.Popen(['curl', patch['url']], stdout=subprocess.PIPE)
+ curl_process = subprocess.Popen(['curl', '--location', '--silent', '--show-error', patch['url']], stdout=subprocess.PIPE)
args = [self.script_path('svn-apply'), '--reviewer', patch['reviewer']]
if force:
args.append('--force')
@@ -131,7 +170,7 @@ class SCM:
return_code = patch_apply_process.wait()
if return_code:
- raise ScriptError("Patch %s from bug %s failed to download and apply." % (patch['url'], patch['bug_id']))
+ raise ScriptError(message="Patch %s from bug %s failed to download and apply." % (patch['url'], patch['bug_id']))
def run_status_and_extract_filenames(self, status_command, status_regexp):
filenames = []
@@ -144,6 +183,25 @@ class SCM:
filenames.append(filename)
return filenames
+ def strip_r_from_svn_revision(self, svn_revision):
+ match = re.match("^r(?P<svn_revision>\d+)", svn_revision)
+ if match:
+ return match.group('svn_revision')
+ return svn_revision
+
+ def svn_revision_from_commit_text(self, commit_text):
+ match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE)
+ return match.group('svn_revision')
+
+ # ChangeLog-specific code doesn't really belong in scm.py, but this function is very useful.
+ def modified_changelogs(self):
+ changelog_paths = []
+ paths = self.changed_files()
+ for path in paths:
+ if os.path.basename(path) == "ChangeLog":
+ changelog_paths.append(path)
+ return changelog_paths
+
@staticmethod
def in_working_directory(path):
raise NotImplementedError, "subclasses must implement"
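
A small illustration of the new helpers, assuming an SVN WebKit checkout (the revision number is a placeholder):

from modules.scm import detect_scm_system

scm = detect_scm_system('.')  # assumes the cwd is inside an SVN checkout
revision = scm.svn_revision_from_commit_text("Committed revision 12345.")
assert revision == '12345'
assert scm.strip_r_from_svn_revision('r12345') == '12345'
changelog_paths = scm.modified_changelogs()  # ChangeLog files among changed_files()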
@@ -177,9 +235,24 @@ class SCM:
def create_patch(self):
raise NotImplementedError, "subclasses must implement"
+ def diff_for_revision(self, revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def apply_reverse_diff(self, revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def revert_files(self, file_paths):
+ raise NotImplementedError, "subclasses must implement"
+
def commit_with_message(self, message):
raise NotImplementedError, "subclasses must implement"
-
+
+ def svn_commit_log(self, svn_revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def last_svn_commit_log(self):
+ raise NotImplementedError, "subclasses must implement"
+
# Subclasses must indicate if they support local commits,
# but the SCM baseclass will only call local_commits methods when this is true.
@staticmethod
@@ -211,16 +284,21 @@ class SVN(SCM):
def in_working_directory(path):
return os.path.isdir(os.path.join(path, '.svn'))
- @staticmethod
- def find_uuid(path):
- if not SVN.in_working_directory(path):
+ @classmethod
+ def find_uuid(cls, path):
+ if not cls.in_working_directory(path):
return None
- info = SVN.run_command(['svn', 'info', path])
- match = re.search("^Repository UUID: (?P<uuid>.+)$", info, re.MULTILINE)
+ return cls.value_from_svn_info(path, 'Repository UUID')
+
+ @classmethod
+ def value_from_svn_info(cls, path, field_name):
+ svn_info_args = ['svn', 'info', path]
+ info_output = cls.run_command(svn_info_args)
+ match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
if not match:
- raise ScriptError('svn info did not contain a UUID.')
- return match.group('uuid')
-
+ raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
+ return match.group('value')
+
@staticmethod
def find_checkout_root(path):
uuid = SVN.find_uuid(path)
@@ -236,11 +314,11 @@ class SVN(SCM):
(path, last_component) = os.path.split(path)
if last_path == path:
return None
-
+
@staticmethod
def commit_success_regexp():
return "^Committed revision (?P<svn_revision>\d+)\.$"
-
+
def svn_version(self):
if not self.cached_version:
self.cached_version = self.run_command(['svn', '--version', '--quiet'])
@@ -274,19 +352,44 @@ class SVN(SCM):
return "svn"
def create_patch(self):
- return self.run_command(self.script_path("svn-create-patch"))
+ return self.run_command(self.script_path("svn-create-patch"), cwd=self.checkout_root)
+
+ def diff_for_revision(self, revision):
+ return self.run_command(['svn', 'diff', '-c', str(revision)])
+
+ def _repository_url(self):
+ return self.value_from_svn_info(self.checkout_root, 'URL')
+
+ def apply_reverse_diff(self, revision):
+ # '-c -revision' applies the inverse diff of 'revision'
+ svn_merge_args = ['svn', 'merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
+ log("WARNING: svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.")
+ log("Running '%s'" % " ".join(svn_merge_args))
+ self.run_command(svn_merge_args)
+
+ def revert_files(self, file_paths):
+ self.run_command(['svn', 'revert'] + file_paths)
def commit_with_message(self, message):
if self.dryrun:
- return "Dry run, no remote commit."
- return self.run_command(['svn', 'commit', '-m', message])
+ # Return a string which looks like a commit so that things which parse this output will succeed.
+ return "Dry run, no commit.\nCommitted revision 0."
+ return self.run_command(['svn', 'commit', '-m', message], error_handler=commit_error_handler)
+
+ def svn_commit_log(self, svn_revision):
+ svn_revision = self.strip_r_from_svn_revision(str(svn_revision))
+ return self.run_command(['svn', 'log', '--non-interactive', '--revision', svn_revision])
+ def last_svn_commit_log(self):
+ # BASE is the checkout revision, HEAD is the remote repository revision
+ # http://svnbook.red-bean.com/en/1.0/ch03s03.html
+ return self.svn_commit_log('BASE')
# All git-specific logic should go here.
class Git(SCM):
def __init__(self, cwd, dryrun=False):
SCM.__init__(self, cwd, dryrun)
-
+
@classmethod
def in_working_directory(cls, path):
return cls.run_command(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path) == "true"
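
The dry-run strings were changed so that callers which parse commit output keep working; for the SVN case the effect is roughly:

import re

commit_success_regexp = r"^Committed revision (?P<svn_revision>\d+)\.$"
dry_run_output = "Dry run, no commit.\nCommitted revision 0."
match = re.search(commit_success_regexp, dry_run_output, re.MULTILINE)
assert match.group('svn_revision') == '0'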
@@ -303,22 +406,29 @@ class Git(SCM):
@staticmethod
def commit_success_regexp():
return "^Committed r(?P<svn_revision>\d+)$"
-
+
+
def discard_local_commits(self):
self.run_command(['git', 'reset', '--hard', 'trunk'])
def local_commits(self):
return self.run_command(['git', 'log', '--pretty=oneline', 'HEAD...trunk']).splitlines()
+ def rebase_in_progress(self):
+ return os.path.exists(os.path.join(self.checkout_root, '.git/rebase-apply'))
+
def working_directory_is_clean(self):
return self.run_command(['git', 'diff-index', 'HEAD']) == ""
-
+
def clean_working_directory(self):
# Could run git clean here too, but that wouldn't match working_directory_is_clean
self.run_command(['git', 'reset', '--hard', 'HEAD'])
-
+ # Aborting rebase even though this does not match working_directory_is_clean
+ if self.rebase_in_progress():
+ self.run_command(['git', 'rebase', '--abort'])
+
def update_webkit(self):
- # FIXME: Should probably call update-webkit, no?
+ # FIXME: Call update-webkit once https://bugs.webkit.org/show_bug.cgi?id=27162 is fixed.
log("Updating working directory")
self.run_command(['git', 'svn', 'rebase'])
@@ -340,10 +450,44 @@ class Git(SCM):
def create_patch(self):
return self.run_command(['git', 'diff', 'HEAD'])
+ @classmethod
+ def git_commit_from_svn_revision(cls, revision):
+ # git svn find-rev always exits 0, even when the revision is not found.
+ return cls.run_command(['git', 'svn', 'find-rev', 'r%s' % revision])
+
+ def diff_for_revision(self, revision):
+ git_commit = self.git_commit_from_svn_revision(revision)
+ return self.create_patch_from_local_commit(git_commit)
+
+ def apply_reverse_diff(self, revision):
+ # Assume the revision is an svn revision.
+ git_commit = self.git_commit_from_svn_revision(revision)
+ if not git_commit:
+ raise ScriptError(message='Failed to find git commit for revision %s, git svn log output: "%s"' % (revision, git_commit))
+
+ # I think this will always fail due to ChangeLogs.
+ # FIXME: We need to detect specific failure conditions and handle them.
+ self.run_command(['git', 'revert', '--no-commit', git_commit], error_handler=ignore_error)
+
+ # Fix any ChangeLogs if necessary.
+ changelog_paths = self.modified_changelogs()
+ if len(changelog_paths):
+ self.run_command([self.script_path('resolve-ChangeLogs')] + changelog_paths)
+
+ def revert_files(self, file_paths):
+ self.run_command(['git', 'checkout', 'HEAD'] + file_paths)
+
def commit_with_message(self, message):
self.commit_locally_with_message(message)
return self.push_local_commits_to_server()
+ def svn_commit_log(self, svn_revision):
+ svn_revision = self.strip_r_from_svn_revision(svn_revision)
+ return self.run_command(['git', 'svn', 'log', '-r', svn_revision])
+
+ def last_svn_commit_log(self):
+ return self.run_command(['git', 'svn', 'log', '--limit=1'])
+
# Git-specific methods:
def create_patch_from_local_commit(self, commit_id):
@@ -357,8 +501,9 @@ class Git(SCM):
def push_local_commits_to_server(self):
if self.dryrun:
- return "Dry run, no remote commit."
- return self.run_command(['git', 'svn', 'dcommit'])
+ # Return a string which looks like a commit so that things which parse this output will succeed.
+ return "Dry run, no remote commit.\nCommitted r0"
+ return self.run_command(['git', 'svn', 'dcommit'], error_handler=commit_error_handler)
# This function supports the following argument formats:
# no args : rev-list trunk..HEAD
@@ -373,9 +518,9 @@ class Git(SCM):
commit_ids = []
for commitish in args:
if '...' in commitish:
- raise ScriptError("'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
+ raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
elif '..' in commitish:
- commit_ids += self.run_command(['git', 'rev-list', commitish]).splitlines()
+ commit_ids += reversed(self.run_command(['git', 'rev-list', commitish]).splitlines())
else:
# Turn single commits or branch or tag names into commit ids.
commit_ids += self.run_command(['git', 'rev-parse', '--revs-only', commitish]).splitlines()
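
Worth noting for the reversed() change above: git rev-list prints newest-first, so range arguments now expand to commits in oldest-first order, which is the order they should be applied in. A tiny sketch with placeholder hashes:

rev_list_output = "commit3-sha\ncommit2-sha\ncommit1-sha"  # newest first, as rev-list prints it
commit_ids = list(reversed(rev_list_output.splitlines()))
assert commit_ids == ["commit1-sha", "commit2-sha", "commit3-sha"]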
diff --git a/WebKitTools/Scripts/modules/scm_unittest.py b/WebKitTools/Scripts/modules/scm_unittest.py
index 5bf2726..58494a0 100644
--- a/WebKitTools/Scripts/modules/scm_unittest.py
+++ b/WebKitTools/Scripts/modules/scm_unittest.py
@@ -1,4 +1,5 @@
# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,18 +27,40 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import base64
import os
+import re
+import stat
import subprocess
import tempfile
import unittest
-from modules.scm import detect_scm_system, SCM, ScriptError
+import urllib
+from modules.scm import detect_scm_system, SCM, ScriptError, CheckoutNeedsUpdate, ignore_error, commit_error_handler
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
-def run(args):
- SCM.run_command(args)
+def run(args, cwd=None):
+ return SCM.run_command(args, cwd=cwd)
+
+def run_silent(args, cwd=None):
+ process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
+ process.communicate() # ignore output
+ exit_code = process.wait()
+ if exit_code:
+ raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
+
+def write_into_file_at_path(file_path, contents):
+ file = open(file_path, 'w')
+ file.write(contents)
+ file.close()
+
+def read_from_path(file_path):
+ file = open(file_path, 'r')
+ contents = file.read()
+ file.close()
+ return contents
# Exists to share svn repository creation code between the git and svn tests
class SVNTestRepository:
@@ -57,13 +80,23 @@ class SVNTestRepository:
run(['svn', 'commit', '--quiet', '--message', 'second commit'])
- test_file.write("test3")
- test_file.close()
+ test_file.write("test3\n")
+ test_file.flush()
run(['svn', 'commit', '--quiet', '--message', 'third commit'])
+ test_file.write("test4\n")
+ test_file.close()
+
+ run(['svn', 'commit', '--quiet', '--message', 'fourth commit'])
+
+ # svn does not seem to update after commit as I would expect.
+ run(['svn', 'update'])
+
@classmethod
def setup(cls, test_object):
+ test_object.original_path = os.path.abspath('.')
+
# Create an test SVN repository
test_object.svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path # Not sure this will work on windows
@@ -83,29 +116,161 @@ class SVNTestRepository:
run(['rm', '-rf', test_object.svn_checkout_path])
-class SVNTest(unittest.TestCase):
+class SCMTest(unittest.TestCase):
+ def _create_patch(self, patch_contents):
+ patch_path = os.path.join(self.svn_checkout_path, 'patch.diff')
+ write_into_file_at_path(patch_path, patch_contents)
+ patch = {}
+ patch['reviewer'] = 'Joe Cool'
+ patch['bug_id'] = '12345'
+ patch['url'] = 'file://%s' % urllib.pathname2url(patch_path)
+ return patch
+
+ def _setup_webkittools_scripts_symlink(self, local_scm):
+ webkit_scm = detect_scm_system(self.original_path)
+ webkit_scripts_directory = webkit_scm.scripts_directory()
+ local_scripts_directory = local_scm.scripts_directory()
+ os.mkdir(os.path.dirname(local_scripts_directory))
+ os.symlink(webkit_scripts_directory, local_scripts_directory)
+
+ def test_error_handlers(self):
+ git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
+ svn_failure_message="""svn: Commit failed (details follow):
+svn: File or directory 'ChangeLog' is out of date; try updating
+svn: resource out of date; try updating
+"""
+ command_does_not_exist = ['does_not_exist', 'invalid_option']
+ self.assertRaises(OSError, SCM.run_command, command_does_not_exist)
+ self.assertRaises(OSError, SCM.run_command, command_does_not_exist, error_handler=ignore_error)
+
+ command_returns_non_zero = ['/bin/sh', '--invalid-option']
+ self.assertRaises(ScriptError, SCM.run_command, command_returns_non_zero)
+ self.assertTrue(SCM.run_command(command_returns_non_zero, error_handler=ignore_error))
+
+ self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
+ self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
+ self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
+
+
+ # Tests which both GitTest and SVNTest should run.
+ # FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
+ def _shared_test_commit_with_message(self):
+ write_into_file_at_path('test_file', 'more test content')
+ commit_text = self.scm.commit_with_message('another test commit')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '5')
+
+ self.scm.dryrun = True
+ write_into_file_at_path('test_file', 'still more test content')
+ commit_text = self.scm.commit_with_message('yet another test commit')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')
+
+ def _shared_test_reverse_diff(self):
+ self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
+ # Only test the simple case, as any other will end up with conflict markers.
+ self.scm.apply_reverse_diff('4')
+ self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
+
+ def _shared_test_diff_for_revision(self):
+ # Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
+ r3_patch = self.scm.diff_for_revision(3)
+ self.assertTrue(re.search('test3', r3_patch))
+ self.assertFalse(re.search('test4', r3_patch))
+ self.assertTrue(re.search('test2', r3_patch))
+ self.assertTrue(re.search('test2', self.scm.diff_for_revision(2)))
+
+
+class SVNTest(SCMTest):
def setUp(self):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
+ self.scm = detect_scm_system(self.svn_checkout_path)
def tearDown(self):
SVNTestRepository.tear_down(self)
+ os.chdir(self.original_path)
+
+ def test_create_patch_is_full_patch(self):
+ test_dir_path = os.path.join(self.svn_checkout_path, 'test_dir')
+ os.mkdir(test_dir_path)
+ test_file_path = os.path.join(test_dir_path, 'test_file2')
+ write_into_file_at_path(test_file_path, 'test content')
+ run(['svn', 'add', 'test_dir'])
+
+ # create_patch depends on 'svn-create-patch', so make a dummy version.
+ scripts_path = os.path.join(self.svn_checkout_path, 'WebKitTools', 'Scripts')
+ os.makedirs(scripts_path)
+ create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
+ write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD')
+ os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
+
+ # Change into our test directory and run the create_patch command.
+ os.chdir(test_dir_path)
+ scm = detect_scm_system(test_dir_path)
+ self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
+ patch_contents = scm.create_patch()
+ # Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
+ self.assertEqual(os.path.realpath(scm.checkout_root), patch_contents)
def test_detection(self):
scm = detect_scm_system(self.svn_checkout_path)
self.assertEqual(scm.display_name(), "svn")
self.assertEqual(scm.supports_local_commits(), False)
-class GitTest(unittest.TestCase):
+ def test_apply_small_binary_patch(self):
+ patch_contents = """Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+"""
+ expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
+ self._setup_webkittools_scripts_symlink(self.scm)
+ patch_file = self._create_patch(patch_contents)
+ self.scm.apply_patch(patch_file)
+ actual_contents = read_from_path("test_file.swf")
+ self.assertEqual(actual_contents, expected_contents)
+
+ def test_apply_svn_patch(self):
+ scm = detect_scm_system(self.svn_checkout_path)
+ patch = self._create_patch(run(['svn', 'diff', '-r4:3']))
+ self._setup_webkittools_scripts_symlink(scm)
+ scm.apply_patch(patch)
+
+ def test_apply_svn_patch_force(self):
+ scm = detect_scm_system(self.svn_checkout_path)
+ patch = self._create_patch(run(['svn', 'diff', '-r2:4']))
+ self._setup_webkittools_scripts_symlink(scm)
+ self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
+
+ def test_commit_logs(self):
+ # Commits have dates and usernames in them, so we can't just compare them directly.
+ self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
+ self.assertTrue(re.search('second commit', self.scm.svn_commit_log(2)))
+
+ def test_commit_text_parsing(self):
+ self._shared_test_commit_with_message()
+
+ def test_reverse_diff(self):
+ self._shared_test_reverse_diff()
+
+ def test_diff_for_revision(self):
+ self._shared_test_diff_for_revision()
+
+
+class GitTest(SCMTest):
def _setup_git_clone_of_svn_repository(self):
self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
- # --quiet doesn't make git svn silent, so we redirect output
- args = ['git', 'svn', '--quiet', 'clone', self.svn_repo_url, self.git_checkout_path]
- git_svn_clone = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- git_svn_clone.communicate() # ignore output
- git_svn_clone.wait()
+ # --quiet doesn't make git svn silent, so we use run_silent to redirect output
+ run_silent(['git', 'svn', '--quiet', 'clone', self.svn_repo_url, self.git_checkout_path])
def _tear_down_git_clone_of_svn_repository(self):
run(['rm', '-rf', self.git_checkout_path])
@@ -114,26 +279,81 @@ class GitTest(unittest.TestCase):
SVNTestRepository.setup(self)
self._setup_git_clone_of_svn_repository()
os.chdir(self.git_checkout_path)
+ self.scm = detect_scm_system(self.git_checkout_path)
def tearDown(self):
SVNTestRepository.tear_down(self)
self._tear_down_git_clone_of_svn_repository()
+ os.chdir(self.original_path)
def test_detection(self):
scm = detect_scm_system(self.git_checkout_path)
self.assertEqual(scm.display_name(), "git")
self.assertEqual(scm.supports_local_commits(), True)
- def test_commitish_parsing(self):
+ def test_rebase_in_progress(self):
+ svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
+ write_into_file_at_path(svn_test_file, "svn_checkout")
+ run(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
+
+ git_test_file = os.path.join(self.git_checkout_path, 'test_file')
+ write_into_file_at_path(git_test_file, "git_checkout")
+ run(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
+
+ # --quiet doesn't make git svn silent, so use run_silent to redirect output
+ self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
+
scm = detect_scm_system(self.git_checkout_path)
+ self.assertTrue(scm.rebase_in_progress())
+
+ # Make sure our cleanup works.
+ scm.clean_working_directory()
+ self.assertFalse(scm.rebase_in_progress())
+
+ # Make sure cleanup doesn't throw when no rebase is in progress.
+ scm.clean_working_directory()
+ def test_commitish_parsing(self):
+ scm = detect_scm_system(self.git_checkout_path)
+
# Multiple revisions are cherry-picked.
self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
-
+
# ... is an invalid range specifier
self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
+ def test_commitish_order(self):
+ scm = detect_scm_system(self.git_checkout_path)
+
+ commit_range = 'HEAD~3..HEAD'
+
+ actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
+ expected_commits = []
+ expected_commits += reversed(run(['git', 'rev-list', commit_range]).splitlines())
+
+ self.assertEqual(actual_commits, expected_commits)
+
+ def test_apply_git_patch(self):
+ scm = detect_scm_system(self.git_checkout_path)
+ patch = self._create_patch(run(['git', 'diff', 'HEAD..HEAD^']))
+ self._setup_webkittools_scripts_symlink(scm)
+ scm.apply_patch(patch)
+
+ def test_apply_git_patch_force(self):
+ scm = detect_scm_system(self.git_checkout_path)
+ patch = self._create_patch(run(['git', 'diff', 'HEAD~2..HEAD']))
+ self._setup_webkittools_scripts_symlink(scm)
+ self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
+
+ def test_commit_text_parsing(self):
+ self._shared_test_commit_with_message()
+
+ def test_reverse_diff(self):
+ self._shared_test_reverse_diff()
+
+ def test_diff_for_revision(self):
+ self._shared_test_diff_for_revision()
if __name__ == '__main__':
unittest.main()
diff --git a/WebKitTools/Scripts/modules/statusbot.py b/WebKitTools/Scripts/modules/statusbot.py
new file mode 100644
index 0000000..9c9ba04
--- /dev/null
+++ b/WebKitTools/Scripts/modules/statusbot.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for interacting with the Commit Queue status page.
+
+# WebKit includes a built copy of BeautifulSoup in Scripts/modules
+# so this import should always succeed.
+from .BeautifulSoup import BeautifulSoup
+
+try:
+ from mechanize import Browser
+except ImportError, e:
+ print """
+mechanize is required.
+
+To install:
+sudo easy_install mechanize
+
+Or from the web:
+http://wwwsearch.sourceforge.net/mechanize/
+"""
+ exit(1)
+
+class StatusBot:
+ default_host = "webkit-commit-queue.appspot.com"
+
+ def __init__(self, host=default_host):
+ self.statusbot_host = host
+ self.statusbot_server_url = "http://%s" % self.statusbot_host
+ self.update_status_url = "%s/update_status" % self.statusbot_server_url
+ self.browser = Browser()
+
+ def update_status(self, status, bug_id=None, patch_id=None):
+ self.browser.open(self.update_status_url)
+ self.browser.select_form(name="update_status")
+ if bug_id:
+ self.browser['bug_id'] = str(bug_id)
+ if patch_id:
+ self.browser['patch_id'] = str(patch_id)
+ self.browser['status'] = status
+ self.browser.submit()
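
A minimal usage sketch for the new StatusBot; the bug and patch ids are placeholders, and calling this for real posts to the live status server:

from modules.statusbot import StatusBot

bot = StatusBot()  # or StatusBot(host="example.appspot.com") for a test instance
bot.update_status("Landing patch", bug_id=12345, patch_id=67890)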
diff --git a/WebKitTools/Scripts/parse-malloc-history b/WebKitTools/Scripts/parse-malloc-history
index 76ca74b..177de1c 100755
--- a/WebKitTools/Scripts/parse-malloc-history
+++ b/WebKitTools/Scripts/parse-malloc-history
@@ -70,8 +70,6 @@ sub main()
for (my $i = 0; $i < @file; $i++) {
my $line = $file[$i];
my ($callCount, $byteCount);
-
- next if $line =~ /^\-/;
# First try malloc_history format
# 6 calls for 664 bytes thread_ffffffff |0x0 | start
@@ -93,6 +91,28 @@ sub main()
}
}
+ # Then try LeakFinder format
+ # --------------- Key: 213813, 84 bytes ---------
+ # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderarena.cpp(78): WebCore::RenderArena::allocate
+ # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderobject.cpp(82): WebCore::RenderObject::operator new
+ if (!$callCount || !$byteCount) {
+ $callCount = 1;
+ ($byteCount) = ($line =~ /Key: (?:\d+), (\d+) bytes/);
+ if ($byteCount) {
+ $line = $file[++$i];
+ my @tempStack;
+ while ($file[$i+1] !~ /^(?:-|\d)/) {
+ if ($line =~ /\): (.*)$/) {
+ my $call = $1;
+ $call =~ s/\r$//;
+ unshift(@tempStack, $call);
+ }
+ $line = $file[++$i];
+ }
+ $line = join(" | ", @tempStack);
+ }
+ }
+
# Then give up
next if (!$callCount || !$byteCount);
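
For reference, the byte-count extraction above keys off LeakFinder header lines of the form shown in the comment; a quick Python re-sketch of just that capture (the Perl is what actually runs):

import re

line = "--------------- Key: 213813, 84 bytes ---------"
match = re.search(r"Key: (?:\d+), (\d+) bytes", line)
byte_count = int(match.group(1)) if match else None
assert byte_count == 84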
diff --git a/WebKitTools/Scripts/pdevenv b/WebKitTools/Scripts/pdevenv
index 9128912..818e4ee 100755
--- a/WebKitTools/Scripts/pdevenv
+++ b/WebKitTools/Scripts/pdevenv
@@ -11,8 +11,22 @@ my ($fh, $path) = tempfile(UNLINK => 0, SUFFIX => '.cmd') or die;
chomp(my $vcBin = `cygpath -w "$FindBin::Bin/../vcbin"`);
chomp(my $scriptsPath = `cygpath -w "$FindBin::Bin"`);
+my $vsToolsVar;
+if ($ENV{'VS80COMNTOOLS'}) {
+ $vsToolsVar = "VS80COMNTOOLS";
+} elsif ($ENV{'VS90COMNTOOLS'}) {
+ $vsToolsVar = "VS90COMNTOOLS";
+} else {
+ print "*************************************************************\n";
+ print "Cannot find Visual Studio tools dir.\n";
+ print "Please ensure that \$VS80COMNTOOLS or \$VS90COMNTOOLS\n";
+ print "is set to a valid location.\n";
+ print "*************************************************************\n";
+ die;
+}
+
print $fh "\@echo off\n\n";
-print $fh "call \"\%VS80COMNTOOLS\%\\vsvars32.bat\"\n\n";
+print $fh "call \"\%" . $vsToolsVar . "\%\\vsvars32.bat\"\n\n";
print $fh "set PATH=$vcBin;$scriptsPath;\%PATH\%\n\n";
print $fh "IF EXIST \"\%VSINSTALLDIR\%\\Common7\\IDE\\devenv.com\" (devenv.com /useenv " . join(" ", @ARGV) . ") ELSE ";
print $fh "VCExpress.exe /useenv " . join(" ", @ARGV) . "\n";
diff --git a/WebKitTools/Scripts/prepare-ChangeLog b/WebKitTools/Scripts/prepare-ChangeLog
index c3e2cef..ed31005 100755
--- a/WebKitTools/Scripts/prepare-ChangeLog
+++ b/WebKitTools/Scripts/prepare-ChangeLog
@@ -5,6 +5,7 @@
# Copyright (C) 2000, 2001 Eazel, Inc.
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile, Inc.
+# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au>
#
# prepare-ChangeLog is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
@@ -73,12 +74,16 @@ sub statusCommand(@);
sub createPatchCommand($);
sub diffHeaderFormat();
sub findOriginalFileFromSvn($);
+sub determinePropertyChanges($$$);
+sub pluralizeAndList($$@);
sub generateFileList(\@\@\%);
sub gitConfig($);
+sub isUnmodifiedStatus($);
sub isModifiedStatus($);
sub isAddedStatus($);
sub isConflictStatus($);
-sub statusDescription($$);
+sub statusDescription($$$$);
+sub propertyChangeDescription($);
sub extractLineRange($);
sub canonicalizePath($);
sub testListForChangeLog(@);
@@ -146,8 +151,6 @@ $isSVN || $isGit || die "Couldn't determine your version control system.";
my $SVN = "svn";
my $GIT = "git";
-my $svnVersion = `svn --version --quiet` if $isSVN;
-
# Find the list of modified files
my @changed_files;
my $changed_files_string;
@@ -326,7 +329,7 @@ if (@logs && $updateChangeLogs && $isSVN) {
my @conflictedChangeLogs;
while (my $line = <ERRORS>) {
print STDERR " ", $line;
- push @conflictedChangeLogs, $1 if $line =~ m/^C\s+(.*\S+)\s*$/;
+ push @conflictedChangeLogs, $1 if $line =~ m/^C\s+(.+?)[\r\n]*$/;
}
close ERRORS;
@@ -1323,7 +1326,7 @@ sub createPatchCommand($)
sub diffHeaderFormat()
{
- return qr/^Index: (\S+)\s*$/ if $isSVN;
+ return qr/^Index: (\S+)[\r\n]*$/ if $isSVN;
return qr/^diff --git a\/.+ b\/(.+)$/ if $isGit;
}
@@ -1333,7 +1336,7 @@ sub findOriginalFileFromSvn($)
my $baseUrl;
open INFO, "$SVN info . |" or die;
while (<INFO>) {
- if (/^URL: (.*\S+)\s*$/) {
+ if (/^URL: (.+?)[\r\n]*$/) {
$baseUrl = $1;
}
}
@@ -1341,7 +1344,7 @@ sub findOriginalFileFromSvn($)
my $sourceFile;
open INFO, "$SVN info '$file' |" or die;
while (<INFO>) {
- if (/^Copied From URL: (.*\S+)\s*$/) {
+ if (/^Copied From URL: (.+?)[\r\n]*$/) {
$sourceFile = File::Spec->abs2rel($1, $baseUrl);
}
}
@@ -1349,6 +1352,76 @@ sub findOriginalFileFromSvn($)
return $sourceFile;
}
+sub determinePropertyChanges($$$)
+{
+ my ($file, $isAdd, $original) = @_;
+
+ my %changes;
+ if ($isAdd) {
+ my %addedProperties;
+ my %removedProperties;
+ open PROPLIST, "$SVN proplist '$file' |" or die;
+ while (<PROPLIST>) {
+ $addedProperties{$1} = 1 if /^ (.+?)[\r\n]*$/ && $1 ne 'svn:mergeinfo';
+ }
+ close PROPLIST;
+ if ($original) {
+ open PROPLIST, "$SVN proplist '$original' |" or die;
+ while (<PROPLIST>) {
+ next unless /^ (.+?)[\r\n]*$/;
+ my $property = $1;
+ if (exists $addedProperties{$property}) {
+ delete $addedProperties{$1};
+ } else {
+ $removedProperties{$1} = 1;
+ }
+ }
+ }
+ $changes{"A"} = [sort keys %addedProperties] if %addedProperties;
+ $changes{"D"} = [sort keys %removedProperties] if %removedProperties;
+ } else {
+ open DIFF, "$SVN diff '$file' |" or die;
+ while (<DIFF>) {
+ if (/^Property changes on:/) {
+ while (<DIFF>) {
+ my $operation;
+ my $property;
+ if (/^Added: (\S*)/) {
+ $operation = "A";
+ $property = $1;
+ } elsif (/^Modified: (\S*)/) {
+ $operation = "M";
+ $property = $1;
+ } elsif (/^Deleted: (\S*)/) {
+ $operation = "D";
+ $property = $1;
+ } elsif (/^Name: (\S*)/) {
+ # Older versions of svn just say "Name" instead of the type
+ # of property change.
+ $operation = "C";
+ $property = $1;
+ }
+ if ($operation) {
+ $changes{$operation} = [] unless exists $changes{$operation};
+ push @{$changes{$operation}}, $property;
+ }
+ }
+ }
+ }
+ close DIFF;
+ }
+ return \%changes;
+}
+
+sub pluralizeAndList($$@)
+{
+ my ($singular, $plural, @items) = @_;
+
+ return if @items == 0;
+ return "$singular $items[0]" if @items == 1;
+ return "$plural " . join(", ", @items[0 .. $#items - 1]) . " and " . $items[-1];
+}
+
sub generateFileList(\@\@\%)
{
my ($changedFiles, $conflictFiles, $functionLists) = @_;
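
pluralizeAndList drives the wording of the property-change descriptions added below; its behavior, re-sketched in Python for illustration only:

def pluralize_and_list(singular, plural, items):
    # "property a" for one item, "properties a, b and c" for several, None for none.
    if not items:
        return None
    if len(items) == 1:
        return "%s %s" % (singular, items[0])
    return "%s %s and %s" % (plural, ", ".join(items[:-1]), items[-1])

assert pluralize_and_list("property", "properties", ["svn:executable"]) == "property svn:executable"
assert (pluralize_and_list("property", "properties", ["svn:eol-style", "svn:executable", "svn:mime-type"])
        == "properties svn:eol-style, svn:executable and svn:mime-type")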
@@ -1356,32 +1429,40 @@ sub generateFileList(\@\@\%)
open STAT, "-|", statusCommand(keys %paths) or die "The status failed: $!.\n";
while (<STAT>) {
my $status;
+ my $propertyStatus;
+ my $propertyChanges;
my $original;
my $file;
if ($isSVN) {
my $matches;
- if (eval "v$svnVersion" ge v1.6) {
- $matches = /^([ACDMR]).{6} (.*\S+)\s*$/;
+ if (isSVNVersion16OrNewer()) {
+ $matches = /^([ ACDMR])([ CM]).{5} (.+?)[\r\n]*$/;
$status = $1;
- $file = $2;
+ $propertyStatus = $2;
+ $file = $3;
} else {
- $matches = /^([ACDMR]).{5} (.*\S+)\s*$/;
+ $matches = /^([ ACDMR])([ CM]).{4} (.+?)[\r\n]*$/;
$status = $1;
- $file = $2;
+ $propertyStatus = $2;
+ $file = $3;
}
if ($matches) {
$file = normalizePath($file);
$original = findOriginalFileFromSvn($file) if substr($_, 3, 1) eq "+";
+ my $isAdd = isAddedStatus($status);
+ $propertyChanges = determinePropertyChanges($file, $isAdd, $original) if isModifiedStatus($propertyStatus) || $isAdd;
} else {
print; # error output from svn stat
}
} elsif ($isGit) {
if (/^([ADM])\t(.+)$/) {
$status = $1;
+ $propertyStatus = " "; # git doesn't have properties
$file = normalizePath($2);
} elsif (/^([CR])[0-9]{1,3}\t([^\t]+)\t([^\t\n]+)$/) { # for example: R90% newfile oldfile
$status = $1;
+ $propertyStatus = " ";
$original = normalizePath($2);
$file = normalizePath($3);
} else {
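
The tightened status regexps above now capture both the content and the property columns of svn status output. A Python sketch of the svn 1.6+ form (the pre-1.6 variant differs only in the .{4} column count):

import re

status_line = "MM" + " " * 5 + " " + "WebCore/ChangeLog"  # content 'M', property 'M', then the path
match = re.match(r"^([ ACDMR])([ CM]).{5} (.+?)[\r\n]*$", status_line)
assert match.group(1, 2, 3) == ("M", "M", "WebCore/ChangeLog")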
@@ -1389,11 +1470,11 @@ sub generateFileList(\@\@\%)
}
}
- next unless $status;
+ next if !$status || isUnmodifiedStatus($status) && isUnmodifiedStatus($propertyStatus);
$file = makeFilePathRelative($file);
- if (isModifiedStatus($status) || isAddedStatus($status)) {
+ if (isModifiedStatus($status) || isAddedStatus($status) || isModifiedStatus($propertyStatus)) {
my @components = File::Spec->splitdir($file);
if ($components[0] eq "LayoutTests") {
$didChangeRegressionTests = 1;
@@ -1401,14 +1482,15 @@ sub generateFileList(\@\@\%)
if isAddedStatus($status)
&& $file =~ /\.([a-zA-Z]+)$/
&& $supportedTestExtensions{lc($1)}
- && !scalar(grep(/^resources$/i, @components));
+ && !scalar(grep(/^resources$/i, @components))
+ && !scalar(grep(/^script-tests$/i, @components));
}
push @{$changedFiles}, $file if $components[$#components] ne "ChangeLog";
- } elsif (isConflictStatus($status)) {
+ } elsif (isConflictStatus($status) || isConflictStatus($propertyStatus)) {
push @{$conflictFiles}, $file;
}
if (basename($file) ne "ChangeLog") {
- my $description = statusDescription($status, $original);
+ my $description = statusDescription($status, $propertyStatus, $original, $propertyChanges);
$functionLists->{$file} = $description if defined $description;
}
}
@@ -1429,6 +1511,17 @@ sub gitConfig($)
return $result;
}
+sub isUnmodifiedStatus($)
+{
+ my ($status) = @_;
+
+ my %statusCodes = (
+ " " => 1,
+ );
+
+ return $statusCodes{$status};
+}
+
sub isModifiedStatus($)
{
my ($status) = @_;
@@ -1470,15 +1563,18 @@ sub isConflictStatus($)
return $git{$status} if $isGit;
}
-sub statusDescription($$)
+sub statusDescription($$$$)
{
- my ($status, $original) = @_;
+ my ($status, $propertyStatus, $original, $propertyChanges) = @_;
+
+ my $propertyDescription = defined $propertyChanges ? propertyChangeDescription($propertyChanges) : "";
my %svn = (
"A" => defined $original ? " Copied from \%s." : " Added.",
"D" => " Removed.",
"M" => "",
"R" => defined $original ? " Replaced with \%s." : " Replaced.",
+ " " => "",
);
my %git = %svn;
@@ -1486,9 +1582,33 @@ sub statusDescription($$)
$git{"C"} = " Copied from \%s.";
$git{"R"} = " Renamed from \%s.";
- return sprintf($svn{$status}, $original) if $isSVN && exists $svn{$status};
- return sprintf($git{$status}, $original) if $isGit && exists $git{$status};
- return undef;
+ my $description;
+ $description = sprintf($svn{$status}, $original) if $isSVN && exists $svn{$status};
+ $description = sprintf($git{$status}, $original) if $isGit && exists $git{$status};
+ return unless defined $description;
+
+ $description .= $propertyDescription unless isAddedStatus($status);
+ return $description;
+}
+
+sub propertyChangeDescription($)
+{
+ my ($propertyChanges) = @_;
+
+ my %operations = (
+ "A" => "Added",
+ "M" => "Modified",
+ "D" => "Removed",
+ "C" => "Changed",
+ );
+
+ my $description = "";
+ while (my ($operation, $properties) = each %$propertyChanges) {
+ my $word = $operations{$operation};
+ my $list = pluralizeAndList("property", "properties", @$properties);
+ $description .= " $word $list.";
+ }
+ return $description;
}
sub extractLineRange($)
diff --git a/WebKitTools/Scripts/resolve-ChangeLogs b/WebKitTools/Scripts/resolve-ChangeLogs
index 9107fd2..db497f9 100755
--- a/WebKitTools/Scripts/resolve-ChangeLogs
+++ b/WebKitTools/Scripts/resolve-ChangeLogs
@@ -48,6 +48,7 @@ sub fixChangeLogPatch($);
sub fixMergedChangeLogs($;@);
sub fixOneMergedChangeLog($);
sub hasGitUnmergedFiles();
+sub isInGitFilterBranch();
sub mergeChanges($$$);
sub parseFixMerged($$;$);
sub removeChangeLogArguments($);
@@ -63,8 +64,6 @@ my $isSVN = isSVN();
my $SVN = "svn";
my $GIT = "git";
-my $svnVersion = `svn --version --quiet` if $isSVN;
-
my $fixMerged;
my $gitRebaseContinue = 0;
my $printWarnings = 1;
@@ -77,7 +76,7 @@ my $getOptionsResult = GetOptions(
'w|warnings!' => \$printWarnings,
);
-my $relativePath = chdirReturningRelativePath(determineVCSRoot());
+my $relativePath = isInGitFilterBranch() ? '.' : chdirReturningRelativePath(determineVCSRoot());
my @changeLogFiles = removeChangeLogArguments($relativePath);
@@ -255,11 +254,11 @@ sub findUnmergedChangeLogs()
if ($isSVN) {
my $matches;
my $file;
- if (eval "v$svnVersion" ge v1.6) {
- $matches = /^([C]).{6} (.*\S+)\s*$/;
+ if (isSVNVersion16OrNewer()) {
+ $matches = /^([C]).{6} (.+?)[\r\n]*$/;
$file = $2;
} else {
- $matches = /^([C]).{5} (.*\S+)\s*$/;
+ $matches = /^([C]).{5} (.+?)[\r\n]*$/;
$file = $2;
}
if ($matches) {
@@ -432,6 +431,11 @@ sub hasGitUnmergedFiles()
return $output ne "";
}
+sub isInGitFilterBranch()
+{
+ return exists $ENV{MAPPED_PREVIOUS_COMMIT} && $ENV{MAPPED_PREVIOUS_COMMIT};
+}
+
sub mergeChanges($$$)
{
my ($fileMine, $fileOlder, $fileNewer) = @_;
diff --git a/WebKitTools/Scripts/run-javascriptcore-tests b/WebKitTools/Scripts/run-javascriptcore-tests
index 21d63c2..865ae1d 100755
--- a/WebKitTools/Scripts/run-javascriptcore-tests
+++ b/WebKitTools/Scripts/run-javascriptcore-tests
@@ -124,7 +124,7 @@ sub testapiPath($)
}
#run api tests
-if (isAppleMacWebKit()) {
+if (isAppleMacWebKit() || isAppleWinWebKit()) {
chdirWebKit();
chdir($productDir) or die;
my $testapiResult = system testapiPath($productDir);
diff --git a/WebKitTools/Scripts/run-launcher b/WebKitTools/Scripts/run-launcher
index 24a4c32..ee462ba 100755
--- a/WebKitTools/Scripts/run-launcher
+++ b/WebKitTools/Scripts/run-launcher
@@ -61,6 +61,15 @@ if (isQt()) {
if (isGtk()) {
$launcherPath = catdir($launcherPath, "Programs", "GtkLauncher");
}
+
+ if (isWx()) {
+ if (isDarwin()) {
+ $launcherPath = catdir($launcherPath, 'wxBrowser.app', 'Contents', 'MacOS', 'wxBrowser');
+ } else {
+ $ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} ? "$productDir:$ENV{LD_LIBRARY_PATH}" : $productDir;
+ $launcherPath = catdir($launcherPath, 'wxBrowser');
+ }
+ }
print "Starting webkit launcher.\n";
}
diff --git a/WebKitTools/Scripts/run-sunspider b/WebKitTools/Scripts/run-sunspider
index 154c9fa..367fd06 100755
--- a/WebKitTools/Scripts/run-sunspider
+++ b/WebKitTools/Scripts/run-sunspider
@@ -43,6 +43,7 @@ my $runShark20 = 0;
my $runSharkCache = 0;
my $ubench = 0;
my $v8 = 0;
+my $parseonly = 0;
my $setBaseline = 0;
my $showHelp = 0;
my $testsPattern;
@@ -60,6 +61,7 @@ Usage: $programName [options] [options to pass to build system]
--shark-cache Like --shark, but performs a L2 cache-miss sample instead of time sample
--ubench Use microbenchmark suite instead of regular tests (to check for core execution regressions)
--v8 Use the V8 benchmark suite.
+ --parse-only Use the parse-only benchmark suite
EOF
GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurationProductDir(Cwd::abs_path($root)); },
@@ -70,6 +72,7 @@ GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurat
'shark-cache' => \$runSharkCache,
'ubench' => \$ubench,
'v8' => \$v8,
+ 'parse-only' => \$parseonly,
'tests=s' => \$testsPattern,
'help' => \$showHelp);
@@ -84,7 +87,7 @@ sub buildJSC
push(@ARGV, "--" . $configuration);
chdirWebKit();
- my $buildResult = system "WebKitTools/Scripts/build-jsc", @ARGV;
+ my $buildResult = system currentPerlPath(), "WebKitTools/Scripts/build-jsc", @ARGV;
if ($buildResult) {
print STDERR "Compiling jsc failed!\n";
exit exitStatus($buildResult);
@@ -127,6 +130,7 @@ push @args, "--shark20" if $runShark20;
push @args, "--shark-cache" if $runSharkCache;
push @args, "--ubench" if $ubench;
push @args, "--v8" if $v8;
+push @args, "--parse-only" if $parseonly;
push @args, "--tests", $testsPattern if $testsPattern;
-exec "./sunspider", @args;
+exec currentPerlPath(), "./sunspider", @args;
diff --git a/WebKitTools/Scripts/run-webkit-httpd b/WebKitTools/Scripts/run-webkit-httpd
index 62eae14..9a97190 100755
--- a/WebKitTools/Scripts/run-webkit-httpd
+++ b/WebKitTools/Scripts/run-webkit-httpd
@@ -90,6 +90,7 @@ my $httpdConfig = "$testDirectory/http/conf/httpd.conf";
$httpdConfig = "$testDirectory/http/conf/cygwin-httpd.conf" if isCygwin();
$httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|;
$httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf" if isDebianBased();
+$httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf" if isFedoraBased();
my $documentRoot = "$testDirectory/http/tests";
my $typesConfig = "$testDirectory/http/conf/mime.types";
my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem";
diff --git a/WebKitTools/Scripts/run-webkit-tests b/WebKitTools/Scripts/run-webkit-tests
index f51cf53..a08a53c 100755
--- a/WebKitTools/Scripts/run-webkit-tests
+++ b/WebKitTools/Scripts/run-webkit-tests
@@ -5,6 +5,7 @@
# Copyright (C) 2007 Matt Lilek (pewtermoose@gmail.com)
# Copyright (C) 2007 Eric Seidel <eric@webkit.org>
# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Andras Becsi (becsi.andras@stud.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -70,80 +71,86 @@ use webkitdirs;
use VCSUtils;
use POSIX;
-sub launchWithCurrentEnv(@);
-sub openDiffTool();
-sub openDumpTool();
+sub buildPlatformResultHierarchy();
+sub buildPlatformTestHierarchy(@);
+sub closeCygpaths();
sub closeDumpTool();
-sub dumpToolDidCrash();
sub closeHTTPD();
sub countAndPrintLeaks($$$);
+sub countFinishedTest($$$$);
+sub deleteExpectedAndActualResults($);
+sub dumpToolDidCrash();
+sub epiloguesAndPrologues($$);
+sub expectedDirectoryForTest($;$;$);
sub fileNameWithNumber($$);
+sub htmlForResultsSection(\@$&);
+sub isTextOnlyTest($);
+sub launchWithCurrentEnv(@);
sub numericcmp($$);
+sub openDiffTool();
+sub openDumpTool();
sub openHTTPDIfNeeded();
+sub parseLeaksandPrintUniqueLeaks();
sub pathcmp($$);
+sub printFailureMessageForTest($$);
sub processIgnoreTests($$);
+sub readFromDumpToolWithTimer(**);
+sub recordActualResultsAndDiff($$);
+sub sampleDumpTool();
+sub setFileHandleNonBlocking(*$);
sub slowestcmp($$);
sub splitpath($);
sub stripExtension($);
-sub isTextOnlyTest($);
-sub expectedDirectoryForTest($;$;$);
-sub countFinishedTest($$$$);
+sub stripMetrics($$);
sub testCrashedOrTimedOut($$$$$);
-sub sampleDumpTool();
-sub printFailureMessageForTest($$);
sub toURL($);
sub toWindowsPath($);
-sub closeCygpaths();
sub validateSkippedArg($$;$);
-sub htmlForResultsSection(\@$&);
-sub deleteExpectedAndActualResults($);
-sub recordActualResultsAndDiff($$);
-sub buildPlatformResultHierarchy();
-sub buildPlatformTestHierarchy(@);
-sub epiloguesAndPrologues($$);
-sub parseLeaksandPrintUniqueLeaks();
-sub readFromDumpToolWithTimer(*;$);
-sub setFileHandleNonBlocking(*$);
sub writeToFile($$);
# Argument handling
my $addPlatformExceptions = 0;
my $complexText = 0;
+my $exitAfterNFailures = 0;
+my $generateNewResults = isAppleMacWebKit() ? 1 : 0;
my $guardMalloc = '';
my $httpdPort = 8000;
my $httpdSSLPort = 8443;
+my $ignoreMetrics = 0;
my $ignoreTests = '';
+my $iterations = 1;
my $launchSafari = 1;
-my $platform;
+my $mergeDepth;
my $pixelTests = '';
+my $platform;
my $quiet = '';
+my $randomizeTests = 0;
+my $repeatEach = 1;
my $report10Slowest = 0;
my $resetResults = 0;
+my $reverseTests = 0;
+my $root;
+my $runSample = 1;
my $shouldCheckLeaks = 0;
my $showHelp = 0;
-my $testsPerDumpTool;
+my $stripEditingCallbacks = isCygwin();
my $testHTTP = 1;
my $testMedia = 1;
my $testResultsDirectory = "/tmp/layout-test-results";
+my $testsPerDumpTool = 1000;
my $threaded = 0;
+# DumpRenderTree has an internal timeout of 15 seconds, so this must be > 15.
+my $timeoutSeconds = 20;
my $tolerance = 0;
my $treatSkipped = "default";
-my $verbose = 0;
-my $useValgrind = 0;
-my $strictTesting = 0;
-my $generateNewResults = isAppleMacWebKit() ? 1 : 0;
-my $stripEditingCallbacks = isCygwin();
-my $runSample = 1;
-my $root;
-my $reverseTests = 0;
-my $randomizeTests = 0;
-my $mergeDepth;
-my $timeoutSeconds = 15;
my $useRemoteLinksToTests = 0;
+my $useValgrind = 0;
+my $verbose = 0;
+
my @leaksFilenames;
-# Default to --no-http for Qt, and wx for now.
-$testHTTP = 0 if (isQt() || isWx());
+# Default to --no-http for wx for now.
+$testHTTP = 0 if (isWx());
my $expectedTag = "expected";
my $actualTag = "actual";
@@ -167,7 +174,15 @@ if (isAppleMacWebKit()) {
$platform = "mac";
}
} elsif (isQt()) {
- $platform = "qt";
+ if (isDarwin()) {
+ $platform = "qt-mac";
+ } elsif (isLinux()) {
+ $platform = "qt-linux";
+ } elsif (isWindows() || isCygwin()) {
+ $platform = "qt-win";
+ } else {
+ $platform = "qt";
+ }
} elsif (isGtk()) {
$platform = "gtk";
} elsif (isWx()) {
@@ -186,19 +201,21 @@ my $launchSafariDefault = $launchSafari ? "launch" : "do not launch";
my $httpDefault = $testHTTP ? "run" : "do not run";
my $sampleDefault = $runSample ? "run" : "do not run";
-# FIXME: "--strict" should be renamed to qt-mac-comparison, or something along those lines.
my $usage = <<EOF;
Usage: $programName [options] [testdir|testpath ...]
--add-platform-exceptions Put new results for non-platform-specific failing tests into the platform-specific results directory
--complex-text Use the complex text code path for all text (Mac OS X and Windows only)
-c|--configuration config Set DumpRenderTree build configuration
-g|--guard-malloc Enable malloc guard
- --help Show this help message
+ --exit-after-n-failures N Exit after the first N failures instead of running all tests
+ -h|--help Show this help message
--[no-]http Run (or do not run) http tests (default: $httpDefault)
-i|--ignore-tests Comma-separated list of directories or tests to ignore
+ --iterations n Number of times to run the set of tests (e.g. ABCABCABC)
--[no-]launch-safari Launch (or do not launch) Safari to display test results (default: $launchSafariDefault)
-l|--leaks Enable leaks checking
--[no-]new-test-results Generate results for new tests
+ --nthly n Restart DumpRenderTree every n tests (default: $testsPerDumpTool)
-p|--pixel-tests Enable pixel tests
--tolerance t Ignore image differences less than this percentage (default: $tolerance)
--platform Override the detected platform to use for tests and results (default: $platform)
@@ -207,16 +224,17 @@ Usage: $programName [options] [testdir|testpath ...]
--reset-results Reset ALL results (including pixel tests if --pixel-tests is set)
-o|--results-directory Output results directory (default: $testResultsDirectory)
--random Run the tests in a random order
+ --repeat-each n Number of times to run each test (e.g. AAABBBCCC)
--reverse Run the tests in reverse alphabetical order
--root Path to root tools build
--[no-]sample-on-timeout Run sample on timeout (default: $sampleDefault) (Mac OS X only)
- -1|--singly Isolate each test case run (implies --verbose)
+ -1|--singly Isolate each test case run (implies --nthly 1 --verbose)
--skipped=[default|ignore|only] Specifies how to treat the Skipped file
default: Tests/directories listed in the Skipped file are not tested
ignore: The Skipped file is ignored
only: Only those tests/directories listed in the Skipped file will be run
--slowest Report the 10 slowest tests
- --strict Do a comparison with the output on Mac (Qt only)
+ --ignore-metrics Ignore metrics in tests
--[no-]strip-editing-callbacks Remove editing callbacks from expected results
-t|--threaded Run a concurrent JavaScript thread with each test
--timeout t Sets the number of seconds before a test times out (default: $timeoutSeconds)
@@ -229,38 +247,41 @@ EOF
setConfiguration();
my $getOptionsResult = GetOptions(
+ 'add-platform-exceptions' => \$addPlatformExceptions,
'complex-text' => \$complexText,
+ 'exit-after-n-failures=i' => \$exitAfterNFailures,
'guard-malloc|g' => \$guardMalloc,
- 'help' => \$showHelp,
+ 'help|h' => \$showHelp,
'http!' => \$testHTTP,
+ 'ignore-metrics!' => \$ignoreMetrics,
'ignore-tests|i=s' => \$ignoreTests,
+ 'iterations=i' => \$iterations,
'launch-safari!' => \$launchSafari,
'leaks|l' => \$shouldCheckLeaks,
+ 'merge-leak-depth|m:5' => \$mergeDepth,
+ 'new-test-results!' => \$generateNewResults,
+ 'nthly=i' => \$testsPerDumpTool,
'pixel-tests|p' => \$pixelTests,
'platform=s' => \$platform,
'port=i' => \$httpdPort,
'quiet|q' => \$quiet,
+ 'random' => \$randomizeTests,
+ 'repeat-each=i' => \$repeatEach,
'reset-results' => \$resetResults,
- 'new-test-results!' => \$generateNewResults,
'results-directory|o=s' => \$testResultsDirectory,
+ 'reverse' => \$reverseTests,
+ 'root=s' => \$root,
+ 'sample-on-timeout!' => \$runSample,
'singly|1' => sub { $testsPerDumpTool = 1; },
- 'nthly=i' => \$testsPerDumpTool,
'skipped=s' => \&validateSkippedArg,
'slowest' => \$report10Slowest,
- 'threaded|t' => \$threaded,
- 'tolerance=f' => \$tolerance,
- 'verbose|v' => \$verbose,
- 'valgrind' => \$useValgrind,
- 'sample-on-timeout!' => \$runSample,
- 'strict' => \$strictTesting,
'strip-editing-callbacks!' => \$stripEditingCallbacks,
- 'random' => \$randomizeTests,
- 'reverse' => \$reverseTests,
- 'root=s' => \$root,
- 'add-platform-exceptions' => \$addPlatformExceptions,
- 'merge-leak-depth|m:5' => \$mergeDepth,
+ 'threaded|t' => \$threaded,
'timeout=i' => \$timeoutSeconds,
+ 'tolerance=f' => \$tolerance,
'use-remote-links-to-tests' => \$useRemoteLinksToTests,
+ 'valgrind' => \$useValgrind,
+ 'verbose|v' => \$verbose,
);
if (!$getOptionsResult || $showHelp) {
@@ -275,8 +296,6 @@ my $skippedOnly = $treatSkipped eq "only";
my $configuration = configuration();
-$testsPerDumpTool = 1000 if !$testsPerDumpTool;
-
$verbose = 1 if $testsPerDumpTool == 1;
if ($shouldCheckLeaks && $testsPerDumpTool > 1000) {
@@ -286,8 +305,8 @@ if ($shouldCheckLeaks && $testsPerDumpTool > 1000) {
# Stack logging does not play well with QuickTime on Tiger (rdar://problem/5537157)
$testMedia = 0 if $shouldCheckLeaks && isTiger();
-# Generating remote links causes a lot of unnecessary spew on GTK and Qt build bot
-$useRemoteLinksToTests = 0 if (isGtk() || isQt());
+# Generating remote links causes a lot of unnecessary spew on GTK build bot
+$useRemoteLinksToTests = 0 if isGtk();
setConfigurationProductDir(Cwd::abs_path($root)) if (defined($root));
my $productDir = productDir();
@@ -297,10 +316,30 @@ $productDir .= "/Programs" if isGtk();
chdirWebKit();
if (!defined($root)) {
- # Push the parameters to build-dumprendertree as an array
+ print STDERR "Running build-dumprendertree\n";
+
+ local *DEVNULL;
+ my ($childIn, $childOut, $childErr);
+ if ($quiet) {
+ open(DEVNULL, ">", File::Spec->devnull) or die "Failed to open /dev/null";
+ $childOut = ">&DEVNULL";
+ $childErr = ">&DEVNULL";
+ } else {
+ # When not quiet, let the child use our stdout/stderr.
+ $childOut = ">&STDOUT";
+ $childErr = ">&STDERR";
+ }
+
my @args = argumentsForConfiguration();
+ my $buildProcess = open3($childIn, $childOut, $childErr, "WebKitTools/Scripts/build-dumprendertree", @args) or die "Failed to run build-dumprendertree";
+ close($childIn);
+ waitpid $buildProcess, 0;
+ my $buildResult = $?;
+
+ close DEVNULL if ($quiet);
- my $buildResult = system "WebKitTools/Scripts/build-dumprendertree", @args;
if ($buildResult) {
print STDERR "Compiling DumpRenderTree failed!\n";
exit exitStatus($buildResult);
@@ -320,7 +359,7 @@ checkFrameworks() unless isCygwin();
if (isAppleMacWebKit()) {
push @INC, $productDir;
- eval 'use DumpRenderTreeSupport;';
+ require DumpRenderTreeSupport;
}
my $layoutTestsName = "LayoutTests";
@@ -351,16 +390,17 @@ if ($pixelTests) {
}
}
-my @tests = ();
-my %testType = ();
-
system "ln", "-s", $testDirectory, "/tmp/LayoutTests" unless -x "/tmp/LayoutTests";
-my %ignoredFiles = ();
+my %ignoredFiles = ( "results.html" => 1 );
my %ignoredDirectories = map { $_ => 1 } qw(platform);
-my %ignoredLocalDirectories = map { $_ => 1 } qw(.svn _svn resources);
+my %ignoredLocalDirectories = map { $_ => 1 } qw(.svn _svn resources script-tests);
my %supportedFileExtensions = map { $_ => 1 } qw(html shtml xml xhtml pl php);
+if (!checkWebCoreMathMLSupport(0)) {
+ $ignoredDirectories{'mathml'} = 1;
+}
+
# FIXME: We should fix webkitdirs.pm:hasSVG/WMLSupport() to do the correct feature detection for Cygwin.
if (checkWebCoreSVGSupport(0)) {
$supportedFileExtensions{'svg'} = 1;
@@ -388,6 +428,10 @@ if (!checkWebCore3DRenderingSupport(0)) {
$ignoredDirectories{'transforms/3d'} = 1;
}
+if (!checkWebCore3DCanvasSupport(0)) {
+ $ignoredDirectories{'fast/canvas/webgl'} = 1;
+}
+
if (checkWebCoreWMLSupport(0)) {
$supportedFileExtensions{'wml'} = 1;
} else {
@@ -404,105 +448,13 @@ if (!checkWebCoreWCSSSupport(0)) {
$ignoredDirectories{'fast/wcss'} = 1;
}
-if ($ignoreTests) {
- processIgnoreTests($ignoreTests, "ignore-tests");
-}
-
-sub fileShouldBeIgnored {
- my($filePath) = @_;
- foreach my $ignoredDir (keys %ignoredDirectories) {
- if ($filePath =~ m/^$ignoredDir/) {
- return 1;
- }
- }
- return 0;
-}
-
-if (!$ignoreSkipped) {
- foreach my $level (@platformTestHierarchy) {
- if (open SKIPPED, "<", "$level/Skipped") {
- if ($verbose) {
- my ($dir, $name) = splitpath($level);
- print "Skipped tests in $name:\n";
- }
-
- while (<SKIPPED>) {
- my $skipped = $_;
- chomp $skipped;
- $skipped =~ s/^[ \n\r]+//;
- $skipped =~ s/[ \n\r]+$//;
- if ($skipped && $skipped !~ /^#/) {
- if ($skippedOnly) {
- if (!&fileShouldBeIgnored($skipped)) {
- push(@ARGV, $skipped);
- } elsif ($verbose) {
- print " $skipped\n";
- }
- } else {
- if ($verbose) {
- print " $skipped\n";
- }
- processIgnoreTests($skipped, "Skipped");
- }
- }
- }
- close SKIPPED;
- }
- }
-}
-
+processIgnoreTests($ignoreTests, "ignore-tests") if $ignoreTests;
+readSkippedFiles() unless $ignoreSkipped;
-my $directoryFilter = sub {
- return () if exists $ignoredLocalDirectories{basename($File::Find::dir)};
- return () if exists $ignoredDirectories{File::Spec->abs2rel($File::Find::dir, $testDirectory)};
- return @_;
-};
-
-my $fileFilter = sub {
- my $filename = $_;
- if ($filename =~ /\.([^.]+)$/) {
- if (exists $supportedFileExtensions{$1}) {
- my $path = File::Spec->abs2rel(catfile($File::Find::dir, $filename), $testDirectory);
- push @tests, $path if !exists $ignoredFiles{$path};
- }
- }
-};
-
-for my $test (@ARGV) {
- $test =~ s/^($layoutTestsName|$testDirectory)\///;
- my $fullPath = catfile($testDirectory, $test);
- if (file_name_is_absolute($test)) {
- print "can't run test $test outside $testDirectory\n";
- } elsif (-f $fullPath) {
- my ($filename, $pathname, $fileExtension) = fileparse($test, qr{\.[^.]+$});
- if (!exists $supportedFileExtensions{substr($fileExtension, 1)}) {
- print "test $test does not have a supported extension\n";
- } elsif ($testHTTP || $pathname !~ /^http\//) {
- push @tests, $test;
- }
- } elsif (-d $fullPath) {
- find({ preprocess => $directoryFilter, wanted => $fileFilter }, $fullPath);
-
- for my $level (@platformTestHierarchy) {
- my $platformPath = catfile($level, $test);
- find({ preprocess => $directoryFilter, wanted => $fileFilter }, $platformPath) if (-d $platformPath);
- }
- } else {
- print "test $test not found\n";
- }
-}
-if (!scalar @ARGV) {
- find({ preprocess => $directoryFilter, wanted => $fileFilter }, $testDirectory);
-
- for my $level (@platformTestHierarchy) {
- find({ preprocess => $directoryFilter, wanted => $fileFilter }, $level);
- }
-}
+my @tests = findTestsToRun();
die "no tests to run\n" if !@tests;
-@tests = sort pathcmp @tests;
-
my %counts;
my %tests;
my %imagesPresent;
@@ -537,20 +489,31 @@ my $isHttpdOpen = 0;
sub catch_pipe { $dumpToolCrashed = 1; }
$SIG{"PIPE"} = "catch_pipe";
-print "Testing ", scalar @tests, " test cases.\n";
+print "Testing ", scalar @tests, " test cases";
+print " $iterations times" if ($iterations > 1);
+print ", repeating each test $repeatEach times" if ($repeatEach > 1);
+print ".\n";
+
my $overallStartTime = time;
my %expectedResultPaths;
-# Reverse the tests
-@tests = reverse @tests if $reverseTests;
-
-# Shuffle the array
-@tests = shuffle(@tests) if $randomizeTests;
+my @originalTests = @tests;
+# Add individual test repetitions
+if ($repeatEach > 1) {
+ @tests = ();
+ foreach my $test (@originalTests) {
+ for (my $i = 0; $i < $repeatEach; $i++) {
+ push(@tests, $test);
+ }
+ }
+}
+# Add test set repetitions
+for (my $i = 1; $i < $iterations; $i++) {
+ push(@tests, @originalTests);
+}
for my $test (@tests) {
- next if $test eq 'results.html';
-
my $newDumpTool = not $isDumpToolOpen;
openDumpTool();
@@ -648,14 +611,12 @@ for my $test (@tests) {
# The first block is the output of the test (in text, RenderTree or other formats).
# The second block is for optional pixel data in PNG format, and may be empty if
# pixel tests are not being run, or the test does not dump pixels (e.g. text tests).
-
- my $actualRead = readFromDumpToolWithTimer(IN);
- my $errorRead = readFromDumpToolWithTimer(ERROR, $actualRead->{status} eq "timedOut");
+ my $readResults = readFromDumpToolWithTimer(IN, ERROR);
- my $actual = $actualRead->{output};
- my $error = $errorRead->{output};
+ my $actual = $readResults->{output};
+ my $error = $readResults->{error};
- $expectedExtension = $actualRead->{extension};
+ $expectedExtension = $readResults->{extension};
my $expectedFileName = "$base-$expectedTag.$expectedExtension";
my $isText = isTextOnlyTest($actual);
@@ -663,12 +624,12 @@ for my $test (@tests) {
my $expectedDir = expectedDirectoryForTest($base, $isText, $expectedExtension);
$expectedResultPaths{$base} = "$expectedDir/$expectedFileName";
- unless ($actualRead->{status} eq "success" && $errorRead->{status} eq "success") {
- my $crashed = $actualRead->{status} eq "crashed" || $errorRead->{status} eq "crashed";
+ unless ($readResults->{status} eq "success") {
+ my $crashed = $readResults->{status} eq "crashed";
testCrashedOrTimedOut($test, $base, $crashed, $actual, $error);
countFinishedTest($test, $base, $crashed ? "crash" : "timedout", 0);
next;
- }
+ }
$durations{$test} = time - $startTime if $report10Slowest;
@@ -682,15 +643,9 @@ for my $test (@tests) {
}
close EXPECTED;
}
- my $expectedMac;
- if (!isAppleMacWebKit() && $strictTesting && !$isText) {
- if (!$resetResults && open EXPECTED, "<", "$testDirectory/platform/mac/$expectedFileName") {
- $expectedMac = "";
- while (<EXPECTED>) {
- $expectedMac .= $_;
- }
- close EXPECTED;
- }
+
+ if ($ignoreMetrics && !$isText && defined $expected) {
+ ($actual, $expected) = stripMetrics($actual, $expected);
}
if ($shouldCheckLeaks && $testsPerDumpTool == 1) {
@@ -776,57 +731,6 @@ for my $test (@tests) {
}
}
- if (!isAppleMacWebKit() && $strictTesting && !$isText) {
- if (defined $expectedMac) {
- my $simplified_actual;
- $simplified_actual = $actual;
- $simplified_actual =~ s/at \(-?[0-9]+,-?[0-9]+\) *//g;
- $simplified_actual =~ s/size -?[0-9]+x-?[0-9]+ *//g;
- $simplified_actual =~ s/text run width -?[0-9]+: //g;
- $simplified_actual =~ s/text run width -?[0-9]+ [a-zA-Z ]+: //g;
- $simplified_actual =~ s/RenderButton {BUTTON} .*/RenderButton {BUTTON}/g;
- $simplified_actual =~ s/RenderImage {INPUT} .*/RenderImage {INPUT}/g;
- $simplified_actual =~ s/RenderBlock {INPUT} .*/RenderBlock {INPUT}/g;
- $simplified_actual =~ s/RenderTextControl {INPUT} .*/RenderTextControl {INPUT}/g;
- $simplified_actual =~ s/\([0-9]+px/px/g;
- $simplified_actual =~ s/ *" *\n +" */ /g;
- $simplified_actual =~ s/" +$/"/g;
-
- $simplified_actual =~ s/- /-/g;
- $simplified_actual =~ s/\n( *)"\s+/\n$1"/g;
- $simplified_actual =~ s/\s+"\n/"\n/g;
-
- $expectedMac =~ s/at \(-?[0-9]+,-?[0-9]+\) *//g;
- $expectedMac =~ s/size -?[0-9]+x-?[0-9]+ *//g;
- $expectedMac =~ s/text run width -?[0-9]+: //g;
- $expectedMac =~ s/text run width -?[0-9]+ [a-zA-Z ]+: //g;
- $expectedMac =~ s/RenderButton {BUTTON} .*/RenderButton {BUTTON}/g;
- $expectedMac =~ s/RenderImage {INPUT} .*/RenderImage {INPUT}/g;
- $expectedMac =~ s/RenderBlock {INPUT} .*/RenderBlock {INPUT}/g;
- $expectedMac =~ s/RenderTextControl {INPUT} .*/RenderTextControl {INPUT}/g;
- $expectedMac =~ s/\([0-9]+px/px/g;
- $expectedMac =~ s/ *" *\n +" */ /g;
- $expectedMac =~ s/" +$/"/g;
-
- $expectedMac =~ s/- /-/g;
- $expectedMac =~ s/\n( *)"\s+/\n$1"/g;
- $expectedMac =~ s/\s+"\n/"\n/g;
-
- if ($simplified_actual ne $expectedMac) {
- writeToFile("/tmp/actual.txt", $simplified_actual);
- writeToFile("/tmp/expected.txt", $expectedMac);
- system "diff -u \"/tmp/expected.txt\" \"/tmp/actual.txt\" > \"/tmp/simplified.diff\"";
-
- $diffResult = "failed";
- if ($verbose) {
- print "\n";
- system "cat /tmp/simplified.diff";
- print "failed!!!!!";
- }
- }
- }
- }
-
if (dumpToolDidCrash()) {
$result = "crash";
testCrashedOrTimedOut($test, $base, 1, $actual, $error);
@@ -971,6 +875,17 @@ for my $test (@tests) {
}
countFinishedTest($test, $base, $result, $isText);
+
+ # --reset-results does not check pass vs. fail, so exitAfterNFailures makes no sense with --reset-results.
+ if ($exitAfterNFailures && !$resetResults) {
+ my $passCount = $counts{match} || 0; # $counts{match} will be undefined if we've not yet passed a test (e.g. the first test fails).
+ my $failureCount = $count - $passCount; # "Failure" here includes new tests, timeouts, crashes, etc.
+ if ($failureCount >= $exitAfterNFailures) {
+ print "\nExiting early after $failureCount failures. $count tests run.";
+ closeDumpTool();
+ last;
+ }
+ }
}
printf "\n%0.2fs total testing time\n", (time - $overallStartTime) . "";
@@ -990,8 +905,7 @@ if ($isDiffToolOpen && $shouldCheckLeaks) {
if ($totalLeaks) {
if ($mergeDepth) {
parseLeaksandPrintUniqueLeaks();
- }
- else {
+ } else {
print "\nWARNING: $totalLeaks total leaks found!\n";
print "See above for individual leaks results.\n" if ($leaksOutputFileNumber > 2);
}
@@ -1025,31 +939,7 @@ if ($resetResults || ($counts{match} && $counts{match} == $count)) {
exit;
}
-
-my %text = (
- match => "succeeded",
- mismatch => "had incorrect layout",
- new => "were new",
- timedout => "timed out",
- crash => "crashed",
- error => "had stderr output"
-);
-
-for my $type ("match", "mismatch", "new", "timedout", "crash", "error") {
- my $c = $counts{$type};
- if ($c) {
- my $t = $text{$type};
- my $message;
- if ($c == 1) {
- $t =~ s/were/was/;
- $message = sprintf "1 test case (%d%%) %s\n", 1 * 100 / $count, $t;
- } else {
- $message = sprintf "%d test cases (%d%%) %s\n", $c, $c * 100 / $count, $t;
- }
- $message =~ s-\(0%\)-(<1%)-;
- print $message;
- }
-}
+printResults();
mkpath $testResultsDirectory;
@@ -1060,6 +950,10 @@ print HTML "<title>Layout Test Results</title>\n";
print HTML "</head>\n";
print HTML "<body>\n";
+if ($ignoreMetrics) {
+ print HTML "<h4>Tested with metrics ignored.</h4>";
+}
+
print HTML htmlForResultsSection(@{$tests{mismatch}}, "Tests where results did not match expected results", \&linksForMismatchTest);
print HTML htmlForResultsSection(@{$tests{timedout}}, "Tests that timed out", \&linksForErrorTest);
print HTML htmlForResultsSection(@{$tests{crash}}, "Tests that caused the DumpRenderTree tool to crash", \&linksForErrorTest);
@@ -1143,6 +1037,13 @@ sub countAndPrintLeaks($$$)
);
}
+ if (isSnowLeopard()) {
+ push @callStacksToExclude, (
+ "readMakerNoteProps", # <rdar://problem/7156432> leak in ImageIO
+ "QTKitMovieControllerView completeUISetup", # <rdar://problem/7155156> leak in QTKit
+ );
+ }
+
my $leaksTool = sourceDir() . "/WebKitTools/Scripts/run-leaks";
my $excludeString = "--exclude-callstack '" . (join "' --exclude-callstack '", @callStacksToExclude) . "'";
$excludeString .= " --exclude-type '" . (join "' --exclude-type '", @typesToExclude) . "'" if @typesToExclude;
@@ -1172,7 +1073,7 @@ sub countAndPrintLeaks($$$)
writeToFile($leaksFilePath, $leaksOutput);
- push( @leaksFilenames, $leaksFilePath );
+ push @leaksFilenames, $leaksFilePath;
}
return $adjustedCount;
@@ -1414,6 +1315,9 @@ sub openHTTPDIfNeeded()
} elsif (isDebianBased()) {
$httpdPath = "/usr/sbin/apache2";
$httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf";
+ } elsif (isFedoraBased()) {
+ $httpdPath = "/usr/sbin/httpd";
+ $httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf";
} else {
$httpdConfig = "$testDirectory/http/conf/httpd.conf";
$httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|;
@@ -1476,7 +1380,8 @@ sub fileNameWithNumber($$)
return $base;
}
-sub processIgnoreTests($$) {
+sub processIgnoreTests($$)
+{
my @ignoreList = split(/\s*,\s*/, shift);
my $listName = shift;
@@ -1541,7 +1446,8 @@ sub expectedDirectoryForTest($;$;$)
return $isText ? $expectedDirectory : $platformResultHierarchy[$#platformResultHierarchy];
}
-sub countFinishedTest($$$$) {
+sub countFinishedTest($$$$)
+{
my ($test, $base, $result, $isText) = @_;
if (($count + 1) % $testsPerDumpTool == 0 || $count == $#tests) {
@@ -1563,7 +1469,6 @@ sub countFinishedTest($$$$) {
$count++;
$counts{$result}++;
push @{$tests{$result}}, $test;
- $testType{$test} = $isText;
}
sub testCrashedOrTimedOut($$$$$)
@@ -1833,6 +1738,9 @@ sub buildPlatformResultHierarchy()
for (; $i < @macPlatforms; $i++) {
push @platforms, $macPlatforms[$i];
}
+ } elsif ($platform =~ /^qt-/) {
+ push @platforms, $platform;
+ push @platforms, "qt";
} else {
@platforms = $platform;
}
@@ -1854,7 +1762,8 @@ sub buildPlatformTestHierarchy(@)
return ($platformHierarchy[0], $platformHierarchy[$#platformHierarchy]);
}
-sub epiloguesAndPrologues($$) {
+sub epiloguesAndPrologues($$)
+{
my ($lastDirectory, $directory) = @_;
my @lastComponents = split('/', $lastDirectory);
my @components = split('/', $directory);
@@ -1890,7 +1799,8 @@ sub epiloguesAndPrologues($$) {
return @result;
}
-sub parseLeaksandPrintUniqueLeaks() {
+sub parseLeaksandPrintUniqueLeaks()
+{
return unless @leaksFilenames;
my $mergedFilenames = join " ", @leaksFilenames;
@@ -1926,11 +1836,12 @@ sub extensionForMimeType($)
}
# Read up to the first #EOF (the content block of the test), or until detecting crashes or timeouts.
-sub readFromDumpToolWithTimer(*;$)
+sub readFromDumpToolWithTimer(**)
{
- my ($fh, $dontWaitForTimeOut) = @_;
+ my ($fhIn, $fhError) = @_;
- setFileHandleNonBlocking($fh, 1);
+ setFileHandleNonBlocking($fhIn, 1);
+ setFileHandleNonBlocking($fhError, 1);
my $maximumSecondsWithoutOutput = $timeoutSeconds;
$maximumSecondsWithoutOutput *= 10 if $guardMalloc;
@@ -1939,11 +1850,14 @@ sub readFromDumpToolWithTimer(*;$)
my $timeOfLastSuccessfulRead = time;
my @output = ();
+ my @error = ();
my $status = "success";
my $mimeType = "text/plain";
# We don't have a very good way to know when the "headers" stop
# and the content starts, so we use this as a hack:
my $haveSeenContentType = 0;
+ my $haveSeenEofIn = 0;
+ my $haveSeenEofError = 0;
while (1) {
if (time - $timeOfLastSuccessfulRead > $maximumSecondsWithoutOutput) {
@@ -1951,37 +1865,48 @@ sub readFromDumpToolWithTimer(*;$)
last;
}
- my $line = readline($fh);
- if (!defined($line)) {
+ # Once we've seen the EOF, we must not read anymore.
+ my $lineIn = readline($fhIn) unless $haveSeenEofIn;
+ my $lineError = readline($fhError) unless $haveSeenEofError;
+ if (!defined($lineIn) && !defined($lineError)) {
+ last if ($haveSeenEofIn && $haveSeenEofError);
+
if ($! != EAGAIN) {
$status = "crashed";
last;
}
- if ($dontWaitForTimeOut) {
- last;
- }
-
# No data ready
usleep($microsecondsToWaitBeforeReadingAgain);
next;
}
$timeOfLastSuccessfulRead = time;
-
- if (!$haveSeenContentType && $line =~ /^Content-Type: (\S+)$/) {
- $mimeType = $1;
- $haveSeenContentType = 1;
- next;
+
+ if (defined($lineIn)) {
+ if (!$haveSeenContentType && $lineIn =~ /^Content-Type: (\S+)$/) {
+ $mimeType = $1;
+ $haveSeenContentType = 1;
+ } elsif ($lineIn =~ /#EOF/) {
+ $haveSeenEofIn = 1;
+ } else {
+ push @output, $lineIn;
+ }
+ }
+ if (defined($lineError)) {
+ if ($lineError =~ /#EOF/) {
+ $haveSeenEofError = 1;
+ } else {
+ push @error, $lineError;
+ }
}
- last if ($line =~ /#EOF/);
-
- push @output, $line;
}
- setFileHandleNonBlocking($fh, 0);
+ setFileHandleNonBlocking($fhIn, 0);
+ setFileHandleNonBlocking($fhError, 0);
return {
output => join("", @output),
+ error => join("", @error),
status => $status,
mimeType => $mimeType,
extension => extensionForMimeType($mimeType)
@@ -2016,3 +1941,167 @@ sub sampleDumpTool()
my $outputFile = "$outputDirectory/HangReport.txt";
system "/usr/bin/sample", $dumpToolPID, qw(10 10 -file), $outputFile;
}
+
+sub stripMetrics($$)
+{
+ my ($actual, $expected) = @_;
+
+ foreach my $result ($actual, $expected) {
+ $result =~ s/at \(-?[0-9]+,-?[0-9]+\) *//g;
+ $result =~ s/size -?[0-9]+x-?[0-9]+ *//g;
+ $result =~ s/text run width -?[0-9]+: //g;
+ $result =~ s/text run width -?[0-9]+ [a-zA-Z ]+: //g;
+ $result =~ s/RenderButton {BUTTON} .*/RenderButton {BUTTON}/g;
+ $result =~ s/RenderImage {INPUT} .*/RenderImage {INPUT}/g;
+ $result =~ s/RenderBlock {INPUT} .*/RenderBlock {INPUT}/g;
+ $result =~ s/RenderTextControl {INPUT} .*/RenderTextControl {INPUT}/g;
+ $result =~ s/\([0-9]+px/px/g;
+ $result =~ s/ *" *\n +" */ /g;
+ $result =~ s/" +$/"/g;
+
+ $result =~ s/- /-/g;
+ $result =~ s/\n( *)"\s+/\n$1"/g;
+ $result =~ s/\s+"\n/"\n/g;
+ $result =~ s/scrollWidth [0-9]+/scrollWidth/g;
+ $result =~ s/scrollHeight [0-9]+/scrollHeight/g;
+ }
+
+ return ($actual, $expected);
+}
+
+sub fileShouldBeIgnored
+{
+ my ($filePath) = @_;
+ foreach my $ignoredDir (keys %ignoredDirectories) {
+ if ($filePath =~ m/^$ignoredDir/) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+sub readSkippedFiles
+{
+ foreach my $level (@platformTestHierarchy) {
+ if (open SKIPPED, "<", "$level/Skipped") {
+ if ($verbose) {
+ my ($dir, $name) = splitpath($level);
+ print "Skipped tests in $name:\n";
+ }
+
+ while (<SKIPPED>) {
+ my $skipped = $_;
+ chomp $skipped;
+ $skipped =~ s/^[ \n\r]+//;
+ $skipped =~ s/[ \n\r]+$//;
+ if ($skipped && $skipped !~ /^#/) {
+ if ($skippedOnly) {
+ if (!&fileShouldBeIgnored($skipped)) {
+ push(@ARGV, $skipped);
+ } elsif ($verbose) {
+ print " $skipped\n";
+ }
+ } else {
+ if ($verbose) {
+ print " $skipped\n";
+ }
+ processIgnoreTests($skipped, "Skipped");
+ }
+ }
+ }
+ close SKIPPED;
+ }
+ }
+}
+
+my @testsToRun;
+
+sub directoryFilter
+{
+ return () if exists $ignoredLocalDirectories{basename($File::Find::dir)};
+ return () if exists $ignoredDirectories{File::Spec->abs2rel($File::Find::dir, $testDirectory)};
+ return @_;
+}
+
+sub fileFilter
+{
+ my $filename = $_;
+ if ($filename =~ /\.([^.]+)$/) {
+ if (exists $supportedFileExtensions{$1}) {
+ my $path = File::Spec->abs2rel(catfile($File::Find::dir, $filename), $testDirectory);
+ push @testsToRun, $path if !exists $ignoredFiles{$path};
+ }
+ }
+}
+
+sub findTestsToRun
+{
+ @testsToRun = ();
+
+ for my $test (@ARGV) {
+ $test =~ s/^($layoutTestsName|$testDirectory)\///;
+ my $fullPath = catfile($testDirectory, $test);
+ if (file_name_is_absolute($test)) {
+ print "can't run test $test outside $testDirectory\n";
+ } elsif (-f $fullPath) {
+ my ($filename, $pathname, $fileExtension) = fileparse($test, qr{\.[^.]+$});
+ if (!exists $supportedFileExtensions{substr($fileExtension, 1)}) {
+ print "test $test does not have a supported extension\n";
+ } elsif ($testHTTP || $pathname !~ /^http\//) {
+ push @testsToRun, $test;
+ }
+ } elsif (-d $fullPath) {
+ find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $fullPath);
+ for my $level (@platformTestHierarchy) {
+ my $platformPath = catfile($level, $test);
+ find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $platformPath) if (-d $platformPath);
+ }
+ } else {
+ print "test $test not found\n";
+ }
+ }
+
+ if (!scalar @ARGV) {
+ find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $testDirectory);
+ for my $level (@platformTestHierarchy) {
+ find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $level);
+ }
+ }
+
+ @testsToRun = sort pathcmp @testsToRun;
+
+ # Reverse the tests
+ @testsToRun = reverse @testsToRun if $reverseTests;
+
+ # Shuffle the array
+ @testsToRun = shuffle(@testsToRun) if $randomizeTests;
+
+ return @testsToRun;
+}
+
+sub printResults
+{
+ my %text = (
+ match => "succeeded",
+ mismatch => "had incorrect layout",
+ new => "were new",
+ timedout => "timed out",
+ crash => "crashed",
+ error => "had stderr output"
+ );
+
+ for my $type ("match", "mismatch", "new", "timedout", "crash", "error") {
+ my $typeCount = $counts{$type};
+ next unless $typeCount;
+ my $typeText = $text{$type};
+ my $message;
+ if ($typeCount == 1) {
+ $typeText =~ s/were/was/;
+ $message = sprintf "1 test case (%d%%) %s\n", 1 * 100 / $count, $typeText;
+ } else {
+ $message = sprintf "%d test cases (%d%%) %s\n", $typeCount, $typeCount * 100 / $count, $typeText;
+ }
+ $message =~ s-\(0%\)-(<1%)-;
+ print $message;
+ }
+}
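
The reworked readFromDumpToolWithTimer() above drains DumpRenderTree's stdout and stderr in parallel, non-blocking, and treats a literal "#EOF" line on each stream as that stream's end marker. Below is a minimal standalone sketch of the same read loop, not part of the patch; the /bin/sh child and the handle names are stand-ins for the dump tool.

#!/usr/bin/perl -w
# Sketch only: parallel, non-blocking reads from a child's stdout and stderr,
# each terminated by a "#EOF" line, as in readFromDumpToolWithTimer() above.
use strict;
use Errno qw(EAGAIN);
use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
use IPC::Open3;
use Symbol qw(gensym);
use Time::HiRes qw(usleep);

sub setNonBlocking($)
{
    my ($handle) = @_;
    my $flags = fcntl($handle, F_GETFL, 0) or die "fcntl(F_GETFL): $!";
    fcntl($handle, F_SETFL, $flags | O_NONBLOCK) or die "fcntl(F_SETFL): $!";
}

my $childIn = gensym();
my $childOut = gensym();
my $childErr = gensym();    # open3 needs a separate glob here or stderr is merged into stdout
my $pid = open3($childIn, $childOut, $childErr, "/bin/sh", "-c",
    'echo "render tree dump"; echo "#EOF"; echo "console message" >&2; echo "#EOF" >&2');
close($childIn);
setNonBlocking($childOut);
setNonBlocking($childErr);

my (@output, @error);
my ($sawEofOut, $sawEofErr) = (0, 0);
while (!($sawEofOut && $sawEofErr)) {
    my $lineOut = $sawEofOut ? undef : readline($childOut);
    my $lineErr = $sawEofErr ? undef : readline($childErr);
    if (!defined($lineOut) && !defined($lineErr)) {
        last if $! != EAGAIN;    # stream closed without #EOF; the real script reports a crash
        usleep(1000);            # no data ready yet; the real script also enforces a timeout
        next;
    }
    if (defined($lineOut)) {
        if ($lineOut =~ /#EOF/) { $sawEofOut = 1; } else { push @output, $lineOut; }
    }
    if (defined($lineErr)) {
        if ($lineErr =~ /#EOF/) { $sawEofErr = 1; } else { push @error, $lineErr; }
    }
}
waitpid($pid, 0);
print "output: ", join("", @output);
print "error:  ", join("", @error);
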
diff --git a/WebKitTools/Scripts/run-webkit-unittests b/WebKitTools/Scripts/run-webkit-unittests
index 83aaea9..8d0ef1d 100755
--- a/WebKitTools/Scripts/run-webkit-unittests
+++ b/WebKitTools/Scripts/run-webkit-unittests
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -30,9 +30,12 @@
import unittest
from modules.bugzilla_unittest import *
-from modules.commiters_unittest import *
+from modules.buildbot_unittest import *
+from modules.changelogs_unittest import *
+from modules.committers_unittest import *
from modules.cpp_style_unittest import *
from modules.diff_parser_unittest import *
+from modules.logging_unittest import *
from modules.scm_unittest import *
if __name__ == "__main__":
diff --git a/WebKitTools/Scripts/sunspider-compare-results b/WebKitTools/Scripts/sunspider-compare-results
index d219896..ce87a23 100755
--- a/WebKitTools/Scripts/sunspider-compare-results
+++ b/WebKitTools/Scripts/sunspider-compare-results
@@ -67,7 +67,7 @@ sub buildJSC
{
if (!defined($root)){
chdirWebKit();
- my $buildResult = system "WebKitTools/Scripts/build-jsc", "--" . $configuration;
+ my $buildResult = system currentPerlPath(), "WebKitTools/Scripts/build-jsc", "--" . $configuration;
if ($buildResult) {
print STDERR "Compiling jsc failed!\n";
exit WEXITSTATUS($buildResult);
@@ -124,4 +124,4 @@ my @args = ("--shell", $jscPath);
push @args, "--ubench" if $ubench;
push @args, "--v8" if $v8;
-exec "./sunspider-compare-results", @args, @ARGV;
+exec currentPerlPath(), "./sunspider-compare-results", @args, @ARGV;
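
The switch to "system currentPerlPath(), ..." and "exec currentPerlPath(), ..." above makes sunspider-compare-results re-run sibling scripts with the same perl binary that is already executing it, instead of whatever the shebang line or PATH happens to resolve to. A small illustration follows; its body mirrors the currentPerlPath() added to webkitdirs.pm later in this patch, and the build-jsc command line is only an example.

#!/usr/bin/perl -w
# Sketch only: re-invoke sibling scripts with the interpreter running this one.
use strict;
use Config;

sub currentPerlPath()
{
    my $thisPerl = $^X;                 # path of the running perl binary
    if ($^O ne 'VMS') {
        $thisPerl .= $Config{_exe} unless $thisPerl =~ m/$Config{_exe}$/i;
    }
    return $thisPerl;
}

# List form of system: no shell involved, arguments passed through untouched.
my @command = (currentPerlPath(), "WebKitTools/Scripts/build-jsc", "--Release");
print "would run: @command\n";
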
diff --git a/WebKitTools/Scripts/svn-apply b/WebKitTools/Scripts/svn-apply
index 4b88a37..19c8c56 100755
--- a/WebKitTools/Scripts/svn-apply
+++ b/WebKitTools/Scripts/svn-apply
@@ -1,6 +1,7 @@
#!/usr/bin/perl -w
# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -80,6 +81,7 @@ sub isDirectoryEmptyForRemoval($);
sub patch($);
sub removeDirectoriesIfNeeded();
sub setChangeLogDateAndReviewer($$);
+sub removeEOL($);
sub svnStatus($);
# These should be replaced by an scm class/module:
@@ -109,10 +111,6 @@ if (!$optionParseSuccess || $showHelp) {
exit 1;
}
-my $isGit = isGitDirectory(".");
-my $isSVN = isSVNDirectory(".");
-$isSVN || $isGit || die "Couldn't determine your version control system.";
-
my %removeDirectoryIgnoreList = (
'.' => 1,
'..' => 1,
@@ -121,6 +119,11 @@ my %removeDirectoryIgnoreList = (
'_svn' => 1,
);
+my $globalExitCode = 0;
+
+my $pathScriptWasRunFrom = Cwd::getcwd();
+my $pathForRepositoryRoot = determineVCSRoot();
+
my %checkedDirectories;
my %copiedFiles;
my @patches;
@@ -172,7 +175,7 @@ if ($patch && !$copiedFromPath) {
}
if ($merge) {
- die "--merge is currently only supported for SVN" unless $isSVN;
+ die "--merge is currently only supported for SVN" unless isSVN();
# How do we handle Git patches applied to an SVN checkout here?
for my $file (sort keys %versions) {
print "Getting version $versions{$file} of $file\n";
@@ -192,7 +195,7 @@ for $patch (@patches) {
removeDirectoriesIfNeeded();
-exit 0;
+exit $globalExitCode;
sub addDirectoriesIfNeeded($)
{
@@ -226,16 +229,22 @@ sub addDirectoriesIfNeeded($)
sub applyPatch($$;$)
{
my ($patch, $fullPath, $options) = @_;
+ chdir $pathForRepositoryRoot;
$options = [] if (! $options);
+ push @{$options}, "--force" if $force;
my $command = "patch " . join(" ", "-p0", @{$options});
open PATCH, "| $command" or die "Failed to patch $fullPath\n";
print PATCH $patch;
close PATCH;
+ chdir $pathScriptWasRunFrom;
my $exitCode = $? >> 8;
- if ($exitCode != 0) {
- print "patch -p0 \"$fullPath\" returned $exitCode. Pass --force to ignore patch failures.\n";
- exit($exitCode);
+ if ($exitCode) {
+ if (!$force) {
+ print "$command \"$fullPath\" returned $exitCode. Pass --force to ignore patch failures.\n";
+ exit $exitCode;
+ }
+ $globalExitCode = $exitCode;
}
}
@@ -320,7 +329,10 @@ sub gitdiff2svndiff($)
sub handleBinaryChange($$)
{
my ($fullPath, $contents) = @_;
- if ($contents =~ m#((\n[A-Za-z0-9+/]{76})+\n[A-Za-z0-9+/=]{4,76}\n)#) {
+ # [A-Za-z0-9+/] is the class of allowed base64 characters.
+ # One or more lines, at most 76 characters in length.
+ # The last line is allowed to have up to two '=' characters at the end (to signify padding).
+ if ($contents =~ m#((\n[A-Za-z0-9+/]{76})*\n[A-Za-z0-9+/]{2,74}?[A-Za-z0-9+/=]{2}\n)#) {
# Addition or Modification
open FILE, ">", $fullPath or die;
print FILE decode_base64($1);
@@ -369,7 +381,7 @@ sub patch($)
my $addition = 0;
my $isBinary = 0;
- $addition = 1 if ($patch =~ /\n--- .+\(revision 0\)\r?\n/ || $patch =~ /\n@@ -0,0 .* @@/);
+ $addition = 1 if ($patch =~ /\n--- .+\(revision 0\)\r?\n/ || $patch =~ /\n@@ -0,0 .* @@/) && !exists($copiedFiles{$fullPath});
$deletion = 1 if $patch =~ /\n@@ .* \+0,0 @@/;
$isBinary = 1 if $patch =~ /\nCannot display: file marked as a binary type\./;
@@ -401,7 +413,7 @@ sub patch($)
unlink("$fullPath.orig") if -e "$fullPath.orig" && checksum($fullPath) eq checksum("$fullPath.orig");
scmAdd($fullPath);
# What is this for?
- system "svn", "stat", "$fullPath.orig" if $isSVN && -e "$fullPath.orig";
+ system "svn", "stat", "$fullPath.orig" if isSVN() && -e "$fullPath.orig";
}
}
}
@@ -435,6 +447,14 @@ sub setChangeLogDateAndReviewer($$)
return $patch;
}
+sub removeEOL($)
+{
+ my ($line) = @_;
+
+ $line =~ s/[\r\n]+$//g;
+ return $line;
+}
+
sub svnStatus($)
{
my ($fullPath) = @_;
@@ -448,10 +468,10 @@ sub svnStatus($)
my $normalizedFullPath = File::Spec->catdir(File::Spec->splitdir($fullPath));
while (<SVN>) {
# Input may use a different EOL sequence than $/, so avoid chomp.
- $_ =~ s/[\r\n]+$//g;
+ $_ = removeEOL($_);
my $normalizedStatPath = File::Spec->catdir(File::Spec->splitdir(substr($_, 7)));
if ($normalizedFullPath eq $normalizedStatPath) {
- $svnStatus = $_;
+ $svnStatus = "$_\n";
last;
}
}
@@ -461,7 +481,7 @@ sub svnStatus($)
}
else {
# Files will have only one status returned.
- $svnStatus = <SVN>;
+ $svnStatus = removeEOL(<SVN>) . "\n";
}
close SVN;
return $svnStatus;
@@ -472,10 +492,10 @@ sub svnStatus($)
sub scmWillDeleteFile($)
{
my ($path) = @_;
- if ($isSVN) {
+ if (isSVN()) {
my $svnOutput = svnStatus($path);
return 1 if $svnOutput && substr($svnOutput, 0, 1) eq "D";
- } elsif ($isGit) {
+ } elsif (isGit()) {
my $gitOutput = `git diff-index --name-status HEAD -- $path`;
return 1 if $gitOutput && substr($gitOutput, 0, 1) eq "D";
}
@@ -485,7 +505,7 @@ sub scmWillDeleteFile($)
sub scmKnowsOfFile($)
{
my ($path) = @_;
- if ($isSVN) {
+ if (isSVN()) {
my $svnOutput = svnStatus($path);
# This will match more than intended. ? might not be the first field in the status
if ($svnOutput && $svnOutput =~ m#\?\s+$path\n#) {
@@ -493,7 +513,7 @@ sub scmKnowsOfFile($)
}
# This does not handle errors well.
return 1;
- } elsif ($isGit) {
+ } elsif (isGit()) {
`git ls-files --error-unmatch -- $path`;
my $exitCode = $? >> 8;
return $exitCode == 0;
@@ -503,9 +523,9 @@ sub scmKnowsOfFile($)
sub scmCopy($$)
{
my ($source, $destination) = @_;
- if ($isSVN) {
+ if (isSVN()) {
system "svn", "copy", $source, $destination;
- } elsif ($isGit) {
+ } elsif (isGit()) {
system "cp", $source, $destination;
system "git", "add", $destination;
}
@@ -514,9 +534,9 @@ sub scmCopy($$)
sub scmAdd($)
{
my ($path) = @_;
- if ($isSVN) {
+ if (isSVN()) {
system "svn", "add", $path;
- } elsif ($isGit) {
+ } elsif (isGit()) {
system "git", "add", $path;
}
}
@@ -524,7 +544,7 @@ sub scmAdd($)
sub scmRemove($)
{
my ($path) = @_;
- if ($isSVN) {
+ if (isSVN()) {
# SVN is very verbose when removing directories. Squelch all output except the last line.
my $svnOutput;
open SVN, "svn rm --force '$path' |" or die "svn rm --force '$path' failed!";
@@ -534,7 +554,7 @@ sub scmRemove($)
}
close SVN;
print $svnOutput if $svnOutput;
- } elsif ($isGit) {
+ } elsif (isGit()) {
system "git", "rm", "--force", $path;
}
}
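
The new comment block in handleBinaryChange() above documents the tightened base64 regexp: any number of full 76-character lines followed by a shorter final line that may end in '=' padding. Here is a self-contained sketch, not part of the patch, of matching and decoding such a block; the property-diff text is made up.

#!/usr/bin/perl -w
# Sketch only: locate and decode the base64 payload of a binary file entry,
# using the same pattern handleBinaryChange() applies above.
use strict;
use MIME::Base64;

my $contents = <<'EOF';
Cannot display: file marked as a binary type.
svn:mime-type = image/png

Q0dCSQ==
EOF

if ($contents =~ m#((\n[A-Za-z0-9+/]{76})*\n[A-Za-z0-9+/]{2,74}?[A-Za-z0-9+/=]{2}\n)#) {
    my $decoded = decode_base64($1);
    printf "decoded %d bytes of binary data\n", length($decoded);
} else {
    print "no base64 block found\n";
}
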
diff --git a/WebKitTools/Scripts/svn-create-patch b/WebKitTools/Scripts/svn-create-patch
index 75c82bc..3f40783 100755
--- a/WebKitTools/Scripts/svn-create-patch
+++ b/WebKitTools/Scripts/svn-create-patch
@@ -77,14 +77,15 @@ sub testfilecmp($$);
$ENV{'LC_ALL'} = 'C';
my $showHelp;
-my $svnVersion = `svn --version --quiet`;
+my $ignoreChangelogs = 0;
my $devNull = File::Spec->devnull();
my $result = GetOptions(
"help" => \$showHelp,
+ "ignore-changelogs" => \$ignoreChangelogs
);
if (!$result || $showHelp) {
- print STDERR basename($0) . " [-h|--help] [svndir1 [svndir2 ...]]\n";
+ print STDERR basename($0) . " [-h|--help] [--ignore-changelogs] [svndir1 [svndir2 ...]]\n";
exit 1;
}
@@ -156,7 +157,7 @@ sub findBaseUrl($)
my $baseUrl;
open INFO, "svn info '$infoPath' |" or die;
while (<INFO>) {
- if (/^URL: (.+)/) {
+ if (/^URL: (.+?)[\r\n]*$/) {
$baseUrl = $1;
}
}
@@ -200,7 +201,7 @@ sub findSourceFileAndRevision($)
my $sourceRevision;
open INFO, "svn info '$file' |" or die;
while (<INFO>) {
- if (/^Copied From URL: (.+)/) {
+ if (/^Copied From URL: (.+?)[\r\n]*$/) {
$sourceFile = File::Spec->abs2rel($1, $baseUrl);
} elsif (/^Copied From Rev: ([0-9]+)/) {
$sourceRevision = $1;
@@ -265,6 +266,11 @@ sub generateDiff($$)
{
my ($fileData, $prefix) = @_;
my $file = File::Spec->catdir($prefix, $fileData->{path});
+
+ if ($ignoreChangelogs && basename($file) eq "ChangeLog") {
+ return;
+ }
+
my $patch;
if ($fileData->{modificationType} eq "additionWithHistory") {
manufacturePatchForAdditionWithHistory($fileData);
@@ -292,7 +298,7 @@ sub generateFileList($\%)
$line =~ s/[\r\n]+$//g;
my $stat;
my $path;
- if (eval "v$svnVersion" ge v1.6) {
+ if (isSVNVersion16OrNewer()) {
$stat = substr($line, 0, 8);
$path = substr($line, 8);
} else {
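
The hunk above replaces the inline v-string comparison against "svn --version --quiet" with the shared isSVNVersion16OrNewer() helper: svn 1.6 widened status output from 7 to 8 columns, so generateFileList() has to pick the right substr() offset. A sketch of the underlying version check follows; the helper name is local to this sketch.

#!/usr/bin/perl -w
# Sketch only: the v-string version comparison behind isSVNVersion16OrNewer().
use strict;

sub svnVersionIsAtLeast($)
{
    my ($minimum) = @_;                          # e.g. "1.6"
    my $svnVersion = `svn --version --quiet`;    # e.g. "1.6.5\n"
    return 0 unless defined($svnVersion) && $svnVersion =~ /^\d+(\.\d+)*$/m;
    chomp $svnVersion;
    return (eval "v$svnVersion") ge (eval "v$minimum");
}

if (svnVersionIsAtLeast("1.6")) {
    print "svn status lines use an 8-column prefix\n";    # substr($line, 0, 8)
} else {
    print "svn status lines use a 7-column prefix\n";     # substr($line, 0, 7)
}
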
diff --git a/WebKitTools/Scripts/svn-unapply b/WebKitTools/Scripts/svn-unapply
index 964b51e..a4cec9a 100755
--- a/WebKitTools/Scripts/svn-unapply
+++ b/WebKitTools/Scripts/svn-unapply
@@ -1,6 +1,7 @@
#!/usr/bin/perl -w
# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -65,11 +66,16 @@ use File::Spec;
use File::Temp qw(tempfile);
use Getopt::Long;
+use FindBin;
+use lib $FindBin::Bin;
+use VCSUtils;
+
sub checksum($);
sub fixChangeLogPatch($);
sub gitdiff2svndiff($);
sub patch($);
sub revertDirectories();
+sub removeEOL($);
sub svnStatus($);
sub unapplyPatch($$;$);
sub unsetChangeLogDate($$);
@@ -80,6 +86,9 @@ if (!GetOptions("help!" => \$showHelp) || $showHelp) {
exit 1;
}
+my $pathScriptWasRunFrom = Cwd::getcwd();
+my $pathForRepositoryRoot = determineVCSRoot();
+
my @copiedFiles;
my %directoriesToCheck;
@@ -133,7 +142,9 @@ for $patch (@copiedFiles) {
patch($patch);
}
-revertDirectories();
+if (isSVN()) {
+ revertDirectories();
+}
exit 0;
@@ -289,6 +300,7 @@ sub patch($)
sub revertDirectories()
{
+ chdir $pathForRepositoryRoot;
my %checkedDirectories;
foreach my $path (reverse sort keys %directoriesToCheck) {
my @dirs = File::Spec->splitdir($path);
@@ -318,6 +330,14 @@ sub revertDirectories()
}
}
+sub removeEOL($)
+{
+ my ($line) = @_;
+
+ $line =~ s/[\r\n]+$//g;
+ return $line;
+}
+
sub svnStatus($)
{
my ($fullPath) = @_;
@@ -331,10 +351,10 @@ sub svnStatus($)
my $normalizedFullPath = File::Spec->catdir(File::Spec->splitdir($fullPath));
while (<SVN>) {
# Input may use a different EOL sequence than $/, so avoid chomp.
- $_ =~ s/[\r\n]+$//g;
+ $_ = removeEOL($_);
my $normalizedStatPath = File::Spec->catdir(File::Spec->splitdir(substr($_, 7)));
if ($normalizedFullPath eq $normalizedStatPath) {
- $svnStatus = $_;
+ $svnStatus = "$_\n";
last;
}
}
@@ -344,7 +364,7 @@ sub svnStatus($)
}
else {
# Files will have only one status returned.
- $svnStatus = <SVN>;
+ $svnStatus = removeEOL(<SVN>) . "\n";
}
close SVN;
return $svnStatus;
@@ -353,11 +373,13 @@ sub svnStatus($)
sub unapplyPatch($$;$)
{
my ($patch, $fullPath, $options) = @_;
+ chdir $pathForRepositoryRoot;
$options = [] if (! $options);
my $command = "patch " . join(" ", "-p0", "-R", @{$options});
open PATCH, "| $command" or die "Failed to patch $fullPath: $!";
print PATCH $patch;
close PATCH;
+ chdir $pathScriptWasRunFrom;
}
sub unsetChangeLogDate($$)
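
Both svn-apply and svn-unapply now funnel "svn stat" output through the small removeEOL() helper above, so the path comparison works no matter which EOL sequence svn emits, and a single newline is re-appended so callers still see one status line per file. A condensed sketch with made-up status lines; the 7-character prefix matches the substr($_, 7) offset used above.

#!/usr/bin/perl -w
# Sketch only: EOL-agnostic matching of one file's line in `svn stat` output.
use strict;
use File::Spec;

sub removeEOL($)
{
    my ($line) = @_;
    $line =~ s/[\r\n]+$//g;
    return $line;
}

my $fullPath = "WebCore/ChangeLog";
my $normalizedFullPath = File::Spec->catdir(File::Spec->splitdir($fullPath));

# Pretend `svn stat` output; a status letter plus six spaces gives the 7-column prefix.
my @svnStatLines = (
    "M" . (" " x 6) . "WebCore/page/Page.cpp\r\n",
    "A" . (" " x 6) . "WebCore/ChangeLog\r\n",
);

my $svnStatus;
foreach (@svnStatLines) {
    $_ = removeEOL($_);    # input may use a different EOL sequence than $/
    my $normalizedStatPath = File::Spec->catdir(File::Spec->splitdir(substr($_, 7)));
    if ($normalizedFullPath eq $normalizedStatPath) {
        $svnStatus = "$_\n";
        last;
    }
}
print $svnStatus if $svnStatus;
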
diff --git a/WebKitTools/Scripts/update-sources-list.py b/WebKitTools/Scripts/update-sources-list.py
index e565059..433d04a 100644..100755
--- a/WebKitTools/Scripts/update-sources-list.py
+++ b/WebKitTools/Scripts/update-sources-list.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
# Copyright (C) 2007 Kevin Ollivier All rights reserved.
#
diff --git a/WebKitTools/Scripts/update-webkit b/WebKitTools/Scripts/update-webkit
index 5f72869..e562cc0 100755
--- a/WebKitTools/Scripts/update-webkit
+++ b/WebKitTools/Scripts/update-webkit
@@ -1,6 +1,7 @@
#!/usr/bin/perl -w
-# Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved.
+# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -34,6 +35,7 @@ use lib $FindBin::Bin;
use File::Basename;
use File::Spec;
use Getopt::Long;
+use VCSUtils;
use webkitdirs;
sub runSvnUpdate();
@@ -43,6 +45,8 @@ sub normalizePath($);
my $quiet = '';
my $showHelp;
+determineIsChromium();
+
my $getOptionsResult = GetOptions(
'h|help' => \$showHelp,
'q|quiet' => \$quiet,
@@ -51,6 +55,7 @@ my $getOptionsResult = GetOptions(
if (!$getOptionsResult || $showHelp) {
print STDERR <<__END__;
Usage: @{[ basename($0) ]} [options]
+ --chromium also update dependencies of the chromium port
-h|--help show the help message
-q|--quiet pass -q to svn update for quiet updates
__END__
@@ -60,6 +65,9 @@ __END__
my @svnOptions = ();
push @svnOptions, '-q' if $quiet;
+# Don't prompt when using svn-1.6 or newer.
+push @svnOptions, qw(--accept postpone) if isSVNVersion16OrNewer();
+
chdirWebKit();
print "Updating OpenSource\n" unless $quiet;
runSvnUpdate();
@@ -68,6 +76,8 @@ if (-d "../Internal") {
chdir("../Internal");
print "Updating Internal\n" unless $quiet;
runSvnUpdate();
+} elsif (isChromium()) {
+ system("perl", "WebKitTools/Scripts/update-webkit-chromium") == 0 or die $!;
} elsif (isAppleWinWebKit()) {
system("perl", "WebKitTools/Scripts/update-webkit-auxiliary-libs") == 0 or die;
}
@@ -80,7 +90,7 @@ sub runSvnUpdate()
my @conflictedChangeLogs;
while (my $line = <UPDATE>) {
print $line;
- $line =~ m/^C\s+(.*\S+)\s*$/;
+ $line =~ m/^C\s+(.+?)[\r\n]*$/;
if ($1) {
my $filename = normalizePath($1);
push @conflictedChangeLogs, $filename if basename($filename) eq "ChangeLog";
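
The update-webkit hunk above both suppresses interactive conflict prompts on svn 1.6 ("--accept postpone") and tightens the conflict regexp so trailing CR/LF never ends up in the captured filename. A short sketch of the conflicted-ChangeLog collection, using made-up "svn update" output:

#!/usr/bin/perl -w
# Sketch only: collect conflicted ChangeLogs from `svn update`-style output,
# using the CRLF-tolerant capture the hunk above switches to.
use strict;
use File::Basename;

my @updateOutput = (
    "U    WebCore/ChangeLog\r\n",
    "C    WebKit/mac/ChangeLog\r\n",
    "C    LayoutTests/fast/js/script-tests/foo.js\n",
);

my @conflictedChangeLogs;
foreach my $line (@updateOutput) {
    # (.+?)[\r\n]*$ keeps the EOL sequence out of the captured path.
    next unless $line =~ m/^C\s+(.+?)[\r\n]*$/;
    my $filename = $1;
    push @conflictedChangeLogs, $filename if basename($filename) eq "ChangeLog";
}

print "conflicted ChangeLogs to resolve:\n";
print "  $_\n" foreach @conflictedChangeLogs;
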
diff --git a/WebKitTools/Scripts/update-webkit-chromium b/WebKitTools/Scripts/update-webkit-chromium
new file mode 100644
index 0000000..a0cc19a
--- /dev/null
+++ b/WebKitTools/Scripts/update-webkit-chromium
@@ -0,0 +1,51 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Update script for the WebKit Chromium Port.
+
+# Check if gclient is installed.
+if (not `gclient --version`) {
+ print STDERR "gclient is required for updating chromium dependencies.\n";
+ print STDERR "Install depot_tools and add gclient to the environment\n";
+ print STDERR "path. For more information, refer to:\n";
+ print STDERR "http://dev.chromium.org/developers/how-tos/install-gclient\n";
+ die;
+}
+
+chdir("WebKit/chromium") or die $!;
+if (! -e ".gclient") {
+ # If .gclient configuration file doesn't exist, create it.
+ print "Configuring gclient...\n";
+ system("gclient",
+ "config",
+ "--spec=solutions=[{'name':'./','url':None}]") == 0 or die $!;
+}
+
+# Execute gclient sync.
+print "Updating chromium port dependencies using gclient...\n";
+system("gclient", "sync") == 0 or die $!;
diff --git a/WebKitTools/Scripts/update-webkit-localizable-strings b/WebKitTools/Scripts/update-webkit-localizable-strings
index 350bf21..1d1f413 100755
--- a/WebKitTools/Scripts/update-webkit-localizable-strings
+++ b/WebKitTools/Scripts/update-webkit-localizable-strings
@@ -43,4 +43,4 @@ my $exceptionsFile = "WebKit/StringsNotToBeLocalized.txt";
chdirWebKit();
system "sort -u $exceptionsFile -o $exceptionsFile";
-exec "extract-localizable-strings", $exceptionsFile, $fileToUpdate, @directoriesToScan;
+exec "./WebKitTools/Scripts/extract-localizable-strings", $exceptionsFile, $fileToUpdate, @directoriesToScan;
diff --git a/WebKitTools/Scripts/webkitdirs.pm b/WebKitTools/Scripts/webkitdirs.pm
index d8ccd3a..d5177dd 100644
--- a/WebKitTools/Scripts/webkitdirs.pm
+++ b/WebKitTools/Scripts/webkitdirs.pm
@@ -1,4 +1,5 @@
# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -28,8 +29,10 @@
use strict;
use warnings;
+use Config;
use FindBin;
use File::Basename;
+use File::Spec;
use POSIX;
use VCSUtils;
@@ -83,6 +86,15 @@ sub determineSourceDir
$sourceDir = "$sourceDir/OpenSource" if -d "$sourceDir/OpenSource";
}
+sub currentPerlPath()
+{
+ my $thisPerl = $^X;
+ if ($^O ne 'VMS') {
+ $thisPerl .= $Config{_exe} unless $thisPerl =~ m/$Config{_exe}$/i;
+ }
+ return $thisPerl;
+}
+
# used for scripts which are stored in a non-standard location
sub setSourceDir($)
{
@@ -468,9 +480,17 @@ sub safariPath
# Use Safari.app in product directory if present (good for Safari development team).
if (isAppleMacWebKit() && -d "$configurationProductDir/Safari.app") {
$safariBundle = "$configurationProductDir/Safari.app";
- } elsif (isAppleWinWebKit() && -x "$configurationProductDir/bin/Safari.exe") {
- $safariBundle = "$configurationProductDir/bin/Safari.exe";
- } else {
+ } elsif (isAppleWinWebKit()) {
+ my $path = "$configurationProductDir/Safari.exe";
+ my $debugPath = "$configurationProductDir/Safari_debug.exe";
+
+ if (configurationForVisualStudio() =~ /Debug/ && -x $debugPath) {
+ $safariBundle = $debugPath;
+ } elsif (-x $path) {
+ $safariBundle = $path;
+ }
+ }
+ if (!$safariBundle) {
return installedSafariPath();
}
}
@@ -539,6 +559,25 @@ sub libraryContainsSymbol
return $foundSymbol;
}
+sub hasMathMLSupport
+{
+ my $path = shift;
+
+ return libraryContainsSymbol($path, "MathMLElement");
+}
+
+sub checkWebCoreMathMLSupport
+{
+ my $required = shift;
+ my $framework = "WebCore";
+ my $path = builtDylibPathForName($framework);
+ my $hasMathML = hasMathMLSupport($path);
+ if ($required && !$hasMathML) {
+ die "$framework at \"$path\" does not include MathML Support, please run build-webkit --mathml\n";
+ }
+ return $hasMathML;
+}
+
sub hasSVGSupport
{
my $path = shift;
@@ -618,6 +657,26 @@ sub checkWebCore3DRenderingSupport
return $has3DRendering;
}
+sub has3DCanvasSupport
+{
+ return 0 if isQt();
+
+ my $path = shift;
+ return libraryContainsSymbol($path, "CanvasShader");
+}
+
+sub checkWebCore3DCanvasSupport
+{
+ my $required = shift;
+ my $framework = "WebCore";
+ my $path = builtDylibPathForName($framework);
+ my $has3DCanvas = has3DCanvasSupport($path);
+ if ($required && !$has3DCanvas) {
+ die "$framework at \"$path\" does not include 3D Canvas Support, please run build-webkit --3d-canvas\n";
+ }
+ return $has3DCanvas;
+}
+
sub hasWMLSupport
{
my $path = shift;
@@ -788,6 +847,11 @@ sub isDebianBased()
return -e "/etc/debian_version";
}
+sub isFedoraBased()
+{
+ return -e "/etc/fedora-release";
+}
+
sub isChromium()
{
determineIsChromium();
@@ -810,6 +874,16 @@ sub isDarwin()
return ($^O eq "darwin") || 0;
}
+sub isWindows()
+{
+ return ($^O eq "MSWin32") || 0;
+}
+
+sub isLinux()
+{
+ return ($^O eq "linux") || 0;
+}
+
sub isAppleWebKit()
{
return !(isQt() or isGtk() or isWx() or isChromium());
@@ -894,7 +968,7 @@ sub relativeScriptsDir()
sub launcherPath()
{
my $relativeScriptsPath = relativeScriptsDir();
- if (isGtk() || isQt()) {
+ if (isGtk() || isQt() || isWx()) {
return "$relativeScriptsPath/run-launcher";
} elsif (isAppleWebKit()) {
return "$relativeScriptsPath/run-safari";
@@ -907,6 +981,8 @@ sub launcherName()
return "GtkLauncher";
} elsif (isQt()) {
return "QtLauncher";
+ } elsif (isWx()) {
+ return "wxBrowser";
} elsif (isAppleWebKit()) {
return "Safari";
}
@@ -931,7 +1007,7 @@ sub checkRequiredSystemConfig
print "http://developer.apple.com/tools/xcode\n";
print "*************************************************************\n";
}
- } elsif (isGtk() or isQt() or isWx()) {
+ } elsif (isGtk() or isQt() or isWx() or isChromium()) {
my @cmds = qw(flex bison gperf);
my @missing = ();
foreach my $cmd (@cmds) {
@@ -952,23 +1028,21 @@ sub setupCygwinEnv()
return if !isCygwin();
return if $vcBuildPath;
- my $programFilesPath = `cygpath "$ENV{'PROGRAMFILES'}"`;
- chomp $programFilesPath;
- $vcBuildPath = "$programFilesPath/Microsoft Visual Studio 8/Common7/IDE/devenv.com";
+ my $vsInstallDir;
+ my $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files";
+ if ($ENV{'VSINSTALLDIR'}) {
+ $vsInstallDir = $ENV{'VSINSTALLDIR'};
+ } else {
+ $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8";
+ }
+ $vsInstallDir = `cygpath "$vsInstallDir"`;
+ chomp $vsInstallDir;
+ $vcBuildPath = "$vsInstallDir/Common7/IDE/devenv.com";
if (-e $vcBuildPath) {
# Visual Studio is installed; we can use pdevenv to build.
$vcBuildPath = File::Spec->catfile(sourceDir(), qw(WebKitTools Scripts pdevenv));
} else {
# Visual Studio not found, try VC++ Express
- my $vsInstallDir;
- if ($ENV{'VSINSTALLDIR'}) {
- $vsInstallDir = $ENV{'VSINSTALLDIR'};
- } else {
- $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files";
- $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8";
- }
- $vsInstallDir = `cygpath "$vsInstallDir"`;
- chomp $vsInstallDir;
$vcBuildPath = "$vsInstallDir/Common7/IDE/VCExpress.exe";
if (! -e $vcBuildPath) {
print "*************************************************************\n";
@@ -1032,6 +1106,50 @@ sub buildVisualStudioProject
return system @command;
}
+sub downloadWafIfNeeded
+{
+ # get / update waf if needed
+ my $waf = "$sourceDir/WebKitTools/wx/waf";
+ my $wafURL = 'http://wxwebkit.wxcommunity.com/downloads/deps/waf';
+ if (!-f $waf) {
+ my $result = system "curl -o $waf $wafURL";
+ chmod 0755, $waf;
+ }
+}
+
+sub buildWafProject
+{
+ my ($project, $shouldClean, @options) = @_;
+
+ # set the PYTHONPATH for waf
+ my $pythonPath = $ENV{'PYTHONPATH'};
+ if (!defined($pythonPath)) {
+ $pythonPath = '';
+ }
+ my $sourceDir = sourceDir();
+ my $newPythonPath = "$sourceDir/WebKitTools/wx/build:$pythonPath";
+ if (isCygwin()) {
+ $newPythonPath = `cygpath --mixed --path $newPythonPath`;
+ }
+ $ENV{'PYTHONPATH'} = $newPythonPath;
+
+ print "Building $project\n";
+
+ my $wafCommand = "$sourceDir/WebKitTools/wx/waf";
+ if ($ENV{'WXWEBKIT_WAF'}) {
+ $wafCommand = $ENV{'WXWEBKIT_WAF'};
+ }
+ if (isCygwin()) {
+ $wafCommand = `cygpath --windows "$wafCommand"`;
+ chomp($wafCommand);
+ }
+ if ($shouldClean) {
+ return system $wafCommand, "clean", "distclean";
+ }
+
+ return system $wafCommand, 'configure', 'build', 'install', @options;
+}
+
sub retrieveQMakespecVar
{
my $mkspec = $_[0];
@@ -1187,26 +1305,28 @@ sub buildQMakeProject($@)
push @buildArgs, "CONFIG-=release";
push @buildArgs, "CONFIG+=debug";
} else {
- push @buildArgs, "CONFIG+=release";
my $passedConfig = passedConfiguration() || "";
if (!isDarwin() || $passedConfig =~ m/release/i) {
+ push @buildArgs, "CONFIG+=release";
push @buildArgs, "CONFIG-=debug";
} else {
+ push @buildArgs, "CONFIG+=debug";
push @buildArgs, "CONFIG+=debug_and_release";
- push @buildArgs, "CONFIG+=build_all";
}
}
- my $dir = baseProductDir();
+ my $dir = File::Spec->canonpath(baseProductDir());
+ my @mkdirArgs;
+ push @mkdirArgs, "-p" if !isWindows();
if (! -d $dir) {
- system "mkdir", "-p", "$dir";
+ system "mkdir", @mkdirArgs, "$dir";
if (! -d $dir) {
die "Failed to create product directory " . $dir;
}
}
- $dir = $dir . "/$config";
+ $dir = File::Spec->catfile($dir, $config);
if (! -d $dir) {
- system "mkdir", "-p", "$dir";
+ system "mkdir", @mkdirArgs, "$dir";
if (! -d $dir) {
die "Failed to create build directory " . $dir;
}
@@ -1217,7 +1337,7 @@ sub buildQMakeProject($@)
print "Calling '$qmakebin @buildArgs' in " . $dir . "\n\n";
print "Installation directory: $prefix\n" if(defined($prefix));
- my $result = system $qmakebin, @buildArgs;
+ my $result = system "$qmakebin @buildArgs";
if ($result ne 0) {
die "Failed to setup build environment using $qmakebin!\n";
}
@@ -1250,6 +1370,30 @@ sub buildGtkProject($$@)
return buildAutotoolsProject($clean, @buildArgs);
}
+sub buildChromium($@)
+{
+ my ($clean, @options) = @_;
+
+ my $result = 1;
+ if (isDarwin()) {
+ # Mac build - builds the root xcode project.
+ $result = buildXCodeProject("WebKit/chromium/webkit",
+ $clean,
+ (@options));
+ } elsif (isCygwin()) {
+ # Windows build - builds the root visual studio solution.
+ $result = buildVisualStudioProject("WebKit/chromium/webkit.sln",
+ $clean);
+ } elsif (isLinux()) {
+ # Linux build
+ # FIXME support linux.
+ print STDERR "Linux build is not supported. Yet.";
+ } else {
+ print STDERR "This platform is not supported by chromium.";
+ }
+ return $result;
+}
+
sub setPathForRunningWebKitApp
{
my ($env) = @_;