path: root/WebKitTools/Scripts
Diffstat (limited to 'WebKitTools/Scripts')
-rw-r--r--  WebKitTools/Scripts/VCSUtils.pm  258
-rwxr-xr-x  WebKitTools/Scripts/bisect-builds  5
-rwxr-xr-x  WebKitTools/Scripts/bugzilla-tool  918
-rwxr-xr-x  WebKitTools/Scripts/build-webkit  19
-rwxr-xr-x  WebKitTools/Scripts/commit-log-editor  5
-rwxr-xr-x  WebKitTools/Scripts/do-webcore-rename  244
-rw-r--r--  WebKitTools/Scripts/modules/bugzilla.py  149
-rw-r--r--  WebKitTools/Scripts/modules/bugzilla_unittest.py  91
-rw-r--r--  WebKitTools/Scripts/modules/buildbot.py  3
-rw-r--r--  WebKitTools/Scripts/modules/buildsteps.py  254
-rw-r--r--  WebKitTools/Scripts/modules/commands/__init__.py  1
-rw-r--r--  WebKitTools/Scripts/modules/commands/commandtest.py  42
-rw-r--r--  WebKitTools/Scripts/modules/commands/download.py  370
-rw-r--r--  WebKitTools/Scripts/modules/commands/download_unittest.py  77
-rw-r--r--  WebKitTools/Scripts/modules/commands/early_warning_system.py  66
-rw-r--r--  WebKitTools/Scripts/modules/commands/queries.py  135
-rw-r--r--  WebKitTools/Scripts/modules/commands/queries_unittest.py  68
-rw-r--r--  WebKitTools/Scripts/modules/commands/queues.py  216
-rw-r--r--  WebKitTools/Scripts/modules/commands/queues_unittest.py  66
-rw-r--r--  WebKitTools/Scripts/modules/commands/upload.py  246
-rw-r--r--  WebKitTools/Scripts/modules/commands/upload_unittest.py  42
-rw-r--r--  WebKitTools/Scripts/modules/committers.py  144
-rw-r--r--  WebKitTools/Scripts/modules/committers_unittest.py  24
-rw-r--r--  WebKitTools/Scripts/modules/cpp_style.py  179
-rw-r--r--  WebKitTools/Scripts/modules/cpp_style_unittest.py  146
-rw-r--r--  WebKitTools/Scripts/modules/executive.py  124
-rw-r--r--  WebKitTools/Scripts/modules/grammar.py  43
-rw-r--r--  WebKitTools/Scripts/modules/landingsequence.py  113
-rw-r--r--  WebKitTools/Scripts/modules/logging.py  36
-rw-r--r--  WebKitTools/Scripts/modules/logging_unittest.py  2
-rw-r--r--  WebKitTools/Scripts/modules/mock.py  309
-rw-r--r--  WebKitTools/Scripts/modules/mock_bugzillatool.py  153
-rw-r--r--  WebKitTools/Scripts/modules/multicommandtool.py  253
-rw-r--r--  WebKitTools/Scripts/modules/multicommandtool_unittest.py  158
-rw-r--r--  WebKitTools/Scripts/modules/outputcapture.py  53
-rw-r--r--  WebKitTools/Scripts/modules/patchcollection.py  71
-rw-r--r--  WebKitTools/Scripts/modules/scm.py  161
-rw-r--r--  WebKitTools/Scripts/modules/scm_unittest.py  284
-rw-r--r--  WebKitTools/Scripts/modules/statusbot.py  39
-rw-r--r--  WebKitTools/Scripts/modules/stepsequence.py  68
-rw-r--r--  WebKitTools/Scripts/modules/webkitport.py  118
-rw-r--r--  WebKitTools/Scripts/modules/webkitport_unittest.py  56
-rw-r--r--  WebKitTools/Scripts/modules/workqueue.py  159
-rw-r--r--  WebKitTools/Scripts/modules/workqueue_unittest.py  176
-rwxr-xr-x  WebKitTools/Scripts/prepare-ChangeLog  73
-rwxr-xr-x  WebKitTools/Scripts/run-webkit-tests  194
-rwxr-xr-x  WebKitTools/Scripts/run-webkit-unittests  7
-rwxr-xr-x  WebKitTools/Scripts/run-webkit-websocketserver  96
-rwxr-xr-x  WebKitTools/Scripts/svn-apply  52
-rwxr-xr-x  WebKitTools/Scripts/svn-unapply  9
-rwxr-xr-x  WebKitTools/Scripts/update-webkit  20
-rwxr-xr-x [-rw-r--r--]  WebKitTools/Scripts/update-webkit-chromium  2
-rwxr-xr-x  WebKitTools/Scripts/validate-committer-lists  252
-rw-r--r--  WebKitTools/Scripts/webkitdirs.pm  165
54 files changed, 5699 insertions, 1315 deletions
diff --git a/WebKitTools/Scripts/VCSUtils.pm b/WebKitTools/Scripts/VCSUtils.pm
index e1e0bc2..7638102 100644
--- a/WebKitTools/Scripts/VCSUtils.pm
+++ b/WebKitTools/Scripts/VCSUtils.pm
@@ -41,7 +41,10 @@ BEGIN {
@ISA = qw(Exporter);
@EXPORT = qw(
&canonicalizePath
+ &changeLogEmailAddress
+ &changeLogName
&chdirReturningRelativePath
+ &decodeGitBinaryPatch
&determineSVNRoot
&determineVCSRoot
&fixChangeLogPatch
@@ -298,6 +301,14 @@ sub canonicalizePath($)
return ($#dirs >= 0) ? File::Spec->catdir(@dirs) : ".";
}
+sub removeEOL($)
+{
+ my ($line) = @_;
+
+ $line =~ s/[\r\n]+$//g;
+ return $line;
+}
+
sub svnStatus($)
{
my ($fullPath) = @_;
@@ -335,8 +346,6 @@ sub gitdiff2svndiff($)
$_ = shift @_;
if (m#^diff --git a/(.+) b/(.+)#) {
return "Index: $1";
- } elsif (m/^new file.*/) {
- return "";
} elsif (m#^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}#) {
return "===================================================================";
} elsif (m#^--- a/(.+)#) {
@@ -347,56 +356,215 @@ sub gitdiff2svndiff($)
return $_;
}
+# The diff(1) command is greedy when matching lines, so a new ChangeLog entry will
+# have lines of context at the top of a patch when the existing entry has the same
+# date and author as the new entry. Alter the ChangeLog patch so
+# that the added lines ("+") in the patch always start at the beginning of the
+# patch and there are no initial lines of context.
sub fixChangeLogPatch($)
{
- my $patch = shift;
- my $contextLineCount = 3;
-
- return $patch if $patch !~ /\n@@ -1,(\d+) \+1,(\d+) @@\n( .*\n)+(\+.*\n)+( .*\n){$contextLineCount}$/m;
- my ($oldLineCount, $newLineCount) = ($1, $2);
- return $patch if $oldLineCount <= $contextLineCount;
-
- # The diff(1) command is greedy when matching lines, so a new ChangeLog entry will
- # have lines of context at the top of a patch when the existing entry has the same
- # date and author as the new entry. This nifty loop alters a ChangeLog patch so
- # that the added lines ("+") in the patch always start at the beginning of the
- # patch and there are no initial lines of context.
- my $newPatch;
- my $lineCountInState = 0;
- my $oldContentLineCountReduction = $oldLineCount - $contextLineCount;
- my $newContentLineCountWithoutContext = $newLineCount - $oldLineCount - $oldContentLineCountReduction;
- my ($stateHeader, $statePreContext, $stateNewChanges, $statePostContext) = (1..4);
- my $state = $stateHeader;
- foreach my $line (split(/\n/, $patch)) {
- $lineCountInState++;
- if ($state == $stateHeader && $line =~ /^@@ -1,$oldLineCount \+1,$newLineCount @\@$/) {
- $line = "@@ -1,$contextLineCount +1," . ($newLineCount - $oldContentLineCountReduction) . " @@";
- $lineCountInState = 0;
- $state = $statePreContext;
- } elsif ($state == $statePreContext && substr($line, 0, 1) eq " ") {
- $line = "+" . substr($line, 1);
- if ($lineCountInState == $oldContentLineCountReduction) {
- $lineCountInState = 0;
- $state = $stateNewChanges;
- }
- } elsif ($state == $stateNewChanges && substr($line, 0, 1) eq "+") {
- # No changes to these lines
- if ($lineCountInState == $newContentLineCountWithoutContext) {
- $lineCountInState = 0;
- $state = $statePostContext;
- }
- } elsif ($state == $statePostContext) {
- if (substr($line, 0, 1) eq "+" && $lineCountInState <= $oldContentLineCountReduction) {
- $line = " " . substr($line, 1);
- } elsif ($lineCountInState > $contextLineCount && substr($line, 0, 1) eq " ") {
- next; # Discard
+ my $patch = shift; # $patch will only contain patch fragments for ChangeLog.
+
+ $patch =~ /(\r?\n)/;
+ my $lineEnding = $1;
+ my @patchLines = split(/$lineEnding/, $patch);
+
+ # e.g. 2009-06-03 Eric Seidel <eric@webkit.org>
+ my $dateLineRegexpString = '^\+(\d{4}-\d{2}-\d{2})' # Consume the leading '+' and the date.
+ . '\s+(.+)\s+' # Consume the name.
+ . '<([^<>]+)>$'; # And finally the email address.
+
+ # Figure out where the patch contents start and stop.
+ my $patchHeaderIndex;
+ my $firstContentIndex;
+ my $trailingContextIndex;
+ my $dateIndex;
+ my $patchEndIndex = scalar(@patchLines);
+ for (my $index = 0; $index < @patchLines; ++$index) {
+ my $line = $patchLines[$index];
+ if ($line =~ /^\@\@ -\d+,\d+ \+\d+,\d+ \@\@$/) { # e.g. @@ -1,5 +1,18 @@
+ if ($patchHeaderIndex) {
+ $patchEndIndex = $index; # We only bother to fix up the first patch fragment.
+ last;
}
+ $patchHeaderIndex = $index;
}
- $newPatch .= $line . "\n";
+ $firstContentIndex = $index if ($patchHeaderIndex && !$firstContentIndex && $line =~ /^\+[^+]/); # Only match after finding patchHeaderIndex, otherwise we'd match "+++".
+ $dateIndex = $index if ($line =~ /$dateLineRegexpString/);
+ $trailingContextIndex = $index if ($firstContentIndex && !$trailingContextIndex && $line =~ /^ /);
}
+ my $contentLineCount = $trailingContextIndex - $firstContentIndex;
+ my $trailingContextLineCount = $patchEndIndex - $trailingContextIndex;
+
+ # If we didn't find a date line in the content then this is not a patch we should try and fix.
+ return $patch if (!$dateIndex);
+
+ # We only need to do anything if the date line is not the first content line.
+ return $patch if ($dateIndex == $firstContentIndex);
+
+ # Write the new patch.
+ my $totalNewContentLines = $contentLineCount + $trailingContextLineCount;
+ $patchLines[$patchHeaderIndex] = "@@ -1,$trailingContextLineCount +1,$totalNewContentLines @@"; # Write a new header.
+ my @repeatedLines = splice(@patchLines, $dateIndex, $trailingContextIndex - $dateIndex); # The date line and all the content after it that diff saw as repeated.
+ splice(@patchLines, $firstContentIndex, 0, @repeatedLines); # Move the repeated content to the top.
+ foreach my $line (@repeatedLines) {
+ $line =~ s/^\+/ /;
+ }
+ splice(@patchLines, $trailingContextIndex, $patchEndIndex, @repeatedLines); # Replace trailing context with the repeated content.
+ splice(@patchLines, $patchHeaderIndex + 1, $firstContentIndex - $patchHeaderIndex - 1); # Remove any leading context.
+
+ return join($lineEnding, @patchLines) . "\n"; # patch(1) expects an extra trailing newline.
+}
- return $newPatch;
+sub gitConfig($)
+{
+ return unless $isGit;
+
+ my ($config) = @_;
+
+ my $result = `git config $config`;
+ if (($? >> 8)) {
+ $result = `git repo-config $config`;
+ }
+ chomp $result;
+ return $result;
}
+sub changeLogNameError($)
+{
+ my ($message) = @_;
+ print STDERR "$message\nEither:\n";
+ print STDERR " set CHANGE_LOG_NAME in your environment\n";
+ print STDERR " OR pass --name= on the command line\n";
+    print STDERR "    OR set REAL_NAME in your environment\n";
+ print STDERR " OR git users can set 'git config user.name'\n";
+ exit(1);
+}
+
+sub changeLogName()
+{
+ my $name = $ENV{CHANGE_LOG_NAME} || $ENV{REAL_NAME} || gitConfig("user.name") || (split /\s*,\s*/, (getpwuid $<)[6])[0];
+
+ changeLogNameError("Failed to determine ChangeLog name.") unless $name;
+ # getpwuid seems to always succeed on windows, returning the username instead of the full name. This check will catch that case.
+ changeLogNameError("'$name' does not contain a space! ChangeLogs should contain your full name.") unless ($name =~ /\w \w/);
+
+ return $name;
+}
+
+sub changeLogEmailAddressError($)
+{
+ my ($message) = @_;
+ print STDERR "$message\nEither:\n";
+ print STDERR " set CHANGE_LOG_EMAIL_ADDRESS in your environment\n";
+ print STDERR " OR pass --email= on the command line\n";
+ print STDERR " OR set EMAIL_ADDRESS in your environment\n";
+ print STDERR " OR git users can set 'git config user.email'\n";
+ exit(1);
+}
+
+sub changeLogEmailAddress()
+{
+ my $emailAddress = $ENV{CHANGE_LOG_EMAIL_ADDRESS} || $ENV{EMAIL_ADDRESS} || gitConfig("user.email");
+
+ changeLogEmailAddressError("Failed to determine email address for ChangeLog.") unless $emailAddress;
+ changeLogEmailAddressError("Email address '$emailAddress' does not contain '\@' and is likely invalid.") unless ($emailAddress =~ /\@/);
+
+ return $emailAddress;
+}
+
+# http://tools.ietf.org/html/rfc1924
+sub decodeBase85($)
+{
+ my ($encoded) = @_;
+ my %table;
+ my @characters = ('0'..'9', 'A'..'Z', 'a'..'z', '!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=', '>', '?', '@', '^', '_', '`', '{', '|', '}', '~');
+ for (my $i = 0; $i < 85; $i++) {
+ $table{$characters[$i]} = $i;
+ }
+
+ my $decoded = '';
+ my @encodedChars = $encoded =~ /./g;
+
+ for (my $encodedIter = 0; defined($encodedChars[$encodedIter]);) {
+ my $digit = 0;
+ for (my $i = 0; $i < 5; $i++) {
+ $digit *= 85;
+ my $char = $encodedChars[$encodedIter];
+ $digit += $table{$char};
+ $encodedIter++;
+ }
+
+ for (my $i = 0; $i < 4; $i++) {
+ $decoded .= chr(($digit >> (3 - $i) * 8) & 255);
+ }
+ }
+
+ return $decoded;
+}
+
+sub decodeGitBinaryChunk($$)
+{
+ my ($contents, $fullPath) = @_;
+
+    # Load this module lazily in case the user doesn't have this module
+    # and won't handle git binary patches.
+ require Compress::Zlib;
+
+ my $encoded = "";
+ my $compressedSize = 0;
+ while ($contents =~ /^([A-Za-z])(.*)$/gm) {
+ my $line = $2;
+ next if $line eq "";
+ die "$fullPath: unexpected size of a line: $&" if length($2) % 5 != 0;
+ my $actualSize = length($2) / 5 * 4;
+ my $encodedExpectedSize = ord($1);
+ my $expectedSize = $encodedExpectedSize <= ord("Z") ? $encodedExpectedSize - ord("A") + 1 : $encodedExpectedSize - ord("a") + 27;
+
+ die "$fullPath: unexpected size of a line: $&" if int(($expectedSize + 3) / 4) * 4 != $actualSize;
+ $compressedSize += $expectedSize;
+ $encoded .= $line;
+ }
+
+ my $compressed = decodeBase85($encoded);
+ $compressed = substr($compressed, 0, $compressedSize);
+ return Compress::Zlib::uncompress($compressed);
+}
+
+sub decodeGitBinaryPatch($$)
+{
+ my ($contents, $fullPath) = @_;
+
+ # Git binary patch has two chunks. One is for the normal patching
+ # and another is for the reverse patching.
+ #
+    # Each chunk starts with a line that begins with either "literal" or "delta",
+    # followed by a number that specifies the decoded size of the chunk.
+ # The "delta" type chunks aren't supported by this function yet.
+ #
+    # Then the content of the chunk follows. To decode the content, we
+    # need to decode it with base85 first, and then zlib.
+ my $gitPatchRegExp = '(literal|delta) ([0-9]+)\n([A-Za-z0-9!#$%&()*+-;<=>?@^_`{|}~\\n]*?)\n\n';
+ if ($contents !~ m"\nGIT binary patch\n$gitPatchRegExp$gitPatchRegExp\Z") {
+ die "$fullPath: unknown git binary patch format"
+ }
+
+ my $binaryChunkType = $1;
+ my $binaryChunkExpectedSize = $2;
+ my $encodedChunk = $3;
+ my $reverseBinaryChunkType = $4;
+ my $reverseBinaryChunkExpectedSize = $5;
+ my $encodedReverseChunk = $6;
+
+ my $binaryChunk = decodeGitBinaryChunk($encodedChunk, $fullPath);
+ my $binaryChunkActualSize = length($binaryChunk);
+ my $reverseBinaryChunk = decodeGitBinaryChunk($encodedReverseChunk, $fullPath);
+ my $reverseBinaryChunkActualSize = length($reverseBinaryChunk);
+
+    die "$fullPath: unexpected size of the first chunk (expected $binaryChunkExpectedSize but was $binaryChunkActualSize)" if ($binaryChunkExpectedSize != $binaryChunkActualSize);
+    die "$fullPath: unexpected size of the second chunk (expected $reverseBinaryChunkExpectedSize but was $reverseBinaryChunkActualSize)" if ($reverseBinaryChunkExpectedSize != $reverseBinaryChunkActualSize);
+
+ return ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk);
+}
1;
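
The decodeBase85() and decodeGitBinaryChunk() routines added above decode git's binary-patch payload: each payload line stores groups of 5 base85 characters (RFC 1924 alphabet) that expand to 4 bytes, and the concatenated bytes are then inflated with zlib. As a minimal standalone sketch of the per-group arithmetic — not part of the patch, with hypothetical helper names encode85Group/decode85Group:

#!/usr/bin/perl -w
# Hypothetical sketch: encode 4 bytes into 5 base85 characters using the
# RFC 1924 alphabet, then decode them the same way decodeBase85() above
# does (digit = digit * 85 + value), and verify the round trip.
use strict;

my @alphabet = ('0'..'9', 'A'..'Z', 'a'..'z',
                '!', '#', '$', '%', '&', '(', ')', '*', '+', '-',
                ';', '<', '=', '>', '?', '@', '^', '_', '`', '{', '|', '}', '~');
my %value;
$value{$alphabet[$_]} = $_ foreach 0 .. 84;

sub encode85Group($)
{
    my ($bytes) = @_;                    # exactly 4 bytes
    my $digit = unpack("N", $bytes);     # big-endian 32-bit value
    my $group = "";
    for (my $i = 0; $i < 5; $i++) {
        $group = $alphabet[$digit % 85] . $group;   # least-significant digit first, prepended
        $digit = int($digit / 85);
    }
    return $group;
}

sub decode85Group($)
{
    my ($group) = @_;                    # exactly 5 characters
    my $digit = 0;
    $digit = $digit * 85 + $value{$_} foreach split(//, $group);
    return pack("N", $digit);            # back to 4 bytes
}

my $bytes = "WebK";
die "round trip failed" unless decode85Group(encode85Group($bytes)) eq $bytes;
print "base85 round trip ok\n";

In the patch, decodeGitBinaryChunk() applies this per-group decoding to the payload of every line (the text after the leading length character), truncates the result to the advertised compressed size, and hands it to Compress::Zlib::uncompress().
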
diff --git a/WebKitTools/Scripts/bisect-builds b/WebKitTools/Scripts/bisect-builds
index 55bf238..063b61e 100755
--- a/WebKitTools/Scripts/bisect-builds
+++ b/WebKitTools/Scripts/bisect-builds
@@ -363,12 +363,13 @@ sub mountAndRunNightly($$$$)
my $mountPath = "/Volumes/WebKit";
my $webkitApp = File::Spec->catfile($mountPath, "WebKit.app");
my $diskImage = File::Spec->catfile($directory, $filename);
+ my $devNull = File::Spec->devnull();
my $i = 0;
while (-e $mountPath) {
$i++;
usleep 100 if $i > 1;
- exec "hdiutil", "detach '$mountPath' 2> " . File::Spec->devnull();
+ `hdiutil detach '$mountPath' 2> $devNull`;
die "Could not unmount $diskImage at $mountPath" if $i > 100;
}
die "Can't mount $diskImage: $mountPath already exists!" if -e $mountPath;
@@ -393,7 +394,7 @@ sub mountAndRunNightly($$$$)
$tempFile ||= "";
`DYLD_FRAMEWORK_PATH=$frameworkPath WEBKIT_UNSET_DYLD_FRAMEWORK_PATH=YES $safari $tempFile`;
- exec "hdiutil", "detach '$mountPath' 2> " . File::Spec->devnull();
+ `hdiutil detach '$mountPath' 2> $devNull`;
}
sub parseRevisions($$;$)
diff --git a/WebKitTools/Scripts/bugzilla-tool b/WebKitTools/Scripts/bugzilla-tool
index 8e899b5..fdbb740 100755
--- a/WebKitTools/Scripts/bugzilla-tool
+++ b/WebKitTools/Scripts/bugzilla-tool
@@ -31,894 +31,74 @@
# A tool for automating dealing with bugzilla, posting patches, committing patches, etc.
import os
-import re
-import StringIO # for add_patch_to_bug file wrappers
-import subprocess
-import sys
-import time
-from datetime import datetime, timedelta
-from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
-
-# Import WebKit-specific modules.
-from modules.bugzilla import Bugzilla, parse_bug_id
-from modules.changelogs import ChangeLog
-from modules.comments import bug_comment_from_commit_text
-from modules.logging import error, log, tee
-from modules.scm import CommitMessage, detect_scm_system, ScriptError, CheckoutNeedsUpdate
+from modules.bugzilla import Bugzilla
from modules.buildbot import BuildBot
-from modules.statusbot import StatusBot
-
-def plural(noun):
- # This is a dumb plural() implementation which was just enough for our uses.
- if re.search('h$', noun):
- return noun + 'es'
- else:
- return noun + 's'
-
-def pluralize(noun, count):
- if count != 1:
- noun = plural(noun)
- return "%d %s" % (count, noun)
-
-def commit_message_for_this_commit(scm):
- changelog_paths = scm.modified_changelogs()
- if not len(changelog_paths):
- raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
- "All changes require a ChangeLog. See:\n"
- "http://webkit.org/coding/contributing.html")
-
- changelog_messages = []
- for changelog_path in changelog_paths:
- log("Parsing ChangeLog: %s" % changelog_path)
- changelog_entry = ChangeLog(changelog_path).latest_entry()
- if not changelog_entry:
- raise ScriptError(message="Failed to parse ChangeLog: " + os.path.abspath(changelog_path))
- changelog_messages.append(changelog_entry)
-
- # FIXME: We should sort and label the ChangeLog messages like commit-log-editor does.
- return CommitMessage(''.join(changelog_messages).splitlines())
-
-
-class Command:
- def __init__(self, help_text, argument_names="", options=[], requires_local_commits=False):
- self.help_text = help_text
- self.argument_names = argument_names
- self.options = options
- self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
- self.requires_local_commits = requires_local_commits
-
- def name_with_arguments(self, command_name):
- usage_string = command_name
- if len(self.options) > 0:
- usage_string += " [options]"
- if self.argument_names:
- usage_string += " " + self.argument_names
- return usage_string
-
- def parse_args(self, args):
- return self.option_parser.parse_args(args)
-
- def execute(self, options, args, tool):
- raise NotImplementedError, "subclasses must implement"
-
-
-class BugsInCommitQueue(Command):
- def __init__(self):
- Command.__init__(self, 'Bugs in the commit queue')
-
- def execute(self, options, args, tool):
- bug_ids = tool.bugs.fetch_bug_ids_from_commit_queue()
- for bug_id in bug_ids:
- print "%s" % bug_id
-
-
-class PatchesInCommitQueue(Command):
- def __init__(self):
- Command.__init__(self, 'Patches in the commit queue')
-
- def execute(self, options, args, tool):
- patches = tool.bugs.fetch_patches_from_commit_queue()
- log("Patches in commit queue:")
- for patch in patches:
- print "%s" % patch['url']
-
-
-class ReviewedPatchesOnBug(Command):
- def __init__(self):
- Command.__init__(self, 'r+\'d patches on a bug', 'BUGID')
-
- def execute(self, options, args, tool):
- bug_id = args[0]
- patches_to_land = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
- for patch in patches_to_land:
- print "%s" % patch['url']
-
-
-class ApplyPatchesFromBug(Command):
- def __init__(self):
- options = [
- make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory before applying patches"),
- make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch"),
- ]
- options += WebKitLandingScripts.cleaning_options()
- Command.__init__(self, 'Applies all patches on a bug to the local working directory without committing.', 'BUGID', options=options)
-
- @staticmethod
- def apply_patches(patches, scm, commit_each):
- for patch in patches:
- scm.apply_patch(patch)
- if commit_each:
- commit_message = commit_message_for_this_commit(scm)
- scm.commit_locally_with_message(commit_message.message() or patch['name'])
-
- def execute(self, options, args, tool):
- bug_id = args[0]
- patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
- os.chdir(tool.scm().checkout_root)
- if options.clean:
- tool.scm().ensure_clean_working_directory(options.force_clean)
- if options.update:
- tool.scm().update_webkit()
-
- if options.local_commit and not tool.scm().supports_local_commits():
- error("--local-commit passed, but %s does not support local commits" % tool.scm().display_name())
-
- self.apply_patches(patches, tool.scm(), options.local_commit)
-
-
-class WebKitLandingScripts:
- @staticmethod
- def cleaning_options():
- return [
- make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)"),
- make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches"),
- ]
-
- @staticmethod
- def land_options():
- return [
- make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing."),
- make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing."),
- make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test."),
- make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests."),
- make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output."),
- make_option("--commit-queue", action="store_true", dest="commit_queue", default=False, help="Run in commit queue mode (no user interaction)."),
- ]
-
- @staticmethod
- def run_command_with_teed_output(args, teed_output):
- child_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
-
- # Use our own custom wait loop because Popen ignores a tee'd stderr/stdout.
- # FIXME: This could be improved not to flatten output to stdout.
- while True:
- output_line = child_process.stdout.readline()
- if output_line == '' and child_process.poll() != None:
- return child_process.poll()
- teed_output.write(output_line)
-
- @staticmethod
- def run_and_throw_if_fail(args, quiet=False):
- # Cache the child's output locally so it can be used for error reports.
- child_out_file = StringIO.StringIO()
- if quiet:
- dev_null = open(os.devnull, "w")
- child_stdout = tee(child_out_file, dev_null if quiet else sys.stdout)
- exit_code = WebKitLandingScripts.run_command_with_teed_output(args, child_stdout)
- if quiet:
- dev_null.close()
-
- child_output = child_out_file.getvalue()
- child_out_file.close()
-
- if exit_code:
- raise ScriptError(script_args=args, exit_code=exit_code, output=child_output)
-
- # We might need to pass scm into this function for scm.checkout_root
- @staticmethod
- def webkit_script_path(script_name):
- return os.path.join("WebKitTools", "Scripts", script_name)
-
- @classmethod
- def run_webkit_script(cls, script_name, quiet=False):
- log("Running %s" % script_name)
- cls.run_and_throw_if_fail(cls.webkit_script_path(script_name), quiet)
-
- @classmethod
- def build_webkit(cls, quiet=False):
- cls.run_webkit_script("build-webkit", quiet)
-
- @staticmethod
- def ensure_builders_are_green(buildbot, options):
- if not options.check_builders or buildbot.core_builders_are_green():
- return
- error("Builders at %s are red, please do not commit. Pass --ignore-builders to bypass this check." % (buildbot.buildbot_host))
-
- @classmethod
- def run_webkit_tests(cls, launch_safari, fail_fast=False, quiet=False):
- args = [cls.webkit_script_path("run-webkit-tests")]
- if not launch_safari:
- args.append("--no-launch-safari")
- if quiet:
- args.append("--quiet")
- if fail_fast:
- args.append("--exit-after-n-failures=1")
- cls.run_and_throw_if_fail(args)
-
- @staticmethod
- def setup_for_landing(scm, options):
- os.chdir(scm.checkout_root)
- scm.ensure_no_local_commits(options.force_clean)
- if options.clean:
- scm.ensure_clean_working_directory(options.force_clean)
-
- @classmethod
- def build_and_commit(cls, scm, options):
- if options.build:
- cls.build_webkit(quiet=options.quiet)
- if options.test:
- # When running the commit-queue we don't want to launch Safari and we want to exit after the first failure.
- cls.run_webkit_tests(launch_safari=not options.commit_queue, fail_fast=options.commit_queue, quiet=options.quiet)
- commit_message = commit_message_for_this_commit(scm)
- commit_log = scm.commit_with_message(commit_message.message())
- return bug_comment_from_commit_text(scm, commit_log)
-
-
-class LandAndUpdateBug(Command):
- def __init__(self):
- options = [
- make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER."),
- ]
- options += WebKitLandingScripts.land_options()
- Command.__init__(self, 'Lands the current working directory diff and updates the bug if provided.', '[BUGID]', options=options)
-
- def guess_reviewer_from_bug(self, bugs, bug_id):
- patches = bugs.fetch_reviewed_patches_from_bug(bug_id)
- if len(patches) != 1:
- log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id))
- return None
- patch = patches[0]
- reviewer = patch['reviewer']
- log('Guessing "%s" as reviewer from attachment %s on bug %s.' % (reviewer, patch['id'], bug_id))
- return reviewer
-
- def update_changelogs_with_reviewer(self, reviewer, bug_id, tool):
- if not reviewer:
- if not bug_id:
- log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.")
- return
- reviewer = self.guess_reviewer_from_bug(tool.bugs, bug_id)
-
- if not reviewer:
- log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id)
- return
-
- for changelog_path in tool.scm().modified_changelogs():
- ChangeLog(changelog_path).set_reviewer(reviewer)
-
- def execute(self, options, args, tool):
- bug_id = args[0] if len(args) else None
- os.chdir(tool.scm().checkout_root)
-
- WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
-
- self.update_changelogs_with_reviewer(options.reviewer, bug_id, tool)
-
- comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
- if bug_id:
- log("Updating bug %s" % bug_id)
- if options.close_bug:
- tool.bugs.close_bug_as_fixed(bug_id, comment_text)
- else:
- # FIXME: We should a smart way to figure out if the patch is attached
- # to the bug, and if so obsolete it.
- tool.bugs.post_comment_to_bug(bug_id, comment_text)
- else:
- log(comment_text)
- log("No bug id provided.")
-
-
-class LandPatchesFromBugs(Command):
- def __init__(self):
- options = WebKitLandingScripts.cleaning_options()
- options += WebKitLandingScripts.land_options()
- Command.__init__(self, 'Lands all patches on a bug optionally testing them first', 'BUGID', options=options)
-
- @staticmethod
- def handled_error(error):
- log(error)
- exit(2) # Exit 2 insted of 1 to indicate to the commit-queue to indicate we handled the error, and that the queue should keep looping.
-
- @classmethod
- def land_patches(cls, bug_id, patches, options, tool):
- try:
- comment_text = ""
- for patch in patches:
- tool.scm().update_webkit() # Update before every patch in case the tree has changed
- log("Applying %s from bug %s." % (patch['id'], bug_id))
- tool.scm().apply_patch(patch, force=options.commit_queue)
- # Make sure the tree is still green after updating, before building this patch.
- # The first patch ends up checking tree status twice, but that's OK.
- WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
- comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
- tool.bugs.clear_attachment_flags(patch['id'], comment_text)
-
- if options.close_bug:
- tool.bugs.close_bug_as_fixed(bug_id, "All reviewed patches have been landed. Closing bug.")
- except CheckoutNeedsUpdate, e:
- log("Commit was rejected because the checkout is out of date. Please update and try again.")
- log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.")
- cls.handled_error(e)
- except ScriptError, e:
- # Mark the patch as commit-queue- and comment in the bug.
- tool.bugs.reject_patch_from_commit_queue(patch['id'], e.message_with_output())
- cls.handled_error(e)
-
- @staticmethod
- def _fetch_list_of_patches_to_land(options, args, tool):
- bugs_to_patches = {}
- patch_count = 0
- for bug_id in args:
- patches = []
- if options.commit_queue:
- patches = tool.bugs.fetch_commit_queue_patches_from_bug(bug_id, reject_invalid_patches=True)
- else:
- patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
-
- patches_found = len(patches)
- log("%s found on bug %s." % (pluralize("reviewed patch", patches_found), bug_id))
-
- patch_count += patches_found
- if patches_found:
- bugs_to_patches[bug_id] = patches
-
- log("Landing %s from %s." % (pluralize("patch", patch_count), pluralize("bug", len(args))))
- return bugs_to_patches
-
- def execute(self, options, args, tool):
- if not len(args):
- error("bug-id(s) required")
-
- # Check the tree status here so we can fail early
- WebKitLandingScripts.ensure_builders_are_green(tool.buildbot, options)
-
- bugs_to_patches = self._fetch_list_of_patches_to_land(options, args, tool)
-
- WebKitLandingScripts.setup_for_landing(tool.scm(), options)
-
- for bug_id in bugs_to_patches.keys():
- self.land_patches(bug_id, bugs_to_patches[bug_id], options, tool)
-
-
-class CommitMessageForCurrentDiff(Command):
- def __init__(self):
- Command.__init__(self, 'Prints a commit message suitable for the uncommitted changes.')
-
- def execute(self, options, args, tool):
- os.chdir(tool.scm().checkout_root)
- print "%s" % commit_message_for_this_commit(tool.scm()).message()
-
-
-class ObsoleteAttachmentsOnBug(Command):
- def __init__(self):
- Command.__init__(self, 'Marks all attachments on a bug as obsolete.', 'BUGID')
-
- def execute(self, options, args, tool):
- bug_id = args[0]
- attachments = tool.bugs.fetch_attachments_from_bug(bug_id)
- for attachment in attachments:
- if not attachment['is_obsolete']:
- tool.bugs.obsolete_attachment(attachment['id'])
-
+from modules.buildsteps import BuildSteps
+from modules.commands.download import *
+from modules.commands.early_warning_system import *
+from modules.commands.queries import *
+from modules.commands.queues import *
+from modules.commands.upload import *
+from modules.executive import Executive
+from modules.logging import log
+from modules.multicommandtool import MultiCommandTool
+from modules.scm import detect_scm_system
+
+class BugzillaTool(MultiCommandTool):
+ def __init__(self):
+ MultiCommandTool.__init__(self)
+ self.global_option_parser.add_option("--dry-run", action="callback", help="do not touch remote servers", callback=self.dry_run_callback)
-class PostDiffAsPatchToBug(Command):
- def __init__(self):
- options = [
- make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: 'patch')"),
- ]
- options += self.posting_options()
- Command.__init__(self, 'Attaches the current working directory diff to a bug as a patch file.', '[BUGID]', options=options)
-
- @staticmethod
- def posting_options():
- return [
- make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one."),
- make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
- make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
- ]
-
- @staticmethod
- def obsolete_patches_on_bug(bug_id, bugs):
- patches = bugs.fetch_patches_from_bug(bug_id)
- if len(patches):
- log("Obsoleting %s on bug %s" % (pluralize('old patch', len(patches)), bug_id))
- for patch in patches:
- bugs.obsolete_attachment(patch['id'])
-
- def execute(self, options, args, tool):
- # Perfer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
- bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch())
- if not bug_id:
- error("No bug id passed and no bug url found in diff, can't post.")
-
- if options.obsolete_patches:
- self.obsolete_patches_on_bug(bug_id, tool.bugs)
-
- diff = tool.scm().create_patch()
- diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
-
- description = options.description or "Patch v1"
- tool.bugs.add_patch_to_bug(bug_id, diff_file, description, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
-
-
-class PostCommitsAsPatchesToBug(Command):
- def __init__(self):
- options = [
- make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
- make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
- make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
- ]
- options += PostDiffAsPatchToBug.posting_options()
- Command.__init__(self, 'Attaches a range of local commits to bugs as patch files.', 'COMMITISH', options=options, requires_local_commits=True)
-
- def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
- comment_text = None
- if (options.add_log_as_comment):
- comment_text = commit_message.body(lstrip=True)
- comment_text += "---\n"
- comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
- return comment_text
-
- def _diff_file_for_commit(self, tool, commit_id):
- diff = tool.scm().create_patch_from_local_commit(commit_id)
- return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
-
- def execute(self, options, args, tool):
- if not args:
- error("%s argument is required" % self.argument_names)
-
- commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
- if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is.
- error("bugzilla-tool does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize('patch', len(commit_ids))))
-
- have_obsoleted_patches = set()
- for commit_id in commit_ids:
- commit_message = tool.scm().commit_message_for_local_commit(commit_id)
-
- # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
- bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id))
- if not bug_id:
- log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
- continue
-
- if options.obsolete_patches and bug_id not in have_obsoleted_patches:
- PostDiffAsPatchToBug.obsolete_patches_on_bug(bug_id, tool.bugs)
- have_obsoleted_patches.add(bug_id)
-
- diff_file = self._diff_file_for_commit(tool, commit_id)
- description = options.description or commit_message.description(lstrip=True, strip_url=True)
- comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
- tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
-
-
-class RolloutCommit(Command):
- def __init__(self):
- options = WebKitLandingScripts.land_options()
- options += WebKitLandingScripts.cleaning_options()
- options.append(make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Experimental support for complete unsupervised rollouts, including re-opening the bug. Not recommended."))
- Command.__init__(self, 'Reverts the given revision and commits the revert and re-opens the original bug.', 'REVISION [BUGID]', options=options)
-
- @staticmethod
- def _create_changelogs_for_revert(scm, revision):
- # First, discard the ChangeLog changes from the rollout.
- changelog_paths = scm.modified_changelogs()
- scm.revert_files(changelog_paths)
-
- # Second, make new ChangeLog entries for this rollout.
- # This could move to prepare-ChangeLog by adding a --revert= option.
- WebKitLandingScripts.run_webkit_script("prepare-ChangeLog")
- for changelog_path in changelog_paths:
- ChangeLog(changelog_path).update_for_revert(revision)
-
- @staticmethod
- def _parse_bug_id_from_revision_diff(tool, revision):
- original_diff = tool.scm().diff_for_revision(revision)
- return parse_bug_id(original_diff)
-
- @staticmethod
- def _reopen_bug_after_rollout(tool, bug_id, comment_text):
- if bug_id:
- tool.bugs.reopen_bug(bug_id, comment_text)
- else:
- log(comment_text)
- log("No bugs were updated or re-opened to reflect this rollout.")
-
- def execute(self, options, args, tool):
- if not args:
- error("REVISION is required, see --help.")
- revision = args[0]
- bug_id = self._parse_bug_id_from_revision_diff(tool, revision)
- if options.complete_rollout:
- if bug_id:
- log("Will re-open bug %s after rollout." % bug_id)
- else:
- log("Failed to parse bug number from diff. No bugs will be updated/reopened after the rollout.")
-
- WebKitLandingScripts.setup_for_landing(tool.scm(), options)
- tool.scm().update_webkit()
- tool.scm().apply_reverse_diff(revision)
- self._create_changelogs_for_revert(tool.scm(), revision)
-
- # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout.
- # Once we trust rollout we will remove this option.
- if not options.complete_rollout:
- log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use 'bugzilla-tool land-diff %s' to commit the rollout." % bug_id)
- else:
- comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
- self._reopen_bug_after_rollout(tool, bug_id, comment_text)
-
-
-class CreateBug(Command):
- def __init__(self):
- options = [
- make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy."),
- make_option("--component", action="store", type="string", dest="component", help="Component for the new bug."),
- make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
- make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
- make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
- ]
- Command.__init__(self, 'Create a bug from local changes or local commits.', '[COMMITISH]', options=options)
-
- def create_bug_from_commit(self, options, args, tool):
- commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
- if len(commit_ids) > 3:
- error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
-
- commit_id = commit_ids[0]
-
- bug_title = ""
- comment_text = ""
- if options.prompt:
- (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
- else:
- commit_message = tool.scm().commit_message_for_local_commit(commit_id)
- bug_title = commit_message.description(lstrip=True, strip_url=True)
- comment_text = commit_message.body(lstrip=True)
- comment_text += "---\n"
- comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
-
- diff = tool.scm().create_patch_from_local_commit(commit_id)
- diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object
- bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch v1", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
-
- if bug_id and len(commit_ids) > 1:
- options.bug_id = bug_id
- options.obsolete_patches = False
- # FIXME: We should pass through --no-comment switch as well.
- PostCommitsAsPatchesToBug.execute(self, options, commit_ids[1:], tool)
-
- def create_bug_from_patch(self, options, args, tool):
- bug_title = ""
- comment_text = ""
- if options.prompt:
- (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
- else:
- commit_message = commit_message_for_this_commit(tool.scm())
- bug_title = commit_message.description(lstrip=True, strip_url=True)
- comment_text = commit_message.body(lstrip=True)
-
- diff = tool.scm().create_patch()
- diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object
- bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch v1", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
-
- def prompt_for_bug_title_and_comment(self):
- bug_title = raw_input("Bug title: ")
- print "Bug comment (hit ^D on blank line to end):"
- lines = sys.stdin.readlines()
- try:
- sys.stdin.seek(0, os.SEEK_END)
- except IOError:
- # Cygwin raises an Illegal Seek (errno 29) exception when the above
- # seek() call is made. Ignoring it seems to cause no harm.
- # FIXME: Figure out a way to get avoid the exception in the first
- # place.
- pass
- else:
- raise
- comment_text = ''.join(lines)
- return (bug_title, comment_text)
-
- def execute(self, options, args, tool):
- if len(args):
- if (not tool.scm().supports_local_commits()):
- error("Extra arguments not supported; patch is taken from working directory.")
- self.create_bug_from_commit(options, args, tool)
- else:
- self.create_bug_from_patch(options, args, tool)
-
-
-class CheckTreeStatus(Command):
- def __init__(self):
- Command.__init__(self, 'Print out the status of the webkit builders.')
-
- def execute(self, options, args, tool):
- for builder in tool.buildbot.builder_statuses():
- status_string = "ok" if builder['is_green'] else 'FAIL'
- print "%s : %s" % (status_string.ljust(4), builder['name'])
-
-
-class LandPatchesFromCommitQueue(Command):
- def __init__(self):
- options = [
- make_option("--is-relaunch", action="store_true", dest="is_relaunch", default=False, help="Internal: Used by the queue to indicate that it's relaunching itself."),
- make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
- make_option("--status-host", action="store", type="string", dest="status_host", default=StatusBot.default_host, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
- ]
- Command.__init__(self, 'Run the commit queue.', options=options)
- self._original_stdout = None
- self._original_stderr = None
- self._files_for_output = []
-
- queue_log_path = 'commit_queue.log'
- bug_logs_directory = 'commit_queue_logs'
-
- log_date_format = "%Y-%m-%d %H:%M:%S"
- sleep_duration_text = "5 mins"
- seconds_to_sleep = 300
-
- def _tee_outputs_to_files(self, files):
- if not self._original_stdout:
- self._original_stdout = sys.stdout
- self._original_stderr = sys.stderr
- if files and len(files):
- sys.stdout = tee(self._original_stdout, *files)
- sys.stderr = tee(self._original_stderr, *files)
- else:
- sys.stdout = self._original_stdout
- sys.stderr = self._original_stderr
-
- @classmethod
- def _sleep_message(cls, message):
- wake_time = datetime.now() + timedelta(seconds=cls.seconds_to_sleep)
- return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(cls.log_date_format), cls.sleep_duration_text)
-
- def _sleep(self, message):
- log(self._sleep_message(message))
- time.sleep(self.seconds_to_sleep)
- self._next_patch()
-
- def _update_status_and_sleep(self, message):
- status_message = self._sleep_message(message)
- self.status_bot.update_status(status_message)
- log(status_message)
- time.sleep(self.seconds_to_sleep)
- self._next_patch()
-
- def _next_patch(self):
- # Re-exec this script to catch any updates to the script.
- # Make sure that the re-execed commit-queue does not wait for the user.
- args = sys.argv[:]
- if args.count("--is-relaunch") == 0:
- args.append("--is-relaunch")
- os.execvp(sys.argv[0], args)
-
- @staticmethod
- def _open_log_file(log_path):
- (log_directory, log_name) = os.path.split(log_path)
- if log_directory and not os.path.exists(log_directory):
- os.makedirs(log_directory)
- return open(log_path, 'a+')
-
- def _add_log_to_output_tee(self, path):
- log_file = self._open_log_file(path)
- self._files_for_output.append(log_file)
- self._tee_outputs_to_files(self._files_for_output)
- return log_file
-
- def _remove_log_from_output_tee(self, log_file):
- self._files_for_output.remove(log_file)
- self._tee_outputs_to_files(self._files_for_output)
- log_file.close()
-
- def execute(self, options, args, tool):
- if not options.is_relaunch:
- log("CAUTION: commit-queue will discard all local changes in %s" % tool.scm().checkout_root)
- if options.confirm:
- response = raw_input("Are you sure? Type 'yes' to continue: ")
- if (response != 'yes'):
- error("User declined.")
-
- queue_log = self._add_log_to_output_tee(self.queue_log_path)
- if not options.is_relaunch:
- log("Running WebKit Commit Queue. %s" % datetime.now().strftime(self.log_date_format))
-
- self.status_bot = StatusBot(host=options.status_host)
-
- # Either of these calls could throw URLError which shouldn't stop the queue.
- # We catch all exceptions just in case.
- try:
- # Fetch patches instead of just bug ids to that we validate reviewer/committer flags on every patch.
- patches = tool.bugs.fetch_patches_from_commit_queue(reject_invalid_patches=True)
- if not len(patches):
- self._update_status_and_sleep("Empty queue.")
- patch_ids = map(lambda patch: patch['id'], patches)
- first_bug_id = patches[0]['bug_id']
- log("%s in commit queue [%s]" % (pluralize('patch', len(patches)), ", ".join(patch_ids)))
-
- red_builders_names = tool.buildbot.red_core_builders_names()
- if red_builders_names:
- red_builders_names = map(lambda name: '"%s"' % name, red_builders_names) # Add quotes around the names.
- self._update_status_and_sleep("Builders [%s] are red. See http://build.webkit.org." % ", ".join(red_builders_names))
-
- self.status_bot.update_status("Landing patches from bug %s." % first_bug_id, bug_id=first_bug_id)
- except Exception, e:
- # Don't try tell the status bot, in case telling it causes an exception.
- self._sleep("Exception while checking queue and bots: %s." % e)
-
- # Try to land patches on the first bug in the queue before looping
- bug_log_path = os.path.join(self.bug_logs_directory, "%s.log" % first_bug_id)
- bug_log = self._add_log_to_output_tee(bug_log_path)
- bugzilla_tool_path = __file__ # re-execute this script
- bugzilla_tool_args = [bugzilla_tool_path, 'land-patches', '--force-clean', '--commit-queue', '--quiet', first_bug_id]
- try:
- WebKitLandingScripts.run_and_throw_if_fail(bugzilla_tool_args)
- except ScriptError, e:
- # Unexpected failure! Mark the patch as commit-queue- and comment in the bug.
- # exit(2) is a special exit code we use to indicate that the error was already handled by land-patches and we should keep looping anyway.
- if e.exit_code != 2:
- tool.bugs.reject_patch_from_commit_queue(patch['id'], "Unexpected failure when landing patch! Please file a bug against bugzilla-tool.\n%s" % e.message_with_output())
- self._remove_log_from_output_tee(bug_log)
- # self._remove_log_from_output_tee(queue_log) # implicit in the exec()
- self._next_patch()
-
-
-class NonWrappingEpilogIndentedHelpFormatter(IndentedHelpFormatter):
- def __init__(self):
- IndentedHelpFormatter.__init__(self)
-
- # The standard IndentedHelpFormatter paragraph-wraps the epilog, killing our custom formatting.
- def format_epilog(self, epilog):
- if epilog:
- return "\n" + epilog + "\n"
- return ""
-
-
-class HelpPrintingOptionParser(OptionParser):
- def error(self, msg):
- self.print_usage(sys.stderr)
- error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
- error_message += "\nType '" + self.get_prog_name() + " --help' to see usage.\n"
- self.exit(2, error_message)
-
-
-class BugzillaTool:
- def __init__(self):
- self.cached_scm = None
self.bugs = Bugzilla()
self.buildbot = BuildBot()
- self.commands = [
- { 'name' : 'bugs-to-commit', 'object' : BugsInCommitQueue() },
- { 'name' : 'patches-to-commit', 'object' : PatchesInCommitQueue() },
- { 'name' : 'reviewed-patches', 'object' : ReviewedPatchesOnBug() },
- { 'name' : 'create-bug', 'object' : CreateBug() },
- { 'name' : 'apply-patches', 'object' : ApplyPatchesFromBug() },
- { 'name' : 'land-diff', 'object' : LandAndUpdateBug() },
- { 'name' : 'land-patches', 'object' : LandPatchesFromBugs() },
- { 'name' : 'commit-message', 'object' : CommitMessageForCurrentDiff() },
- { 'name' : 'obsolete-attachments', 'object' : ObsoleteAttachmentsOnBug() },
- { 'name' : 'post-diff', 'object' : PostDiffAsPatchToBug() },
- { 'name' : 'post-commits', 'object' : PostCommitsAsPatchesToBug() },
- { 'name' : 'tree-status', 'object' : CheckTreeStatus() },
- { 'name' : 'commit-queue', 'object' : LandPatchesFromCommitQueue() },
- { 'name' : 'rollout', 'object' : RolloutCommit() },
- ]
+ self.executive = Executive()
+ self._scm = None
+ self._status = None
+ self.steps = BuildSteps()
- self.global_option_parser = HelpPrintingOptionParser(usage=self.usage_line(), formatter=NonWrappingEpilogIndentedHelpFormatter(), epilog=self.commands_usage())
- self.global_option_parser.add_option("--dry-run", action="store_true", dest="dryrun", help="do not touch remote servers", default=False)
+ def dry_run_callback(self, option, opt, value, parser):
+ self.scm().dryrun = True
+ self.bugs.dryrun = True
def scm(self):
# Lazily initialize SCM to not error-out before command line parsing (or when running non-scm commands).
- original_cwd = os.path.abspath('.')
- if not self.cached_scm:
- self.cached_scm = detect_scm_system(original_cwd)
-
- if not self.cached_scm:
+ original_cwd = os.path.abspath(".")
+ if not self._scm:
+ self._scm = detect_scm_system(original_cwd)
+
+ if not self._scm:
script_directory = os.path.abspath(sys.path[0])
webkit_directory = os.path.abspath(os.path.join(script_directory, "../.."))
- self.cached_scm = detect_scm_system(webkit_directory)
- if self.cached_scm:
+ self._scm = detect_scm_system(webkit_directory)
+ if self._scm:
log("The current directory (%s) is not a WebKit checkout, using %s" % (original_cwd, webkit_directory))
else:
error("FATAL: Failed to determine the SCM system for either %s or %s" % (original_cwd, webkit_directory))
-
- return self.cached_scm
-
- @staticmethod
- def usage_line():
- return "Usage: %prog [options] command [command-options] [command-arguments]"
-
- def commands_usage(self):
- commands_text = "Commands:\n"
- longest_name_length = 0
- command_rows = []
- scm_supports_local_commits = self.scm().supports_local_commits()
- for command in self.commands:
- command_object = command['object']
- if command_object.requires_local_commits and not scm_supports_local_commits:
- continue
- command_name_and_args = command_object.name_with_arguments(command['name'])
- command_rows.append({ 'name-and-args': command_name_and_args, 'object': command_object })
- longest_name_length = max([longest_name_length, len(command_name_and_args)])
-
- # Use our own help formatter so as to indent enough.
- formatter = IndentedHelpFormatter()
- formatter.indent()
- formatter.indent()
-
- for row in command_rows:
- command_object = row['object']
- commands_text += " " + row['name-and-args'].ljust(longest_name_length + 3) + command_object.help_text + "\n"
- commands_text += command_object.option_parser.format_option_help(formatter)
- return commands_text
-
- def handle_global_args(self, args):
- (options, args) = self.global_option_parser.parse_args(args)
- if len(args):
- # We'll never hit this because split_args splits at the first arg without a leading '-'
- self.global_option_parser.error("Extra arguments before command: " + args)
-
- if options.dryrun:
- self.scm().dryrun = True
- self.bugs.dryrun = True
-
- @staticmethod
- def split_args(args):
- # Assume the first argument which doesn't start with '-' is the command name.
- command_index = 0
- for arg in args:
- if arg[0] != '-':
- break
- command_index += 1
- else:
- return (args[:], None, [])
- global_args = args[:command_index]
- command = args[command_index]
- command_args = args[command_index + 1:]
- return (global_args, command, command_args)
-
- def command_by_name(self, command_name):
- for command in self.commands:
- if command_name == command['name']:
- return command
- return None
-
- def main(self):
- (global_args, command_name, args_after_command_name) = self.split_args(sys.argv[1:])
-
- # Handle --help, etc:
- self.handle_global_args(global_args)
-
- if not command_name:
- self.global_option_parser.error("No command specified")
-
- command = self.command_by_name(command_name)
- if not command:
- self.global_option_parser.error(command_name + " is not a recognized command")
+ return self._scm
- command_object = command['object']
+ def status(self):
+ if not self._status:
+ self._status = StatusBot()
+ return self._status
- if command_object.requires_local_commits and not self.scm().supports_local_commits():
- error(command_name + " requires local commits using %s in %s." % (self.scm().display_name(), self.scm().checkout_root))
+ def path(self):
+ return __file__
- (command_options, command_args) = command_object.parse_args(args_after_command_name)
- return command_object.execute(command_options, command_args, self)
+ def should_show_in_main_help(self, command):
+ if not command.show_in_main_help:
+ return False
+ if command.requires_local_commits:
+ return self.scm().supports_local_commits()
+ return True
+ def should_execute_command(self, command):
+ if command.requires_local_commits and not self.scm().supports_local_commits():
+ failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root)
+ return (False, failure_reason)
+ return (True, None)
-def main():
- tool = BugzillaTool()
- return tool.main()
if __name__ == "__main__":
- main()
+ BugzillaTool().main()
diff --git a/WebKitTools/Scripts/build-webkit b/WebKitTools/Scripts/build-webkit
index 4f78eef..566965b 100755
--- a/WebKitTools/Scripts/build-webkit
+++ b/WebKitTools/Scripts/build-webkit
@@ -50,9 +50,9 @@ my $minimal = 0;
my $makeArgs;
my $startTime = time();
-my ($threeDCanvasSupport, $threeDRenderingSupport, $channelMessagingSupport, $databaseSupport, $datagridSupport, $domStorageSupport,
- $eventsourceSupport, $filtersSupport, $geolocationSupport, $iconDatabaseSupport,
- $javaScriptDebuggerSupport, $mathmlSupport, $offlineWebApplicationSupport, $rubySupport, $sharedWorkersSupport,
+my ($threeDCanvasSupport, $threeDRenderingSupport, $channelMessagingSupport, $databaseSupport, $datagridSupport, $datalistSupport,
+ $domStorageSupport, $eventsourceSupport, $filtersSupport, $geolocationSupport, $iconDatabaseSupport,
+ $javaScriptDebuggerSupport, $mathmlSupport, $offlineWebApplicationSupport, $sharedWorkersSupport,
$svgSupport, $svgAnimationSupport, $svgAsImageSupport, $svgDOMObjCBindingsSupport, $svgFontsSupport,
$svgForeignObjectSupport, $svgUseSupport, $videoSupport, $webSocketsSupport, $wmlSupport, $wcssSupport, $xhtmlmpSupport, $workersSupport,
$xpathSupport, $xsltSupport, $coverageSupport, $notificationsSupport);
@@ -75,6 +75,9 @@ my @features = (
{ option => "datagrid", desc => "Toggle Datagrid Support",
define => "ENABLE_DATAGRID", default => 1, value => \$datagridSupport },
+
+ { option => "datalist", desc => "Toggle HTML5 datalist support",
+ define => "ENABLE_DATALIST", default => 1, value => \$datalistSupport },
{ option => "dom-storage", desc => "Toggle DOM Storage Support",
define => "ENABLE_DOM_STORAGE", default => 1, value => \$domStorageSupport },
@@ -83,7 +86,7 @@ my @features = (
define => "ENABLE_EVENTSOURCE", default => 1, value => \$eventsourceSupport },
{ option => "filters", desc => "Toggle Filters support",
- define => "ENABLE_FILTERS", default => 0, value => \$filtersSupport },
+ define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt()), value => \$filtersSupport },
{ option => "geolocation", desc => "Toggle Geolocation support",
define => "ENABLE_GEOLOCATION", default => isGtk(), value => \$geolocationSupport },
@@ -103,9 +106,6 @@ my @features = (
{ option => "offline-web-applications", desc => "Toggle Offline Web Application Support",
define => "ENABLE_OFFLINE_WEB_APPLICATIONS", default => 1, value => \$offlineWebApplicationSupport },
- { option => "ruby", desc => "Toggle HTML5 Ruby support",
- define => "ENABLE_RUBY", default => 1, value => \$rubySupport },
-
{ option => "shared-workers", desc => "Toggle SharedWorkers support",
define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk()), value => \$sharedWorkersSupport },
@@ -313,6 +313,7 @@ removeLibraryDependingOnSVG("WebCore", $svgSupport);
if (isWx()) {
downloadWafIfNeeded();
+ push @projects, 'WebKitTools/DumpRenderTree';
push @projects, 'WebKitTools/wx/browser';
push @projects, 'WebKit/wx/bindings/python';
}
@@ -321,7 +322,7 @@ if (isChromium()) {
# Chromium doesn't build by project directories.
@projects = ();
my $result = buildChromium($clean, @options);
- exit $result if $result;
+ exit exitStatus($result) if exitStatus($result);
}
# Build, and abort if the build fails.
@@ -361,7 +362,7 @@ for my $dir (@projects) {
if (isAppleWinWebKit()) {
print "\n\n===== BUILD FAILED ======\n\n";
my $scriptDir = relativeScriptsDir();
- print "Please ensure you have run $scriptDir/update-webkit to install depenedencies.\n\n";
+ print "Please ensure you have run $scriptDir/update-webkit to install dependencies.\n\n";
my $baseProductDir = baseProductDir();
print "You can view build errors by checking the BuildLog.htm files located at:\n$baseProductDir/obj/<project>/<config>.\n";
}
diff --git a/WebKitTools/Scripts/commit-log-editor b/WebKitTools/Scripts/commit-log-editor
index e58b181..75017e3 100755
--- a/WebKitTools/Scripts/commit-log-editor
+++ b/WebKitTools/Scripts/commit-log-editor
@@ -170,7 +170,8 @@ for my $changeLog (@changeLogs) {
# Attempt to insert the "patch by" line, after the first blank line.
if ($previousLineWasBlank && $hasAuthorInfoToWrite && $lineCount > 0) {
- my $authorAndCommitterAreSamePerson = $ENV{EMAIL_ADDRESS} && $email eq $ENV{EMAIL_ADDRESS};
+ my $committerEmail = changeLogEmailAddress();
+ my $authorAndCommitterAreSamePerson = $email eq $committerEmail;
if (!$authorAndCommitterAreSamePerson) {
$contents .= "Patch by $author <$email> on $date\n";
$hasAuthorInfoToWrite = 0;
@@ -229,7 +230,7 @@ if (isGit() && scalar keys %changeLogSort == 0) {
chomp($webkitGenerateCommitMessage = `git config --bool core.webkitGenerateCommitMessage`);
}
if ($webkitGenerateCommitMessage ne "false") {
- open CHANGELOG_ENTRIES, "-|", "prepare-ChangeLog --git-index --no-write" or die "prepare-ChangeLog failed: $!.\n";
+ open CHANGELOG_ENTRIES, "-|", "$FindBin::Bin/prepare-ChangeLog --git-index --no-write" or die "prepare-ChangeLog failed: $!.\n";
while (<CHANGELOG_ENTRIES>) {
print NEWLOG normalizeLineEndings($_, $endl);
}
diff --git a/WebKitTools/Scripts/do-webcore-rename b/WebKitTools/Scripts/do-webcore-rename
index 2d6ca46..a65fa4f 100755
--- a/WebKitTools/Scripts/do-webcore-rename
+++ b/WebKitTools/Scripts/do-webcore-rename
@@ -33,6 +33,7 @@ use FindBin;
use lib $FindBin::Bin;
use webkitdirs;
use File::Find;
+use VCSUtils;
setConfiguration();
chdirWebKit();
@@ -66,9 +67,229 @@ sub wanted
push @paths, $File::Find::name;
}
-
+my $isDOMTypeRename = 1;
my %renames = (
- "parseURL" => "deprecatedParseURL"
+ "CanvasActiveInfo" => "WebGLActiveInfo",
+ "canvasActiveInfo" => "webGLActiveInfo",
+ "CanvasActiveInfoConstructor" => "WebGLActiveInfoConstructor",
+ "CanvasActiveInfoPrototype" => "WebGLActiveInfoPrototype",
+ "toCanvasActiveInfo" => "toWebGLActiveInfo",
+ "JSCanvasActiveInfo" => "JSWebGLActiveInfo",
+ "JSCanvasActiveInfoPrototype" => "JSWebGLActiveInfoPrototype",
+ "JSCanvasActiveInfoConstructor" => "JSWebGLActiveInfoConstructor",
+ "JSCanvasActiveInfoCustom" => "JSWebGLActiveInfoCustom",
+ "V8CanvasActiveInfo" => "V8WebGLActiveInfo",
+ "V8CanvasActiveInfoPrototype" => "V8WebGLActiveInfoPrototype",
+ "V8CanvasActiveInfoConstructor" => "V8WebGLActiveInfoConstructor",
+ "V8CanvasActiveInfoCustom" => "V8WebGLActiveInfoCustom",
+ "CanvasArray" => "WebGLArray",
+ "canvasArray" => "webGLArray",
+ "CanvasArrayConstructor" => "WebGLArrayConstructor",
+ "CanvasArrayPrototype" => "WebGLArrayPrototype",
+ "toCanvasArray" => "toWebGLArray",
+ "JSCanvasArray" => "JSWebGLArray",
+ "JSCanvasArrayPrototype" => "JSWebGLArrayPrototype",
+ "JSCanvasArrayConstructor" => "JSWebGLArrayConstructor",
+ "JSCanvasArrayCustom" => "JSWebGLArrayCustom",
+ "V8CanvasArray" => "V8WebGLArray",
+ "V8CanvasArrayPrototype" => "V8WebGLArrayPrototype",
+ "V8CanvasArrayConstructor" => "V8WebGLArrayConstructor",
+ "V8CanvasArrayCustom" => "V8WebGLArrayCustom",
+ "CanvasArrayBuffer" => "WebGLArrayBuffer",
+ "canvasArrayBuffer" => "webGLArrayBuffer",
+ "CanvasArrayBufferConstructor" => "WebGLArrayBufferConstructor",
+ "CanvasArrayBufferPrototype" => "WebGLArrayBufferPrototype",
+ "toCanvasArrayBuffer" => "toWebGLArrayBuffer",
+ "JSCanvasArrayBuffer" => "JSWebGLArrayBuffer",
+ "JSCanvasArrayBufferPrototype" => "JSWebGLArrayBufferPrototype",
+ "JSCanvasArrayBufferConstructor" => "JSWebGLArrayBufferConstructor",
+ "JSCanvasArrayBufferCustom" => "JSWebGLArrayBufferCustom",
+ "V8CanvasArrayBuffer" => "V8WebGLArrayBuffer",
+ "V8CanvasArrayBufferPrototype" => "V8WebGLArrayBufferPrototype",
+ "V8CanvasArrayBufferConstructor" => "V8WebGLArrayBufferConstructor",
+ "V8CanvasArrayBufferCustom" => "V8WebGLArrayBufferCustom",
+ "CanvasBuffer" => "WebGLBuffer",
+ "canvasBuffer" => "webGLBuffer",
+ "CanvasBufferConstructor" => "WebGLBufferConstructor",
+ "CanvasBufferPrototype" => "WebGLBufferPrototype",
+ "toCanvasBuffer" => "toWebGLBuffer",
+ "JSCanvasBuffer" => "JSWebGLBuffer",
+ "JSCanvasBufferPrototype" => "JSWebGLBufferPrototype",
+ "JSCanvasBufferConstructor" => "JSWebGLBufferConstructor",
+ "JSCanvasBufferCustom" => "JSWebGLBufferCustom",
+ "V8CanvasBuffer" => "V8WebGLBuffer",
+ "V8CanvasBufferPrototype" => "V8WebGLBufferPrototype",
+ "V8CanvasBufferConstructor" => "V8WebGLBufferConstructor",
+ "V8CanvasBufferCustom" => "V8WebGLBufferCustom",
+ "CanvasByteArray" => "WebGLByteArray",
+ "canvasByteArray" => "webGLByteArray",
+ "CanvasByteArrayConstructor" => "WebGLByteArrayConstructor",
+ "CanvasByteArrayPrototype" => "WebGLByteArrayPrototype",
+ "toCanvasByteArray" => "toWebGLByteArray",
+ "JSCanvasByteArray" => "JSWebGLByteArray",
+ "JSCanvasByteArrayPrototype" => "JSWebGLByteArrayPrototype",
+ "JSCanvasByteArrayConstructor" => "JSWebGLByteArrayConstructor",
+ "JSCanvasByteArrayCustom" => "JSWebGLByteArrayCustom",
+ "V8CanvasByteArray" => "V8WebGLByteArray",
+ "V8CanvasByteArrayPrototype" => "V8WebGLByteArrayPrototype",
+ "V8CanvasByteArrayConstructor" => "V8WebGLByteArrayConstructor",
+ "V8CanvasByteArrayCustom" => "V8WebGLByteArrayCustom",
+ "CanvasFloatArray" => "WebGLFloatArray",
+ "canvasFloatArray" => "webGLFloatArray",
+ "CanvasFloatArrayConstructor" => "WebGLFloatArrayConstructor",
+ "CanvasFloatArrayPrototype" => "WebGLFloatArrayPrototype",
+ "toCanvasFloatArray" => "toWebGLFloatArray",
+ "JSCanvasFloatArray" => "JSWebGLFloatArray",
+ "JSCanvasFloatArrayPrototype" => "JSWebGLFloatArrayPrototype",
+ "JSCanvasFloatArrayConstructor" => "JSWebGLFloatArrayConstructor",
+ "JSCanvasFloatArrayCustom" => "JSWebGLFloatArrayCustom",
+ "V8CanvasFloatArray" => "V8WebGLFloatArray",
+ "V8CanvasFloatArrayPrototype" => "V8WebGLFloatArrayPrototype",
+ "V8CanvasFloatArrayConstructor" => "V8WebGLFloatArrayConstructor",
+ "V8CanvasFloatArrayCustom" => "V8WebGLFloatArrayCustom",
+ "CanvasFramebuffer" => "WebGLFramebuffer",
+ "canvasFramebuffer" => "webGLFramebuffer",
+ "CanvasFramebufferConstructor" => "WebGLFramebufferConstructor",
+ "CanvasFramebufferPrototype" => "WebGLFramebufferPrototype",
+ "toCanvasFramebuffer" => "toWebGLFramebuffer",
+ "JSCanvasFramebuffer" => "JSWebGLFramebuffer",
+ "JSCanvasFramebufferPrototype" => "JSWebGLFramebufferPrototype",
+ "JSCanvasFramebufferConstructor" => "JSWebGLFramebufferConstructor",
+ "JSCanvasFramebufferCustom" => "JSWebGLFramebufferCustom",
+ "V8CanvasFramebuffer" => "V8WebGLFramebuffer",
+ "V8CanvasFramebufferPrototype" => "V8WebGLFramebufferPrototype",
+ "V8CanvasFramebufferConstructor" => "V8WebGLFramebufferConstructor",
+ "V8CanvasFramebufferCustom" => "V8WebGLFramebufferCustom",
+ "CanvasIntArray" => "WebGLIntArray",
+ "canvasIntArray" => "webGLIntArray",
+ "CanvasIntArrayConstructor" => "WebGLIntArrayConstructor",
+ "CanvasIntArrayPrototype" => "WebGLIntArrayPrototype",
+ "toCanvasIntArray" => "toWebGLIntArray",
+ "JSCanvasIntArray" => "JSWebGLIntArray",
+ "JSCanvasIntArrayPrototype" => "JSWebGLIntArrayPrototype",
+ "JSCanvasIntArrayConstructor" => "JSWebGLIntArrayConstructor",
+ "JSCanvasIntArrayCustom" => "JSWebGLIntArrayCustom",
+ "V8CanvasIntArray" => "V8WebGLIntArray",
+ "V8CanvasIntArrayPrototype" => "V8WebGLIntArrayPrototype",
+ "V8CanvasIntArrayConstructor" => "V8WebGLIntArrayConstructor",
+ "V8CanvasIntArrayCustom" => "V8WebGLIntArrayCustom",
+ "CanvasProgram" => "WebGLProgram",
+ "canvasProgram" => "webGLProgram",
+ "CanvasProgramConstructor" => "WebGLProgramConstructor",
+ "CanvasProgramPrototype" => "WebGLProgramPrototype",
+ "toCanvasProgram" => "toWebGLProgram",
+ "JSCanvasProgram" => "JSWebGLProgram",
+ "JSCanvasProgramPrototype" => "JSWebGLProgramPrototype",
+ "JSCanvasProgramConstructor" => "JSWebGLProgramConstructor",
+ "JSCanvasProgramCustom" => "JSWebGLProgramCustom",
+ "V8CanvasProgram" => "V8WebGLProgram",
+ "V8CanvasProgramPrototype" => "V8WebGLProgramPrototype",
+ "V8CanvasProgramConstructor" => "V8WebGLProgramConstructor",
+ "V8CanvasProgramCustom" => "V8WebGLProgramCustom",
+ "CanvasRenderbuffer" => "WebGLRenderbuffer",
+ "canvasRenderbuffer" => "webGLRenderbuffer",
+ "CanvasRenderbufferConstructor" => "WebGLRenderbufferConstructor",
+ "CanvasRenderbufferPrototype" => "WebGLRenderbufferPrototype",
+ "toCanvasRenderbuffer" => "toWebGLRenderbuffer",
+ "JSCanvasRenderbuffer" => "JSWebGLRenderbuffer",
+ "JSCanvasRenderbufferPrototype" => "JSWebGLRenderbufferPrototype",
+ "JSCanvasRenderbufferConstructor" => "JSWebGLRenderbufferConstructor",
+ "JSCanvasRenderbufferCustom" => "JSWebGLRenderbufferCustom",
+ "V8CanvasRenderbuffer" => "V8WebGLRenderbuffer",
+ "V8CanvasRenderbufferPrototype" => "V8WebGLRenderbufferPrototype",
+ "V8CanvasRenderbufferConstructor" => "V8WebGLRenderbufferConstructor",
+ "V8CanvasRenderbufferCustom" => "V8WebGLRenderbufferCustom",
+ "CanvasRenderingContext3D" => "WebGLRenderingContext",
+ "canvasRenderingContext3D" => "webGLRenderingContext",
+ "CanvasRenderingContext3DConstructor" => "WebGLRenderingContextConstructor",
+ "CanvasRenderingContext3DPrototype" => "WebGLRenderingContextPrototype",
+ "toCanvasRenderingContext3D" => "toWebGLRenderingContext",
+ "JSCanvasRenderingContext3D" => "JSWebGLRenderingContext",
+ "JSCanvasRenderingContext3DPrototype" => "JSWebGLRenderingContextPrototype",
+ "JSCanvasRenderingContext3DConstructor" => "JSWebGLRenderingContextConstructor",
+ "JSCanvasRenderingContext3DCustom" => "JSWebGLRenderingContextCustom",
+ "V8CanvasRenderingContext3D" => "V8WebGLRenderingContext",
+ "V8CanvasRenderingContext3DPrototype" => "V8WebGLRenderingContextPrototype",
+ "V8CanvasRenderingContext3DConstructor" => "V8WebGLRenderingContextConstructor",
+ "V8CanvasRenderingContext3DCustom" => "V8WebGLRenderingContextCustom",
+ "CanvasShader" => "WebGLShader",
+ "canvasShader" => "webGLShader",
+ "CanvasShaderConstructor" => "WebGLShaderConstructor",
+ "CanvasShaderPrototype" => "WebGLShaderPrototype",
+ "toCanvasShader" => "toWebGLShader",
+ "JSCanvasShader" => "JSWebGLShader",
+ "JSCanvasShaderPrototype" => "JSWebGLShaderPrototype",
+ "JSCanvasShaderConstructor" => "JSWebGLShaderConstructor",
+ "JSCanvasShaderCustom" => "JSWebGLShaderCustom",
+ "V8CanvasShader" => "V8WebGLShader",
+ "V8CanvasShaderPrototype" => "V8WebGLShaderPrototype",
+ "V8CanvasShaderConstructor" => "V8WebGLShaderConstructor",
+ "V8CanvasShaderCustom" => "V8WebGLShaderCustom",
+ "CanvasShortArray" => "WebGLShortArray",
+ "canvasShortArray" => "webGLShortArray",
+ "CanvasShortArrayConstructor" => "WebGLShortArrayConstructor",
+ "CanvasShortArrayPrototype" => "WebGLShortArrayPrototype",
+ "toCanvasShortArray" => "toWebGLShortArray",
+ "JSCanvasShortArray" => "JSWebGLShortArray",
+ "JSCanvasShortArrayPrototype" => "JSWebGLShortArrayPrototype",
+ "JSCanvasShortArrayConstructor" => "JSWebGLShortArrayConstructor",
+ "JSCanvasShortArrayCustom" => "JSWebGLShortArrayCustom",
+ "V8CanvasShortArray" => "V8WebGLShortArray",
+ "V8CanvasShortArrayPrototype" => "V8WebGLShortArrayPrototype",
+ "V8CanvasShortArrayConstructor" => "V8WebGLShortArrayConstructor",
+ "V8CanvasShortArrayCustom" => "V8WebGLShortArrayCustom",
+ "CanvasTexture" => "WebGLTexture",
+ "canvasTexture" => "webGLTexture",
+ "CanvasTextureConstructor" => "WebGLTextureConstructor",
+ "CanvasTexturePrototype" => "WebGLTexturePrototype",
+ "toCanvasTexture" => "toWebGLTexture",
+ "JSCanvasTexture" => "JSWebGLTexture",
+ "JSCanvasTexturePrototype" => "JSWebGLTexturePrototype",
+ "JSCanvasTextureConstructor" => "JSWebGLTextureConstructor",
+ "JSCanvasTextureCustom" => "JSWebGLTextureCustom",
+ "V8CanvasTexture" => "V8WebGLTexture",
+ "V8CanvasTexturePrototype" => "V8WebGLTexturePrototype",
+ "V8CanvasTextureConstructor" => "V8WebGLTextureConstructor",
+ "V8CanvasTextureCustom" => "V8WebGLTextureCustom",
+ "CanvasUnsignedByteArray" => "WebGLUnsignedByteArray",
+ "canvasUnsignedByteArray" => "webGLUnsignedByteArray",
+ "CanvasUnsignedByteArrayConstructor" => "WebGLUnsignedByteArrayConstructor",
+ "CanvasUnsignedByteArrayPrototype" => "WebGLUnsignedByteArrayPrototype",
+ "toCanvasUnsignedByteArray" => "toWebGLUnsignedByteArray",
+ "JSCanvasUnsignedByteArray" => "JSWebGLUnsignedByteArray",
+ "JSCanvasUnsignedByteArrayPrototype" => "JSWebGLUnsignedByteArrayPrototype",
+ "JSCanvasUnsignedByteArrayConstructor" => "JSWebGLUnsignedByteArrayConstructor",
+ "JSCanvasUnsignedByteArrayCustom" => "JSWebGLUnsignedByteArrayCustom",
+ "V8CanvasUnsignedByteArray" => "V8WebGLUnsignedByteArray",
+ "V8CanvasUnsignedByteArrayPrototype" => "V8WebGLUnsignedByteArrayPrototype",
+ "V8CanvasUnsignedByteArrayConstructor" => "V8WebGLUnsignedByteArrayConstructor",
+ "V8CanvasUnsignedByteArrayCustom" => "V8WebGLUnsignedByteArrayCustom",
+ "CanvasUnsignedIntArray" => "WebGLUnsignedIntArray",
+ "canvasUnsignedIntArray" => "webGLUnsignedIntArray",
+ "CanvasUnsignedIntArrayConstructor" => "WebGLUnsignedIntArrayConstructor",
+ "CanvasUnsignedIntArrayPrototype" => "WebGLUnsignedIntArrayPrototype",
+ "toCanvasUnsignedIntArray" => "toWebGLUnsignedIntArray",
+ "JSCanvasUnsignedIntArray" => "JSWebGLUnsignedIntArray",
+ "JSCanvasUnsignedIntArrayPrototype" => "JSWebGLUnsignedIntArrayPrototype",
+ "JSCanvasUnsignedIntArrayConstructor" => "JSWebGLUnsignedIntArrayConstructor",
+ "JSCanvasUnsignedIntArrayCustom" => "JSWebGLUnsignedIntArrayCustom",
+ "V8CanvasUnsignedIntArray" => "V8WebGLUnsignedIntArray",
+ "V8CanvasUnsignedIntArrayPrototype" => "V8WebGLUnsignedIntArrayPrototype",
+ "V8CanvasUnsignedIntArrayConstructor" => "V8WebGLUnsignedIntArrayConstructor",
+ "V8CanvasUnsignedIntArrayCustom" => "V8WebGLUnsignedIntArrayCustom",
+ "CanvasUnsignedShortArray" => "WebGLUnsignedShortArray",
+ "canvasUnsignedShortArray" => "webGLUnsignedShortArray",
+ "CanvasUnsignedShortArrayConstructor" => "WebGLUnsignedShortArrayConstructor",
+ "CanvasUnsignedShortArrayPrototype" => "WebGLUnsignedShortArrayPrototype",
+ "toCanvasUnsignedShortArray" => "toWebGLUnsignedShortArray",
+ "JSCanvasUnsignedShortArray" => "JSWebGLUnsignedShortArray",
+ "JSCanvasUnsignedShortArrayPrototype" => "JSWebGLUnsignedShortArrayPrototype",
+ "JSCanvasUnsignedShortArrayConstructor" => "JSWebGLUnsignedShortArrayConstructor",
+ "JSCanvasUnsignedShortArrayCustom" => "JSWebGLUnsignedShortArrayCustom",
+ "V8CanvasUnsignedShortArray" => "V8WebGLUnsignedShortArray",
+ "V8CanvasUnsignedShortArrayPrototype" => "V8WebGLUnsignedShortArrayPrototype",
+ "V8CanvasUnsignedShortArrayConstructor" => "V8WebGLUnsignedShortArrayConstructor",
+ "V8CanvasUnsignedShortArrayCustom" => "V8WebGLUnsignedShortArrayCustom"
);
my %renamesContemplatedForTheFuture = (
@@ -150,11 +371,18 @@ for my $file (sort @paths) {
}
}
+
+my $isGit = isGit();
+
for my $file (sort @paths) {
if ($newFile{$file}) {
my $newFile = $newFile{$file};
print "Renaming $file to $newFile\n";
- system "svn move $file $newFile";
+ if ($isGit) {
+ system "git mv $file $newFile";
+ } else {
+ system "svn move $file $newFile";
+ }
}
}
@@ -171,8 +399,14 @@ for my $file (sort @paths) {
}
my $newContents = $contents;
- for my $from (keys %renames) {
- $newContents =~ s/\b$from(?!["\w])/$renames{$from}/g; # this " unconfuses Xcode syntax highlighting
+ if ($isDOMTypeRename) {
+ for my $from (keys %renames) {
+ $newContents =~ s/\b$from/$renames{$from}/g;
+ }
+ } else {
+ for my $from (keys %renames) {
+ $newContents =~ s/\b$from(?!["\w])/$renames{$from}/g; # this " unconfuses Xcode syntax highlighting
+ }
}
if ($newContents ne $contents) {
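
As an aside (not part of the patch), the difference between the two substitution flavours above comes down to the negative lookahead; a couple of toy Python re.sub calls with made-up inputs illustrate it, and only the regular expressions themselves mirror the Perl code:

    import re

    # DOM-type renames intentionally match bare prefixes, so file names,
    # quoted strings and derived identifiers get rewritten as well.
    re.sub(r"\bCanvasArray", "WebGLArray", 'include "CanvasArray.h"')
    # -> 'include "WebGLArray.h"'

    # The conservative flavour refuses to touch longer identifiers or names
    # immediately followed by a quote.
    re.sub(r'\bparseURL(?!["\w])', "deprecatedParseURL", "parseURLString")
    # -> "parseURLString" (unchanged)
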
diff --git a/WebKitTools/Scripts/modules/bugzilla.py b/WebKitTools/Scripts/modules/bugzilla.py
index fe81b48..be78544 100644
--- a/WebKitTools/Scripts/modules/bugzilla.py
+++ b/WebKitTools/Scripts/modules/bugzilla.py
@@ -43,7 +43,7 @@ from modules.committers import CommitterList
# WebKit includes a built copy of BeautifulSoup in Scripts/modules
# so this import should always succeed.
-from .BeautifulSoup import BeautifulSoup
+from .BeautifulSoup import BeautifulSoup, SoupStrainer
try:
from mechanize import Browser
@@ -95,10 +95,10 @@ def is_mac_os_x():
def parse_bug_id(message):
match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message)
if match:
- return match.group('bug_id')
+ return int(match.group('bug_id'))
match = re.search(Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", message)
if match:
- return match.group('bug_id')
+ return int(match.group('bug_id'))
return None
# FIXME: This should not depend on git for config storage
@@ -163,17 +163,20 @@ class Bugzilla:
def _parse_attachment_flag(self, element, flag_name, attachment, result_key):
flag = element.find('flag', attrs={'name' : flag_name})
- if flag and flag['status'] == '+':
- attachment[result_key] = flag['setter']
+ if flag:
+ attachment[flag_name] = flag['status']
+ if flag['status'] == '+':
+ attachment[result_key] = flag['setter']
def _parse_attachment_element(self, element, bug_id):
attachment = {}
attachment['bug_id'] = bug_id
attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
- attachment['id'] = str(element.find('attachid').string)
+ attachment['id'] = int(element.find('attachid').string)
attachment['url'] = self.attachment_url_for_id(attachment['id'])
attachment['name'] = unicode(element.find('desc').string)
+ attachment['attacher_email'] = str(element.find('attacher').string)
attachment['type'] = str(element.find('type').string)
self._parse_attachment_flag(element, 'review', attachment, 'reviewer_email')
self._parse_attachment_flag(element, 'commit-queue', attachment, 'committer_email')
@@ -192,6 +195,36 @@ class Bugzilla:
attachments.append(attachment)
return attachments
+ def _parse_bug_id_from_attachment_page(self, page):
+ up_link = BeautifulSoup(page).find('link', rel='Up') # The "Up" relation happens to point to the bug.
+ if not up_link:
+ return None # This attachment does not exist (or you don't have permissions to view it).
+ match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
+ return int(match.group('bug_id'))
+
+ def bug_id_for_attachment_id(self, attachment_id):
+ attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
+ log("Fetching: %s" % attachment_url)
+ page = urllib2.urlopen(attachment_url)
+ return self._parse_bug_id_from_attachment_page(page)
+
+ # This should really return an Attachment object
+ # which can lazily fetch any missing data.
+ def fetch_attachment(self, attachment_id):
+ # We could grab all the attachment details off of the attachment edit page
+ # but we already have working code to do so off of the bugs page, so re-use that.
+ bug_id = self.bug_id_for_attachment_id(attachment_id)
+ if not bug_id:
+ return None
+ attachments = self.fetch_attachments_from_bug(bug_id)
+ for attachment in attachments:
+ # FIXME: Once we have a real Attachment class we shouldn't paper over this possible comparison failure
+ # and we should remove the int() == int() hacks and leave it just ==.
+ if int(attachment['id']) == int(attachment_id):
+ self._validate_committer_and_reviewer(attachment)
+ return attachment
+ return None # This should never be hit.
+
def fetch_title_from_bug(self, bug_id):
bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
page = urllib2.urlopen(bug_url)
@@ -209,6 +242,14 @@ class Bugzilla:
def _view_source_link(self, local_path):
return "http://trac.webkit.org/browser/trunk/%s" % local_path
+ def _flag_permission_rejection_message(self, setter_email, flag_name):
+ committer_list = "WebKitTools/Scripts/modules/committers.py"
+        contribution_guidelines_url = "http://webkit.org/coding/contributing.html"
+ rejection_message = "%s does not have %s permissions according to %s." % (setter_email, flag_name, self._view_source_link(committer_list))
+ rejection_message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed) and then set the %s flag again." % (flag_name, committer_list, flag_name)
+        rejection_message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (flag_name, contribution_guidelines_url)
+ return rejection_message
+
def _validate_setter_email(self, patch, result_key, lookup_function, rejection_function, reject_invalid_patches):
setter_email = patch.get(result_key + '_email')
if not setter_email:
@@ -220,18 +261,30 @@ class Bugzilla:
return patch[result_key]
if reject_invalid_patches:
- committer_list = "WebKitTools/Scripts/modules/committers.py"
- failure_message = "%s does not have %s permissions according to %s." % (setter_email, result_key, self._view_source_link(committer_list))
- rejection_function(patch['id'], failure_message)
+ rejection_function(patch['id'], self._flag_permission_rejection_message(setter_email, result_key))
else:
- log("Warning, attachment %s on bug %s has invalid %s (%s)", (patch['id'], patch['bug_id'], result_key, setter_email))
+ log("Warning, attachment %s on bug %s has invalid %s (%s)" % (patch['id'], patch['bug_id'], result_key, setter_email))
return None
def _validate_reviewer(self, patch, reject_invalid_patches):
- return self._validate_setter_email(patch, 'reviewer', self.committers.reviewer_by_bugzilla_email, self.reject_patch_from_review_queue, reject_invalid_patches)
+ return self._validate_setter_email(patch, 'reviewer', self.committers.reviewer_by_email, self.reject_patch_from_review_queue, reject_invalid_patches)
def _validate_committer(self, patch, reject_invalid_patches):
- return self._validate_setter_email(patch, 'committer', self.committers.committer_by_bugzilla_email, self.reject_patch_from_commit_queue, reject_invalid_patches)
+ return self._validate_setter_email(patch, 'committer', self.committers.committer_by_email, self.reject_patch_from_commit_queue, reject_invalid_patches)
+
+ # FIXME: This is a hack until we have a real Attachment object.
+ # _validate_committer and _validate_reviewer fill in the 'reviewer' and 'committer'
+ # keys which other parts of the code expect to be filled in.
+ def _validate_committer_and_reviewer(self, patch):
+ self._validate_reviewer(patch, reject_invalid_patches=False)
+ self._validate_committer(patch, reject_invalid_patches=False)
+
+ def fetch_unreviewed_patches_from_bug(self, bug_id):
+ unreviewed_patches = []
+ for attachment in self.fetch_attachments_from_bug(bug_id):
+ if attachment.get('review') == '?' and not attachment['is_obsolete']:
+ unreviewed_patches.append(attachment)
+ return unreviewed_patches
def fetch_reviewed_patches_from_bug(self, bug_id, reject_invalid_patches=False):
reviewed_patches = []
@@ -247,20 +300,44 @@ class Bugzilla:
commit_queue_patches.append(attachment)
return commit_queue_patches
- def fetch_bug_ids_from_commit_queue(self):
- commit_queue_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B"
-
- page = urllib2.urlopen(commit_queue_url)
+ def _fetch_bug_ids_advanced_query(self, query):
+ page = urllib2.urlopen(query)
soup = BeautifulSoup(page)
bug_ids = []
# Grab the cells in the first column (which happens to be the bug ids)
for bug_link_cell in soup('td', "first-child"): # tds with the class "first-child"
bug_link = bug_link_cell.find("a")
- bug_ids.append(bug_link.string) # the contents happen to be the bug id
+ bug_ids.append(int(bug_link.string)) # the contents happen to be the bug id
return bug_ids
+ def _parse_attachment_ids_request_query(self, page):
+ digits = re.compile("\d+")
+ attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
+ attachment_links = SoupStrainer("a", href=attachment_href)
+ return [int(digits.search(tag["href"]).group(0)) for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
+
+ def _fetch_attachment_ids_request_query(self, query):
+ return self._parse_attachment_ids_request_query(urllib2.urlopen(query))
+
+ def fetch_bug_ids_from_commit_queue(self):
+ commit_queue_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B"
+ return self._fetch_bug_ids_advanced_query(commit_queue_url)
+
+ # List of all r+'d bugs.
+ def fetch_bug_ids_from_needs_commit_list(self):
+ needs_commit_query_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
+ return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
+
+ def fetch_bug_ids_from_review_queue(self):
+ review_queue_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
+ return self._fetch_bug_ids_advanced_query(review_queue_url)
+
+ def fetch_attachment_ids_from_review_queue(self):
+ review_queue_url = self.bug_server_url + "request.cgi?action=queue&type=review&group=type"
+ return self._fetch_attachment_ids_request_query(review_queue_url)
+
def fetch_patches_from_commit_queue(self, reject_invalid_patches=False):
patches_to_land = []
for bug_id in self.fetch_bug_ids_from_commit_queue():
@@ -268,6 +345,22 @@ class Bugzilla:
patches_to_land += patches
return patches_to_land
+ def fetch_patches_from_pending_commit_list(self):
+ patches_needing_commit = []
+ for bug_id in self.fetch_bug_ids_from_needs_commit_list():
+ patches = self.fetch_reviewed_patches_from_bug(bug_id)
+ patches_needing_commit += patches
+ return patches_needing_commit
+
+ def fetch_patches_from_review_queue(self, limit=None):
+ patches_to_review = []
+ for bug_id in self.fetch_bug_ids_from_review_queue():
+ if limit and len(patches_to_review) >= limit:
+ break
+ patches = self.fetch_unreviewed_patches_from_bug(bug_id)
+ patches_to_review += patches
+ return patches_to_review
+
def authenticate(self):
if self.authenticated:
return
@@ -312,7 +405,7 @@ class Bugzilla:
if self.dryrun:
log(comment_text)
return
-
+
self.browser.open("%sattachment.cgi?action=enter&bugid=%s" % (self.bug_server_url, bug_id))
self.browser.select_form(name="entryform")
self._fill_attachment_form(description, patch_file_object, mark_for_review=mark_for_review, mark_for_commit_queue=mark_for_commit_queue, bug_id=bug_id)
@@ -442,8 +535,20 @@ class Bugzilla:
# Bugzilla has two textareas named 'comment', one is somehow hidden. We want the first.
self.browser.set_value(comment_text, name='comment', nr=0)
self.browser.submit()
-
- def post_comment_to_bug(self, bug_id, comment_text):
+
+ def add_cc_to_bug(self, bug_id, email_address):
+ self.authenticate()
+
+ log("Adding %s to the CC list for bug %s" % (email_address, bug_id))
+ if self.dryrun:
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ self.browser["newcc"] = email_address
+ self.browser.submit()
+
+ def post_comment_to_bug(self, bug_id, comment_text, cc=None):
self.authenticate()
log("Adding comment to bug %s" % bug_id)
@@ -453,7 +558,9 @@ class Bugzilla:
self.browser.open(self.bug_url_for_bug_id(bug_id))
self.browser.select_form(name="changeform")
- self.browser['comment'] = comment_text
+ self.browser["comment"] = comment_text
+ if cc:
+ self.browser["newcc"] = cc
self.browser.submit()
def close_bug_as_fixed(self, bug_id, comment_text=None):
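
A rough usage sketch of the new queue and attachment helpers (not part of the patch; it assumes a working network connection and uses only calls that appear in the diff above):

    from modules.bugzilla import Bugzilla, parse_bug_id

    bugs = Bugzilla()
    parse_bug_id("http://webkit.org/b/12345")  # now returns the int 12345

    # Walk the review queue by attachment id rather than by bug id.
    for attachment_id in bugs.fetch_attachment_ids_from_review_queue():
        attachment = bugs.fetch_attachment(attachment_id)  # resolves the owning bug first
        if attachment and not attachment["is_obsolete"]:
            print attachment["bug_id"], attachment["id"], attachment["name"]
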
diff --git a/WebKitTools/Scripts/modules/bugzilla_unittest.py b/WebKitTools/Scripts/modules/bugzilla_unittest.py
index f08031e..fb7f8c4 100644
--- a/WebKitTools/Scripts/modules/bugzilla_unittest.py
+++ b/WebKitTools/Scripts/modules/bugzilla_unittest.py
@@ -29,7 +29,7 @@
import unittest
from modules.committers import CommitterList, Reviewer, Committer
-from modules.bugzilla import Bugzilla
+from modules.bugzilla import Bugzilla, parse_bug_id
from modules.BeautifulSoup import BeautifulSoup
@@ -61,17 +61,34 @@ class BugzillaTest(unittest.TestCase):
</attachment>
'''
_expected_example_attachment_parsing = {
- 'bug_id' : "100",
+ 'bug_id' : 100,
'is_obsolete' : True,
'is_patch' : True,
- 'id' : "33721",
+ 'id' : 33721,
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
+ 'review' : '+',
'reviewer_email' : 'one@test.com',
- 'committer_email' : 'two@test.com'
+ 'commit-queue' : '+',
+ 'committer_email' : 'two@test.com',
+ 'attacher_email' : 'christian.plesner.hansen@gmail.com',
}
+ def test_parse_bug_id(self):
+        # FIXME: These would all be better as doctests
+ bugs = Bugzilla()
+ self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345"))
+ self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
+ self.assertEquals(12345, parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
+
+ # Our bug parser is super-fragile, but at least we're testing it.
+ self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345"))
+ self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+
+
def test_attachment_parsing(self):
bugzilla = Bugzilla()
@@ -86,5 +103,71 @@ class BugzillaTest(unittest.TestCase):
for key, expected_value in self._expected_example_attachment_parsing.items():
self.assertEquals(attachment[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, attachment[key], expected_value)))
+ _sample_attachment_detail_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+ <head>
+ <title>
+ Attachment 41073 Details for Bug 27314</title>
+<link rel="Top" href="https://bugs.webkit.org/">
+ <link rel="Up" href="show_bug.cgi?id=27314">
+"""
+
+ def test_attachment_detail_bug_parsing(self):
+ bugzilla = Bugzilla()
+ self.assertEquals(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
+
+ _sample_request_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+ <head>
+ <title>Request Queue</title>
+ </head>
+<body>
+
+<h3>Flag: review</h3>
+ <table class="requests" cellspacing="0" cellpadding="4" border="1">
+ <tr>
+ <th>Requester</th>
+ <th>Requestee</th>
+ <th>Bug</th>
+ <th>Attachment</th>
+ <th>Created</th>
+ </tr>
+ <tr>
+ <td>Shinichiro Hamaji &lt;hamaji&#64;chromium.org&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
+ <td><a href="attachment.cgi?id=40511&amp;action=review">
+40511: Patch v0</a></td>
+ <td>2009-10-02 04:58 PST</td>
+ </tr>
+ <tr>
+ <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+ <td><a href="attachment.cgi?id=40722&amp;action=review">
+40722: Media controls, the simple approach</a></td>
+ <td>2009-10-06 09:13 PST</td>
+ </tr>
+ <tr>
+ <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+ <td><a href="attachment.cgi?id=40723&amp;action=review">
+40723: Adjust the media slider thumb size</a></td>
+ <td>2009-10-06 09:15 PST</td>
+ </tr>
+ </table>
+</body>
+</html>
+"""
+
+ def test_request_page_parsing(self):
+ bugzilla = Bugzilla()
+ self.assertEquals([40511, 40722, 40723], bugzilla._parse_attachment_ids_request_query(self._sample_request_page))
+
if __name__ == '__main__':
unittest.main()
diff --git a/WebKitTools/Scripts/modules/buildbot.py b/WebKitTools/Scripts/modules/buildbot.py
index e948d8c..548cad8 100644
--- a/WebKitTools/Scripts/modules/buildbot.py
+++ b/WebKitTools/Scripts/modules/buildbot.py
@@ -39,7 +39,8 @@ from modules.logging import log
from .BeautifulSoup import BeautifulSoup
class BuildBot:
- def __init__(self, host="build.webkit.org"):
+ default_host = "build.webkit.org"
+ def __init__(self, host=default_host):
self.buildbot_host = host
self.buildbot_server_url = "http://%s/" % self.buildbot_host
diff --git a/WebKitTools/Scripts/modules/buildsteps.py b/WebKitTools/Scripts/modules/buildsteps.py
new file mode 100644
index 0000000..425b912
--- /dev/null
+++ b/WebKitTools/Scripts/modules/buildsteps.py
@@ -0,0 +1,254 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from optparse import make_option
+
+from modules.comments import bug_comment_from_commit_text
+from modules.logging import log, error
+from modules.webkitport import WebKitPort
+
+
+class CommandOptions(object):
+ force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)")
+ clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches")
+ check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing.")
+ quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.")
+ non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.")
+ parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.")
+ update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.")
+ build = make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test.")
+ test = make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests.")
+ close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.")
+ port = make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...).")
+
+
+class AbstractStep(object):
+ def __init__(self, tool, options, patch=None):
+ self._tool = tool
+ self._options = options
+ self._patch = patch
+ self._port = None
+
+ def _run_script(self, script_name, quiet=False, port=WebKitPort):
+ log("Running %s" % script_name)
+ self._tool.executive.run_and_throw_if_fail(port.script_path(script_name), quiet)
+
+ # FIXME: The port should live on the tool.
+ def port(self):
+ if self._port:
+ return self._port
+ self._port = WebKitPort.port(self._options.port)
+ return self._port
+
+ @classmethod
+ def options(cls):
+ return []
+
+ def run(self, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class PrepareChangelogStep(AbstractStep):
+ def run(self):
+ self._run_script("prepare-ChangeLog")
+
+
+class CleanWorkingDirectoryStep(AbstractStep):
+ def __init__(self, tool, options, patch=None, allow_local_commits=False):
+ AbstractStep.__init__(self, tool, options, patch)
+ self._allow_local_commits = allow_local_commits
+
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.force_clean,
+ CommandOptions.clean,
+ ]
+
+ def run(self):
+ os.chdir(self._tool.scm().checkout_root)
+ if not self._allow_local_commits:
+ self._tool.scm().ensure_no_local_commits(self._options.force_clean)
+ if self._options.clean:
+ self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean)
+
+
+class UpdateStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.update,
+ CommandOptions.port,
+ ]
+
+ def run(self):
+ if not self._options.update:
+ return
+ log("Updating working directory")
+ self._tool.executive.run_and_throw_if_fail(self.port().update_webkit_command())
+
+
+class ApplyPatchStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.non_interactive,
+ ]
+
+ def run(self):
+ log("Processing patch %s from bug %s." % (self._patch["id"], self._patch["bug_id"]))
+ self._tool.scm().apply_patch(self._patch, force=self._options.non_interactive)
+
+
+class EnsureBuildersAreGreenStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.check_builders,
+ ]
+
+ def run(self):
+ if not self._options.check_builders:
+ return
+ red_builders_names = self._tool.buildbot.red_core_builders_names()
+ if not red_builders_names:
+ return
+ red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names.
+ error("Builders [%s] are red, please do not commit.\nSee http://%s.\nPass --ignore-builders to bypass this check." % (", ".join(red_builders_names), self._tool.buildbot.buildbot_host))
+
+
+class BuildStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.build,
+ CommandOptions.quiet,
+ ]
+
+ def run(self):
+ if not self._options.build:
+ return
+ log("Building WebKit")
+ self._tool.executive.run_and_throw_if_fail(self.port().build_webkit_command(), self._options.quiet)
+
+
+class CheckStyleStep(AbstractStep):
+ def run(self):
+ self._run_script("check-webkit-style")
+
+
+class RunTestsStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.build,
+ CommandOptions.test,
+ CommandOptions.non_interactive,
+ CommandOptions.quiet,
+ CommandOptions.port,
+ ]
+
+ def run(self):
+ if not self._options.build:
+ return
+ if not self._options.test:
+ return
+ args = self.port().run_webkit_tests_command()
+ if self._options.non_interactive:
+ args.append("--no-launch-safari")
+ args.append("--exit-after-n-failures=1")
+ if self._options.quiet:
+ args.append("--quiet")
+ self._tool.executive.run_and_throw_if_fail(args)
+
+
+class CommitStep(AbstractStep):
+ def run(self):
+ commit_message = self._tool.scm().commit_message_for_this_commit()
+ return self._tool.scm().commit_with_message(commit_message.message())
+
+
+class ClosePatchStep(AbstractStep):
+ def run(self, commit_log):
+ comment_text = bug_comment_from_commit_text(self._tool.scm(), commit_log)
+ self._tool.bugs.clear_attachment_flags(self._patch["id"], comment_text)
+
+
+class CloseBugStep(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ CommandOptions.close_bug,
+ ]
+
+ def run(self):
+ if not self._options.close_bug:
+ return
+ # Check to make sure there are no r? or r+ patches on the bug before closing.
+ # Assume that r- patches are just previous patches someone forgot to obsolete.
+ patches = self._tool.bugs.fetch_patches_from_bug(self._patch["bug_id"])
+ for patch in patches:
+ review_flag = patch.get("review")
+ if review_flag == "?" or review_flag == "+":
+ log("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." % (patch["bug_id"], patch["id"], review_flag))
+ return
+ self._tool.bugs.close_bug_as_fixed(self._patch["bug_id"], "All reviewed patches have been landed. Closing bug.")
+
+
+# FIXME: This class is a dinosaur and should be extinct soon.
+class BuildSteps:
+ # FIXME: The options should really live on each "Step" object.
+ @staticmethod
+ def cleaning_options():
+ return [
+ CommandOptions.force_clean,
+ CommandOptions.clean,
+ ]
+
+ # FIXME: These distinctions are bogus. We need a better model for handling options.
+ @staticmethod
+ def build_options():
+ return [
+ CommandOptions.check_builders,
+ CommandOptions.quiet,
+ CommandOptions.non_interactive,
+ CommandOptions.parent_command,
+ CommandOptions.port,
+ ]
+
+ @staticmethod
+ def land_options():
+ return [
+ CommandOptions.update,
+ CommandOptions.build,
+ CommandOptions.test,
+ CommandOptions.close_bug,
+ ]
+
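
The step classes above are meant to be composed rather than called directly; a minimal sketch of such a composition follows. StepSequence itself lives in modules/stepsequence.py and is not shown in this diff, so its behaviour here is inferred from the call sites in download.py below.

    from modules.buildsteps import CleanWorkingDirectoryStep, UpdateStep, BuildStep, RunTestsStep
    from modules.stepsequence import StepSequence

    # Each step contributes its own option list via options(); the sequence is
    # assumed to union them so the owning Command can hand them to optparse.
    sequence = StepSequence([CleanWorkingDirectoryStep, UpdateStep, BuildStep, RunTestsStep])
    command_options = sequence.options()

    # At execution time the sequence is expected to instantiate each step with
    # (tool, options[, patch]) and call run() in order:
    # sequence.run_and_handle_errors(tool, options)
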
diff --git a/WebKitTools/Scripts/modules/commands/__init__.py b/WebKitTools/Scripts/modules/commands/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/WebKitTools/Scripts/modules/commands/commandtest.py b/WebKitTools/Scripts/modules/commands/commandtest.py
new file mode 100644
index 0000000..618a517
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/commandtest.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.mock import Mock
+from modules.mock_bugzillatool import MockBugzillaTool
+from modules.outputcapture import OutputCapture
+
+class CommandsTest(unittest.TestCase):
+ def assert_execute_outputs(self, command, args, expected_stdout="", expected_stderr="", options=Mock(), tool=MockBugzillaTool()):
+ capture = OutputCapture()
+ capture.capture_output()
+ command.execute(options, args, tool)
+ (stdout_string, stderr_string) = capture.restore_output()
+ self.assertEqual(stdout_string, expected_stdout)
+        self.assertEqual(stderr_string, expected_stderr)
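
A sketch of how a concrete test might lean on this helper; SomeQueryCommand and its output are placeholders, and only assert_execute_outputs and the mock objects come from the patch.

    from modules.commands.commandtest import CommandsTest

    class SomeQueryCommandTest(CommandsTest):  # hypothetical test case
        def test_no_arguments(self):
            # SomeQueryCommand is a stand-in for a real Command subclass.
            expected_stderr = "Fetching bugs (placeholder output)\n"
            self.assert_execute_outputs(SomeQueryCommand(), [], expected_stderr=expected_stderr)
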
diff --git a/WebKitTools/Scripts/modules/commands/download.py b/WebKitTools/Scripts/modules/commands/download.py
new file mode 100644
index 0000000..2acd69f
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/download.py
@@ -0,0 +1,370 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from optparse import make_option
+
+from modules.bugzilla import parse_bug_id
+from modules.buildsteps import CommandOptions, BuildSteps, EnsureBuildersAreGreenStep, CleanWorkingDirectoryStep, UpdateStep, ApplyPatchStep, BuildStep, CheckStyleStep, PrepareChangelogStep
+from modules.changelogs import ChangeLog
+from modules.comments import bug_comment_from_commit_text
+from modules.executive import ScriptError
+from modules.grammar import pluralize
+from modules.landingsequence import LandingSequence
+from modules.logging import error, log
+from modules.multicommandtool import Command
+from modules.stepsequence import StepSequence
+
+
+class Build(Command):
+ name = "build"
+ show_in_main_help = False
+ def __init__(self):
+ self._sequence = StepSequence([
+ CleanWorkingDirectoryStep,
+ UpdateStep,
+ BuildStep
+ ])
+ Command.__init__(self, "Update working copy and build", "", self._sequence.options())
+
+ def execute(self, options, args, tool):
+ self._sequence.run_and_handle_errors(tool, options)
+
+
+class ApplyAttachment(Command):
+ name = "apply-attachment"
+ show_in_main_help = True
+ def __init__(self):
+ options = WebKitApplyingScripts.apply_options()
+ options += BuildSteps.cleaning_options()
+ Command.__init__(self, "Apply an attachment to the local working directory", "ATTACHMENT_ID", options=options)
+
+ def execute(self, options, args, tool):
+ WebKitApplyingScripts.setup_for_patch_apply(tool, options)
+ attachment_id = args[0]
+ attachment = tool.bugs.fetch_attachment(attachment_id)
+ WebKitApplyingScripts.apply_patches_with_options(tool.scm(), [attachment], options)
+
+
+class ApplyPatches(Command):
+ name = "apply-patches"
+ show_in_main_help = True
+ def __init__(self):
+ options = WebKitApplyingScripts.apply_options()
+ options += BuildSteps.cleaning_options()
+ Command.__init__(self, "Apply reviewed patches from provided bugs to the local working directory", "BUGID", options=options)
+
+ def execute(self, options, args, tool):
+ WebKitApplyingScripts.setup_for_patch_apply(tool, options)
+ bug_id = args[0]
+ patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
+ WebKitApplyingScripts.apply_patches_with_options(tool.scm(), patches, options)
+
+
+class WebKitApplyingScripts:
+ @staticmethod
+ def apply_options():
+ return [
+ make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory before applying patches"),
+ make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch"),
+ CommandOptions.port,
+ ]
+
+ @staticmethod
+ def setup_for_patch_apply(tool, options):
+ clean_step = CleanWorkingDirectoryStep(tool, options, allow_local_commits=True)
+ clean_step.run()
+ update_step = UpdateStep(tool, options)
+ update_step.run()
+
+ @staticmethod
+ def apply_patches_with_options(scm, patches, options):
+ if options.local_commit and not scm.supports_local_commits():
+ error("--local-commit passed, but %s does not support local commits" % scm.display_name())
+
+ for patch in patches:
+ log("Applying attachment %s from bug %s" % (patch["id"], patch["bug_id"]))
+ scm.apply_patch(patch)
+ if options.local_commit:
+ commit_message = scm.commit_message_for_this_commit()
+ scm.commit_locally_with_message(commit_message.message() or patch["name"])
+
+
+class LandDiffSequence(LandingSequence):
+ def run(self):
+ self.check_builders()
+ self.build()
+ self.test()
+ commit_log = self.commit()
+ self.close_bug(commit_log)
+
+ def close_bug(self, commit_log):
+        comment_text = bug_comment_from_commit_text(self._tool.scm(), commit_log)
+        bug_id = self._patch["bug_id"]
+        if bug_id:
+            log("Updating bug %s" % bug_id)
+            if self._options.close_bug:
+                self._tool.bugs.close_bug_as_fixed(bug_id, comment_text)
+            else:
+                # FIXME: We should have a smart way to figure out if the patch is attached
+                # to the bug, and if so obsolete it.
+                self._tool.bugs.post_comment_to_bug(bug_id, comment_text)
+        else:
+            log(comment_text)
+ log("No bug id provided.")
+
+
+class LandDiff(Command):
+ name = "land-diff"
+ show_in_main_help = True
+ def __init__(self):
+ options = [
+ make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER."),
+ ]
+ options += BuildSteps.build_options()
+ options += BuildSteps.land_options()
+        Command.__init__(self, "Land the current working directory diff and update the associated bug if any", "[BUGID]", options=options)
+
+ def guess_reviewer_from_bug(self, bugs, bug_id):
+ patches = bugs.fetch_reviewed_patches_from_bug(bug_id)
+ if len(patches) != 1:
+ log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id))
+ return None
+ patch = patches[0]
+ reviewer = patch["reviewer"]
+ log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (reviewer, patch["id"], bug_id))
+ return reviewer
+
+ def update_changelogs_with_reviewer(self, reviewer, bug_id, tool):
+ if not reviewer:
+ if not bug_id:
+ log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.")
+ return
+ reviewer = self.guess_reviewer_from_bug(tool.bugs, bug_id)
+
+ if not reviewer:
+ log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id)
+ return
+
+ for changelog_path in tool.scm().modified_changelogs():
+ ChangeLog(changelog_path).set_reviewer(reviewer)
+
+ def execute(self, options, args, tool):
+ bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch())
+
+ EnsureBuildersAreGreenStep(tool, options).run()
+
+ os.chdir(tool.scm().checkout_root)
+ self.update_changelogs_with_reviewer(options.reviewer, bug_id, tool)
+
+ fake_patch = {
+ "id": None,
+ "bug_id": bug_id
+ }
+
+ sequence = LandDiffSequence(fake_patch, options, tool)
+ sequence.run()
+
+
+class AbstractPatchProcessingCommand(Command):
+ def __init__(self, help_text, args_description, options):
+ Command.__init__(self, help_text, args_description, options=options)
+
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+ def _prepare_to_process(self, options, args, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+ @staticmethod
+ def _collect_patches_by_bug(patches):
+ bugs_to_patches = {}
+ for patch in patches:
+ bug_id = patch["bug_id"]
+ bugs_to_patches[bug_id] = bugs_to_patches.get(bug_id, []) + [patch]
+ return bugs_to_patches
+
+ def execute(self, options, args, tool):
+ self._prepare_to_process(options, args, tool)
+ patches = self._fetch_list_of_patches_to_process(options, args, tool)
+
+ # It's nice to print out total statistics.
+ bugs_to_patches = self._collect_patches_by_bug(patches)
+ log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))
+
+ for patch in patches:
+ self._process_patch(patch, options, args, tool)
+
+
+class CheckStyle(AbstractPatchProcessingCommand):
+ name = "check-style"
+ show_in_main_help = False
+ def __init__(self):
+ self._sequence = StepSequence([
+ CleanWorkingDirectoryStep,
+ UpdateStep,
+ ApplyPatchStep,
+ CheckStyleStep,
+ ])
+ AbstractPatchProcessingCommand.__init__(self, "Run check-webkit-style on the specified attachments", "ATTACHMENT_ID [ATTACHMENT_IDS]", self._sequence.options())
+
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
+
+ def _prepare_to_process(self, options, args, tool):
+ pass
+
+ def _process_patch(self, patch, options, args, tool):
+ self._sequence.run_and_handle_errors(tool, options, patch)
+
+
+class BuildAttachment(AbstractPatchProcessingCommand):
+ name = "build-attachment"
+ show_in_main_help = False
+ def __init__(self):
+ self._sequence = StepSequence([
+ CleanWorkingDirectoryStep,
+ UpdateStep,
+ ApplyPatchStep,
+ BuildStep,
+ ])
+ AbstractPatchProcessingCommand.__init__(self, "Apply and build patches from bugzilla", "ATTACHMENT_ID [ATTACHMENT_IDS]", self._sequence.options())
+
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
+
+ def _prepare_to_process(self, options, args, tool):
+ pass
+
+ def _process_patch(self, patch, options, args, tool):
+ self._sequence.run_and_handle_errors(tool, options, patch)
+
+
+class AbstractPatchLandingCommand(AbstractPatchProcessingCommand):
+ def __init__(self, help_text, args_description):
+ options = BuildSteps.cleaning_options()
+ options += BuildSteps.build_options()
+ options += BuildSteps.land_options()
+ AbstractPatchProcessingCommand.__init__(self, help_text, args_description, options)
+
+ def _prepare_to_process(self, options, args, tool):
+ # Check the tree status first so we can fail early.
+ EnsureBuildersAreGreenStep(tool, options).run()
+
+ def _process_patch(self, patch, options, args, tool):
+ sequence = LandingSequence(patch, options, tool)
+ sequence.run_and_handle_errors()
+
+
+class LandAttachment(AbstractPatchLandingCommand):
+ name = "land-attachment"
+ show_in_main_help = True
+ def __init__(self):
+ AbstractPatchLandingCommand.__init__(self, "Land patches from bugzilla, optionally building and testing them first", "ATTACHMENT_ID [ATTACHMENT_IDS]")
+
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
+
+
+class LandPatches(AbstractPatchLandingCommand):
+ name = "land-patches"
+ show_in_main_help = True
+ def __init__(self):
+ AbstractPatchLandingCommand.__init__(self, "Land all patches on the given bugs, optionally building and testing them first", "BUGID [BUGIDS]")
+
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ all_patches = []
+ for bug_id in args:
+ patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
+ log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
+ all_patches += patches
+ return all_patches
+
+
+# FIXME: Requires unit test.
+class Rollout(Command):
+ name = "rollout"
+ show_in_main_help = True
+ def __init__(self):
+ options = BuildSteps.cleaning_options()
+ options += BuildSteps.build_options()
+ options += BuildSteps.land_options()
+ options.append(make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Commit the revert and re-open the original bug."))
+ Command.__init__(self, "Revert the given revision in the working copy and optionally commit the revert and re-open the original bug", "REVISION [BUGID]", options=options)
+
+ @staticmethod
+ def _create_changelogs_for_revert(tool, revision):
+ # First, discard the ChangeLog changes from the rollout.
+ changelog_paths = tool.scm().modified_changelogs()
+ tool.scm().revert_files(changelog_paths)
+
+ # Second, make new ChangeLog entries for this rollout.
+ # This could move to prepare-ChangeLog by adding a --revert= option.
+ PrepareChangelogStep(tool, None).run()
+ for changelog_path in changelog_paths:
+ ChangeLog(changelog_path).update_for_revert(revision)
+
+ @staticmethod
+ def _parse_bug_id_from_revision_diff(tool, revision):
+ original_diff = tool.scm().diff_for_revision(revision)
+ return parse_bug_id(original_diff)
+
+ @staticmethod
+ def _reopen_bug_after_rollout(tool, bug_id, comment_text):
+ if bug_id:
+ tool.bugs.reopen_bug(bug_id, comment_text)
+ else:
+ log(comment_text)
+ log("No bugs were updated or re-opened to reflect this rollout.")
+
+ def execute(self, options, args, tool):
+ revision = args[0]
+ bug_id = self._parse_bug_id_from_revision_diff(tool, revision)
+ if options.complete_rollout:
+ if bug_id:
+ log("Will re-open bug %s after rollout." % bug_id)
+ else:
+ log("Failed to parse bug number from diff. No bugs will be updated/reopened after the rollout.")
+
+ CleanWorkingDirectoryStep(tool, options).run()
+ UpdateStep(tool, options).run()
+ tool.scm().apply_reverse_diff(revision)
+ self._create_changelogs_for_revert(tool, revision)
+
+ # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout.
+ # Once we trust rollout we will remove this option.
+ if not options.complete_rollout:
+ log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"bugzilla-tool land-diff %s\" to commit the rollout." % bug_id)
+ else:
+ # FIXME: This function does not exist!!
+ # comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options)
+ raise ScriptError("OOPS! This option is not implemented (yet).")
+ self._reopen_bug_after_rollout(tool, bug_id, comment_text)
diff --git a/WebKitTools/Scripts/modules/commands/download_unittest.py b/WebKitTools/Scripts/modules/commands/download_unittest.py
new file mode 100644
index 0000000..a1ed41a
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/download_unittest.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.commands.commandtest import CommandsTest
+from modules.commands.download import *
+from modules.mock import Mock
+
+class DownloadCommandsTest(CommandsTest):
+ def _default_options(self):
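+ # The Mock stands in for an optparse-style options object; each attribute
+ # below mirrors a command-line flag that the download commands read.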
+ options = Mock()
+ options.force_clean = False
+ options.clean = True
+ options.check_builders = True
+ options.quiet = False
+ options.non_interactive = False
+ options.update = True
+ options.build = True
+ options.test = True
+ options.close_bug = True
+ return options
+
+ def test_build(self):
+ self.assert_execute_outputs(Build(), [], options=self._default_options())
+
+ def test_apply_attachment(self):
+ options = self._default_options()
+ options.update = True
+ options.local_commit = True
+ self.assert_execute_outputs(ApplyAttachment(), [197], options=options)
+
+ def test_apply_patches(self):
+ options = self._default_options()
+ options.update = True
+ options.local_commit = True
+ self.assert_execute_outputs(ApplyPatches(), [42], options=options)
+
+ def test_land_diff(self):
+ self.assert_execute_outputs(LandDiff(), [42], options=self._default_options())
+
+ def test_check_style(self):
+ self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options())
+
+ def test_build_attachment(self):
+ self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options())
+
+ def test_land_attachment(self):
+ self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options())
+
+ def test_land_patches(self):
+ self.assert_execute_outputs(LandPatches(), [42], options=self._default_options())
diff --git a/WebKitTools/Scripts/modules/commands/early_warning_system.py b/WebKitTools/Scripts/modules/commands/early_warning_system.py
new file mode 100644
index 0000000..e8ef408
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/early_warning_system.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from modules.commands.queues import AbstractReviewQueue
+from modules.executive import ScriptError
+from modules.webkitport import WebKitPort
+
+class AbstractEarlyWarningSystem(AbstractReviewQueue):
+ def __init__(self):
+ AbstractReviewQueue.__init__(self)
+ self.port = WebKitPort.port(self.port_name)
+
+ def should_proceed_with_work_item(self, patch):
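+ # Build a clean tree first so that a subsequent failure can be attributed
+ # to the patch rather than to pre-existing breakage.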
+ try:
+ self.run_bugzilla_tool(["build", self.port.flag(), "--force-clean", "--quiet"])
+ except ScriptError, e:
+ return (False, "Unable to perform a build.", None)
+ return (True, "Building patch %s on bug %s." % (patch["id"], patch["bug_id"]), patch)
+
+ def process_work_item(self, patch):
+ self.run_bugzilla_tool([
+ "build-attachment",
+ self.port.flag(),
+ "--force-clean",
+ "--quiet",
+ "--non-interactive",
+ "--parent-command=%s" % self.name,
+ "--no-update",
+ patch["id"]])
+ self._patches.did_pass(patch)
+
+
+class QtEWS(AbstractEarlyWarningSystem):
+ name = "qt-ews"
+ port_name = "qt"
+
+
+class ChromiumEWS(AbstractEarlyWarningSystem):
+ name = "chromium-ews"
+ port_name = "chromium"
diff --git a/WebKitTools/Scripts/modules/commands/queries.py b/WebKitTools/Scripts/modules/commands/queries.py
new file mode 100644
index 0000000..98310e3
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/queries.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from optparse import make_option
+
+from modules.buildbot import BuildBot
+from modules.committers import CommitterList
+from modules.logging import log
+from modules.multicommandtool import Command
+
+
+class BugsToCommit(Command):
+ name = "bugs-to-commit"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "List bugs in the commit-queue")
+
+ def execute(self, options, args, tool):
+ bug_ids = tool.bugs.fetch_bug_ids_from_commit_queue()
+ for bug_id in bug_ids:
+ print "%s" % bug_id
+
+
+class PatchesToCommit(Command):
+ name = "patches-to-commit"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "List patches in the commit-queue")
+
+ def execute(self, options, args, tool):
+ patches = tool.bugs.fetch_patches_from_commit_queue()
+ log("Patches in commit queue:")
+ for patch in patches:
+ print "%s" % patch["url"]
+
+
+class PatchesToCommitQueue(Command):
+ name = "patches-to-commit-queue"
+ show_in_main_help = False
+ def __init__(self):
+ options = [
+ make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
+ ]
+ Command.__init__(self, "Patches which should be added to the commit queue", options=options)
+
+ @staticmethod
+ def _needs_commit_queue(patch):
+ commit_queue_flag = patch.get("commit-queue")
+ if (commit_queue_flag and commit_queue_flag == '+'): # If it's already cq+, ignore the patch.
+ log("%s already has cq=%s" % (patch["id"], commit_queue_flag))
+ return False
+
+ # We only need to worry about patches from contributors who are not yet committers.
+ committer_record = CommitterList().committer_by_email(patch["attacher_email"])
+ if committer_record:
+ log("%s committer = %s" % (patch["id"], committer_record))
+ return not committer_record
+
+ def execute(self, options, args, tool):
+ patches = tool.bugs.fetch_patches_from_pending_commit_list()
+ patches_needing_cq = filter(self._needs_commit_queue, patches)
+ if options.bugs:
+ bugs_needing_cq = map(lambda patch: patch['bug_id'], patches_needing_cq)
+ bugs_needing_cq = sorted(set(bugs_needing_cq))
+ for bug_id in bugs_needing_cq:
+ print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
+ else:
+ for patch in patches_needing_cq:
+ print "%s" % tool.bugs.attachment_url_for_id(patch["id"], action="edit")
+
+
+class PatchesToReview(Command):
+ name = "patches-to-review"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "List patches that are pending review")
+
+ def execute(self, options, args, tool):
+ patch_ids = tool.bugs.fetch_attachment_ids_from_review_queue()
+ log("Patches pending review:")
+ for patch_id in patch_ids:
+ print patch_id
+
+
+class ReviewedPatches(Command):
+ name = "reviewed-patches"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "List r+'d patches on a bug", "BUGID")
+
+ def execute(self, options, args, tool):
+ bug_id = args[0]
+ patches_to_land = tool.bugs.fetch_reviewed_patches_from_bug(bug_id)
+ for patch in patches_to_land:
+ print "%s" % patch["url"]
+
+
+class TreeStatus(Command):
+ name = "tree-status"
+ show_in_main_help = True
+ def __init__(self):
+ Command.__init__(self, "Print the status of the %s buildbots" % BuildBot.default_host)
+
+ def execute(self, options, args, tool):
+ for builder in tool.buildbot.builder_statuses():
+ status_string = "ok" if builder["is_green"] else "FAIL"
+ print "%s : %s" % (status_string.ljust(4), builder["name"])
diff --git a/WebKitTools/Scripts/modules/commands/queries_unittest.py b/WebKitTools/Scripts/modules/commands/queries_unittest.py
new file mode 100644
index 0000000..0d1c82a
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/queries_unittest.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.bugzilla import Bugzilla
+from modules.commands.commandtest import CommandsTest
+from modules.commands.queries import *
+from modules.mock import Mock
+from modules.mock_bugzillatool import MockBugzillaTool
+
+class QueryCommandsTest(CommandsTest):
+ def test_bugs_to_commit(self):
+ self.assert_execute_outputs(BugsToCommit(), None, "42\n75\n")
+
+ def test_patches_to_commit(self):
+ expected_stdout = "http://example.com/197\nhttp://example.com/128\n"
+ expected_stderr = "Patches in commit queue:\n"
+ self.assert_execute_outputs(PatchesToCommit(), None, expected_stdout, expected_stderr)
+
+ def test_patches_to_commit_queue(self):
+ expected_stdout = "http://example.com/197&action=edit\nhttp://example.com/128&action=edit\n"
+ expected_stderr = ""
+ options = Mock()
+ options.bugs = False
+ self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+ expected_stdout = "http://example.com/42\n"
+ options.bugs = True
+ self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+ def test_patches_to_review(self):
+ expected_stdout = "197\n128\n"
+ expected_stderr = "Patches pending review:\n"
+ self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr)
+
+ def test_reviewed_patches(self):
+ expected_stdout = "http://example.com/197\nhttp://example.com/128\n"
+ self.assert_execute_outputs(ReviewedPatches(), [42], expected_stdout)
+
+ def test_tree_status(self):
+ expected_stdout = "ok : Builder1\nok : Builder2\n"
+ self.assert_execute_outputs(TreeStatus(), None, expected_stdout)
diff --git a/WebKitTools/Scripts/modules/commands/queues.py b/WebKitTools/Scripts/modules/commands/queues.py
new file mode 100644
index 0000000..53b9e48
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/queues.py
@@ -0,0 +1,216 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+from datetime import datetime
+from optparse import make_option
+
+from modules.executive import ScriptError
+from modules.grammar import pluralize
+from modules.landingsequence import LandingSequence, LandingSequenceErrorHandler
+from modules.logging import error, log
+from modules.multicommandtool import Command
+from modules.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate
+from modules.statusbot import StatusBot
+from modules.workqueue import WorkQueue, WorkQueueDelegate
+
+class AbstractQueue(Command, WorkQueueDelegate):
+ show_in_main_help = False
+ watchers = "webkit-bot-watchers@googlegroups.com"
+ def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations
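+ # (A default of options=[] would be evaluated once and shared by every
+ # caller, so any later mutation of that list would leak between
+ # invocations; passing None and building a fresh list here avoids that.)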
+ options_list = (options or []) + [
+ make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
+ make_option("--status-host", action="store", type="string", dest="status_host", default=StatusBot.default_host, help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."),
+ ]
+ Command.__init__(self, "Run the %s" % self.name, options=options_list)
+
+ def _cc_watchers(self, bug_id):
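+ # A Bugzilla hiccup should not kill the queue; log the failure and continue.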
+ try:
+ self.tool.bugs.add_cc_to_bug(bug_id, self.watchers)
+ except Exception, e:
+ log("Failed to CC watchers: %s." % e)
+
+ def queue_log_path(self):
+ return "%s.log" % self.name
+
+ def work_logs_directory(self):
+ return "%s-logs" % self.name
+
+ def status_host(self):
+ return self.options.status_host
+
+ def begin_work_queue(self):
+ log("CAUTION: %s will discard all local changes in %s" % (self.name, self.tool.scm().checkout_root))
+ if self.options.confirm:
+ response = raw_input("Are you sure? Type \"yes\" to continue: ")
+ if (response != "yes"):
+ error("User declined.")
+ log("Running WebKit %s. %s" % (self.name, datetime.now().strftime(WorkQueue.log_date_format)))
+
+ def should_continue_work_queue(self):
+ return True
+
+ def next_work_item(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_proceed_with_work_item(self, work_item):
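+ # Concrete queues return a (proceed, status_message, work_item) tuple;
+ # see CommitQueue and the EWS/style queues for examples.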
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def handle_unexpected_error(self, work_item, message):
+ raise NotImplementedError, "subclasses must implement"
+
+ def run_bugzilla_tool(self, args):
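+ # Arguments may include integers (e.g. attachment ids), so stringify them
+ # before handing the command line to the executive.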
+ bugzilla_tool_args = [self.tool.path()] + map(str, args)
+ self.tool.executive.run_and_throw_if_fail(bugzilla_tool_args)
+
+ def log_progress(self, patch_ids):
+ log("%s in %s [%s]" % (pluralize("patch", len(patch_ids)), self.name, ", ".join(map(str, patch_ids))))
+
+ def execute(self, options, args, tool):
+ self.options = options
+ self.tool = tool
+ work_queue = WorkQueue(self.name, self)
+ return work_queue.run()
+
+
+class CommitQueue(AbstractQueue, LandingSequenceErrorHandler):
+ name = "commit-queue"
+ def __init__(self):
+ AbstractQueue.__init__(self)
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+
+ def next_work_item(self):
+ patches = self.tool.bugs.fetch_patches_from_commit_queue(reject_invalid_patches=True)
+ if not patches:
+ return None
+ # Only bother logging if we have patches in the queue.
+ self.log_progress([patch['id'] for patch in patches])
+ return patches[0]
+
+ def should_proceed_with_work_item(self, patch):
+ red_builders_names = self.tool.buildbot.red_core_builders_names()
+ if red_builders_names:
+ red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names.
+ return (False, "Builders [%s] are red. See http://build.webkit.org." % ", ".join(red_builders_names), None)
+ return (True, "Landing patch %s from bug %s." % (patch["id"], patch["bug_id"]), patch)
+
+ def process_work_item(self, patch):
+ self._cc_watchers(patch["bug_id"])
+ self.run_bugzilla_tool(["land-attachment", "--force-clean", "--non-interactive", "--parent-command=commit-queue", "--quiet", patch["id"]])
+
+ def handle_unexpected_error(self, patch, message):
+ self.tool.bugs.reject_patch_from_commit_queue(patch["id"], message)
+
+ # LandingSequenceErrorHandler methods
+
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ tool.bugs.reject_patch_from_commit_queue(patch["id"], script_error.message_with_output())
+
+
+class AbstractReviewQueue(AbstractQueue, PersistentPatchCollectionDelegate, LandingSequenceErrorHandler):
+ def __init__(self, options=None):
+ AbstractQueue.__init__(self, options)
+
+ # PersistentPatchCollectionDelegate methods
+
+ def collection_name(self):
+ return self.name
+
+ def fetch_potential_patch_ids(self):
+ return self.tool.bugs.fetch_attachment_ids_from_review_queue()
+
+ def status_server(self):
+ return self.tool.status()
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+ self.tool.status().set_host(self.options.status_host)
+ self._patches = PersistentPatchCollection(self)
+
+ def next_work_item(self):
+ patch_id = self._patches.next()
+ if patch_id:
+ return self.tool.bugs.fetch_attachment(patch_id)
+
+ def should_proceed_with_work_item(self, patch):
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, patch):
+ raise NotImplementedError, "subclasses must implement"
+
+ def handle_unexpected_error(self, patch, message):
+ log(message)
+
+ # LandingSequenceErrorHandler methods
+
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ log(script_error.message_with_output())
+
+
+class StyleQueue(AbstractReviewQueue):
+ name = "style-queue"
+ def __init__(self):
+ AbstractReviewQueue.__init__(self)
+
+ def should_proceed_with_work_item(self, patch):
+ return (True, "Checking style for patch %s on bug %s." % (patch["id"], patch["bug_id"]), patch)
+
+ def process_work_item(self, patch):
+ try:
+ self.run_bugzilla_tool(["check-style", "--force-clean", "--non-interactive", "--parent-command=style-queue", patch["id"]])
+ message = "%s ran check-webkit-style on attachment %s without any errors." % (self.name, patch["id"])
+ self.tool.bugs.post_comment_to_bug(patch["bug_id"], message, cc=self.watchers)
+ self._patches.did_pass(patch)
+ except ScriptError, e:
+ self._patches.did_fail(patch)
+ raise e
+
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ command = script_error.script_args
+ if type(command) is list:
+ command = command[0]
+ # FIXME: We shouldn't need to use a regexp here. ScriptError should
+ # have a better API.
+ if re.search("check-webkit-style", command):
+ message = "Attachment %s did not pass %s:\n\n%s" % (patch["id"], cls.name, script_error.message_with_output(output_limit=5*1024))
+ tool.bugs.post_comment_to_bug(patch["bug_id"], message, cc=cls.watchers)
diff --git a/WebKitTools/Scripts/modules/commands/queues_unittest.py b/WebKitTools/Scripts/modules/commands/queues_unittest.py
new file mode 100644
index 0000000..75abbe5
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/queues_unittest.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.commands.commandtest import CommandsTest
+from modules.commands.queues import *
+from modules.mock_bugzillatool import MockBugzillaTool
+from modules.outputcapture import OutputCapture
+
+
+class TestQueue(AbstractQueue):
+ name = "test-queue"
+
+
+class AbstractQueueTest(CommandsTest):
+ def _assert_output(self, function, args, expected_stdout="", expected_stderr=""):
+ capture = OutputCapture()
+ capture.capture_output()
+ function(*args)
+ (stdout_string, stderr_string) = capture.restore_output()
+ self.assertEqual(stdout_string, expected_stdout)
+ self.assertEqual(stderr_string, expected_stderr)
+
+ def _assert_log_progress_output(self, patch_ids, progress_output):
+ self._assert_output(TestQueue().log_progress, [patch_ids], expected_stderr=progress_output)
+
+ def test_log_progress(self):
+ self._assert_log_progress_output([1,2,3], "3 patches in test-queue [1, 2, 3]\n")
+ self._assert_log_progress_output(["1","2","3"], "3 patches in test-queue [1, 2, 3]\n")
+ self._assert_log_progress_output([1], "1 patch in test-queue [1]\n")
+
+ def _assert_run_bugzilla_tool_output(self, run_args, tool_output):
+ queue = TestQueue()
+ queue.bind_to_tool(MockBugzillaTool())
+ # MockBugzillaTool.path() is "echo"
+ self._assert_output(queue.run_bugzilla_tool, [run_args], expected_stdout=tool_output)
+
+ def test_run_bugzilla_tool(self):
+ self._assert_run_bugzilla_tool_output([1], "")
+ self._assert_run_bugzilla_tool_output(["one", 2], "")
diff --git a/WebKitTools/Scripts/modules/commands/upload.py b/WebKitTools/Scripts/modules/commands/upload.py
new file mode 100644
index 0000000..1f892a1
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/upload.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+import sys
+
+from optparse import make_option
+
+from modules.bugzilla import parse_bug_id
+from modules.grammar import pluralize
+from modules.logging import error, log
+from modules.multicommandtool import Command
+
+# FIXME: Requires unit test.
+class CommitMessageForCurrentDiff(Command):
+ name = "commit-message"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "Print a commit message suitable for the uncommitted changes")
+
+ def execute(self, options, args, tool):
+ os.chdir(tool.scm().checkout_root)
+ print "%s" % tool.scm().commit_message_for_this_commit().message()
+
+
+class ObsoleteAttachments(Command):
+ name = "obsolete-attachments"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "Mark all attachments on a bug as obsolete", "BUGID")
+
+ def execute(self, options, args, tool):
+ bug_id = args[0]
+ attachments = tool.bugs.fetch_attachments_from_bug(bug_id)
+ for attachment in attachments:
+ if not attachment["is_obsolete"]:
+ tool.bugs.obsolete_attachment(attachment["id"])
+
+
+class PostDiff(Command):
+ name = "post-diff"
+ show_in_main_help = True
+ def __init__(self):
+ options = [
+ make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: \"patch\")"),
+ ]
+ options += self.posting_options()
+ Command.__init__(self, "Attach the current working directory diff to a bug as a patch file", "[BUGID]", options=options)
+
+ @staticmethod
+ def posting_options():
+ return [
+ make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one."),
+ make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
+ make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
+ ]
+
+ @staticmethod
+ def obsolete_patches_on_bug(bug_id, bugs):
+ patches = bugs.fetch_patches_from_bug(bug_id)
+ if len(patches):
+ log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id))
+ for patch in patches:
+ bugs.obsolete_attachment(patch["id"])
+
+ def execute(self, options, args, tool):
+ # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
+ bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch())
+ if not bug_id:
+ error("No bug id passed and no bug url found in diff, can't post.")
+
+ if options.obsolete_patches:
+ self.obsolete_patches_on_bug(bug_id, tool.bugs)
+
+ diff = tool.scm().create_patch()
+ diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+
+ description = options.description or "Patch"
+ tool.bugs.add_patch_to_bug(bug_id, diff_file, description, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+
+class PostCommits(Command):
+ name = "post-commits"
+ show_in_main_help = True
+ def __init__(self):
+ options = [
+ make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
+ make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
+ make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
+ ]
+ options += PostDiff.posting_options()
+ Command.__init__(self, "Attach a range of local commits to bugs as patch files", "COMMITISH", options=options, requires_local_commits=True)
+
+ def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
+ comment_text = None
+ if (options.add_log_as_comment):
+ comment_text = commit_message.body(lstrip=True)
+ comment_text += "---\n"
+ comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+ return comment_text
+
+ def _diff_file_for_commit(self, tool, commit_id):
+ diff = tool.scm().create_patch_from_local_commit(commit_id)
+ return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+
+ def execute(self, options, args, tool):
+ commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+ if len(commit_ids) > 10: # We could lower this limit; even 10 patches is a lot for one bug.
+ error("bugzilla-tool does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids))))
+
+ have_obsoleted_patches = set()
+ for commit_id in commit_ids:
+ commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+
+ # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
+ bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id))
+ if not bug_id:
+ log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
+ continue
+
+ if options.obsolete_patches and bug_id not in have_obsoleted_patches:
+ PostDiff.obsolete_patches_on_bug(bug_id, tool.bugs)
+ have_obsoleted_patches.add(bug_id)
+
+ diff_file = self._diff_file_for_commit(tool, commit_id)
+ description = options.description or commit_message.description(lstrip=True, strip_url=True)
+ comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
+ tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+
+class MarkFixed(Command):
+ name = "mark-fixed"
+ show_in_main_help = False
+ def __init__(self):
+ Command.__init__(self, "Mark the specified bug as fixed", "BUG_ID REASON")
+
+ def execute(self, options, args, tool):
+ tool.bugs.close_bug_as_fixed(args[0], args[1])
+
+
+# FIXME: Requires unit test. Blocking issue: too complex for now.
+class CreateBug(Command):
+ name = "create-bug"
+ show_in_main_help = True
+ def __init__(self):
+ options = [
+ make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy."),
+ make_option("--component", action="store", type="string", dest="component", help="Component for the new bug."),
+ make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
+ make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
+ make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
+ ]
+ Command.__init__(self, "Create a bug from local changes or local commits", "[COMMITISH]", options=options)
+
+ def create_bug_from_commit(self, options, args, tool):
+ commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+ if len(commit_ids) > 3:
+ error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
+
+ commit_id = commit_ids[0]
+
+ bug_title = ""
+ comment_text = ""
+ if options.prompt:
+ (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+ else:
+ commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+ bug_title = commit_message.description(lstrip=True, strip_url=True)
+ comment_text = commit_message.body(lstrip=True)
+ comment_text += "---\n"
+ comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+
+ diff = tool.scm().create_patch_from_local_commit(commit_id)
+ diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object
+ bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+ if bug_id and len(commit_ids) > 1:
+ options.bug_id = bug_id
+ options.obsolete_patches = False
+ # FIXME: We should pass through --no-comment switch as well.
+ PostCommits.execute(self, options, commit_ids[1:], tool)
+
+ def create_bug_from_patch(self, options, args, tool):
+ bug_title = ""
+ comment_text = ""
+ if options.prompt:
+ (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+ else:
+ commit_message = tool.scm().commit_message_for_this_commit()
+ bug_title = commit_message.description(lstrip=True, strip_url=True)
+ comment_text = commit_message.body(lstrip=True)
+
+ diff = tool.scm().create_patch()
+ diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object
+ bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+ def prompt_for_bug_title_and_comment(self):
+ bug_title = raw_input("Bug title: ")
+ print "Bug comment (hit ^D on blank line to end):"
+ lines = sys.stdin.readlines()
+ try:
+ sys.stdin.seek(0, os.SEEK_END)
+ except IOError:
+ # Cygwin raises an Illegal Seek (errno 29) exception when the above
+ # seek() call is made. Ignoring it seems to cause no harm.
+ # FIXME: Figure out a way to avoid the exception in the first
+ # place.
+ pass
+ comment_text = "".join(lines)
+ return (bug_title, comment_text)
+
+ def execute(self, options, args, tool):
+ if len(args):
+ if (not tool.scm().supports_local_commits()):
+ error("Extra arguments not supported; patch is taken from working directory.")
+ self.create_bug_from_commit(options, args, tool)
+ else:
+ self.create_bug_from_patch(options, args, tool)
diff --git a/WebKitTools/Scripts/modules/commands/upload_unittest.py b/WebKitTools/Scripts/modules/commands/upload_unittest.py
new file mode 100644
index 0000000..4d3f85c
--- /dev/null
+++ b/WebKitTools/Scripts/modules/commands/upload_unittest.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.commands.commandtest import CommandsTest
+from modules.commands.upload import *
+
+class UploadCommandsTest(CommandsTest):
+ def test_mark_fixed(self):
+ self.assert_execute_outputs(MarkFixed(), [43, "Test comment"])
+
+ def test_obsolete_attachments(self):
+ self.assert_execute_outputs(ObsoleteAttachments(), [42])
+
+ def test_post_diff(self):
+ self.assert_execute_outputs(PostDiff(), [42])
diff --git a/WebKitTools/Scripts/modules/committers.py b/WebKitTools/Scripts/modules/committers.py
index fc263eb..d32a536 100644
--- a/WebKitTools/Scripts/modules/committers.py
+++ b/WebKitTools/Scripts/modules/committers.py
@@ -29,128 +29,176 @@
# WebKit's Python module for committer and reviewer validation
class Committer:
- def __init__(self, name, email):
+ def __init__(self, name, email_or_emails):
self.full_name = name
- self.bugzilla_email = email
+ if isinstance(email_or_emails, str):
+ self.emails = [email_or_emails]
+ else:
+ self.emails = email_or_emails
self.can_review = False
def __str__(self):
- return '"%s" <%s>' % (self.full_name, self.bugzilla_email)
+ return '"%s" <%s>' % (self.full_name, self.emails[0])
class Reviewer(Committer):
- def __init__(self, name, email):
- Committer.__init__(self, name, email)
+ def __init__(self, name, email_or_emails):
+ Committer.__init__(self, name, email_or_emails)
self.can_review = True
-# This is intended as a cannonical, machine-readable list of all non-reviewer committers for WebKit.
+# This is intended as a canonical, machine-readable list of all non-reviewer committers for WebKit.
# If your name is missing here and you are a committer, please add it. No review needed.
# All reviewers are committers, so this list is only of committers who are not reviewers.
committers_unable_to_review = [
Committer("Aaron Boodman", "aa@chromium.org"),
Committer("Adam Langley", "agl@chromium.org"),
Committer("Albert J. Wong", "ajwong@chromium.org"),
+ Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"]),
+ Committer("Andre Boule", "aboule@apple.com"),
+ Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"]),
+ Committer("Anthony Ricaud", "rik@webkit.org"),
Committer("Anton Muhin", "antonm@chromium.org"),
Committer("Antonio Gomes", "tonikitoo@webkit.org"),
- Committer("Anthony Ricaud", "rik@webkit.org"),
Committer("Ben Murdoch", "benm@google.com"),
- Committer("Chris Fleizach", "cfleizach@apple.com"),
+ Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org"]),
Committer("Brent Fulgham", "bfulgham@webkit.org"),
+ Committer("Brett Wilson", "brettw@chromium.org"),
Committer("Brian Weinstein", "bweinstein@apple.com"),
Committer("Cameron McCormack", "cam@webkit.org"),
+ Committer("Chris Fleizach", "cfleizach@apple.com"),
+ Committer("Chris Marrin", "cmarrin@apple.com"),
+ Committer("Chris Petersen", "cpetersen@apple.com"),
+ Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org"]),
Committer("Collin Jackson", "collinj@webkit.org"),
Committer("Csaba Osztrogonac", "ossy@webkit.org"),
Committer("Daniel Bates", "dbates@webkit.org"),
+ Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"]),
+ Committer("Dean Jackson", "dino@apple.com"),
Committer("Drew Wilson", "atwilson@chromium.org"),
- Committer("Dirk Schulze", "krit@webkit.org"),
- Committer("Dmitry Titov", "dimich@chromium.org"),
Committer("Dumitru Daniliuc", "dumi@chromium.org"),
Committer("Eli Fidler", "eli@staikos.net"),
+ Committer("Enrica Casucci", "enrica@apple.com"),
+ Committer("Erik Arvidsson", "arv@chromium.org"),
Committer("Eric Roman", "eroman@chromium.org"),
+ Committer("Feng Qian", "feng@chromium.org"),
Committer("Fumitoshi Ukai", "ukai@chromium.org"),
+ Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"]),
+ Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]),
Committer("Greg Bolsinga", "bolsinga@apple.com"),
- Committer("Jeremy Moskovich", "playmobil@google.com"),
+ Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]),
+ Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]),
+ Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"]),
Committer("Jeremy Orlow", "jorlow@chromium.org"),
+ Committer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"]),
Committer("Jian Li", "jianli@chromium.org"),
Committer("John Abd-El-Malek", "jam@chromium.org"),
+ Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"]),
Committer("Joseph Pecoraro", "joepeck@webkit.org"),
- Committer("Julie Parent", "jparent@google.com"),
- Committer("Kenneth Rohde Christiansen", "kenneth@webkit.org"),
+ Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"]),
+ Committer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com"]),
+ Committer("Jungshik Shin", "jshin@chromium.org"),
+ Committer("Keishi Hattori", "keishi@webkit.org"),
+ Committer("Kelly Norton", "knorton@google.com"),
Committer("Kent Tamura", "tkent@chromium.org"),
+ Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"),
Committer("Laszlo Gombos", "laszlo.1.gombos@nokia.com"),
+ Committer("Levi Weintraub", "lweintraub@apple.com"),
Committer("Mads Ager", "ager@chromium.org"),
+ Committer("Matt Lilek", ["webkit@mattlilek.com", "pewtermoose@webkit.org"]),
+ Committer("Matt Perry", "mpcomplete@chromium.org"),
+ Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]),
+ Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"]),
+ Committer("Michelangelo De Simone", "michelangelo@webkit.org"),
Committer("Mike Belshe", "mike@belshe.com"),
+ Committer("Mike Fenton", ["mike.fenton@torchmobile.com", "mifenton@rim.com"]),
+ Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
Committer("Nate Chapin", "japhet@chromium.org"),
Committer("Ojan Vafai", "ojan@chromium.org"),
Committer("Pam Greene", "pam@chromium.org"),
- Committer("Peter Kasting", "pkasting@google.com"),
- Committer("Pierre d'Herbemont", "pdherbemont@free.fr"),
+ Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"]),
+ Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"]),
+ Committer("Pierre-Olivier Latour", "pol@apple.com"),
Committer("Roland Steiner", "rolandsteiner@chromium.org"),
Committer("Ryosuke Niwa", "rniwa@webkit.org"),
Committer("Scott Violet", "sky@chromium.org"),
Committer("Shinichiro Hamaji", "hamaji@chromium.org"),
+ Committer("Stephen White", "senorblanco@chromium.org"),
Committer("Steve Block", "steveblock@google.com"),
Committer("Tony Chang", "tony@chromium.org"),
+ Committer("Trey Matteson", "trey@usa.net"),
+ Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]),
+ Committer("William Siegrist", "wsiegrist@apple.com"),
Committer("Yael Aharon", "yael.aharon@nokia.com"),
- Committer("Yong Li", "yong.li@torchmobile.com"),
+ Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),
+ Committer("Yong Li", ["yong.li@torchmobile.com", "yong.li.webkit@gmail.com"]),
+ Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"),
Committer("Yury Semikhatsky", "yurys@chromium.org"),
Committer("Zoltan Horvath", "zoltan@webkit.org"),
]
-# This is intended as a cannonical, machine-readable list of all reviewers for WebKit.
+# This is intended as a canonical, machine-readable list of all reviewers for WebKit.
# If your name is missing here and you are a reviewer, please add it. No review needed.
reviewers_list = [
- Reviewer("Adam Barth", "abarth@webkit.org"),
Reviewer("Ada Chan", "adachan@apple.com"),
+ Reviewer("Adam Barth", "abarth@webkit.org"),
Reviewer("Adam Roben", "aroben@apple.com"),
- Reviewer("Adam Treat", "treat@kde.org"),
+ Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org"]),
Reviewer("Adele Peterson", "adele@apple.com"),
- Reviewer("Alexey Proskuryakov", "ap@webkit.org"),
+ Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"]),
Reviewer("Alice Liu", "alice.liu@apple.com"),
- Reviewer("Alp Toker", "alp@nuanti.com"),
- Reviewer("Anders Carlsson", "andersca@apple.com"),
- Reviewer("Antti Koivisto", "koivisto@iki.fi"),
- Reviewer("Ariya Hidayat", "ariya.hidayat@trolltech.com"),
+ Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"]),
+ Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"]),
+ Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com"]),
+ Reviewer("Ariya Hidayat", ["ariya.hidayat@trolltech.com", "ariya.hidayat@gmail.com", "ariya@webkit.org"]),
+ Reviewer("Beth Dakin", "bdakin@apple.com"),
Reviewer("Brady Eidson", "beidson@apple.com"),
- Reviewer("Cameron Zwarich", "zwarich@apple.com"),
- Reviewer("Dan Bernstein", "mitz@webkit.org"),
+ Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
+ Reviewer("Chris Blumenberg", "cblu@apple.com"),
+ Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"]),
Reviewer("Darin Adler", "darin@apple.com"),
- Reviewer("Darin Fisher", "fishd@chromium.org"),
+ Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"]),
Reviewer("David Harrison", "harrison@apple.com"),
Reviewer("David Hyatt", "hyatt@apple.com"),
- Reviewer("David Kilzer", "ddkilzer@webkit.org"),
+ Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"]),
Reviewer("David Levin", "levin@chromium.org"),
Reviewer("Dimitri Glazkov", "dglazkov@chromium.org"),
+ Reviewer("Dirk Schulze", "krit@webkit.org"),
+ Reviewer("Dmitry Titov", "dimich@chromium.org"),
Reviewer("Don Melton", "gramps@apple.com"),
- Reviewer("Dmitri Titov", "dimich@chromium.org"),
Reviewer("Eric Carlson", "eric.carlson@apple.com"),
Reviewer("Eric Seidel", "eric@webkit.org"),
Reviewer("Gavin Barraclough", "barraclough@apple.com"),
Reviewer("Geoffrey Garen", "ggaren@apple.com"),
- Reviewer("George Staikos", "staikos@kde.org"),
- Reviewer("Gustavo Noronha", "gns@gnome.org"),
- Reviewer("Holger Freyther", "zecke@selfish.org"),
- Reviewer("Jan Alonzo", "jmalonzo@gmail.com"),
+ Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]),
+ Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org"]),
+ Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"]),
+ Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"]),
Reviewer("John Sullivan", "sullivan@apple.com"),
Reviewer("Jon Honeycutt", "jhoneycutt@apple.com"),
Reviewer("Justin Garcia", "justin.garcia@apple.com"),
+ Reviewer("Ken Kocienda", "kocienda@apple.com"),
+ Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.christiansen@openbossa.org"]),
Reviewer("Kevin Decker", "kdecker@apple.com"),
Reviewer("Kevin McCullough", "kmccullough@apple.com"),
- Reviewer("Kevin Ollivier", "kevino@theolliviers.com"),
- Reviewer("Lars Knoll", "lars@trolltech.com"),
+ Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"]),
+ Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org"]),
Reviewer("Maciej Stachowiak", "mjs@apple.com"),
Reviewer("Mark Rowe", "mrowe@apple.com"),
- Reviewer("Nikolas Zimmermann", "zimmermann@kde.org"),
+ Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org"]),
Reviewer("Oliver Hunt", "oliver@apple.com"),
Reviewer("Pavel Feldman", "pfeldman@chromium.org"),
- Reviewer("Rob Buis", "rwlbuis@gmail.com"),
- Reviewer("Sam Weinig", "sam@webkit.org"),
+ Reviewer("Richard Williamson", "rjw@apple.com"),
+ Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org"]),
+ Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"]),
Reviewer("Simon Fraser", "simon.fraser@apple.com"),
- Reviewer("Simon Hausmann", "hausmann@webkit.org"),
+ Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org"]),
Reviewer("Stephanie Lewis", "slewis@apple.com"),
Reviewer("Steve Falkenburg", "sfalken@apple.com"),
- Reviewer("Timothy Hatcher", "timothy@hatcher.name"),
+ Reviewer("Tim Omernick", "timo@apple.com"),
+ Reviewer("Timothy Hatcher", ["timothy@hatcher.name", "timothy@apple.com"]),
Reviewer(u'Tor Arne Vestb\xf8', "vestbo@webkit.org"),
- Reviewer("Xan Lopez", "xan.lopez@gmail.com"),
+ Reviewer("Vicki Murley", "vicki@apple.com"),
+ Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org"]),
+ Reviewer("Zack Rusin", "zack@kde.org"),
]
@@ -164,17 +212,21 @@ class CommitterList:
def committers(self):
return self._committers
+ def reviewers(self):
+ return self._reviewers
+
def _email_to_committer_map(self):
if not len(self._committers_by_email):
for committer in self._committers:
- self._committers_by_email[committer.bugzilla_email] = committer
+ for email in committer.emails:
+ self._committers_by_email[email] = committer
return self._committers_by_email
- def committer_by_bugzilla_email(self, bugzilla_email):
- return self._email_to_committer_map().get(bugzilla_email)
+ def committer_by_email(self, email):
+ return self._email_to_committer_map().get(email)
- def reviewer_by_bugzilla_email(self, bugzilla_email):
- committer = self.committer_by_bugzilla_email(bugzilla_email)
+ def reviewer_by_email(self, email):
+ committer = self.committer_by_email(email)
if committer and not committer.can_review:
return None
return committer
diff --git a/WebKitTools/Scripts/modules/committers_unittest.py b/WebKitTools/Scripts/modules/committers_unittest.py
index 045e20e..cf9f486 100644
--- a/WebKitTools/Scripts/modules/committers_unittest.py
+++ b/WebKitTools/Scripts/modules/committers_unittest.py
@@ -33,20 +33,30 @@ class CommittersTest(unittest.TestCase):
def test_committer_lookup(self):
committer = Committer('Test One', 'one@test.com')
- reviewer = Reviewer('Test Two', 'two@test.com')
+ reviewer = Reviewer('Test Two', ['two@test.com', 'two@rad.com', 'so_two@gmail.com'])
committer_list = CommitterList(committers=[committer], reviewers=[reviewer])
# Test valid committer and reviewer lookup
- self.assertEqual(committer_list.committer_by_bugzilla_email('one@test.com'), committer)
- self.assertEqual(committer_list.reviewer_by_bugzilla_email('two@test.com'), reviewer)
- self.assertEqual(committer_list.committer_by_bugzilla_email('two@test.com'), reviewer)
+ self.assertEqual(committer_list.committer_by_email('one@test.com'), committer)
+ self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer)
+ self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer)
+ self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer)
+ self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer)
# Test that a known committer is not returned during reviewer lookup
- self.assertEqual(committer_list.reviewer_by_bugzilla_email('one@test.com'), None)
+ self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None)
# Test that unknown email address fail both committer and reviewer lookup
- self.assertEqual(committer_list.committer_by_bugzilla_email('bar@bar.com'), None)
- self.assertEqual(committer_list.reviewer_by_bugzilla_email('bar@bar.com'), None)
+ self.assertEqual(committer_list.committer_by_email('bar@bar.com'), None)
+ self.assertEqual(committer_list.reviewer_by_email('bar@bar.com'), None)
+
+ # Test that emails returns a list.
+ self.assertEqual(committer.emails, ['one@test.com'])
+
+ # Test that committers() returns both committers and reviewers, and reviewers() returns only reviewers.
+ self.assertEqual(committer_list.committers(), [committer, reviewer])
+ self.assertEqual(committer_list.reviewers(), [reviewer])
+
if __name__ == '__main__':
unittest.main()
diff --git a/WebKitTools/Scripts/modules/cpp_style.py b/WebKitTools/Scripts/modules/cpp_style.py
index 485b07c..d8ca8d1 100644
--- a/WebKitTools/Scripts/modules/cpp_style.py
+++ b/WebKitTools/Scripts/modules/cpp_style.py
@@ -130,6 +130,7 @@ _ERROR_CATEGORIES = '''\
readability/function
readability/multiline_comment
readability/multiline_string
+ readability/naming
readability/null
readability/streams
readability/todo
@@ -243,14 +244,14 @@ _PRIMARY_HEADER = 1
_OTHER_HEADER = 2
+# The regexp compilation caching is inlined in all regexp functions for
+# performance reasons; factoring it out into a separate function turns out
+# to be noticeably expensive.
_regexp_compile_cache = {}
def match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
- # The regexp compilation caching is inlined in both match and search for
- # performance reasons; factoring it out into a separate function turns out
- # to be noticeably expensive.
if not pattern in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
@@ -263,6 +264,20 @@ def search(pattern, s):
return _regexp_compile_cache[pattern].search(s)
+def sub(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].sub(replacement, s)
+
+
+def subn(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].subn(replacement, s)
+
+
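For illustration, a minimal sketch (assuming modules/cpp_style.py is importable from WebKitTools/Scripts) of the cached-regexp helpers that the new sub() and subn() wrappers extend:

    from modules.cpp_style import match, sub, subn

    # match() compiles the pattern once and reuses it on later calls.
    assert match(r'\s*#\s*(?:include|import)', '#import <Foo/Bar.h>')
    # sub() and subn() share the same _regexp_compile_cache.
    print sub(r'[-./\s]', '_', 'Foo/Bar-baz.h')                              # Foo_Bar_baz_h
    print subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', 'Vector<Node> nodes')[1]   # 1 replacement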
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
@@ -868,7 +883,7 @@ def get_header_guard_cpp_variable(filename):
"""
fileinfo = FileInfo(filename)
- return re.sub(r'[-./\s]', '_', fileinfo.repository_name()).upper() + '_'
+ return sub(r'[-./\s]', '_', fileinfo.repository_name()).upper() + '_'
def check_for_header_guard(filename, lines, error):
@@ -1119,6 +1134,16 @@ class _ClassState(object):
self.classinfo_stack[0].name)
+class _FileState(object):
+ def __init__(self):
+ self._did_inside_namespace_indent_warning = False
+
+ def set_did_inside_namespace_indent_warning(self):
+ self._did_inside_namespace_indent_warning = True
+
+ def did_inside_namespace_indent_warning(self):
+ return self._did_inside_namespace_indent_warning
+
def check_for_non_standard_constructs(filename, clean_lines, line_number,
class_state, error):
"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
@@ -1532,10 +1557,10 @@ def check_spacing(filename, clean_lines, line_number, error):
line = clean_lines.elided[line_number] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
- line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
- # Don't try to do spacing checks for #include statements at minimum it
- # messes up checks for spacing around /
- if match(r'\s*#\s*include', line):
+ line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+ # Don't try to do spacing checks for #include or #import statements
+ # because, at a minimum, they mess up checks for spacing around /
+ if match(r'\s*#\s*(?:include|import)', line):
return
if search(r'[\w.]=[\w.]', line):
error(filename, line_number, 'whitespace/operators', 4,
@@ -1675,7 +1700,7 @@ def get_previous_non_blank_line(clean_lines, line_number):
return ('', -1)
-def check_namespace_indentation(filename, clean_lines, line_number, file_extension, error):
+def check_namespace_indentation(filename, clean_lines, line_number, file_extension, file_state, error):
"""Looks for indentation errors inside of namespaces.
Args:
@@ -1683,6 +1708,8 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (dot not included) of the file.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
error: The function to call with any errors found.
"""
@@ -1694,8 +1721,10 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi
current_indentation_level = len(namespace_match.group('namespace_indentation'))
if current_indentation_level > 0:
- error(filename, line_number, 'whitespace/indent', 4,
- 'namespace should never be indented.')
+ # Don't warn about an indented namespace if we already warned about indented code.
+ if not file_state.did_inside_namespace_indent_warning():
+ error(filename, line_number, 'whitespace/indent', 4,
+ 'namespace should never be indented.')
return
looking_for_semicolon = False;
line_offset = 0
@@ -1706,7 +1735,8 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi
continue
if not current_indentation_level:
if not (in_preprocessor_directive or looking_for_semicolon):
- if not match(r'\S', current_line):
+ if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
+ file_state.set_did_inside_namespace_indent_warning()
error(filename, line_number + line_offset, 'whitespace/indent', 4,
'Code inside a namespace should not be indented.')
if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
@@ -1871,7 +1901,8 @@ def check_braces(filename, clean_lines, line_number, error):
'This { should be at the end of the previous line')
elif (search(r'\)\s*(const\s*)?{\s*$', line)
and line.count('(') == line.count(')')
- and not search(r'\b(if|for|foreach|while|switch)\b', line)):
+ and not search(r'\b(if|for|foreach|while|switch)\b', line)
+ and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
error(filename, line_number, 'whitespace/braces', 4,
'Place brace on its own line for function definitions.')
@@ -2124,7 +2155,7 @@ def get_line_width(line):
return len(line)
-def check_style(filename, clean_lines, line_number, file_extension, error):
+def check_style(filename, clean_lines, line_number, file_extension, file_state, error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
@@ -2136,6 +2167,8 @@ def check_style(filename, clean_lines, line_number, file_extension, error):
clean_lines: A CleansedLines instance containing the file.
line_number: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
error: The function to call with any errors found.
"""
@@ -2203,7 +2236,7 @@ def check_style(filename, clean_lines, line_number, file_extension, error):
'operators on the left side of the line instead of the right side.')
# Some more style checks
- check_namespace_indentation(filename, clean_lines, line_number, file_extension, error)
+ check_namespace_indentation(filename, clean_lines, line_number, file_extension, file_state, error)
check_using_std(filename, clean_lines, line_number, error)
check_max_min_macros(filename, clean_lines, line_number, error)
check_switch_indentation(filename, clean_lines, line_number, error)
@@ -2309,7 +2342,7 @@ def _classify_include(filename, include, is_system, include_state):
include_base = FileInfo(include).base_name()
# If we haven't encountered a primary header, then be lenient in checking.
- if not include_state.visited_primary_section() and target_base.startswith(include_base):
+ if not include_state.visited_primary_section() and target_base.find(include_base) != -1:
return _PRIMARY_HEADER
# If we already encountered a primary header, perform a strict comparison.
# In case the two filename bases are the same then the above lenient check
@@ -2616,6 +2649,109 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
+ check_identifier_name_in_declaration(filename, line_number, line, error)
+
+
+def check_identifier_name_in_declaration(filename, line_number, line, error):
+ """Checks if identifier names contain any underscores.
+
+ Because identifiers in the libraries we use contain many
+ underscores, we only warn about declarations of identifiers
+ and don't check uses of identifiers.
+
+ Args:
+ filename: The name of the current file.
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ error: The function to call with any errors found.
+ """
+ # We don't check a return statement.
+ if match(r'\s*return\b', line):
+ return
+
+ # Basically, a declaration is a type name followed by whitespaces
+ # followed by an identifier. The type name can be complicated
+ # due to type adjectives and templates. We remove them first to
+ # simplify the process to find declarations of identifiers.
+
+ # Convert "long long", "long double", and "long long int" to
+ # simple types, but don't remove simple "long".
+ line = sub(r'long (long )?(?=long|double|int)', '', line)
+ line = sub(r'\b(unsigned|signed|inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
+
+ # Remove all template parameters by removing matching < and >.
+ # Loop until nothing more is removed, so that nested templates are handled.
+ while True:
+ line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
+ if not number_of_replacements:
+ break
+
+ # Declarations of local variables can be in condition expressions
+ # of control flow statements (e.g., "if (RenderObject* p = o->parent())").
+ # We remove the keywords and the first parenthesis.
+ #
+ # Declarations in "while", "if", and "switch" are different from
+ # other declarations in two aspects:
+ #
+ # - There can be only one declaration between the parentheses.
+ # (i.e., you cannot write "if (int i = 0, j = 1) {}")
+ # - The variable must be initialized.
+ # (i.e., you cannot write "if (int i) {}")
+ #
+ # and we will need different treatments for them.
+ line = sub(r'^\s*for\s*\(', '', line)
+ line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
+
+ # Detect variable and functions.
+ type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
+ identifier_regexp = r'(?P<identifier>[\w:]+)'
+ character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
+ declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + character_after_identifier_regexp
+ declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
+ is_function_arguments = False
+ number_of_identifiers = 0
+ while True:
+ # If we are seeing the first identifier or arguments of a
+ # function, there should be a type name before an identifier.
+ if not number_of_identifiers or is_function_arguments:
+ declaration_regexp = declaration_with_type_regexp
+ else:
+ declaration_regexp = declaration_without_type_regexp
+
+ matched = match(declaration_regexp, line)
+ if not matched:
+ return
+ identifier = matched.group('identifier')
+ character_after_identifier = matched.group('character_after_identifier')
+
+ # If we removed a non-for-control statement, the character after
+ # the identifier should be '='. With this rule, we can avoid
+ # warning for cases like "if (val & INT_MAX) {".
+ if control_statement and character_after_identifier != '=':
+ return
+
+ is_function_arguments = is_function_arguments or character_after_identifier == '('
+
+ # Remove "m_" and "s_" to allow them.
+ modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
+ if modified_identifier.find('_') >= 0:
+ # Various exceptions to the rule: JavaScriptCore op code functions, const_iterator.
+ if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('_op_') >= 0)
+ and not modified_identifier == "const_iterator"):
+ error(filename, line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")
+
+ # There can be only one declaration in non-for-control statements.
+ if control_statement:
+ return
+ # We should continue checking if this is a function
+ # declaration because we need to check its arguments.
+ # Also, we need to check multiple declarations.
+ if character_after_identifier != '(' and character_after_identifier != ',':
+ return
+
+ number_of_identifiers += 1
+ line = line[matched.end():]
+
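As a quick illustration of the new naming check, here is a hedged sketch of driving it directly; the collecting error function below is hypothetical and only mirrors the (filename, line_number, category, confidence, message) signature used above:

    from modules.cpp_style import check_identifier_name_in_declaration

    found = []
    def collect(filename, line_number, category, confidence, message):
        found.append(message)

    check_identifier_name_in_declaration("foo.cpp", 1, "size_t buffer_size;", collect)
    check_identifier_name_in_declaration("foo.cpp", 2, "short m_length;", collect)
    print found  # only buffer_size is flagged; the m_ prefix is allowed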
def check_c_style_cast(filename, line_number, line, raw_line, cast_type, pattern,
error):
@@ -2914,7 +3050,7 @@ def check_for_include_what_you_use(filename, clean_lines, include_state, error,
def process_line(filename, file_extension,
clean_lines, line, include_state, function_state,
- class_state, error):
+ class_state, file_state, error):
"""Processes a single line in the file.
Args:
@@ -2927,6 +3063,8 @@ def process_line(filename, file_extension,
function_state: A _FunctionState instance which counts function lines, etc.
class_state: A _ClassState instance which maintains information about
the current stack of nested class declarations being parsed.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
@@ -2936,7 +3074,7 @@ def process_line(filename, file_extension,
if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
return
check_for_multiline_comments_and_strings(filename, clean_lines, line, error)
- check_style(filename, clean_lines, line, file_extension, error)
+ check_style(filename, clean_lines, line, file_extension, file_state, error)
check_language(filename, clean_lines, line, file_extension, include_state,
error)
check_for_non_standard_constructs(filename, clean_lines, line,
@@ -2961,6 +3099,7 @@ def process_file_data(filename, file_extension, lines, error):
include_state = _IncludeState()
function_state = _FunctionState()
class_state = _ClassState()
+ file_state = _FileState()
check_for_copyright(filename, lines, error)
@@ -2971,7 +3110,7 @@ def process_file_data(filename, file_extension, lines, error):
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.num_lines()):
process_line(filename, file_extension, clean_lines, line,
- include_state, function_state, class_state, error)
+ include_state, function_state, class_state, file_state, error)
class_state.check_finished(filename, error)
check_for_include_what_you_use(filename, clean_lines, include_state, error)
@@ -3038,8 +3177,6 @@ def process_file(filename, error=error):
'One or more unexpected \\r (^M) found;'
'better to use only a \\n')
- sys.stderr.write('Done processing %s\n' % filename)
-
def print_usage(message):
"""Prints a brief usage string and exits, optionally with an error message.
diff --git a/WebKitTools/Scripts/modules/cpp_style_unittest.py b/WebKitTools/Scripts/modules/cpp_style_unittest.py
index d5637f4..75dd47e 100644
--- a/WebKitTools/Scripts/modules/cpp_style_unittest.py
+++ b/WebKitTools/Scripts/modules/cpp_style_unittest.py
@@ -120,9 +120,10 @@ class CppStyleTestBase(unittest.TestCase):
function_state = cpp_style._FunctionState()
ext = file_name[file_name.rfind('.') + 1:]
class_state = cpp_style._ClassState()
+ file_state = cpp_style._FileState()
cpp_style.process_line(file_name, ext, clean_lines, 0,
include_state, function_state,
- class_state, error_collector)
+ class_state, file_state, error_collector)
# Single-line lint tests are allowed to fail the 'unlintable function'
# check.
error_collector.remove_if_present(
@@ -137,8 +138,9 @@ class CppStyleTestBase(unittest.TestCase):
lines = cpp_style.CleansedLines(lines)
ext = file_name[file_name.rfind('.') + 1:]
class_state = cpp_style._ClassState()
+ file_state = cpp_style._FileState()
for i in xrange(lines.num_lines()):
- cpp_style.check_style(file_name, lines, i, ext, error_collector)
+ cpp_style.check_style(file_name, lines, i, ext, file_state, error_collector)
cpp_style.check_for_non_standard_constructs(file_name, lines, i, class_state,
error_collector)
class_state.check_finished(file_name, error_collector)
@@ -934,15 +936,15 @@ class CppStyleTest(CppStyleTestBase):
self.assert_lint('int doublesize[some_var * 2];', errmsg)
self.assert_lint('int a[afunction()];', errmsg)
self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
- self.assert_lint('bool a_list[items_->size()];', errmsg)
+ self.assert_lint('bool aList[items_->size()];', errmsg)
self.assert_lint('namespace::Type buffer[len+1];', errmsg)
self.assert_lint('int a[64];', '')
self.assert_lint('int a[0xFF];', '')
self.assert_lint('int first[256], second[256];', '')
- self.assert_lint('int array_name[kCompileTimeConstant];', '')
+ self.assert_lint('int arrayName[kCompileTimeConstant];', '')
self.assert_lint('char buf[somenamespace::kBufSize];', '')
- self.assert_lint('int array_name[ALL_CAPS];', '')
+ self.assert_lint('int arrayName[ALL_CAPS];', '')
self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
self.assert_lint('int a[kMaxStrLen + 1];', '')
self.assert_lint('int a[sizeof(foo)];', '')
@@ -1116,6 +1118,12 @@ class CppStyleTest(CppStyleTestBase):
'if (condition) {',
'')
self.assert_multi_line_lint(
+ ' MACRO1(macroArg) {',
+ '')
+ self.assert_multi_line_lint(
+ 'ACCESSOR_GETTER(MessageEventPorts) {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
'int foo() {',
'Place brace on its own line for function definitions. [whitespace/braces] [4]')
self.assert_multi_line_lint(
@@ -1271,6 +1279,8 @@ class CppStyleTest(CppStyleTestBase):
self.assert_lint('a = 1 << 20', '')
self.assert_multi_line_lint('#include "config.h"\n#include <sys/io.h>\n',
'')
+ self.assert_multi_line_lint('#include "config.h"\n#import <foo/bar.h>\n',
+ '')
def test_spacing_before_last_semicolon(self):
self.assert_lint('call_function() ;',
@@ -1520,14 +1530,14 @@ class CppStyleTest(CppStyleTestBase):
def test_indent(self):
self.assert_lint('static int noindent;', '')
- self.assert_lint(' int four_space_indent;', '')
- self.assert_lint(' int one_space_indent;',
+ self.assert_lint(' int fourSpaceIndent;', '')
+ self.assert_lint(' int oneSpaceIndent;',
'Weird number of spaces at line-start. '
'Are you using a 4-space indent? [whitespace/indent] [3]')
- self.assert_lint(' int three_space_indent;',
+ self.assert_lint(' int threeSpaceIndent;',
'Weird number of spaces at line-start. '
'Are you using a 4-space indent? [whitespace/indent] [3]')
- self.assert_lint(' char* one_space_indent = "public:";',
+ self.assert_lint(' char* oneSpaceIndent = "public:";',
'Weird number of spaces at line-start. '
'Are you using a 4-space indent? [whitespace/indent] [3]')
self.assert_lint(' public:', '')
@@ -1960,7 +1970,7 @@ class CppStyleTest(CppStyleTestBase):
self.assert_lint('double const static foo = 2.0;',
build_storage_class_error_message)
- self.assert_lint('uint64 typedef unsigned_long_long;',
+ self.assert_lint('uint64 typedef unsignedLongLong;',
build_storage_class_error_message)
self.assert_lint('int register foo = 0;',
@@ -2044,6 +2054,7 @@ class CppStyleTest(CppStyleTestBase):
'Changing pointer instead of value (or unused value of '
'operator*). [runtime/invalid_increment] [5]')
+
class CleansedLinesTest(unittest.TestCase):
def test_init(self):
lines = ['Line 1',
@@ -2305,6 +2316,10 @@ class OrderOfIncludesTest(CppStyleTestBase):
classify_include('fooCustom.cpp',
'foo.h',
False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('PrefixFooCustom.cpp',
+ 'Foo.h',
+ False, include_state))
# Tricky example where both includes might be classified as primary.
self.assert_language_rules_check('ScrollbarThemeWince.cpp',
'#include "config.h"\n'
@@ -2828,7 +2843,16 @@ class WebKitStyleTest(CppStyleTestBase):
'};\n'
'};\n'
'}',
- ['Code inside a namespace should not be indented. [whitespace/indent] [4]', 'namespace should never be indented. [whitespace/indent] [4]'],
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ ' class Document {\n'
+ ' namespace InnerNamespace {\n'
+ '};\n'
+ '};\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
'foo.h')
self.assert_multi_line_lint(
'namespace WebCore {\n'
@@ -3584,8 +3608,104 @@ class WebKitStyleTest(CppStyleTestBase):
'foo.h')
def test_names(self):
- # FIXME: Implement this.
- pass
+ name_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming] [4]"
+
+ # Basic cases from WebKit style guide.
+ self.assert_lint('struct Data;', '')
+ self.assert_lint('size_t bufferSize;', '')
+ self.assert_lint('class HTMLDocument;', '')
+ self.assert_lint('String mimeType();', '')
+ self.assert_lint('size_t buffer_size;',
+ 'buffer_size' + name_error_message)
+ self.assert_lint('short m_length;', '')
+ self.assert_lint('short _length;',
+ '_length' + name_error_message)
+ self.assert_lint('short length_;',
+ 'length_' + name_error_message)
+
+ # Pointers, references, functions, templates, and adjectives.
+ self.assert_lint('char* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('const int UNDER_SCORE;',
+ 'UNDER_SCORE' + name_error_message)
+ self.assert_lint('static inline const char const& const under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('WebCore::RenderObject* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('int func_name();',
+ 'func_name' + name_error_message)
+ self.assert_lint('RefPtr<RenderObject*> under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('WTF::Vector<WTF::RefPtr<const RenderObject* const> > under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('int under_score[];',
+ 'under_score' + name_error_message)
+ self.assert_lint('struct dirent* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long long under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long double under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long long int under_score;',
+ 'under_score' + name_error_message)
+
+ # Declarations in control statement.
+ self.assert_lint('if (int under_score = 42) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('else if (int under_score = 42) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (int under_score = 42; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('while (foo & under_score = bar) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (foo * under_score = p; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (foo * under_score; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('while (foo & value_in_thirdparty_library) {', '')
+ self.assert_lint('while (foo * value_in_thirdparty_library) {', '')
+ self.assert_lint('if (mli && S_OK == mli->foo()) {', '')
+
+ # More member variables and functions.
+ self.assert_lint('int SomeClass::s_validName', '')
+ self.assert_lint('int m_under_score;',
+ 'm_under_score' + name_error_message)
+ self.assert_lint('int SomeClass::s_under_score = 0;',
+ 'SomeClass::s_under_score' + name_error_message)
+ self.assert_lint('int SomeClass::under_score = 0;',
+ 'SomeClass::under_score' + name_error_message)
+
+ # Other statements.
+ self.assert_lint('return INT_MAX;', '')
+ self.assert_lint('return_t under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('goto under_score;',
+ 'under_score' + name_error_message)
+
+ # Multiple variables in one line.
+ self.assert_lint('void myFunction(int variable1, int another_variable);',
+ 'another_variable' + name_error_message)
+ self.assert_lint('int variable1, another_variable;',
+ 'another_variable' + name_error_message)
+ self.assert_lint('int first_variable, secondVariable;',
+ 'first_variable' + name_error_message)
+ self.assert_lint('void my_function(int variable_1, int variable_2);',
+ ['my_function' + name_error_message,
+ 'variable_1' + name_error_message,
+ 'variable_2' + name_error_message])
+ self.assert_lint('for (int variable_1, variable_2;;) {',
+ ['variable_1' + name_error_message,
+ 'variable_2' + name_error_message])
+
+ # There is an exception for op code functions but only in the JavaScriptCore directory.
+ self.assert_lint('void this_op_code(int var1, int var2)', '', 'JavaScriptCore/foo.cpp')
+ self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_error_message)
+
+ # const_iterator is allowed as well.
+ self.assert_lint('typedef VectorType::const_iterator const_iterator;', '')
+
def test_other(self):
# FIXME: Implement this.
diff --git a/WebKitTools/Scripts/modules/executive.py b/WebKitTools/Scripts/modules/executive.py
new file mode 100644
index 0000000..b73e17d
--- /dev/null
+++ b/WebKitTools/Scripts/modules/executive.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+import subprocess
+import sys
+
+from modules.logging import tee
+
+
+class ScriptError(Exception):
+ def __init__(self, message=None, script_args=None, exit_code=None, output=None, cwd=None):
+ if not message:
+ message = 'Failed to run "%s"' % script_args
+ if exit_code:
+ message += " exit_code: %d" % exit_code
+ if cwd:
+ message += " cwd: %s" % cwd
+
+ Exception.__init__(self, message)
+ self.script_args = script_args # 'args' is already used by Exception
+ self.exit_code = exit_code
+ self.output = output
+ self.cwd = cwd
+
+ def message_with_output(self, output_limit=500):
+ if self.output:
+ if output_limit and len(self.output) > output_limit:
+ return "%s\nLast %s characters of output:\n%s" % (self, output_limit, self.output[-output_limit:])
+ return "%s\n%s" % (self, self.output)
+ return str(self)
+
+
+# FIXME: This should not be a global static.
+# New code should use Executive.run_command directly instead
+def run_command(*args, **kwargs):
+ return Executive().run_command(*args, **kwargs)
+
+
+class Executive(object):
+ def _run_command_with_teed_output(self, args, teed_output):
+ child_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+
+ # Use our own custom wait loop because Popen ignores a tee'd stderr/stdout.
+ # FIXME: This could be improved not to flatten output to stdout.
+ while True:
+ output_line = child_process.stdout.readline()
+ if output_line == "" and child_process.poll() != None:
+ return child_process.poll()
+ teed_output.write(output_line)
+
+ def run_and_throw_if_fail(self, args, quiet=False):
+ # Cache the child's output locally so it can be used for error reports.
+ child_out_file = StringIO.StringIO()
+ if quiet:
+ dev_null = open(os.devnull, "w")
+ child_stdout = tee(child_out_file, dev_null if quiet else sys.stdout)
+ exit_code = self._run_command_with_teed_output(args, child_stdout)
+ if quiet:
+ dev_null.close()
+
+ child_output = child_out_file.getvalue()
+ child_out_file.close()
+
+ if exit_code:
+ raise ScriptError(script_args=args, exit_code=exit_code, output=child_output)
+
+ # These error handlers will not need to be static methods once all callers are updated to use an Executive object.
+ @staticmethod
+ def default_error_handler(error):
+ raise error
+
+ @staticmethod
+ def ignore_error(error):
+ pass
+
+ # FIXME: This should be merged with run_and_throw_if_fail
+ def run_command(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True):
+ if hasattr(input, 'read'): # Check if the input is a file.
+ stdin = input
+ string_to_communicate = None
+ else:
+ stdin = subprocess.PIPE if input else None
+ string_to_communicate = input
+ if return_stderr:
+ stderr = subprocess.STDOUT
+ else:
+ stderr = None
+ process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, cwd=cwd)
+ output = process.communicate(string_to_communicate)[0]
+ exit_code = process.wait()
+ if exit_code:
+ script_error = ScriptError(script_args=args, exit_code=exit_code, output=output, cwd=cwd)
+ (error_handler or self.default_error_handler)(script_error)
+ if return_exit_code:
+ return exit_code
+ return output
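A minimal usage sketch for the new Executive class (assuming it is run from WebKitTools/Scripts so the modules package is on the path; the failing command is just an example):

    from modules.executive import Executive, ScriptError

    executive = Executive()
    print executive.run_command(["echo", "hello"])   # prints "hello"
    try:
        executive.run_command(["ls", "/no/such/path"])
    except ScriptError, e:
        print e.message_with_output()                # includes the command's captured output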
diff --git a/WebKitTools/Scripts/modules/grammar.py b/WebKitTools/Scripts/modules/grammar.py
new file mode 100644
index 0000000..dd2967a
--- /dev/null
+++ b/WebKitTools/Scripts/modules/grammar.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+def plural(noun):
+ # This is a dumb plural() implementation which was just enough for our uses.
+ if re.search("h$", noun):
+ return noun + "es"
+ else:
+ return noun + "s"
+
+def pluralize(noun, count):
+ if count != 1:
+ noun = plural(noun)
+ return "%d %s" % (count, noun)
diff --git a/WebKitTools/Scripts/modules/landingsequence.py b/WebKitTools/Scripts/modules/landingsequence.py
new file mode 100644
index 0000000..90683f4
--- /dev/null
+++ b/WebKitTools/Scripts/modules/landingsequence.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from modules.comments import bug_comment_from_commit_text
+from modules.executive import ScriptError
+from modules.logging import log
+from modules.scm import CheckoutNeedsUpdate
+from modules.webkitport import WebKitPort
+from modules.workqueue import WorkQueue
+from modules.buildsteps import CleanWorkingDirectoryStep, UpdateStep, ApplyPatchStep, EnsureBuildersAreGreenStep, BuildStep, RunTestsStep, CommitStep, ClosePatchStep, CloseBugStep
+
+
+class LandingSequenceErrorHandler():
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ raise NotImplementedError, "subclasses must implement"
+
+# FIXME: This class is slowly being killed and replaced with StepSequence.
+class LandingSequence:
+ def __init__(self, patch, options, tool):
+ self._patch = patch
+ self._options = options
+ self._tool = tool
+ self._port = WebKitPort.port(self._options.port)
+
+ def run(self):
+ self.clean()
+ self.update()
+ self.apply_patch()
+ self.check_builders()
+ self.build()
+ self.test()
+ commit_log = self.commit()
+ self.close_patch(commit_log)
+ self.close_bug()
+
+ def run_and_handle_errors(self):
+ try:
+ self.run()
+ except CheckoutNeedsUpdate, e:
+ log("Commit failed because the checkout is out of date. Please update and try again.")
+ log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.")
+ WorkQueue.exit_after_handled_error(e)
+ except ScriptError, e:
+ if not self._options.quiet:
+ log(e.message_with_output())
+ if self._options.parent_command:
+ command = self._tool.command_by_name(self._options.parent_command)
+ command.handle_script_error(self._tool, self._patch, e)
+ WorkQueue.exit_after_handled_error(e)
+
+ def clean(self):
+ step = CleanWorkingDirectoryStep(self._tool, self._options)
+ step.run()
+
+ def update(self):
+ step = UpdateStep(self._tool, self._options)
+ step.run()
+
+ def apply_patch(self):
+ step = ApplyPatchStep(self._tool, self._options, self._patch)
+ step.run()
+
+ def check_builders(self):
+ step = EnsureBuildersAreGreenStep(self._tool, self._options)
+ step.run()
+
+ def build(self):
+ step = BuildStep(self._tool, self._options)
+ step.run()
+
+ def test(self):
+ step = RunTestsStep(self._tool, self._options)
+ step.run()
+
+ def commit(self):
+ step = CommitStep(self._tool, self._options)
+ return step.run()
+
+ def close_patch(self, commit_log):
+ step = ClosePatchStep(self._tool, self._options, self._patch)
+ step.run(commit_log)
+
+ def close_bug(self):
+ step = CloseBugStep(self._tool, self._options, self._patch)
+ step.run()
diff --git a/WebKitTools/Scripts/modules/logging.py b/WebKitTools/Scripts/modules/logging.py
index cbccacf..7b7cec5 100644
--- a/WebKitTools/Scripts/modules/logging.py
+++ b/WebKitTools/Scripts/modules/logging.py
@@ -29,6 +29,7 @@
#
# WebKit's Python module for logging
+import os
import sys
def log(string):
@@ -46,3 +47,38 @@ class tee:
def write(self, string):
for file in self.files:
file.write(string)
+
+class OutputTee:
+ def __init__(self):
+ self._original_stdout = None
+ self._original_stderr = None
+ self._files_for_output = []
+
+ def add_log(self, path):
+ log_file = self._open_log_file(path)
+ self._files_for_output.append(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ return log_file
+
+ def remove_log(self, log_file):
+ self._files_for_output.remove(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ log_file.close()
+
+ @staticmethod
+ def _open_log_file(log_path):
+ (log_directory, log_name) = os.path.split(log_path)
+ if log_directory and not os.path.exists(log_directory):
+ os.makedirs(log_directory)
+ return open(log_path, 'a+')
+
+ def _tee_outputs_to_files(self, files):
+ if not self._original_stdout:
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+ if files and len(files):
+ sys.stdout = tee(self._original_stdout, *files)
+ sys.stderr = tee(self._original_stderr, *files)
+ else:
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
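A hedged usage sketch for OutputTee; the log path below is only an example:

    from modules.logging import OutputTee

    output_tee = OutputTee()
    log_file = output_tee.add_log("logs/commit-queue.log")   # creates logs/ if needed
    print "this line goes to stdout and to the log file"
    output_tee.remove_log(log_file)                          # stdout/stderr are restored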
diff --git a/WebKitTools/Scripts/modules/logging_unittest.py b/WebKitTools/Scripts/modules/logging_unittest.py
index 7d41e56..b09a563 100644
--- a/WebKitTools/Scripts/modules/logging_unittest.py
+++ b/WebKitTools/Scripts/modules/logging_unittest.py
@@ -32,8 +32,8 @@ import StringIO
import tempfile
import unittest
+from modules.executive import ScriptError
from modules.logging import *
-from modules.scm import ScriptError
class LoggingTest(unittest.TestCase):
diff --git a/WebKitTools/Scripts/modules/mock.py b/WebKitTools/Scripts/modules/mock.py
new file mode 100644
index 0000000..f6f328e
--- /dev/null
+++ b/WebKitTools/Scripts/modules/mock.py
@@ -0,0 +1,309 @@
+# mock.py
+# Test tools for mocking and patching.
+# Copyright (C) 2007-2009 Michael Foord
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+
+# mock 0.6.0
+# http://www.voidspace.org.uk/python/mock/
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# 2009-11-25: Licence downloaded from above URL.
+# BEGIN DOWNLOADED LICENSE
+#
+# Copyright (c) 2003-2009, Michael Foord
+# All rights reserved.
+# E-mail : fuzzyman AT voidspace DOT org DOT uk
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# * Neither the name of Michael Foord nor the name of Voidspace
+# may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# END DOWNLOADED LICENSE
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# Comments, suggestions and bug reports welcome.
+
+
+__all__ = (
+ 'Mock',
+ 'patch',
+ 'patch_object',
+ 'sentinel',
+ 'DEFAULT'
+)
+
+__version__ = '0.6.0'
+
+class SentinelObject(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self):
+ return '<SentinelObject "%s">' % self.name
+
+
+class Sentinel(object):
+ def __init__(self):
+ self._sentinels = {}
+
+ def __getattr__(self, name):
+ return self._sentinels.setdefault(name, SentinelObject(name))
+
+
+sentinel = Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+
+class OldStyleClass:
+ pass
+ClassType = type(OldStyleClass)
+
+def _is_magic(name):
+ return '__%s__' % name[2:-2] == name
+
+def _copy(value):
+ if type(value) in (dict, list, tuple, set):
+ return type(value)(value)
+ return value
+
+
+class Mock(object):
+
+ def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+ name=None, parent=None, wraps=None):
+ self._parent = parent
+ self._name = name
+ if spec is not None and not isinstance(spec, list):
+ spec = [member for member in dir(spec) if not _is_magic(member)]
+
+ self._methods = spec
+ self._children = {}
+ self._return_value = return_value
+ self.side_effect = side_effect
+ self._wraps = wraps
+
+ self.reset_mock()
+
+
+ def reset_mock(self):
+ self.called = False
+ self.call_args = None
+ self.call_count = 0
+ self.call_args_list = []
+ self.method_calls = []
+ for child in self._children.itervalues():
+ child.reset_mock()
+ if isinstance(self._return_value, Mock):
+ self._return_value.reset_mock()
+
+
+ def __get_return_value(self):
+ if self._return_value is DEFAULT:
+ self._return_value = Mock()
+ return self._return_value
+
+ def __set_return_value(self, value):
+ self._return_value = value
+
+ return_value = property(__get_return_value, __set_return_value)
+
+
+ def __call__(self, *args, **kwargs):
+ self.called = True
+ self.call_count += 1
+ self.call_args = (args, kwargs)
+ self.call_args_list.append((args, kwargs))
+
+ parent = self._parent
+ name = self._name
+ while parent is not None:
+ parent.method_calls.append((name, args, kwargs))
+ if parent._parent is None:
+ break
+ name = parent._name + '.' + name
+ parent = parent._parent
+
+ ret_val = DEFAULT
+ if self.side_effect is not None:
+ if (isinstance(self.side_effect, Exception) or
+ isinstance(self.side_effect, (type, ClassType)) and
+ issubclass(self.side_effect, Exception)):
+ raise self.side_effect
+
+ ret_val = self.side_effect(*args, **kwargs)
+ if ret_val is DEFAULT:
+ ret_val = self.return_value
+
+ if self._wraps is not None and self._return_value is DEFAULT:
+ return self._wraps(*args, **kwargs)
+ if ret_val is DEFAULT:
+ ret_val = self.return_value
+ return ret_val
+
+
+ def __getattr__(self, name):
+ if self._methods is not None:
+ if name not in self._methods:
+ raise AttributeError("Mock object has no attribute '%s'" % name)
+ elif _is_magic(name):
+ raise AttributeError(name)
+
+ if name not in self._children:
+ wraps = None
+ if self._wraps is not None:
+ wraps = getattr(self._wraps, name)
+ self._children[name] = Mock(parent=self, name=name, wraps=wraps)
+
+ return self._children[name]
+
+
+ def assert_called_with(self, *args, **kwargs):
+ assert self.call_args == (args, kwargs), 'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args)
+
+
+def _dot_lookup(thing, comp, import_path):
+ try:
+ return getattr(thing, comp)
+ except AttributeError:
+ __import__(import_path)
+ return getattr(thing, comp)
+
+
+def _importer(target):
+ components = target.split('.')
+ import_path = components.pop(0)
+ thing = __import__(import_path)
+
+ for comp in components:
+ import_path += ".%s" % comp
+ thing = _dot_lookup(thing, comp, import_path)
+ return thing
+
+
+class _patch(object):
+ def __init__(self, target, attribute, new, spec, create):
+ self.target = target
+ self.attribute = attribute
+ self.new = new
+ self.spec = spec
+ self.create = create
+ self.has_local = False
+
+
+ def __call__(self, func):
+ if hasattr(func, 'patchings'):
+ func.patchings.append(self)
+ return func
+
+ def patched(*args, **keywargs):
+ # don't use a with here (backwards compatibility with 2.5)
+ extra_args = []
+ for patching in patched.patchings:
+ arg = patching.__enter__()
+ if patching.new is DEFAULT:
+ extra_args.append(arg)
+ args += tuple(extra_args)
+ try:
+ return func(*args, **keywargs)
+ finally:
+ for patching in getattr(patched, 'patchings', []):
+ patching.__exit__()
+
+ patched.patchings = [self]
+ patched.__name__ = func.__name__
+ patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
+ func.func_code.co_firstlineno)
+ return patched
+
+
+ def get_original(self):
+ target = self.target
+ name = self.attribute
+ create = self.create
+
+ original = DEFAULT
+ if _has_local_attr(target, name):
+ try:
+ original = target.__dict__[name]
+ except AttributeError:
+ # for instances of classes with slots, they have no __dict__
+ original = getattr(target, name)
+ elif not create and not hasattr(target, name):
+ raise AttributeError("%s does not have the attribute %r" % (target, name))
+ return original
+
+
+ def __enter__(self):
+ new, spec, = self.new, self.spec
+ original = self.get_original()
+ if new is DEFAULT:
+ # XXXX what if original is DEFAULT - shouldn't use it as a spec
+ inherit = False
+ if spec == True:
+ # set spec to the object we are replacing
+ spec = original
+ if isinstance(spec, (type, ClassType)):
+ inherit = True
+ new = Mock(spec=spec)
+ if inherit:
+ new.return_value = Mock(spec=spec)
+ self.temp_original = original
+ setattr(self.target, self.attribute, new)
+ return new
+
+
+ def __exit__(self, *_):
+ if self.temp_original is not DEFAULT:
+ setattr(self.target, self.attribute, self.temp_original)
+ else:
+ delattr(self.target, self.attribute)
+ del self.temp_original
+
+
+def patch_object(target, attribute, new=DEFAULT, spec=None, create=False):
+ return _patch(target, attribute, new, spec, create)
+
+
+def patch(target, new=DEFAULT, spec=None, create=False):
+ try:
+ target, attribute = target.rsplit('.', 1)
+ except (TypeError, ValueError):
+ raise TypeError("Need a valid target to patch. You supplied: %r" % (target,))
+ target = _importer(target)
+ return _patch(target, attribute, new, spec, create)
+
+
+
+def _has_local_attr(obj, name):
+ try:
+ return name in vars(obj)
+ except TypeError:
+ # objects without a __dict__
+ return hasattr(obj, name)
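A brief sketch of how the bundled mock library (mock 0.6.0) gets used by the new unit tests; the attribute name below is illustrative:

    from modules.mock import Mock

    bugs = Mock()
    bugs.fetch_attachment.return_value = {"id": 197}
    attachment = bugs.fetch_attachment(197)
    assert attachment["id"] == 197
    bugs.fetch_attachment.assert_called_with(197)   # raises AssertionError on a mismatch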
diff --git a/WebKitTools/Scripts/modules/mock_bugzillatool.py b/WebKitTools/Scripts/modules/mock_bugzillatool.py
new file mode 100644
index 0000000..e600947
--- /dev/null
+++ b/WebKitTools/Scripts/modules/mock_bugzillatool.py
@@ -0,0 +1,153 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from modules.mock import Mock
+from modules.scm import CommitMessage
+
+
+class MockBugzilla(Mock):
+ patch1 = {
+ "id" : 197,
+ "bug_id" : 42,
+ "url" : "http://example.com/197",
+ "is_obsolete" : False,
+ "reviewer" : "Reviewer1",
+ "attacher_email" : "Contributer1",
+ }
+ patch2 = {
+ "id" : 128,
+ "bug_id" : 42,
+ "url" : "http://example.com/128",
+ "is_obsolete" : False,
+ "reviewer" : "Reviewer2",
+ "attacher_email" : "Contributer2",
+ }
+ bug_server_url = "http://example.com"
+
+ def fetch_bug_ids_from_commit_queue(self):
+ return [42, 75]
+
+ def fetch_attachment_ids_from_review_queue(self):
+ return [197, 128]
+
+ def fetch_patches_from_commit_queue(self):
+ return [self.patch1, self.patch2]
+
+ def fetch_patches_from_pending_commit_list(self):
+ return [self.patch1, self.patch2]
+
+ def fetch_reviewed_patches_from_bug(self, bug_id):
+ if bug_id == 42:
+ return [self.patch1, self.patch2]
+ return None
+
+ def fetch_attachments_from_bug(self, bug_id):
+ if bug_id == 42:
+ return [self.patch1, self.patch2]
+ return None
+
+ def fetch_patches_from_bug(self, bug_id):
+ if bug_id == 42:
+ return [self.patch1, self.patch2]
+ return None
+
+ def fetch_attachment(self, attachment_id):
+ if attachment_id == 197:
+ return self.patch1
+ if attachment_id == 128:
+ return self.patch2
+ raise Exception("Bogus attachment_id in fetch_attachment.")
+
+ def bug_url_for_bug_id(self, bug_id):
+ return "%s/%s" % (self.bug_server_url, bug_id)
+
+ def attachment_url_for_id(self, attachment_id, action):
+ action_param = ""
+ if action and action != "view":
+ action_param = "&action=%s" % action
+ return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param)
+
+
+class MockBuildBot(Mock):
+ def builder_statuses(self):
+ return [{
+ "name": "Builder1",
+ "is_green": True
+ }, {
+ "name": "Builder2",
+ "is_green": True
+ }]
+
+ def red_core_builders_names(self):
+ return []
+
+class MockSCM(Mock):
+ def __init__(self):
+ Mock.__init__(self)
+ self.checkout_root = os.getcwd()
+
+ def create_patch(self):
+ return "Patch1"
+
+ def commit_ids_from_commitish_arguments(self, args):
+ return ["Commitish1", "Commitish2"]
+
+ def commit_message_for_local_commit(self, commit_id):
+ if commit_id == "Commitish1":
+ return CommitMessage("CommitMessage1\nhttps://bugs.example.org/show_bug.cgi?id=42\n")
+ if commit_id == "Commitish2":
+ return CommitMessage("CommitMessage2\nhttps://bugs.example.org/show_bug.cgi?id=75\n")
+ raise Exception("Bogus commit_id in commit_message_for_local_commit.")
+
+ def create_patch_from_local_commit(self, commit_id):
+ if commit_id == "Commitish1":
+ return "Patch1"
+ if commit_id == "Commitish2":
+ return "Patch2"
+ raise Exception("Bogus commit_id in commit_message_for_local_commit.")
+
+ def modified_changelogs(self):
+ # Ideally we'd return something more interesting here.
+ # The problem is that LandDiff will try to actually read the path from disk!
+ return []
+
+
+class MockBugzillaTool():
+ def __init__(self):
+ self.bugs = MockBugzilla()
+ self.buildbot = MockBuildBot()
+ self.executive = Mock()
+ self._scm = MockSCM()
+
+ def scm(self):
+ return self._scm
+
+ def path(self):
+ return "echo"
diff --git a/WebKitTools/Scripts/modules/multicommandtool.py b/WebKitTools/Scripts/modules/multicommandtool.py
new file mode 100644
index 0000000..0475cf1
--- /dev/null
+++ b/WebKitTools/Scripts/modules/multicommandtool.py
@@ -0,0 +1,253 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# MultiCommandTool provides a framework for writing svn-like/git-like tools
+# which are called with the following format:
+# tool-name [global options] command-name [command options]
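+#
+# A minimal usage sketch (ExampleTool / example-tool are illustrative names
+# only, not part of this patch):
+#
+#   class ExampleTool(MultiCommandTool):
+#       def __init__(self):
+#           MultiCommandTool.__init__(self, name="example-tool")
+#
+#       def should_execute_command(self, command):
+#           return (True, None)
+#
+#   # Command subclasses are auto-discovered; with no arguments the built-in
+#   # "help" command prints the generated usage text.
+#   sys.exit(ExampleTool().main(sys.argv))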
+
+import sys
+
+from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
+
+from modules.grammar import pluralize
+from modules.logging import log
+
+class Command(object):
+ name = None
+    # show_in_main_help = False # Subclasses must define show_in_main_help; we leave it out here to enforce that.
+ def __init__(self, help_text, argument_names=None, options=None, requires_local_commits=False):
+ self.help_text = help_text
+ self.argument_names = argument_names
+ self.required_arguments = self._parse_required_arguments(argument_names)
+ self.options = options
+ self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
+ self.requires_local_commits = requires_local_commits
+ self.tool = None
+
+ # The tool calls bind_to_tool on each Command after adding it to its list.
+ def bind_to_tool(self, tool):
+ # Command instances can only be bound to one tool at a time.
+ if self.tool and tool != self.tool:
+ raise Exception("Command already bound to tool!")
+ self.tool = tool
+
+ @staticmethod
+ def _parse_required_arguments(argument_names):
+ required_args = []
+ if not argument_names:
+ return required_args
+ split_args = argument_names.split(" ")
+ for argument in split_args:
+ if argument[0] == '[':
+ # For now our parser is rather dumb. Do some minimal validation that
+ # we haven't confused it.
+ if argument[-1] != ']':
+ raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
+ else:
+ required_args.append(argument)
+ return required_args
+
+ def name_with_arguments(self):
+ usage_string = self.name
+ if self.options:
+ usage_string += " [options]"
+ if self.argument_names:
+ usage_string += " " + self.argument_names
+ return usage_string
+
+ def parse_args(self, args):
+ return self.option_parser.parse_args(args)
+
+ def check_arguments_and_execute(self, args_after_command_name, tool):
+ (command_options, command_args) = self.parse_args(args_after_command_name)
+
+ if len(command_args) < len(self.required_arguments):
+ log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
+ pluralize("argument", len(self.required_arguments)),
+ pluralize("argument", len(command_args)),
+ "'%s'" % " ".join(command_args),
+ " ".join(self.required_arguments),
+ tool.name(),
+ self.name))
+ return 1
+ return self.execute(command_options, command_args, tool) or 0
+
+ def standalone_help(self):
+ help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n"
+ help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
+ return help_text
+
+ def execute(self, options, args, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class HelpPrintingOptionParser(OptionParser):
+ def __init__(self, epilog_method=None, *args, **kwargs):
+ self.epilog_method = epilog_method
+ OptionParser.__init__(self, *args, **kwargs)
+
+ def error(self, msg):
+ self.print_usage(sys.stderr)
+ error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
+ error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
+ self.exit(1, error_message)
+
+ # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
+ # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
+ def format_epilog(self, epilog):
+ if self.epilog_method:
+ return "\n%s\n" % self.epilog_method()
+ return ""
+
+
+class HelpCommand(Command):
+ name = "help"
+ show_in_main_help = False
+
+ def __init__(self):
+ options = [
+ make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
+ ]
+ Command.__init__(self, "Display information about this program or its subcommands", "[COMMAND]", options=options)
+ self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
+
+ def _help_epilog(self):
+ # Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users?
+ if self.show_all_commands:
+ epilog = "All %prog commands:\n"
+ relevant_commands = self.tool.commands[:]
+ else:
+ epilog = "Common %prog commands:\n"
+ relevant_commands = filter(self.tool.should_show_in_main_help, self.tool.commands)
+ longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
+ relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
+ command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
+ epilog += "%s\n" % "".join(command_help_texts)
+ epilog += "See '%prog help --all-commands' to list all commands.\n"
+ epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
+ return self.tool.global_option_parser.expand_prog_name(epilog)
+
+ def execute(self, options, args, tool):
+ if args:
+ command = self.tool.command_by_name(args[0])
+ if command:
+ print command.standalone_help()
+ return 0
+
+ self.show_all_commands = options.show_all_commands
+ tool.global_option_parser.print_help()
+ return 0
+
+
+class MultiCommandTool(object):
+ def __init__(self, name=None, commands=None):
+ # Allow the unit tests to disable command auto-discovery.
+ self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
+ self.help_command = self.command_by_name(HelpCommand.name)
+ # Require a help command, even if the manual test list doesn't include one.
+ if not self.help_command:
+ self.help_command = HelpCommand()
+ self.commands.append(self.help_command)
+ for command in self.commands:
+ command.bind_to_tool(self)
+ self.global_option_parser = HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=name, usage=self._usage_line())
+
+ @classmethod
+ def _add_all_subclasses(cls, class_to_crawl, seen_classes):
+ for subclass in class_to_crawl.__subclasses__():
+ if subclass not in seen_classes:
+ seen_classes.add(subclass)
+ cls._add_all_subclasses(subclass, seen_classes)
+
+ @classmethod
+ def _find_all_commands(cls):
+ commands = set()
+ cls._add_all_subclasses(Command, commands)
+ return sorted(commands)
+
+ @staticmethod
+ def _usage_line():
+ return "Usage: %prog [options] COMMAND [ARGS]"
+
+ def name(self):
+ return self.global_option_parser.get_prog_name()
+
+ def handle_global_args(self, args):
+ (options, args) = self.global_option_parser.parse_args(args)
+ # We should never hit this because _split_args splits at the first arg without a leading "-".
+ if args:
+ self.global_option_parser.error("Extra arguments before command: %s" % args)
+
+ @staticmethod
+ def _split_args(args):
+ # Assume the first argument which doesn't start with "-" is the command name.
+ command_index = 0
+ for arg in args:
+ if arg[0] != "-":
+ break
+ command_index += 1
+ else:
+ return (args[:], None, [])
+
+ global_args = args[:command_index]
+ command = args[command_index]
+ command_args = args[command_index + 1:]
+ return (global_args, command, command_args)
+
+ def command_by_name(self, command_name):
+ for command in self.commands:
+ if command_name == command.name:
+ return command
+ return None
+
+ def path(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_show_in_main_help(self, command):
+ return command.show_in_main_help
+
+ def should_execute_command(self, command):
+ raise NotImplementedError, "subclasses must implement"
+
+ def main(self, argv=sys.argv):
+ (global_args, command_name, args_after_command_name) = self._split_args(argv[1:])
+
+ # Handle --help, etc:
+ self.handle_global_args(global_args)
+
+ command = self.command_by_name(command_name) or self.help_command
+ if not command:
+ self.global_option_parser.error("%s is not a recognized command" % command_name)
+
+ (should_execute, failure_reason) = self.should_execute_command(command)
+ if not should_execute:
+ log(failure_reason)
+ return 0
+
+ return command.check_arguments_and_execute(args_after_command_name, self)
diff --git a/WebKitTools/Scripts/modules/multicommandtool_unittest.py b/WebKitTools/Scripts/modules/multicommandtool_unittest.py
new file mode 100644
index 0000000..c71cc09
--- /dev/null
+++ b/WebKitTools/Scripts/modules/multicommandtool_unittest.py
@@ -0,0 +1,158 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+from multicommandtool import MultiCommandTool, Command
+from modules.outputcapture import OutputCapture
+
+from optparse import make_option
+
+class TrivialCommand(Command):
+ name = "trivial"
+ show_in_main_help = True
+ def __init__(self, **kwargs):
+ Command.__init__(self, "help text", **kwargs)
+
+ def execute(self, options, args, tool):
+ pass
+
+class UncommonCommand(TrivialCommand):
+ name = "uncommon"
+ show_in_main_help = False
+
+class CommandTest(unittest.TestCase):
+ def test_name_with_arguments(self):
+ command_with_args = TrivialCommand(argument_names="ARG1 ARG2")
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
+
+ command_with_args = TrivialCommand(options=[make_option("--my_option")])
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
+
+ def test_parse_required_arguments(self):
+ self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
+ # Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
+ self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
+
+ def test_required_arguments(self):
+ two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]")
+ capture = OutputCapture()
+ capture.capture_output()
+ exit_code = two_required_arguments.check_arguments_and_execute(["foo"], TrivialTool())
+ (stdout_string, stderr_string) = capture.restore_output()
+ expected_missing_args_error = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
+ self.assertEqual(exit_code, 1)
+ self.assertEqual(stdout_string, "")
+ self.assertEqual(stderr_string, expected_missing_args_error)
+
+
+class TrivialTool(MultiCommandTool):
+ def __init__(self, commands=None):
+ MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
+
+    def path(self):
+        return __file__
+
+ def should_execute_command(self, command):
+ return (True, None)
+
+
+class MultiCommandToolTest(unittest.TestCase):
+ def _assert_split(self, args, expected_split):
+ self.assertEqual(MultiCommandTool._split_args(args), expected_split)
+
+ def test_split_args(self):
+        # MultiCommandTool._split_args returns: (global_args, command, command_args)
+ full_args = ["--global-option", "command", "--option", "arg"]
+ full_args_expected = (["--global-option"], "command", ["--option", "arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = []
+ full_args_expected = ([], None, [])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = ["command", "arg"]
+ full_args_expected = ([], "command", ["arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ def test_command_by_name(self):
+ # This also tests Command auto-discovery.
+ tool = TrivialTool()
+ self.assertEqual(tool.command_by_name("trivial").name, "trivial")
+ self.assertEqual(tool.command_by_name("bar"), None)
+
+ def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", exit_code=0):
+ capture = OutputCapture()
+ capture.capture_output()
+ exit_code = tool.main(main_args)
+ (stdout_string, stderr_string) = capture.restore_output()
+ self.assertEqual(stdout_string, expected_stdout)
+        self.assertEqual(stderr_string, expected_stderr)
+
+ def test_global_help(self):
+ tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
+ expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+Common trivial-tool commands:
+ trivial help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
+ expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+All trivial-tool commands:
+ help Display information about this program or its subcommands
+ trivial help text
+ uncommon help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
+
+ def test_command_help(self):
+ command_with_options = TrivialCommand(options=[make_option("--my_option")])
+ tool = TrivialTool(commands=[command_with_options])
+ expected_subcommand_help = "trivial [options] help text\nOptions:\n --my_option=MY_OPTION\n\n"
+ self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/WebKitTools/Scripts/modules/outputcapture.py b/WebKitTools/Scripts/modules/outputcapture.py
new file mode 100644
index 0000000..f02fc5d
--- /dev/null
+++ b/WebKitTools/Scripts/modules/outputcapture.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Class for unittest support. Used for capturing stderr/stdout.
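+#
+# Typical usage (mirroring the unit tests; the print statement is illustrative):
+#
+#   capture = OutputCapture()
+#   capture.capture_output()
+#   print "captured instead of being written to the real stdout"
+#   (stdout_string, stderr_string) = capture.restore_output()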
+
+import sys
+from StringIO import StringIO
+
+class OutputCapture(object):
+ def __init__(self):
+ self.saved_outputs = dict()
+
+ def _capture_output_with_name(self, output_name):
+ self.saved_outputs[output_name] = getattr(sys, output_name)
+ setattr(sys, output_name, StringIO())
+
+ def _restore_output_with_name(self, output_name):
+ captured_output = getattr(sys, output_name).getvalue()
+ setattr(sys, output_name, self.saved_outputs[output_name])
+ del self.saved_outputs[output_name]
+ return captured_output
+
+ def capture_output(self):
+ self._capture_output_with_name("stdout")
+ self._capture_output_with_name("stderr")
+
+ def restore_output(self):
+ return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"))
diff --git a/WebKitTools/Scripts/modules/patchcollection.py b/WebKitTools/Scripts/modules/patchcollection.py
new file mode 100644
index 0000000..add8129
--- /dev/null
+++ b/WebKitTools/Scripts/modules/patchcollection.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
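+# PersistentPatchCollectionDelegate supplies a collection name, the patch ids to
+# consider, and a status server.  PersistentPatchCollection walks those ids using
+# the status server to remember which patches have already been processed:
+# next() returns the first patch id with no recorded status, while did_pass()
+# and did_fail() record a "Pass" or "Fail" result for a patch.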
+class PersistentPatchCollectionDelegate:
+ def collection_name(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def fetch_potential_patch_ids(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def status_server(self):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class PersistentPatchCollection:
+ _initial_status = "Pending"
+ _pass_status = "Pass"
+ _fail_status = "Fail"
+ def __init__(self, delegate):
+ self._delegate = delegate
+ self._name = self._delegate.collection_name()
+ self._status = self._delegate.status_server()
+ self._status_cache = {}
+
+ def _cached_status(self, patch_id):
+ cached = self._status_cache.get(patch_id)
+ if cached:
+ return cached
+ status = self._status.patch_status(self._name, patch_id)
+ if status:
+ self._status_cache[patch_id] = status
+ return status
+
+ def next(self):
+ patch_ids = self._delegate.fetch_potential_patch_ids()
+ for patch_id in patch_ids:
+ status = self._cached_status(patch_id)
+ if not status:
+ return patch_id
+
+ def did_pass(self, patch):
+ self._status.update_status(self._name, self._pass_status, patch)
+
+ def did_fail(self, patch):
+ self._status.update_status(self._name, self._fail_status, patch)
diff --git a/WebKitTools/Scripts/modules/scm.py b/WebKitTools/Scripts/modules/scm.py
index 3ffa23b..ff26693 100644
--- a/WebKitTools/Scripts/modules/scm.py
+++ b/WebKitTools/Scripts/modules/scm.py
@@ -34,6 +34,8 @@ import re
import subprocess
# Import WebKit-specific modules.
+from modules.changelogs import ChangeLog
+from modules.executive import Executive, run_command, ScriptError
from modules.logging import error, log
def detect_scm_system(path):
@@ -77,44 +79,16 @@ class CommitMessage:
return "\n".join(self.message_lines) + "\n"
-class ScriptError(Exception):
- def __init__(self, message=None, script_args=None, exit_code=None, output=None, cwd=None):
- if not message:
- message = 'Failed to run "%s"' % script_args
- if exit_code:
- message += " exit_code: %d" % exit_code
- if cwd:
- message += " cwd: %s" % cwd
-
- Exception.__init__(self, message)
- self.script_args = script_args # 'args' is already used by Exception
- self.exit_code = exit_code
- self.output = output
- self.cwd = cwd
-
- def message_with_output(self, output_limit=500):
- if self.output:
- if len(self.output) > output_limit:
- return "%s\nLast %s characters of output:\n%s" % (self, output_limit, self.output[-output_limit:])
- return "%s\n%s" % (self, self.output)
- return str(self)
-
-
class CheckoutNeedsUpdate(ScriptError):
def __init__(self, script_args, exit_code, output, cwd):
ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd)
-def default_error_handler(error):
- raise error
-
def commit_error_handler(error):
if re.search("resource out of date", error.output):
raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd)
- default_error_handler(error)
+ Executive.default_error_handler(error)
-def ignore_error(error):
- pass
class SCM:
def __init__(self, cwd, dryrun=False):
@@ -122,33 +96,15 @@ class SCM:
self.checkout_root = self.find_checkout_root(self.cwd)
self.dryrun = dryrun
- @staticmethod
- def run_command(args, cwd=None, input=None, error_handler=default_error_handler, return_exit_code=False):
- if hasattr(input, 'read'): # Check if the input is a file.
- stdin = input
- string_to_communicate = None
- else:
- stdin = subprocess.PIPE if input else None
- string_to_communicate = input
- process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)
- output = process.communicate(string_to_communicate)[0].rstrip()
- exit_code = process.wait()
- if exit_code:
- script_error = ScriptError(script_args=args, exit_code=exit_code, output=output, cwd=cwd)
- error_handler(script_error)
- if return_exit_code:
- return exit_code
- return output
-
def scripts_directory(self):
return os.path.join(self.checkout_root, "WebKitTools", "Scripts")
def script_path(self, script_name):
return os.path.join(self.scripts_directory(), script_name)
- def ensure_clean_working_directory(self, force):
- if not force and not self.working_directory_is_clean():
- print self.run_command(self.status_command(), error_handler=ignore_error)
+ def ensure_clean_working_directory(self, force_clean):
+ if not force_clean and not self.working_directory_is_clean():
+ print run_command(self.status_command(), error_handler=Executive.ignore_error)
raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
log("Cleaning working directory")
@@ -168,15 +124,17 @@ class SCM:
# It's possible that the patch was not made from the root directory.
# We should detect and handle that case.
curl_process = subprocess.Popen(['curl', '--location', '--silent', '--show-error', patch['url']], stdout=subprocess.PIPE)
- args = [self.script_path('svn-apply'), '--reviewer', patch['reviewer']]
+ args = [self.script_path('svn-apply')]
+ if patch.get('reviewer'):
+ args += ['--reviewer', patch['reviewer']]
if force:
args.append('--force')
- self.run_command(args, input=curl_process.stdout)
+ run_command(args, input=curl_process.stdout)
def run_status_and_extract_filenames(self, status_command, status_regexp):
filenames = []
- for line in self.run_command(status_command).splitlines():
+ for line in run_command(status_command).splitlines():
match = re.search(status_regexp, line)
if not match:
continue
@@ -204,6 +162,28 @@ class SCM:
changelog_paths.append(path)
return changelog_paths
+ # FIXME: Requires unit test
+ # FIXME: commit_message_for_this_commit and modified_changelogs don't
+ # really belong here. We should have a separate module for
+ # handling ChangeLogs.
+ def commit_message_for_this_commit(self):
+ changelog_paths = self.modified_changelogs()
+ if not len(changelog_paths):
+ raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
+ "All changes require a ChangeLog. See:\n"
+ "http://webkit.org/coding/contributing.html")
+
+ changelog_messages = []
+ for changelog_path in changelog_paths:
+ log("Parsing ChangeLog: %s" % changelog_path)
+ changelog_entry = ChangeLog(changelog_path).latest_entry()
+ if not changelog_entry:
+ raise ScriptError(message="Failed to parse ChangeLog: " + os.path.abspath(changelog_path))
+ changelog_messages.append(changelog_entry)
+
+ # FIXME: We should sort and label the ChangeLog messages like commit-log-editor does.
+ return CommitMessage("".join(changelog_messages).splitlines())
+
@staticmethod
def in_working_directory(path):
raise NotImplementedError, "subclasses must implement"
@@ -222,9 +202,6 @@ class SCM:
def clean_working_directory(self):
raise NotImplementedError, "subclasses must implement"
- def update_webkit(self):
- raise NotImplementedError, "subclasses must implement"
-
def status_command(self):
raise NotImplementedError, "subclasses must implement"
@@ -295,7 +272,7 @@ class SVN(SCM):
@classmethod
def value_from_svn_info(cls, path, field_name):
svn_info_args = ['svn', 'info', path]
- info_output = cls.run_command(svn_info_args)
+ info_output = run_command(svn_info_args).rstrip()
match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
if not match:
raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
@@ -323,18 +300,15 @@ class SVN(SCM):
def svn_version(self):
if not self.cached_version:
- self.cached_version = self.run_command(['svn', '--version', '--quiet'])
+ self.cached_version = run_command(['svn', '--version', '--quiet'])
return self.cached_version
def working_directory_is_clean(self):
- return self.run_command(['svn', 'diff']) == ""
+ return run_command(['svn', 'diff']) == ""
def clean_working_directory(self):
- self.run_command(['svn', 'revert', '-R', '.'])
-
- def update_webkit(self):
- self.run_command(self.script_path("update-webkit"))
+ run_command(['svn', 'revert', '-R', '.'])
def status_command(self):
return ['svn', 'status']
@@ -354,10 +328,10 @@ class SVN(SCM):
return "svn"
def create_patch(self):
- return self.run_command(self.script_path("svn-create-patch"), cwd=self.checkout_root)
+ return run_command(self.script_path("svn-create-patch"), cwd=self.checkout_root, return_stderr=False)
def diff_for_revision(self, revision):
- return self.run_command(['svn', 'diff', '-c', str(revision)])
+ return run_command(['svn', 'diff', '-c', str(revision)])
def _repository_url(self):
return self.value_from_svn_info(self.checkout_root, 'URL')
@@ -367,20 +341,20 @@ class SVN(SCM):
svn_merge_args = ['svn', 'merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
log("WARNING: svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.")
log("Running '%s'" % " ".join(svn_merge_args))
- self.run_command(svn_merge_args)
+ run_command(svn_merge_args)
def revert_files(self, file_paths):
- self.run_command(['svn', 'revert'] + file_paths)
+ run_command(['svn', 'revert'] + file_paths)
def commit_with_message(self, message):
if self.dryrun:
# Return a string which looks like a commit so that things which parse this output will succeed.
return "Dry run, no commit.\nCommitted revision 0."
- return self.run_command(['svn', 'commit', '-m', message], error_handler=commit_error_handler)
+ return run_command(['svn', 'commit', '-m', message], error_handler=commit_error_handler)
def svn_commit_log(self, svn_revision):
svn_revision = self.strip_r_from_svn_revision(str(svn_revision))
- return self.run_command(['svn', 'log', '--non-interactive', '--revision', svn_revision]);
+ return run_command(['svn', 'log', '--non-interactive', '--revision', svn_revision]);
def last_svn_commit_log(self):
# BASE is the checkout revision, HEAD is the remote repository revision
@@ -394,12 +368,12 @@ class Git(SCM):
@classmethod
def in_working_directory(cls, path):
- return cls.run_command(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=ignore_error) == "true"
+ return run_command(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true"
@classmethod
def find_checkout_root(cls, path):
# "git rev-parse --show-cdup" would be another way to get to the root
- (checkout_root, dot_git) = os.path.split(cls.run_command(['git', 'rev-parse', '--git-dir'], cwd=path))
+ (checkout_root, dot_git) = os.path.split(run_command(['git', 'rev-parse', '--git-dir'], cwd=path))
# If we were using 2.6 # checkout_root = os.path.relpath(checkout_root, path)
if not os.path.isabs(checkout_root): # Sometimes git returns relative paths
checkout_root = os.path.join(path, checkout_root)
@@ -411,28 +385,23 @@ class Git(SCM):
def discard_local_commits(self):
- self.run_command(['git', 'reset', '--hard', 'trunk'])
+ run_command(['git', 'reset', '--hard', 'trunk'])
def local_commits(self):
- return self.run_command(['git', 'log', '--pretty=oneline', 'HEAD...trunk']).splitlines()
+ return run_command(['git', 'log', '--pretty=oneline', 'HEAD...trunk']).splitlines()
def rebase_in_progress(self):
return os.path.exists(os.path.join(self.checkout_root, '.git/rebase-apply'))
def working_directory_is_clean(self):
- return self.run_command(['git', 'diff-index', 'HEAD']) == ""
+ return run_command(['git', 'diff-index', 'HEAD']) == ""
def clean_working_directory(self):
# Could run git clean here too, but that wouldn't match working_directory_is_clean
- self.run_command(['git', 'reset', '--hard', 'HEAD'])
+ run_command(['git', 'reset', '--hard', 'HEAD'])
# Aborting rebase even though this does not match working_directory_is_clean
if self.rebase_in_progress():
- self.run_command(['git', 'rebase', '--abort'])
-
- def update_webkit(self):
- # FIXME: Call update-webkit once https://bugs.webkit.org/show_bug.cgi?id=27162 is fixed.
- log("Updating working directory")
- self.run_command(['git', 'svn', 'rebase'])
+ run_command(['git', 'rebase', '--abort'])
def status_command(self):
return ['git', 'status']
@@ -450,12 +419,12 @@ class Git(SCM):
return "git"
def create_patch(self):
- return self.run_command(['git', 'diff', 'HEAD'])
+ return run_command(['git', 'diff', '--binary', 'HEAD'])
@classmethod
def git_commit_from_svn_revision(cls, revision):
# git svn find-rev always exits 0, even when the revision is not found.
- return cls.run_command(['git', 'svn', 'find-rev', 'r%s' % revision])
+ return run_command(['git', 'svn', 'find-rev', 'r%s' % revision]).rstrip()
def diff_for_revision(self, revision):
git_commit = self.git_commit_from_svn_revision(revision)
@@ -469,15 +438,15 @@ class Git(SCM):
# I think this will always fail due to ChangeLogs.
         # FIXME: We need to detect specific failure conditions and handle them.
- self.run_command(['git', 'revert', '--no-commit', git_commit], error_handler=ignore_error)
+ run_command(['git', 'revert', '--no-commit', git_commit], error_handler=Executive.ignore_error)
# Fix any ChangeLogs if necessary.
changelog_paths = self.modified_changelogs()
if len(changelog_paths):
- self.run_command([self.script_path('resolve-ChangeLogs')] + changelog_paths)
+ run_command([self.script_path('resolve-ChangeLogs')] + changelog_paths)
def revert_files(self, file_paths):
- self.run_command(['git', 'checkout', 'HEAD'] + file_paths)
+ run_command(['git', 'checkout', 'HEAD'] + file_paths)
def commit_with_message(self, message):
self.commit_locally_with_message(message)
@@ -485,27 +454,27 @@ class Git(SCM):
def svn_commit_log(self, svn_revision):
svn_revision = self.strip_r_from_svn_revision(svn_revision)
- return self.run_command(['git', 'svn', 'log', '-r', svn_revision])
+ return run_command(['git', 'svn', 'log', '-r', svn_revision])
def last_svn_commit_log(self):
- return self.run_command(['git', 'svn', 'log', '--limit=1'])
+ return run_command(['git', 'svn', 'log', '--limit=1'])
# Git-specific methods:
def create_patch_from_local_commit(self, commit_id):
- return self.run_command(['git', 'diff', commit_id + "^.." + commit_id])
+ return run_command(['git', 'diff', '--binary', commit_id + "^.." + commit_id])
def create_patch_since_local_commit(self, commit_id):
- return self.run_command(['git', 'diff', commit_id])
+ return run_command(['git', 'diff', '--binary', commit_id])
def commit_locally_with_message(self, message):
- self.run_command(['git', 'commit', '--all', '-F', '-'], input=message)
+ run_command(['git', 'commit', '--all', '-F', '-'], input=message)
def push_local_commits_to_server(self):
if self.dryrun:
# Return a string which looks like a commit so that things which parse this output will succeed.
return "Dry run, no remote commit.\nCommitted r0"
- return self.run_command(['git', 'svn', 'dcommit'], error_handler=commit_error_handler)
+ return run_command(['git', 'svn', 'dcommit'], error_handler=commit_error_handler)
# This function supports the following argument formats:
# no args : rev-list trunk..HEAD
@@ -522,14 +491,14 @@ class Git(SCM):
if '...' in commitish:
raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
elif '..' in commitish:
- commit_ids += reversed(self.run_command(['git', 'rev-list', commitish]).splitlines())
+ commit_ids += reversed(run_command(['git', 'rev-list', commitish]).splitlines())
else:
# Turn single commits or branch or tag names into commit ids.
- commit_ids += self.run_command(['git', 'rev-parse', '--revs-only', commitish]).splitlines()
+ commit_ids += run_command(['git', 'rev-parse', '--revs-only', commitish]).splitlines()
return commit_ids
def commit_message_for_local_commit(self, commit_id):
- commit_lines = self.run_command(['git', 'cat-file', 'commit', commit_id]).splitlines()
+ commit_lines = run_command(['git', 'cat-file', 'commit', commit_id]).splitlines()
# Skip the git headers.
first_line_after_headers = 0
@@ -540,4 +509,4 @@ class Git(SCM):
return CommitMessage(commit_lines[first_line_after_headers:])
def files_changed_summary_for_commit(self, commit_id):
- return self.run_command(['git', 'diff-tree', '--shortstat', '--no-commit-id', commit_id])
+ return run_command(['git', 'diff-tree', '--shortstat', '--no-commit-id', commit_id])
diff --git a/WebKitTools/Scripts/modules/scm_unittest.py b/WebKitTools/Scripts/modules/scm_unittest.py
index 784303f..8e82f3c 100644
--- a/WebKitTools/Scripts/modules/scm_unittest.py
+++ b/WebKitTools/Scripts/modules/scm_unittest.py
@@ -29,21 +29,22 @@
import base64
import os
+import os.path
import re
import stat
import subprocess
import tempfile
import unittest
import urllib
-from modules.scm import detect_scm_system, SCM, ScriptError, CheckoutNeedsUpdate, ignore_error, commit_error_handler
+from datetime import date
+from modules.executive import Executive, run_command, ScriptError
+from modules.scm import detect_scm_system, SCM, CheckoutNeedsUpdate, commit_error_handler
# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.)
# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from.
-def run(args, cwd=None):
- return SCM.run_command(args, cwd=cwd)
-
+# FIXME: This should be unified into one of the executive.py commands!
def run_silent(args, cwd=None):
process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
process.communicate() # ignore output
@@ -72,26 +73,26 @@ class SVNTestRepository:
test_file.write("test1")
test_file.flush()
- run(['svn', 'add', 'test_file'])
- run(['svn', 'commit', '--quiet', '--message', 'initial commit'])
+ run_command(['svn', 'add', 'test_file'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'initial commit'])
test_file.write("test2")
test_file.flush()
- run(['svn', 'commit', '--quiet', '--message', 'second commit'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'second commit'])
test_file.write("test3\n")
test_file.flush()
- run(['svn', 'commit', '--quiet', '--message', 'third commit'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'third commit'])
test_file.write("test4\n")
test_file.close()
- run(['svn', 'commit', '--quiet', '--message', 'fourth commit'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'fourth commit'])
# svn does not seem to update after commit as I would expect.
- run(['svn', 'update'])
+ run_command(['svn', 'update'])
@classmethod
def setup(cls, test_object):
@@ -100,18 +101,18 @@ class SVNTestRepository:
test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path # Not sure this will work on windows
# git svn complains if we don't pass --pre-1.5-compatible, not sure why:
# Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
- run(['svnadmin', 'create', '--pre-1.5-compatible', test_object.svn_repo_path])
+ run_command(['svnadmin', 'create', '--pre-1.5-compatible', test_object.svn_repo_path])
# Create a test svn checkout
test_object.svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
- run(['svn', 'checkout', '--quiet', test_object.svn_repo_url, test_object.svn_checkout_path])
+ run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url, test_object.svn_checkout_path])
cls._setup_test_commits(test_object)
@classmethod
def tear_down(cls, test_object):
- run(['rm', '-rf', test_object.svn_repo_path])
- run(['rm', '-rf', test_object.svn_checkout_path])
+ run_command(['rm', '-rf', test_object.svn_repo_path])
+ run_command(['rm', '-rf', test_object.svn_checkout_path])
# For testing the SCM baseclass directly.
class SCMClassTests(unittest.TestCase):
@@ -122,21 +123,21 @@ class SCMClassTests(unittest.TestCase):
self.dev_null.close()
def test_run_command_with_pipe(self):
- input_process = subprocess.Popen(['/bin/echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
- self.assertEqual(SCM.run_command(['/usr/bin/grep', 'bar'], input=input_process.stdout), "bar")
+ input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
+ self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
# Test the non-pipe case too:
- self.assertEqual(SCM.run_command(['/usr/bin/grep', 'bar'], input="foo\nbar"), "bar")
+ self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
command_returns_non_zero = ['/bin/sh', '--invalid-option']
# Test when the input pipe process fails.
input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
self.assertTrue(input_process.poll() != 0)
- self.assertRaises(ScriptError, SCM.run_command, ['/usr/bin/grep', 'bar'], input=input_process.stdout)
+ self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
# Test when the run_command process fails.
- input_process = subprocess.Popen(['/bin/echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments.
- self.assertRaises(ScriptError, SCM.run_command, command_returns_non_zero, input=input_process.stdout)
+ input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments.
+ self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
def test_error_handlers(self):
git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
@@ -145,12 +146,13 @@ svn: File or directory 'ChangeLog' is out of date; try updating
svn: resource out of date; try updating
"""
command_does_not_exist = ['does_not_exist', 'invalid_option']
- self.assertRaises(OSError, SCM.run_command, command_does_not_exist)
- self.assertRaises(OSError, SCM.run_command, command_does_not_exist, error_handler=ignore_error)
+ self.assertRaises(OSError, run_command, command_does_not_exist)
+ self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
command_returns_non_zero = ['/bin/sh', '--invalid-option']
- self.assertRaises(ScriptError, SCM.run_command, command_returns_non_zero)
- self.assertTrue(SCM.run_command(command_returns_non_zero, error_handler=ignore_error))
+ self.assertRaises(ScriptError, run_command, command_returns_non_zero)
+ # Check if returns error text:
+ self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
@@ -201,9 +203,185 @@ class SCMTest(unittest.TestCase):
self.assertTrue(re.search('test2', r3_patch))
self.assertTrue(re.search('test2', self.scm.diff_for_revision(2)))
+ def _shared_test_svn_apply_git_patch(self):
+ self._setup_webkittools_scripts_symlink(self.scm)
+ git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+new file mode 100644
+index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690
+GIT binary patch
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+literal 0
+HcmV?d00001
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_addition))
+ added = read_from_path('fizzbuzz7.gif')
+ self.assertEqual(512, len(added))
+ self.assertTrue(added.startswith('GIF89a'))
+ self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # The file already exists.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_addition))
+
+ git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
+GIT binary patch
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_modification))
+ modified = read_from_path('fizzbuzz7.gif')
+ self.assertEqual('foobar\n', modified)
+ self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # Applying the same modification should fail.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_modification))
+
+ git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+deleted file mode 100644
+index 323fae0..0000000
+GIT binary patch
+literal 0
+HcmV?d00001
+
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_deletion))
+ self.assertFalse(os.path.exists('fizzbuzz7.gif'))
+ self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # Cannot delete again.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_deletion))
+
class SVNTest(SCMTest):
+ @staticmethod
+ def _set_date_and_reviewer(changelog_entry):
+ # Joe Cool matches the reviewer set in SCMTest._create_patch
+ changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
+ # svn-apply will update ChangeLog entries with today's date.
+ return changelog_entry.replace('DATE_HERE', date.today().isoformat())
+
+ def test_svn_apply(self):
+ first_entry = """2009-10-26 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
+ Most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Baz Bar.
+
+ A more awesomer change yet!
+
+ * scm_unittest.py:
+"""
+ one_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog (revision 5)
++++ ChangeLog (working copy)
+@@ -1,5 +1,13 @@
+ 2009-10-26 Eric Seidel <eric@webkit.org>
+
++ Reviewed by NOBODY (OOPS!).
++
++ Second most awsome change ever.
++
++ * scm_unittest.py:
++
++2009-10-26 Eric Seidel <eric@webkit.org>
++
+ Reviewed by Foo Bar.
+
+ Most awesome change ever.
+"""
+ one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
+
+ Reviewed by REVIEWER_HERE.
+
+ Second most awsome change ever.
+
+ * scm_unittest.py:
+"""
+ two_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog (revision 5)
++++ ChangeLog (working copy)
+@@ -2,6 +2,14 @@
+
+ Reviewed by Foo Bar.
+
++ Second most awsome change ever.
++
++ * scm_unittest.py:
++
++2009-10-26 Eric Seidel <eric@webkit.org>
++
++ Reviewed by Foo Bar.
++
+ Most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
+ Second most awsome change ever.
+
+ * scm_unittest.py:
+"""
+ write_into_file_at_path('ChangeLog', first_entry)
+ run_command(['svn', 'add', 'ChangeLog'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
+
+ # Patch files were created against just 'first_entry'.
+ # Add a second commit to make svn-apply have to apply the patches with fuzz.
+ changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
+ write_into_file_at_path('ChangeLog', changelog_contents)
+ run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
+
+ self._setup_webkittools_scripts_symlink(self.scm)
+ self.scm.apply_patch(self._create_patch(one_line_overlap_patch))
+ expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
+ self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
+ self.scm.revert_files(['ChangeLog'])
+ self.scm.apply_patch(self._create_patch(two_line_overlap_patch))
+ expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
+ self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
def setUp(self):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
@@ -217,13 +395,13 @@ class SVNTest(SCMTest):
os.mkdir(test_dir_path)
test_file_path = os.path.join(test_dir_path, 'test_file2')
write_into_file_at_path(test_file_path, 'test content')
- run(['svn', 'add', 'test_dir'])
+ run_command(['svn', 'add', 'test_dir'])
# create_patch depends on 'svn-create-patch', so make a dummy version.
scripts_path = os.path.join(self.svn_checkout_path, 'WebKitTools', 'Scripts')
os.makedirs(scripts_path)
create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
- write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD')
+        write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo implementations accept -n.
os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
# Change into our test directory and run the create_patch command.
@@ -232,7 +410,7 @@ class SVNTest(SCMTest):
self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
patch_contents = scm.create_patch()
# Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
- self.assertEqual(os.path.realpath(scm.checkout_root), patch_contents)
+ self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
def test_detection(self):
scm = detect_scm_system(self.svn_checkout_path)
@@ -262,13 +440,13 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
def test_apply_svn_patch(self):
scm = detect_scm_system(self.svn_checkout_path)
- patch = self._create_patch(run(['svn', 'diff', '-r4:3']))
+ patch = self._create_patch(run_command(['svn', 'diff', '-r4:3']))
self._setup_webkittools_scripts_symlink(scm)
scm.apply_patch(patch)
def test_apply_svn_patch_force(self):
scm = detect_scm_system(self.svn_checkout_path)
- patch = self._create_patch(run(['svn', 'diff', '-r2:4']))
+ patch = self._create_patch(run_command(['svn', 'diff', '-r2:4']))
self._setup_webkittools_scripts_symlink(scm)
self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
@@ -286,6 +464,8 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
+ def test_svn_apply_git_patch(self):
+ self._shared_test_svn_apply_git_patch()
class GitTest(SCMTest):
@@ -295,7 +475,7 @@ class GitTest(SCMTest):
run_silent(['git', 'svn', '--quiet', 'clone', self.svn_repo_url, self.git_checkout_path])
def _tear_down_git_clone_of_svn_repository(self):
- run(['rm', '-rf', self.git_checkout_path])
+ run_command(['rm', '-rf', self.git_checkout_path])
def setUp(self):
SVNTestRepository.setup(self)
@@ -315,11 +495,11 @@ class GitTest(SCMTest):
def test_rebase_in_progress(self):
svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
write_into_file_at_path(svn_test_file, "svn_checkout")
- run(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
+ run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
git_test_file = os.path.join(self.git_checkout_path, 'test_file')
write_into_file_at_path(git_test_file, "git_checkout")
- run(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
+ run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
# --quiet doesn't make git svn silent, so use run_silent to redirect output
self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
@@ -351,19 +531,19 @@ class GitTest(SCMTest):
actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
expected_commits = []
- expected_commits += reversed(run(['git', 'rev-list', commit_range]).splitlines())
+ expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
self.assertEqual(actual_commits, expected_commits)
def test_apply_git_patch(self):
scm = detect_scm_system(self.git_checkout_path)
- patch = self._create_patch(run(['git', 'diff', 'HEAD..HEAD^']))
+ patch = self._create_patch(run_command(['git', 'diff', 'HEAD..HEAD^']))
self._setup_webkittools_scripts_symlink(scm)
scm.apply_patch(patch)
def test_apply_git_patch_force(self):
scm = detect_scm_system(self.git_checkout_path)
- patch = self._create_patch(run(['git', 'diff', 'HEAD~2..HEAD']))
+ patch = self._create_patch(run_command(['git', 'diff', 'HEAD~2..HEAD']))
self._setup_webkittools_scripts_symlink(scm)
self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
@@ -376,5 +556,39 @@ class GitTest(SCMTest):
def test_diff_for_revision(self):
self._shared_test_diff_for_revision()
+ def test_svn_apply_git_patch(self):
+ self._shared_test_svn_apply_git_patch()
+
+ def test_create_binary_patch(self):
+ # Create a git binary patch and check the contents.
+ scm = detect_scm_system(self.git_checkout_path)
+ test_file_name = 'binary_file'
+ test_file_path = os.path.join(self.git_checkout_path, test_file_name)
+ file_contents = ''.join(map(chr, range(256)))
+ write_into_file_at_path(test_file_path, file_contents)
+ run_command(['git', 'add', test_file_name])
+ patch = scm.create_patch()
+ self.assertTrue(re.search(r'\nliteral 0\n', patch))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch))
+
+ # Check if we can apply the created patch.
+ run_command(['git', 'rm', '-f', test_file_name])
+ self._setup_webkittools_scripts_symlink(scm)
+ self.scm.apply_patch(self._create_patch(patch))
+ self.assertEqual(file_contents, read_from_path(test_file_path))
+
+ # Check if we can create a patch from a local commit.
+ write_into_file_at_path(test_file_path, file_contents)
+ run_command(['git', 'add', test_file_name])
+ run_command(['git', 'commit', '-m', 'binary diff'])
+ patch_from_local_commit = scm.create_patch_from_local_commit('HEAD')
+ self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))
+ patch_since_local_commit = scm.create_patch_since_local_commit('HEAD^1')
+ self.assertTrue(re.search(r'\nliteral 0\n', patch_since_local_commit))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch_since_local_commit))
+ self.assertEqual(patch_from_local_commit, patch_since_local_commit)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/WebKitTools/Scripts/modules/statusbot.py b/WebKitTools/Scripts/modules/statusbot.py
index 9c9ba04..350aebf 100644
--- a/WebKitTools/Scripts/modules/statusbot.py
+++ b/WebKitTools/Scripts/modules/statusbot.py
@@ -46,21 +46,44 @@ http://wwwsearch.sourceforge.net/mechanize/
"""
exit(1)
+import urllib2
+
+
class StatusBot:
default_host = "webkit-commit-queue.appspot.com"
def __init__(self, host=default_host):
+ self.set_host(host)
+ self.browser = Browser()
+
+ def set_host(self, host):
self.statusbot_host = host
self.statusbot_server_url = "http://%s" % self.statusbot_host
- self.update_status_url = "%s/update_status" % self.statusbot_server_url
- self.browser = Browser()
- def update_status(self, status, bug_id=None, patch_id=None):
- self.browser.open(self.update_status_url)
+ def update_status(self, queue_name, status, patch=None, results_file=None):
+ # During unit testing, statusbot_host is None
+ if not self.statusbot_host:
+ return
+
+ update_status_url = "%s/update-status" % self.statusbot_server_url
+ self.browser.open(update_status_url)
self.browser.select_form(name="update_status")
- if bug_id:
- self.browser['bug_id'] = str(bug_id)
- if patch_id:
- self.browser['patch_id'] = str(patch_id)
+ self.browser['queue_name'] = queue_name
+ if patch:
+ if patch.get('bug_id'):
+ self.browser['bug_id'] = str(patch['bug_id'])
+ if patch.get('id'):
+ self.browser['patch_id'] = str(patch['id'])
self.browser['status'] = status
+ if results_file:
+ self.browser.add_file(results_file, "text/plain", "results.txt", 'results_file')
self.browser.submit()
+
+ def patch_status(self, queue_name, patch_id):
+ update_status_url = "%s/patch-status/%s/%s" % (self.statusbot_server_url, queue_name, patch_id)
+ try:
+ return urllib2.urlopen(update_status_url).read()
+ except urllib2.HTTPError, e:
+ if e.code == 404:
+ return None
+ raise e
diff --git a/WebKitTools/Scripts/modules/stepsequence.py b/WebKitTools/Scripts/modules/stepsequence.py
new file mode 100644
index 0000000..6f085c9
--- /dev/null
+++ b/WebKitTools/Scripts/modules/stepsequence.py
@@ -0,0 +1,68 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from modules.buildsteps import CommandOptions
+from modules.executive import ScriptError
+from modules.logging import log
+from modules.scm import CheckoutNeedsUpdate
+from modules.workqueue import WorkQueue
+
+
+class StepSequence(object):
+ def __init__(self, steps):
+ self._steps = steps
+
+ def options(self):
+ collected_options = [
+ CommandOptions.parent_command,
+ CommandOptions.quiet,
+ ]
+ for step in self._steps:
+ collected_options = collected_options + step.options()
+ # Remove duplicates.
+ collected_options = sorted(set(collected_options))
+ return collected_options
+
+ def _run(self, tool, options, patch):
+ for step in self._steps:
+ step(tool, options, patch).run()
+
+ def run_and_handle_errors(self, tool, options, patch=None):
+ try:
+ self._run(tool, options, patch)
+ except CheckoutNeedsUpdate, e:
+ log("Commit failed because the checkout is out of date. Please update and try again.")
+ log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.")
+ WorkQueue.exit_after_handled_error(e)
+ except ScriptError, e:
+ if not options.quiet:
+ log(e.message_with_output())
+ if options.parent_command:
+ command = tool.command_by_name(options.parent_command)
+ command.handle_script_error(tool, patch, e)
+ WorkQueue.exit_after_handled_error(e)
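A minimal sketch of driving a sequence end to end (EchoStep and MockOptions are hypothetical stand-ins for illustration only; real steps live in modules/buildsteps.py):

    from modules.stepsequence import StepSequence

    class EchoStep(object):
        def __init__(self, tool, options, patch):
            self._patch = patch

        @classmethod
        def options(cls):
            return []  # real steps contribute make_option() entries here

        def run(self):
            print "processing patch %s" % self._patch["id"]

    class MockOptions(object):
        quiet = True
        parent_command = None

    sequence = StepSequence([EchoStep])
    sequence.run_and_handle_errors(tool=None, options=MockOptions(), patch={"id": 1234, "bug_id": 5678})

Each step is constructed with (tool, options, patch) and run in order; CheckoutNeedsUpdate and ScriptError are translated into the handled-error exit code that WorkQueue child processes use.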
diff --git a/WebKitTools/Scripts/modules/webkitport.py b/WebKitTools/Scripts/modules/webkitport.py
new file mode 100644
index 0000000..849ac4b
--- /dev/null
+++ b/WebKitTools/Scripts/modules/webkitport.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for understanding the various ports
+
+import os
+
+from optparse import make_option
+
+class WebKitPort():
+ # We might need to pass scm into this function for scm.checkout_root
+ @classmethod
+ def script_path(cls, script_name):
+ return os.path.join("WebKitTools", "Scripts", script_name)
+
+ @staticmethod
+ def port(port_name):
+ if port_name == "mac":
+ return MacPort
+ if port_name == "qt":
+ return QtPort
+ if port_name == "chromium":
+ return ChromiumPort
+ # FIXME: We should default to WinPort on Windows.
+ return MacPort
+
+ @classmethod
+ def name(cls):
+ raise NotImplementedError, "subclasses must implement"
+
+ @classmethod
+ def flag(cls):
+ raise NotImplementedError, "subclasses must implement"
+
+ @classmethod
+ def update_webkit_command(cls):
+ return [cls.script_path("update-webkit")]
+
+ @classmethod
+ def build_webkit_command(cls):
+ return [cls.script_path("build-webkit")]
+
+ @classmethod
+ def run_webkit_tests_command(cls):
+ return [cls.script_path("run-webkit-tests")]
+
+
+class MacPort(WebKitPort):
+ @classmethod
+ def name(cls):
+ return "Mac"
+
+ @classmethod
+ def flag(cls):
+ return "--port=mac"
+
+
+class QtPort(WebKitPort):
+ @classmethod
+ def name(cls):
+ return "Qt"
+
+ @classmethod
+ def flag(cls):
+ return "--port=qt"
+
+ @classmethod
+ def build_webkit_command(cls):
+ command = WebKitPort.build_webkit_command()
+ command.append("--qt")
+ return command
+
+
+class ChromiumPort(WebKitPort):
+ @classmethod
+ def name(cls):
+ return "Chromium"
+
+ @classmethod
+ def flag(cls):
+ return "--port=chromium"
+
+ @classmethod
+ def update_webkit_command(cls):
+ command = WebKitPort.update_webkit_command()
+ command.append("--chromium")
+ return command
+
+ @classmethod
+ def build_webkit_command(cls):
+ command = WebKitPort.build_webkit_command()
+ command.append("--chromium")
+ return command
diff --git a/WebKitTools/Scripts/modules/webkitport_unittest.py b/WebKitTools/Scripts/modules/webkitport_unittest.py
new file mode 100644
index 0000000..c713e83
--- /dev/null
+++ b/WebKitTools/Scripts/modules/webkitport_unittest.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from modules.webkitport import WebKitPort, MacPort, QtPort, ChromiumPort
+
+class WebKitPortTest(unittest.TestCase):
+ def test_mac_port(self):
+ self.assertEquals(MacPort.name(), "Mac")
+ self.assertEquals(MacPort.flag(), "--port=mac")
+ self.assertEquals(MacPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(MacPort.build_webkit_command(), [WebKitPort.script_path("build-webkit")])
+
+ def test_qt_port(self):
+ self.assertEquals(QtPort.name(), "Qt")
+ self.assertEquals(QtPort.flag(), "--port=qt")
+ self.assertEquals(QtPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(QtPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--qt"])
+
+ def test_chromium_port(self):
+ self.assertEquals(ChromiumPort.name(), "Chromium")
+ self.assertEquals(ChromiumPort.flag(), "--port=chromium")
+ self.assertEquals(ChromiumPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(ChromiumPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--chromium"])
+ self.assertEquals(ChromiumPort.update_webkit_command(), [WebKitPort.script_path("update-webkit"), "--chromium"])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/modules/workqueue.py b/WebKitTools/Scripts/modules/workqueue.py
new file mode 100644
index 0000000..f8cbba8
--- /dev/null
+++ b/WebKitTools/Scripts/modules/workqueue.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import time
+import traceback
+
+from datetime import datetime, timedelta
+
+from modules.executive import ScriptError
+from modules.logging import log, OutputTee
+from modules.statusbot import StatusBot
+
+class WorkQueueDelegate:
+ def queue_name(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def queue_log_path(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def work_logs_directory(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def status_host(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def begin_work_queue(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_continue_work_queue(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def next_work_item(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_proceed_with_work_item(self, work_item):
+ # returns (safe_to_proceed, waiting_message, patch)
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def handle_unexpected_error(self, work_item, message):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class WorkQueue:
+ def __init__(self, name, delegate):
+ self._name = name
+ self._delegate = delegate
+ self._output_tee = OutputTee()
+
+ log_date_format = "%Y-%m-%d %H:%M:%S"
+ sleep_duration_text = "5 mins"
+ seconds_to_sleep = 300
+ handled_error_code = 2
+
+ # Child processes exit with a special code so that the parent queue process can detect that the error was already handled.
+ @classmethod
+ def exit_after_handled_error(cls, error):
+ log(error)
+ exit(cls.handled_error_code)
+
+ def run(self):
+ self._begin_logging()
+ self.status_bot = StatusBot(host=self._delegate.status_host())
+
+ self._delegate.begin_work_queue()
+ while (self._delegate.should_continue_work_queue()):
+ self._ensure_work_log_closed()
+ try:
+ work_item = self._delegate.next_work_item()
+ if not work_item:
+ self._update_status_and_sleep("Empty queue.")
+ continue
+ (safe_to_proceed, waiting_message, patch) = self._delegate.should_proceed_with_work_item(work_item)
+ if not safe_to_proceed:
+ self._update_status_and_sleep(waiting_message)
+ continue
+ self.status_bot.update_status(self._name, waiting_message, patch)
+ except KeyboardInterrupt, e:
+ log("\nUser terminated queue.")
+ return 1
+ except Exception, e:
+ traceback.print_exc()
+ # Don't try to tell the status bot, in case telling it causes an exception.
+ self._sleep("Exception while preparing queue: %s." % e)
+ continue
+
+ # FIXME: Work logs should not depend on bug_id specifically.
+ self._open_work_log(patch["bug_id"])
+ try:
+ self._delegate.process_work_item(work_item)
+ except ScriptError, e:
+ # Use a special exit code to indicate that the error was already
+ # handled in the child process and we should just keep looping.
+ if e.exit_code == self.handled_error_code:
+ continue
+ message = "Unexpected failure when landing patch! Please file a bug against bugzilla-tool.\n%s" % e.message_with_output()
+ self._delegate.handle_unexpected_error(work_item, message)
+ # Never reached.
+ self._ensure_work_log_closed()
+
+ def _begin_logging(self):
+ self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
+ self._work_log = None
+
+ def _open_work_log(self, bug_id):
+ work_log_path = os.path.join(self._delegate.work_logs_directory(), "%s.log" % bug_id)
+ self._work_log = self._output_tee.add_log(work_log_path)
+
+ def _ensure_work_log_closed(self):
+ # If we still have a bug log open, close it.
+ if self._work_log:
+ self._output_tee.remove_log(self._work_log)
+ self._work_log = None
+
+ @classmethod
+ def _sleep_message(cls, message):
+ wake_time = datetime.now() + timedelta(seconds=cls.seconds_to_sleep)
+ return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(cls.log_date_format), cls.sleep_duration_text)
+
+ @classmethod
+ def _sleep(cls, message):
+ log(cls._sleep_message(message))
+ time.sleep(cls.seconds_to_sleep)
+
+ def _update_status_and_sleep(self, message):
+ status_message = self._sleep_message(message)
+ self.status_bot.update_status(self._name, status_message)
+ log(status_message)
+ time.sleep(self.seconds_to_sleep)
diff --git a/WebKitTools/Scripts/modules/workqueue_unittest.py b/WebKitTools/Scripts/modules/workqueue_unittest.py
new file mode 100644
index 0000000..ed77b5f
--- /dev/null
+++ b/WebKitTools/Scripts/modules/workqueue_unittest.py
@@ -0,0 +1,176 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from modules.executive import ScriptError
+from modules.workqueue import WorkQueue, WorkQueueDelegate
+
+class LoggingDelegate(WorkQueueDelegate):
+ def __init__(self, test):
+ self._test = test
+ self._callbacks = []
+ self._run_before = False
+
+ expected_callbacks = [
+ 'queue_log_path',
+ 'status_host',
+ 'begin_work_queue',
+ 'should_continue_work_queue',
+ 'next_work_item',
+ 'should_proceed_with_work_item',
+ 'work_logs_directory',
+ 'process_work_item',
+ 'should_continue_work_queue'
+ ]
+
+ def record(self, method_name):
+ self._callbacks.append(method_name)
+
+ def queue_log_path(self):
+ self.record("queue_log_path")
+ return os.path.join(self._test.temp_dir, "queue_log_path")
+
+ def work_logs_directory(self):
+ self.record("work_logs_directory")
+ return os.path.join(self._test.temp_dir, "work_log_path")
+
+ def status_host(self):
+ self.record("status_host")
+ return None
+
+ def begin_work_queue(self):
+ self.record("begin_work_queue")
+
+ def should_continue_work_queue(self):
+ self.record("should_continue_work_queue")
+ if not self._run_before:
+ self._run_before = True
+ return True
+ return False
+
+ def next_work_item(self):
+ self.record("next_work_item")
+ return "work_item"
+
+ def should_proceed_with_work_item(self, work_item):
+ self.record("should_proceed_with_work_item")
+ self._test.assertEquals(work_item, "work_item")
+ fake_patch = { 'bug_id' : 42 }
+ return (True, "waiting_message", fake_patch)
+
+ def process_work_item(self, work_item):
+ self.record("process_work_item")
+ self._test.assertEquals(work_item, "work_item")
+
+ def handle_unexpected_error(self, work_item, message):
+ self.record("handle_unexpected_error")
+ self._test.assertEquals(work_item, "work_item")
+
+
+class ThrowErrorDelegate(LoggingDelegate):
+ def __init__(self, test, error_code):
+ LoggingDelegate.__init__(self, test)
+ self.error_code = error_code
+
+ def process_work_item(self, work_item):
+ self.record("process_work_item")
+ raise ScriptError(exit_code=self.error_code)
+
+
+class NotSafeToProceedDelegate(LoggingDelegate):
+ def should_proceed_with_work_item(self, work_item):
+ self.record("should_proceed_with_work_item")
+ self._test.assertEquals(work_item, "work_item")
+ fake_patch = { 'bug_id' : 42 }
+ return (False, "waiting_message", fake_patch)
+
+
+class FastWorkQueue(WorkQueue):
+ def __init__(self, delegate):
+ WorkQueue.__init__(self, "fast-queue", delegate)
+
+ # No sleep for the wicked.
+ seconds_to_sleep = 0
+
+ def _update_status_and_sleep(self, message):
+ pass
+
+
+class WorkQueueTest(unittest.TestCase):
+ def test_trivial(self):
+ delegate = LoggingDelegate(self)
+ work_queue = WorkQueue("trivial-queue", delegate)
+ work_queue.run()
+ self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+ self.assertTrue(os.path.exists(delegate.queue_log_path()))
+ self.assertTrue(os.path.exists(os.path.join(delegate.work_logs_directory(), "42.log")))
+
+ def test_unexpected_error(self):
+ delegate = ThrowErrorDelegate(self, 3)
+ work_queue = WorkQueue("error-queue", delegate)
+ work_queue.run()
+ expected_callbacks = LoggingDelegate.expected_callbacks[:]
+ work_item_index = expected_callbacks.index('process_work_item')
+ # The unexpected error should be handled right after process_work_item starts
+ # but before any other callback. Otherwise callbacks should be normal.
+ expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error')
+ self.assertEquals(delegate._callbacks, expected_callbacks)
+
+ def test_handled_error(self):
+ delegate = ThrowErrorDelegate(self, WorkQueue.handled_error_code)
+ work_queue = WorkQueue("handled-error-queue", delegate)
+ work_queue.run()
+ self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+
+ def test_not_safe_to_proceed(self):
+ delegate = NotSafeToProceedDelegate(self)
+ work_queue = FastWorkQueue(delegate)
+ work_queue.run()
+ expected_callbacks = LoggingDelegate.expected_callbacks[:]
+ next_work_item_index = expected_callbacks.index('next_work_item')
+ # We slice out the common part of the expected callbacks.
+ # We add 2 here to include should_proceed_with_work_item, which is
+ # a pain to search for directly because it occurs twice.
+ expected_callbacks = expected_callbacks[:next_work_item_index + 2]
+ expected_callbacks.append('should_continue_work_queue')
+ self.assertEquals(delegate._callbacks, expected_callbacks)
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs")
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/prepare-ChangeLog b/WebKitTools/Scripts/prepare-ChangeLog
index dd864df..4c59af9 100755
--- a/WebKitTools/Scripts/prepare-ChangeLog
+++ b/WebKitTools/Scripts/prepare-ChangeLog
@@ -65,8 +65,8 @@ use POSIX qw(strftime);
use VCSUtils;
sub changeLogDate($);
-sub changeLogEmailAddress($);
-sub changeLogName($);
+sub changeLogEmailAddressFromArgs($);
+sub changeLogNameFromArgs($);
sub firstDirectoryOrCwd();
sub diffFromToString();
sub diffCommand(@);
@@ -77,7 +77,6 @@ sub findOriginalFileFromSvn($);
sub determinePropertyChanges($$$);
sub pluralizeAndList($$@);
sub generateFileList(\@\@\%);
-sub gitConfig($);
sub isUnmodifiedStatus($);
sub isModifiedStatus($);
sub isAddedStatus($);
@@ -246,8 +245,8 @@ if (%changed_line_ranges) {
# Get some parameters for the ChangeLog we are about to write.
my $date = changeLogDate($changeLogTimeZone);
-$name = changeLogName($name);
-$emailAddress = changeLogEmailAddress($emailAddress);
+$name = changeLogNameFromArgs($name);
+$emailAddress = changeLogEmailAddressFromArgs($emailAddress);
print STDERR " Change author: $name <$emailAddress>.\n";
@@ -443,62 +442,22 @@ sub changeLogDate($)
return $date;
}
-sub changeLogNameError($)
-{
- my ($message) = @_;
- print STDERR "$message\nEither:\n";
- print STDERR " set CHANGE_LOG_NAME in your environment\n";
- print STDERR " OR pass --name= on the command line\n";
- print STDERR " OR set REAL_NAME in your environment";
- print STDERR " OR git users can set 'git config user.name'\n";
- exit(1);
-}
-
-sub changeLogName($)
+sub changeLogNameFromArgs($)
{
my ($nameFromArgs) = @_;
- # Silently allow --git-commit to win, we could warn if $emailAddressFromArgs is defined.
+ # Silently allow --git-commit to win, we could warn if $nameFromArgs is defined.
return `$GIT log --max-count=1 --pretty=\"format:%an\" \"$gitCommit\"` if $gitCommit;
- my $name = $nameFromArgs
- || $ENV{CHANGE_LOG_NAME}
- || $ENV{REAL_NAME}
- || gitConfig("user.name")
- || (split /\s*,\s*/, (getpwuid $<)[6])[0];
-
- changeLogNameError("Failed to determine ChangeLog name.") unless $name;
- # getpwuid seems to always succeed on windows, returning the username instead of the full name. This check will catch that case.
- changeLogNameError("'$name' does not contain a space! ChangeLogs should contain your full name.") unless ($name =~ /\w \w/);
-
- return $name;
-}
-
-sub changeLogEmailAddressError($)
-{
- my ($message) = @_;
- print STDERR "$message\nEither:\n";
- print STDERR " set CHANGE_LOG_EMAIL_ADDRESS in your environment\n";
- print STDERR " OR pass --email= on the command line\n";
- print STDERR " OR set EMAIL_ADDRESS in your environment\n";
- print STDERR " OR git users can set 'git config user.email'\n";
- exit(1);
+ return $nameFromArgs || changeLogName();
}
-sub changeLogEmailAddress($)
+sub changeLogEmailAddressFromArgs($)
{
my ($emailAddressFromArgs) = @_;
# Silently allow --git-commit to win, we could warn if $emailAddressFromArgs is defined.
return `$GIT log --max-count=1 --pretty=\"format:%ae\" \"$gitCommit\"` if $gitCommit;
- my $emailAddress = $emailAddressFromArgs
- || $ENV{CHANGE_LOG_EMAIL_ADDRESS}
- || $ENV{EMAIL_ADDRESS}
- || gitConfig("user.email");
-
- changeLogEmailAddressError("Failed to determine email address for ChangeLog.") unless $emailAddress;
- changeLogEmailAddressError("Email address '$emailAddress' does not contain '\@' and is likely invalid.") unless ($emailAddress =~ /\@/);
-
- return $emailAddress;
+ return $emailAddressFromArgs || changeLogEmailAddress();
}
sub get_function_line_ranges($$)
@@ -1477,20 +1436,6 @@ sub generateFileList(\@\@\%)
close STAT;
}
-sub gitConfig($)
-{
- return unless $isGit;
-
- my ($config) = @_;
-
- my $result = `$GIT config $config`;
- if (($? >> 8) != 0) {
- $result = `$GIT repo-config $config`;
- }
- chomp $result;
- return $result;
-}
-
sub isUnmodifiedStatus($)
{
my ($status) = @_;
diff --git a/WebKitTools/Scripts/run-webkit-tests b/WebKitTools/Scripts/run-webkit-tests
index 6056035..6dd8339 100755
--- a/WebKitTools/Scripts/run-webkit-tests
+++ b/WebKitTools/Scripts/run-webkit-tests
@@ -76,6 +76,7 @@ sub buildPlatformTestHierarchy(@);
sub closeCygpaths();
sub closeDumpTool();
sub closeHTTPD();
+sub closeWebSocketServer();
sub countAndPrintLeaks($$$);
sub countFinishedTest($$$$);
sub deleteExpectedAndActualResults($);
@@ -86,15 +87,18 @@ sub fileNameWithNumber($$);
sub htmlForResultsSection(\@$&);
sub isTextOnlyTest($);
sub launchWithCurrentEnv(@);
+sub resolveAndMakeTestResultsDirectory();
sub numericcmp($$);
sub openDiffTool();
sub openDumpTool();
sub openHTTPDIfNeeded();
sub parseLeaksandPrintUniqueLeaks();
+sub openWebSocketServerIfNeeded();
sub pathcmp($$);
sub printFailureMessageForTest($$);
sub processIgnoreTests($$);
sub readFromDumpToolWithTimer(**);
+sub readSkippedFiles($);
sub recordActualResultsAndDiff($$);
sub sampleDumpTool();
sub setFileHandleNonBlocking(*$);
@@ -117,6 +121,9 @@ my $guardMalloc = '';
my $httpdPort = 8000;
my $httpdSSLPort = 8443;
my $ignoreMetrics = 0;
+my $webSocketPort = 8880;
+# wss is disabled until all platforms support pyOpenSSL.
+# my $webSocketSecurePort = 9323;
my $ignoreTests = '';
my $iterations = 1;
my $launchSafari = 1;
@@ -185,6 +192,12 @@ if (isAppleMacWebKit()) {
}
} elsif (isGtk()) {
$platform = "gtk";
+ if (!$ENV{"WEBKIT_TESTFONTS"}) {
+ print "The WEBKIT_TESTFONTS environment variable is not defined.\n";
+ print "You must set it before running the tests.\n";
+ print "Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts\n";
+ exit 1;
+ }
} elsif (isWx()) {
$platform = "wx";
} elsif (isCygwin()) {
@@ -292,8 +305,6 @@ if (!$getOptionsResult || $showHelp) {
my $ignoreSkipped = $treatSkipped eq "ignore";
my $skippedOnly = $treatSkipped eq "only";
-!$skippedOnly || @ARGV == 0 or die "--skipped=only cannot be used when tests are specified on the command line.";
-
my $configuration = configuration();
$verbose = 1 if $testsPerDumpTool == 1;
@@ -347,12 +358,12 @@ if (!defined($root)) {
}
my $dumpToolName = "DumpRenderTree";
-$dumpToolName .= "_debug" if isCygwin() && $configuration ne "Release";
+$dumpToolName .= "_debug" if isCygwin() && configurationForVisualStudio() !~ /^Release|Debug_Internal$/;
my $dumpTool = "$productDir/$dumpToolName";
die "can't find executable $dumpToolName (looked in $productDir)\n" unless -x $dumpTool;
my $imageDiffTool = "$productDir/ImageDiff";
-$imageDiffTool .= "_debug" if isCygwin() && $configuration ne "Release";
+$imageDiffTool .= "_debug" if isCygwin() && configurationForVisualStudio() !~ /^Release|Debug_Internal$/;
die "can't find executable $imageDiffTool (looked in $productDir)\n" if $pixelTests && !-x $imageDiffTool;
checkFrameworks() unless isCygwin();
@@ -412,6 +423,7 @@ if (checkWebCoreSVGSupport(0)) {
if (!$testHTTP) {
$ignoredDirectories{'http'} = 1;
+ $ignoredDirectories{'websocket'} = 1;
}
if (!$testMedia) {
@@ -449,7 +461,17 @@ if (!checkWebCoreWCSSSupport(0)) {
}
processIgnoreTests($ignoreTests, "ignore-tests") if $ignoreTests;
-readSkippedFiles() unless $ignoreSkipped;
+if (!$ignoreSkipped) {
+ if (!$skippedOnly || @ARGV == 0) {
+ readSkippedFiles("");
+ } else {
+ # Since readSkippedFiles() appends to @ARGV, we must use a foreach
+ # loop so that we only iterate over the original argument list.
+ foreach my $argnum (0 .. $#ARGV) {
+ readSkippedFiles(shift @ARGV);
+ }
+ }
+}
my @tests = findTestsToRun();
@@ -485,6 +507,11 @@ my $atLineStart = 1;
my $lastDirectory = "";
my $isHttpdOpen = 0;
+my $isWebSocketServerOpen = 0;
+my $webSocketServerPID = 0;
+my $failedToStartWebSocketServer = 0;
+# wss is disabled until all platforms support pyOpenSSL.
+# my $webSocketSecureServerPID = 0;
sub catch_pipe { $dumpToolCrashed = 1; }
$SIG{"PIPE"} = "catch_pipe";
@@ -577,15 +604,7 @@ for my $test (@tests) {
}
}
- if ($test !~ /^http\//) {
- my $testPath = "$testDirectory/$test";
- if (isCygwin()) {
- $testPath = toWindowsPath($testPath);
- } else {
- $testPath = canonpath($testPath);
- }
- print OUT "$testPath$suffixExpectedHash\n";
- } else {
+ if ($test =~ /^http\//) {
openHTTPDIfNeeded();
if ($test !~ /^http\/tests\/local\// && $test !~ /^http\/tests\/ssl\// && $test !~ /^http\/tests\/wml\// && $test !~ /^http\/tests\/media\//) {
my $path = canonpath($test);
@@ -604,6 +623,41 @@ for my $test (@tests) {
}
print OUT "$testPath$suffixExpectedHash\n";
}
+ } elsif ($test =~ /^websocket\//) {
+ if ($test =~ /^websocket\/tests\/local\//) {
+ my $testPath = "$testDirectory/$test";
+ if (isCygwin()) {
+ $testPath = toWindowsPath($testPath);
+ } else {
+ $testPath = canonpath($testPath);
+ }
+ print OUT "$testPath\n";
+ } else {
+ if (openWebSocketServerIfNeeded()) {
+ my $path = canonpath($test);
+ if ($test =~ /^websocket\/tests\/ssl\//) {
+ # wss is disabled until all platforms support pyOpenSSL.
+ print STDERR "Error: wss is disabled until all platforms support pyOpenSSL.\n";
+ # print OUT "https://127.0.0.1:$webSocketSecurePort/$path\n";
+ } else {
+ print OUT "http://127.0.0.1:$webSocketPort/$path\n";
+ }
+ } else {
+ # We failed to launch the WebSocket server. Display a useful error message rather than attempting
+ # to run tests that expect the server to be available.
+ my $errorMessagePath = "$testDirectory/websocket/resources/server-failed-to-start.html";
+ $errorMessagePath = isCygwin() ? toWindowsPath($errorMessagePath) : canonpath($errorMessagePath);
+ print OUT "$errorMessagePath\n";
+ }
+ }
+ } else {
+ my $testPath = "$testDirectory/$test";
+ if (isCygwin()) {
+ $testPath = toWindowsPath($testPath);
+ } else {
+ $testPath = canonpath($testPath);
+ }
+ print OUT "$testPath$suffixExpectedHash\n";
}
# DumpRenderTree is expected to dump two "blocks" to stdout for each test.
@@ -892,6 +946,7 @@ printf "\n%0.2fs total testing time\n", (time - $overallStartTime) . "";
!$isDumpToolOpen || die "Failed to close $dumpToolName.\n";
closeHTTPD();
+closeWebSocketServer();
# Because multiple instances of this script are running concurrently we cannot
# safely delete this symlink.
@@ -966,7 +1021,10 @@ close HTML;
my @configurationArgs = argumentsForConfiguration();
-if (isQt() || isGtk()) {
+if (isGtk()) {
+ system "WebKitTools/Scripts/run-launcher", @configurationArgs, "file://".$testResults if $launchSafari;
+} elsif (isQt()) {
+ unshift @configurationArgs, qw(-graphicssystem raster -style windows);
system "WebKitTools/Scripts/run-launcher", @configurationArgs, "file://".$testResults if $launchSafari;
} elsif (isCygwin()) {
system "cygstart", $testResults if $launchSafari;
@@ -1166,6 +1224,13 @@ sub launchWithCurrentEnv(@)
return @args;
}
+sub resolveAndMakeTestResultsDirectory()
+{
+ my $absTestResultsDirectory = File::Spec->rel2abs(glob $testResultsDirectory);
+ mkpath $absTestResultsDirectory;
+ return $absTestResultsDirectory;
+}
+
sub openDiffTool()
{
return if $isDiffToolOpen;
@@ -1238,7 +1303,7 @@ sub openDumpTool()
}
if ($useValgrind) {
- unshift @args, "valgrind";
+ unshift @args, "valgrind", "--suppressions=$platformBaseDirectory/qt/SuppressedValgrindErrors";
}
$ENV{MallocStackLogging} = 1 if $shouldCheckLeaks;
@@ -1326,11 +1391,9 @@ sub openHTTPDIfNeeded()
my $jsTestResourcesDirectory = $testDirectory . "/fast/js/resources";
my $typesConfig = "$testDirectory/http/conf/mime.types";
my $listen = "127.0.0.1:$httpdPort";
- my $absTestResultsDirectory = File::Spec->rel2abs(glob $testResultsDirectory);
+ my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory();
my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem";
- mkpath $absTestResultsDirectory;
-
my @args = (
"-f", "$httpdConfig",
"-C", "DocumentRoot \"$documentRoot\"",
@@ -1373,6 +1436,77 @@ sub closeHTTPD()
$isHttpdOpen = 0;
}
+sub openWebSocketServerIfNeeded()
+{
+ return 1 if $isWebSocketServerOpen;
+ return 0 if $failedToStartWebSocketServer;
+
+ my $webSocketServerPath = "/usr/bin/python";
+ my $webSocketPythonPath = "WebKitTools/pywebsocket";
+ my $webSocketHandlerDir = "$testDirectory";
+ my $webSocketHandlerScanDir = "$testDirectory/websocket/tests";
+ my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem";
+ my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory();
+ my $logFile = "$absTestResultsDirectory/pywebsocket_log.txt";
+
+ my @args = (
+ "WebKitTools/pywebsocket/mod_pywebsocket/standalone.py",
+ "-p", "$webSocketPort",
+ "-d", "$webSocketHandlerDir",
+ "-s", "$webSocketHandlerScanDir",
+ "-l", "$logFile",
+ );
+ # wss is disabled until all platforms support pyOpenSSL.
+ # my @argsSecure = (
+ # "WebKitTools/pywebsocket/mod_pywebsocket/standalone.py",
+ # "-p", "$webSocketSecurePort",
+ # "-d", "$webSocketHandlerDir",
+ # "-t",
+ # "-k", "$sslCertificate",
+ # "-c", "$sslCertificate",
+ # );
+
+ $ENV{"PYTHONPATH"} = $webSocketPythonPath;
+ $webSocketServerPID = open3(\*WEBSOCKETSERVER_IN, \*WEBSOCKETSERVER_OUT, \*WEBSOCKETSERVER_ERR, $webSocketServerPath, @args);
+ # wss is disabled until all platforms support pyOpenSSL.
+ # $webSocketSecureServerPID = open3(\*WEBSOCKETSECURESERVER_IN, \*WEBSOCKETSECURESERVER_OUT, \*WEBSOCKETSECURESERVER_ERR, $webSocketServerPath, @argsSecure);
+ # my @listen = ("http://127.0.0.1:$webSocketPort", "https://127.0.0.1:$webSocketSecurePort");
+ my @listen = ("http://127.0.0.1:$webSocketPort");
+ for (my $i = 0; $i < @listen; $i++) {
+ my $retryCount = 10;
+ while (system("/usr/bin/curl -k -q --silent --stderr - --output /dev/null $listen[$i]") && $retryCount) {
+ sleep 1;
+ --$retryCount;
+ }
+ unless ($retryCount) {
+ print STDERR "Timed out waiting for WebSocketServer to start.\n";
+ $failedToStartWebSocketServer = 1;
+ return 0;
+ }
+ }
+
+ $isWebSocketServerOpen = 1;
+ return 1;
+}
+
+sub closeWebSocketServer()
+{
+ return if !$isWebSocketServerOpen;
+
+ close WEBSOCKETSERVER_IN;
+ close WEBSOCKETSERVER_OUT;
+ close WEBSOCKETSERVER_ERR;
+ kill 15, $webSocketServerPID;
+
+ # wss is disabled until all platforms support pyOpenSSL.
+ # close WEBSOCKETSECURESERVER_IN;
+ # close WEBSOCKETSECURESERVER_OUT;
+ # close WEBSOCKETSECURESERVER_ERR;
+ # kill 15, $webSocketSecureServerPID;
+
+ $isWebSocketServerOpen = 0;
+}
+
sub fileNameWithNumber($$)
{
my ($base, $number) = @_;
@@ -1980,8 +2114,10 @@ sub fileShouldBeIgnored
return 0;
}
-sub readSkippedFiles
+sub readSkippedFiles($)
{
+ my ($constraintPath) = @_;
+
foreach my $level (@platformTestHierarchy) {
if (open SKIPPED, "<", "$level/Skipped") {
if ($verbose) {
@@ -1996,8 +2132,19 @@ sub readSkippedFiles
$skipped =~ s/[ \n\r]+$//;
if ($skipped && $skipped !~ /^#/) {
if ($skippedOnly) {
- if (!&fileShouldBeIgnored($skipped)) {
- push(@ARGV, $skipped);
+ if (!fileShouldBeIgnored($skipped)) {
+ if (!$constraintPath) {
+ # Always add $skipped since no constraint path was specified on the command line.
+ push(@ARGV, $skipped);
+ } elsif ($skipped =~ /^($constraintPath)/) {
+ # Add $skipped only if it matches the current path constraint, e.g.,
+ # "--skipped=only dir1" with "dir1/file1.html" on the skipped list.
+ push(@ARGV, $skipped);
+ } elsif ($constraintPath =~ /^($skipped)/) {
+ # Add current path constraint if it is more specific than the skip list entry,
+ # e.g., "--skipped=only dir1/dir2/dir3" with "dir1" on the skipped list.
+ push(@ARGV, $constraintPath);
+ }
} elsif ($verbose) {
print " $skipped\n";
}
@@ -2068,6 +2215,9 @@ sub findTestsToRun
}
}
+ # Remove duplicate tests
+ @testsToRun = keys %{{ map { $_ => 1 } @testsToRun }};
+
@testsToRun = sort pathcmp @testsToRun;
# Reverse the tests
diff --git a/WebKitTools/Scripts/run-webkit-unittests b/WebKitTools/Scripts/run-webkit-unittests
index 8d0ef1d..3487299 100755
--- a/WebKitTools/Scripts/run-webkit-unittests
+++ b/WebKitTools/Scripts/run-webkit-unittests
@@ -32,11 +32,18 @@ import unittest
from modules.bugzilla_unittest import *
from modules.buildbot_unittest import *
from modules.changelogs_unittest import *
+from modules.commands.download_unittest import *
+from modules.commands.upload_unittest import *
+from modules.commands.queries_unittest import *
+from modules.commands.queues_unittest import *
from modules.committers_unittest import *
from modules.cpp_style_unittest import *
from modules.diff_parser_unittest import *
from modules.logging_unittest import *
+from modules.multicommandtool_unittest import *
from modules.scm_unittest import *
+from modules.webkitport_unittest import *
+from modules.workqueue_unittest import *
if __name__ == "__main__":
unittest.main()
diff --git a/WebKitTools/Scripts/run-webkit-websocketserver b/WebKitTools/Scripts/run-webkit-websocketserver
new file mode 100755
index 0000000..e05303a
--- /dev/null
+++ b/WebKitTools/Scripts/run-webkit-websocketserver
@@ -0,0 +1,96 @@
+#!/usr/bin/perl
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to run Web Socket server.
+
+use strict;
+use warnings;
+
+use File::Spec;
+use FindBin;
+use IPC::Open2;
+
+use lib $FindBin::Bin;
+use webkitdirs;
+
+sub closeWebSocketServer();
+sub openWebSocketServer();
+
+my $webSocketPort = 8880;
+
+my $srcDir = sourceDir();
+my $layoutTestsName = "$srcDir/LayoutTests";
+my $testDirectory = File::Spec->rel2abs($layoutTestsName);
+my $webSocketServerPID = 0;
+
+
+print "Starting Web Socket server...\n";
+openWebSocketServer();
+print "Started.\n";
+print "Hit [ENTER] to stop it.";
+<STDIN>;
+print "Stopping Web Socket server...\n";
+closeWebSocketServer();
+print "Stopped.\n";
+exit 0;
+
+
+sub openWebSocketServer()
+{
+ my $webSocketServerPath = "/usr/bin/python";
+ my $webSocketPythonPath = "$srcDir/WebKitTools/pywebsocket";
+ my $webSocketHandlerDir = "$testDirectory";
+ my $webSocketHandlerScanDir = "$testDirectory/websocket/tests";
+
+ my @args = (
+ "$srcDir/WebKitTools/pywebsocket/mod_pywebsocket/standalone.py",
+ "-p", "$webSocketPort",
+ "-d", "$webSocketHandlerDir",
+ "-s", "$webSocketHandlerScanDir",
+ );
+
+ $ENV{"PYTHONPATH"} = $webSocketPythonPath;
+ $webSocketServerPID = open2(\*WEBSOCKETSERVER_IN, \*WEBSOCKETSERVER_OUT, $webSocketServerPath, @args);
+
+ my $listen = "http://127.0.0.1:$webSocketPort";
+ my $retryCount = 10;
+ while (system("/usr/bin/curl -k -q --silent --stderr - --output /dev/null $listen") && $retryCount) {
+ sleep 1;
+ --$retryCount;
+ }
+ die "Timed out waiting for WebSocketServer to start" unless $retryCount;
+}
+
+sub closeWebSocketServer()
+{
+ close WEBSOCKETSERVER_IN;
+ close WEBSOCKETSERVER_OUT;
+ kill 15, $webSocketServerPID;
+}
+
diff --git a/WebKitTools/Scripts/svn-apply b/WebKitTools/Scripts/svn-apply
index 7d14e3a..0373aa5 100755
--- a/WebKitTools/Scripts/svn-apply
+++ b/WebKitTools/Scripts/svn-apply
@@ -55,7 +55,7 @@
# Notice a patch that's being applied at the "wrong level" and make it work anyway.
# Do a dry run on the whole patch and don't do anything if part of the patch is
# going to fail (probably too strict unless we exclude ChangeLog).
-# Handle git-diff patches with binary changes
+# Handle git-diff patches with binary deltas
use strict;
use warnings;
@@ -75,11 +75,11 @@ sub addDirectoriesIfNeeded($);
sub applyPatch($$;$);
sub checksum($);
sub handleBinaryChange($$);
+sub handleGitBinaryChange($$);
sub isDirectoryEmptyForRemoval($);
sub patch($);
sub removeDirectoriesIfNeeded();
sub setChangeLogDateAndReviewer($$);
-sub removeEOL($);
# These should be replaced by an scm class/module:
sub scmKnowsOfFile($);
@@ -277,6 +277,39 @@ sub handleBinaryChange($$)
}
}
+sub handleGitBinaryChange($$)
+{
+ my ($fullPath, $contents) = @_;
+
+ my ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk) = decodeGitBinaryPatch($contents, $fullPath);
+ # FIXME: support "delta" type.
+ die "only literal type is supported now" if ($binaryChunkType ne "literal" || $reverseBinaryChunkType ne "literal");
+
+ my $isFileAddition = $contents =~ /\nnew file mode \d+\n/;
+ my $isFileDeletion = $contents =~ /\ndeleted file mode \d+\n/;
+
+ my $originalContents = "";
+ if (open FILE, $fullPath) {
+ die "$fullPath already exists" if $isFileAddition;
+
+ $originalContents = join("", <FILE>);
+ close FILE;
+ }
+ die "Original content of $fullPath mismatches" if $originalContents ne $reverseBinaryChunk;
+
+ if ($isFileDeletion) {
+ scmRemove($fullPath);
+ } else {
+ # Addition or Modification
+ open FILE, ">", $fullPath or die "Failed to open $fullPath.";
+ print FILE $binaryChunk;
+ close FILE;
+ if ($isFileAddition) {
+ scmAdd($fullPath);
+ }
+ }
+}
+
sub isDirectoryEmptyForRemoval($)
{
my ($dir) = @_;
@@ -311,12 +344,14 @@ sub patch($)
my $deletion = 0;
my $addition = 0;
my $isBinary = 0;
+ my $isGitBinary = 0;
$addition = 1 if ($patch =~ /\n--- .+\(revision 0\)\r?\n/ || $patch =~ /\n@@ -0,0 .* @@/) && !exists($copiedFiles{$fullPath});
$deletion = 1 if $patch =~ /\n@@ .* \+0,0 @@/;
$isBinary = 1 if $patch =~ /\nCannot display: file marked as a binary type\./;
+ $isGitBinary = 1 if $patch =~ /\nGIT binary patch\n/;
- if (!$addition && !$deletion && !$isBinary) {
+ if (!$addition && !$deletion && !$isBinary && !$isGitBinary) {
# Standard patch, patch tool can handle this.
if (basename($fullPath) eq "ChangeLog") {
my $changeLogDotOrigExisted = -f "${fullPath}.orig";
@@ -333,6 +368,9 @@ sub patch($)
if ($isBinary) {
# Binary change
handleBinaryChange($fullPath, $patch);
+ } elsif ($isGitBinary) {
+ # Git binary change
+ handleGitBinaryChange($fullPath, $patch);
} elsif ($deletion) {
# Deletion
applyPatch($patch, $fullPath, ["--force"]);
@@ -378,14 +416,6 @@ sub setChangeLogDateAndReviewer($$)
return $patch;
}
-sub removeEOL($)
-{
- my ($line) = @_;
-
- $line =~ s/[\r\n]+$//g;
- return $line;
-}
-
# This could be made into a more general "status" call, except svn and git
# have different ideas about "moving" files which might get confusing.
sub scmWillDeleteFile($)
diff --git a/WebKitTools/Scripts/svn-unapply b/WebKitTools/Scripts/svn-unapply
index 94bb1ce..c277a3e 100755
--- a/WebKitTools/Scripts/svn-unapply
+++ b/WebKitTools/Scripts/svn-unapply
@@ -73,7 +73,6 @@ use VCSUtils;
sub checksum($);
sub patch($);
sub revertDirectories();
-sub removeEOL($);
sub unapplyPatch($$;$);
sub unsetChangeLogDate($$);
@@ -259,14 +258,6 @@ sub revertDirectories()
}
}
-sub removeEOL($)
-{
- my ($line) = @_;
-
- $line =~ s/[\r\n]+$//g;
- return $line;
-}
-
sub unapplyPatch($$;$)
{
my ($patch, $fullPath, $options) = @_;
diff --git a/WebKitTools/Scripts/update-webkit b/WebKitTools/Scripts/update-webkit
index b503004..7602c41 100755
--- a/WebKitTools/Scripts/update-webkit
+++ b/WebKitTools/Scripts/update-webkit
@@ -39,6 +39,7 @@ use VCSUtils;
use webkitdirs;
sub runSvnUpdate();
+sub runGitUpdate();
# Handle options
my $quiet = '';
@@ -46,6 +47,11 @@ my $showHelp;
determineIsChromium();
+chdirWebKit();
+
+my $isGit = isGit();
+my $isSVN = isSVN();
+
my $getOptionsResult = GetOptions(
'h|help' => \$showHelp,
'q|quiet' => \$quiet,
@@ -67,20 +73,23 @@ push @svnOptions, '-q' if $quiet;
# Don't prompt when using svn-1.6 or newer.
push @svnOptions, qw(--accept postpone) if isSVNVersion16OrNewer();
-chdirWebKit();
print "Updating OpenSource\n" unless $quiet;
-runSvnUpdate();
+runSvnUpdate() if $isSVN;
+runGitUpdate() if $isGit;
if (-d "../Internal") {
chdir("../Internal");
print "Updating Internal\n" unless $quiet;
- runSvnUpdate();
+ runSvnUpdate() if $isSVN;
+ runGitUpdate() if $isGit;
} elsif (isChromium()) {
system("perl", "WebKitTools/Scripts/update-webkit-chromium") == 0 or die $!;
} elsif (isAppleWinWebKit()) {
system("perl", "WebKitTools/Scripts/update-webkit-auxiliary-libs") == 0 or die;
}
+setupAppleWinEnv() if isAppleWinWebKit();
+
exit 0;
sub runSvnUpdate()
@@ -104,3 +113,8 @@ sub runSvnUpdate()
or die "Could not open resolve-ChangeLogs script: $!.\n";
}
}
+
+sub runGitUpdate()
+{
+ system("git", "svn", "rebase") == 0 or die;
+}
diff --git a/WebKitTools/Scripts/update-webkit-chromium b/WebKitTools/Scripts/update-webkit-chromium
index a0cc19a..779b9a6 100644..100755
--- a/WebKitTools/Scripts/update-webkit-chromium
+++ b/WebKitTools/Scripts/update-webkit-chromium
@@ -48,4 +48,4 @@ if (! -e ".gclient") {
# Execute gclient sync.
print "Updating chromium port dependencies using gclient...\n";
-system("gclient", "sync") == 0 or die $!;
+system("gclient", "sync", "--force") == 0 or die $!;
diff --git a/WebKitTools/Scripts/validate-committer-lists b/WebKitTools/Scripts/validate-committer-lists
new file mode 100755
index 0000000..05f2b36
--- /dev/null
+++ b/WebKitTools/Scripts/validate-committer-lists
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Checks Python's known list of committers against lists.webkit.org and SVN history.
+
+
+import os
+import subprocess
+import re
+import urllib2
+from datetime import date, datetime, timedelta
+from modules.committers import CommitterList
+from modules.logging import log, error
+
+# WebKit includes a built copy of BeautifulSoup in Scripts/modules
+# so this import should always succeed.
+from modules.BeautifulSoup import BeautifulSoup
+
+def print_list_if_non_empty(title, list_to_print):
+ if not list_to_print:
+ return
+ print # Newline before the list
+ print title
+ for item in list_to_print:
+ print item
+
+class CommitterListFromMailingList:
+ committers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-committers"
+ reviewers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-reviewers"
+
+ def _fetch_emails_from_page(self, url):
+ page = urllib2.urlopen(url)
+ soup = BeautifulSoup(page)
+
+ emails = []
+ # Grab the email addresses from the list items on the roster page.
+ for email_item in soup('li'):
+ email_link = email_item.find("a")
+ email = email_link.string.replace(" at ", "@") # The email is obfuscated using " at " instead of "@".
+ emails.append(email)
+ return emails
+
+ @staticmethod
+ def _committers_not_found_in_email_list(committers, emails):
+ missing_from_mailing_list = []
+ for committer in committers:
+ for email in committer.emails:
+ if email in emails:
+ break
+ else:
+ missing_from_mailing_list.append(committer)
+ return missing_from_mailing_list
+
+ @staticmethod
+ def _emails_not_found_in_committer_list(committers, emails):
+ email_to_committer_map = {}
+ for committer in committers:
+ for email in committer.emails:
+ email_to_committer_map[email] = committer
+
+ return filter(lambda email: not email_to_committer_map.get(email), emails)
+
+ def check_for_emails_missing_from_list(self, committer_list):
+ committer_emails = self._fetch_emails_from_page(self.committers_list_url)
+ list_name = "webkit-committers@lists.webkit.org"
+
+ missing_from_mailing_list = self._committers_not_found_in_email_list(committer_list.committers(), committer_emails)
+ print_list_if_non_empty("Committers missing from %s:" % list_name, missing_from_mailing_list)
+
+ users_missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), committer_emails)
+ print_list_if_non_empty("Subcribers to %s missing from committer.py:" % list_name, users_missing_from_committers)
+
+
+ reviewer_emails = self._fetch_emails_from_page(self.reviewers_list_url)
+ list_name = "webkit-reviewers@lists.webkit.org"
+
+ missing_from_mailing_list = self._committers_not_found_in_email_list(committer_list.reviewers(), reviewer_emails)
+ print_list_if_non_empty("Reviewers missing from %s:" % list_name, missing_from_mailing_list)
+
+ missing_from_reviewers = self._emails_not_found_in_committer_list(committer_list.reviewers(), reviewer_emails)
+ print_list_if_non_empty("Subcribers to %s missing from reviewers in committer.py:" % list_name, missing_from_reviewers)
+
+ missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), reviewer_emails)
+ print_list_if_non_empty("Subcribers to %s completely missing from committers.py" % list_name, missing_from_committers)
+
+
+class CommitterListFromGit:
+ login_to_email_address = {
+ 'aliceli1' : 'alice.liu@apple.com',
+ 'bdash' : 'mrowe@apple.com',
+ 'bdibello' : 'bdibello@apple.com', # Bruce DiBello, only 4 commits: r10023, r9548, r9538, r9535
+ 'cblu' : 'cblu@apple.com',
+ 'cpeterse' : 'cpetersen@apple.com',
+ 'eseidel' : 'eric@webkit.org',
+ 'gdennis' : 'gdennis@webkit.org',
+ 'goldsmit' : 'goldsmit@apple.com', # Debbie Goldsmith, only one commit r8839
+ 'gramps' : 'gramps@apple.com',
+ 'honeycutt' : 'jhoneycutt@apple.com',
+ 'jdevalk' : 'joost@webkit.org',
+ 'jens' : 'jens@apple.com',
+ 'justing' : 'justin.garcia@apple.com',
+ 'kali' : 'kali@apple.com', # Christy Warren, did BIDI work, 5 commits: r8815, r8802, r8801, r8791, r8773, r8603
+ 'kjk' : 'kkowalczyk@gmail.com',
+ 'kmccullo' : 'kmccullough@apple.com',
+ 'kocienda' : 'kocienda@apple.com',
+ 'lamadio' : 'lamadio@apple.com', # Lou Amadio, only 2 commits: r17949 and r17783
+ 'lars' : 'lars@kde.org',
+ 'lweintraub' : 'lweintraub@apple.com',
+ 'lypanov' : 'lypanov@kde.org',
+ 'mhay' : 'mhay@apple.com', # Mike Hay, 3 commits: r3813, r2552, r2548
+ 'ouch' : 'ouch@apple.com', # John Louch
+ 'pyeh' : 'patti@apple.com', # Patti Yeh, did VoiceOver work in WebKit
+ 'rjw' : 'rjw@apple.com',
+ 'seangies' : 'seangies@apple.com', # Sean Gies?, only 5 commits: r16600, r16592, r16511, r16489, r16484
+ 'sheridan' : 'sheridan@apple.com', # Shelly Sheridan
+ 'thatcher' : 'timothy@apple.com',
+ 'tomernic' : 'timo@apple.com',
+ 'trey' : 'trey@usa.net',
+ 'tristan' : 'tristan@apple.com',
+ 'vicki' : 'vicki@apple.com',
+ 'voas' : 'voas@apple.com', # Ed Voas, did some Carbon work in WebKit
+ 'zack' : 'zack@kde.org',
+ 'zimmermann' : 'zimmermann@webkit.org',
+ }
+
+ def __init__(self):
+ self._last_commit_time_by_author_cache = {}
+
+ def _fetch_authors_and_last_commit_time_from_git_log(self):
+ last_commit_dates = {}
+ git_log_args = ['git', 'log', '--reverse', '--pretty=format:%ae %at']
+ process = subprocess.Popen(git_log_args, stdout=subprocess.PIPE)
+
+ # eric@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc 1257090899
+ line_regexp = re.compile("^(?P<author>.+)@\S+ (?P<timestamp>\d+)$")
+ while True:
+ output_line = process.stdout.readline()
+ if output_line == '' and process.poll() != None:
+ return last_commit_dates
+
+ match_result = line_regexp.match(output_line)
+ if not match_result:
+ error("Failed to match line: %s" % output_line)
+ last_commit_dates[match_result.group('author')] = float(match_result.group('timestamp'))
+
+ def _fill_in_emails_for_old_logins(self):
+ authors_missing_email = filter(lambda author: author.find('@') == -1, self._last_commit_time_by_author_cache)
+ authors_with_email = filter(lambda author: author.find('@') != -1, self._last_commit_time_by_author_cache)
+ prefixes_of_authors_with_email = map(lambda author: author.split('@')[0], authors_with_email)
+
+ for author in authors_missing_email:
+ # First check to see if we have a manual mapping from login to email.
+ author_email = self.login_to_email_address.get(author)
+
+ # Most old logins like 'darin' are now just 'darin@apple.com', so check for a prefix match if a manual mapping was not found.
+ if not author_email and author in prefixes_of_authors_with_email:
+ author_email_index = prefixes_of_authors_with_email.index(author)
+ author_email = authors_with_email[author_email_index]
+
+ if not author_email:
+ # No known email mapping, likely not an active committer. We could log here.
+ continue
+
+ # log("%s -> %s" % (author, author_email)) # For sanity checking.
+ no_email_commit_time = self._last_commit_time_by_author_cache.get(author)
+ email_commit_time = self._last_commit_time_by_author_cache.get(author_email)
+ # We compare the timestamps for extra sanity, even though we could assume commits made before email addresses were used as logins are always older.
+ if not email_commit_time or email_commit_time < no_email_commit_time:
+ self._last_commit_time_by_author_cache[author_email] = no_email_commit_time
+ del self._last_commit_time_by_author_cache[author]
+
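Illustrative sketch (editor's note, not part of the patch): the prefix fallback in _fill_in_emails_for_old_logins() in isolation, with hypothetical data. An old login such as 'darin' is matched against the local part of the full addresses seen in the log.

    authors_with_email = ['darin@apple.com', 'mjs@apple.com']   # hypothetical data
    prefixes_of_authors_with_email = [author.split('@')[0] for author in authors_with_email]

    author = 'darin'  # an old login with no '@'
    author_email = None
    if author in prefixes_of_authors_with_email:
        author_email = authors_with_email[prefixes_of_authors_with_email.index(author)]
    assert author_email == 'darin@apple.com'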
+ def _last_commit_by_author(self):
+ if not self._last_commit_time_by_author_cache:
+ self._last_commit_time_by_author_cache = self._fetch_authors_and_last_commit_time_from_git_log()
+ self._fill_in_emails_for_old_logins()
+ del self._last_commit_time_by_author_cache['(no author)'] # The initial svn import isn't very useful.
+ return self._last_commit_time_by_author_cache
+
+ @staticmethod
+ def _print_three_column_row(widths, values):
+ print "%s%s%s" % (values[0].ljust(widths[0]), values[1].ljust(widths[1]), values[2])
+
+ def print_possibly_expired_committers(self, committer_list):
+ authors_and_last_commits = self._last_commit_by_author().items()
+ authors_and_last_commits.sort(lambda a,b: cmp(a[1], b[1]), reverse=True)
+ committer_cutoff = date.today() - timedelta(days=365)
+ column_widths = [13, 25]
+ print
+ print "Committers who have not committed within one year:"
+ self._print_three_column_row(column_widths, ("Last Commit", "Committer Email", "Committer Record"))
+ for (author, last_commit) in authors_and_last_commits:
+ last_commit_date = date.fromtimestamp(last_commit)
+ if committer_cutoff > last_commit_date:
+ committer_record = committer_list.committer_by_email(author)
+ self._print_three_column_row(column_widths, (str(last_commit_date), author, committer_record))
+
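Illustrative sketch (editor's note, not part of the patch): the one-year cutoff used above, standing alone. The timestamp is the sample value from the git log comment earlier in this file (2009-11-01 UTC).

    from datetime import date, timedelta

    committer_cutoff = date.today() - timedelta(days=365)
    last_commit_date = date.fromtimestamp(1257090899)
    if committer_cutoff > last_commit_date:
        pass  # this author would be reported as possibly expired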
+ def print_committers_missing_from_committer_list(self, committer_list):
+ missing_from_committers_py = []
+ last_commit_time_by_author = self._last_commit_by_author()
+ for author in last_commit_time_by_author:
+ if not committer_list.committer_by_email(author):
+ missing_from_committers_py.append(author)
+
+ never_committed = []
+ for committer in committer_list.committers():
+ for email in committer.emails:
+ if last_commit_time_by_author.get(email):
+ break
+ else:
+ never_committed.append(committer)
+
+ print_list_if_non_empty("Historical committers missing from committers.py:", missing_from_committers_py)
+ print_list_if_non_empty("Committers in committers.py who have never committed:", never_committed)
+
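Illustrative sketch (editor's note, not part of the patch): the for/else above is easy to misread; the else branch runs only when the inner loop finishes without a break, i.e. when none of a committer's emails appear in the git log. A standalone version with hypothetical data:

    last_commit_time_by_author = {'darin@apple.com': 1257090899.0}   # hypothetical
    committer_emails = [['darin@apple.com'], ['ghost@webkit.org']]   # hypothetical

    never_committed = []
    for emails in committer_emails:
        for email in emails:
            if last_commit_time_by_author.get(email):
                break
        else:
            never_committed.append(emails)

    assert never_committed == [['ghost@webkit.org']]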
+
+def main():
+ committer_list = CommitterList()
+ CommitterListFromMailingList().check_for_emails_missing_from_list(committer_list)
+
+ svn_committer_list = CommitterListFromGit()
+ svn_committer_list.print_possibly_expired_committers(committer_list)
+ svn_committer_list.print_committers_missing_from_committer_list(committer_list)
+
+if __name__ == "__main__":
+ main()
diff --git a/WebKitTools/Scripts/webkitdirs.pm b/WebKitTools/Scripts/webkitdirs.pm
index 16f9c26..64e5dc4 100644
--- a/WebKitTools/Scripts/webkitdirs.pm
+++ b/WebKitTools/Scripts/webkitdirs.pm
@@ -67,6 +67,7 @@ my $isChromium;
# Variables for Win32 support
my $vcBuildPath;
my $windowsTmpPath;
+my $windowsSourceDir;
sub determineSourceDir
{
@@ -521,9 +522,19 @@ sub builtDylibPathForName
{
my $libraryName = shift;
determineConfigurationProductDir();
- if (isQt() or isChromium()) {
+ if (isChromium()) {
return "$configurationProductDir/$libraryName";
}
+ if (isQt()) {
+ $libraryName = "QtWebKit";
+ if (isDarwin() and -d "$configurationProductDir/lib/$libraryName.framework") {
+ return "$configurationProductDir/lib/$libraryName.framework/$libraryName";
+ } elsif (isWindows() or isCygwin()) {
+ return "$configurationProductDir/lib/$libraryName.dll";
+ } else {
+ return "$configurationProductDir/lib/lib$libraryName.so";
+ }
+ }
if (isWx()) {
return "$configurationProductDir/libwxwebkit.dylib";
}
@@ -561,7 +572,7 @@ sub libraryContainsSymbol
my $path = shift;
my $symbol = shift;
- if (isCygwin()) {
+ if (isCygwin() or isWindows()) {
# FIXME: Implement this for Windows.
return 0;
}
@@ -637,7 +648,8 @@ sub checkWebCoreSVGSupport
sub hasAcceleratedCompositingSupport
{
- return 0 if isCygwin() || isQt();
+ # On platforms other than Mac, the Skipped files are used to skip compositing tests.
+ return 1 if !isAppleMacWebKit();
my $path = shift;
return libraryContainsSymbol($path, "GraphicsLayer");
@@ -657,7 +669,8 @@ sub checkWebCoreAcceleratedCompositingSupport
sub has3DRenderingSupport
{
- return 0 if isQt();
+ # On platforms other than Mac, the Skipped files are used to skip 3D tests.
+ return 1 if !isAppleMacWebKit();
my $path = shift;
return libraryContainsSymbol($path, "WebCoreHas3DRendering");
@@ -680,7 +693,7 @@ sub has3DCanvasSupport
return 0 if isQt();
my $path = shift;
- return libraryContainsSymbol($path, "CanvasShader");
+ return libraryContainsSymbol($path, "WebGLShader");
}
sub checkWebCore3DCanvasSupport
@@ -974,6 +987,11 @@ sub isSnowLeopard()
return isDarwin() && osXVersion()->{"minor"} == 6;
}
+sub isWindowsNT()
+{
+ return $ENV{'OS'} eq 'Windows_NT';
+}
+
sub relativeScriptsDir()
{
my $scriptDir = File::Spec->catpath("", File::Spec->abs2rel(dirname($0), getcwd()), "");
@@ -1025,7 +1043,7 @@ sub checkRequiredSystemConfig
print "http://developer.apple.com/tools/xcode\n";
print "*************************************************************\n";
}
- } elsif (isGtk() or isQt() or isWx() or isChromium()) {
+ } elsif (isGtk() or isQt() or isWx()) {
my @cmds = qw(flex bison gperf);
my @missing = ();
foreach my $cmd (@cmds) {
@@ -1041,6 +1059,68 @@ sub checkRequiredSystemConfig
# Win32 and other platforms may want to check for minimum config
}
+sub determineWindowsSourceDir()
+{
+ return if $windowsSourceDir;
+ my $sourceDir = sourceDir();
+ chomp($windowsSourceDir = `cygpath -w $sourceDir`);
+}
+
+sub windowsSourceDir()
+{
+ determineWindowsSourceDir();
+ return $windowsSourceDir;
+}
+
+sub windowsLibrariesDir()
+{
+ return windowsSourceDir() . "\\WebKitLibraries\\win";
+}
+
+sub windowsOutputDir()
+{
+ return windowsSourceDir() . "\\WebKitBuild";
+}
+
+sub setupAppleWinEnv()
+{
+ return unless isAppleWinWebKit();
+
+ if (isWindowsNT()) {
+ my $restartNeeded = 0;
+ my %variablesToSet = ();
+
+ # Setting the environment variable 'CYGWIN' to 'tty' makes Cygwin enable extra support (i.e., termios)
+ # for UNIX-like ttys in the Windows console
+ $variablesToSet{CYGWIN} = "tty" unless $ENV{CYGWIN};
+
+ # These environment variables must be set to be able to build inside Visual Studio.
+ $variablesToSet{WEBKITLIBRARIESDIR} = windowsLibrariesDir() unless $ENV{WEBKITLIBRARIESDIR};
+ $variablesToSet{WEBKITOUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKITOUTPUTDIR};
+
+ foreach my $variable (keys %variablesToSet) {
+ print "Setting the Environment Variable '" . $variable . "' to '" . $variablesToSet{$variable} . "'\n\n";
+ system qw(regtool -s set), '\\HKEY_CURRENT_USER\\Environment\\' . $variable, $variablesToSet{$variable};
+ $restartNeeded ||= $variable eq "WEBKITLIBRARIESDIR" || $variable eq "WEBKITOUTPUTDIR";
+ }
+
+ if ($restartNeeded) {
+ print "Please restart your computer before attempting to build inside Visual Studio.\n\n";
+ }
+ } else {
+ if (!$ENV{'WEBKITLIBRARIESDIR'}) {
+ print "Warning: You must set the 'WebKitLibrariesDir' environment variable\n";
+ print "         to be able to build WebKit from within Visual Studio.\n";
+ print " Make sure that 'WebKitLibrariesDir' points to the\n";
+ print " 'WebKitLibraries/win' directory, not the 'WebKitLibraries/' directory.\n\n";
+ }
+ if (!$ENV{'WEBKITOUTPUTDIR'}) {
+ print "Warning: You must set the 'WebKitOutputDir' environment variable\n";
+ print "         to be able to build WebKit from within Visual Studio.\n\n";
+ }
+ }
+}
+
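Illustrative sketch (editor's note, not part of the patch): setupAppleWinEnv() only fills in variables that are not already set. A rough Python equivalent of that check, with hypothetical paths:

    import os

    defaults = {                                       # hypothetical values
        'CYGWIN': 'tty',
        'WEBKITLIBRARIESDIR': 'C:\\WebKit\\WebKitLibraries\\win',
        'WEBKITOUTPUTDIR': 'C:\\WebKit\\WebKitBuild',
    }
    for name, value in defaults.items():
        if not os.environ.get(name):
            print "Setting the Environment Variable '%s' to '%s'" % (name, value)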
sub setupCygwinEnv()
{
return if !isCygwin();
@@ -1119,9 +1199,6 @@ sub buildVisualStudioProject
}
my $useenv = "/useenv";
- if (isChromium()) {
- $useenv = "";
- }
my @command = ($vcBuildPath, $useenv, $winProjectPath, $action, $config);
@@ -1393,6 +1470,46 @@ sub buildGtkProject($$@)
return buildAutotoolsProject($clean, @buildArgs);
}
+sub buildChromiumMakefile($$$)
+{
+ my ($dir, $target, $clean) = @_;
+ chdir $dir;
+ if ($clean) {
+ return system qw(rm -rf out);
+ }
+ my $config = configuration();
+ my @command = ("make", "-j4", "BUILDTYPE=$config", $target);
+ print join(" ", @command) . "\n";
+ return system @command;
+}
+
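Illustrative sketch (editor's note, not part of the patch): the Linux Chromium build boils down to a single make invocation; for example, with a Release configuration (the real value comes from configuration()):

    config = 'Release'   # hypothetical
    command = ['make', '-j4', 'BUILDTYPE=%s' % config, 'webkit']
    print ' '.join(command)   # make -j4 BUILDTYPE=Release webkit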
+sub buildChromiumVisualStudioProject($$)
+{
+ my ($projectPath, $clean) = @_;
+
+ my $config = configuration();
+ my $action = "/build";
+ $action = "/clean" if $clean;
+
+ # Find Visual Studio installation.
+ my $vsInstallDir;
+ my $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files";
+ if ($ENV{'VSINSTALLDIR'}) {
+ $vsInstallDir = $ENV{'VSINSTALLDIR'};
+ } else {
+ $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8";
+ }
+ $vsInstallDir = `cygpath "$vsInstallDir"` if isCygwin();
+ chomp $vsInstallDir;
+ $vcBuildPath = "$vsInstallDir/Common7/IDE/devenv.com";
+
+ # Create command line and execute it.
+ my @command = ($vcBuildPath, $projectPath, $action, $config);
+ print "Building results into: ", baseProductDir(), "\n";
+ print join(" ", @command), "\n";
+ return system @command;
+}
+
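Illustrative sketch (editor's note, not part of the patch): the devenv.com lookup order used above, expressed in Python; VSINSTALLDIR wins, otherwise the default Visual Studio 8 location under Program Files is assumed (the cygpath conversion step is omitted here).

    import os

    program_files = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
    vs_install_dir = os.environ.get('VSINSTALLDIR') or os.path.join(program_files, 'Microsoft Visual Studio 8')
    devenv_path = os.path.join(vs_install_dir, 'Common7', 'IDE', 'devenv.com')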
sub buildChromium($@)
{
my ($clean, @options) = @_;
@@ -1400,30 +1517,38 @@ sub buildChromium($@)
my $result = 1;
if (isDarwin()) {
# Mac build - builds the root xcode project.
- $result = buildXCodeProject("WebKit/chromium/webkit",
- $clean,
- (@options));
- } elsif (isCygwin()) {
+ $result = buildXCodeProject("WebKit/chromium/WebKit", $clean, (@options));
+ } elsif (isCygwin() || isWindows()) {
# Windows build - builds the root visual studio solution.
- $result = buildVisualStudioProject("WebKit/chromium/webkit.sln",
- $clean);
+ $result = buildChromiumVisualStudioProject("WebKit/chromium/WebKit.sln", $clean);
} elsif (isLinux()) {
- # Linux build
- # FIXME support linux.
- print STDERR "Linux build is not supported. Yet.";
+ # Linux build - build using make.
+ $result = buildChromiumMakefile("WebKit/chromium/", "webkit", $clean);
} else {
- print STDERR "This platform is not supported by chromium.";
+ print STDERR "This platform is not supported by chromium.\n";
}
return $result;
}
+sub appleApplicationSupportPath
+{
+ open INSTALL_DIR, "</proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Apple\ Inc./Apple\ Application\ Support/InstallDir";
+ my $path = <INSTALL_DIR>;
+ $path =~ s/[\r\n\x00].*//;
+ close INSTALL_DIR;
+
+ my $unixPath = `cygpath -u '$path'`;
+ chomp $unixPath;
+ return $unixPath;
+}
+
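Illustrative sketch (editor's note, not part of the patch): reading the same InstallDir value through Cygwin's /proc/registry mount; the value ends at the first NUL or newline, which is what the substitution in appleApplicationSupportPath() strips.

    import re

    registry_file = ('/proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/'
                     'Apple Inc./Apple Application Support/InstallDir')
    try:
        f = open(registry_file)
        install_dir = re.split(r'[\r\n\x00]', f.read())[0]
        f.close()
    except IOError:
        install_dir = None   # not under Cygwin, or Apple Application Support not installed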
sub setPathForRunningWebKitApp
{
my ($env) = @_;
return unless isAppleWinWebKit();
- $env->{PATH} = join(':', productDir(), dirname(installedSafariPath()), $env->{PATH} || "");
+ $env->{PATH} = join(':', productDir(), dirname(installedSafariPath()), appleApplicationSupportPath(), $env->{PATH} || "");
}
sub exitStatus($)