author    | Steve Block <steveblock@google.com> | 2010-02-02 14:57:50 +0000
committer | Steve Block <steveblock@google.com> | 2010-02-04 15:06:55 +0000
commit    | d0825bca7fe65beaee391d30da42e937db621564 (patch)
tree      | 7461c49eb5844ffd1f35d1ba2c8b7584c1620823 /WebKitTools/Scripts
parent    | 3db770bd97c5a59b6c7574ca80a39e5a51c1defd (diff)
Merge webkit.org at r54127 : Initial merge by git
Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82
Diffstat (limited to 'WebKitTools/Scripts')
229 files changed, 19533 insertions, 4483 deletions
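The first file in the diff below, WebKitTools/Scripts/VCSUtils.pm, gains the newly exported helpers exitStatus(), parsePatch() and runPatchCommand(). As a rough orientation before reading the diff itself, here is a minimal caller sketch that is not part of this commit: it assumes a script sitting next to VCSUtils.pm, a hypothetical patch file named changes.patch, and passes --force purely for illustration. The real consumers are scripts such as svn-apply, which follow the contracts documented in the header comments added in the diff.

```perl
#!/usr/bin/perl -w
# Hypothetical example only -- not part of the merged change.
use strict;
use warnings;
use FindBin;
use lib $FindBin::Bin;    # assumes this file lives next to VCSUtils.pm
use VCSUtils;

my $repositoryRootPath = determineVCSRoot();

# parsePatch() reads a patch produced by svn-create-patch (or a Git-formatted
# one) and returns one hash reference per "Index:" block.
open(my $fileHandle, "<", "changes.patch") or die "Cannot open patch file: $!";
my @diffHashRefs = parsePatch($fileHandle);
close($fileHandle);

for my $diffHashRef (@diffHashRefs) {
    my $indexPath = $diffHashRef->{indexPath};
    # runPatchCommand() pipes the diff text to "patch -p0" from the repository
    # root and returns the exit status; with --force it reports failures
    # instead of exiting.
    my $exitStatus = runPatchCommand($diffHashRef->{svnConvertedText},
                                     $repositoryRootPath,
                                     $indexPath,
                                     { options => ["--force"] });
    print "Applied diff for $indexPath (exit status $exitStatus)\n";
}
```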
diff --git a/WebKitTools/Scripts/VCSUtils.pm b/WebKitTools/Scripts/VCSUtils.pm index 7638102..022c72a 100644 --- a/WebKitTools/Scripts/VCSUtils.pm +++ b/WebKitTools/Scripts/VCSUtils.pm @@ -1,4 +1,5 @@ # Copyright (C) 2007, 2008, 2009 Apple Inc. All rights reserved. +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -31,8 +32,10 @@ use strict; use warnings; use Cwd qw(); # "qw()" prevents warnings about redefining getcwd() with "use POSIX;" +use English; # for $POSTMATCH, etc. use File::Basename; use File::Spec; +use POSIX; BEGIN { use Exporter (); @@ -47,6 +50,7 @@ BEGIN { &decodeGitBinaryPatch &determineSVNRoot &determineVCSRoot + &exitStatus &fixChangeLogPatch &gitBranch &gitdiff2svndiff @@ -58,7 +62,9 @@ BEGIN { &isSVNVersion16OrNewer &makeFilePathRelative &normalizePath + &parsePatch &pathRelativeToSVNRepositoryRootForPath + &runPatchCommand &svnRevisionForDirectory &svnStatus ); @@ -75,6 +81,20 @@ my $isGitBranchBuild; my $isSVN; my $svnVersion; +# This method is for portability. Return the system-appropriate exit +# status of a child process. +# +# Args: pass the child error status returned by the last pipe close, +# for example "$?". +sub exitStatus($) +{ + my ($returnvalue) = @_; + if ($^O eq "MSWin32") { + return $returnvalue >> 8; + } + return WEXITSTATUS($returnvalue); +} + sub isGitDirectory($) { my ($dir) = @_; @@ -93,7 +113,7 @@ sub gitBranch() { unless (defined $gitBranch) { chomp($gitBranch = `git symbolic-ref -q HEAD`); - $gitBranch = "" if main::exitStatus($?); # FIXME: exitStatus is defined in webkitdirs.pm + $gitBranch = "" if exitStatus($?); $gitBranch =~ s#^refs/heads/##; $gitBranch = "" if $gitBranch eq "master"; } @@ -341,79 +361,458 @@ sub svnStatus($) return $svnStatus; } +# Convert a line of a git-formatted patch to SVN format, while +# preserving any end-of-line characters. sub gitdiff2svndiff($) { $_ = shift @_; - if (m#^diff --git a/(.+) b/(.+)#) { - return "Index: $1"; - } elsif (m#^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}#) { - return "==================================================================="; - } elsif (m#^--- a/(.+)#) { - return "--- $1"; - } elsif (m#^\+\+\+ b/(.+)#) { - return "+++ $1"; + + if (m#^diff --git \w/(.+) \w/([^\r\n]+)#) { + return "Index: $1$POSTMATCH"; + } + if (m#^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}#) { + # FIXME: No need to return dividing line once parseDiffHeader() is used. + return "===================================================================$POSTMATCH"; + } + if (m#^--- \w/([^\r\n]+)#) { + return "--- $1$POSTMATCH"; + } + if (m#^\+\+\+ \w/([^\r\n]+)#) { + return "+++ $1$POSTMATCH"; } return $_; } -# The diff(1) command is greedy when matching lines, so a new ChangeLog entry will -# have lines of context at the top of a patch when the existing entry has the same -# date and author as the new entry. Alter the ChangeLog patch so -# that the added lines ("+") in the patch always start at the beginning of the -# patch and there are no initial lines of context. +# Parse the next diff header from the given file handle, and advance +# the file handle so the last line read is the first line after the +# parsed header block. +# +# This subroutine dies if given leading junk or if the end of the header +# block could not be detected. The last line of a header block is a +# line beginning with "+++". 
+# +# Args: +# $fileHandle: advanced so the last line read is the first line of the +# next diff header. For SVN-formatted diffs, this is the +# "Index:" line. +# $line: the line last read from $fileHandle +# +# Returns ($headerHashRef, $lastReadLine): +# $headerHashRef: a hash reference representing a diff header +# copiedFromPath: if a file copy, the path from which the file was +# copied. Otherwise, undefined. +# indexPath: the path in the "Index:" line. +# sourceRevision: the revision number of the source. This is the same +# as the revision number the file was copied from, in +# the case of a file copy. +# svnConvertedText: the header text converted to SVN format. +# Unrecognized lines are discarded. +# $lastReadLine: the line last read from $fileHandle. This is the first +# line after the header ending. +sub parseDiffHeader($$) +{ + my ($fileHandle, $line) = @_; + + my $filter; + if ($line =~ m#^diff --git #) { + $filter = \&gitdiff2svndiff; + } + $line = &$filter($line) if $filter; + + my $indexPath; + if ($line =~ /^Index: ([^\r\n]+)/) { + $indexPath = $1; + } else { + die("Could not parse first line of diff header: \"$line\"."); + } + + my %header; + + my $foundHeaderEnding; + my $lastReadLine; + my $sourceRevision; + my $svnConvertedText = $line; + while (<$fileHandle>) { + # Temporarily strip off any end-of-line characters to simplify + # regex matching below. + s/([\n\r]+)$//; + my $eol = $1; + + $_ = &$filter($_) if $filter; + + # Fix paths on ""---" and "+++" lines to match the leading + # index line. + if (s/^--- \S+/--- $indexPath/) { + # --- + if (/^--- .+\(revision (\d+)\)/) { + $sourceRevision = $1 if ($1 != 0); + if (/\(from (\S+):(\d+)\)$/) { + # The "from" clause is created by svn-create-patch, in + # which case there is always also a "revision" clause. + $header{copiedFromPath} = $1; + die("Revision number \"$2\" in \"from\" clause does not match " . + "source revision number \"$sourceRevision\".") if ($2 != $sourceRevision); + } + } + $_ = "=" x 67 . "$eol$_"; # Prepend dividing line ===.... + } elsif (s/^\+\+\+ \S+/+++ $indexPath/) { + # +++ + $foundHeaderEnding = 1; + } else { + # Skip unrecognized lines. + next; + } + + $svnConvertedText .= "$_$eol"; # Also restore end-of-line characters. + if ($foundHeaderEnding) { + $lastReadLine = <$fileHandle>; + last; + } + } # $lastReadLine is undef if while loop ran out. + + if (!$foundHeaderEnding) { + die("Did not find end of header block corresponding to index path \"$indexPath\"."); + } + + $header{indexPath} = $indexPath; + $header{sourceRevision} = $sourceRevision; + $header{svnConvertedText} = $svnConvertedText; + + return (\%header, $lastReadLine); +} + +# Parse one diff from a patch file created by svn-create-patch, and +# advance the file handle so the last line read is the first line +# of the next header block. +# +# This subroutine preserves any leading junk encountered before the header. +# +# Args: +# $fileHandle: a file handle advanced to the first line of the next +# header block. Leading junk is okay. +# $line: the line last read from $fileHandle. +# +# Returns ($diffHashRef, $lastReadLine): +# $diffHashRef: +# copiedFromPath: if a file copy, the path from which the file was +# copied. Otherwise, undefined. +# indexPath: the path in the "Index:" line. +# sourceRevision: the revision number of the source. This is the same +# as the revision number the file was copied from, in +# the case of a file copy. +# svnConvertedText: the diff converted to SVN format. 
+# $lastReadLine: the line last read from $fileHandle +sub parseDiff($$) +{ + my ($fileHandle, $line) = @_; + + my $headerStartRegEx = qr#^Index: #; # SVN-style header for the default + my $gitHeaderStartRegEx = qr#^diff --git \w/#; + + my $headerHashRef; # Last header found, as returned by parseDiffHeader(). + my $svnText; + while (defined($line)) { + if (!$headerHashRef && ($line =~ $gitHeaderStartRegEx)) { + # Then assume all diffs in the patch are Git-formatted. This + # block was made to be enterable at most once since we assume + # all diffs in the patch are formatted the same (SVN or Git). + $headerStartRegEx = $gitHeaderStartRegEx; + } + + if ($line !~ $headerStartRegEx) { + # Then we are in the body of the diff. + $svnText .= $line; + $line = <$fileHandle>; + next; + } # Otherwise, we found a diff header. + + if ($headerHashRef) { + # Then this is the second diff header of this while loop. + last; + } + + ($headerHashRef, $line) = parseDiffHeader($fileHandle, $line); + + $svnText .= $headerHashRef->{svnConvertedText}; + } + + my %diffHashRef; + $diffHashRef{copiedFromPath} = $headerHashRef->{copiedFromPath}; + $diffHashRef{indexPath} = $headerHashRef->{indexPath}; + $diffHashRef{sourceRevision} = $headerHashRef->{sourceRevision}; + $diffHashRef{svnConvertedText} = $svnText; + + return (\%diffHashRef, $line); +} + +# Parse a patch file created by svn-create-patch. +# +# Args: +# $fileHandle: A file handle to the patch file that has not yet been +# read from. +# +# Returns: +# @diffHashRefs: an array of diff hash references. See parseDiff() for +# a description of each $diffHashRef. +sub parsePatch($) +{ + my ($fileHandle) = @_; + + my @diffHashRefs; # return value + + my $line = <$fileHandle>; + + while (defined($line)) { # Otherwise, at EOF. + + my $diffHashRef; + ($diffHashRef, $line) = parseDiff($fileHandle, $line); + + push @diffHashRefs, $diffHashRef; + } + + return @diffHashRefs; +} + +# If possible, returns a ChangeLog patch equivalent to the given one, +# but with the newest ChangeLog entry inserted at the top of the +# file -- i.e. no leading context and all lines starting with "+". +# +# If given a patch string not representable as a patch with the above +# properties, it returns the input back unchanged. +# +# WARNING: This subroutine can return an inequivalent patch string if +# both the beginning of the new ChangeLog file matches the beginning +# of the source ChangeLog, and the source beginning was modified. +# Otherwise, it is guaranteed to return an equivalent patch string, +# if it returns. +# +# Applying this subroutine to ChangeLog patches allows svn-apply to +# insert new ChangeLog entries at the top of the ChangeLog file. +# svn-apply uses patch with --fuzz=3 to do this. We need to apply +# this subroutine because the diff(1) command is greedy when matching +# lines. A new ChangeLog entry with the same date and author as the +# previous will match and cause the diff to have lines of starting +# context. +# +# This subroutine has unit tests in VCSUtils_unittest.pl. sub fixChangeLogPatch($) { my $patch = shift; # $patch will only contain patch fragments for ChangeLog. $patch =~ /(\r?\n)/; my $lineEnding = $1; - my @patchLines = split(/$lineEnding/, $patch); - - # e.g. 2009-06-03 Eric Seidel <eric@webkit.org> - my $dateLineRegexpString = '^\+(\d{4}-\d{2}-\d{2})' # Consume the leading '+' and the date. - . '\s+(.+)\s+' # Consume the name. - . '<([^<>]+)>$'; # And finally the email address. - - # Figure out where the patch contents start and stop. 
- my $patchHeaderIndex; - my $firstContentIndex; - my $trailingContextIndex; - my $dateIndex; - my $patchEndIndex = scalar(@patchLines); - for (my $index = 0; $index < @patchLines; ++$index) { - my $line = $patchLines[$index]; - if ($line =~ /^\@\@ -\d+,\d+ \+\d+,\d+ \@\@$/) { # e.g. @@ -1,5 +1,18 @@ - if ($patchHeaderIndex) { - $patchEndIndex = $index; # We only bother to fix up the first patch fragment. - last; - } - $patchHeaderIndex = $index; + my @lines = split(/$lineEnding/, $patch); + + my $i = 0; # We reuse the same index throughout. + + # Skip to beginning of first chunk. + for (; $i < @lines; ++$i) { + if (substr($lines[$i], 0, 1) eq "@") { + last; } - $firstContentIndex = $index if ($patchHeaderIndex && !$firstContentIndex && $line =~ /^\+[^+]/); # Only match after finding patchHeaderIndex, otherwise we'd match "+++". - $dateIndex = $index if ($line =~ /$dateLineRegexpString/); - $trailingContextIndex = $index if ($firstContentIndex && !$trailingContextIndex && $line =~ /^ /); } - my $contentLineCount = $trailingContextIndex - $firstContentIndex; - my $trailingContextLineCount = $patchEndIndex - $trailingContextIndex; + my $chunkStartIndex = ++$i; + + # Optimization: do not process if new lines already begin the chunk. + if (substr($lines[$i], 0, 1) eq "+") { + return $patch; + } + + # Skip to first line of newly added ChangeLog entry. + # For example, +2009-06-03 Eric Seidel <eric@webkit.org> + my $dateStartRegEx = '^\+(\d{4}-\d{2}-\d{2})' # leading "+" and date + . '\s+(.+)\s+' # name + . '<([^<>]+)>$'; # e-mail address + + for (; $i < @lines; ++$i) { + my $line = $lines[$i]; + my $firstChar = substr($line, 0, 1); + if ($line =~ /$dateStartRegEx/) { + last; + } elsif ($firstChar eq " " or $firstChar eq "+") { + next; + } + return $patch; # Do not change if, for example, "-" or "@" found. + } + if ($i >= @lines) { + return $patch; # Do not change if date not found. + } + my $dateStartIndex = $i; + + # Rewrite overlapping lines to lead with " ". + my @overlappingLines = (); # These will include a leading "+". + for (; $i < @lines; ++$i) { + my $line = $lines[$i]; + if (substr($line, 0, 1) ne "+") { + last; + } + push(@overlappingLines, $line); + $lines[$i] = " " . substr($line, 1); + } + + # Remove excess ending context, if necessary. + my $shouldTrimContext = 1; + for (; $i < @lines; ++$i) { + my $firstChar = substr($lines[$i], 0, 1); + if ($firstChar eq " ") { + next; + } elsif ($firstChar eq "@") { + last; + } + $shouldTrimContext = 0; # For example, if "+" or "-" encountered. + last; + } + my $deletedLineCount = 0; + if ($shouldTrimContext) { # Also occurs if end of file reached. + splice(@lines, $i - @overlappingLines, @overlappingLines); + $deletedLineCount = @overlappingLines; + } + + # Work backwards, shifting overlapping lines towards front + # while checking that patch stays equivalent. + for ($i = $dateStartIndex - 1; $i >= $chunkStartIndex; --$i) { + my $line = $lines[$i]; + if (substr($line, 0, 1) ne " ") { + next; + } + my $text = substr($line, 1); + my $newLine = pop(@overlappingLines); + if ($text ne substr($newLine, 1)) { + return $patch; # Unexpected difference. + } + $lines[$i] = "+$text"; + } + + # Finish moving whatever overlapping lines remain, and update + # the initial chunk range. + my $chunkRangeRegEx = '^\@\@ -(\d+),(\d+) \+\d+,(\d+) \@\@$'; # e.g. @@ -2,6 +2,18 @@ + if ($lines[$chunkStartIndex - 1] !~ /$chunkRangeRegEx/) { + # FIXME: Handle errors differently from ChangeLog files that + # are okay but should not be altered. 
That way we can find out + # if improvements to the script ever become necessary. + return $patch; # Error: unexpected patch string format. + } + my $skippedFirstLineCount = $1 - 1; + my $oldSourceLineCount = $2; + my $oldTargetLineCount = $3; + + if (@overlappingLines != $skippedFirstLineCount) { + # This can happen, for example, when deliberately inserting + # a new ChangeLog entry earlier in the file. + return $patch; + } + # If @overlappingLines > 0, this is where we make use of the + # assumption that the beginning of the source file was not modified. + splice(@lines, $chunkStartIndex, 0, @overlappingLines); + + my $sourceLineCount = $oldSourceLineCount + @overlappingLines - $deletedLineCount; + my $targetLineCount = $oldTargetLineCount + @overlappingLines - $deletedLineCount; + $lines[$chunkStartIndex - 1] = "@@ -1,$sourceLineCount +1,$targetLineCount @@"; + + return join($lineEnding, @lines) . "\n"; # patch(1) expects an extra trailing newline. +} + +# This is a supporting method for runPatchCommand. +# +# Arg: the optional $args parameter passed to runPatchCommand (can be undefined). +# +# Returns ($patchCommand, $isForcing). +# +# This subroutine has unit tests in VCSUtils_unittest.pl. +sub generatePatchCommand($) +{ + my ($passedArgsHashRef) = @_; + + my $argsHashRef = { # Defaults + ensureForce => 0, + shouldReverse => 0, + options => [] + }; + + # Merges hash references. It's okay here if passed hash reference is undefined. + @{$argsHashRef}{keys %{$passedArgsHashRef}} = values %{$passedArgsHashRef}; + + my $ensureForce = $argsHashRef->{ensureForce}; + my $shouldReverse = $argsHashRef->{shouldReverse}; + my $options = $argsHashRef->{options}; + + if (! $options) { + $options = []; + } else { + $options = [@{$options}]; # Copy to avoid side effects. + } + + my $isForcing = 0; + if (grep /^--force$/, @{$options}) { + $isForcing = 1; + } elsif ($ensureForce) { + push @{$options}, "--force"; + $isForcing = 1; + } + + if ($shouldReverse) { # No check: --reverse should never be passed explicitly. + push @{$options}, "--reverse"; + } + + @{$options} = sort(@{$options}); # For easier testing. + + my $patchCommand = join(" ", "patch -p0", @{$options}); + + return ($patchCommand, $isForcing); +} + +# Apply the given patch using the patch(1) command. +# +# On success, return the resulting exit status. Otherwise, exit with the +# exit status. If "--force" is passed as an option, however, then never +# exit and always return the exit status. +# +# Args: +# $patch: a patch string. +# $repositoryRootPath: an absolute path to the repository root. +# $pathRelativeToRoot: the path of the file to be patched, relative to the +# repository root. This should normally be the path +# found in the patch's "Index:" line. It is passed +# explicitly rather than reparsed from the patch +# string for optimization purposes. +# This is used only for error reporting. The +# patch command gleans the actual file to patch +# from the patch string. +# $args: a reference to a hash of optional arguments. The possible +# keys are -- +# ensureForce: whether to ensure --force is passed (defaults to 0). +# shouldReverse: whether to pass --reverse (defaults to 0). +# options: a reference to an array of options to pass to the +# patch command. The subroutine passes the -p0 option +# no matter what. This should not include --reverse. +# +# This subroutine has unit tests in VCSUtils_unittest.pl. 
+sub runPatchCommand($$$;$) +{ + my ($patch, $repositoryRootPath, $pathRelativeToRoot, $args) = @_; + + my ($patchCommand, $isForcing) = generatePatchCommand($args); + + # Temporarily change the working directory since the path found + # in the patch's "Index:" line is relative to the repository root + # (i.e. the same as $pathRelativeToRoot). + my $cwd = Cwd::getcwd(); + chdir $repositoryRootPath; - # If we didn't find a date line in the content then this is not a patch we should try and fix. - return $patch if (!$dateIndex); + open PATCH, "| $patchCommand" or die "Could not call \"$patchCommand\" for file \"$pathRelativeToRoot\": $!"; + print PATCH $patch; + close PATCH; + my $exitStatus = exitStatus($?); - # We only need to do anything if the date line is not the first content line. - return $patch if ($dateIndex == $firstContentIndex); + chdir $cwd; - # Write the new patch. - my $totalNewContentLines = $contentLineCount + $trailingContextLineCount; - $patchLines[$patchHeaderIndex] = "@@ -1,$trailingContextLineCount +1,$totalNewContentLines @@"; # Write a new header. - my @repeatedLines = splice(@patchLines, $dateIndex, $trailingContextIndex - $dateIndex); # The date line and all the content after it that diff saw as repeated. - splice(@patchLines, $firstContentIndex, 0, @repeatedLines); # Move the repeated content to the top. - foreach my $line (@repeatedLines) { - $line =~ s/^\+/ /; + if ($exitStatus && !$isForcing) { + print "Calling \"$patchCommand\" for file \"$pathRelativeToRoot\" returned " . + "status $exitStatus. Pass --force to ignore patch failures.\n"; + exit $exitStatus; } - splice(@patchLines, $trailingContextIndex, $patchEndIndex, @repeatedLines); # Replace trailing context with the repeated content. - splice(@patchLines, $patchHeaderIndex + 1, $firstContentIndex - $patchHeaderIndex - 1); # Remove any leading context. - return join($lineEnding, @patchLines) . "\n"; # patch(1) expects an extra trailing newline. 
+ return $exitStatus; } sub gitConfig($) diff --git a/WebKitTools/Scripts/build-webkit b/WebKitTools/Scripts/build-webkit index 566965b..8171fba 100755 --- a/WebKitTools/Scripts/build-webkit +++ b/WebKitTools/Scripts/build-webkit @@ -50,8 +50,8 @@ my $minimal = 0; my $makeArgs; my $startTime = time(); -my ($threeDCanvasSupport, $threeDRenderingSupport, $channelMessagingSupport, $databaseSupport, $datagridSupport, $datalistSupport, - $domStorageSupport, $eventsourceSupport, $filtersSupport, $geolocationSupport, $iconDatabaseSupport, +my ($threeDCanvasSupport, $threeDRenderingSupport, $channelMessagingSupport, $clientBasedGeolocationSupport, $databaseSupport, $datagridSupport, $datalistSupport, + $domStorageSupport, $eventsourceSupport, $filtersSupport, $geolocationSupport, $iconDatabaseSupport, $indexedDatabaseSupport, $javaScriptDebuggerSupport, $mathmlSupport, $offlineWebApplicationSupport, $sharedWorkersSupport, $svgSupport, $svgAnimationSupport, $svgAsImageSupport, $svgDOMObjCBindingsSupport, $svgFontsSupport, $svgForeignObjectSupport, $svgUseSupport, $videoSupport, $webSocketsSupport, $wmlSupport, $wcssSupport, $xhtmlmpSupport, $workersSupport, @@ -67,6 +67,9 @@ my @features = ( { option => "channel-messaging", desc => "Toggle MessageChannel and MessagePort support", define => "ENABLE_CHANNEL_MESSAGING", default => 1, value => \$channelMessagingSupport }, + { option => "client-based-geolocation", desc => "Toggle client-based Geolocation support", + define => "ENABLE_CLIENT_BASED_GEOLOCATION", default => isAppleWebKit(), value => \$clientBasedGeolocationSupport }, + { option => "coverage", desc => "Toggle code coverage support", define => "", default => 0, value => \$coverageSupport }, @@ -74,7 +77,7 @@ my @features = ( define => "ENABLE_DATABASE", default => 1, value => \$databaseSupport }, { option => "datagrid", desc => "Toggle Datagrid Support", - define => "ENABLE_DATAGRID", default => 1, value => \$datagridSupport }, + define => "ENABLE_DATAGRID", default => 0, value => \$datagridSupport }, { option => "datalist", desc => "Toggle HTML5 datalist support", define => "ENABLE_DATALIST", default => 1, value => \$datalistSupport }, @@ -89,11 +92,14 @@ my @features = ( define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt()), value => \$filtersSupport }, { option => "geolocation", desc => "Toggle Geolocation support", - define => "ENABLE_GEOLOCATION", default => isGtk(), value => \$geolocationSupport }, + define => "ENABLE_GEOLOCATION", default => (isAppleWebKit() || isGtk()), value => \$geolocationSupport }, { option => "icon-database", desc => "Toggle Icon database support", define => "ENABLE_ICONDATABASE", default => 1, value => \$iconDatabaseSupport }, + { option => "indexed-database", desc => "Toggle Indexed Database API support", + define => "ENABLE_INDEXED_DATABASE", default => 0, value => \$indexedDatabaseSupport }, + { option => "javascript-debugger", desc => "Toggle JavaScript Debugger/Profiler support", define => "ENABLE_JAVASCRIPT_DEBUGGER", default => 1, value => \$javaScriptDebuggerSupport }, @@ -191,6 +197,7 @@ Usage: $programName [options] [options to pass to build system] --chromium Build the Chromium port on Mac/Win/Linux --gtk Build the GTK+ port --qt Build the Qt port + --inspector-frontend Copy changes to the inspector front-end files to the build directory --makeargs=<arguments> Optional Makefile flags @@ -311,6 +318,10 @@ if (isGtk()) { # Force re-link of existing libraries if different than expected removeLibraryDependingOnSVG("WebCore", 
$svgSupport); +if (isInspectorFrontend()) { + exit exitStatus(copyInspectorFrontendFiles()); +} + if (isWx()) { downloadWafIfNeeded(); push @projects, 'WebKitTools/DumpRenderTree'; diff --git a/WebKitTools/Scripts/check-webkit-style b/WebKitTools/Scripts/check-webkit-style index 5709cf0..501264b 100755 --- a/WebKitTools/Scripts/check-webkit-style +++ b/WebKitTools/Scripts/check-webkit-style @@ -1,6 +1,7 @@ #!/usr/bin/env python # # Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -28,105 +29,28 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Script to run the linter for source code of WebKit.""" +"""Does WebKit-lint on C/C++ or text files. + +The goal of this script is to identify places in the code that *may* +be in non-compliance with WebKit style. It does not attempt to fix +up these problems -- the point is to educate. It does also not +attempt to find all problems, or to ensure that everything it does +find is legitimately a problem. + +In particular, we can get very confused by /* and // inside strings! +We do a small hack, which is to ignore //'s with "'s after them on the +same line, but it is far from perfect (in either direction). +""" import codecs import os import os.path import sys -import modules.cpp_style as cpp_style -from modules.diff_parser import DiffParser -from modules.scm import detect_scm_system - - -# Override the usage of the lint tool. -cpp_style._USAGE = """ -Syntax: %(program_name)s [--verbose=#] [--git-commit=<SingleCommit>] [--output=vs7] - [--filter=-x,+y,...] [file] ... - - The style guidelines this tries to follow are those in - http://webkit.org/coding/coding-style.html - - Every problem is given a confidence score from 1-5, with 5 meaning we are - certain of the problem, and 1 meaning it could be a legitimate construct. - This will miss some errors, and is not a substitute for a code review. - - To prevent specific lines from being linted, add a '// NOLINT' comment to the - end of the line. - - Linted extensions are .cpp, .c and .h. Other file types will be ignored. - - The file parameter is optional and multiple files to scan be passed in. - Leaving out the file parameter will apply the check to the files changed - according to the scm system. - - Flags: - - verbose=# - Specify a number 0-5 to restrict errors to certain verbosity levels. - - git-commit=<SingleCommit> - Checks the style of everything from the given commit to the local tree. - - output=vs7 - By default, the output is formatted to ease emacs parsing. Visual Studio - compatible output (vs7) may also be used. Other formats are unsupported. - - filter=-x,+y,... - Specify a comma-separated list of category-filters to apply: only - error messages whose category names pass the filters will be printed. - (Category names are printed with the message and look like - "[whitespace/indent]".) Filters are evaluated left to right. - "-FOO" and "FOO" means "do not print categories that start with FOO". - "+FOO" means "do print categories that start with FOO". 
- - Examples: --filter=-whitespace,+whitespace/braces - --filter=whitespace,runtime/printf,+runtime/printf_format - --filter=-,+build/include_what_you_use - - To see a list of all the categories used in %(program_name)s, pass no arg: - --filter= -""" % {'program_name': os.path.basename(sys.argv[0])} - - -def process_patch(patch_string): - """Does lint on a single patch. - - Args: - patch_string: A string of a patch. - """ - patch = DiffParser(patch_string.splitlines()) - for filename, diff in patch.files.iteritems(): - file_extension = os.path.splitext(filename)[1] - - if file_extension in ['.cpp', '.c', '.h']: - line_numbers = set() - - def error_for_patch(filename, line_number, category, confidence, message): - """Wrapper function of cpp_style.error for patches. - - This function outputs errors only if the line number - corresponds to lines which are modified or added. - """ - if not line_numbers: - for line in diff.lines: - # When deleted line is not set, it means that - # the line is newly added. - if not line[0]: - line_numbers.add(line[1]) - - if line_number in line_numbers: - cpp_style.error(filename, line_number, category, confidence, message) - - cpp_style.process_file(filename, error=error_for_patch) - +import webkitpy.style.checker as checker +from webkitpy.style_references import SimpleScm def main(): - cpp_style.use_webkit_styles() - - (files, flags) = cpp_style.parse_arguments(sys.argv[1:], ["git-commit="]) - # Change stderr to write with replacement characters so we don't die # if we try to print something containing non-ASCII characters. sys.stderr = codecs.StreamReaderWriter(sys.stderr, @@ -134,32 +58,37 @@ def main(): codecs.getwriter('utf8'), 'replace') - if files and "--git-commit" in flags: - sys.stderr.write("ERROR: It is not possible to check files " - "and a specific commit at the same time.\n" + cpp_style._USAGE) - sys.exit(1) + defaults = checker.webkit_argument_defaults() + + parser = checker.ArgumentParser(defaults) + (files, options) = parser.parse(sys.argv[1:]) + + style_checker = checker.StyleChecker(options) if files: for filename in files: - cpp_style.process_file(filename) + style_checker.check_file(filename) else: - cwd = os.path.abspath('.') - scm = detect_scm_system(cwd) + scm = SimpleScm() + + os.chdir(scm.checkout_root()) - if "--git-commit" in flags: - commit = flags["--git-commit"] + if options.git_commit: + commit = options.git_commit if '..' in commit: # FIXME: If the range is a "...", the code should find the common ancestor and # start there (see git diff --help for information about how ... usually works). commit = commit[:commit.find('..')] print >> sys.stderr, "Warning: Ranges are not supported for --git-commit. 
Checking all changes since %s.\n" % commit - process_patch(scm.create_patch_since_local_commit(commit)) + patch = scm.create_patch_since_local_commit(commit) else: - process_patch(scm.create_patch()) + patch = scm.create_patch() + style_checker.check_patch(patch) - sys.stderr.write('Total errors found: %d\n' % cpp_style.error_count()) - sys.exit(cpp_style.error_count() > 0) + error_count = style_checker.error_count + sys.stderr.write('Total errors found: %d\n' % error_count) + sys.exit(error_count > 0) if __name__ == "__main__": diff --git a/WebKitTools/Scripts/do-webcore-rename b/WebKitTools/Scripts/do-webcore-rename index a65fa4f..56d8bed 100755 --- a/WebKitTools/Scripts/do-webcore-rename +++ b/WebKitTools/Scripts/do-webcore-rename @@ -67,232 +67,14 @@ sub wanted push @paths, $File::Find::name; } -my $isDOMTypeRename = 1; +my $isDOMTypeRename = 0; my %renames = ( - "CanvasActiveInfo" => "WebGLActiveInfo", - "canvasActiveInfo" => "webGLActiveInfo", - "CanvasActiveInfoConstructor" => "WebGLActiveInfoConstructor", - "CanvasActiveInfoPrototype" => "WebGLActiveInfoPrototype", - "toCanvasActiveInfo" => "toWebGLActiveInfo", - "JSCanvasActiveInfo" => "JSWebGLActiveInfo", - "JSCanvasActiveInfoPrototype" => "JSWebGLActiveInfoPrototype", - "JSCanvasActiveInfoConstructor" => "JSWebGLActiveInfoConstructor", - "JSCanvasActiveInfoCustom" => "JSWebGLActiveInfoCustom", - "V8CanvasActiveInfo" => "V8WebGLActiveInfo", - "V8CanvasActiveInfoPrototype" => "V8WebGLActiveInfoPrototype", - "V8CanvasActiveInfoConstructor" => "V8WebGLActiveInfoConstructor", - "V8CanvasActiveInfoCustom" => "V8WebGLActiveInfoCustom", - "CanvasArray" => "WebGLArray", - "canvasArray" => "webGLArray", - "CanvasArrayConstructor" => "WebGLArrayConstructor", - "CanvasArrayPrototype" => "WebGLArrayPrototype", - "toCanvasArray" => "toWebGLArray", - "JSCanvasArray" => "JSWebGLArray", - "JSCanvasArrayPrototype" => "JSWebGLArrayPrototype", - "JSCanvasArrayConstructor" => "JSWebGLArrayConstructor", - "JSCanvasArrayCustom" => "JSWebGLArrayCustom", - "V8CanvasArray" => "V8WebGLArray", - "V8CanvasArrayPrototype" => "V8WebGLArrayPrototype", - "V8CanvasArrayConstructor" => "V8WebGLArrayConstructor", - "V8CanvasArrayCustom" => "V8WebGLArrayCustom", - "CanvasArrayBuffer" => "WebGLArrayBuffer", - "canvasArrayBuffer" => "webGLArrayBuffer", - "CanvasArrayBufferConstructor" => "WebGLArrayBufferConstructor", - "CanvasArrayBufferPrototype" => "WebGLArrayBufferPrototype", - "toCanvasArrayBuffer" => "toWebGLArrayBuffer", - "JSCanvasArrayBuffer" => "JSWebGLArrayBuffer", - "JSCanvasArrayBufferPrototype" => "JSWebGLArrayBufferPrototype", - "JSCanvasArrayBufferConstructor" => "JSWebGLArrayBufferConstructor", - "JSCanvasArrayBufferCustom" => "JSWebGLArrayBufferCustom", - "V8CanvasArrayBuffer" => "V8WebGLArrayBuffer", - "V8CanvasArrayBufferPrototype" => "V8WebGLArrayBufferPrototype", - "V8CanvasArrayBufferConstructor" => "V8WebGLArrayBufferConstructor", - "V8CanvasArrayBufferCustom" => "V8WebGLArrayBufferCustom", - "CanvasBuffer" => "WebGLBuffer", - "canvasBuffer" => "webGLBuffer", - "CanvasBufferConstructor" => "WebGLBufferConstructor", - "CanvasBufferPrototype" => "WebGLBufferPrototype", - "toCanvasBuffer" => "toWebGLBuffer", - "JSCanvasBuffer" => "JSWebGLBuffer", - "JSCanvasBufferPrototype" => "JSWebGLBufferPrototype", - "JSCanvasBufferConstructor" => "JSWebGLBufferConstructor", - "JSCanvasBufferCustom" => "JSWebGLBufferCustom", - "V8CanvasBuffer" => "V8WebGLBuffer", - "V8CanvasBufferPrototype" => "V8WebGLBufferPrototype", - "V8CanvasBufferConstructor" => 
"V8WebGLBufferConstructor", - "V8CanvasBufferCustom" => "V8WebGLBufferCustom", - "CanvasByteArray" => "WebGLByteArray", - "canvasByteArray" => "webGLByteArray", - "CanvasByteArrayConstructor" => "WebGLByteArrayConstructor", - "CanvasByteArrayPrototype" => "WebGLByteArrayPrototype", - "toCanvasByteArray" => "toWebGLByteArray", - "JSCanvasByteArray" => "JSWebGLByteArray", - "JSCanvasByteArrayPrototype" => "JSWebGLByteArrayPrototype", - "JSCanvasByteArrayConstructor" => "JSWebGLByteArrayConstructor", - "JSCanvasByteArrayCustom" => "JSWebGLByteArrayCustom", - "V8CanvasByteArray" => "V8WebGLByteArray", - "V8CanvasByteArrayPrototype" => "V8WebGLByteArrayPrototype", - "V8CanvasByteArrayConstructor" => "V8WebGLByteArrayConstructor", - "V8CanvasByteArrayCustom" => "V8WebGLByteArrayCustom", - "CanvasFloatArray" => "WebGLFloatArray", - "canvasFloatArray" => "webGLFloatArray", - "CanvasFloatArrayConstructor" => "WebGLFloatArrayConstructor", - "CanvasFloatArrayPrototype" => "WebGLFloatArrayPrototype", - "toCanvasFloatArray" => "toWebGLFloatArray", - "JSCanvasFloatArray" => "JSWebGLFloatArray", - "JSCanvasFloatArrayPrototype" => "JSWebGLFloatArrayPrototype", - "JSCanvasFloatArrayConstructor" => "JSWebGLFloatArrayConstructor", - "JSCanvasFloatArrayCustom" => "JSWebGLFloatArrayCustom", - "V8CanvasFloatArray" => "V8WebGLFloatArray", - "V8CanvasFloatArrayPrototype" => "V8WebGLFloatArrayPrototype", - "V8CanvasFloatArrayConstructor" => "V8WebGLFloatArrayConstructor", - "V8CanvasFloatArrayCustom" => "V8WebGLFloatArrayCustom", - "CanvasFramebuffer" => "WebGLFramebuffer", - "canvasFramebuffer" => "webGLFramebuffer", - "CanvasFramebufferConstructor" => "WebGLFramebufferConstructor", - "CanvasFramebufferPrototype" => "WebGLFramebufferPrototype", - "toCanvasFramebuffer" => "toWebGLFramebuffer", - "JSCanvasFramebuffer" => "JSWebGLFramebuffer", - "JSCanvasFramebufferPrototype" => "JSWebGLFramebufferPrototype", - "JSCanvasFramebufferConstructor" => "JSWebGLFramebufferConstructor", - "JSCanvasFramebufferCustom" => "JSWebGLFramebufferCustom", - "V8CanvasFramebuffer" => "V8WebGLFramebuffer", - "V8CanvasFramebufferPrototype" => "V8WebGLFramebufferPrototype", - "V8CanvasFramebufferConstructor" => "V8WebGLFramebufferConstructor", - "V8CanvasFramebufferCustom" => "V8WebGLFramebufferCustom", - "CanvasIntArray" => "WebGLIntArray", - "canvasIntArray" => "webGLIntArray", - "CanvasIntArrayConstructor" => "WebGLIntArrayConstructor", - "CanvasIntArrayPrototype" => "WebGLIntArrayPrototype", - "toCanvasIntArray" => "toWebGLIntArray", - "JSCanvasIntArray" => "JSWebGLIntArray", - "JSCanvasIntArrayPrototype" => "JSWebGLIntArrayPrototype", - "JSCanvasIntArrayConstructor" => "JSWebGLIntArrayConstructor", - "JSCanvasIntArrayCustom" => "JSWebGLIntArrayCustom", - "V8CanvasIntArray" => "V8WebGLIntArray", - "V8CanvasIntArrayPrototype" => "V8WebGLIntArrayPrototype", - "V8CanvasIntArrayConstructor" => "V8WebGLIntArrayConstructor", - "V8CanvasIntArrayCustom" => "V8WebGLIntArrayCustom", - "CanvasProgram" => "WebGLProgram", - "canvasProgram" => "webGLProgram", - "CanvasProgramConstructor" => "WebGLProgramConstructor", - "CanvasProgramPrototype" => "WebGLProgramPrototype", - "toCanvasProgram" => "toWebGLProgram", - "JSCanvasProgram" => "JSWebGLProgram", - "JSCanvasProgramPrototype" => "JSWebGLProgramPrototype", - "JSCanvasProgramConstructor" => "JSWebGLProgramConstructor", - "JSCanvasProgramCustom" => "JSWebGLProgramCustom", - "V8CanvasProgram" => "V8WebGLProgram", - "V8CanvasProgramPrototype" => "V8WebGLProgramPrototype", - 
"V8CanvasProgramConstructor" => "V8WebGLProgramConstructor", - "V8CanvasProgramCustom" => "V8WebGLProgramCustom", - "CanvasRenderbuffer" => "WebGLRenderbuffer", - "canvasRenderbuffer" => "webGLRenderbuffer", - "CanvasRenderbufferConstructor" => "WebGLRenderbufferConstructor", - "CanvasRenderbufferPrototype" => "WebGLRenderbufferPrototype", - "toCanvasRenderbuffer" => "toWebGLRenderbuffer", - "JSCanvasRenderbuffer" => "JSWebGLRenderbuffer", - "JSCanvasRenderbufferPrototype" => "JSWebGLRenderbufferPrototype", - "JSCanvasRenderbufferConstructor" => "JSWebGLRenderbufferConstructor", - "JSCanvasRenderbufferCustom" => "JSWebGLRenderbufferCustom", - "V8CanvasRenderbuffer" => "V8WebGLRenderbuffer", - "V8CanvasRenderbufferPrototype" => "V8WebGLRenderbufferPrototype", - "V8CanvasRenderbufferConstructor" => "V8WebGLRenderbufferConstructor", - "V8CanvasRenderbufferCustom" => "V8WebGLRenderbufferCustom", - "CanvasRenderingContext3D" => "WebGLRenderingContext", - "canvasRenderingContext3D" => "webGLRenderingContext", - "CanvasRenderingContext3DConstructor" => "WebGLRenderingContextConstructor", - "CanvasRenderingContext3DPrototype" => "WebGLRenderingContextPrototype", - "toCanvasRenderingContext3D" => "toWebGLRenderingContext", - "JSCanvasRenderingContext3D" => "JSWebGLRenderingContext", - "JSCanvasRenderingContext3DPrototype" => "JSWebGLRenderingContextPrototype", - "JSCanvasRenderingContext3DConstructor" => "JSWebGLRenderingContextConstructor", - "JSCanvasRenderingContext3DCustom" => "JSWebGLRenderingContextCustom", - "V8CanvasRenderingContext3D" => "V8WebGLRenderingContext", - "V8CanvasRenderingContext3DPrototype" => "V8WebGLRenderingContextPrototype", - "V8CanvasRenderingContext3DConstructor" => "V8WebGLRenderingContextConstructor", - "V8CanvasRenderingContext3DCustom" => "V8WebGLRenderingContextCustom", - "CanvasShader" => "WebGLShader", - "canvasShader" => "webGLShader", - "CanvasShaderConstructor" => "WebGLShaderConstructor", - "CanvasShaderPrototype" => "WebGLShaderPrototype", - "toCanvasShader" => "toWebGLShader", - "JSCanvasShader" => "JSWebGLShader", - "JSCanvasShaderPrototype" => "JSWebGLShaderPrototype", - "JSCanvasShaderConstructor" => "JSWebGLShaderConstructor", - "JSCanvasShaderCustom" => "JSWebGLShaderCustom", - "V8CanvasShader" => "V8WebGLShader", - "V8CanvasShaderPrototype" => "V8WebGLShaderPrototype", - "V8CanvasShaderConstructor" => "V8WebGLShaderConstructor", - "V8CanvasShaderCustom" => "V8WebGLShaderCustom", - "CanvasShortArray" => "WebGLShortArray", - "canvasShortArray" => "webGLShortArray", - "CanvasShortArrayConstructor" => "WebGLShortArrayConstructor", - "CanvasShortArrayPrototype" => "WebGLShortArrayPrototype", - "toCanvasShortArray" => "toWebGLShortArray", - "JSCanvasShortArray" => "JSWebGLShortArray", - "JSCanvasShortArrayPrototype" => "JSWebGLShortArrayPrototype", - "JSCanvasShortArrayConstructor" => "JSWebGLShortArrayConstructor", - "JSCanvasShortArrayCustom" => "JSWebGLShortArrayCustom", - "V8CanvasShortArray" => "V8WebGLShortArray", - "V8CanvasShortArrayPrototype" => "V8WebGLShortArrayPrototype", - "V8CanvasShortArrayConstructor" => "V8WebGLShortArrayConstructor", - "V8CanvasShortArrayCustom" => "V8WebGLShortArrayCustom", - "CanvasTexture" => "WebGLTexture", - "canvasTexture" => "webGLTexture", - "CanvasTextureConstructor" => "WebGLTextureConstructor", - "CanvasTexturePrototype" => "WebGLTexturePrototype", - "toCanvasTexture" => "toWebGLTexture", - "JSCanvasTexture" => "JSWebGLTexture", - "JSCanvasTexturePrototype" => "JSWebGLTexturePrototype", - 
"JSCanvasTextureConstructor" => "JSWebGLTextureConstructor", - "JSCanvasTextureCustom" => "JSWebGLTextureCustom", - "V8CanvasTexture" => "V8WebGLTexture", - "V8CanvasTexturePrototype" => "V8WebGLTexturePrototype", - "V8CanvasTextureConstructor" => "V8WebGLTextureConstructor", - "V8CanvasTextureCustom" => "V8WebGLTextureCustom", - "CanvasUnsignedByteArray" => "WebGLUnsignedByteArray", - "canvasUnsignedByteArray" => "webGLUnsignedByteArray", - "CanvasUnsignedByteArrayConstructor" => "WebGLUnsignedByteArrayConstructor", - "CanvasUnsignedByteArrayPrototype" => "WebGLUnsignedByteArrayPrototype", - "toCanvasUnsignedByteArray" => "toWebGLUnsignedByteArray", - "JSCanvasUnsignedByteArray" => "JSWebGLUnsignedByteArray", - "JSCanvasUnsignedByteArrayPrototype" => "JSWebGLUnsignedByteArrayPrototype", - "JSCanvasUnsignedByteArrayConstructor" => "JSWebGLUnsignedByteArrayConstructor", - "JSCanvasUnsignedByteArrayCustom" => "JSWebGLUnsignedByteArrayCustom", - "V8CanvasUnsignedByteArray" => "V8WebGLUnsignedByteArray", - "V8CanvasUnsignedByteArrayPrototype" => "V8WebGLUnsignedByteArrayPrototype", - "V8CanvasUnsignedByteArrayConstructor" => "V8WebGLUnsignedByteArrayConstructor", - "V8CanvasUnsignedByteArrayCustom" => "V8WebGLUnsignedByteArrayCustom", - "CanvasUnsignedIntArray" => "WebGLUnsignedIntArray", - "canvasUnsignedIntArray" => "webGLUnsignedIntArray", - "CanvasUnsignedIntArrayConstructor" => "WebGLUnsignedIntArrayConstructor", - "CanvasUnsignedIntArrayPrototype" => "WebGLUnsignedIntArrayPrototype", - "toCanvasUnsignedIntArray" => "toWebGLUnsignedIntArray", - "JSCanvasUnsignedIntArray" => "JSWebGLUnsignedIntArray", - "JSCanvasUnsignedIntArrayPrototype" => "JSWebGLUnsignedIntArrayPrototype", - "JSCanvasUnsignedIntArrayConstructor" => "JSWebGLUnsignedIntArrayConstructor", - "JSCanvasUnsignedIntArrayCustom" => "JSWebGLUnsignedIntArrayCustom", - "V8CanvasUnsignedIntArray" => "V8WebGLUnsignedIntArray", - "V8CanvasUnsignedIntArrayPrototype" => "V8WebGLUnsignedIntArrayPrototype", - "V8CanvasUnsignedIntArrayConstructor" => "V8WebGLUnsignedIntArrayConstructor", - "V8CanvasUnsignedIntArrayCustom" => "V8WebGLUnsignedIntArrayCustom", - "CanvasUnsignedShortArray" => "WebGLUnsignedShortArray", - "canvasUnsignedShortArray" => "webGLUnsignedShortArray", - "CanvasUnsignedShortArrayConstructor" => "WebGLUnsignedShortArrayConstructor", - "CanvasUnsignedShortArrayPrototype" => "WebGLUnsignedShortArrayPrototype", - "toCanvasUnsignedShortArray" => "toWebGLUnsignedShortArray", - "JSCanvasUnsignedShortArray" => "JSWebGLUnsignedShortArray", - "JSCanvasUnsignedShortArrayPrototype" => "JSWebGLUnsignedShortArrayPrototype", - "JSCanvasUnsignedShortArrayConstructor" => "JSWebGLUnsignedShortArrayConstructor", - "JSCanvasUnsignedShortArrayCustom" => "JSWebGLUnsignedShortArrayCustom", - "V8CanvasUnsignedShortArray" => "V8WebGLUnsignedShortArray", - "V8CanvasUnsignedShortArrayPrototype" => "V8WebGLUnsignedShortArrayPrototype", - "V8CanvasUnsignedShortArrayConstructor" => "V8WebGLUnsignedShortArrayConstructor", - "V8CanvasUnsignedShortArrayCustom" => "V8WebGLUnsignedShortArrayCustom" + "m_sel" => "m_selection", ); my %renamesContemplatedForTheFuture = ( + "HTMLPlugInImageElement" => "HTMLEmbeddedObjectElement", + "DOMObject" => "JSDOMObject", "runtimeObjectGetter" => "pluginElementGetter", diff --git a/WebKitTools/Scripts/make-script-test-wrappers b/WebKitTools/Scripts/make-script-test-wrappers index 133476c..aed1834 100755 --- a/WebKitTools/Scripts/make-script-test-wrappers +++ b/WebKitTools/Scripts/make-script-test-wrappers @@ -99,7 
+99,7 @@ for my $tfile (@templates) { $html =~ s:${templateDirectory}/(.*)\.js:$1.html:; next if -f "$html-disabled"; - system("grep -q 'successfullyParsed =' $file"); + system("cat ${file} | tr '\\0' ' ' | grep -q 'successfullyParsed ='"); if ($? != 0) { `echo "" >> "${file}"`; `echo "var successfullyParsed = true;" >> "${file}"`; diff --git a/WebKitTools/Scripts/mark-bug-fixed b/WebKitTools/Scripts/mark-bug-fixed deleted file mode 100755 index c7086c2..0000000 --- a/WebKitTools/Scripts/mark-bug-fixed +++ /dev/null @@ -1,141 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2009 Apple Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY -# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# Mark a bug as fixed on bugs.webkit.org. - -import os -import re -import sys - -from optparse import OptionParser - -from modules.bugzilla import Bugzilla, parse_bug_id -from modules.comments import bug_comment_from_svn_revision -from modules.logging import error, log -from modules.scm import SCM, detect_scm_system - - -class MarkBugFixedTool: - def __init__(self): - self.bugs = Bugzilla() - self.cached_scm = None - self.option_parser = OptionParser(usage="usage: %prog [options] [rNNNNN]") - self.option_parser.add_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log.") - self.option_parser.add_option("-m", "--comment", action="store", type="string", dest="comment", help="Text to include in bug comment.") - self.option_parser.add_option("-o", "--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only).") - self.option_parser.add_option("-u", "--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it.") - - def scm(self): - # Lazily initialize SCM to not error-out before command line parsing (or when running non-scm commands). 
- if not self.cached_scm: - original_cwd = os.path.abspath('.') - self.cached_scm = detect_scm_system(original_cwd) - return self.cached_scm - - def _fetch_commit_log(self, scm, svn_revision): - if not svn_revision: - return scm.last_svn_commit_log() - return scm.svn_commit_log(svn_revision) - - def _determine_bug_id_and_svn_revision(self, bug_id, svn_revision): - commit_log = self._fetch_commit_log(self.scm(), svn_revision) - - if not bug_id: - bug_id = parse_bug_id(commit_log) - - if not svn_revision: - match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE) - if match: - svn_revision = match.group('svn_revision') - - if not bug_id or not svn_revision: - not_found = [] - if not bug_id: - not_found.append("bug id") - if not svn_revision: - not_found.append("svn revision") - error("Could not find %s on command-line or in %s." - % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit")) - - return (bug_id, svn_revision) - - def _open_bug_in_web_browser(self, bug_id): - if sys.platform == "darwin": - SCM.run_command(["open", self.bugs.short_bug_url_for_bug_id(bug_id)]) - return - log("WARNING: -o|--open is only supported on Mac OS X.") - - def _prompt_user_for_correctness(self, bug_id, svn_revision): - answer = raw_input("Is this correct (y/N)? ") - if not re.match("^\s*y(es)?", answer, re.IGNORECASE): - exit(1) - - def main(self): - (options, args) = self.option_parser.parse_args(sys.argv[1:]) - - if len(args) > 1: - error("Only one revision may be specified.") - - bug_id = options.bug_id - - svn_revision = args[0] if len(args) == 1 else None - if svn_revision: - if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE): - svn_revision = svn_revision[1:] - if not re.match("^[0-9]+$", svn_revision): - error("Invalid svn revision: '%s'" % svn_revision) - - needs_prompt = False - if not bug_id or not svn_revision: - needs_prompt = True - (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(bug_id, svn_revision) - - log("Bug: <%s> %s" % (self.bugs.short_bug_url_for_bug_id(bug_id), self.bugs.fetch_title_from_bug(bug_id))) - log("Revision: %s" % svn_revision) - - if options.open_bug: - self._open_bug_in_web_browser(bug_id) - - if needs_prompt: - self._prompt_user_for_correctness(bug_id, svn_revision) - - bug_comment = bug_comment_from_svn_revision(svn_revision) - if options.comment: - bug_comment = "%s\n\n%s" % (options.comment, bug_comment) - - if options.update_only: - log("Adding comment to Bug %s." % bug_id) - self.bugs.post_comment_to_bug(bug_id, bug_comment) - else: - log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id) - self.bugs.close_bug_as_fixed(bug_id, bug_comment) - - -def main(): - tool = MarkBugFixedTool() - return tool.main() - -if __name__ == "__main__": - main() diff --git a/WebKitTools/Scripts/modules/bugzilla.py b/WebKitTools/Scripts/modules/bugzilla.py deleted file mode 100644 index be78544..0000000 --- a/WebKitTools/Scripts/modules/bugzilla.py +++ /dev/null @@ -1,595 +0,0 @@ -# Copyright (c) 2009, Google Inc. All rights reserved. -# Copyright (c) 2009 Apple Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# WebKit's Python module for interacting with Bugzilla - -import getpass -import platform -import re -import subprocess -import urllib2 - -from datetime import datetime # used in timestamp() - -# Import WebKit-specific modules. -from modules.logging import error, log -from modules.committers import CommitterList - -# WebKit includes a built copy of BeautifulSoup in Scripts/modules -# so this import should always succeed. -from .BeautifulSoup import BeautifulSoup, SoupStrainer - -try: - from mechanize import Browser -except ImportError, e: - print """ -mechanize is required. - -To install: -sudo easy_install mechanize - -Or from the web: -http://wwwsearch.sourceforge.net/mechanize/ -""" - exit(1) - -def credentials_from_git(): - return [read_config("username"), read_config("password")] - -def credentials_from_keychain(username=None): - if not is_mac_os_x(): - return [username, None] - - command = "/usr/bin/security %s -g -s %s" % ("find-internet-password", Bugzilla.bug_server_host) - if username: - command += " -a %s" % username - - log('Reading Keychain for %s account and password. Click "Allow" to continue...' 
% Bugzilla.bug_server_host) - keychain_process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) - value = keychain_process.communicate()[0] - exit_code = keychain_process.wait() - - if exit_code: - return [username, None] - - match = re.search('^\s*"acct"<blob>="(?P<username>.+)"', value, re.MULTILINE) - if match: - username = match.group('username') - - password = None - match = re.search('^password: "(?P<password>.+)"', value, re.MULTILINE) - if match: - password = match.group('password') - - return [username, password] - -def is_mac_os_x(): - return platform.mac_ver()[0] - -def parse_bug_id(message): - match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message) - if match: - return int(match.group('bug_id')) - match = re.search(Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", message) - if match: - return int(match.group('bug_id')) - return None - -# FIXME: This should not depend on git for config storage -def read_config(key): - # Need a way to read from svn too - config_process = subprocess.Popen("git config --get bugzilla.%s" % key, stdout=subprocess.PIPE, shell=True) - value = config_process.communicate()[0] - return_code = config_process.wait() - - if return_code: - return None - return value.rstrip('\n') - -def read_credentials(): - (username, password) = credentials_from_git() - - if not username or not password: - (username, password) = credentials_from_keychain(username) - - if not username: - username = raw_input("Bugzilla login: ") - if not password: - password = getpass.getpass("Bugzilla password for %s: " % username) - - return [username, password] - -def timestamp(): - return datetime.now().strftime("%Y%m%d%H%M%S") - - -class BugzillaError(Exception): - pass - - -class Bugzilla: - def __init__(self, dryrun=False, committers=CommitterList()): - self.dryrun = dryrun - self.authenticated = False - - self.browser = Browser() - # Ignore bugs.webkit.org/robots.txt until we fix it to allow this script - self.browser.set_handle_robots(False) - self.committers = committers - - # Defaults (until we support better option parsing): - bug_server_host = "bugs.webkit.org" - bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host) - bug_server_url = "https://%s/" % bug_server_host - - def bug_url_for_bug_id(self, bug_id, xml=False): - content_type = "&ctype=xml" if xml else "" - return "%sshow_bug.cgi?id=%s%s" % (self.bug_server_url, bug_id, content_type) - - def short_bug_url_for_bug_id(self, bug_id): - return "http://webkit.org/b/%s" % bug_id - - def attachment_url_for_id(self, attachment_id, action="view"): - action_param = "" - if action and action != "view": - action_param = "&action=%s" % action - return "%sattachment.cgi?id=%s%s" % (self.bug_server_url, attachment_id, action_param) - - def _parse_attachment_flag(self, element, flag_name, attachment, result_key): - flag = element.find('flag', attrs={'name' : flag_name}) - if flag: - attachment[flag_name] = flag['status'] - if flag['status'] == '+': - attachment[result_key] = flag['setter'] - - def _parse_attachment_element(self, element, bug_id): - attachment = {} - attachment['bug_id'] = bug_id - attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1") - attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1") - attachment['id'] = int(element.find('attachid').string) - attachment['url'] = self.attachment_url_for_id(attachment['id']) - attachment['name'] = 
unicode(element.find('desc').string) - attachment['attacher_email'] = str(element.find('attacher').string) - attachment['type'] = str(element.find('type').string) - self._parse_attachment_flag(element, 'review', attachment, 'reviewer_email') - self._parse_attachment_flag(element, 'commit-queue', attachment, 'committer_email') - return attachment - - def fetch_attachments_from_bug(self, bug_id): - bug_url = self.bug_url_for_bug_id(bug_id, xml=True) - log("Fetching: %s" % bug_url) - - page = urllib2.urlopen(bug_url) - soup = BeautifulSoup(page) - - attachments = [] - for element in soup.findAll('attachment'): - attachment = self._parse_attachment_element(element, bug_id) - attachments.append(attachment) - return attachments - - def _parse_bug_id_from_attachment_page(self, page): - up_link = BeautifulSoup(page).find('link', rel='Up') # The "Up" relation happens to point to the bug. - if not up_link: - return None # This attachment does not exist (or you don't have permissions to view it). - match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href']) - return int(match.group('bug_id')) - - def bug_id_for_attachment_id(self, attachment_id): - attachment_url = self.attachment_url_for_id(attachment_id, 'edit') - log("Fetching: %s" % attachment_url) - page = urllib2.urlopen(attachment_url) - return self._parse_bug_id_from_attachment_page(page) - - # This should really return an Attachment object - # which can lazily fetch any missing data. - def fetch_attachment(self, attachment_id): - # We could grab all the attachment details off of the attachment edit page - # but we already have working code to do so off of the bugs page, so re-use that. - bug_id = self.bug_id_for_attachment_id(attachment_id) - if not bug_id: - return None - attachments = self.fetch_attachments_from_bug(bug_id) - for attachment in attachments: - # FIXME: Once we have a real Attachment class we shouldn't paper over this possible comparison failure - # and we should remove the int() == int() hacks and leave it just ==. - if int(attachment['id']) == int(attachment_id): - self._validate_committer_and_reviewer(attachment) - return attachment - return None # This should never be hit. - - def fetch_title_from_bug(self, bug_id): - bug_url = self.bug_url_for_bug_id(bug_id, xml=True) - page = urllib2.urlopen(bug_url) - soup = BeautifulSoup(page) - return soup.find('short_desc').string - - def fetch_patches_from_bug(self, bug_id): - patches = [] - for attachment in self.fetch_attachments_from_bug(bug_id): - if attachment['is_patch'] and not attachment['is_obsolete']: - patches.append(attachment) - return patches - - # _view_source_link belongs in some sort of webkit_config.py module. - def _view_source_link(self, local_path): - return "http://trac.webkit.org/browser/trunk/%s" % local_path - - def _flag_permission_rejection_message(self, setter_email, flag_name): - committer_list = "WebKitTools/Scripts/modules/committers.py" - contribution_guidlines_url = "http://webkit.org/coding/contributing.html" - rejection_message = "%s does not have %s permissions according to %s." % (setter_email, flag_name, self._view_source_link(committer_list)) - rejection_message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed) and then set the %s flag again." % (flag_name, committer_list, flag_name) - rejection_message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." 
% (flag_name, contribution_guidlines_url) - return rejection_message - - def _validate_setter_email(self, patch, result_key, lookup_function, rejection_function, reject_invalid_patches): - setter_email = patch.get(result_key + '_email') - if not setter_email: - return None - - committer = lookup_function(setter_email) - if committer: - patch[result_key] = committer.full_name - return patch[result_key] - - if reject_invalid_patches: - rejection_function(patch['id'], self._flag_permission_rejection_message(setter_email, result_key)) - else: - log("Warning, attachment %s on bug %s has invalid %s (%s)" % (patch['id'], patch['bug_id'], result_key, setter_email)) - return None - - def _validate_reviewer(self, patch, reject_invalid_patches): - return self._validate_setter_email(patch, 'reviewer', self.committers.reviewer_by_email, self.reject_patch_from_review_queue, reject_invalid_patches) - - def _validate_committer(self, patch, reject_invalid_patches): - return self._validate_setter_email(patch, 'committer', self.committers.committer_by_email, self.reject_patch_from_commit_queue, reject_invalid_patches) - - # FIXME: This is a hack until we have a real Attachment object. - # _validate_committer and _validate_reviewer fill in the 'reviewer' and 'committer' - # keys which other parts of the code expect to be filled in. - def _validate_committer_and_reviewer(self, patch): - self._validate_reviewer(patch, reject_invalid_patches=False) - self._validate_committer(patch, reject_invalid_patches=False) - - def fetch_unreviewed_patches_from_bug(self, bug_id): - unreviewed_patches = [] - for attachment in self.fetch_attachments_from_bug(bug_id): - if attachment.get('review') == '?' and not attachment['is_obsolete']: - unreviewed_patches.append(attachment) - return unreviewed_patches - - def fetch_reviewed_patches_from_bug(self, bug_id, reject_invalid_patches=False): - reviewed_patches = [] - for attachment in self.fetch_attachments_from_bug(bug_id): - if self._validate_reviewer(attachment, reject_invalid_patches) and not attachment['is_obsolete']: - reviewed_patches.append(attachment) - return reviewed_patches - - def fetch_commit_queue_patches_from_bug(self, bug_id, reject_invalid_patches=False): - commit_queue_patches = [] - for attachment in self.fetch_reviewed_patches_from_bug(bug_id, reject_invalid_patches): - if self._validate_committer(attachment, reject_invalid_patches) and not attachment['is_obsolete']: - commit_queue_patches.append(attachment) - return commit_queue_patches - - def _fetch_bug_ids_advanced_query(self, query): - page = urllib2.urlopen(query) - soup = BeautifulSoup(page) - - bug_ids = [] - # Grab the cells in the first column (which happens to be the bug ids) - for bug_link_cell in soup('td', "first-child"): # tds with the class "first-child" - bug_link = bug_link_cell.find("a") - bug_ids.append(int(bug_link.string)) # the contents happen to be the bug id - - return bug_ids - - def _parse_attachment_ids_request_query(self, page): - digits = re.compile("\d+") - attachment_href = re.compile("attachment.cgi\?id=\d+&action=review") - attachment_links = SoupStrainer("a", href=attachment_href) - return [int(digits.search(tag["href"]).group(0)) for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)] - - def _fetch_attachment_ids_request_query(self, query): - return self._parse_attachment_ids_request_query(urllib2.urlopen(query)) - - def fetch_bug_ids_from_commit_queue(self): - commit_queue_url = self.bug_server_url + 
"buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B" - return self._fetch_bug_ids_advanced_query(commit_queue_url) - - # List of all r+'d bugs. - def fetch_bug_ids_from_needs_commit_list(self): - needs_commit_query_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B" - return self._fetch_bug_ids_advanced_query(needs_commit_query_url) - - def fetch_bug_ids_from_review_queue(self): - review_queue_url = self.bug_server_url + "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?" - return self._fetch_bug_ids_advanced_query(review_queue_url) - - def fetch_attachment_ids_from_review_queue(self): - review_queue_url = self.bug_server_url + "request.cgi?action=queue&type=review&group=type" - return self._fetch_attachment_ids_request_query(review_queue_url) - - def fetch_patches_from_commit_queue(self, reject_invalid_patches=False): - patches_to_land = [] - for bug_id in self.fetch_bug_ids_from_commit_queue(): - patches = self.fetch_commit_queue_patches_from_bug(bug_id, reject_invalid_patches) - patches_to_land += patches - return patches_to_land - - def fetch_patches_from_pending_commit_list(self): - patches_needing_commit = [] - for bug_id in self.fetch_bug_ids_from_needs_commit_list(): - patches = self.fetch_reviewed_patches_from_bug(bug_id) - patches_needing_commit += patches - return patches_needing_commit - - def fetch_patches_from_review_queue(self, limit=None): - patches_to_review = [] - for bug_id in self.fetch_bug_ids_from_review_queue(): - if limit and len(patches_to_review) >= limit: - break - patches = self.fetch_unreviewed_patches_from_bug(bug_id) - patches_to_review += patches - return patches_to_review - - def authenticate(self): - if self.authenticated: - return - - if self.dryrun: - log("Skipping log in for dry run...") - self.authenticated = True - return - - (username, password) = read_credentials() - - log("Logging in as %s..." % username) - self.browser.open(self.bug_server_url + "index.cgi?GoAheadAndLogIn=1") - self.browser.select_form(name="login") - self.browser['Bugzilla_login'] = username - self.browser['Bugzilla_password'] = password - response = self.browser.submit() - - match = re.search("<title>(.+?)</title>", response.read()) - # If the resulting page has a title, and it contains the word "invalid" assume it's the login failure page. - if match and re.search("Invalid", match.group(1), re.IGNORECASE): - # FIXME: We could add the ability to try again on failure. 
- raise BugzillaError("Bugzilla login failed: %s" % match.group(1)) - - self.authenticated = True - - def _fill_attachment_form(self, description, patch_file_object, comment_text=None, mark_for_review=False, mark_for_commit_queue=False, bug_id=None): - self.browser['description'] = description - self.browser['ispatch'] = ("1",) - self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',) - self.browser['flag_type-3'] = ('?',) if mark_for_commit_queue else ('X',) - if bug_id: - patch_name = "bug-%s-%s.patch" % (bug_id, timestamp()) - else: - patch_name ="%s.patch" % timestamp() - self.browser.add_file(patch_file_object, "text/plain", patch_name, 'data') - - def add_patch_to_bug(self, bug_id, patch_file_object, description, comment_text=None, mark_for_review=False, mark_for_commit_queue=False): - self.authenticate() - - log('Adding patch "%s" to bug %s' % (description, bug_id)) - if self.dryrun: - log(comment_text) - return - - self.browser.open("%sattachment.cgi?action=enter&bugid=%s" % (self.bug_server_url, bug_id)) - self.browser.select_form(name="entryform") - self._fill_attachment_form(description, patch_file_object, mark_for_review=mark_for_review, mark_for_commit_queue=mark_for_commit_queue, bug_id=bug_id) - if comment_text: - log(comment_text) - self.browser['comment'] = comment_text - self.browser.submit() - - def prompt_for_component(self, components): - log("Please pick a component:") - i = 0 - for name in components: - i += 1 - log("%2d. %s" % (i, name)) - result = int(raw_input("Enter a number: ")) - 1 - return components[result] - - def _check_create_bug_response(self, response_html): - match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>", response_html) - if match: - return match.group('bug_id') - - match = re.search('<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">', response_html, re.DOTALL) - error_message = "FAIL" - if match: - text_lines = BeautifulSoup(match.group('error_message')).findAll(text=True) - error_message = "\n" + '\n'.join([" " + line.strip() for line in text_lines if line.strip()]) - raise BugzillaError("Bug not created: %s" % error_message) - - def create_bug_with_patch(self, bug_title, bug_description, component, patch_file_object, patch_description, cc, mark_for_review=False, mark_for_commit_queue=False): - self.authenticate() - - log('Creating bug with patch description "%s"' % patch_description) - if self.dryrun: - log(bug_description) - return - - self.browser.open(self.bug_server_url + "enter_bug.cgi?product=WebKit") - self.browser.select_form(name="Create") - component_items = self.browser.find_control('component').items - component_names = map(lambda item: item.name, component_items) - if not component or component not in component_names: - component = self.prompt_for_component(component_names) - self.browser['component'] = [component] - if cc: - self.browser['cc'] = cc - self.browser['short_desc'] = bug_title - if bug_description: - log(bug_description) - self.browser['comment'] = bug_description - - self._fill_attachment_form(patch_description, patch_file_object, mark_for_review=mark_for_review, mark_for_commit_queue=mark_for_commit_queue) - response = self.browser.submit() - - bug_id = self._check_create_bug_response(response.read()) - log("Bug %s created." 
% bug_id) - log("%sshow_bug.cgi?id=%s" % (self.bug_server_url, bug_id)) - return bug_id - - def _find_select_element_for_flag(self, flag_name): - # FIXME: This will break if we ever re-order attachment flags - if flag_name == "review": - return self.browser.find_control(type='select', nr=0) - if flag_name == "commit-queue": - return self.browser.find_control(type='select', nr=1) - raise Exception("Don't know how to find flag named \"%s\"" % flag_name) - - def clear_attachment_flags(self, attachment_id, additional_comment_text=None): - self.authenticate() - - comment_text = "Clearing flags on attachment: %s" % attachment_id - if additional_comment_text: - comment_text += "\n\n%s" % additional_comment_text - log(comment_text) - - if self.dryrun: - return - - self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) - self.browser.select_form(nr=1) - self.browser.set_value(comment_text, name='comment', nr=0) - self._find_select_element_for_flag('review').value = ("X",) - self._find_select_element_for_flag('commit-queue').value = ("X",) - self.browser.submit() - - # FIXME: We need a way to test this on a live bugzilla instance. - def _set_flag_on_attachment(self, attachment_id, flag_name, flag_value, comment_text, additional_comment_text): - self.authenticate() - - if additional_comment_text: - comment_text += "\n\n%s" % additional_comment_text - log(comment_text) - - if self.dryrun: - return - - self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) - self.browser.select_form(nr=1) - self.browser.set_value(comment_text, name='comment', nr=0) - self._find_select_element_for_flag(flag_name).value = (flag_value,) - self.browser.submit() - - def reject_patch_from_commit_queue(self, attachment_id, additional_comment_text=None): - comment_text = "Rejecting patch %s from commit-queue." % attachment_id - self._set_flag_on_attachment(attachment_id, 'commit-queue', '-', comment_text, additional_comment_text) - - def reject_patch_from_review_queue(self, attachment_id, additional_comment_text=None): - comment_text = "Rejecting patch %s from review queue." % attachment_id - self._set_flag_on_attachment(attachment_id, 'review', '-', comment_text, additional_comment_text) - - def obsolete_attachment(self, attachment_id, comment_text = None): - self.authenticate() - - log("Obsoleting attachment: %s" % attachment_id) - if self.dryrun: - log(comment_text) - return - - self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) - self.browser.select_form(nr=1) - self.browser.find_control('isobsolete').items[0].selected = True - # Also clear any review flag (to remove it from review/commit queues) - self._find_select_element_for_flag('review').value = ("X",) - self._find_select_element_for_flag('commit-queue').value = ("X",) - if comment_text: - log(comment_text) - # Bugzilla has two textareas named 'comment', one is somehow hidden. We want the first. 
- self.browser.set_value(comment_text, name='comment', nr=0) - self.browser.submit() - - def add_cc_to_bug(self, bug_id, email_address): - self.authenticate() - - log("Adding %s to the CC list for bug %s" % (email_address, bug_id)) - if self.dryrun: - return - - self.browser.open(self.bug_url_for_bug_id(bug_id)) - self.browser.select_form(name="changeform") - self.browser["newcc"] = email_address - self.browser.submit() - - def post_comment_to_bug(self, bug_id, comment_text, cc=None): - self.authenticate() - - log("Adding comment to bug %s" % bug_id) - if self.dryrun: - log(comment_text) - return - - self.browser.open(self.bug_url_for_bug_id(bug_id)) - self.browser.select_form(name="changeform") - self.browser["comment"] = comment_text - if cc: - self.browser["newcc"] = cc - self.browser.submit() - - def close_bug_as_fixed(self, bug_id, comment_text=None): - self.authenticate() - - log("Closing bug %s as fixed" % bug_id) - if self.dryrun: - log(comment_text) - return - - self.browser.open(self.bug_url_for_bug_id(bug_id)) - self.browser.select_form(name="changeform") - if comment_text: - log(comment_text) - self.browser['comment'] = comment_text - self.browser['bug_status'] = ['RESOLVED'] - self.browser['resolution'] = ['FIXED'] - self.browser.submit() - - def reopen_bug(self, bug_id, comment_text): - self.authenticate() - - log("Re-opening bug %s" % bug_id) - log(comment_text) # Bugzilla requires a comment when re-opening a bug, so we know it will never be None. - if self.dryrun: - return - - self.browser.open(self.bug_url_for_bug_id(bug_id)) - self.browser.select_form(name="changeform") - self.browser['bug_status'] = ['REOPENED'] - self.browser['comment'] = comment_text - self.browser.submit() diff --git a/WebKitTools/Scripts/modules/buildsteps.py b/WebKitTools/Scripts/modules/buildsteps.py deleted file mode 100644 index 425b912..0000000 --- a/WebKitTools/Scripts/modules/buildsteps.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (C) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
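Editor's note on the bugzilla.py hunk removed above: its parse_bug_id() helper recognizes both the short webkit.org/b/NNN form and the canonical show_bug.cgi?id=NNN URL. A minimal stand-alone sketch of that idea follows; it is an illustration only, not the deleted module, and uses nothing beyond the standard re module.

import re

def parse_bug_id(message):
    # Accept either the short webkit.org/b/NNN form or the full
    # show_bug.cgi?id=NNN URL and return the numeric id, or None.
    for pattern in (r"http://webkit\.org/b/(?P<bug_id>\d+)",
                    r"https?://bugs\.webkit\.org/show_bug\.cgi\?id=(?P<bug_id>\d+)"):
        match = re.search(pattern, message)
        if match:
            return int(match.group("bug_id"))
    return None

assert parse_bug_id("Reviewed at http://webkit.org/b/12345.") == 12345
assert parse_bug_id("no bug url here") is None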
- -import os - -from optparse import make_option - -from modules.comments import bug_comment_from_commit_text -from modules.logging import log, error -from modules.webkitport import WebKitPort - - -class CommandOptions(object): - force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)") - clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches") - check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing.") - quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.") - non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.") - parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.") - update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.") - build = make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test.") - test = make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests.") - close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.") - port = make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...).") - - -class AbstractStep(object): - def __init__(self, tool, options, patch=None): - self._tool = tool - self._options = options - self._patch = patch - self._port = None - - def _run_script(self, script_name, quiet=False, port=WebKitPort): - log("Running %s" % script_name) - self._tool.executive.run_and_throw_if_fail(port.script_path(script_name), quiet) - - # FIXME: The port should live on the tool. 
- def port(self): - if self._port: - return self._port - self._port = WebKitPort.port(self._options.port) - return self._port - - @classmethod - def options(cls): - return [] - - def run(self, tool): - raise NotImplementedError, "subclasses must implement" - - -class PrepareChangelogStep(AbstractStep): - def run(self): - self._run_script("prepare-ChangeLog") - - -class CleanWorkingDirectoryStep(AbstractStep): - def __init__(self, tool, options, patch=None, allow_local_commits=False): - AbstractStep.__init__(self, tool, options, patch) - self._allow_local_commits = allow_local_commits - - @classmethod - def options(cls): - return [ - CommandOptions.force_clean, - CommandOptions.clean, - ] - - def run(self): - os.chdir(self._tool.scm().checkout_root) - if not self._allow_local_commits: - self._tool.scm().ensure_no_local_commits(self._options.force_clean) - if self._options.clean: - self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean) - - -class UpdateStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.update, - CommandOptions.port, - ] - - def run(self): - if not self._options.update: - return - log("Updating working directory") - self._tool.executive.run_and_throw_if_fail(self.port().update_webkit_command()) - - -class ApplyPatchStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.non_interactive, - ] - - def run(self): - log("Processing patch %s from bug %s." % (self._patch["id"], self._patch["bug_id"])) - self._tool.scm().apply_patch(self._patch, force=self._options.non_interactive) - - -class EnsureBuildersAreGreenStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.check_builders, - ] - - def run(self): - if not self._options.check_builders: - return - red_builders_names = self._tool.buildbot.red_core_builders_names() - if not red_builders_names: - return - red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names. - error("Builders [%s] are red, please do not commit.\nSee http://%s.\nPass --ignore-builders to bypass this check." 
% (", ".join(red_builders_names), self._tool.buildbot.buildbot_host)) - - -class BuildStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.build, - CommandOptions.quiet, - ] - - def run(self): - if not self._options.build: - return - log("Building WebKit") - self._tool.executive.run_and_throw_if_fail(self.port().build_webkit_command(), self._options.quiet) - - -class CheckStyleStep(AbstractStep): - def run(self): - self._run_script("check-webkit-style") - - -class RunTestsStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.build, - CommandOptions.test, - CommandOptions.non_interactive, - CommandOptions.quiet, - CommandOptions.port, - ] - - def run(self): - if not self._options.build: - return - if not self._options.test: - return - args = self.port().run_webkit_tests_command() - if self._options.non_interactive: - args.append("--no-launch-safari") - args.append("--exit-after-n-failures=1") - if self._options.quiet: - args.append("--quiet") - self._tool.executive.run_and_throw_if_fail(args) - - -class CommitStep(AbstractStep): - def run(self): - commit_message = self._tool.scm().commit_message_for_this_commit() - return self._tool.scm().commit_with_message(commit_message.message()) - - -class ClosePatchStep(AbstractStep): - def run(self, commit_log): - comment_text = bug_comment_from_commit_text(self._tool.scm(), commit_log) - self._tool.bugs.clear_attachment_flags(self._patch["id"], comment_text) - - -class CloseBugStep(AbstractStep): - @classmethod - def options(cls): - return [ - CommandOptions.close_bug, - ] - - def run(self): - if not self._options.close_bug: - return - # Check to make sure there are no r? or r+ patches on the bug before closing. - # Assume that r- patches are just previous patches someone forgot to obsolete. - patches = self._tool.bugs.fetch_patches_from_bug(self._patch["bug_id"]) - for patch in patches: - review_flag = patch.get("review") - if review_flag == "?" or review_flag == "+": - log("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." % (patch["bug_id"], patch["id"], review_flag)) - return - self._tool.bugs.close_bug_as_fixed(self._patch["bug_id"], "All reviewed patches have been landed. Closing bug.") - - -# FIXME: This class is a dinosaur and should be extinct soon. -class BuildSteps: - # FIXME: The options should really live on each "Step" object. - @staticmethod - def cleaning_options(): - return [ - CommandOptions.force_clean, - CommandOptions.clean, - ] - - # FIXME: These distinctions are bogus. We need a better model for handling options. - @staticmethod - def build_options(): - return [ - CommandOptions.check_builders, - CommandOptions.quiet, - CommandOptions.non_interactive, - CommandOptions.parent_command, - CommandOptions.port, - ] - - @staticmethod - def land_options(): - return [ - CommandOptions.update, - CommandOptions.build, - CommandOptions.test, - CommandOptions.close_bug, - ] - diff --git a/WebKitTools/Scripts/modules/commands/download.py b/WebKitTools/Scripts/modules/commands/download.py deleted file mode 100644 index 2acd69f..0000000 --- a/WebKitTools/Scripts/modules/commands/download.py +++ /dev/null @@ -1,370 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009, Google Inc. All rights reserved. -# Copyright (c) 2009 Apple Inc. All rights reserved. 
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os - -from optparse import make_option - -from modules.bugzilla import parse_bug_id -from modules.buildsteps import CommandOptions, BuildSteps, EnsureBuildersAreGreenStep, CleanWorkingDirectoryStep, UpdateStep, ApplyPatchStep, BuildStep, CheckStyleStep, PrepareChangelogStep -from modules.changelogs import ChangeLog -from modules.comments import bug_comment_from_commit_text -from modules.executive import ScriptError -from modules.grammar import pluralize -from modules.landingsequence import LandingSequence -from modules.logging import error, log -from modules.multicommandtool import Command -from modules.stepsequence import StepSequence - - -class Build(Command): - name = "build" - show_in_main_help = False - def __init__(self): - self._sequence = StepSequence([ - CleanWorkingDirectoryStep, - UpdateStep, - BuildStep - ]) - Command.__init__(self, "Update working copy and build", "", self._sequence.options()) - - def execute(self, options, args, tool): - self._sequence.run_and_handle_errors(tool, options) - - -class ApplyAttachment(Command): - name = "apply-attachment" - show_in_main_help = True - def __init__(self): - options = WebKitApplyingScripts.apply_options() - options += BuildSteps.cleaning_options() - Command.__init__(self, "Apply an attachment to the local working directory", "ATTACHMENT_ID", options=options) - - def execute(self, options, args, tool): - WebKitApplyingScripts.setup_for_patch_apply(tool, options) - attachment_id = args[0] - attachment = tool.bugs.fetch_attachment(attachment_id) - WebKitApplyingScripts.apply_patches_with_options(tool.scm(), [attachment], options) - - -class ApplyPatches(Command): - name = "apply-patches" - show_in_main_help = True - def __init__(self): - options = WebKitApplyingScripts.apply_options() - options += BuildSteps.cleaning_options() - Command.__init__(self, "Apply reviewed patches from provided bugs to the local working directory", "BUGID", options=options) - - def execute(self, options, args, tool): - WebKitApplyingScripts.setup_for_patch_apply(tool, options) - bug_id = 
args[0] - patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id) - WebKitApplyingScripts.apply_patches_with_options(tool.scm(), patches, options) - - -class WebKitApplyingScripts: - @staticmethod - def apply_options(): - return [ - make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory before applying patches"), - make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch"), - CommandOptions.port, - ] - - @staticmethod - def setup_for_patch_apply(tool, options): - clean_step = CleanWorkingDirectoryStep(tool, options, allow_local_commits=True) - clean_step.run() - update_step = UpdateStep(tool, options) - update_step.run() - - @staticmethod - def apply_patches_with_options(scm, patches, options): - if options.local_commit and not scm.supports_local_commits(): - error("--local-commit passed, but %s does not support local commits" % scm.display_name()) - - for patch in patches: - log("Applying attachment %s from bug %s" % (patch["id"], patch["bug_id"])) - scm.apply_patch(patch) - if options.local_commit: - commit_message = scm.commit_message_for_this_commit() - scm.commit_locally_with_message(commit_message.message() or patch["name"]) - - -class LandDiffSequence(LandingSequence): - def run(self): - self.check_builders() - self.build() - self.test() - commit_log = self.commit() - self.close_bug(commit_log) - - def close_bug(self, commit_log): - comment_test = bug_comment_from_commit_text(self._tool.scm(), commit_log) - bug_id = self._patch["bug_id"] - if bug_id: - log("Updating bug %s" % bug_id) - if self._options.close_bug: - self._tool.bugs.close_bug_as_fixed(bug_id, comment_test) - else: - # FIXME: We should a smart way to figure out if the patch is attached - # to the bug, and if so obsolete it. - self._tool.bugs.post_comment_to_bug(bug_id, comment_test) - else: - log(comment_test) - log("No bug id provided.") - - -class LandDiff(Command): - name = "land-diff" - show_in_main_help = True - def __init__(self): - options = [ - make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER."), - ] - options += BuildSteps.build_options() - options += BuildSteps.land_options() - Command.__init__(self, "Land the current working directory diff and updates the associated bug if any", "[BUGID]", options=options) - - def guess_reviewer_from_bug(self, bugs, bug_id): - patches = bugs.fetch_reviewed_patches_from_bug(bug_id) - if len(patches) != 1: - log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id)) - return None - patch = patches[0] - reviewer = patch["reviewer"] - log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (reviewer, patch["id"], bug_id)) - return reviewer - - def update_changelogs_with_reviewer(self, reviewer, bug_id, tool): - if not reviewer: - if not bug_id: - log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.") - return - reviewer = self.guess_reviewer_from_bug(tool.bugs, bug_id) - - if not reviewer: - log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." 
% bug_id) - return - - for changelog_path in tool.scm().modified_changelogs(): - ChangeLog(changelog_path).set_reviewer(reviewer) - - def execute(self, options, args, tool): - bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch()) - - EnsureBuildersAreGreenStep(tool, options).run() - - os.chdir(tool.scm().checkout_root) - self.update_changelogs_with_reviewer(options.reviewer, bug_id, tool) - - fake_patch = { - "id": None, - "bug_id": bug_id - } - - sequence = LandDiffSequence(fake_patch, options, tool) - sequence.run() - - -class AbstractPatchProcessingCommand(Command): - def __init__(self, help_text, args_description, options): - Command.__init__(self, help_text, args_description, options=options) - - def _fetch_list_of_patches_to_process(self, options, args, tool): - raise NotImplementedError, "subclasses must implement" - - def _prepare_to_process(self, options, args, tool): - raise NotImplementedError, "subclasses must implement" - - @staticmethod - def _collect_patches_by_bug(patches): - bugs_to_patches = {} - for patch in patches: - bug_id = patch["bug_id"] - bugs_to_patches[bug_id] = bugs_to_patches.get(bug_id, []) + [patch] - return bugs_to_patches - - def execute(self, options, args, tool): - self._prepare_to_process(options, args, tool) - patches = self._fetch_list_of_patches_to_process(options, args, tool) - - # It's nice to print out total statistics. - bugs_to_patches = self._collect_patches_by_bug(patches) - log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches)))) - - for patch in patches: - self._process_patch(patch, options, args, tool) - - -class CheckStyle(AbstractPatchProcessingCommand): - name = "check-style" - show_in_main_help = False - def __init__(self): - self._sequence = StepSequence([ - CleanWorkingDirectoryStep, - UpdateStep, - ApplyPatchStep, - CheckStyleStep, - ]) - AbstractPatchProcessingCommand.__init__(self, "Run check-webkit-style on the specified attachments", "ATTACHMENT_ID [ATTACHMENT_IDS]", self._sequence.options()) - - def _fetch_list_of_patches_to_process(self, options, args, tool): - return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args) - - def _prepare_to_process(self, options, args, tool): - pass - - def _process_patch(self, patch, options, args, tool): - self._sequence.run_and_handle_errors(tool, options, patch) - - -class BuildAttachment(AbstractPatchProcessingCommand): - name = "build-attachment" - show_in_main_help = False - def __init__(self): - self._sequence = StepSequence([ - CleanWorkingDirectoryStep, - UpdateStep, - ApplyPatchStep, - BuildStep, - ]) - AbstractPatchProcessingCommand.__init__(self, "Apply and build patches from bugzilla", "ATTACHMENT_ID [ATTACHMENT_IDS]", self._sequence.options()) - - def _fetch_list_of_patches_to_process(self, options, args, tool): - return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args) - - def _prepare_to_process(self, options, args, tool): - pass - - def _process_patch(self, patch, options, args, tool): - self._sequence.run_and_handle_errors(tool, options, patch) - - -class AbstractPatchLandingCommand(AbstractPatchProcessingCommand): - def __init__(self, help_text, args_description): - options = BuildSteps.cleaning_options() - options += BuildSteps.build_options() - options += BuildSteps.land_options() - AbstractPatchProcessingCommand.__init__(self, help_text, args_description, options) - - def _prepare_to_process(self, options, args, tool): - # Check the tree status first so we can fail early. 
- EnsureBuildersAreGreenStep(tool, options).run() - - def _process_patch(self, patch, options, args, tool): - sequence = LandingSequence(patch, options, tool) - sequence.run_and_handle_errors() - - -class LandAttachment(AbstractPatchLandingCommand): - name = "land-attachment" - show_in_main_help = True - def __init__(self): - AbstractPatchLandingCommand.__init__(self, "Land patches from bugzilla, optionally building and testing them first", "ATTACHMENT_ID [ATTACHMENT_IDS]") - - def _fetch_list_of_patches_to_process(self, options, args, tool): - return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args) - - -class LandPatches(AbstractPatchLandingCommand): - name = "land-patches" - show_in_main_help = True - def __init__(self): - AbstractPatchLandingCommand.__init__(self, "Land all patches on the given bugs, optionally building and testing them first", "BUGID [BUGIDS]") - - def _fetch_list_of_patches_to_process(self, options, args, tool): - all_patches = [] - for bug_id in args: - patches = tool.bugs.fetch_reviewed_patches_from_bug(bug_id) - log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id)) - all_patches += patches - return all_patches - - -# FIXME: Requires unit test. -class Rollout(Command): - name = "rollout" - show_in_main_help = True - def __init__(self): - options = BuildSteps.cleaning_options() - options += BuildSteps.build_options() - options += BuildSteps.land_options() - options.append(make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Commit the revert and re-open the original bug.")) - Command.__init__(self, "Revert the given revision in the working copy and optionally commit the revert and re-open the original bug", "REVISION [BUGID]", options=options) - - @staticmethod - def _create_changelogs_for_revert(tool, revision): - # First, discard the ChangeLog changes from the rollout. - changelog_paths = tool.scm().modified_changelogs() - tool.scm().revert_files(changelog_paths) - - # Second, make new ChangeLog entries for this rollout. - # This could move to prepare-ChangeLog by adding a --revert= option. - PrepareChangelogStep(tool, None).run() - for changelog_path in changelog_paths: - ChangeLog(changelog_path).update_for_revert(revision) - - @staticmethod - def _parse_bug_id_from_revision_diff(tool, revision): - original_diff = tool.scm().diff_for_revision(revision) - return parse_bug_id(original_diff) - - @staticmethod - def _reopen_bug_after_rollout(tool, bug_id, comment_text): - if bug_id: - tool.bugs.reopen_bug(bug_id, comment_text) - else: - log(comment_text) - log("No bugs were updated or re-opened to reflect this rollout.") - - def execute(self, options, args, tool): - revision = args[0] - bug_id = self._parse_bug_id_from_revision_diff(tool, revision) - if options.complete_rollout: - if bug_id: - log("Will re-open bug %s after rollout." % bug_id) - else: - log("Failed to parse bug number from diff. No bugs will be updated/reopened after the rollout.") - - CleanWorkingDirectoryStep(tool, options).run() - UpdateStep(tool, options).run() - tool.scm().apply_reverse_diff(revision) - self._create_changelogs_for_revert(tool, revision) - - # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout. - # Once we trust rollout we will remove this option. - if not options.complete_rollout: - log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"bugzilla-tool land-diff %s\" to commit the rollout." 
% bug_id) - else: - # FIXME: This function does not exist!! - # comment_text = WebKitLandingScripts.build_and_commit(tool.scm(), options) - raise ScriptError("OOPS! This option is not implemented (yet).") - self._reopen_bug_after_rollout(tool, bug_id, comment_text) diff --git a/WebKitTools/Scripts/modules/commands/download_unittest.py b/WebKitTools/Scripts/modules/commands/download_unittest.py deleted file mode 100644 index a1ed41a..0000000 --- a/WebKitTools/Scripts/modules/commands/download_unittest.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
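Editor's note on the download.py hunk removed above: every command there hands a list of step classes to a StepSequence, which instantiates each step with the shared tool/options pair and runs them in order. The sketch below shows only that composition pattern with toy stand-in classes (the real steps also take a patch, expose options(), and report errors through run_and_handle_errors).

class AbstractStep(object):
    def __init__(self, tool, options):
        self._tool = tool
        self._options = options

    def run(self):
        raise NotImplementedError

class UpdateStep(AbstractStep):
    def run(self):
        print("updating working copy")

class BuildStep(AbstractStep):
    def run(self):
        print("building WebKit")

class StepSequence(object):
    def __init__(self, step_classes):
        self._step_classes = step_classes

    def run(self, tool, options):
        # Each step gets the shared tool/options pair and runs in declaration order.
        for step_class in self._step_classes:
            step_class(tool, options).run()

StepSequence([UpdateStep, BuildStep]).run(tool=None, options=None)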
- -import unittest - -from modules.commands.commandtest import CommandsTest -from modules.commands.download import * -from modules.mock import Mock - -class DownloadCommandsTest(CommandsTest): - def _default_options(self): - options = Mock() - options.force_clean = False - options.clean = True - options.check_builders = True - options.quiet = False - options.non_interactive = False - options.update = True - options.build = True - options.test = True - options.close_bug = True - return options - - def test_build(self): - self.assert_execute_outputs(Build(), [], options=self._default_options()) - - def test_apply_attachment(self): - options = self._default_options() - options.update = True - options.local_commit = True - self.assert_execute_outputs(ApplyAttachment(), [197], options=options) - - def test_apply_patches(self): - options = self._default_options() - options.update = True - options.local_commit = True - self.assert_execute_outputs(ApplyPatches(), [42], options=options) - - def test_land_diff(self): - self.assert_execute_outputs(LandDiff(), [42], options=self._default_options()) - - def test_check_style(self): - self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options()) - - def test_build_attachment(self): - self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options()) - - def test_land_attachment(self): - self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options()) - - def test_land_patches(self): - self.assert_execute_outputs(LandPatches(), [42], options=self._default_options()) diff --git a/WebKitTools/Scripts/modules/commands/queues.py b/WebKitTools/Scripts/modules/commands/queues.py deleted file mode 100644 index 53b9e48..0000000 --- a/WebKitTools/Scripts/modules/commands/queues.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009, Google Inc. All rights reserved. -# Copyright (c) 2009 Apple Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
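Editor's note on the download_unittest.py hunk removed above: each test builds an options object by hand, with attributes mirroring the commands' optparse defaults, and feeds it to assert_execute_outputs(). A tiny illustration of that fixture style; types.SimpleNamespace here is my substitution for the project's Mock helper.

from types import SimpleNamespace

def default_options():
    # Mirrors the defaults the commands would otherwise get from optparse.
    return SimpleNamespace(
        force_clean=False, clean=True, check_builders=True,
        quiet=False, non_interactive=False, update=True,
        build=True, test=True, close_bug=True)

options = default_options()
assert options.build and not options.force_clean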
- -import re - -from datetime import datetime -from optparse import make_option - -from modules.executive import ScriptError -from modules.grammar import pluralize -from modules.landingsequence import LandingSequence, LandingSequenceErrorHandler -from modules.logging import error, log -from modules.multicommandtool import Command -from modules.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate -from modules.statusbot import StatusBot -from modules.workqueue import WorkQueue, WorkQueueDelegate - -class AbstractQueue(Command, WorkQueueDelegate): - show_in_main_help = False - watchers = "webkit-bot-watchers@googlegroups.com" - def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations - options_list = (options or []) + [ - make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"), - make_option("--status-host", action="store", type="string", dest="status_host", default=StatusBot.default_host, help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."), - ] - Command.__init__(self, "Run the %s" % self.name, options=options_list) - - def _cc_watchers(self, bug_id): - try: - self.tool.bugs.add_cc_to_bug(bug_id, self.watchers) - except Exception, e: - log("Failed to CC watchers: %s." % e) - - def queue_log_path(self): - return "%s.log" % self.name - - def work_logs_directory(self): - return "%s-logs" % self.name - - def status_host(self): - return self.options.status_host - - def begin_work_queue(self): - log("CAUTION: %s will discard all local changes in %s" % (self.name, self.tool.scm().checkout_root)) - if self.options.confirm: - response = raw_input("Are you sure? Type \"yes\" to continue: ") - if (response != "yes"): - error("User declined.") - log("Running WebKit %s. %s" % (self.name, datetime.now().strftime(WorkQueue.log_date_format))) - - def should_continue_work_queue(self): - return True - - def next_work_item(self): - raise NotImplementedError, "subclasses must implement" - - def should_proceed_with_work_item(self, work_item): - raise NotImplementedError, "subclasses must implement" - - def process_work_item(self, work_item): - raise NotImplementedError, "subclasses must implement" - - def handle_unexpected_error(self, work_item, message): - raise NotImplementedError, "subclasses must implement" - - def run_bugzilla_tool(self, args): - bugzilla_tool_args = [self.tool.path()] + map(str, args) - self.tool.executive.run_and_throw_if_fail(bugzilla_tool_args) - - def log_progress(self, patch_ids): - log("%s in %s [%s]" % (pluralize("patch", len(patch_ids)), self.name, ", ".join(map(str, patch_ids)))) - - def execute(self, options, args, tool): - self.options = options - self.tool = tool - work_queue = WorkQueue(self.name, self) - return work_queue.run() - - -class CommitQueue(AbstractQueue, LandingSequenceErrorHandler): - name = "commit-queue" - def __init__(self): - AbstractQueue.__init__(self) - - # AbstractQueue methods - - def begin_work_queue(self): - AbstractQueue.begin_work_queue(self) - - def next_work_item(self): - patches = self.tool.bugs.fetch_patches_from_commit_queue(reject_invalid_patches=True) - if not patches: - return None - # Only bother logging if we have patches in the queue. 
- self.log_progress([patch['id'] for patch in patches]) - return patches[0] - - def should_proceed_with_work_item(self, patch): - red_builders_names = self.tool.buildbot.red_core_builders_names() - if red_builders_names: - red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names. - return (False, "Builders [%s] are red. See http://build.webkit.org." % ", ".join(red_builders_names), None) - return (True, "Landing patch %s from bug %s." % (patch["id"], patch["bug_id"]), patch) - - def process_work_item(self, patch): - self._cc_watchers(patch["bug_id"]) - self.run_bugzilla_tool(["land-attachment", "--force-clean", "--non-interactive", "--parent-command=commit-queue", "--quiet", patch["id"]]) - - def handle_unexpected_error(self, patch, message): - self.tool.bugs.reject_patch_from_commit_queue(patch["id"], message) - - # LandingSequenceErrorHandler methods - - @classmethod - def handle_script_error(cls, tool, patch, script_error): - tool.bugs.reject_patch_from_commit_queue(patch["id"], script_error.message_with_output()) - - -class AbstractReviewQueue(AbstractQueue, PersistentPatchCollectionDelegate, LandingSequenceErrorHandler): - def __init__(self, options=None): - AbstractQueue.__init__(self, options) - - # PersistentPatchCollectionDelegate methods - - def collection_name(self): - return self.name - - def fetch_potential_patch_ids(self): - return self.tool.bugs.fetch_attachment_ids_from_review_queue() - - def status_server(self): - return self.tool.status() - - # AbstractQueue methods - - def begin_work_queue(self): - AbstractQueue.begin_work_queue(self) - self.tool.status().set_host(self.options.status_host) - self._patches = PersistentPatchCollection(self) - - def next_work_item(self): - patch_id = self._patches.next() - if patch_id: - return self.tool.bugs.fetch_attachment(patch_id) - - def should_proceed_with_work_item(self, patch): - raise NotImplementedError, "subclasses must implement" - - def process_work_item(self, patch): - raise NotImplementedError, "subclasses must implement" - - def handle_unexpected_error(self, patch, message): - log(message) - - # LandingSequenceErrorHandler methods - - @classmethod - def handle_script_error(cls, tool, patch, script_error): - log(script_error.message_with_output()) - - -class StyleQueue(AbstractReviewQueue): - name = "style-queue" - def __init__(self): - AbstractReviewQueue.__init__(self) - - def should_proceed_with_work_item(self, patch): - return (True, "Checking style for patch %s on bug %s." % (patch["id"], patch["bug_id"]), patch) - - def process_work_item(self, patch): - try: - self.run_bugzilla_tool(["check-style", "--force-clean", "--non-interactive", "--parent-command=style-queue", patch["id"]]) - message = "%s ran check-webkit-style on attachment %s without any errors." % (self.name, patch["id"]) - self.tool.bugs.post_comment_to_bug(patch["bug_id"], message, cc=self.watchers) - self._patches.did_pass(patch) - except ScriptError, e: - self._patches.did_fail(patch) - raise e - - @classmethod - def handle_script_error(cls, tool, patch, script_error): - command = script_error.script_args - if type(command) is list: - command = command[0] - # FIXME: We shouldn't need to use a regexp here. ScriptError should - # have a better API. 
- if re.search("check-webkit-style", command): - message = "Attachment %s did not pass %s:\n\n%s" % (patch["id"], cls.name, script_error.message_with_output(output_limit=5*1024)) - tool.bugs.post_comment_to_bug(patch["bug_id"], message, cc=cls.watchers) diff --git a/WebKitTools/Scripts/modules/commands/queues_unittest.py b/WebKitTools/Scripts/modules/commands/queues_unittest.py deleted file mode 100644 index 75abbe5..0000000 --- a/WebKitTools/Scripts/modules/commands/queues_unittest.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -import unittest - -from modules.commands.commandtest import CommandsTest -from modules.commands.queues import * -from modules.mock_bugzillatool import MockBugzillaTool -from modules.outputcapture import OutputCapture - - -class TestQueue(AbstractQueue): - name = "test-queue" - - -class AbstractQueueTest(CommandsTest): - def _assert_output(self, function, args, expected_stdout="", expected_stderr=""): - capture = OutputCapture() - capture.capture_output() - function(*args) - (stdout_string, stderr_string) = capture.restore_output() - self.assertEqual(stdout_string, expected_stdout) - self.assertEqual(stderr_string, expected_stderr) - - def _assert_log_progress_output(self, patch_ids, progress_output): - self._assert_output(TestQueue().log_progress, [patch_ids], expected_stderr=progress_output) - - def test_log_progress(self): - self._assert_log_progress_output([1,2,3], "3 patches in test-queue [1, 2, 3]\n") - self._assert_log_progress_output(["1","2","3"], "3 patches in test-queue [1, 2, 3]\n") - self._assert_log_progress_output([1], "1 patch in test-queue [1]\n") - - def _assert_run_bugzilla_tool_output(self, run_args, tool_output): - queue = TestQueue() - queue.bind_to_tool(MockBugzillaTool()) - # MockBugzillaTool.path() is "echo" - self._assert_output(queue.run_bugzilla_tool, [run_args], expected_stdout=tool_output) - - def test_run_bugzilla_tool(self): - self._assert_run_bugzilla_tool_output([1], "") - self._assert_run_bugzilla_tool_output(["one", 2], "") diff --git a/WebKitTools/Scripts/modules/commands/upload.py b/WebKitTools/Scripts/modules/commands/upload.py deleted file mode 100644 index 1f892a1..0000000 --- a/WebKitTools/Scripts/modules/commands/upload.py +++ /dev/null @@ -1,246 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009, Google Inc. All rights reserved. -# Copyright (c) 2009 Apple Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
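Editor's note on the queues_unittest.py hunk removed above: the tests wrap each call in an OutputCapture helper and compare the captured stdout/stderr against expected strings. The same idea expressed with present-day Python 3 standard-library tools, as an illustration only (the original helper lives in modules/outputcapture.py, which is not part of this hunk):

import io
from contextlib import redirect_stderr, redirect_stdout

def capture_output(function, *args):
    # Run the function and return whatever it wrote to stdout and stderr.
    out, err = io.StringIO(), io.StringIO()
    with redirect_stdout(out), redirect_stderr(err):
        function(*args)
    return out.getvalue(), err.getvalue()

stdout, stderr = capture_output(print, "1 patch in test-queue [1]")
assert stdout == "1 patch in test-queue [1]\n"
assert stderr == ""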
- -import os -import StringIO -import sys - -from optparse import make_option - -from modules.bugzilla import parse_bug_id -from modules.grammar import pluralize -from modules.logging import error, log -from modules.multicommandtool import Command - -# FIXME: Requires unit test. -class CommitMessageForCurrentDiff(Command): - name = "commit-message" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "Print a commit message suitable for the uncommitted changes") - - def execute(self, options, args, tool): - os.chdir(tool.scm().checkout_root) - print "%s" % tool.scm().commit_message_for_this_commit().message() - - -class ObsoleteAttachments(Command): - name = "obsolete-attachments" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "Mark all attachments on a bug as obsolete", "BUGID") - - def execute(self, options, args, tool): - bug_id = args[0] - attachments = tool.bugs.fetch_attachments_from_bug(bug_id) - for attachment in attachments: - if not attachment["is_obsolete"]: - tool.bugs.obsolete_attachment(attachment["id"]) - - -class PostDiff(Command): - name = "post-diff" - show_in_main_help = True - def __init__(self): - options = [ - make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: \"patch\")"), - ] - options += self.posting_options() - Command.__init__(self, "Attach the current working directory diff to a bug as a patch file", "[BUGID]", options=options) - - @staticmethod - def posting_options(): - return [ - make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one."), - make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."), - make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."), - ] - - @staticmethod - def obsolete_patches_on_bug(bug_id, bugs): - patches = bugs.fetch_patches_from_bug(bug_id) - if len(patches): - log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id)) - for patch in patches: - bugs.obsolete_attachment(patch["id"]) - - def execute(self, options, args, tool): - # Perfer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs). 
- bug_id = (args and args[0]) or parse_bug_id(tool.scm().create_patch()) - if not bug_id: - error("No bug id passed and no bug url found in diff, can't post.") - - if options.obsolete_patches: - self.obsolete_patches_on_bug(bug_id, tool.bugs) - - diff = tool.scm().create_patch() - diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object - - description = options.description or "Patch" - tool.bugs.add_patch_to_bug(bug_id, diff_file, description, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) - - -class PostCommits(Command): - name = "post-commits" - show_in_main_help = True - def __init__(self): - options = [ - make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), - make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."), - make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"), - ] - options += PostDiff.posting_options() - Command.__init__(self, "Attach a range of local commits to bugs as patch files", "COMMITISH", options=options, requires_local_commits=True) - - def _comment_text_for_commit(self, options, commit_message, tool, commit_id): - comment_text = None - if (options.add_log_as_comment): - comment_text = commit_message.body(lstrip=True) - comment_text += "---\n" - comment_text += tool.scm().files_changed_summary_for_commit(commit_id) - return comment_text - - def _diff_file_for_commit(self, tool, commit_id): - diff = tool.scm().create_patch_from_local_commit(commit_id) - return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object - - def execute(self, options, args, tool): - commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) - if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is. - error("bugzilla-tool does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids)))) - - have_obsoleted_patches = set() - for commit_id in commit_ids: - commit_message = tool.scm().commit_message_for_local_commit(commit_id) - - # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs). - bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id)) - if not bug_id: - log("Skipping %s: No bug id found in commit or specified with --bug-id." 
% commit_id) - continue - - if options.obsolete_patches and bug_id not in have_obsoleted_patches: - PostDiff.obsolete_patches_on_bug(bug_id, tool.bugs) - have_obsoleted_patches.add(bug_id) - - diff_file = self._diff_file_for_commit(tool, commit_id) - description = options.description or commit_message.description(lstrip=True, strip_url=True) - comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id) - tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) - - -class MarkFixed(Command): - name = "mark-fixed" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "Mark the specified bug as fixed", "BUG_ID REASON") - - def execute(self, options, args, tool): - tool.bugs.close_bug_as_fixed(args[0], args[1]) - - -# FIXME: Requires unit test. Blocking issue: too complex for now. -class CreateBug(Command): - name = "create-bug" - show_in_main_help = True - def __init__(self): - options = [ - make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy."), - make_option("--component", action="store", type="string", dest="component", help="Component for the new bug."), - make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."), - make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."), - make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."), - ] - Command.__init__(self, "Create a bug from local changes or local commits", "[COMMITISH]", options=options) - - def create_bug_from_commit(self, options, args, tool): - commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) - if len(commit_ids) > 3: - error("Are you sure you want to create one bug with %s patches?" % len(commit_ids)) - - commit_id = commit_ids[0] - - bug_title = "" - comment_text = "" - if options.prompt: - (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() - else: - commit_message = tool.scm().commit_message_for_local_commit(commit_id) - bug_title = commit_message.description(lstrip=True, strip_url=True) - comment_text = commit_message.body(lstrip=True) - comment_text += "---\n" - comment_text += tool.scm().files_changed_summary_for_commit(commit_id) - - diff = tool.scm().create_patch_from_local_commit(commit_id) - diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object - bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) - - if bug_id and len(commit_ids) > 1: - options.bug_id = bug_id - options.obsolete_patches = False - # FIXME: We should pass through --no-comment switch as well. 
- PostCommits.execute(self, options, commit_ids[1:], tool) - - def create_bug_from_patch(self, options, args, tool): - bug_title = "" - comment_text = "" - if options.prompt: - (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() - else: - commit_message = tool.scm().commit_message_for_this_commit() - bug_title = commit_message.description(lstrip=True, strip_url=True) - comment_text = commit_message.body(lstrip=True) - - diff = tool.scm().create_patch() - diff_file = StringIO.StringIO(diff) # create_bug_with_patch expects a file-like object - bug_id = tool.bugs.create_bug_with_patch(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) - - def prompt_for_bug_title_and_comment(self): - bug_title = raw_input("Bug title: ") - print "Bug comment (hit ^D on blank line to end):" - lines = sys.stdin.readlines() - try: - sys.stdin.seek(0, os.SEEK_END) - except IOError: - # Cygwin raises an Illegal Seek (errno 29) exception when the above - # seek() call is made. Ignoring it seems to cause no harm. - # FIXME: Figure out a way to get avoid the exception in the first - # place. - pass - comment_text = "".join(lines) - return (bug_title, comment_text) - - def execute(self, options, args, tool): - if len(args): - if (not tool.scm().supports_local_commits()): - error("Extra arguments not supported; patch is taken from working directory.") - self.create_bug_from_commit(options, args, tool) - else: - self.create_bug_from_patch(options, args, tool) diff --git a/WebKitTools/Scripts/modules/landingsequence.py b/WebKitTools/Scripts/modules/landingsequence.py deleted file mode 100644 index 90683f4..0000000 --- a/WebKitTools/Scripts/modules/landingsequence.py +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2009, Google Inc. All rights reserved. -# Copyright (c) 2009 Apple Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -from modules.comments import bug_comment_from_commit_text -from modules.executive import ScriptError -from modules.logging import log -from modules.scm import CheckoutNeedsUpdate -from modules.webkitport import WebKitPort -from modules.workqueue import WorkQueue -from modules.buildsteps import CleanWorkingDirectoryStep, UpdateStep, ApplyPatchStep, EnsureBuildersAreGreenStep, BuildStep, RunTestsStep, CommitStep, ClosePatchStep, CloseBugStep - - -class LandingSequenceErrorHandler(): - @classmethod - def handle_script_error(cls, tool, patch, script_error): - raise NotImplementedError, "subclasses must implement" - -# FIXME: This class is slowing being killed and replaced with StepSequence. -class LandingSequence: - def __init__(self, patch, options, tool): - self._patch = patch - self._options = options - self._tool = tool - self._port = WebKitPort.port(self._options.port) - - def run(self): - self.clean() - self.update() - self.apply_patch() - self.check_builders() - self.build() - self.test() - commit_log = self.commit() - self.close_patch(commit_log) - self.close_bug() - - def run_and_handle_errors(self): - try: - self.run() - except CheckoutNeedsUpdate, e: - log("Commit failed because the checkout is out of date. Please update and try again.") - log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.") - WorkQueue.exit_after_handled_error(e) - except ScriptError, e: - if not self._options.quiet: - log(e.message_with_output()) - if self._options.parent_command: - command = self._tool.command_by_name(self._options.parent_command) - command.handle_script_error(self._tool, self._patch, e) - WorkQueue.exit_after_handled_error(e) - - def clean(self): - step = CleanWorkingDirectoryStep(self._tool, self._options) - step.run() - - def update(self): - step = UpdateStep(self._tool, self._options) - step.run() - - def apply_patch(self): - step = ApplyPatchStep(self._tool, self._options, self._patch) - step.run() - - def check_builders(self): - step = EnsureBuildersAreGreenStep(self._tool, self._options) - step.run() - - def build(self): - step = BuildStep(self._tool, self._options) - step.run() - - def test(self): - step = RunTestsStep(self._tool, self._options) - step.run() - - def commit(self): - step = CommitStep(self._tool, self._options) - return step.run() - - def close_patch(self, commit_log): - step = ClosePatchStep(self._tool, self._options, self._patch) - step.run(commit_log) - - def close_bug(self): - step = CloseBugStep(self._tool, self._options, self._patch) - step.run() diff --git a/WebKitTools/Scripts/modules/mock_bugzillatool.py b/WebKitTools/Scripts/modules/mock_bugzillatool.py deleted file mode 100644 index e600947..0000000 --- a/WebKitTools/Scripts/modules/mock_bugzillatool.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2009 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import os - -from modules.mock import Mock -from modules.scm import CommitMessage - - -class MockBugzilla(Mock): - patch1 = { - "id" : 197, - "bug_id" : 42, - "url" : "http://example.com/197", - "is_obsolete" : False, - "reviewer" : "Reviewer1", - "attacher_email" : "Contributer1", - } - patch2 = { - "id" : 128, - "bug_id" : 42, - "url" : "http://example.com/128", - "is_obsolete" : False, - "reviewer" : "Reviewer2", - "attacher_email" : "Contributer2", - } - bug_server_url = "http://example.com" - - def fetch_bug_ids_from_commit_queue(self): - return [42, 75] - - def fetch_attachment_ids_from_review_queue(self): - return [197, 128] - - def fetch_patches_from_commit_queue(self): - return [self.patch1, self.patch2] - - def fetch_patches_from_pending_commit_list(self): - return [self.patch1, self.patch2] - - def fetch_reviewed_patches_from_bug(self, bug_id): - if bug_id == 42: - return [self.patch1, self.patch2] - return None - - def fetch_attachments_from_bug(self, bug_id): - if bug_id == 42: - return [self.patch1, self.patch2] - return None - - def fetch_patches_from_bug(self, bug_id): - if bug_id == 42: - return [self.patch1, self.patch2] - return None - - def fetch_attachment(self, attachment_id): - if attachment_id == 197: - return self.patch1 - if attachment_id == 128: - return self.patch2 - raise Exception("Bogus attachment_id in fetch_attachment.") - - def bug_url_for_bug_id(self, bug_id): - return "%s/%s" % (self.bug_server_url, bug_id) - - def attachment_url_for_id(self, attachment_id, action): - action_param = "" - if action and action != "view": - action_param = "&action=%s" % action - return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param) - - -class MockBuildBot(Mock): - def builder_statuses(self): - return [{ - "name": "Builder1", - "is_green": True - }, { - "name": "Builder2", - "is_green": True - }] - - def red_core_builders_names(self): - return [] - -class MockSCM(Mock): - def __init__(self): - Mock.__init__(self) - self.checkout_root = os.getcwd() - - def create_patch(self): - return "Patch1" - - def commit_ids_from_commitish_arguments(self, args): - return ["Commitish1", "Commitish2"] - - def commit_message_for_local_commit(self, commit_id): - if commit_id == "Commitish1": - return CommitMessage("CommitMessage1\nhttps://bugs.example.org/show_bug.cgi?id=42\n") - if commit_id == "Commitish2": - return CommitMessage("CommitMessage2\nhttps://bugs.example.org/show_bug.cgi?id=75\n") - raise Exception("Bogus commit_id in commit_message_for_local_commit.") - - def create_patch_from_local_commit(self, commit_id): - if commit_id == 
"Commitish1": - return "Patch1" - if commit_id == "Commitish2": - return "Patch2" - raise Exception("Bogus commit_id in commit_message_for_local_commit.") - - def modified_changelogs(self): - # Ideally we'd return something more interesting here. - # The problem is that LandDiff will try to actually read the path from disk! - return [] - - -class MockBugzillaTool(): - def __init__(self): - self.bugs = MockBugzilla() - self.buildbot = MockBuildBot() - self.executive = Mock() - self._scm = MockSCM() - - def scm(self): - return self._scm - - def path(self): - return "echo" diff --git a/WebKitTools/Scripts/num-cpus b/WebKitTools/Scripts/num-cpus index c5f28a1..ede9995 100755 --- a/WebKitTools/Scripts/num-cpus +++ b/WebKitTools/Scripts/num-cpus @@ -1,16 +1,3 @@ -#!/usr/bin/perl - -use strict; -use warnings; - -use Win32API::Registry 0.21 qw( :ALL ); - - -my $key; -my $i = 0; -while (RegOpenKeyEx(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\$i", 0, KEY_READ, $key)) { - $i++; - RegCloseKey($key); -} - -print "$i\n"; +#!/bin/bash +# Assumes cygwin. +ls /proc/registry/HKEY_LOCAL_MACHINE/HARDWARE/DESCRIPTION/System/CentralProcessor | wc -w diff --git a/WebKitTools/Scripts/pdevenv b/WebKitTools/Scripts/pdevenv index cab8b16..4643728 100755 --- a/WebKitTools/Scripts/pdevenv +++ b/WebKitTools/Scripts/pdevenv @@ -32,13 +32,8 @@ print $fh "\@echo off\n\n"; print $fh "call \"\%" . $vsToolsVar . "\%\\vsvars32.bat\"\n\n"; print $fh "set PATH=$vcBin;$scriptsPath;\%PATH\%\n\n"; -my $useenv = "/useenv "; -if (isChromium()) { - $useenv = ""; -} - -print $fh "IF EXIST \"\%VSINSTALLDIR\%\\Common7\\IDE\\devenv.com\" (devenv.com " . $useenv . join(" ", @ARGV) . ") ELSE "; -print $fh "VCExpress.exe " . $useenv . join(" ", @ARGV) . "\n"; +print $fh "IF EXIST \"\%VSINSTALLDIR\%\\Common7\\IDE\\devenv.com\" (devenv.com /useenv " . join(" ", @ARGV) . ") ELSE "; +print $fh "VCExpress.exe /useenv " . join(" ", @ARGV) . "\n"; close $fh; diff --git a/WebKitTools/Scripts/prepare-ChangeLog b/WebKitTools/Scripts/prepare-ChangeLog index 4c59af9..3350aa3 100755 --- a/WebKitTools/Scripts/prepare-ChangeLog +++ b/WebKitTools/Scripts/prepare-ChangeLog @@ -257,7 +257,11 @@ if ($bugNumber) { my $bugXMLURL = "$bugURL&ctype=xml"; # Perl has no built in XML processing, so we'll fetch and parse with curl and grep my $descriptionLine = `curl --silent "$bugXMLURL" | grep short_desc`; - $descriptionLine =~ /<short_desc>(.*)<\/short_desc>/; + if ($descriptionLine !~ /<short_desc>(.*)<\/short_desc>/) { + print STDERR " Bug $bugNumber has no bug description. Maybe you set wrong bug ID?\n"; + print STDERR " The bug URL: $bugXMLURL\n"; + exit 1; + } $bugDescription = decodeEntities($1); print STDERR " Description from bug $bugNumber:\n \"$bugDescription\".\n"; } @@ -1606,6 +1610,7 @@ sub reviewerAndDescriptionForGitCommit($) $description .= "\n" if $commitLogCount; $commitLogCount++; my $inHeader = 1; + my $commitLogIndent; my @lines = split(/\n/, $commitLog); shift @lines; # Remove initial blank line foreach my $line (@lines) { @@ -1620,11 +1625,18 @@ sub reviewerAndDescriptionForGitCommit($) } else { $reviewer .= ", " . $1; } - } elsif (length $line == 0) { + } elsif ($line =~ /^\s*$/) { $description = $description . "\n"; } else { - $line =~ s/^\s*//; - $description = $description . " " . $line . "\n"; + if (!defined($commitLogIndent)) { + # Let the first line with non-white space determine + # the global indent. 
+ $line =~ /^(\s*)\S/; + $commitLogIndent = length($1); + } + # Strip at most the indent to preserve relative indents. + $line =~ s/^\s{0,$commitLogIndent}//; + $description = $description . (" " x 8) . $line . "\n"; } } } diff --git a/WebKitTools/Scripts/rebaseline-chromium-webkit-tests b/WebKitTools/Scripts/rebaseline-chromium-webkit-tests new file mode 100755 index 0000000..d22c0c4 --- /dev/null +++ b/WebKitTools/Scripts/rebaseline-chromium-webkit-tests @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Wrapper around webkitpy/layout_tests/rebaseline.py""" +import os +import sys + +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), + "webkitpy", "layout_tests")) + +import rebaseline_chromium_webkit_tests + +if __name__ == '__main__': + rebaseline_chromium_webkit_tests.main() diff --git a/WebKitTools/Scripts/run-chromium-webkit-tests b/WebKitTools/Scripts/run-chromium-webkit-tests new file mode 100755 index 0000000..8712836 --- /dev/null +++ b/WebKitTools/Scripts/run-chromium-webkit-tests @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Wrapper around webkitpy/layout_tests/run-chromium-webkit-tests.py""" +import os +import sys + +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), + "webkitpy", "layout_tests")) +import run_chromium_webkit_tests + +if __name__ == '__main__': + options, args = run_chromium_webkit_tests.parse_args() + run_chromium_webkit_tests.main(options, args) diff --git a/WebKitTools/Scripts/run-iexploder-tests b/WebKitTools/Scripts/run-iexploder-tests index ed5ecd6..1b3976f 100755 --- a/WebKitTools/Scripts/run-iexploder-tests +++ b/WebKitTools/Scripts/run-iexploder-tests @@ -38,16 +38,17 @@ use Getopt::Long; use IPC::Open2; use lib $FindBin::Bin; +use webkitperl::httpd; use webkitdirs; -sub openHTTPDIfNeeded(); -sub closeHTTPD(); +sub configureAndOpenHTTPDIfNeeded(); sub runSafariWithIExploder(); # Argument handling my $guardMalloc = ''; my $httpdPort = 8000; my $downloadTest; +my $iExploderTestDirectory = "/tmp/iExploderTest"; GetOptions( 'guard-malloc|g' => \$guardMalloc, @@ -63,8 +64,8 @@ chdirWebKit(); checkFrameworks(); -my $httpdOpen = 0; -openHTTPDIfNeeded(); +my $isHttpdOpen = 0; +configureAndOpenHTTPDIfNeeded(); if ($downloadTest) { system "/usr/bin/curl -o ~/Desktop/iexploder$downloadTest.html \"http://127.0.0.1:$httpdPort/iexploder.cgi?lookup=1&test=$downloadTest\""; @@ -72,11 +73,11 @@ if ($downloadTest) { } else { runSafariWithIExploder(); print "Last generated tests:\n"; - system "grep 'iexploder.cgi' /tmp/WebKit/access_log.txt | tail -n -5 | awk -F'[ =&\\?]' '{if (\$8 == \"lookup\") print \$11; else print \$9}'"; + system "grep 'iexploder.cgi' $iExploderTestDirectory/access_log.txt | tail -n -5 | awk -F'[ =&\\?]' '{if (\$8 == \"lookup\") print \$11; else print \$9}'"; } -closeHTTPD(); - +rmtree $iExploderTestDirectory; +$isHttpdOpen = !closeHTTPD(); sub runSafariWithIExploder() { @@ -87,7 +88,7 @@ sub runSafariWithIExploder() $redirectTo = "http://127.0.0.1:$httpdPort/index.html"; } - open REDIRECT_HTML, ">", "/tmp/WebKit/redirect.html" or die; + open REDIRECT_HTML, ">", "$iExploderTestDirectory/redirect.html" or die; print REDIRECT_HTML "<html>\n"; print REDIRECT_HTML " <head>\n"; print REDIRECT_HTML " <meta http-equiv=\"refresh\" content=\"1;URL=$redirectTo\" />\n"; @@ -102,35 +103,17 @@ sub runSafariWithIExploder() local %ENV; $ENV{DYLD_INSERT_LIBRARIES} = "/usr/lib/libgmalloc.dylib" if $guardMalloc; - system "WebKitTools/Scripts/run-safari", "-NSOpen", "/tmp/WebKit/redirect.html"; + system "WebKitTools/Scripts/run-safari", "-NSOpen", "$iExploderTestDirectory/redirect.html"; } -sub openHTTPDIfNeeded() +sub configureAndOpenHTTPDIfNeeded() { - return if $httpdOpen; - - mkdir "/tmp/WebKit"; - - if (-f "/tmp/WebKit/httpd.pid") { - my $oldPid = `cat /tmp/WebKit/httpd.pid`; - chomp 
$oldPid; - if (0 != kill 0, $oldPid) { - print "\nhttpd is already running: pid $oldPid, killing...\n"; - kill 15, $oldPid; - - my $retryCount = 20; - while ((0 != kill 0, $oldPid) && $retryCount) { - sleep 1; - --$retryCount; - } - - die "Timed out waiting for httpd to quit" unless $retryCount; - } - } - - my $testDirectory = getcwd() . "/LayoutTests"; - my $iExploderDirectory = getcwd() . "/WebKitTools/iExploder"; - my $httpdPath = "/usr/sbin/httpd"; + return if $isHttpdOpen; + mkdir $iExploderTestDirectory; + my $httpdPath = getHTTPDPath(); + my $webkitDirectory = getcwd(); + my $testDirectory = $webkitDirectory . "/LayoutTests"; + my $iExploderDirectory = $webkitDirectory . "/WebKitTools/iExploder"; my $httpdConfig = "$testDirectory/http/conf/httpd.conf"; $httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; my $documentRoot = "$iExploderDirectory/htdocs"; @@ -138,36 +121,18 @@ sub openHTTPDIfNeeded() my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; my $listen = "127.0.0.1:$httpdPort"; - open2(\*HTTPDIN, \*HTTPDOUT, $httpdPath, + + my @args = ( "-f", "$httpdConfig", "-C", "DocumentRoot \"$documentRoot\"", "-C", "Listen $listen", "-c", "TypesConfig \"$typesConfig\"", - "-c", "CustomLog \"/tmp/WebKit/access_log.txt\" common", - "-c", "ErrorLog \"/tmp/WebKit/error_log.txt\"", + "-c", "CustomLog \"$iExploderTestDirectory/access_log.txt\" common", + "-c", "ErrorLog \"$iExploderTestDirectory/error_log.txt\"", "-c", "SSLCertificateFile \"$sslCertificate\"", # Apache wouldn't run CGIs with permissions==700 otherwise - "-c", "User \"#$<\""); - - my $retryCount = 20; - while (system("/usr/bin/curl -q --silent --stderr - --output " . File::Spec->devnull() . " $listen") && $retryCount) { - sleep 1; - --$retryCount; - } - - die "Timed out waiting for httpd to start" unless $retryCount; - - $httpdOpen = 1; -} - -sub closeHTTPD() -{ - return if !$httpdOpen; - - close HTTPDIN; - close HTTPDOUT; - - kill 15, `cat /tmp/WebKit/httpd.pid` if -f "/tmp/WebKit/httpd.pid"; + "-c", "User \"#$<\"" + ); - $httpdOpen = 0; + $isHttpdOpen = openHTTPD(@args); } diff --git a/WebKitTools/Scripts/run-leaks b/WebKitTools/Scripts/run-leaks index d8f89d3..9dc58de 100755 --- a/WebKitTools/Scripts/run-leaks +++ b/WebKitTools/Scripts/run-leaks @@ -132,6 +132,15 @@ sub parseLeaksOutput(\@) # # We treat every line except for Process 00000: and Leak: as optional + # Newer versions of the leaks output have a header section at the top, with the first line describing the version of the output format. + # If we detect the new format is being used then we eat all of the header section so the output matches the format of older versions. + # FIXME: In the future we may wish to propagate this section through to our output. 
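A standalone sketch of the header-skipping described in the run-leaks comment above; the added Perl lines that follow do the same thing in place on the leaks output:

def strip_leaks_report_header(lines):
    # Newer `leaks` output begins with a "leaks Report Version:" header section.
    # Drop everything up to (but not including) the first "Process ..." line so
    # the remainder parses like the older format.
    if lines and lines[0].startswith("leaks Report Version:"):
        while lines and not lines[0].startswith("Process "):
            lines.pop(0)
    return lines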
+ if ($leaksOutput->[0] =~ /^leaks Report Version:/) { + while ($leaksOutput->[0] !~ /^Process /) { + shift @$leaksOutput; + } + } + my ($leakCount) = ($leaksOutput->[1] =~ /[[:blank:]]+([0-9]+)[[:blank:]]+leaks?/); if (!defined($leakCount)) { reportError("Could not parse leak count reported by leaks tool."); diff --git a/WebKitTools/Scripts/run-sunspider b/WebKitTools/Scripts/run-sunspider index e63f5d1..1f0d056 100755 --- a/WebKitTools/Scripts/run-sunspider +++ b/WebKitTools/Scripts/run-sunspider @@ -41,6 +41,7 @@ my $testRuns = 10; # This number may be different from what sunspider defaults t my $runShark = 0; my $runShark20 = 0; my $runSharkCache = 0; +my $suite = ""; my $ubench = 0; my $v8 = 0; my $parseonly = 0; @@ -59,9 +60,10 @@ Usage: $programName [options] [options to pass to build system] --shark Sample with the Mac OS X "Shark" performance testing tool (implies --runs=1) --shark20 Like --shark, but with a 20 microsecond sampling interval --shark-cache Like --shark, but performs a L2 cache-miss sample instead of time sample - --ubench Use microbenchmark suite instead of regular tests (to check for core execution regressions) - --v8 Use the V8 benchmark suite. - --parse-only Use the parse-only benchmark suite + --suite Select a specific benchmark suite. The default is sunspider-0.9.1 + --ubench Use microbenchmark suite instead of regular tests. Same as --suite=ubench + --v8-suite Use the V8 benchmark suite. Same as --suite=v8-v4 + --parse-only Use the parse-only benchmark suite. Same as --suite=parse-only EOF GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurationProductDir(Cwd::abs_path($root)); }, @@ -70,6 +72,7 @@ GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurat 'shark' => \$runShark, 'shark20' => \$runShark20, 'shark-cache' => \$runSharkCache, + 'suite=s' => \$suite, 'ubench' => \$ubench, 'v8' => \$v8, 'parse-only' => \$parseonly, @@ -117,6 +120,7 @@ push @args, "--set-baseline" if $setBaseline; push @args, "--shark" if $runShark; push @args, "--shark20" if $runShark20; push @args, "--shark-cache" if $runSharkCache; +push @args, "--suite=${suite}" if $suite; push @args, "--ubench" if $ubench; push @args, "--v8" if $v8; push @args, "--parse-only" if $parseonly; diff --git a/WebKitTools/Scripts/run-webkit-httpd b/WebKitTools/Scripts/run-webkit-httpd index 9a97190..018f64c 100755 --- a/WebKitTools/Scripts/run-webkit-httpd +++ b/WebKitTools/Scripts/run-webkit-httpd @@ -33,11 +33,13 @@ use strict; use warnings; use Cwd; +use File::Path; use File::Basename; use Getopt::Long; use FindBin; use lib $FindBin::Bin; +use webkitperl::httpd; use webkitdirs; # Argument handling @@ -62,39 +64,7 @@ if (!$result || @ARGV || $showHelp) { setConfiguration(); my $productDir = productDir(); chdirWebKit(); - -mkdir "/tmp/WebKit"; - -if (-f "/tmp/WebKit/httpd.pid") { - my $oldPid = `cat /tmp/WebKit/httpd.pid`; - chomp $oldPid; - if (0 != kill 0, $oldPid) { - print "\nhttpd is already running: pid $oldPid, killing...\n"; - kill 15, $oldPid; - - my $retryCount = 20; - while ((0 != kill 0, $oldPid) && $retryCount) { - sleep 1; - --$retryCount; - } - - die "Timed out waiting for httpd to quit" unless $retryCount; - } -} - -my $testDirectory = getcwd() . "/LayoutTests"; -my $jsTestResourcesDirectory = $testDirectory . 
"/fast/js/resources"; -my $httpdPath = "/usr/sbin/httpd"; -$httpdPath = "/usr/sbin/apache2" if isDebianBased(); -my $httpdConfig = "$testDirectory/http/conf/httpd.conf"; -$httpdConfig = "$testDirectory/http/conf/cygwin-httpd.conf" if isCygwin(); -$httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; -$httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf" if isDebianBased(); -$httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf" if isFedoraBased(); -my $documentRoot = "$testDirectory/http/tests"; -my $typesConfig = "$testDirectory/http/conf/mime.types"; -my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; - +my $testDirectory = File::Spec->catfile(getcwd(), "LayoutTests"); my $listen = "127.0.0.1:$httpdPort"; $listen = "$httpdPort" if ($allInterfaces); @@ -103,19 +73,13 @@ if ($allInterfaces) { } else { print "Starting httpd on <http://$listen/>...\n"; } +setShouldWaitForUserInterrupt(); print "Press Ctrl+C to stop it.\n\n"; my @args = ( - "-f", "$httpdConfig", - "-C", "DocumentRoot \"$documentRoot\"", - # Setup a link to where the js test templates are stored, use -c so that mod_alias will already be laoded. - "-c", "Alias /js-test-resources \"$jsTestResourcesDirectory\"", "-C", "Listen $listen", - "-c", "TypesConfig \"$typesConfig\"", "-c", "CustomLog |/usr/bin/tee common", "-c", "ErrorLog |/usr/bin/tee", - # Apache wouldn't run CGIs with permissions==700 otherwise. - "-c", "User \"#$<\"", # Run in single-process mode, do not detach from the controlling terminal. "-X", # Disable Keep-Alive support. Makes testing in multiple browsers easier (no need to wait @@ -123,9 +87,6 @@ my @args = ( "-c", "KeepAlive 0" ); -# FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed -push(@args, "-c", "SSLCertificateFile \"$sslCertificate\"") unless isCygwin(); - -system($httpdPath, @args); - -unlink "/tmp/WebKit/httpd.pid"; +my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory); +@args = (@defaultArgs, @args); +openHTTPD(@args); diff --git a/WebKitTools/Scripts/run-webkit-tests b/WebKitTools/Scripts/run-webkit-tests index 6dd8339..bb4fb34 100755 --- a/WebKitTools/Scripts/run-webkit-tests +++ b/WebKitTools/Scripts/run-webkit-tests @@ -67,6 +67,7 @@ use Time::HiRes qw(time usleep); use List::Util 'shuffle'; use lib $FindBin::Bin; +use webkitperl::httpd; use webkitdirs; use VCSUtils; use POSIX; @@ -75,8 +76,8 @@ sub buildPlatformResultHierarchy(); sub buildPlatformTestHierarchy(@); sub closeCygpaths(); sub closeDumpTool(); -sub closeHTTPD(); sub closeWebSocketServer(); +sub configureAndOpenHTTPDIfNeeded(); sub countAndPrintLeaks($$$); sub countFinishedTest($$$$); sub deleteExpectedAndActualResults($); @@ -91,7 +92,6 @@ sub resolveAndMakeTestResultsDirectory(); sub numericcmp($$); sub openDiffTool(); sub openDumpTool(); -sub openHTTPDIfNeeded(); sub parseLeaksandPrintUniqueLeaks(); sub openWebSocketServerIfNeeded(); sub pathcmp($$); @@ -143,7 +143,8 @@ my $showHelp = 0; my $stripEditingCallbacks = isCygwin(); my $testHTTP = 1; my $testMedia = 1; -my $testResultsDirectory = "/tmp/layout-test-results"; +my $tmpDir = "/tmp"; +my $testResultsDirectory = File::Spec->catfile($tmpDir, "layout-test-results"); my $testsPerDumpTool = 1000; my $threaded = 0; # DumpRenderTree has an internal timeout of 15 seconds, so this must be > 15. 
@@ -383,7 +384,8 @@ my @platformTestHierarchy = buildPlatformTestHierarchy(@platformResultHierarchy) $expectedDirectory = $ENV{"WebKitExpectedTestResultsDirectory"} if $ENV{"WebKitExpectedTestResultsDirectory"}; -my $testResults = catfile($testResultsDirectory, "results.html"); +$testResultsDirectory = File::Spec->rel2abs($testResultsDirectory); +my $testResults = File::Spec->catfile($testResultsDirectory, "results.html"); print "Running tests from $testDirectory\n"; if ($pixelTests) { @@ -605,7 +607,7 @@ for my $test (@tests) { } if ($test =~ /^http\//) { - openHTTPDIfNeeded(); + configureAndOpenHTTPDIfNeeded(); if ($test !~ /^http\/tests\/local\// && $test !~ /^http\/tests\/ssl\// && $test !~ /^http\/tests\/wml\// && $test !~ /^http\/tests\/media\//) { my $path = canonpath($test); $path =~ s/^http\/tests\///; @@ -708,7 +710,7 @@ for my $test (@tests) { my $actualPNG = ""; my $diffPNG = ""; - my $diffPercentage = ""; + my $diffPercentage = 0; my $diffResult = "passed"; my $actualHash = ""; @@ -762,12 +764,12 @@ for my $test (@tests) { } if (/^diff: (.+)% (passed|failed)/) { - $diffPercentage = $1; + $diffPercentage = $1 + 0; $imageDifferences{$base} = $diffPercentage; $diffResult = $2; } - if ($diffPercentage == 0) { + if (!$diffPercentage) { printFailureMessageForTest($test, "pixel hash failed (but pixel test still passes)"); } } elsif ($verbose) { @@ -945,7 +947,7 @@ printf "\n%0.2fs total testing time\n", (time - $overallStartTime) . ""; !$isDumpToolOpen || die "Failed to close $dumpToolName.\n"; -closeHTTPD(); +$isHttpdOpen = !closeHTTPD(); closeWebSocketServer(); # Because multiple instances of this script are running concurrently we cannot @@ -1345,95 +1347,21 @@ sub dumpToolDidCrash() return DumpRenderTreeSupport::processIsCrashing($dumpToolPID); } -sub openHTTPDIfNeeded() +sub configureAndOpenHTTPDIfNeeded() { return if $isHttpdOpen; - - mkdir "/tmp/WebKit"; - - if (-f "/tmp/WebKit/httpd.pid") { - my $oldPid = `cat /tmp/WebKit/httpd.pid`; - chomp $oldPid; - if (0 != kill 0, $oldPid) { - print "\nhttpd is already running: pid $oldPid, killing...\n"; - kill 15, $oldPid; - - my $retryCount = 20; - while ((0 != kill 0, $oldPid) && $retryCount) { - sleep 1; - --$retryCount; - } - - die "Timed out waiting for httpd to quit" unless $retryCount; - } - } - - my $httpdPath = "/usr/sbin/httpd"; - my $httpdConfig; - if (isCygwin()) { - my $windowsConfDirectory = "$testDirectory/http/conf/"; - unless (-x "/usr/lib/apache/libphp4.dll") { - copy("$windowsConfDirectory/libphp4.dll", "/usr/lib/apache/libphp4.dll"); - chmod(0755, "/usr/lib/apache/libphp4.dll"); - } - $httpdConfig = "$windowsConfDirectory/cygwin-httpd.conf"; - } elsif (isDebianBased()) { - $httpdPath = "/usr/sbin/apache2"; - $httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf"; - } elsif (isFedoraBased()) { - $httpdPath = "/usr/sbin/httpd"; - $httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf"; - } else { - $httpdConfig = "$testDirectory/http/conf/httpd.conf"; - $httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; - } - my $documentRoot = "$testDirectory/http/tests"; - my $jsTestResourcesDirectory = $testDirectory . 
"/fast/js/resources"; - my $typesConfig = "$testDirectory/http/conf/mime.types"; - my $listen = "127.0.0.1:$httpdPort"; my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory(); - my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; - + my $listen = "127.0.0.1:$httpdPort"; my @args = ( - "-f", "$httpdConfig", - "-C", "DocumentRoot \"$documentRoot\"", - # Setup a link to where the js test templates are stored, use -c so that mod_alias will already be laoded. - "-c", "Alias /js-test-resources \"$jsTestResourcesDirectory\"", - "-C", "Listen $listen", - "-c", "TypesConfig \"$typesConfig\"", "-c", "CustomLog \"$absTestResultsDirectory/access_log.txt\" common", "-c", "ErrorLog \"$absTestResultsDirectory/error_log.txt\"", - # Apache wouldn't run CGIs with permissions==700 otherwise - "-c", "User \"#$<\"" + "-C", "Listen $listen" ); - # FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed - # The version of Apache we use with Cygwin does not support SSL - push(@args, "-c", "SSLCertificateFile \"$sslCertificate\"") unless isCygwin(); + my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory); + @args = (@defaultArgs, @args); - open2(\*HTTPDIN, \*HTTPDOUT, $httpdPath, @args); - - my $retryCount = 20; - while (system("/usr/bin/curl -q --silent --stderr - --output " . File::Spec->devnull() . " $listen") && $retryCount) { - sleep 1; - --$retryCount; - } - - die "Timed out waiting for httpd to start" unless $retryCount; - - $isHttpdOpen = 1; -} - -sub closeHTTPD() -{ - return if !$isHttpdOpen; - - close HTTPDIN; - close HTTPDOUT; - - kill 15, `cat /tmp/WebKit/httpd.pid` if -f "/tmp/WebKit/httpd.pid"; - - $isHttpdOpen = 0; + $isHttpdOpen = openHTTPD(@args); } sub openWebSocketServerIfNeeded() @@ -1445,6 +1373,7 @@ sub openWebSocketServerIfNeeded() my $webSocketPythonPath = "WebKitTools/pywebsocket"; my $webSocketHandlerDir = "$testDirectory"; my $webSocketHandlerScanDir = "$testDirectory/websocket/tests"; + my $webSocketHandlerMapFile = "$webSocketHandlerScanDir/handler_map.txt"; my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory(); my $logFile = "$absTestResultsDirectory/pywebsocket_log.txt"; @@ -1454,7 +1383,9 @@ sub openWebSocketServerIfNeeded() "-p", "$webSocketPort", "-d", "$webSocketHandlerDir", "-s", "$webSocketHandlerScanDir", + "-m", "$webSocketHandlerMapFile", "-l", "$logFile", + "--strict", ); # wss is disabled until all platforms support pyOpenSSL. # my @argsSecure = ( @@ -2220,6 +2151,24 @@ sub findTestsToRun @testsToRun = sort pathcmp @testsToRun; + # We need to minimize the time when Apache and WebSocketServer is locked by tests + # so run them last if no explicit order was specified in the argument list. 
+ if (!scalar @ARGV) { + my @httpTests; + my @websocketTests; + my @otherTests; + foreach my $test (@testsToRun) { + if ($test =~ /^http\//) { + push(@httpTests, $test); + } elsif ($test =~ /^websocket\//) { + push(@websocketTests, $test); + } else { + push(@otherTests, $test); + } + } + @testsToRun = (@otherTests, @httpTests, @websocketTests); + } + # Reverse the tests @testsToRun = reverse @testsToRun if $reverseTests; diff --git a/WebKitTools/Scripts/run-webkit-websocketserver b/WebKitTools/Scripts/run-webkit-websocketserver index e05303a..bbc5af6 100755 --- a/WebKitTools/Scripts/run-webkit-websocketserver +++ b/WebKitTools/Scripts/run-webkit-websocketserver @@ -67,12 +67,14 @@ sub openWebSocketServer() my $webSocketPythonPath = "$srcDir/WebKitTools/pywebsocket"; my $webSocketHandlerDir = "$testDirectory"; my $webSocketHandlerScanDir = "$testDirectory/websocket/tests"; + my $webSocketHandlerMapFile = "$webSocketHandlerScanDir/handler_map.txt"; my @args = ( "$srcDir/WebKitTools/pywebsocket/mod_pywebsocket/standalone.py", "-p", "$webSocketPort", "-d", "$webSocketHandlerDir", "-s", "$webSocketHandlerScanDir", + "-m", "$webSocketHandlerMapFile", ); $ENV{"PYTHONPATH"} = $webSocketPythonPath; diff --git a/WebKitTools/Scripts/sunspider-compare-results b/WebKitTools/Scripts/sunspider-compare-results index a207d7a..3446cd8 100755 --- a/WebKitTools/Scripts/sunspider-compare-results +++ b/WebKitTools/Scripts/sunspider-compare-results @@ -39,21 +39,27 @@ my $configuration = configuration(); my $root; my $showHelp = 0; +my $suite = ""; my $ubench = 0; my $v8 = 0; +my $parseonly = 0; my $programName = basename($0); my $usage = <<EOF; Usage: $programName [options] FILE FILE --help Show this help message --root Path to root tools build - --ubench Compare microbenchmark results - --v8 Compare the V8 benchmark results + --suite Select a specific benchmark suite. The default is sunspider-0.9.1 + --ubench Use microbenchmark suite instead of regular tests. Same as --suite=ubench + --v8-suite Use the V8 benchmark suite. Same as --suite=v8-v4 + --parse-only Use the parse-only benchmark suite. Same as --suite=parse-only EOF GetOptions('root=s' => sub { my ($argName, $value); setConfigurationProductDir(Cwd::abs_path($value)); }, + 'suite=s' => \$suite, 'ubench' => \$ubench, 'v8' => \$v8, + 'parse-only' => \$parseonly, 'help' => \$showHelp); if ($showHelp) { @@ -119,7 +125,9 @@ chdir("SunSpider"); my @args = ("--shell", $jscPath); # This code could be removed if we chose to pass extra args to sunspider instead of Xcode +push @args, "--suite=${suite}" if $suite; push @args, "--ubench" if $ubench; push @args, "--v8" if $v8; +push @args, "--parse-only" if $parseonly; exec currentPerlPath(), "./sunspider-compare-results", @args, @ARGV; diff --git a/WebKitTools/Scripts/svn-apply b/WebKitTools/Scripts/svn-apply index 0373aa5..f586211 100755 --- a/WebKitTools/Scripts/svn-apply +++ b/WebKitTools/Scripts/svn-apply @@ -2,6 +2,7 @@ # Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. 
# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au> +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -116,10 +117,9 @@ my %removeDirectoryIgnoreList = ( '_svn' => 1, ); -my $globalExitCode = 0; +my $globalExitStatus = 0; -my $pathScriptWasRunFrom = Cwd::getcwd(); -my $pathForRepositoryRoot = determineVCSRoot(); +my $repositoryRootPath = determineVCSRoot(); my %checkedDirectories; my %copiedFiles; @@ -133,7 +133,7 @@ my $patch; while (<>) { s/([\n\r]+)$//mg; my $eol = $1; - if (!defined($indexPath) && m#^diff --git a/#) { + if (!defined($indexPath) && m#^diff --git \w/#) { $filter = \&gitdiff2svndiff; } $_ = &$filter($_) if $filter; @@ -193,7 +193,7 @@ for $patch (@patches) { removeDirectoriesIfNeeded(); -exit $globalExitCode; +exit $globalExitStatus; sub addDirectoriesIfNeeded($) { @@ -224,25 +224,22 @@ sub addDirectoriesIfNeeded($) } } +# Args: +# $patch: a patch string. +# $pathRelativeToRoot: the path of the file to be patched, relative to the +# repository root. This should normally be the path +# found in the patch's "Index:" line. +# $options: a reference to an array of options to pass to the patch command. sub applyPatch($$;$) { - my ($patch, $fullPath, $options) = @_; - chdir $pathForRepositoryRoot; - $options = [] if (! $options); - push @{$options}, "--force" if $force; - my $command = "patch " . join(" ", "-p0", @{$options}); - open PATCH, "| $command" or die "Failed to patch $fullPath\n"; - print PATCH $patch; - close PATCH; - chdir $pathScriptWasRunFrom; - - my $exitCode = $? >> 8; - if ($exitCode) { - if (!$force) { - print "$command \"$fullPath\" returned $exitCode. Pass --force to ignore patch failures.\n"; - exit $exitCode; - } - $globalExitCode = $exitCode; + my ($patch, $pathRelativeToRoot, $options) = @_; + + my $optionalArgs = {options => $options, ensureForce => $force}; + + my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs); + + if ($exitStatus) { + $globalExitStatus = $exitStatus; } } diff --git a/WebKitTools/Scripts/svn-unapply b/WebKitTools/Scripts/svn-unapply index c277a3e..eb20ca0 100755 --- a/WebKitTools/Scripts/svn-unapply +++ b/WebKitTools/Scripts/svn-unapply @@ -2,6 +2,7 @@ # Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. # Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au> +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -76,14 +77,22 @@ sub revertDirectories(); sub unapplyPatch($$;$); sub unsetChangeLogDate($$); +my $force = 0; my $showHelp = 0; -if (!GetOptions("help!" => \$showHelp) || $showHelp) { - print STDERR basename($0) . " [-h|--help] patch1 [patch2 ...]\n"; + +my $optionParseSuccess = GetOptions( + "force!" => \$force, + "help!" => \$showHelp +); + +if (!$optionParseSuccess || $showHelp) { + print STDERR basename($0) . 
" [-h|--help] [--force] patch1 [patch2 ...]\n"; exit 1; } -my $pathScriptWasRunFrom = Cwd::getcwd(); -my $pathForRepositoryRoot = determineVCSRoot(); +my $globalExitStatus = 0; + +my $repositoryRootPath = determineVCSRoot(); my @copiedFiles; my %directoriesToCheck; @@ -95,7 +104,7 @@ my $patch; while (<>) { s/([\n\r]+)$//mg; my $eol = $1; - if (!defined($indexPath) && m#^diff --git a/#) { + if (!defined($indexPath) && m#^diff --git \w/#) { $filter = \&gitdiff2svndiff; } $_ = &$filter($_) if $filter; @@ -142,7 +151,7 @@ if (isSVN()) { revertDirectories(); } -exit 0; +exit $globalExitStatus; sub checksum($) { @@ -228,7 +237,7 @@ sub patch($) sub revertDirectories() { - chdir $pathForRepositoryRoot; + chdir $repositoryRootPath; my %checkedDirectories; foreach my $path (reverse sort keys %directoriesToCheck) { my @dirs = File::Spec->splitdir($path); @@ -258,16 +267,24 @@ sub revertDirectories() } } +# Args: +# $patch: a patch string. +# $pathRelativeToRoot: the path of the file to be patched, relative to the +# repository root. This should normally be the path +# found in the patch's "Index:" line. +# $options: a reference to an array of options to pass to the patch command. +# Do not include --reverse in this array. sub unapplyPatch($$;$) { - my ($patch, $fullPath, $options) = @_; - chdir $pathForRepositoryRoot; - $options = [] if (! $options); - my $command = "patch " . join(" ", "-p0", "-R", @{$options}); - open PATCH, "| $command" or die "Failed to patch $fullPath: $!"; - print PATCH $patch; - close PATCH; - chdir $pathScriptWasRunFrom; + my ($patch, $pathRelativeToRoot, $options) = @_; + + my $optionalArgs = {options => $options, ensureForce => $force, shouldReverse => 1}; + + my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs); + + if ($exitStatus) { + $globalExitStatus = $exitStatus; + } } sub unsetChangeLogDate($$) diff --git a/WebKitTools/Scripts/test-webkit-scripts b/WebKitTools/Scripts/test-webkit-scripts new file mode 100755 index 0000000..781e8ce --- /dev/null +++ b/WebKitTools/Scripts/test-webkit-scripts @@ -0,0 +1,85 @@ +#!/usr/bin/python +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Run unit tests of WebKit's Perl and Python scripts.""" + +# The docstring above is passed as the "description" to the OptionParser +# used in this script's __main__ block. +# +# For the command options supported by this script, see the code below +# that instantiates the OptionParser class, or else pass --help +# while running this script (since argument help is auto-generated). + +import os +import subprocess +import sys +from optparse import OptionParser + +class ScriptsTester(object): + + """Supports running unit tests of WebKit scripts.""" + + def __init__(self, scripts_directory): + self.scripts_directory = scripts_directory + + def script_path(self, script_file_name): + """Return an absolute path to the given script.""" + return os.path.join(self.scripts_directory, script_file_name) + + def run_test_script(self, script_title, script_path, args=None): + """Run the given test script.""" + print('Testing %s:' % script_title) + call_args = [script_path] + if args: + call_args.extend(args) + subprocess.call(call_args) + print(70 * "*") # dividing line + + def main(self): + parser = OptionParser(description=__doc__) + parser.add_option('-a', '--all', dest='all', action='store_true', + default=False, help='run all available tests, ' + 'including those suppressed by default') + (options, args) = parser.parse_args() + + self.run_test_script('Perl scripts', self.script_path('test-webkitperl')) + self.run_test_script('Python scripts', self.script_path('test-webkitpy'), + ['--all'] if options.all else None) + + # FIXME: Display a cumulative indication of success or failure. + # In addition, call sys.exit() with 0 or 1 depending on that + # cumulative success or failure. + print('Note: Perl and Python results appear separately above.') + + +if __name__ == '__main__': + # The scripts directory is the directory containing this file. + tester = ScriptsTester(os.path.dirname(__file__)) + tester.main() diff --git a/WebKitTools/Scripts/test-webkitperl b/WebKitTools/Scripts/test-webkitperl new file mode 100755 index 0000000..2e31593 --- /dev/null +++ b/WebKitTools/Scripts/test-webkitperl @@ -0,0 +1,49 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
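The new test-webkit-scripts runner above discards the return value of subprocess.call, and its FIXME asks for a cumulative pass/fail indication. One possible shape for that, purely as a sketch and not part of the patch:

import subprocess
import sys

def run_test_scripts(script_paths):
    # Run each test runner, remember its exit code, and exit non-zero if any failed.
    exit_codes = [subprocess.call([path]) for path in script_paths]
    failures = [path for path, code in zip(script_paths, exit_codes) if code]
    for path in failures:
        print "FAILED: %s" % path
    sys.exit(1 if failures else 0)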
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Runs unit tests of WebKit Perl code. + +use strict; +use warnings; + +use File::Spec; +use FindBin; +use Test::Harness; +use lib $FindBin::Bin; # so this script can be run from any directory. +use VCSUtils; + +# Use an absolute path so this script can be run from any directory. +my $scriptsDir = $FindBin::Bin; + +my $pattern = File::Spec->catfile($scriptsDir, "webkitperl/*_unittest/*.pl"); + +my @files = <${pattern}>; # lists files alphabetically + +runtests(@files); diff --git a/WebKitTools/Scripts/test-webkitpy b/WebKitTools/Scripts/test-webkitpy new file mode 100755 index 0000000..ca58b50 --- /dev/null +++ b/WebKitTools/Scripts/test-webkitpy @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import sys +import unittest + +from webkitpy.bugzilla_unittest import * +from webkitpy.buildbot_unittest import * +from webkitpy.changelogs_unittest import * +from webkitpy.commands.download_unittest import * +from webkitpy.commands.early_warning_system_unittest import * +from webkitpy.commands.openbugs_unittest import OpenBugsTest +from webkitpy.commands.upload_unittest import * +from webkitpy.commands.queries_unittest import * +from webkitpy.commands.queues_unittest import * +from webkitpy.committers_unittest import * +from webkitpy.credentials_unittest import * +from webkitpy.diff_parser_unittest import * +from webkitpy.executive_unittest import * +from webkitpy.multicommandtool_unittest import * +from webkitpy.networktransaction_unittest import * +from webkitpy.patchcollection_unittest import * +from webkitpy.queueengine_unittest import * +from webkitpy.steps.steps_unittest import * +from webkitpy.steps.closebugforlanddiff_unittest import * +from webkitpy.steps.updatechangelogswithreview_unittests import * +from webkitpy.style.unittests import * # for check-webkit-style +from webkitpy.webkit_logging_unittest import * +from webkitpy.webkitport_unittest import * + +if __name__ == "__main__": + # FIXME: This is a hack, but I'm tired of commenting out the test. + # See https://bugs.webkit.org/show_bug.cgi?id=31818 + if len(sys.argv) > 1 and sys.argv[1] == "--all": + sys.argv.remove("--all") + from webkitpy.scm_unittest import * + + unittest.main() diff --git a/WebKitTools/Scripts/update-webkit-chromium b/WebKitTools/Scripts/update-webkit-chromium index 779b9a6..fa94f8c 100755 --- a/WebKitTools/Scripts/update-webkit-chromium +++ b/WebKitTools/Scripts/update-webkit-chromium @@ -28,24 +28,28 @@ # Update script for the WebKit Chromium Port. -# Check if gclient is installed. -if (not `gclient --version`) { - print STDERR "gclient is required for updating chromium dependencies.\n"; - print STDERR "Install depot_tools and add gclient to the environment\n"; - print STDERR "path. For more information, refer to:\n"; - print STDERR "http://dev.chromium.org/developers/how-tos/install-gclient\n"; - die; +chdir("WebKit/chromium") or die $!; + +# Find gclient or install it. +my $gclientPath; +if (`gclient --version`) { + $gclientPath = 'gclient'; +} elsif (-e 'depot_tools/gclient') { + $gclientPath = 'depot_tools/gclient'; +} else { + print "Installing chromium's depot_tools...\n"; + system("svn co http://src.chromium.org/svn/trunk/tools/depot_tools") == 0 or die $1; + $gclientPath = 'depot_tools/gclient'; } -chdir("WebKit/chromium") or die $!; if (! -e ".gclient") { # If .gclient configuration file doesn't exist, create it. print "Configuring gclient...\n"; - system("gclient", + system($gclientPath, "config", "--spec=solutions=[{'name':'./','url':None}]") == 0 or die $!; } # Execute gclient sync. 
print "Updating chromium port dependencies using gclient...\n"; -system("gclient", "sync", "--force") == 0 or die $!; +system($gclientPath, "sync", "--force") == 0 or die $!; diff --git a/WebKitTools/Scripts/validate-committer-lists b/WebKitTools/Scripts/validate-committer-lists index 05f2b36..2f2dd32 100755 --- a/WebKitTools/Scripts/validate-committer-lists +++ b/WebKitTools/Scripts/validate-committer-lists @@ -36,12 +36,13 @@ import subprocess import re import urllib2 from datetime import date, datetime, timedelta -from modules.committers import CommitterList -from modules.logging import log, error +from webkitpy.committers import CommitterList +from webkitpy.webkit_logging import log, error +from webkitpy.scm import Git -# WebKit includes a built copy of BeautifulSoup in Scripts/modules +# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy # so this import should always succeed. -from modules.BeautifulSoup import BeautifulSoup +from webkitpy.BeautifulSoup import BeautifulSoup def print_list_if_non_empty(title, list_to_print): if not list_to_print: @@ -243,7 +244,14 @@ class CommitterListFromGit: def main(): committer_list = CommitterList() CommitterListFromMailingList().check_for_emails_missing_from_list(committer_list) - + + if not Git.in_working_directory("."): + print """\n\nWARNING: validate-committer-lists requires a git checkout. +The following checks are disabled: + - List of committers ordered by last commit + - List of historical committers missing from committers.py +""" + return 1 svn_committer_list = CommitterListFromGit() svn_committer_list.print_possibly_expired_committers(committer_list) svn_committer_list.print_committers_missing_from_committer_list(committer_list) diff --git a/WebKitTools/Scripts/bugzilla-tool b/WebKitTools/Scripts/webkit-patch index fdbb740..b4bcc4c 100755 --- a/WebKitTools/Scripts/bugzilla-tool +++ b/WebKitTools/Scripts/webkit-patch @@ -32,34 +32,36 @@ import os -from modules.bugzilla import Bugzilla -from modules.buildbot import BuildBot -from modules.buildsteps import BuildSteps -from modules.commands.download import * -from modules.commands.early_warning_system import * -from modules.commands.queries import * -from modules.commands.queues import * -from modules.commands.upload import * -from modules.executive import Executive -from modules.logging import log -from modules.multicommandtool import MultiCommandTool -from modules.scm import detect_scm_system +from webkitpy.bugzilla import Bugzilla +from webkitpy.buildbot import BuildBot +from webkitpy.commands.download import * +from webkitpy.commands.early_warning_system import * +from webkitpy.commands.openbugs import OpenBugs +from webkitpy.commands.queries import * +from webkitpy.commands.queues import * +from webkitpy.commands.upload import * +from webkitpy.executive import Executive +from webkitpy.webkit_logging import log +from webkitpy.multicommandtool import MultiCommandTool +from webkitpy.scm import detect_scm_system +from webkitpy.user import User + + +class WebKitPatch(MultiCommandTool): + global_options = [ + make_option("--dry-run", action="store_true", dest="dry_run", default=False, help="do not touch remote servers"), + make_option("--status-host", action="store", dest="status_host", type="string", nargs=1, help="Hostname (e.g. 
localhost or commit.webkit.org) where status updates should be posted."), + ] -class BugzillaTool(MultiCommandTool): def __init__(self): MultiCommandTool.__init__(self) - self.global_option_parser.add_option("--dry-run", action="callback", help="do not touch remote servers", callback=self.dry_run_callback) self.bugs = Bugzilla() self.buildbot = BuildBot() self.executive = Executive() + self.user = User() self._scm = None - self._status = None - self.steps = BuildSteps() - - def dry_run_callback(self, option, opt, value, parser): - self.scm().dryrun = True - self.bugs.dryrun = True + self.status_server = StatusServer() def scm(self): # Lazily initialize SCM to not error-out before command line parsing (or when running non-scm commands). @@ -78,11 +80,6 @@ class BugzillaTool(MultiCommandTool): return self._scm - def status(self): - if not self._status: - self._status = StatusBot() - return self._status - def path(self): return __file__ @@ -93,6 +90,14 @@ class BugzillaTool(MultiCommandTool): return self.scm().supports_local_commits() return True + # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well. + def handle_global_options(self, options): + if options.dry_run: + self.scm().dryrun = True + self.bugs.dryrun = True + if options.status_host: + self.status_server.set_host(options.status_host) + def should_execute_command(self, command): if command.requires_local_commits and not self.scm().supports_local_commits(): failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root) @@ -101,4 +106,4 @@ class BugzillaTool(MultiCommandTool): if __name__ == "__main__": - BugzillaTool().main() + WebKitPatch().main() diff --git a/WebKitTools/Scripts/webkit-tools-completion.sh b/WebKitTools/Scripts/webkit-tools-completion.sh index 308711d..5eb6833 100644 --- a/WebKitTools/Scripts/webkit-tools-completion.sh +++ b/WebKitTools/Scripts/webkit-tools-completion.sh @@ -32,12 +32,12 @@ # Add a line like this to your .bashrc: # source /path/to/WebKitCode/WebKitTools/Scripts/webkit-tools-completion.sh -__bugzilla-tool_generate_reply() +__webkit-patch_generate_reply() { COMPREPLY=( $(compgen -W "$1" -- "${COMP_WORDS[COMP_CWORD]}") ) } -_bugzilla-tool_complete() +_webkit-patch_complete() { local command current_command="${COMP_WORDS[1]}" case "$current_command" in @@ -50,41 +50,45 @@ _bugzilla-tool_complete() esac if [ $COMP_CWORD -eq 1 ]; then - __bugzilla-tool_generate_reply "--help apply-patches bugs-to-commit commit-message land-diff land-patches obsolete-attachments patches-to-commit post-commits post-diff reviewed-patches" + __webkit-patch_generate_reply "--help apply-from-bug bugs-to-commit commit-message land land-from-bug obsolete-attachments patches-to-commit post upload tree-status rollout reviewed-patches" return fi case "$command" in - apply-patches) - __bugzilla-tool_generate_reply "--force-clean --local-commit --no-clean --no-update" + apply-from-bug) + __webkit-patch_generate_reply "--force-clean --local-commit --no-clean --no-update" return ;; commit-message) return ;; - land-diff) - __bugzilla-tool_generate_reply "--no-build --no-close --no-test --reviewer= -r" + land) + __webkit-patch_generate_reply "--no-build --no-close --no-test --reviewer= -r" return ;; - land-patches) - __bugzilla-tool_generate_reply "--force-clean --no-build --no-clean --no-test" + land-from-bug) + __webkit-patch_generate_reply "--force-clean --no-build --no-clean --no-test" return ;; obsolete-attachments) 
return ;; - post-diff) - __bugzilla-tool_generate_reply "--description --no-obsolete --no-review -m" + post) + __webkit-patch_generate_reply "--description --no-obsolete --no-review --request-commit -m --open-bug" + return + ;; + upload) + __webkit-patch_generate_reply "--description --no-obsolete --no-review --request-commit --cc -m --open-bug" return ;; post-commits) - __bugzilla-tool_generate_reply "--bug-id= --no-comment --no-obsolete --no-review -b" + __webkit-patch_generate_reply "--bug-id= --no-comment --no-obsolete --no-review -b" return ;; esac } -complete -F _bugzilla-tool_complete bugzilla-tool +complete -F _webkit-patch_complete webkit-patch complete -W "--continue --fix-merged --help --no-continue --no-warnings --warnings -c -f -h -w" resolve-ChangeLogs complete -W "--bug --diff --git-commit --git-index --git-reviewer --help --no-update --no-write --open --update --write -d -h -o" prepare-ChangeLog complete -W "--clean --debug --help -h" build-webkit diff --git a/WebKitTools/Scripts/webkitdirs.pm b/WebKitTools/Scripts/webkitdirs.pm index 64e5dc4..d667a8a 100644 --- a/WebKitTools/Scripts/webkitdirs.pm +++ b/WebKitTools/Scripts/webkitdirs.pm @@ -32,6 +32,7 @@ use warnings; use Config; use FindBin; use File::Basename; +use File::Path; use File::Spec; use POSIX; use VCSUtils; @@ -58,17 +59,22 @@ my $sourceDir; my $currentSVNRevision; my $osXVersion; my $isQt; +my $isSymbian; my %qtFeatureDefaults; my $isGtk; my $isWx; my @wxArgs; my $isChromium; +my $isInspectorFrontend; # Variables for Win32 support my $vcBuildPath; my $windowsTmpPath; my $windowsSourceDir; +# Defined in VCSUtils. +sub exitStatus($); + sub determineSourceDir { return if $sourceDir; @@ -134,6 +140,9 @@ sub determineBaseProductDir undef $baseProductDir unless $baseProductDir =~ /^\//; } } + } elsif (isSymbian()) { + # Shadow builds are not supported on Symbian + $baseProductDir = $sourceDir; } if (!defined($baseProductDir)) { # Port-spesific checks failed, use default @@ -234,9 +243,11 @@ sub argumentsForConfiguration() push(@args, '--release') if $configuration eq "Release"; push(@args, '--32-bit') if $architecture ne "x86_64"; push(@args, '--qt') if isQt(); + push(@args, '--symbian') if isSymbian(); push(@args, '--gtk') if isGtk(); push(@args, '--wx') if isWx(); push(@args, '--chromium') if isChromium(); + push(@args, '--inspector-frontend') if isInspectorFrontend(); return @args; } @@ -529,8 +540,13 @@ sub builtDylibPathForName $libraryName = "QtWebKit"; if (isDarwin() and -d "$configurationProductDir/lib/$libraryName.framework") { return "$configurationProductDir/lib/$libraryName.framework/$libraryName"; - } elsif (isWindows() or isCygwin()) { - return "$configurationProductDir/lib/$libraryName.dll"; + } elsif (isWindows()) { + chomp(my $mkspec = `qmake -query QMAKE_MKSPECS`); + my $qtMajorVersion = retrieveQMakespecVar("$mkspec/qconfig.pri", "QT_MAJOR_VERSION"); + if ($qtMajorVersion eq "unknown") { + $qtMajorVersion = ""; + } + return "$configurationProductDir/lib/$libraryName$qtMajorVersion.dll"; } else { return "$configurationProductDir/lib/lib$libraryName.so"; } @@ -563,7 +579,7 @@ sub checkFrameworks push(@frameworks, "WebKit") if isAppleMacWebKit(); for my $framework (@frameworks) { my $path = builtDylibPathForName($framework); - die "Can't find built framework at \"$path\".\n" unless -x $path; + die "Can't find built framework at \"$path\".\n" unless -e $path; } } @@ -611,15 +627,12 @@ sub hasSVGSupport { my $path = shift; - if (isQt()) { - return 1; - } - if (isWx()) { return 0; } - return 
libraryContainsSymbol($path, "SVGElement"); + # We used to look for SVGElement but isSVGElement is a valid symbol in --no-svg builds. + return libraryContainsSymbol($path, "SVGDefsElement"); } sub removeLibraryDependingOnSVG @@ -775,12 +788,30 @@ sub checkWebCoreWCSSSupport return $hasWCSS; } +sub isInspectorFrontend() +{ + determineIsInspectorFrontend(); + return $isInspectorFrontend; +} + +sub determineIsInspectorFrontend() +{ + return if defined($isInspectorFrontend); + $isInspectorFrontend = checkForArgumentAndRemoveFromARGV("--inspector-frontend"); +} + sub isQt() { determineIsQt(); return $isQt; } +sub isSymbian() +{ + determineIsSymbian(); + return $isSymbian; +} + sub qtFeatureDefaults() { determineQtFeatureDefaults(); @@ -831,6 +862,18 @@ sub determineIsQt() $isQt = defined($ENV{'QTDIR'}); } +sub determineIsSymbian() +{ + return if defined($isSymbian); + + if (checkForArgumentAndRemoveFromARGV("--symbian")) { + $isSymbian = 1; + return; + } + + $isSymbian = defined($ENV{'EPOCROOT'}); +} + sub isGtk() { determineIsGtk(); @@ -1172,6 +1215,38 @@ sub setupCygwinEnv() print "WEBKITLIBRARIESDIR is set to: ", $ENV{"WEBKITLIBRARIESDIR"}, "\n"; } +sub copyInspectorFrontendFiles +{ + my $productDir = productDir(); + my $sourceInspectorPath = sourceDir() . "/WebCore/inspector/front-end/"; + my $inspectorResourcesDirPath = $ENV{"WEBKITINSPECTORRESOURCESDIR"}; + + if (!defined($inspectorResourcesDirPath)) { + $inspectorResourcesDirPath = ""; + } + + if (isAppleMacWebKit()) { + $inspectorResourcesDirPath = $productDir . "/WebCore.framework/Resources/inspector"; + } elsif (isAppleWinWebKit()) { + $inspectorResourcesDirPath = $productDir . "/WebKit.resources/inspector"; + } elsif (isQt() || isGtk()) { + my $prefix = $ENV{"WebKitInstallationPrefix"}; + $inspectorResourcesDirPath = (defined($prefix) ? $prefix : "/usr/share") . "/webkit-1.0/webinspector"; + } + + if (! -d $inspectorResourcesDirPath) { + print "*************************************************************\n"; + print "Cannot find '$inspectorResourcesDirPath'.\n" if (defined($inspectorResourcesDirPath)); + print "Make sure that you have built WebKit first.\n" if (! -d $productDir || defined($inspectorResourcesDirPath)); + print "Optionally, set the environment variable 'WebKitInspectorResourcesDir'\n"; + print "to point to the directory that contains the WebKit Inspector front-end\n"; + print "files for the built WebCore framework.\n"; + print "*************************************************************\n"; + die; + } + return system "rsync", "-aut", "--exclude=/.DS_Store", "--exclude=.svn/", !isQt() ? "--exclude=/WebKit.qrc" : "", $sourceInspectorPath, $inspectorResourcesDirPath; +} + sub buildXCodeProject($$@) { my ($project, $clean, @extraOptions) = @_; @@ -1198,9 +1273,7 @@ sub buildVisualStudioProject $action = "/clean"; } - my $useenv = "/useenv"; - - my @command = ($vcBuildPath, $useenv, $winProjectPath, $action, $config); + my @command = ($vcBuildPath, $winProjectPath, $action, $config); print join(" ", @command), "\n"; return system @command; @@ -1398,6 +1471,45 @@ sub buildQMakeProject($@) my $make = qtMakeCommand($qmakebin); my $config = configuration(); my $prefix = $ENV{"WebKitInstallationPrefix"}; + my $dir = File::Spec->canonpath(baseProductDir()); + $dir = File::Spec->catfile($dir, $config) unless isSymbian(); + File::Path::mkpath($dir); + chdir $dir or die "Failed to cd into " . $dir . 
"\n"; + + print "Generating derived sources\n\n"; + + my @dsQmakeArgs = @buildArgs; + push @dsQmakeArgs, "-r"; + push @dsQmakeArgs, sourceDir() . "/DerivedSources.pro"; + push @dsQmakeArgs, "-o Makefile.DerivedSources"; + print "Calling '$qmakebin @dsQmakeArgs' in " . $dir . "\n\n"; + my $result = system "$qmakebin @dsQmakeArgs"; + if ($result ne 0) { + die "Failed while running $qmakebin to generate derived sources!\n"; + } + + my $dsMakefile = "Makefile.DerivedSources"; + + print "Calling '$make $makeargs -f $dsMakefile generated_files' in " . $dir . "/JavaScriptCore\n\n"; + if ($make eq "nmake") { + $result = system "pushd JavaScriptCore && $make $makeargs -f $dsMakefile generated_files && popd"; + } else { + $result = system "$make $makeargs -C JavaScriptCore -f $dsMakefile generated_files"; + } + if ($result ne 0) { + die "Failed to generate JavaScriptCore's derived sources!\n"; + } + + print "Calling '$make $makeargs -f $dsMakefile generated_files' in " . $dir . "/WebCore\n\n"; + if ($make eq "nmake") { + $result = system "pushd WebCore && $make $makeargs -f $dsMakefile generated_files && popd"; + } else { + $result = system "$make $makeargs -C WebCore -f $dsMakefile generated_files"; + } + if ($result ne 0) { + die "Failed to generate WebCore's derived sources!\n"; + } + push @buildArgs, "OUTPUT_DIR=" . baseProductDir() . "/$config"; push @buildArgs, sourceDir() . "/WebKit.pro"; @@ -1415,36 +1527,23 @@ sub buildQMakeProject($@) } } - my $dir = File::Spec->canonpath(baseProductDir()); - my @mkdirArgs; - push @mkdirArgs, "-p" if !isWindows(); - if (! -d $dir) { - system "mkdir", @mkdirArgs, "$dir"; - if (! -d $dir) { - die "Failed to create product directory " . $dir; - } - } - $dir = File::Spec->catfile($dir, $config); - if (! -d $dir) { - system "mkdir", @mkdirArgs, "$dir"; - if (! -d $dir) { - die "Failed to create build directory " . $dir; - } - } - - chdir $dir or die "Failed to cd into " . $dir . "\n"; - print "Calling '$qmakebin @buildArgs' in " . $dir . "\n\n"; print "Installation directory: $prefix\n" if(defined($prefix)); - my $result = system "$qmakebin @buildArgs"; + $result = system "$qmakebin @buildArgs"; if ($result ne 0) { die "Failed to setup build environment using $qmakebin!\n"; } if ($clean) { + print "Calling '$make $makeargs distclean' in " . $dir . "\n\n"; $result = system "$make $makeargs distclean"; + } elsif (isSymbian()) { + print "\n\nWebKit is now configured for building, but you have to make\n"; + print "a choice about the target yourself. To start the build run:\n\n"; + print " make release-armv5|debug-winscw|etc.\n\n"; } else { + print "Calling '$make $makeargs' in " . $dir . "\n\n"; $result = system "$make $makeargs"; } @@ -1523,7 +1622,7 @@ sub buildChromium($@) $result = buildChromiumVisualStudioProject("WebKit/chromium/WebKit.sln", $clean); } elsif (isLinux()) { # Linux build - build using make. 
- $ result = buildChromiumMakefile("WebKit/chromium/", "webkit", $clean); + $ result = buildChromiumMakefile("WebKit/chromium/", "all", $clean); } else { print STDERR "This platform is not supported by chromium.\n"; } @@ -1551,15 +1650,6 @@ sub setPathForRunningWebKitApp $env->{PATH} = join(':', productDir(), dirname(installedSafariPath()), appleApplicationSupportPath(), $env->{PATH} || ""); } -sub exitStatus($) -{ - my ($returnvalue) = @_; - if ($^O eq "MSWin32") { - return $returnvalue >> 8; - } - return WEXITSTATUS($returnvalue); -} - sub runSafari { my ($debugger) = @_; diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl new file mode 100644 index 0000000..d21c706 --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl @@ -0,0 +1,290 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::fixChangeLogPatch(). + +use Test::Simple tests => 7; +use VCSUtils; + +# The source ChangeLog for these tests is the following: +# +# 2009-12-22 Alice <alice@email.address> +# +# Reviewed by Ray. +# +# Changed some code on 2009-12-22. +# +# * File: +# * File2: +# +# 2009-12-21 Alice <alice@email.address> +# +# Reviewed by Ray. +# +# Changed some code on 2009-12-21. +# +# * File: +# * File2: + +my $title; +my $in; +my $out; + +# New test +$title = "fixChangeLogPatch: [no change] First line is new line."; + +$in = <<'END'; +--- ChangeLog ++++ ChangeLog +@@ -1,3 +1,11 @@ ++2009-12-22 Bob <bob@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. 
+END + +ok(fixChangeLogPatch($in) eq $in, $title); + +# New test +$title = "fixChangeLogPatch: [no change] No date string."; + +$in = <<'END'; +--- ChangeLog ++++ ChangeLog +@@ -6,6 +6,7 @@ + + * File: + * File2: ++ * File3: + + 2009-12-21 Alice <alice@email.address> + +END + +ok(fixChangeLogPatch($in) eq $in, $title); + +# New test +$title = "fixChangeLogPatch: [no change] New entry inserted in middle."; + +$in = <<'END'; +--- ChangeLog ++++ ChangeLog +@@ -11,6 +11,14 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-21. ++ ++ * File: ++ ++2009-12-21 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-21. + + * File: +END + +ok(fixChangeLogPatch($in) eq $in, $title); + +# New test +$title = "fixChangeLogPatch: Leading context includes first line."; + +$in = <<'END'; +--- ChangeLog ++++ ChangeLog +@@ -1,5 +1,13 @@ + 2009-12-22 Alice <alice@email.address> + ++ Reviewed by Sue. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + +$out = <<'END'; +--- ChangeLog ++++ ChangeLog +@@ -1,3 +1,11 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Sue. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + +ok(fixChangeLogPatch($in) eq $out, $title); + +# New test +$title = "fixChangeLogPatch: Leading context does not include first line."; + +$in = <<'END'; +@@ -2,6 +2,14 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-22. + + * File: +END + +$out = <<'END'; +@@ -1,3 +1,11 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + +ok(fixChangeLogPatch($in) eq $out, $title); + +# New test +$title = "fixChangeLogPatch: Non-consecutive line additions."; + +# This can occur, for example, if the new ChangeLog entry includes +# trailing white space in the first blank line but not the second. +# A diff command can then match the second blank line of the new +# ChangeLog entry with the first blank line of the old. +# The svn diff command with the default --diff-cmd has done this. +$in = <<'END'; +@@ -1,5 +1,11 @@ + 2009-12-22 Alice <alice@email.address> ++ <pretend-whitespace> ++ Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + +$out = <<'END'; +@@ -1,3 +1,9 @@ ++2009-12-22 Alice <alice@email.address> ++ <pretend-whitespace> ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + +ok(fixChangeLogPatch($in) eq $out, $title); + +# New test +$title = "fixChangeLogPatch: Additional edits after new entry."; + +$in = <<'END'; +@@ -2,10 +2,17 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-22. + + * File: +- * File2: + + 2009-12-21 Alice <alice@email.address> + +END + +$out = <<'END'; +@@ -1,11 +1,18 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. 
++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. + + Changed some code on 2009-12-22. + + * File: +- * File2: + + 2009-12-21 Alice <alice@email.address> + +END + +ok(fixChangeLogPatch($in) eq $out, $title); diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl new file mode 100644 index 0000000..483a0a8 --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl @@ -0,0 +1,87 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::generatePatchCommand(). 
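A minimal usage sketch of the generatePatchCommand() interface that the tests below exercise; the argument keys (options, ensureForce, shouldReverse) and the expected return values are taken directly from those assertions, and the snippet is illustrative only, not part of the committed file:

    use VCSUtils;

    # Build a "patch -p0" command string from an optional hash reference of arguments.
    my ($patchCommand, $isForcing) = VCSUtils::generatePatchCommand({ensureForce => 1});
    # Per the assertions below: $patchCommand eq "patch -p0 --force" and $isForcing == 1.
    # With an undefined or empty argument hash, the result is plain "patch -p0" and $isForcing == 0.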
+ +use Test::Simple tests => 10; +use VCSUtils; + +# New test +$title = "generatePatchCommand: Undefined optional arguments."; + +my $argsHashRef; +my ($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0", $title); +ok($isForcing == 0, $title); + +# New test +$title = "generatePatchCommand: Undefined options."; + +my $options; +$argsHashRef = {options => $options}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0", $title); +ok($isForcing == 0, $title); + +# New test +$title = "generatePatchCommand: --force and no \"ensure force\"."; + +$argsHashRef = {options => ["--force"]}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force", $title); +ok($isForcing == 1, $title); + +# New test +$title = "generatePatchCommand: no --force and \"ensure force\"."; + +$argsHashRef = {ensureForce => 1}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force", $title); +ok($isForcing == 1, $title); + +# New test +$title = "generatePatchCommand: \"should reverse\"."; + +$argsHashRef = {shouldReverse => 1}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --reverse", $title); + +# New test +$title = "generatePatchCommand: --fuzz=3, --force."; + +$argsHashRef = {options => ["--fuzz=3", "--force"]}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force --fuzz=3", $title); diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/gitdiff2svndiff.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/gitdiff2svndiff.pl new file mode 100644 index 0000000..93708d6 --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/gitdiff2svndiff.pl @@ -0,0 +1,117 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::gitdiff2svndiff() + +use strict; +use warnings; + +use Test::Simple tests => 20; +use VCSUtils; + +# We use this for display purposes, to keep each test title on one line. +sub excerptString($) +{ + my ($text) = @_; + + my $length = 25; + + my $shortened = substr($text, 0, $length); + $shortened .= "..." if (length($text) > $length); + + return $shortened; +} + +my $git_patch = <<END; +diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h +index f5d5e74..3b6aa92 100644 +--- a/WebCore/rendering/style/StyleFlexibleBoxData.h ++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h +@@ -47,7 +47,6 @@ public: +END + +my $svn_patch = <<END; +Index: WebCore/rendering/style/StyleFlexibleBoxData.h +=================================================================== +--- WebCore/rendering/style/StyleFlexibleBoxData.h ++++ WebCore/rendering/style/StyleFlexibleBoxData.h +@@ -47,7 +47,6 @@ public: +END + +my @gitLines = split("\n", $git_patch); +my @svnLines = split("\n", $svn_patch); + +# New test: check each git header line with different line endings +my $titleHeader = "gitdiff2svndiff: "; + +my @lineEndingPairs = ( # display name, value + ["", ""], + ["\\n", "\n"], + ["\\r\\n", "\r\n"], +); + +for (my $i = 0; $i < @gitLines; $i++) { + foreach my $pair (@lineEndingPairs) { + my $gitLine = $gitLines[$i] . $pair->[1]; + my $expected = $svnLines[$i] . $pair->[1]; + my $title = $titleHeader . excerptString($gitLine); + $title .= " [line-end: \"$pair->[0]\"]"; + + ok($expected eq gitdiff2svndiff($gitLine), $title); + } +} + +# New test +my $title = "gitdiff2svndiff: Convert mnemonic git diff to svn diff"; + +my @prefixes = ( + { 'a' => 'i', 'b' => 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree) + { 'a' => 'c', 'b' => 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree) + { 'a' => 'c', 'b' => 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex) + { 'a' => 'o', 'b' => 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity) + { 'a' => '1', 'b' => '2' }, # git diff --no-index a b (compares two non-git things (1) and (2)) +); + +my $out = ""; + +foreach my $prefix (@prefixes) { + my $mnemonic_patch = $git_patch; + $mnemonic_patch =~ s/ a\// $prefix->{'a'}\//g; + $mnemonic_patch =~ s/ b\// $prefix->{'b'}\//g; + + $out = ""; + foreach my $line (split('\n', $mnemonic_patch)) { + $out .= gitdiff2svndiff($line) . "\n"; + } + + ok($svn_patch eq $out, $title . " (" . $prefix->{'a'} . "," . $prefix->{'b'} . 
")"); +} + diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl new file mode 100644 index 0000000..2507d2d --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl @@ -0,0 +1,328 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseDiff(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @diffHashRefKeys = ( # The $diffHashRef keys to check. + "copiedFromPath", + "indexPath", + "sourceRevision", + "svnConvertedText", +); + +# The array of test cases. 
+my @testCaseHashRefs = ( +{ + # New test + diffName => "SVN: simple", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + # Header keys to check + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => "53052", + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +{ + # New test + diffName => "SVN: leading junk", + inputText => <<'END', + +LEADING JUNK + +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + # Header keys to check + svnConvertedText => <<'END', # Same as input text + +LEADING JUNK + +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => "53052", + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +{ + # New test + diffName => "SVN: copied file", + inputText => <<'END', +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) ++++ Makefile_new (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + # Header keys to check + svnConvertedText => <<'END', # Same as input text +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) ++++ Makefile_new (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + copiedFromPath => "Makefile", + indexPath => "Makefile_new", + sourceRevision => "53131", + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +{ + # New test + diffName => "SVN: two diffs", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + # Header keys to check + svnConvertedText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => "53131", + # Other values to check + lastReadLine => "Index: Makefile_new\n", + nextLine => "===================================================================\n", +}, +{ + # New test + diffName 
=> "SVN: SVN diff followed by Git diff", # Should not recognize Git start + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + # Header keys to check + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => "53131", + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +{ + # New test + diffName => "Git: simple", + inputText => <<'END', +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + # Header keys to check + svnConvertedText => <<'END', +Index: Makefile +=================================================================== +--- Makefile ++++ Makefile +@@ -1,1 1,1 @@ public: +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => undef, + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +{ + # New test + diffName => "Git: Git diff followed by SVN diff", # Should not recognize SVN start + inputText => <<'END', +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + # Header keys to check + svnConvertedText => <<'END', +Index: Makefile +=================================================================== +--- Makefile ++++ Makefile +@@ -1,1 1,1 @@ public: +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => undef, + # Other values to check + lastReadLine => undef, + nextLine => undef, +}, +); + +# Return the arguments for each assertion per test case. +# +# In particular, the number of assertions per test case is the length +# of the return value of this subroutine on a sample input. +# +# Returns @assertionArgsArrayRefs: +# $assertionArgsArrayRef: A reference to an array of parameters to pass +# to each call to is(). 
The parameters are-- +# $got: The value obtained +# $expected: The expected value +# $testName: The name of the test +sub testParseDiffAssertionArgs($) +{ + my ($testCaseHashRef) = @_; + + my $fileHandle; + open($fileHandle, "<", \$testCaseHashRef->{inputText}); + + my $line = <$fileHandle>; + + my ($diffHashRef, $lastReadLine) = VCSUtils::parseDiff($fileHandle, $line); + + my $testNameStart = "parseDiff(): [$testCaseHashRef->{diffName}] "; + + my @assertionArgsArrayRefs; # Return value + my @assertionArgs; + foreach my $diffHashRefKey (@diffHashRefKeys) { + my $testName = "${testNameStart}key=\"$diffHashRefKey\""; + @assertionArgs = ($diffHashRef->{$diffHashRefKey}, $testCaseHashRef->{$diffHashRefKey}, $testName); + push(@assertionArgsArrayRefs, \@assertionArgs); + } + + @assertionArgs = ($lastReadLine, $testCaseHashRef->{lastReadLine}, "${testNameStart}lastReadLine"); + push(@assertionArgsArrayRefs, \@assertionArgs); + + my $nextLine = <$fileHandle>; + @assertionArgs = ($nextLine, $testCaseHashRef->{nextLine}, "${testNameStart}nextLine"); + push(@assertionArgsArrayRefs, \@assertionArgs); + + return @assertionArgsArrayRefs; +} + +# Test parseDiff() for the given test case. +sub testParseDiff($) +{ + my ($testCaseHashRef) = @_; + + my @assertionArgsArrayRefs = testParseDiffAssertionArgs($testCaseHashRef); + + foreach my $arrayRef (@assertionArgsArrayRefs) { + # The parameters are -- is($got, $expected, $testName). + is($arrayRef->[0], $arrayRef->[1], $arrayRef->[2]); + } +} + +# Count the number of assertions per test case, using a sample test case. +my $assertionCount = testParseDiffAssertionArgs($testCaseHashRefs[0]); + +plan(tests => @testCaseHashRefs * $assertionCount); # Total number of tests + +foreach my $testCaseHashRef (@testCaseHashRefs) { + testParseDiff($testCaseHashRef); +} diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl new file mode 100644 index 0000000..a7a3c26 --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl @@ -0,0 +1,288 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseDiffHeader(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @diffHeaderHashRefKeys = ( # The $diffHeaderHashRef keys to check. + "copiedFromPath", + "indexPath", + "sourceRevision", + "svnConvertedText", +); + +# The array of test cases. +my @testCaseHashRefs = ( +{ + # New test + diffName => "SVN: simple", + inputText => <<'END', +Index: WebKitTools/Scripts/VCSUtils.pm +=================================================================== +--- WebKitTools/Scripts/VCSUtils.pm (revision 53004) ++++ WebKitTools/Scripts/VCSUtils.pm (working copy) +@@ -32,6 +32,7 @@ use strict; + use warnings; +END + # Header keys to check + svnConvertedText => <<'END', +Index: WebKitTools/Scripts/VCSUtils.pm +=================================================================== +--- WebKitTools/Scripts/VCSUtils.pm (revision 53004) ++++ WebKitTools/Scripts/VCSUtils.pm (working copy) +END + copiedFromPath => undef, + indexPath => "WebKitTools/Scripts/VCSUtils.pm", + sourceRevision => "53004", + # Other values to check + lastReadLine => "@@ -32,6 +32,7 @@ use strict;\n", + nextLine => " use warnings;\n", +}, +{ + # New test + diffName => "SVN: new file", + inputText => <<'END', +Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl +=================================================================== +--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) ++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) +@@ -0,0 +1,262 @@ ++#!/usr/bin/perl -w +END + # Header keys to check + svnConvertedText => <<'END', +Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl +=================================================================== +--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) ++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) +END + copiedFromPath => undef, + indexPath => "WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl", + sourceRevision => undef, + # Other values to check + lastReadLine => "@@ -0,0 +1,262 @@\n", + nextLine => "+#!/usr/bin/perl -w\n", +}, +{ + # New test + diffName => "SVN: copy", + inputText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +@@ -0,0 +1,7 @@ ++# Python file... 
+END + # Header keys to check + svnConvertedText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + sourceRevision => 53048, + # Other values to check + lastReadLine => "@@ -0,0 +1,7 @@\n", + nextLine => "+# Python file...\n", +}, +{ + # New test + diffName => "SVN: \\r\\n lines", + inputText => <<END, # No single quotes to allow interpolation of "\r" +Index: index_path.py\r +===================================================================\r +--- index_path.py (revision 53048) (from copied_from_path.py:53048)\r ++++ index_path.py (working copy)\r +@@ -0,0 +1,7 @@\r ++# Python file...\r +END + # Header keys to check + svnConvertedText => <<END, # No single quotes to allow interpolation of "\r" +Index: index_path.py\r +===================================================================\r +--- index_path.py (revision 53048) (from copied_from_path.py:53048)\r ++++ index_path.py (working copy)\r +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + sourceRevision => 53048, + # Other values to check + lastReadLine => "@@ -0,0 +1,7 @@\r\n", + nextLine => "+# Python file...\r\n", +}, +{ + # New test + diffName => "SVN: path corrections", + inputText => <<'END', +Index: index_path.py +=================================================================== +--- bad_path (revision 53048) (from copied_from_path.py:53048) ++++ bad_path (working copy) +@@ -0,0 +1,7 @@ ++# Python file... +END + # Header keys to check + svnConvertedText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + sourceRevision => 53048, + # Other values to check + lastReadLine => "@@ -0,0 +1,7 @@\n", + nextLine => "+# Python file...\n", +}, +{ + # New test + diffName => "Git: simple", + inputText => <<'END', +diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h +index f5d5e74..3b6aa92 100644 +--- a/WebCore/rendering/style/StyleFlexibleBoxData.h ++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h +@@ -47,7 +47,6 @@ public: +END + # Header keys to check + svnConvertedText => <<'END', +Index: WebCore/rendering/style/StyleFlexibleBoxData.h +=================================================================== +--- WebCore/rendering/style/StyleFlexibleBoxData.h ++++ WebCore/rendering/style/StyleFlexibleBoxData.h +END + copiedFromPath => undef, + indexPath => "WebCore/rendering/style/StyleFlexibleBoxData.h", + sourceRevision => undef, + # Other values to check + lastReadLine => "@@ -47,7 +47,6 @@ public:\n", + nextLine => undef, +}, +{ + # New test + diffName => "Git: unrecognized lines", + inputText => <<'END', +diff --git a/LayoutTests/http/tests/security/listener/xss-inactive-closure.html b/LayoutTests/http/tests/security/listener/xss-inactive-closure.html +new file mode 100644 +index 0000000..3c9f114 +--- /dev/null ++++ b/LayoutTests/http/tests/security/listener/xss-inactive-closure.html +@@ -0,0 +1,34 @@ ++<html> +END + # Header keys to check + svnConvertedText => <<'END', +Index: LayoutTests/http/tests/security/listener/xss-inactive-closure.html 
+=================================================================== +--- LayoutTests/http/tests/security/listener/xss-inactive-closure.html ++++ LayoutTests/http/tests/security/listener/xss-inactive-closure.html +END + copiedFromPath => undef, + indexPath => "LayoutTests/http/tests/security/listener/xss-inactive-closure.html", + sourceRevision => undef, + # Other values to check + lastReadLine => "@@ -0,0 +1,34 @@\n", + nextLine => "+<html>\n", +}, +); + +# Return the arguments for each assertion per test case. +# +# In particular, the number of assertions per test case is the length +# of the return value of this subroutine on a sample input. +# +# Returns @assertionArgsArrayRefs: +# $assertionArgsArrayRef: A reference to an array of parameters to pass +# to each call to is(). The parameters are-- +# $got: The value obtained +# $expected: The expected value +# $testName: The name of the test +sub testParseDiffHeaderAssertionArgs($) +{ + my ($testCaseHashRef) = @_; + + my $fileHandle; + open($fileHandle, "<", \$testCaseHashRef->{inputText}); + + my $line = <$fileHandle>; + + my ($headerHashRef, $lastReadLine) = VCSUtils::parseDiffHeader($fileHandle, $line); + + my $testNameStart = "parseDiffHeader(): [$testCaseHashRef->{diffName}] "; + + my @assertionArgsArrayRefs; # Return value + my @assertionArgs; + foreach my $diffHeaderHashRefKey (@diffHeaderHashRefKeys) { + my $testName = "${testNameStart}key=\"$diffHeaderHashRefKey\""; + @assertionArgs = ($headerHashRef->{$diffHeaderHashRefKey}, $testCaseHashRef->{$diffHeaderHashRefKey}, $testName); + push(@assertionArgsArrayRefs, \@assertionArgs); + } + + @assertionArgs = ($lastReadLine, $testCaseHashRef->{lastReadLine}, "${testNameStart}lastReadLine"); + push(@assertionArgsArrayRefs, \@assertionArgs); + + my $nextLine = <$fileHandle>; + @assertionArgs = ($nextLine, $testCaseHashRef->{nextLine}, "${testNameStart}nextLine"); + push(@assertionArgsArrayRefs, \@assertionArgs); + + return @assertionArgsArrayRefs; +} + +# Test parseDiffHeader() for the given test case. +sub testParseDiffHeader($) +{ + my ($testCaseHashRef) = @_; + + my @assertionArgsArrayRefs = testParseDiffHeaderAssertionArgs($testCaseHashRef); + + foreach my $arrayRef (@assertionArgsArrayRefs) { + # The parameters are -- is($got, $expected, $testName). + is($arrayRef->[0], $arrayRef->[1], $arrayRef->[2]); + } +} + +# Count the number of assertions per test case to calculate the total number +# of Test::More tests. We could have used any test case for the count. +my $assertionCount = testParseDiffHeaderAssertionArgs($testCaseHashRefs[0]); + +plan(tests => @testCaseHashRefs * $assertionCount); # Total number of tests + +foreach my $testCaseHashRef (@testCaseHashRefs) { + testParseDiffHeader($testCaseHashRef); +} diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl new file mode 100644 index 0000000..e6f82ca --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl @@ -0,0 +1,102 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseDiffHeader(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @diffHashRefKeys = ( # The hash reference keys to check per diff. + "copiedFromPath", + "indexPath", + "sourceRevision", + "svnConvertedText", +); + +# New test +my $testNameStart = "parsePatch(): [SVN: Rename] "; +my $patch = <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) ++++ Makefile_new (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + +my @expectedDiffHashRefs = ( +{ + svnConvertedText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + copiedFromPath => undef, + indexPath => "Makefile", + sourceRevision => "53131", +}, +{ + svnConvertedText => <<'END', +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) ++++ Makefile_new (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + copiedFromPath => "Makefile", + indexPath => "Makefile_new", + sourceRevision => "53131", +}, +); + +plan(tests => @expectedDiffHashRefs * @diffHashRefKeys); + +my $fileHandle; +open($fileHandle, "<", \$patch); + +my @gotDiffHashRefs = parsePatch($fileHandle); + +my $i = 0; +foreach my $expectedDiffHashRef (@expectedDiffHashRefs) { + + my $gotDiffHashRef = $gotDiffHashRefs[$i++]; + + foreach my $diffHashRefKey (@diffHashRefKeys) { + my $testName = "${testNameStart}[diff $i] key=\"$diffHashRefKey\""; + is($gotDiffHashRef->{$diffHashRefKey}, $expectedDiffHashRef->{$diffHashRefKey}, $testName); + } +} diff --git a/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl new file mode 100644 index 0000000..8111def --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl @@ -0,0 +1,105 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and 
use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::runPatchCommand(). + +use Test::Simple tests => 4; +use VCSUtils; + +# Call a function while suppressing STDERR. +sub callSilently($@) { + my ($func, @args) = @_; + + open(OLDERR, ">&STDERR"); + close(STDERR); + my @returnValue = &$func(@args); + open(STDERR, ">&OLDERR"); + close(OLDERR); # FIXME: Is this necessary? + + return @returnValue; +} + +# New test +$title = "runPatchCommand: Unsuccessful patch, forcing."; + +# Since $patch has no "Index:" path, passing this to runPatchCommand +# should not affect any files. +my $patch = <<'END'; +Garbage patch contents +END + +# We call via callSilently() to avoid output like the following to STDERR: +# patch: **** Only garbage was found in the patch input. +$argsHashRef = {ensureForce => 1}; +$exitStatus = callSilently(\&runPatchCommand, $patch, ".", "file_to_patch.txt", $argsHashRef); + +ok($exitStatus != 0, $title); + +# New test +$title = "runPatchCommand: New file, --dry-run."; + +# This file should not exist after the tests, but we take care with the +# file name and contents just in case. +my $fileToPatch = "temp_OK_TO_ERASE__README_FOR_MORE.txt"; +$patch = <<END; +Index: $fileToPatch +=================================================================== +--- $fileToPatch (revision 0) ++++ $fileToPatch (revision 0) +@@ -0,0 +1,5 @@ ++This is a test file for WebKitTools/Scripts/VCSUtils_unittest.pl. ++This file should not have gotten created on your system. ++If it did, some unit tests don't seem to be working quite right: ++It would be great if you could file a bug report. Thanks! ++--------------------------------------------------------------------- +END + +# --dry-run prevents creating any files. +# --silent suppresses the success message to STDOUT. 
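An illustrative aside before the Perl test resumes below: the options being exercised here map directly onto patch(1) flags, and runPatchCommand() ultimately just feeds the diff text to patch(1) and reports its exit status. A minimal Python sketch of that same idea, assuming only the standard library; the helper name and the -p0 strip level are illustrative, not part of the module's API:

import subprocess

def run_patch(patch_text, cwd, options=None):
    # Feed the diff to patch(1) on stdin and return its exit status (0 means success).
    command = ["patch", "-p0"] + (options or [])
    process = subprocess.Popen(command, stdin=subprocess.PIPE, cwd=cwd,
                               universal_newlines=True)
    process.communicate(patch_text)
    return process.returncode

# For example, a dry run that must not create any files:
#     status = run_patch(patch_text, ".", ["--dry-run", "--silent"])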
+$argsHashRef = {options => ["--dry-run", "--silent"]}; +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); + +ok($exitStatus == 0, $title); + +# New test +$title = "runPatchCommand: New file: \"$fileToPatch\"."; + +$argsHashRef = {options => ["--silent"]}; +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); + +ok($exitStatus == 0, $title); + +# New test +$title = "runPatchCommand: Reverse new file (clean up previous)."; + +$argsHashRef = {shouldReverse => 1, + options => ["--silent", "--remove-empty-files"]}; # To clean up. +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); +ok($exitStatus == 0, $title); diff --git a/WebKitTools/Scripts/webkitperl/httpd.pm b/WebKitTools/Scripts/webkitperl/httpd.pm new file mode 100644 index 0000000..d082870 --- /dev/null +++ b/WebKitTools/Scripts/webkitperl/httpd.pm @@ -0,0 +1,202 @@ +# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Module to share code to start and stop the Apache daemon. 
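Before the module body below, a note on the pattern httpd.pm implements: start the daemon, poll for its pid file with a bounded retry count, and on shutdown send SIGTERM and poll until the pid file disappears. A minimal Python sketch of the start-up half, with hypothetical names and assuming only the standard library:

import os
import subprocess
import time

def start_daemon_and_wait(command, pid_file, retries=20):
    # Launch the daemon, then poll for its pid file; return the pid, or None on timeout.
    subprocess.Popen(command)
    for _ in range(retries):
        if os.path.exists(pid_file):
            with open(pid_file) as f:
                return int(f.read().strip())
        time.sleep(1)
    return None  # callers should treat this like the "Timed out waiting for httpd to start" case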
+ +use strict; +use warnings; + +use File::Path; +use File::Spec; +use File::Spec::Functions; +use IPC::Open2; + +use webkitdirs; + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw(&getHTTPDPath &getDefaultConfigForTestDirectory &openHTTPD &closeHTTPD &getHTTPDPid &setShouldWaitForUserInterrupt); + %EXPORT_TAGS = ( ); + @EXPORT_OK = (); +} + +my $tmpDir = "/tmp"; +my $httpdPath; +my $httpdPidDir = File::Spec->catfile($tmpDir, "WebKit"); +my $httpdPidFile = File::Spec->catfile($httpdPidDir, "httpd.pid"); +my $httpdPid; +my $waitForUserInterrupt = 0; + +$SIG{'INT'} = 'cleanup'; +$SIG{'TERM'} = 'cleanup'; + +sub getHTTPDPath +{ + if (isDebianBased()) { + $httpdPath = "/usr/sbin/apache2"; + } else { + $httpdPath = "/usr/sbin/httpd"; + } + return $httpdPath; +} + +sub getDefaultConfigForTestDirectory +{ + my ($testDirectory) = @_; + die "No test directory has been specified." unless ($testDirectory); + + my $httpdConfig; + getHTTPDPath(); + if (isCygwin()) { + my $windowsConfDirectory = "$testDirectory/http/conf/"; + unless (-x "/usr/lib/apache/libphp4.dll") { + copy("$windowsConfDirectory/libphp4.dll", "/usr/lib/apache/libphp4.dll"); + chmod(0755, "/usr/lib/apache/libphp4.dll"); + } + $httpdConfig = "$windowsConfDirectory/cygwin-httpd.conf"; + } elsif (isDebianBased()) { + $httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf"; + } elsif (isFedoraBased()) { + $httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf"; + } else { + $httpdConfig = "$testDirectory/http/conf/httpd.conf"; + $httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; + } + + my $documentRoot = "$testDirectory/http/tests"; + my $jsTestResourcesDirectory = $testDirectory . "/fast/js/resources"; + my $typesConfig = "$testDirectory/http/conf/mime.types"; + my $httpdLockFile = File::Spec->catfile($httpdPidDir, "httpd.lock"); + my $httpdScoreBoardFile = File::Spec->catfile($httpdPidDir, "httpd.scoreboard"); + + my @httpdArgs = ( + "-f", "$httpdConfig", + "-C", "DocumentRoot \"$documentRoot\"", + # Setup a link to where the js test templates are stored, use -c so that mod_alias will already be loaded. 
+ "-c", "Alias /js-test-resources \"$jsTestResourcesDirectory\"", + "-c", "TypesConfig \"$typesConfig\"", + # Apache wouldn't run CGIs with permissions==700 otherwise + "-c", "User \"#$<\"", + "-c", "LockFile \"$httpdLockFile\"", + "-c", "PidFile \"$httpdPidFile\"", + "-c", "ScoreBoardFile \"$httpdScoreBoardFile\"", + ); + + # FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed + # The version of Apache we use with Cygwin does not support SSL + my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; + push(@httpdArgs, "-c", "SSLCertificateFile \"$sslCertificate\"") unless isCygwin(); + + return @httpdArgs; + +} + +sub openHTTPD(@) +{ + my (@args) = @_; + die "No HTTPD configuration has been specified" unless (@args); + mkdir($httpdPidDir, 0755); + die "No write permissions to $httpdPidDir" unless (-w $httpdPidDir); + + if (-f $httpdPidFile) { + open (PIDFILE, $httpdPidFile); + my $oldPid = <PIDFILE>; + chomp $oldPid; + close PIDFILE; + if (0 != kill 0, $oldPid) { + print "\nhttpd is already running: pid $oldPid, killing...\n"; + kill 15, $oldPid; + + my $retryCount = 20; + while ((kill(0, $oldPid) != 0) && $retryCount) { + sleep 1; + --$retryCount; + } + + die "Timed out waiting for httpd to quit" unless $retryCount; + } + } + + $httpdPath = "/usr/sbin/httpd" unless ($httpdPath); + + open2(">&1", \*HTTPDIN, $httpdPath, @args); + + my $retryCount = 20; + while (!-f $httpdPidFile && $retryCount) { + sleep 1; + --$retryCount; + } + + if (!$retryCount) { + rmtree $httpdPidDir; + die "Timed out waiting for httpd to start"; + } + + $httpdPid = <PIDFILE> if open(PIDFILE, $httpdPidFile); + chomp $httpdPid if $httpdPid; + close PIDFILE; + + waitpid($httpdPid, 0) if ($waitForUserInterrupt && $httpdPid); + + return 1; +} + +sub closeHTTPD +{ + close HTTPDIN; + if ($httpdPid) { + kill 15, $httpdPid; + my $retryCount = 20; + while (-f $httpdPidFile && $retryCount) { + sleep 1; + --$retryCount; + } + + if (!$retryCount) { + print STDERR "Timed out waiting for httpd to terminate!\n"; + return 0; + } + } + rmdir $httpdPidDir; + return 1; +} + +sub setShouldWaitForUserInterrupt +{ + $waitForUserInterrupt = 1; +} + +sub cleanup +{ + closeHTTPD(); + print "\n"; + exit(1); +} diff --git a/WebKitTools/Scripts/modules/BeautifulSoup.py b/WebKitTools/Scripts/webkitpy/BeautifulSoup.py index 34204e7..34204e7 100644 --- a/WebKitTools/Scripts/modules/BeautifulSoup.py +++ b/WebKitTools/Scripts/webkitpy/BeautifulSoup.py diff --git a/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc b/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc Binary files differnew file mode 100644 index 0000000..dffb144 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc diff --git a/WebKitTools/Scripts/webkitpy/__init__.py b/WebKitTools/Scripts/webkitpy/__init__.py new file mode 100644 index 0000000..94ecc70 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/__init__.py @@ -0,0 +1,8 @@ +# Required for Python to search this directory for module files + +import autoinstall + +# List our third-party library dependencies here and where they can be +# downloaded. 
+autoinstall.bind("ClientForm", "http://pypi.python.org/packages/source/C/ClientForm/ClientForm-0.2.10.zip", "ClientForm-0.2.10") +autoinstall.bind("mechanize", "http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip", "mechanize-0.1.11") diff --git a/WebKitTools/Scripts/webkitpy/__init__.pyc b/WebKitTools/Scripts/webkitpy/__init__.pyc Binary files differnew file mode 100644 index 0000000..d1ffa10 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/__init__.pyc diff --git a/WebKitTools/Scripts/webkitpy/autoinstall.py b/WebKitTools/Scripts/webkitpy/autoinstall.py new file mode 100644 index 0000000..467e6b4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/autoinstall.py @@ -0,0 +1,335 @@ +# Copyright (c) 2009, Daniel Krech All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Daniel Krech nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""\ +package loader for auto installing Python packages. + +A package loader in the spirit of Zero Install that can be used to +inject dependencies into the import process. + + +To install:: + + easy_install -U autoinstall + + or + + download, unpack, python setup.py install + + or + + try the bootstrap loader. See below. + + +To use:: + + # You can bind any package name to a URL pointing to something + # that can be imported using the zipimporter. + + autoinstall.bind("pymarc", "http://pypi.python.org/packages/2.5/p/pymarc/pymarc-2.1-py2.5.egg") + + import pymarc + + print pymarc.__version__, pymarc.__file__ + + +Changelog:: + +- added support for non top level packages. +- cache files now use filename part from URL. +- applied patch from Eric Seidel <eseidel@google.com> to add support +for loading modules where the module is not at the root of the .zip +file. + + +TODO:: + +- a description of the intended use case +- address other issues pointed out in: + + http://mail.python.org/pipermail/python-dev/2008-March/077926.html + +Scribbles:: + +pull vs. push +user vs. system +web vs. filesystem +auto vs. manual + +manage development sandboxes + +optional interfaces... + + def get_data(pathname) -> string with file data. + + Return the data associated with 'pathname'. 
Raise IOError if + the file wasn't found."); + + def is_package, + "is_package(fullname) -> bool. + + Return True if the module specified by fullname is a package. + Raise ZipImportError is the module couldn't be found."); + + def get_code, + "get_code(fullname) -> code object. + + Return the code object for the specified module. Raise ZipImportError + is the module couldn't be found."); + + def get_source, + "get_source(fullname) -> source string. + + Return the source code for the specified module. Raise ZipImportError + is the module couldn't be found, return None if the archive does + contain the module, but has no source for it."); + + +Autoinstall can also be bootstraped with the nascent package loader +bootstrap module. For example:: + + # or via the bootstrap + # loader. + + try: + _version = "0.2" + import autoinstall + if autoinstall.__version__ != _version: + raise ImportError("A different version than expected found.") + except ImportError, e: + # http://svn.python.org/projects/sandbox/trunk/bootstrap/bootstrap.py + import bootstrap + pypi = "http://pypi.python.org" + dir = "packages/source/a/autoinstall" + url = "%s/%s/autoinstall-%s.tar.gz" % (pypi, dir, _version) + bootstrap.main((url,)) + import autoinstall + +References:: + + http://0install.net/ + http://www.python.org/dev/peps/pep-0302/ + http://svn.python.org/projects/sandbox/trunk/import_in_py + http://0install.net/injector-find.html + http://roscidus.com/desktop/node/903 + +""" + +# To allow use of the "with" keyword for Python 2.5 users. +from __future__ import with_statement + +__version__ = "0.2" +__docformat__ = "restructuredtext en" + +import os +import new +import sys +import urllib +import logging +import tempfile +import zipimport + +_logger = logging.getLogger(__name__) + + +_importer = None + +def _getImporter(): + global _importer + if _importer is None: + _importer = Importer() + sys.meta_path.append(_importer) + return _importer + +def bind(package_name, url, zip_subpath=None): + """bind a top level package name to a URL. + + The package name should be a package name and the url should be a + url to something that can be imported using the zipimporter. + + Optional zip_subpath parameter allows searching for modules + below the root level of the zip file. + """ + _getImporter().bind(package_name, url, zip_subpath) + + +class Cache(object): + + def __init__(self, directory=None): + if directory is None: + # Default to putting the cache directory in the same directory + # as this file. + containing_directory = os.path.dirname(__file__) + directory = os.path.join(containing_directory, "autoinstall.cache.d"); + + self.directory = directory + try: + if not os.path.exists(self.directory): + self._create_cache_directory() + except Exception, err: + _logger.exception(err) + self.cache_directry = tempfile.mkdtemp() + _logger.info("Using cache directory '%s'." % self.directory) + + def _create_cache_directory(self): + _logger.debug("Creating cache directory '%s'." % self.directory) + os.mkdir(self.directory) + readme_path = os.path.join(self.directory, "README") + with open(readme_path, "w") as f: + f.write("This directory was auto-generated by '%s'.\n" + "It is safe to delete.\n" % __file__) + + def get(self, url): + _logger.info("Getting '%s' from cache." 
% url) + filename = url.rsplit("/")[-1] + + # so that source url is significant in determining cache hits + d = os.path.join(self.directory, "%s" % hash(url)) + if not os.path.exists(d): + os.mkdir(d) + + filename = os.path.join(d, filename) + + if os.path.exists(filename): + _logger.debug("... already cached in file '%s'." % filename) + else: + _logger.debug("... not in cache. Caching in '%s'." % filename) + stream = file(filename, "wb") + self.download(url, stream) + stream.close() + return filename + + def download(self, url, stream): + _logger.info("Downloading: %s" % url) + try: + netstream = urllib.urlopen(url) + code = 200 + if hasattr(netstream, "getcode"): + code = netstream.getcode() + if not 200 <= code < 300: + raise ValueError("HTTP Error code %s" % code) + except Exception, err: + _logger.exception(err) + + BUFSIZE = 2**13 # 8KB + size = 0 + while True: + data = netstream.read(BUFSIZE) + if not data: + break + stream.write(data) + size += len(data) + netstream.close() + _logger.info("Downloaded %d bytes." % size) + + +class Importer(object): + + def __init__(self): + self.packages = {} + self.__cache = None + + def __get_store(self): + return self.__store + store = property(__get_store) + + def _get_cache(self): + if self.__cache is None: + self.__cache = Cache() + return self.__cache + def _set_cache(self, cache): + self.__cache = cache + cache = property(_get_cache, _set_cache) + + def find_module(self, fullname, path=None): + """-> self or None. + + Search for a module specified by 'fullname'. 'fullname' must be + the fully qualified (dotted) module name. It returns the + zipimporter instance itself if the module was found, or None if + it wasn't. The optional 'path' argument is ignored -- it's + there for compatibility with the importer protocol."); + """ + _logger.debug("find_module(%s, path=%s)" % (fullname, path)) + + if fullname in self.packages: + (url, zip_subpath) = self.packages[fullname] + filename = self.cache.get(url) + zip_path = "%s/%s" % (filename, zip_subpath) if zip_subpath else filename + _logger.debug("fullname: %s url: %s path: %s zip_path: %s" % (fullname, url, path, zip_path)) + try: + loader = zipimport.zipimporter(zip_path) + _logger.debug("returning: %s" % loader) + except Exception, e: + _logger.exception(e) + return None + return loader + return None + + def bind(self, package_name, url, zip_subpath): + _logger.info("binding: %s -> %s subpath: %s" % (package_name, url, zip_subpath)) + self.packages[package_name] = (url, zip_subpath) + + +if __name__=="__main__": + import logging + #logging.basicConfig() + logger = logging.getLogger() + + console = logging.StreamHandler() + console.setLevel(logging.DEBUG) + # set a format which is simpler for console use + formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s') + # tell the handler to use this format + console.setFormatter(formatter) + # add the handler to the root logger + logger.addHandler(console) + logger.setLevel(logging.INFO) + + bind("pymarc", "http://pypi.python.org/packages/2.5/p/pymarc/pymarc-2.1-py2.5.egg") + + import pymarc + + print pymarc.__version__, pymarc.__file__ + + assert pymarc.__version__=="2.1" + + d = _getImporter().cache.directory + assert d in pymarc.__file__, "'%s' not found in pymarc.__file__ (%s)" % (d, pymarc.__file__) + + # Can now also bind to non top level packages. The packages + # leading up to the package being bound will need to be defined + # however. 
+ # + # bind("rdf.plugins.stores.memory", + # "http://pypi.python.org/packages/2.5/r/rdf.plugins.stores.memeory/rdf.plugins.stores.memory-0.9a-py2.5.egg") + # + # from rdf.plugins.stores.memory import Memory + + diff --git a/WebKitTools/Scripts/webkitpy/autoinstall.pyc b/WebKitTools/Scripts/webkitpy/autoinstall.pyc Binary files differnew file mode 100644 index 0000000..68e46fd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/autoinstall.pyc diff --git a/WebKitTools/Scripts/webkitpy/bugzilla.py b/WebKitTools/Scripts/webkitpy/bugzilla.py new file mode 100644 index 0000000..c1cf41d --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/bugzilla.py @@ -0,0 +1,789 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# Copyright (c) 2010 Research In Motion Limited. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for interacting with Bugzilla + +import re +import subprocess + +from datetime import datetime # used in timestamp() + +# Import WebKit-specific modules. +from webkitpy.webkit_logging import error, log +from webkitpy.committers import CommitterList +from webkitpy.credentials import Credentials + +# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy +# so this import should always succeed. 
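A note on the two imports that follow: BeautifulSoup is used to scrape Bugzilla's HTML and XML pages, while mechanize's Browser drives its forms. Nearly every method in the Bugzilla class below follows the same browser pattern, sketched here with a placeholder URL, form name and field names; only the Browser calls themselves are real mechanize API:

from mechanize import Browser

browser = Browser()
browser.set_handle_robots(False)            # the Bugzilla class sets this too
browser.open("https://example.org/login")   # placeholder URL
browser.select_form(name="login")           # pick the form by name
browser["username"] = "someone@example.org" # fields are set like dictionary entries
browser["password"] = "secret"
response = browser.submit()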
+from .BeautifulSoup import BeautifulSoup, SoupStrainer + +from mechanize import Browser + + +def parse_bug_id(message): + match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message) + if match: + return int(match.group('bug_id')) + match = re.search( + Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", + message) + if match: + return int(match.group('bug_id')) + return None + + +def timestamp(): + return datetime.now().strftime("%Y%m%d%H%M%S") + + +class Attachment(object): + + def __init__(self, attachment_dictionary, bug): + self._attachment_dictionary = attachment_dictionary + self._bug = bug + self._reviewer = None + self._committer = None + + def _bugzilla(self): + return self._bug._bugzilla + + def id(self): + return int(self._attachment_dictionary.get("id")) + + def attacher_is_committer(self): + return self._bugzilla.committers.committer_by_email( + patch.attacher_email()) + + def attacher_email(self): + return self._attachment_dictionary.get("attacher_email") + + def bug(self): + return self._bug + + def bug_id(self): + return int(self._attachment_dictionary.get("bug_id")) + + def is_patch(self): + return not not self._attachment_dictionary.get("is_patch") + + def is_obsolete(self): + return not not self._attachment_dictionary.get("is_obsolete") + + def name(self): + return self._attachment_dictionary.get("name") + + def review(self): + return self._attachment_dictionary.get("review") + + def commit_queue(self): + return self._attachment_dictionary.get("commit-queue") + + def url(self): + # FIXME: This should just return + # self._bugzilla().attachment_url_for_id(self.id()). scm_unittest.py + # depends on the current behavior. + return self._attachment_dictionary.get("url") + + def _validate_flag_value(self, flag): + email = self._attachment_dictionary.get("%s_email" % flag) + if not email: + return None + committer = getattr(self._bugzilla().committers, + "%s_by_email" % flag)(email) + if committer: + return committer + log("Warning, attachment %s on bug %s has invalid %s (%s)" % ( + self._attachment_dictionary['id'], + self._attachment_dictionary['bug_id'], flag, email)) + + def reviewer(self): + if not self._reviewer: + self._reviewer = self._validate_flag_value("reviewer") + return self._reviewer + + def committer(self): + if not self._committer: + self._committer = self._validate_flag_value("committer") + return self._committer + + +class Bug(object): + # FIXME: This class is kinda a hack for now. It exists so we have one + # place to hold bug logic, even if much of the code deals with + # dictionaries still. 
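Stepping back to the module-level helper defined above: parse_bug_id() accepts both the short webkit.org/b/ form and the full show_bug.cgi URL, and returns None for anything else. A quick usage illustration; the import path assumes the module is installed as webkitpy.bugzilla, as in this patch:

from webkitpy.bugzilla import parse_bug_id

assert parse_bug_id("Fix for http://webkit.org/b/12345.") == 12345
assert parse_bug_id("https://bugs.webkit.org/show_bug.cgi?id=12345") == 12345
assert parse_bug_id("no bug URL in this text") is None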
+ + def __init__(self, bug_dictionary, bugzilla): + self.bug_dictionary = bug_dictionary + self._bugzilla = bugzilla + + def id(self): + return self.bug_dictionary["id"] + + def assigned_to_email(self): + return self.bug_dictionary["assigned_to_email"] + + # Rarely do we actually want obsolete attachments + def attachments(self, include_obsolete=False): + attachments = self.bug_dictionary["attachments"] + if not include_obsolete: + attachments = filter(lambda attachment: + not attachment["is_obsolete"], attachments) + return [Attachment(attachment, self) for attachment in attachments] + + def patches(self, include_obsolete=False): + return [patch for patch in self.attachments(include_obsolete) + if patch.is_patch()] + + def unreviewed_patches(self): + return [patch for patch in self.patches() if patch.review() == "?"] + + def reviewed_patches(self, include_invalid=False): + patches = [patch for patch in self.patches() if patch.review() == "+"] + if include_invalid: + return patches + # Checking reviewer() ensures that it was both reviewed and has a valid + # reviewer. + return filter(lambda patch: patch.reviewer(), patches) + + def commit_queued_patches(self, include_invalid=False): + patches = [patch for patch in self.patches() + if patch.commit_queue() == "+"] + if include_invalid: + return patches + # Checking committer() ensures that it was both commit-queue+'d and has + # a valid committer. + return filter(lambda patch: patch.committer(), patches) + + +# A container for all of the logic for making and parsing buzilla queries. +class BugzillaQueries(object): + + def __init__(self, bugzilla): + self._bugzilla = bugzilla + + # Note: _load_query and _fetch_bug are the only two methods which access + # self._bugzilla. + + def _load_query(self, query): + self._bugzilla.authenticate() + + full_url = "%s%s" % (self._bugzilla.bug_server_url, query) + return self._bugzilla.browser.open(full_url) + + def _fetch_bug(self, bug_id): + return self._bugzilla.fetch_bug(bug_id) + + def _fetch_bug_ids_advanced_query(self, query): + soup = BeautifulSoup(self._load_query(query)) + # The contents of the <a> inside the cells in the first column happen + # to be the bug id. + return [int(bug_link_cell.find("a").string) + for bug_link_cell in soup('td', "first-child")] + + def _parse_attachment_ids_request_query(self, page): + digits = re.compile("\d+") + attachment_href = re.compile("attachment.cgi\?id=\d+&action=review") + attachment_links = SoupStrainer("a", href=attachment_href) + return [int(digits.search(tag["href"]).group(0)) + for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)] + + def _fetch_attachment_ids_request_query(self, query): + return self._parse_attachment_ids_request_query(self._load_query(query)) + + # List of all r+'d bugs. 
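The advanced-query helpers above all reduce to the same scrape: load a buglist page and read the bug id out of the link in each first-column cell. A standalone illustration of that parse, using the bundled BeautifulSoup and a made-up two-row buglist snippet; the query methods below, starting with the r+ list, feed real pages through this same code path:

from webkitpy.BeautifulSoup import BeautifulSoup

sample_buglist = """
<table>
  <tr><td class="first-child"><a href="show_bug.cgi?id=30084">30084</a></td><td>...</td></tr>
  <tr><td class="first-child"><a href="show_bug.cgi?id=32585">32585</a></td><td>...</td></tr>
</table>
"""
soup = BeautifulSoup(sample_buglist)
bug_ids = [int(cell.find("a").string) for cell in soup("td", "first-child")]
assert bug_ids == [30084, 32585]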
+ def fetch_bug_ids_from_pending_commit_list(self): + needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B" + return self._fetch_bug_ids_advanced_query(needs_commit_query_url) + + def fetch_patches_from_pending_commit_list(self): + return sum([self._fetch_bug(bug_id).reviewed_patches() + for bug_id in self.fetch_bug_ids_from_pending_commit_list()], []) + + def fetch_bug_ids_from_commit_queue(self): + commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed" + return self._fetch_bug_ids_advanced_query(commit_queue_url) + + def fetch_patches_from_commit_queue(self): + # This function will only return patches which have valid committers + # set. It won't reject patches with invalid committers/reviewers. + return sum([self._fetch_bug(bug_id).commit_queued_patches() + for bug_id in self.fetch_bug_ids_from_commit_queue()], []) + + def _fetch_bug_ids_from_review_queue(self): + review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?" + return self._fetch_bug_ids_advanced_query(review_queue_url) + + def fetch_patches_from_review_queue(self, limit=None): + # [:None] returns the whole array. + return sum([self._fetch_bug(bug_id).unreviewed_patches() + for bug_id in self._fetch_bug_ids_from_review_queue()[:limit]], []) + + # FIXME: Why do we have both fetch_patches_from_review_queue and + # fetch_attachment_ids_from_review_queue?? + # NOTE: This is also the only client of _fetch_attachment_ids_request_query + + def fetch_attachment_ids_from_review_queue(self): + review_queue_url = "request.cgi?action=queue&type=review&group=type" + return self._fetch_attachment_ids_request_query(review_queue_url) + + +class CommitterValidator(object): + + def __init__(self, bugzilla): + self._bugzilla = bugzilla + + # _view_source_url belongs in some sort of webkit_config.py module. + def _view_source_url(self, local_path): + return "http://trac.webkit.org/browser/trunk/%s" % local_path + + def _flag_permission_rejection_message(self, setter_email, flag_name): + # This could be computed from CommitterList.__file__ + committer_list = "WebKitTools/Scripts/webkitpy/committers.py" + # Should come from some webkit_config.py + contribution_guidlines = "http://webkit.org/coding/contributing.html" + # This could be queried from the status_server. + queue_administrator = "eseidel@chromium.org" + # This could be queried from the tool. + queue_name = "commit-queue" + message = "%s does not have %s permissions according to %s." % ( + setter_email, + flag_name, + self._view_source_url(committer_list)) + message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % ( + flag_name, contribution_guidlines) + message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed). " % ( + flag_name, committer_list) + message += "Due to bug 30084 the %s will require a restart after your change. " % queue_name + message += "Please contact %s to request a %s restart. " % ( + queue_administrator, queue_name) + message += "After restart the %s will correctly respect your %s rights." 
% ( + queue_name, flag_name) + return message + + def _validate_setter_email(self, patch, result_key, rejection_function): + committer = getattr(patch, result_key)() + # If the flag is set, and we don't recognize the setter, reject the + # flag! + setter_email = patch._attachment_dictionary.get("%s_email" % result_key) + if setter_email and not committer: + rejection_function(patch.id(), + self._flag_permission_rejection_message(setter_email, + result_key)) + return False + return True + + def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches): + validated_patches = [] + for patch in patches: + if (self._validate_setter_email( + patch, "reviewer", self.reject_patch_from_review_queue) + and self._validate_setter_email( + patch, "committer", self.reject_patch_from_commit_queue)): + validated_patches.append(patch) + return validated_patches + + def reject_patch_from_commit_queue(self, + attachment_id, + additional_comment_text=None): + comment_text = "Rejecting patch %s from commit-queue." % attachment_id + self._bugzilla.set_flag_on_attachment(attachment_id, + "commit-queue", + "-", + comment_text, + additional_comment_text) + + def reject_patch_from_review_queue(self, + attachment_id, + additional_comment_text=None): + comment_text = "Rejecting patch %s from review queue." % attachment_id + self._bugzilla.set_flag_on_attachment(attachment_id, + 'review', + '-', + comment_text, + additional_comment_text) + + +class Bugzilla(object): + + def __init__(self, dryrun=False, committers=CommitterList()): + self.dryrun = dryrun + self.authenticated = False + self.queries = BugzillaQueries(self) + self.committers = committers + + # FIXME: We should use some sort of Browser mock object when in dryrun + # mode (to prevent any mistakes). + self.browser = Browser() + # Ignore bugs.webkit.org/robots.txt until we fix it to allow this + # script. + self.browser.set_handle_robots(False) + + # FIXME: Much of this should go into some sort of config module: + bug_server_host = "bugs.webkit.org" + bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host) + bug_server_url = "https://%s/" % bug_server_host + unassigned_email = "webkit-unassigned@lists.webkit.org" + + def bug_url_for_bug_id(self, bug_id, xml=False): + content_type = "&ctype=xml" if xml else "" + return "%sshow_bug.cgi?id=%s%s" % (self.bug_server_url, + bug_id, + content_type) + + def short_bug_url_for_bug_id(self, bug_id): + return "http://webkit.org/b/%s" % bug_id + + def attachment_url_for_id(self, attachment_id, action="view"): + action_param = "" + if action and action != "view": + action_param = "&action=%s" % action + return "%sattachment.cgi?id=%s%s" % (self.bug_server_url, + attachment_id, + action_param) + + def _parse_attachment_flag(self, + element, + flag_name, + attachment, + result_key): + flag = element.find('flag', attrs={'name': flag_name}) + if flag: + attachment[flag_name] = flag['status'] + if flag['status'] == '+': + attachment[result_key] = flag['setter'] + + def _parse_attachment_element(self, element, bug_id): + attachment = {} + attachment['bug_id'] = bug_id + attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1") + attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1") + attachment['id'] = int(element.find('attachid').string) + # FIXME: No need to parse out the url here. 
+ attachment['url'] = self.attachment_url_for_id(attachment['id']) + attachment['name'] = unicode(element.find('desc').string) + attachment['attacher_email'] = str(element.find('attacher').string) + attachment['type'] = str(element.find('type').string) + self._parse_attachment_flag( + element, 'review', attachment, 'reviewer_email') + self._parse_attachment_flag( + element, 'commit-queue', attachment, 'committer_email') + return attachment + + def _parse_bug_page(self, page): + soup = BeautifulSoup(page) + bug = {} + bug["id"] = int(soup.find("bug_id").string) + bug["title"] = unicode(soup.find("short_desc").string) + bug["reporter_email"] = str(soup.find("reporter").string) + bug["assigned_to_email"] = str(soup.find("assigned_to").string) + bug["cc_emails"] = [str(element.string) + for element in soup.findAll('cc')] + bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')] + return bug + + # Makes testing fetch_*_from_bug() possible until we have a better + # BugzillaNetwork abstration. + + def _fetch_bug_page(self, bug_id): + bug_url = self.bug_url_for_bug_id(bug_id, xml=True) + log("Fetching: %s" % bug_url) + return self.browser.open(bug_url) + + def fetch_bug_dictionary(self, bug_id): + return self._parse_bug_page(self._fetch_bug_page(bug_id)) + + # FIXME: A BugzillaCache object should provide all these fetch_ methods. + + def fetch_bug(self, bug_id): + return Bug(self.fetch_bug_dictionary(bug_id), self) + + def _parse_bug_id_from_attachment_page(self, page): + # The "Up" relation happens to point to the bug. + up_link = BeautifulSoup(page).find('link', rel='Up') + if not up_link: + # This attachment does not exist (or you don't have permissions to + # view it). + return None + match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href']) + return int(match.group('bug_id')) + + def bug_id_for_attachment_id(self, attachment_id): + self.authenticate() + + attachment_url = self.attachment_url_for_id(attachment_id, 'edit') + log("Fetching: %s" % attachment_url) + page = self.browser.open(attachment_url) + return self._parse_bug_id_from_attachment_page(page) + + # FIXME: This should just return Attachment(id), which should be able to + # lazily fetch needed data. + + def fetch_attachment(self, attachment_id): + # We could grab all the attachment details off of the attachment edit + # page but we already have working code to do so off of the bugs page, + # so re-use that. + bug_id = self.bug_id_for_attachment_id(attachment_id) + if not bug_id: + return None + attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True) + for attachment in attachments: + if attachment.id() == int(attachment_id): + return attachment + return None # This should never be hit. + + def authenticate(self): + if self.authenticated: + return + + if self.dryrun: + log("Skipping log in for dry run...") + self.authenticated = True + return + + attempts = 0 + while not self.authenticated: + attempts += 1 + (username, password) = Credentials( + self.bug_server_host, git_prefix="bugzilla").read_credentials() + + log("Logging in as %s..." 
% username) + self.browser.open(self.bug_server_url + + "index.cgi?GoAheadAndLogIn=1") + self.browser.select_form(name="login") + self.browser['Bugzilla_login'] = username + self.browser['Bugzilla_password'] = password + response = self.browser.submit() + + match = re.search("<title>(.+?)</title>", response.read()) + # If the resulting page has a title, and it contains the word + # "invalid" assume it's the login failure page. + if match and re.search("Invalid", match.group(1), re.IGNORECASE): + errorMessage = "Bugzilla login failed: %s" % match.group(1) + # raise an exception only if this was the last attempt + if attempts < 5: + log(errorMessage) + else: + raise Exception(errorMessage) + else: + self.authenticated = True + + def _fill_attachment_form(self, + description, + patch_file_object, + comment_text=None, + mark_for_review=False, + mark_for_commit_queue=False, + mark_for_landing=False, bug_id=None): + self.browser['description'] = description + self.browser['ispatch'] = ("1",) + self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',) + + if mark_for_landing: + self.browser['flag_type-3'] = ('+',) + elif mark_for_commit_queue: + self.browser['flag_type-3'] = ('?',) + else: + self.browser['flag_type-3'] = ('X',) + + if bug_id: + patch_name = "bug-%s-%s.patch" % (bug_id, timestamp()) + else: + patch_name ="%s.patch" % timestamp() + self.browser.add_file(patch_file_object, + "text/plain", + patch_name, + 'data') + + def add_patch_to_bug(self, + bug_id, + patch_file_object, + description, + comment_text=None, + mark_for_review=False, + mark_for_commit_queue=False, + mark_for_landing=False): + self.authenticate() + + log('Adding patch "%s" to %sshow_bug.cgi?id=%s' % (description, + self.bug_server_url, + bug_id)) + + if self.dryrun: + log(comment_text) + return + + self.browser.open("%sattachment.cgi?action=enter&bugid=%s" % ( + self.bug_server_url, bug_id)) + self.browser.select_form(name="entryform") + self._fill_attachment_form(description, + patch_file_object, + mark_for_review=mark_for_review, + mark_for_commit_queue=mark_for_commit_queue, + mark_for_landing=mark_for_landing, + bug_id=bug_id) + if comment_text: + log(comment_text) + self.browser['comment'] = comment_text + self.browser.submit() + + def prompt_for_component(self, components): + log("Please pick a component:") + i = 0 + for name in components: + i += 1 + log("%2d. 
%s" % (i, name)) + result = int(raw_input("Enter a number: ")) - 1 + return components[result] + + def _check_create_bug_response(self, response_html): + match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>", + response_html) + if match: + return match.group('bug_id') + + match = re.search( + '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">', + response_html, + re.DOTALL) + error_message = "FAIL" + if match: + text_lines = BeautifulSoup( + match.group('error_message')).findAll(text=True) + error_message = "\n" + '\n'.join( + [" " + line.strip() + for line in text_lines if line.strip()]) + raise Exception("Bug not created: %s" % error_message) + + def create_bug(self, + bug_title, + bug_description, + component=None, + patch_file_object=None, + patch_description=None, + cc=None, + mark_for_review=False, + mark_for_commit_queue=False): + self.authenticate() + + log('Creating bug with title "%s"' % bug_title) + if self.dryrun: + log(bug_description) + return + + self.browser.open(self.bug_server_url + "enter_bug.cgi?product=WebKit") + self.browser.select_form(name="Create") + component_items = self.browser.find_control('component').items + component_names = map(lambda item: item.name, component_items) + if not component: + component = "New Bugs" + if component not in component_names: + component = self.prompt_for_component(component_names) + self.browser['component'] = [component] + if cc: + self.browser['cc'] = cc + self.browser['short_desc'] = bug_title + self.browser['comment'] = bug_description + + if patch_file_object: + self._fill_attachment_form( + patch_description, + patch_file_object, + mark_for_review=mark_for_review, + mark_for_commit_queue=mark_for_commit_queue) + + response = self.browser.submit() + + bug_id = self._check_create_bug_response(response.read()) + log("Bug %s created." % bug_id) + log("%sshow_bug.cgi?id=%s" % (self.bug_server_url, bug_id)) + return bug_id + + def _find_select_element_for_flag(self, flag_name): + # FIXME: This will break if we ever re-order attachment flags + if flag_name == "review": + return self.browser.find_control(type='select', nr=0) + if flag_name == "commit-queue": + return self.browser.find_control(type='select', nr=1) + raise Exception("Don't know how to find flag named \"%s\"" % flag_name) + + def clear_attachment_flags(self, + attachment_id, + additional_comment_text=None): + self.authenticate() + + comment_text = "Clearing flags on attachment: %s" % attachment_id + if additional_comment_text: + comment_text += "\n\n%s" % additional_comment_text + log(comment_text) + + if self.dryrun: + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + self.browser.set_value(comment_text, name='comment', nr=0) + self._find_select_element_for_flag('review').value = ("X",) + self._find_select_element_for_flag('commit-queue').value = ("X",) + self.browser.submit() + + def set_flag_on_attachment(self, + attachment_id, + flag_name, + flag_value, + comment_text, + additional_comment_text): + # FIXME: We need a way to test this function on a live bugzilla + # instance. 
+ + self.authenticate() + + if additional_comment_text: + comment_text += "\n\n%s" % additional_comment_text + log(comment_text) + + if self.dryrun: + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + self.browser.set_value(comment_text, name='comment', nr=0) + self._find_select_element_for_flag(flag_name).value = (flag_value,) + self.browser.submit() + + # FIXME: All of these bug editing methods have a ridiculous amount of + # copy/paste code. + + def obsolete_attachment(self, attachment_id, comment_text=None): + self.authenticate() + + log("Obsoleting attachment: %s" % attachment_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + self.browser.find_control('isobsolete').items[0].selected = True + # Also clear any review flag (to remove it from review/commit queues) + self._find_select_element_for_flag('review').value = ("X",) + self._find_select_element_for_flag('commit-queue').value = ("X",) + if comment_text: + log(comment_text) + # Bugzilla has two textareas named 'comment', one is somehow + # hidden. We want the first. + self.browser.set_value(comment_text, name='comment', nr=0) + self.browser.submit() + + def add_cc_to_bug(self, bug_id, email_address_list): + self.authenticate() + + log("Adding %s to the CC list for bug %s" % (email_address_list, + bug_id)) + if self.dryrun: + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + self.browser["newcc"] = ", ".join(email_address_list) + self.browser.submit() + + def post_comment_to_bug(self, bug_id, comment_text, cc=None): + self.authenticate() + + log("Adding comment to bug %s" % bug_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + self.browser["comment"] = comment_text + if cc: + self.browser["newcc"] = ", ".join(cc) + self.browser.submit() + + def close_bug_as_fixed(self, bug_id, comment_text=None): + self.authenticate() + + log("Closing bug %s as fixed" % bug_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + if comment_text: + log(comment_text) + self.browser['comment'] = comment_text + self.browser['bug_status'] = ['RESOLVED'] + self.browser['resolution'] = ['FIXED'] + self.browser.submit() + + def reassign_bug(self, bug_id, assignee, comment_text=None): + self.authenticate() + + log("Assigning bug %s to %s" % (bug_id, assignee)) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + if comment_text: + log(comment_text) + self.browser["comment"] = comment_text + self.browser["assigned_to"] = assignee + self.browser.submit() + + def reopen_bug(self, bug_id, comment_text): + self.authenticate() + + log("Re-opening bug %s" % bug_id) + # Bugzilla requires a comment when re-opening a bug, so we know it will + # never be None. 
+ log(comment_text) + if self.dryrun: + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + bug_status = self.browser.find_control("bug_status", type="select") + # This is a hack around the fact that ClientForm.ListControl seems to + # have no simpler way to ask if a control has an item named "REOPENED" + # without using exceptions for control flow. + possible_bug_statuses = map(lambda item: item.name, bug_status.items) + if "REOPENED" in possible_bug_statuses: + bug_status.value = ["REOPENED"] + else: + log("Did not reopen bug %s. " + + "It appears to already be open with status %s." % ( + bug_id, bug_status.value)) + self.browser['comment'] = comment_text + self.browser.submit() diff --git a/WebKitTools/Scripts/webkitpy/bugzilla.pyc b/WebKitTools/Scripts/webkitpy/bugzilla.pyc Binary files differnew file mode 100644 index 0000000..dfde47c --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/bugzilla.pyc diff --git a/WebKitTools/Scripts/modules/bugzilla_unittest.py b/WebKitTools/Scripts/webkitpy/bugzilla_unittest.py index fb7f8c4..d555f78 100644 --- a/WebKitTools/Scripts/modules/bugzilla_unittest.py +++ b/WebKitTools/Scripts/webkitpy/bugzilla_unittest.py @@ -28,13 +28,39 @@ import unittest -from modules.committers import CommitterList, Reviewer, Committer -from modules.bugzilla import Bugzilla, parse_bug_id +from webkitpy.committers import CommitterList, Reviewer, Committer +from webkitpy.bugzilla import Bugzilla, BugzillaQueries, parse_bug_id, CommitterValidator +from webkitpy.outputcapture import OutputCapture +from webkitpy.mock import Mock -from modules.BeautifulSoup import BeautifulSoup +from webkitpy.BeautifulSoup import BeautifulSoup -class BugzillaTest(unittest.TestCase): +class MockBrowser(object): + def open(self, url): + pass + + def select_form(self, name): + pass + + def __setitem__(self, key, value): + pass + + def submit(self): + pass + +class CommitterValidatorTest(unittest.TestCase): + def test_flag_permission_rejection_message(self): + validator = CommitterValidator(bugzilla=None) + expected_messsage="""foo@foo.com does not have review permissions according to http://trac.webkit.org/browser/trunk/WebKitTools/Scripts/webkitpy/committers.py. + +- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags. + +- If you have review rights please correct the error in WebKitTools/Scripts/webkitpy/committers.py by adding yourself to the file (no review needed). Due to bug 30084 the commit-queue will require a restart after your change. Please contact eseidel@chromium.org to request a commit-queue restart. 
After restart the commit-queue will correctly respect your review rights.""" + self.assertEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_messsage) + + +class BugzillaTest(unittest.TestCase): _example_attachment = ''' <attachment isobsolete="1" @@ -88,20 +114,110 @@ class BugzillaTest(unittest.TestCase): self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345")) self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345")) + _example_bug = """ +<?xml version="1.0" encoding="UTF-8" standalone="yes" ?> +<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd"> +<bugzilla version="3.2.3" + urlbase="https://bugs.webkit.org/" + maintainer="admin@webkit.org" + exporter="eric@webkit.org" +> + <bug> + <bug_id>32585</bug_id> + <creation_ts>2009-12-15 15:17 PST</creation_ts> + <short_desc>bug to test webkit-patch and commit-queue failures</short_desc> + <delta_ts>2009-12-27 21:04:50 PST</delta_ts> + <reporter_accessible>1</reporter_accessible> + <cclist_accessible>1</cclist_accessible> + <classification_id>1</classification_id> + <classification>Unclassified</classification> + <product>WebKit</product> + <component>Tools / Tests</component> + <version>528+ (Nightly build)</version> + <rep_platform>PC</rep_platform> + <op_sys>Mac OS X 10.5</op_sys> + <bug_status>NEW</bug_status> + <priority>P2</priority> + <bug_severity>Normal</bug_severity> + <target_milestone>---</target_milestone> + <everconfirmed>1</everconfirmed> + <reporter name="Eric Seidel">eric@webkit.org</reporter> + <assigned_to name="Nobody">webkit-unassigned@lists.webkit.org</assigned_to> + <cc>foo@bar.com</cc> + <cc>example@example.com</cc> + <long_desc isprivate="0"> + <who name="Eric Seidel">eric@webkit.org</who> + <bug_when>2009-12-15 15:17:28 PST</bug_when> + <thetext>bug to test webkit-patch and commit-queue failures +Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext> + </long_desc> + <attachment + isobsolete="0" + ispatch="1" + isprivate="0" + > + <attachid>45548</attachid> + <date>2009-12-27 23:51 PST</date> + <desc>Patch</desc> + <filename>bug-32585-20091228005112.patch</filename> + <type>text/plain</type> + <size>10882</size> + <attacher>mjs@apple.com</attacher> + + <token>1261988248-dc51409e9c421a4358f365fa8bec8357</token> + <data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09 +removed-because-it-was-really-long +ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg== +</data> + + <flag name="review" + id="27602" + status="?" 
+ setter="mjs@apple.com" + /> + </attachment> + </bug> +</bugzilla> +""" + _expected_example_bug_parsing = { + "id" : 32585, + "title" : u"bug to test webkit-patch and commit-queue failures", + "cc_emails" : ["foo@bar.com", "example@example.com"], + "reporter_email" : "eric@webkit.org", + "assigned_to_email" : "webkit-unassigned@lists.webkit.org", + "attachments" : [{ + 'name': u'Patch', + 'url' : "https://bugs.webkit.org/attachment.cgi?id=45548", + 'is_obsolete': False, + 'review': '?', + 'is_patch': True, + 'attacher_email': 'mjs@apple.com', + 'bug_id': 32585, + 'type': 'text/plain', + 'id': 45548 + }], + } + + def _assert_dictionaries_equal(self, actual, expected): + # Make sure we aren't parsing more or less than we expect + self.assertEquals(sorted(actual.keys()), sorted(expected.keys())) + + for key, expected_value in expected.items(): + self.assertEquals(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value))) + + def test_bug_parsing(self): + bug = Bugzilla()._parse_bug_page(self._example_bug) + self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing) + + # This could be combined into test_bug_parsing later if desired. def test_attachment_parsing(self): bugzilla = Bugzilla() - soup = BeautifulSoup(self._example_attachment) attachment_element = soup.find("attachment") attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id']) self.assertTrue(attachment) - - # Make sure we aren't parsing more or less than we expect - self.assertEquals(attachment.keys(), self._expected_example_attachment_parsing.keys()) - - for key, expected_value in self._expected_example_attachment_parsing.items(): - self.assertEquals(attachment[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, attachment[key], expected_value))) + self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing) _sample_attachment_detail_page = """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" @@ -118,6 +234,15 @@ class BugzillaTest(unittest.TestCase): bugzilla = Bugzilla() self.assertEquals(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page)) + def test_add_cc_to_bug(self): + bugzilla = Bugzilla() + bugzilla.browser = MockBrowser() + bugzilla.authenticate = lambda: None + expected_stderr = "Adding ['adam@example.com'] to the CC list for bug 42\n" + OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["adam@example.com"]], expected_stderr=expected_stderr) + + +class BugzillaQueriesTest(unittest.TestCase): _sample_request_page = """ <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> @@ -166,8 +291,13 @@ class BugzillaTest(unittest.TestCase): """ def test_request_page_parsing(self): - bugzilla = Bugzilla() - self.assertEquals([40511, 40722, 40723], bugzilla._parse_attachment_ids_request_query(self._sample_request_page)) + queries = BugzillaQueries(None) + self.assertEquals([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page)) + + def test_load_query(self): + queries = BugzillaQueries(Mock()) + queries._load_query("request.cgi?action=queue&type=review&group=type") + if __name__ == '__main__': unittest.main() diff --git a/WebKitTools/Scripts/modules/buildbot.py b/WebKitTools/Scripts/webkitpy/buildbot.py index 548cad8..38828fd 100644 --- a/WebKitTools/Scripts/modules/buildbot.py +++ 
b/WebKitTools/Scripts/webkitpy/buildbot.py @@ -1,9 +1,9 @@ # Copyright (c) 2009, Google Inc. All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -13,7 +13,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -32,51 +32,71 @@ import re import urllib2 # Import WebKit-specific modules. -from modules.logging import log +from webkitpy.webkit_logging import log -# WebKit includes a built copy of BeautifulSoup in Scripts/modules +# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy # so this import should always succeed. from .BeautifulSoup import BeautifulSoup + class BuildBot: + default_host = "build.webkit.org" + def __init__(self, host=default_host): self.buildbot_host = host self.buildbot_server_url = "http://%s/" % self.buildbot_host - - # If any of the Leopard build/test bots or the Windows builders are red we should not be landing patches. - # Other builders should be added to this list once they're known to be stable. - self.core_builder_names_regexps = [ 'Leopard', "Windows.*Build" ] - # If WebKit's buildbot has an XMLRPC interface we could use, we could do something more sophisticated here. - # For now we just parse out the basics, enough to support basic questions like "is the tree green?" + # If any Leopard builder/tester, Windows builder or Chromium builder is + # red we should not be landing patches. Other builders should be added + # to this list once they are known to be reliable. + # See https://bugs.webkit.org/show_bug.cgi?id=33296 and related bugs. + self.core_builder_names_regexps = [ + "Leopard", + "Windows.*Build", + "Chromium", + ] + def _parse_builder_status_from_row(self, status_row): + # If WebKit's buildbot has an XMLRPC interface we could use, we could + # do something more sophisticated here. For now we just parse out the + # basics, enough to support basic questions like "is the tree green?" status_cells = status_row.findAll('td') builder = {} name_link = status_cells[0].find('a') builder['name'] = name_link.string - # We could generate the builder_url from the name in a future version of this code. + # We could generate the builder_url from the name in a future version + # of this code. builder['builder_url'] = self.buildbot_server_url + name_link['href'] status_link = status_cells[1].find('a') if not status_link: - # We failed to find a link in the first cell, just give up. - # This can happen if a builder is just-added, the first cell will just be "no build" - builder['is_green'] = False # Other parts of the code depend on is_green being present. + # We failed to find a link in the first cell, just give up. This + # can happen if a builder is just-added, the first cell will just + # be "no build" + # Other parts of the code depend on is_green being present. 
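            # A hedged illustration (not part of this patch) of the fallback
            # dictionary this branch returns for a just-added builder; the
            # name and URL values below are hypothetical:
            #   {'name': u'Some New Builder',
            #    'builder_url': 'http://build.webkit.org/builders/Some%20New%20Builder',
            #    'is_green': False}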
+ builder['is_green'] = False return builder - revision_string = status_link.string # Will be either a revision number or a build number + # Will be either a revision number or a build number + revision_string = status_link.string # If revision_string has non-digits assume it's not a revision number. - builder['built_revision'] = int(revision_string) if not re.match('\D', revision_string) else None - builder['is_green'] = not re.search('fail', status_cells[1].renderContents()) - # We could parse out the build number instead, but for now just store the URL. + builder['built_revision'] = int(revision_string) \ + if not re.match('\D', revision_string) \ + else None + builder['is_green'] = not re.search('fail', + status_cells[1].renderContents()) + # We could parse out the build number instead, but for now just store + # the URL. builder['build_url'] = self.buildbot_server_url + status_link['href'] # We could parse out the current activity too. return builder - def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps): + def _builder_statuses_with_names_matching_regexps(self, + builder_statuses, + name_regexps): builders = [] for builder in builder_statuses: for name_regexp in name_regexps: @@ -86,7 +106,9 @@ class BuildBot: def red_core_builders(self): red_builders = [] - for builder in self._builder_statuses_with_names_matching_regexps(self.builder_statuses(), self.core_builder_names_regexps): + for builder in self._builder_statuses_with_names_matching_regexps( + self.builder_statuses(), + self.core_builder_names_regexps): if not builder['is_green']: red_builders.append(builder) return red_builders diff --git a/WebKitTools/Scripts/webkitpy/buildbot.pyc b/WebKitTools/Scripts/webkitpy/buildbot.pyc Binary files differnew file mode 100644 index 0000000..49b1e68 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/buildbot.pyc diff --git a/WebKitTools/Scripts/modules/buildbot_unittest.py b/WebKitTools/Scripts/webkitpy/buildbot_unittest.py index a85f2ea..bde3e04 100644 --- a/WebKitTools/Scripts/modules/buildbot_unittest.py +++ b/WebKitTools/Scripts/webkitpy/buildbot_unittest.py @@ -28,9 +28,9 @@ import unittest -from modules.buildbot import BuildBot +from webkitpy.buildbot import BuildBot -from modules.BeautifulSoup import BeautifulSoup +from webkitpy.BeautifulSoup import BeautifulSoup class BuildBotTest(unittest.TestCase): @@ -110,18 +110,39 @@ class BuildBotTest(unittest.TestCase): def test_builder_name_regexps(self): buildbot = BuildBot() + # For complete testing, this list should match the list of builders at build.webkit.org: example_builders = [ - { 'name': u'Leopard Debug (Build)', }, - { 'name': u'Leopard Debug (Tests)', }, + { 'name': u'Tiger Intel Release', }, + { 'name': u'Leopard Intel Release (Build)', }, + { 'name': u'Leopard Intel Release (Tests)', }, + { 'name': u'Leopard Intel Debug (Build)', }, + { 'name': u'Leopard Intel Debug (Tests)', }, + { 'name': u'SnowLeopard Intel Release (Build)', }, + { 'name': u'SnowLeopard Intel Release (Tests)', }, + { 'name': u'SnowLeopard Intel Leaks', }, { 'name': u'Windows Release (Build)', }, + { 'name': u'Windows Release (Tests)', }, + { 'name': u'Windows Debug (Build)', }, { 'name': u'Windows Debug (Tests)', }, { 'name': u'Qt Linux Release', }, + { 'name': u'Gtk Linux Release', }, + { 'name': u'Gtk Linux 32-bit Debug', }, + { 'name': u'Gtk Linux 64-bit Debug', }, + { 'name': u'Chromium Linux Release', }, + { 'name': u'Chromium Mac Release', }, + { 'name': u'Chromium Win Release', }, ] - name_regexps = [ 'Leopard', 
"Windows.*Build" ] + name_regexps = [ "Leopard", "Windows.*Build", "Chromium" ] expected_builders = [ - { 'name': u'Leopard Debug (Build)', }, - { 'name': u'Leopard Debug (Tests)', }, + { 'name': u'Leopard Intel Release (Build)', }, + { 'name': u'Leopard Intel Release (Tests)', }, + { 'name': u'Leopard Intel Debug (Build)', }, + { 'name': u'Leopard Intel Debug (Tests)', }, { 'name': u'Windows Release (Build)', }, + { 'name': u'Windows Debug (Build)', }, + { 'name': u'Chromium Linux Release', }, + { 'name': u'Chromium Mac Release', }, + { 'name': u'Chromium Win Release', }, ] # This test should probably be updated if the default regexp list changes diff --git a/WebKitTools/Scripts/modules/changelogs.py b/WebKitTools/Scripts/webkitpy/changelogs.py index a407d23..ebc89c4 100644 --- a/WebKitTools/Scripts/modules/changelogs.py +++ b/WebKitTools/Scripts/webkitpy/changelogs.py @@ -1,9 +1,9 @@ # Copyright (C) 2009, Google Inc. All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -13,7 +13,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -30,21 +30,27 @@ import fileinput # inplace file editing for set_reviewer_in_changelog import re +import textwrap + -# FIMXE: This doesn't really belong in this file, but we don't have a better home for it yet. -# Maybe eventually a webkit_config.py? def view_source_url(revision_number): + # FIMXE: This doesn't really belong in this file, but we don't have a + # better home for it yet. + # Maybe eventually a webkit_config.py? return "http://trac.webkit.org/changeset/%s" % revision_number class ChangeLog: + def __init__(self, path): self.path = path + _changelog_indent = " " * 8 + # e.g. 2009-06-03 Eric Seidel <eric@webkit.org> date_line_regexp = re.compile('^(\d{4}-\d{2}-\d{2})' # Consume the date. + '\s+(.+)\s+' # Consume the name. - + '<([^<>]+)>$') # And finally the email address. + + '<([^<>]+)>$') # And the email address. @staticmethod def _parse_latest_entry_from_file(changelog_file): @@ -58,7 +64,8 @@ class ChangeLog: for line in changelog_file: # If we've hit the next entry, return. if ChangeLog.date_line_regexp.match(line): - return ''.join(entry_lines[:-1]) # Remove the extra newline at the end + # Remove the extra newline at the end + return ''.join(entry_lines[:-1]) entry_lines.append(line) return None # We never found a date line! @@ -69,18 +76,52 @@ class ChangeLog: finally: changelog_file.close() - def update_for_revert(self, revision): - reviewed_by_regexp = re.compile('Reviewed by NOBODY \(OOPS!\)\.') + # _wrap_line and _wrap_lines exist to work around + # http://bugs.python.org/issue1859 + + def _wrap_line(self, line): + return textwrap.fill(line, + width=70, + initial_indent=self._changelog_indent, + # Don't break urls which may be longer than width. 
+ break_long_words=False, + subsequent_indent=self._changelog_indent) + + # Workaround as suggested by guido in + # http://bugs.python.org/issue1859#msg60040 + + def _wrap_lines(self, message): + lines = [self._wrap_line(line) for line in message.splitlines()] + return "\n".join(lines) + + # This probably does not belong in changelogs.py + def _message_for_revert(self, revision, reason, bug_url): + message = "No review, rolling out r%s.\n" % revision + message += "%s\n" % view_source_url(revision) + if bug_url: + message += "%s\n" % bug_url + # Add an extra new line after the rollout links, before any reason. + message += "\n" + if reason: + message += "%s\n\n" % reason + return self._wrap_lines(message) + + def update_for_revert(self, revision, reason, bug_url=None): + reviewed_by_regexp = re.compile( + "%sReviewed by NOBODY \(OOPS!\)\." % self._changelog_indent) removing_boilerplate = False # inplace=1 creates a backup file and re-directs stdout to the file for line in fileinput.FileInput(self.path, inplace=1): if reviewed_by_regexp.search(line): - print reviewed_by_regexp.sub("No review, rolling out r%s." % revision, line), - print " %s\n" % view_source_url(revision) - # Remove all the ChangeLog boilerplate between the Reviewed by line and the first changed file. + message_lines = self._message_for_revert(revision, + reason, + bug_url) + print reviewed_by_regexp.sub(message_lines, line), + # Remove all the ChangeLog boilerplate between the Reviewed by + # line and the first changed file. removing_boilerplate = True elif removing_boilerplate: - if line.find('*') >= 0 : # each changed file is preceded by a * + if line.find('*') >= 0: # each changed file is preceded by a * removing_boilerplate = False if not removing_boilerplate: @@ -89,4 +130,5 @@ class ChangeLog: def set_reviewer(self, reviewer): # inplace=1 creates a backup file and re-directs stdout to the file for line in fileinput.FileInput(self.path, inplace=1): - print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")), # Trailing comma suppresses printing newline + # Trailing comma suppresses printing newline + print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")), diff --git a/WebKitTools/Scripts/webkitpy/changelogs.pyc b/WebKitTools/Scripts/webkitpy/changelogs.pyc Binary files differnew file mode 100644 index 0000000..2fca994 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/changelogs.pyc diff --git a/WebKitTools/Scripts/modules/changelogs_unittest.py b/WebKitTools/Scripts/webkitpy/changelogs_unittest.py index dd14cb7..de3e60c 100644 --- a/WebKitTools/Scripts/modules/changelogs_unittest.py +++ b/WebKitTools/Scripts/webkitpy/changelogs_unittest.py @@ -124,22 +124,56 @@ class ChangeLogsTest(unittest.TestCase): os.remove(changelog_path) self.assertEquals(actual_contents, expected_contents) - _expected_revert_entry = '''2009-08-19 Eric Seidel <eric@webkit.org> + _revert_message = """ No review, rolling out r12345. + http://trac.webkit.org/changeset/12345 + http://example.com/123 + + This is a very long reason which should be long enough so that + _message_for_revert will need to wrap it. We'll also include + a + https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 + link so that we can make sure we wrap that right too. +""" + + def test_message_for_revert(self): + changelog = ChangeLog("/fake/path") + long_reason = "This is a very long reason which should be long enough so that _message_for_revert will need to wrap it. 
We'll also include a https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 link so that we can make sure we wrap that right too." + message = changelog._message_for_revert(12345, long_reason, "http://example.com/123") + self.assertEquals(message, self._revert_message) + + _revert_entry_with_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> No review, rolling out r12345. http://trac.webkit.org/changeset/12345 + http://example.com/123 + + Reason * Scripts/bugzilla-tool: ''' - def test_update_for_revert(self): + _revert_entry_without_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> + + No review, rolling out r12345. + http://trac.webkit.org/changeset/12345 + + Reason + + * Scripts/bugzilla-tool: +''' + + def _assert_update_for_revert_output(self, args, expected_entry): changelog_contents = "%s\n%s" % (self._new_entry_boilerplate, self._example_changelog) changelog_path = self._write_tmp_file_with_contents(changelog_contents) changelog = ChangeLog(changelog_path) - changelog.update_for_revert(12345) + changelog.update_for_revert(*args) actual_entry = changelog.latest_entry() os.remove(changelog_path) - self.assertEquals(actual_entry, self._expected_revert_entry) + self.assertEquals(actual_entry, expected_entry) + + def test_update_for_revert(self): + self._assert_update_for_revert_output([12345, "Reason"], self._revert_entry_without_bug_url) + self._assert_update_for_revert_output([12345, "Reason", "http://example.com/123"], self._revert_entry_with_bug_url) if __name__ == '__main__': unittest.main() diff --git a/WebKitTools/Scripts/modules/__init__.py b/WebKitTools/Scripts/webkitpy/commands/__init__.py index ef65bee..ef65bee 100644 --- a/WebKitTools/Scripts/modules/__init__.py +++ b/WebKitTools/Scripts/webkitpy/commands/__init__.py diff --git a/WebKitTools/Scripts/webkitpy/commands/__init__.pyc b/WebKitTools/Scripts/webkitpy/commands/__init__.pyc Binary files differnew file mode 100644 index 0000000..ac801ef --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/__init__.pyc diff --git a/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py new file mode 100644 index 0000000..53af5b1 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.multicommandtool import AbstractDeclarativeCommand +from webkitpy.stepsequence import StepSequence + + +class AbstractSequencedCommand(AbstractDeclarativeCommand): + steps = None + def __init__(self): + self._sequence = StepSequence(self.steps) + AbstractDeclarativeCommand.__init__(self, self._sequence.options()) + + def _prepare_state(self, options, args, tool): + return None + + def execute(self, options, args, tool): + self._sequence.run_and_handle_errors(tool, options, self._prepare_state(options, args, tool)) diff --git a/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc Binary files differnew file mode 100644 index 0000000..6d3afc4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc diff --git a/WebKitTools/Scripts/modules/commands/commandtest.py b/WebKitTools/Scripts/webkitpy/commands/commandtest.py index 618a517..a56cb05 100644 --- a/WebKitTools/Scripts/modules/commands/commandtest.py +++ b/WebKitTools/Scripts/webkitpy/commands/commandtest.py @@ -28,15 +28,11 @@ import unittest -from modules.mock import Mock -from modules.mock_bugzillatool import MockBugzillaTool -from modules.outputcapture import OutputCapture +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture class CommandsTest(unittest.TestCase): def assert_execute_outputs(self, command, args, expected_stdout="", expected_stderr="", options=Mock(), tool=MockBugzillaTool()): - capture = OutputCapture() - capture.capture_output() - command.execute(options, args, tool) - (stdout_string, stderr_string) = capture.restore_output() - self.assertEqual(stdout_string, expected_stdout) - self.assertEqual(expected_stderr, expected_stderr) + command.bind_to_tool(tool) + OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/commands/download.py b/WebKitTools/Scripts/webkitpy/commands/download.py new file mode 100644 index 0000000..49a6862 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/download.py @@ -0,0 +1,284 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from optparse import make_option + +import webkitpy.steps as steps + +from webkitpy.bugzilla import parse_bug_id +# We could instead use from modules import buildsteps and then prefix every buildstep with "buildsteps." +from webkitpy.changelogs import ChangeLog +from webkitpy.commands.abstractsequencedcommand import AbstractSequencedCommand +from webkitpy.comments import bug_comment_from_commit_text +from webkitpy.executive import ScriptError +from webkitpy.grammar import pluralize +from webkitpy.webkit_logging import error, log +from webkitpy.multicommandtool import AbstractDeclarativeCommand +from webkitpy.stepsequence import StepSequence + + +class Build(AbstractSequencedCommand): + name = "build" + help_text = "Update working copy and build" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.Build, + ] + + +class BuildAndTest(AbstractSequencedCommand): + name = "build-and-test" + help_text = "Update working copy, build, and run the tests" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.Build, + steps.RunTests, + ] + + +class Land(AbstractSequencedCommand): + name = "land" + help_text = "Land the current working directory diff and updates the associated bug if any" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.EnsureBuildersAreGreen, + steps.UpdateChangeLogsWithReviewer, + steps.EnsureBuildersAreGreen, + steps.Build, + steps.RunTests, + steps.Commit, + steps.CloseBugForLandDiff, + ] + long_help = """land commits the current working copy diff (just as svn or git commit would). +land will build and run the tests before committing. +If a bug id is provided, or one can be found in the ChangeLog land will update the bug after committing.""" + + def _prepare_state(self, options, args, tool): + return { + "bug_id" : (args and args[0]) or parse_bug_id(tool.scm().create_patch()), + } + + +class AbstractPatchProcessingCommand(AbstractDeclarativeCommand): + # Subclasses must implement the methods below. We don't declare them here + # because we want to be able to implement them with mix-ins. 
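    # The hook methods listed next are supplied by mix-ins defined further
    # down in this file; for example (sketch only, the real classes follow):
    #
    #   class ProcessAttachmentsMixin(object):
    #       def _fetch_list_of_patches_to_process(self, options, args, tool):
    #           return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
    #
    #   class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
    #       ...  # inherits the hook above instead of declaring it here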
+ # + # def _fetch_list_of_patches_to_process(self, options, args, tool): + # def _prepare_to_process(self, options, args, tool): + + @staticmethod + def _collect_patches_by_bug(patches): + bugs_to_patches = {} + for patch in patches: + bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch] + return bugs_to_patches + + def execute(self, options, args, tool): + self._prepare_to_process(options, args, tool) + patches = self._fetch_list_of_patches_to_process(options, args, tool) + + # It's nice to print out total statistics. + bugs_to_patches = self._collect_patches_by_bug(patches) + log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches)))) + + for patch in patches: + self._process_patch(patch, options, args, tool) + + +class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand): + prepare_steps = None + main_steps = None + + def __init__(self): + options = [] + self._prepare_sequence = StepSequence(self.prepare_steps) + self._main_sequence = StepSequence(self.main_steps) + options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options())) + AbstractPatchProcessingCommand.__init__(self, options) + + def _prepare_to_process(self, options, args, tool): + self._prepare_sequence.run_and_handle_errors(tool, options) + + def _process_patch(self, patch, options, args, tool): + state = { "patch" : patch } + self._main_sequence.run_and_handle_errors(tool, options, state) + + +class ProcessAttachmentsMixin(object): + def _fetch_list_of_patches_to_process(self, options, args, tool): + return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args) + + +class ProcessBugsMixin(object): + def _fetch_list_of_patches_to_process(self, options, args, tool): + all_patches = [] + for bug_id in args: + patches = tool.bugs.fetch_bug(bug_id).reviewed_patches() + log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id)) + all_patches += patches + return all_patches + + +class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin): + name = "check-style" + help_text = "Run check-webkit-style on the specified attachments" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.CheckStyle, + ] + + +class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin): + name = "build-attachment" + help_text = "Apply and build patches from bugzilla" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.Build, + ] + + +class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand): + prepare_steps = [ + steps.EnsureLocalCommitIfNeeded, + steps.CleanWorkingDirectoryWithLocalCommits, + steps.Update, + ] + main_steps = [ + steps.ApplyPatchWithLocalCommit, + ] + long_help = """Updates the working copy. 
+Downloads and applies the patches, creating local commits if necessary.""" + + +class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin): + name = "apply-attachment" + help_text = "Apply an attachment to the local working directory" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + show_in_main_help = True + + +class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin): + name = "apply-from-bug" + help_text = "Apply reviewed patches from provided bugs to the local working directory" + argument_names = "BUGID [BUGIDS]" + show_in_main_help = True + + +class AbstractPatchLandingCommand(AbstractPatchSequencingCommand): + prepare_steps = [ + steps.EnsureBuildersAreGreen, + ] + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.EnsureBuildersAreGreen, + steps.Build, + steps.RunTests, + steps.Commit, + steps.ClosePatch, + steps.CloseBug, + ] + long_help = """Checks to make sure builders are green. +Updates the working copy. +Applies the patch. +Builds. +Runs the layout tests. +Commits the patch. +Clears the flags on the patch. +Closes the bug if no patches are marked for review.""" + + +class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin): + name = "land-attachment" + help_text = "Land patches from bugzilla, optionally building and testing them first" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + show_in_main_help = True + + +class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin): + name = "land-from-bug" + help_text = "Land all patches on the given bugs, optionally building and testing them first" + argument_names = "BUGID [BUGIDS]" + show_in_main_help = True + + +class Rollout(AbstractSequencedCommand): + name = "rollout" + show_in_main_help = True + help_text = "Revert the given revision in the working copy and optionally commit the revert and re-open the original bug" + argument_names = "REVISION REASON" + long_help = """Updates the working copy. +Applies the inverse diff for the provided revision. +Creates an appropriate rollout ChangeLog, including a trac link and bug link. +Opens the generated ChangeLogs in $EDITOR. +Shows the prepared diff for confirmation. +Commits the revert and updates the bug (including re-opening the bug if necessary).""" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.RevertRevision, + steps.PrepareChangeLogForRevert, + steps.EditChangeLog, + steps.ConfirmDiff, + steps.CompleteRollout, + ] + + @staticmethod + def _parse_bug_id_from_revision_diff(tool, revision): + original_diff = tool.scm().diff_for_revision(revision) + return parse_bug_id(original_diff) + + def execute(self, options, args, tool): + revision = args[0] + reason = args[1] + bug_id = self._parse_bug_id_from_revision_diff(tool, revision) + if options.complete_rollout: + if bug_id: + log("Will re-open bug %s after rollout." % bug_id) + else: + log("Failed to parse bug number from diff. 
No bugs will be updated/reopened after the rollout.") + + state = { + "revision" : revision, + "bug_id" : bug_id, + "reason" : reason, + } + self._sequence.run_and_handle_errors(tool, options, state) diff --git a/WebKitTools/Scripts/webkitpy/commands/download.pyc b/WebKitTools/Scripts/webkitpy/commands/download.pyc Binary files differnew file mode 100644 index 0000000..3a0046f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/download.pyc diff --git a/WebKitTools/Scripts/webkitpy/commands/download_unittest.py b/WebKitTools/Scripts/webkitpy/commands/download_unittest.py new file mode 100644 index 0000000..f60c5b8 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/download_unittest.py @@ -0,0 +1,127 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
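The download commands above are purely declarative: each command is an ordered list of step classes, and AbstractSequencedCommand/StepSequence execute those steps in order against shared state. A minimal standalone sketch of that pattern, with illustrative names that are not part of this patch (the real steps also receive the tool and the parsed command options, and StepSequence adds option collection and ScriptError handling on top of this idea):

    class UpdateStep(object):
        def run(self, state):
            print "Updating working directory"

    class BuildStep(object):
        def run(self, state):
            print "Building WebKit"

    def run_sequence(steps, state=None):
        # Run each declared step class in order, threading a shared state dict.
        state = state or {}
        for step_class in steps:
            step_class().run(state)

    run_sequence([UpdateStep, BuildStep])  # prints the two progress messages in order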
+ +from webkitpy.commands.commandtest import CommandsTest +from webkitpy.commands.download import * +from webkitpy.mock import Mock + +class DownloadCommandsTest(CommandsTest): + def _default_options(self): + options = Mock() + options.force_clean = False + options.clean = True + options.check_builders = True + options.quiet = False + options.non_interactive = False + options.update = True + options.build = True + options.test = True + options.close_bug = True + options.complete_rollout = False + return options + + def test_build(self): + expected_stderr = "Updating working directory\nBuilding WebKit\n" + self.assert_execute_outputs(Build(), [], options=self._default_options(), expected_stderr=expected_stderr) + + def test_build_and_test(self): + expected_stderr = "Updating working directory\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n" + self.assert_execute_outputs(BuildAndTest(), [], options=self._default_options(), expected_stderr=expected_stderr) + + def test_apply_attachment(self): + options = self._default_options() + options.update = True + options.local_commit = True + expected_stderr = "Updating working directory\nProcessing 1 patch from 1 bug.\nProcessing patch 197 from bug 42.\n" + self.assert_execute_outputs(ApplyAttachment(), [197], options=options, expected_stderr=expected_stderr) + + def test_apply_patches(self): + options = self._default_options() + options.update = True + options.local_commit = True + expected_stderr = "Updating working directory\n2 reviewed patches found on bug 42.\nProcessing 2 patches from 1 bug.\nProcessing patch 197 from bug 42.\nProcessing patch 128 from bug 42.\n" + self.assert_execute_outputs(ApplyFromBug(), [42], options=options, expected_stderr=expected_stderr) + + def test_land_diff(self): + expected_stderr = "Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nUpdating bug 42\n" + self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr) + + def test_check_style(self): + expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nRunning check-webkit-style\n" + self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_build_attachment(self): + expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nBuilding WebKit\n" + self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_land_attachment(self): + # FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags. + expected_stderr = """Processing 1 patch from 1 bug. +Updating working directory +Processing patch 197 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. +""" + self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_land_patches(self): + # FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags. 
+ expected_stderr = """2 reviewed patches found on bug 42. +Processing 2 patches from 1 bug. +Updating working directory +Processing patch 197 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. +Updating working directory +Processing patch 128 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. +""" + self.assert_execute_outputs(LandFromBug(), [42], options=self._default_options(), expected_stderr=expected_stderr) + + def test_rollout(self): + expected_stderr = "Updating working directory\nRunning prepare-ChangeLog\n\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"webkit-patch land 12345\" to commit the rollout.\n" + self.assert_execute_outputs(Rollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr) + + def test_complete_rollout(self): + options = self._default_options() + options.complete_rollout = True + expected_stderr = "Will re-open bug 12345 after rollout.\nUpdating working directory\nRunning prepare-ChangeLog\nBuilding WebKit\n" + self.assert_execute_outputs(Rollout(), [852, "Reason"], options=options, expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py new file mode 100644 index 0000000..e3e14dd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from StringIO import StringIO + +from webkitpy.commands.queues import AbstractReviewQueue +from webkitpy.committers import CommitterList +from webkitpy.executive import ScriptError +from webkitpy.webkitport import WebKitPort +from webkitpy.queueengine import QueueEngine + + +class AbstractEarlyWarningSystem(AbstractReviewQueue): + _build_style = "release" + + def __init__(self): + AbstractReviewQueue.__init__(self) + self.port = WebKitPort.port(self.port_name) + + def should_proceed_with_work_item(self, patch): + try: + self.run_webkit_patch([ + "build", + self.port.flag(), + "--build-style=%s" % self._build_style, + "--force-clean", + "--quiet"]) + self._update_status("Building", patch) + except ScriptError, e: + self._update_status("Unable to perform a build") + return False + return True + + def _review_patch(self, patch): + self.run_webkit_patch([ + "build-attachment", + self.port.flag(), + "--build-style=%s" % self._build_style, + "--force-clean", + "--quiet", + "--non-interactive", + "--parent-command=%s" % self.name, + "--no-update", + patch.id()]) + + @classmethod + def handle_script_error(cls, tool, state, script_error): + is_svn_apply = script_error.command_name() == "svn-apply" + status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply) + if is_svn_apply: + QueueEngine.exit_after_handled_error(script_error) + results_link = tool.status_server.results_url_for_status(status_id) + message = "Attachment %s did not build on %s:\nBuild output: %s" % (state["patch"].id(), cls.port_name, results_link) + tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers) + exit(1) + + +class GtkEWS(AbstractEarlyWarningSystem): + name = "gtk-ews" + port_name = "gtk" + watchers = AbstractEarlyWarningSystem.watchers + [ + "gns@gnome.org", + "xan.lopez@gmail.com", + ] + + +class QtEWS(AbstractEarlyWarningSystem): + name = "qt-ews" + port_name = "qt" + + +class ChromiumEWS(AbstractEarlyWarningSystem): + name = "chromium-ews" + port_name = "chromium" + watchers = AbstractEarlyWarningSystem.watchers + [ + "dglazkov@chromium.org", + ] + + +# For platforms that we can't run inside a VM (like Mac OS X), we require +# patches to be uploaded by committers, who are generally trustworthy folk. :) +class AbstractCommitterOnlyEWS(AbstractEarlyWarningSystem): + def __init__(self, committers=CommitterList()): + AbstractEarlyWarningSystem.__init__(self) + self._committers = committers + + def process_work_item(self, patch): + if not self._committers.committer_by_email(patch.attacher_email()): + self._did_error(patch, "%s cannot process patches from non-committers :(" % self.name) + return + AbstractEarlyWarningSystem.process_work_item(self, patch) + + +class MacEWS(AbstractCommitterOnlyEWS): + name = "mac-ews" + port_name = "mac" diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc Binary files differnew file mode 100644 index 0000000..d6e0800 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py b/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py new file mode 100644 index 0000000..d516b84 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py @@ -0,0 +1,62 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.commands.early_warning_system import * +from webkitpy.commands.queuestest import QueuesTest +from webkitpy.mock import Mock + +class EarlyWarningSytemTest(QueuesTest): + def test_chromium_ews(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: chromium-ews will discard all local changes in \"%s\"\nRunning WebKit chromium-ews.\n" % os.getcwd(), + "handle_unexpected_error" : "Mock error message\n", + } + self.assert_queue_outputs(ChromiumEWS(), expected_stderr=expected_stderr) + + def test_qt_ews(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: qt-ews will discard all local changes in \"%s\"\nRunning WebKit qt-ews.\n" % os.getcwd(), + "handle_unexpected_error" : "Mock error message\n", + } + self.assert_queue_outputs(QtEWS(), expected_stderr=expected_stderr) + + def test_gtk_ews(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: gtk-ews will discard all local changes in \"%s\"\nRunning WebKit gtk-ews.\n" % os.getcwd(), + "handle_unexpected_error" : "Mock error message\n", + } + self.assert_queue_outputs(GtkEWS(), expected_stderr=expected_stderr) + + def test_mac_ews(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: mac-ews will discard all local changes in \"%s\"\nRunning WebKit mac-ews.\n" % os.getcwd(), + "handle_unexpected_error" : "Mock error message\n", + } + self.assert_queue_outputs(MacEWS(), expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/commands/openbugs.py b/WebKitTools/Scripts/webkitpy/commands/openbugs.py new file mode 100644 index 0000000..25bdefc --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/openbugs.py @@ -0,0 +1,63 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import re +import sys + +from webkitpy.multicommandtool import AbstractDeclarativeCommand +from webkitpy.webkit_logging import log + + +class OpenBugs(AbstractDeclarativeCommand): + name = "open-bugs" + help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser" + + bug_number_regexp = re.compile(r"\b\d{4,6}\b") + + def _open_bugs(self, bug_ids): + for bug_id in bug_ids: + bug_url = self.tool.bugs.bug_url_for_bug_id(bug_id) + self.tool.user.open_url(bug_url) + + # _find_bugs_in_string mostly exists for easy unit testing. + def _find_bugs_in_string(self, string): + return self.bug_number_regexp.findall(string) + + def _find_bugs_in_iterable(self, iterable): + return sum([self._find_bugs_in_string(string) for string in iterable], []) + + def execute(self, options, args, tool): + if args: + bug_ids = self._find_bugs_in_iterable(args) + else: + # This won't open bugs until stdin is closed but could be made to easily. That would just make unit testing slightly harder. + bug_ids = self._find_bugs_in_iterable(sys.stdin) + + log("%s bugs found in input." % len(bug_ids)) + + self._open_bugs(bug_ids) diff --git a/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py b/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py new file mode 100644 index 0000000..71fefd2 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py @@ -0,0 +1,50 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.commands.commandtest import CommandsTest +from webkitpy.commands.openbugs import OpenBugs + +class OpenBugsTest(CommandsTest): + + find_bugs_in_string_expectations = [ + ["123", []], + ["1234", ["1234"]], + ["12345", ["12345"]], + ["123456", ["123456"]], + ["1234567", []], + [" 123456 234567", ["123456", "234567"]], + ] + + def test_find_bugs_in_string(self): + openbugs = OpenBugs() + for expectation in self.find_bugs_in_string_expectations: + self.assertEquals(openbugs._find_bugs_in_string(expectation[0]), expectation[1]) + + def test_args_parsing(self): + expected_stderr = "2 bugs found in input.\nMOCK: user.open_url: http://example.com/12345\nMOCK: user.open_url: http://example.com/23456\n" + self.assert_execute_outputs(OpenBugs(), ["12345\n23456"], expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/modules/commands/queries.py b/WebKitTools/Scripts/webkitpy/commands/queries.py index 98310e3..3ca4f42 100644 --- a/WebKitTools/Scripts/modules/commands/queries.py +++ b/WebKitTools/Scripts/webkitpy/commands/queries.py @@ -31,103 +31,84 @@ from optparse import make_option -from modules.buildbot import BuildBot -from modules.committers import CommitterList -from modules.logging import log -from modules.multicommandtool import Command +from webkitpy.buildbot import BuildBot +from webkitpy.committers import CommitterList +from webkitpy.webkit_logging import log +from webkitpy.multicommandtool import AbstractDeclarativeCommand -class BugsToCommit(Command): +class BugsToCommit(AbstractDeclarativeCommand): name = "bugs-to-commit" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "List bugs in the commit-queue") + help_text = "List bugs in the commit-queue" def execute(self, options, args, tool): - bug_ids = tool.bugs.fetch_bug_ids_from_commit_queue() + # FIXME: This command is poorly named. It's fetching the commit-queue list here. The name implies it's fetching pending-commit (all r+'d patches). 
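        # (Terminology, for readers of this patch: the commit-queue query used
        # here returns only work explicitly marked cq+, while the
        # "pending-commit" query used by PatchesToCommitQueue below,
        # fetch_patches_from_pending_commit_list(), returns all r+'d patches.)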
+ bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue() for bug_id in bug_ids: print "%s" % bug_id -class PatchesToCommit(Command): - name = "patches-to-commit" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "List patches in the commit-queue") +class PatchesInCommitQueue(AbstractDeclarativeCommand): + name = "patches-in-commit-queue" + help_text = "List patches in the commit-queue" def execute(self, options, args, tool): - patches = tool.bugs.fetch_patches_from_commit_queue() + patches = tool.bugs.queries.fetch_patches_from_commit_queue() log("Patches in commit queue:") for patch in patches: - print "%s" % patch["url"] + print patch.url() -class PatchesToCommitQueue(Command): +class PatchesToCommitQueue(AbstractDeclarativeCommand): name = "patches-to-commit-queue" - show_in_main_help = False + help_text = "Patches which should be added to the commit queue" def __init__(self): options = [ make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"), ] - Command.__init__(self, "Patches which should be added to the commit queue", options=options) + AbstractDeclarativeCommand.__init__(self, options=options) @staticmethod def _needs_commit_queue(patch): - commit_queue_flag = patch.get("commit-queue") - if (commit_queue_flag and commit_queue_flag == '+'): # If it's already cq+, ignore the patch. - log("%s already has cq=%s" % (patch["id"], commit_queue_flag)) + if patch.commit_queue() == "+": # If it's already cq+, ignore the patch. + log("%s already has cq=%s" % (patch.id(), patch.commit_queue())) return False # We only need to worry about patches from contributers who are not yet committers. - committer_record = CommitterList().committer_by_email(patch["attacher_email"]) + committer_record = CommitterList().committer_by_email(patch.attacher_email()) if committer_record: - log("%s committer = %s" % (patch["id"], committer_record)) + log("%s committer = %s" % (patch.id(), committer_record)) return not committer_record def execute(self, options, args, tool): - patches = tool.bugs.fetch_patches_from_pending_commit_list() + patches = tool.bugs.queries.fetch_patches_from_pending_commit_list() patches_needing_cq = filter(self._needs_commit_queue, patches) if options.bugs: - bugs_needing_cq = map(lambda patch: patch['bug_id'], patches_needing_cq) + bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq) bugs_needing_cq = sorted(set(bugs_needing_cq)) for bug_id in bugs_needing_cq: print "%s" % tool.bugs.bug_url_for_bug_id(bug_id) else: for patch in patches_needing_cq: - print "%s" % tool.bugs.attachment_url_for_id(patch["id"], action="edit") + print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit") -class PatchesToReview(Command): +class PatchesToReview(AbstractDeclarativeCommand): name = "patches-to-review" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "List patches that are pending review") + help_text = "List patches that are pending review" def execute(self, options, args, tool): - patch_ids = tool.bugs.fetch_attachment_ids_from_review_queue() + patch_ids = tool.bugs.queries.fetch_attachment_ids_from_review_queue() log("Patches pending review:") for patch_id in patch_ids: print patch_id -class ReviewedPatches(Command): - name = "reviewed-patches" - show_in_main_help = False - def __init__(self): - Command.__init__(self, "List r+'d patches on a bug", "BUGID") - - def execute(self, options, args, tool): - bug_id = args[0] - patches_to_land = 
tool.bugs.fetch_reviewed_patches_from_bug(bug_id) - for patch in patches_to_land: - print "%s" % patch["url"] - - -class TreeStatus(Command): +class TreeStatus(AbstractDeclarativeCommand): name = "tree-status" - show_in_main_help = True - def __init__(self): - Command.__init__(self, "Print the status of the %s buildbots" % BuildBot.default_host) + help_text = "Print the status of the %s buildbots" % BuildBot.default_host + long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder +and displayes the status of each builder.""" def execute(self, options, args, tool): for builder in tool.buildbot.builder_statuses(): diff --git a/WebKitTools/Scripts/webkitpy/commands/queries.pyc b/WebKitTools/Scripts/webkitpy/commands/queries.pyc Binary files differnew file mode 100644 index 0000000..829b6e3 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/queries.pyc diff --git a/WebKitTools/Scripts/modules/commands/queries_unittest.py b/WebKitTools/Scripts/webkitpy/commands/queries_unittest.py index 0d1c82a..b858777 100644 --- a/WebKitTools/Scripts/modules/commands/queries_unittest.py +++ b/WebKitTools/Scripts/webkitpy/commands/queries_unittest.py @@ -26,43 +26,38 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import unittest - -from modules.bugzilla import Bugzilla -from modules.commands.commandtest import CommandsTest -from modules.commands.queries import * -from modules.mock import Mock -from modules.mock_bugzillatool import MockBugzillaTool +from webkitpy.bugzilla import Bugzilla +from webkitpy.commands.commandtest import CommandsTest +from webkitpy.commands.queries import * +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool class QueryCommandsTest(CommandsTest): def test_bugs_to_commit(self): - self.assert_execute_outputs(BugsToCommit(), None, "42\n75\n") + expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\n" + self.assert_execute_outputs(BugsToCommit(), None, "42\n77\n", expected_stderr) - def test_patches_to_commit(self): - expected_stdout = "http://example.com/197\nhttp://example.com/128\n" - expected_stderr = "Patches in commit queue:\n" - self.assert_execute_outputs(PatchesToCommit(), None, expected_stdout, expected_stderr) + def test_patches_in_commit_queue(self): + expected_stdout = "http://example.com/197\nhttp://example.com/103\n" + expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\nPatches in commit queue:\n" + self.assert_execute_outputs(PatchesInCommitQueue(), None, expected_stdout, expected_stderr) def test_patches_to_commit_queue(self): - expected_stdout = "http://example.com/197&action=edit\nhttp://example.com/128&action=edit\n" - expected_stderr = "" + expected_stdout = "http://example.com/104&action=edit\n" + expected_stderr = "197 already has cq=+\n128 already has cq=+\n105 committer = \"Eric Seidel\" <eric@webkit.org>\n" options = Mock() options.bugs = False self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options) - expected_stdout = "http://example.com/42\n" + expected_stdout = "http://example.com/77\n" options.bugs = True self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options) def test_patches_to_review(self): - expected_stdout = "197\n128\n" + expected_stdout = "103\n" expected_stderr = "Patches pending 
review:\n" self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr) - def test_reviewed_patches(self): - expected_stdout = "http://example.com/197\nhttp://example.com/128\n" - self.assert_execute_outputs(ReviewedPatches(), [42], expected_stdout) - def test_tree_status(self): expected_stdout = "ok : Builder1\nok : Builder2\n" self.assert_execute_outputs(TreeStatus(), None, expected_stdout) diff --git a/WebKitTools/Scripts/webkitpy/commands/queues.py b/WebKitTools/Scripts/webkitpy/commands/queues.py new file mode 100644 index 0000000..6ea1c48 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/queues.py @@ -0,0 +1,295 @@ +#!/usr/bin/env python +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import traceback +import os + +from datetime import datetime +from optparse import make_option +from StringIO import StringIO + +from webkitpy.bugzilla import CommitterValidator +from webkitpy.executive import ScriptError +from webkitpy.grammar import pluralize +from webkitpy.webkit_logging import error, log +from webkitpy.multicommandtool import Command +from webkitpy.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate +from webkitpy.statusserver import StatusServer +from webkitpy.stepsequence import StepSequenceErrorHandler +from webkitpy.queueengine import QueueEngine, QueueEngineDelegate + +class AbstractQueue(Command, QueueEngineDelegate): + watchers = [ + "webkit-bot-watchers@googlegroups.com", + ] + + _pass_status = "Pass" + _fail_status = "Fail" + _error_status = "Error" + + def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations + options_list = (options or []) + [ + make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. 
Dangerous!"), + ] + Command.__init__(self, "Run the %s" % self.name, options=options_list) + + def _cc_watchers(self, bug_id): + try: + self.tool.bugs.add_cc_to_bug(bug_id, self.watchers) + except Exception, e: + traceback.print_exc() + log("Failed to CC watchers.") + + def _update_status(self, message, patch=None, results_file=None): + self.tool.status_server.update_status(self.name, message, patch, results_file) + + def _did_pass(self, patch): + self._update_status(self._pass_status, patch) + + def _did_fail(self, patch): + self._update_status(self._fail_status, patch) + + def _did_error(self, patch, reason): + message = "%s: %s" % (self._error_status, reason) + self._update_status(message, patch) + + def queue_log_path(self): + return "%s.log" % self.name + + def work_item_log_path(self, patch): + return os.path.join("%s-logs" % self.name, "%s.log" % patch.bug_id()) + + def begin_work_queue(self): + log("CAUTION: %s will discard all local changes in \"%s\"" % (self.name, self.tool.scm().checkout_root)) + if self.options.confirm: + response = self.tool.user.prompt("Are you sure? Type \"yes\" to continue: ") + if (response != "yes"): + error("User declined.") + log("Running WebKit %s." % self.name) + + def should_continue_work_queue(self): + return True + + def next_work_item(self): + raise NotImplementedError, "subclasses must implement" + + def should_proceed_with_work_item(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def process_work_item(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def handle_unexpected_error(self, work_item, message): + raise NotImplementedError, "subclasses must implement" + + def run_webkit_patch(self, args): + webkit_patch_args = [self.tool.path()] + # FIXME: This is a hack, we should have a more general way to pass global options. + webkit_patch_args += ["--status-host=%s" % self.tool.status_server.host] + webkit_patch_args += map(str, args) + self.tool.executive.run_and_throw_if_fail(webkit_patch_args) + + def log_progress(self, patch_ids): + log("%s in %s [%s]" % (pluralize("patch", len(patch_ids)), self.name, ", ".join(map(str, patch_ids)))) + + def execute(self, options, args, tool, engine=QueueEngine): + self.options = options + self.tool = tool + return engine(self.name, self).run() + + @classmethod + def _update_status_for_script_error(cls, tool, state, script_error, is_error=False): + message = script_error.message + if is_error: + message = "Error: %s" % message + output = script_error.message_with_output(output_limit=5*1024*1024) # 5MB + return tool.status_server.update_status(cls.name, message, state["patch"], StringIO(output)) + + +class CommitQueue(AbstractQueue, StepSequenceErrorHandler): + name = "commit-queue" + def __init__(self): + AbstractQueue.__init__(self) + + # AbstractQueue methods + + def begin_work_queue(self): + AbstractQueue.begin_work_queue(self) + self.committer_validator = CommitterValidator(self.tool.bugs) + + def _validate_patches_in_commit_queue(self): + # Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers. 
+ bug_ids = self.tool.bugs.queries.fetch_bug_ids_from_commit_queue() + all_patches = sum([self.tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True) for bug_id in bug_ids], []) + return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches) + + def next_work_item(self): + patches = self._validate_patches_in_commit_queue() + # FIXME: We could sort the patches in a specific order here, was suggested by https://bugs.webkit.org/show_bug.cgi?id=33395 + if not patches: + self._update_status("Empty queue") + return None + # Only bother logging if we have patches in the queue. + self.log_progress([patch.id() for patch in patches]) + return patches[0] + + def _can_build_and_test(self): + try: + self.run_webkit_patch(["build-and-test", "--force-clean", "--non-interactive", "--build-style=both", "--quiet"]) + except ScriptError, e: + self._update_status("Unabled to successfully build and test", None) + return False + return True + + def _builders_are_green(self): + red_builders_names = self.tool.buildbot.red_core_builders_names() + if red_builders_names: + red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names. + self._update_status("Builders [%s] are red. See http://build.webkit.org" % ", ".join(red_builders_names), None) + return False + return True + + def should_proceed_with_work_item(self, patch): + if not self._builders_are_green(): + return False + if not self._can_build_and_test(): + return False + if not self._builders_are_green(): + return False + self._update_status("Landing patch", patch) + return True + + def process_work_item(self, patch): + try: + self._cc_watchers(patch.bug_id()) + # We pass --no-update here because we've already validated + # that the current revision actually builds and passes the tests. + # If we update, we risk moving to a revision that doesn't! 
+ self.run_webkit_patch(["land-attachment", "--force-clean", "--non-interactive", "--no-update", "--parent-command=commit-queue", "--build-style=both", "--quiet", patch.id()]) + self._did_pass(patch) + except ScriptError, e: + self._did_fail(patch) + raise e + + def handle_unexpected_error(self, patch, message): + self.committer_validator.reject_patch_from_commit_queue(patch.id(), message) + + # StepSequenceErrorHandler methods + + @staticmethod + def _error_message_for_bug(tool, status_id, script_error): + if not script_error.output: + return script_error.message_with_output() + results_link = tool.status_server.results_url_for_status(status_id) + return "%s\nFull output: %s" % (script_error.message_with_output(), results_link) + + @classmethod + def handle_script_error(cls, tool, state, script_error): + status_id = cls._update_status_for_script_error(tool, state, script_error) + validator = CommitterValidator(tool.bugs) + validator.reject_patch_from_commit_queue(state["patch"].id(), cls._error_message_for_bug(tool, status_id, script_error)) + + +class AbstractReviewQueue(AbstractQueue, PersistentPatchCollectionDelegate, StepSequenceErrorHandler): + def __init__(self, options=None): + AbstractQueue.__init__(self, options) + + def _review_patch(self, patch): + raise NotImplementedError, "subclasses must implement" + + # PersistentPatchCollectionDelegate methods + + def collection_name(self): + return self.name + + def fetch_potential_patch_ids(self): + return self.tool.bugs.queries.fetch_attachment_ids_from_review_queue() + + def status_server(self): + return self.tool.status_server + + def is_terminal_status(self, status): + return status == "Pass" or status == "Fail" or status.startswith("Error:") + + # AbstractQueue methods + + def begin_work_queue(self): + AbstractQueue.begin_work_queue(self) + self._patches = PersistentPatchCollection(self) + + def next_work_item(self): + patch_id = self._patches.next() + if patch_id: + return self.tool.bugs.fetch_attachment(patch_id) + self._update_status("Empty queue") + + def should_proceed_with_work_item(self, patch): + raise NotImplementedError, "subclasses must implement" + + def process_work_item(self, patch): + try: + self._review_patch(patch) + self._did_pass(patch) + except ScriptError, e: + if e.exit_code != QueueEngine.handled_error_code: + self._did_fail(patch) + raise e + + def handle_unexpected_error(self, patch, message): + log(message) + + # StepSequenceErrorHandler methods + + @classmethod + def handle_script_error(cls, tool, state, script_error): + log(script_error.message_with_output()) + + +class StyleQueue(AbstractReviewQueue): + name = "style-queue" + def __init__(self): + AbstractReviewQueue.__init__(self) + + def should_proceed_with_work_item(self, patch): + self._update_status("Checking style", patch) + return True + + def _review_patch(self, patch): + self.run_webkit_patch(["check-style", "--force-clean", "--non-interactive", "--parent-command=style-queue", patch.id()]) + + @classmethod + def handle_script_error(cls, tool, state, script_error): + is_svn_apply = script_error.command_name() == "svn-apply" + status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply) + if is_svn_apply: + QueueEngine.exit_after_handled_error(script_error) + message = "Attachment %s did not pass %s:\n\n%s\n\nIf any of these errors are false positives, please file a bug against check-webkit-style." 
% (state["patch"].id(), cls.name, script_error.message_with_output(output_limit=3*1024)) + tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers) + exit(1) diff --git a/WebKitTools/Scripts/webkitpy/commands/queues.pyc b/WebKitTools/Scripts/webkitpy/commands/queues.pyc Binary files differnew file mode 100644 index 0000000..8d52d05 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/queues.pyc diff --git a/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py b/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py new file mode 100644 index 0000000..87cd645 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py @@ -0,0 +1,102 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import os + +from webkitpy.commands.commandtest import CommandsTest +from webkitpy.commands.queues import * +from webkitpy.commands.queuestest import QueuesTest +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture + + +class TestQueue(AbstractQueue): + name = "test-queue" + + +class TestReviewQueue(AbstractReviewQueue): + name = "test-review-queue" + + +class AbstractQueueTest(CommandsTest): + def _assert_log_progress_output(self, patch_ids, progress_output): + OutputCapture().assert_outputs(self, TestQueue().log_progress, [patch_ids], expected_stderr=progress_output) + + def test_log_progress(self): + self._assert_log_progress_output([1,2,3], "3 patches in test-queue [1, 2, 3]\n") + self._assert_log_progress_output(["1","2","3"], "3 patches in test-queue [1, 2, 3]\n") + self._assert_log_progress_output([1], "1 patch in test-queue [1]\n") + + def _assert_run_webkit_patch(self, run_args): + queue = TestQueue() + tool = MockBugzillaTool() + queue.bind_to_tool(tool) + + queue.run_webkit_patch(run_args) + expected_run_args = ["echo", "--status-host=example.com"] + map(str, run_args) + tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args) + + def test_run_webkit_patch(self): + self._assert_run_webkit_patch([1]) + self._assert_run_webkit_patch(["one", 2]) + + +class AbstractReviewQueueTest(CommandsTest): + def test_patch_collection_delegate_methods(self): + queue = TestReviewQueue() + tool = MockBugzillaTool() + queue.bind_to_tool(tool) + self.assertEquals(queue.collection_name(), "test-review-queue") + self.assertEquals(queue.fetch_potential_patch_ids(), [103]) + queue.status_server() + self.assertTrue(queue.is_terminal_status("Pass")) + self.assertTrue(queue.is_terminal_status("Fail")) + self.assertTrue(queue.is_terminal_status("Error: Your patch exploded")) + self.assertFalse(queue.is_terminal_status("Foo")) + + +class CommitQueueTest(QueuesTest): + def test_commit_queue(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: commit-queue will discard all local changes in \"%s\"\nRunning WebKit commit-queue.\n" % os.getcwd(), + # FIXME: The commit-queue warns about bad committers twice. This is due to the fact that we access Attachment.reviewer() twice and it logs each time. + "next_work_item" : """Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +2 patches in commit-queue [197, 106] +""", + } + self.assert_queue_outputs(CommitQueue(), expected_stderr=expected_stderr) + + +class StyleQueueTest(QueuesTest): + def test_style_queue(self): + expected_stderr = { + "begin_work_queue" : "CAUTION: style-queue will discard all local changes in \"%s\"\nRunning WebKit style-queue.\n" % os.getcwd(), + "handle_unexpected_error" : "Mock error message\n", + } + self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/commands/queuestest.py b/WebKitTools/Scripts/webkitpy/commands/queuestest.py new file mode 100644 index 0000000..09d1c26 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/queuestest.py @@ -0,0 +1,99 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.bugzilla import Attachment +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture + + +class MockQueueEngine(object): + def __init__(self, name, queue): + pass + + def run(self): + pass + + +class QueuesTest(unittest.TestCase): + mock_work_item = Attachment({ + "id" : 1234, + "bug_id" : 345, + "attacher_email": "adam@example.com", + }, None) + + def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, options=Mock(), tool=MockBugzillaTool()): + if not expected_stdout: + expected_stdout = {} + if not expected_stderr: + expected_stderr = {} + if not args: + args = [] + if not work_item: + work_item = self.mock_work_item + tool.user.prompt = lambda message: "yes" + + queue.execute(options, args, tool, engine=MockQueueEngine) + + OutputCapture().assert_outputs(self, + queue.queue_log_path, + expected_stdout=expected_stdout.get("queue_log_path", ""), + expected_stderr=expected_stderr.get("queue_log_path", "")) + OutputCapture().assert_outputs(self, + queue.work_item_log_path, + args=[work_item], + expected_stdout=expected_stdout.get("work_item_log_path", ""), + expected_stderr=expected_stderr.get("work_item_log_path", "")) + OutputCapture().assert_outputs(self, + queue.begin_work_queue, + expected_stdout=expected_stdout.get("begin_work_queue", ""), + expected_stderr=expected_stderr.get("begin_work_queue", "")) + OutputCapture().assert_outputs(self, + queue.should_continue_work_queue, + expected_stdout=expected_stdout.get("should_continue_work_queue", ""), expected_stderr=expected_stderr.get("should_continue_work_queue", "")) + OutputCapture().assert_outputs(self, + queue.next_work_item, + expected_stdout=expected_stdout.get("next_work_item", ""), + expected_stderr=expected_stderr.get("next_work_item", "")) + OutputCapture().assert_outputs(self, + queue.should_proceed_with_work_item, + args=[work_item], + 
expected_stdout=expected_stdout.get("should_proceed_with_work_item", ""), + expected_stderr=expected_stderr.get("should_proceed_with_work_item", "")) + OutputCapture().assert_outputs(self, + queue.process_work_item, + args=[work_item], + expected_stdout=expected_stdout.get("process_work_item", ""), + expected_stderr=expected_stderr.get("process_work_item", "")) + OutputCapture().assert_outputs(self, + queue.handle_unexpected_error, + args=[work_item, "Mock error message"], + expected_stdout=expected_stdout.get("handle_unexpected_error", ""), + expected_stderr=expected_stderr.get("handle_unexpected_error", "")) diff --git a/WebKitTools/Scripts/webkitpy/commands/upload.py b/WebKitTools/Scripts/webkitpy/commands/upload.py new file mode 100644 index 0000000..8d23d8b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/upload.py @@ -0,0 +1,406 @@ +#!/usr/bin/env python +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
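(The QueuesTest harness above drives each queue phase through OutputCapture and compares the captured stdout/stderr against the expected dictionaries. OutputCapture itself is not shown in this hunk; a rough sketch of the underlying idea, an assumption about its mechanics rather than its actual API, is to swap the standard streams for StringIO buffers around the call:)

    import sys
    from StringIO import StringIO

    def capture_outputs(function, args=None):
        # Returns (return_value, stdout_text, stderr_text) for one call.
        args = args or []
        saved_stdout, saved_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = StringIO(), StringIO()
        try:
            return_value = function(*args)
            return return_value, sys.stdout.getvalue(), sys.stderr.getvalue()
        finally:
            sys.stdout, sys.stderr = saved_stdout, saved_stderr
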
+ +import os +import re +import StringIO +import sys + +from optparse import make_option + +import webkitpy.steps as steps + +from webkitpy.bugzilla import parse_bug_id +from webkitpy.commands.abstractsequencedcommand import AbstractSequencedCommand +from webkitpy.comments import bug_comment_from_svn_revision +from webkitpy.committers import CommitterList +from webkitpy.grammar import pluralize +from webkitpy.webkit_logging import error, log +from webkitpy.mock import Mock +from webkitpy.multicommandtool import AbstractDeclarativeCommand + +class CommitMessageForCurrentDiff(AbstractDeclarativeCommand): + name = "commit-message" + help_text = "Print a commit message suitable for the uncommitted changes" + + def execute(self, options, args, tool): + os.chdir(tool.scm().checkout_root) + print "%s" % tool.scm().commit_message_for_this_commit().message() + + +class AssignToCommitter(AbstractDeclarativeCommand): + name = "assign-to-committer" + help_text = "Assign bug to whoever attached the most recent r+'d patch" + + def _assign_bug_to_last_patch_attacher(self, bug_id): + committers = CommitterList() + bug = self.tool.bugs.fetch_bug(bug_id) + assigned_to_email = bug.assigned_to_email() + if assigned_to_email != self.tool.bugs.unassigned_email: + log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email))) + return + + reviewed_patches = bug.reviewed_patches() + if not reviewed_patches: + log("Bug %s has no non-obsolete patches, ignoring." % bug_id) + return + latest_patch = reviewed_patches[-1] + attacher_email = latest_patch.attacher_email() + committer = committers.committer_by_email(attacher_email) + if not committer: + log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id)) + return + + reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name) + self.tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message) + + def execute(self, options, args, tool): + for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list(): + self._assign_bug_to_last_patch_attacher(bug_id) + + +class ObsoleteAttachments(AbstractSequencedCommand): + name = "obsolete-attachments" + help_text = "Mark all attachments on a bug as obsolete" + argument_names = "BUGID" + steps = [ + steps.ObsoletePatches, + ] + + def _prepare_state(self, options, args, tool): + return { "bug_id" : args[0] } + + +class AbstractPatchUploadingCommand(AbstractSequencedCommand): + def _bug_id(self, args, tool, state): + # Perfer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs). 
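(parse_bug_id() comes from webkitpy.bugzilla and is not shown in this hunk; the fallback below scans the generated diff, including its ChangeLog entries, for a bug URL. A hypothetical stand-in, shown only to illustrate the idea and not the real matching rules:)

    import re

    def parse_bug_id_sketch(text):
        # Hypothetical approximation; the real logic lives in bugzilla.py.
        match = re.search(r"https?://bugs\.webkit\.org/show_bug\.cgi\?id=(\d+)", text)
        if not match:
            return None
        return int(match.group(1))

    parse_bug_id_sketch("Reviewed at https://bugs.webkit.org/show_bug.cgi?id=33395")  # 33395
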
+ bug_id = args and args[0] + if not bug_id: + state["diff"] = tool.scm().create_patch() + bug_id = parse_bug_id(state["diff"]) + return bug_id + + def _prepare_state(self, options, args, tool): + state = {} + state["bug_id"] = self._bug_id(args, tool, state) + if not state["bug_id"]: + error("No bug id passed and no bug url found in diff.") + return state + + +class Post(AbstractPatchUploadingCommand): + name = "post" + help_text = "Attach the current working directory diff to a bug as a patch file" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.CheckStyle, + steps.ConfirmDiff, + steps.ObsoletePatches, + steps.PostDiff, + ] + + +class LandSafely(AbstractPatchUploadingCommand): + name = "land-safely" + help_text = "Land the current diff via the commit-queue (Experimental)" + argument_names = "[BUGID]" + steps = [ + steps.UpdateChangeLogsWithReviewer, + steps.ObsoletePatches, + steps.PostDiffForCommit, + ] + + +class Prepare(AbstractSequencedCommand): + name = "prepare" + help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.PromptForBugOrTitle, + steps.CreateBug, + steps.PrepareChangeLog, + ] + + def _prepare_state(self, options, args, tool): + bug_id = args and args[0] + return { "bug_id" : bug_id } + + +class Upload(AbstractPatchUploadingCommand): + name = "upload" + help_text = "Automates the process of uploading a patch for review" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.CheckStyle, + steps.PromptForBugOrTitle, + steps.CreateBug, + steps.PrepareChangeLog, + steps.EditChangeLog, + steps.ConfirmDiff, + steps.ObsoletePatches, + steps.PostDiff, + ] + long_help = """upload uploads the current diff to bugs.webkit.org. + If no bug id is provided, upload will create a bug. + If the current diff does not have a ChangeLog, upload + will prepare a ChangeLog. 
Once a patch is read, upload + will open the ChangeLogs for editing using the command in the + EDITOR environment variable and will display the diff using the + command in the PAGER environment variable.""" + + def _prepare_state(self, options, args, tool): + state = {} + state["bug_id"] = self._bug_id(args, tool, state) + return state + + +class EditChangeLogs(AbstractSequencedCommand): + name = "edit-changelogs" + help_text = "Opens modified ChangeLogs in $EDITOR" + show_in_main_help = True + steps = [ + steps.EditChangeLog, + ] + + +class PostCommits(AbstractDeclarativeCommand): + name = "post-commits" + help_text = "Attach a range of local commits to bugs as patch files" + argument_names = "COMMITISH" + + def __init__(self): + options = [ + make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), + make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."), + make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"), + steps.Options.obsolete_patches, + steps.Options.review, + steps.Options.request_commit, + ] + AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True) + + def _comment_text_for_commit(self, options, commit_message, tool, commit_id): + comment_text = None + if (options.add_log_as_comment): + comment_text = commit_message.body(lstrip=True) + comment_text += "---\n" + comment_text += tool.scm().files_changed_summary_for_commit(commit_id) + return comment_text + + def _diff_file_for_commit(self, tool, commit_id): + diff = tool.scm().create_patch_from_local_commit(commit_id) + return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object + + def execute(self, options, args, tool): + commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) + if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is. + error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids)))) + + have_obsoleted_patches = set() + for commit_id in commit_ids: + commit_message = tool.scm().commit_message_for_local_commit(commit_id) + + # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs). + bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id)) + if not bug_id: + log("Skipping %s: No bug id found in commit or specified with --bug-id." 
% commit_id) + continue + + if options.obsolete_patches and bug_id not in have_obsoleted_patches: + state = { "bug_id": bug_id } + steps.ObsoletePatches(tool, options).run(state) + have_obsoleted_patches.add(bug_id) + + diff_file = self._diff_file_for_commit(tool, commit_id) + description = options.description or commit_message.description(lstrip=True, strip_url=True) + comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id) + tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + +class MarkBugFixed(AbstractDeclarativeCommand): + name = "mark-bug-fixed" + help_text = "Mark the specified bug as fixed" + argument_names = "[SVN_REVISION]" + def __init__(self): + options = [ + make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), + make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."), + make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."), + make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def _fetch_commit_log(self, tool, svn_revision): + if not svn_revision: + return tool.scm().last_svn_commit_log() + return tool.scm().svn_commit_log(svn_revision) + + def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision): + commit_log = self._fetch_commit_log(tool, svn_revision) + + if not bug_id: + bug_id = parse_bug_id(commit_log) + + if not svn_revision: + match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE) + if match: + svn_revision = match.group('svn_revision') + + if not bug_id or not svn_revision: + not_found = [] + if not bug_id: + not_found.append("bug id") + if not svn_revision: + not_found.append("svn revision") + error("Could not find %s on command-line or in %s." + % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit")) + + return (bug_id, svn_revision) + + def execute(self, options, args, tool): + bug_id = options.bug_id + + svn_revision = args and args[0] + if svn_revision: + if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE): + svn_revision = svn_revision[1:] + if not re.match("^[0-9]+$", svn_revision): + error("Invalid svn revision: '%s'" % svn_revision) + + needs_prompt = False + if not bug_id or not svn_revision: + needs_prompt = True + (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision) + + log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"])) + log("Revision: %s" % svn_revision) + + if options.open_bug: + tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id)) + + if needs_prompt: + if not tool.user.confirm("Is this correct?"): + exit(1) + + bug_comment = bug_comment_from_svn_revision(svn_revision) + if options.comment: + bug_comment = "%s\n\n%s" % (options.comment, bug_comment) + + if options.update_only: + log("Adding comment to Bug %s." % bug_id) + tool.bugs.post_comment_to_bug(bug_id, bug_comment) + else: + log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id) + tool.bugs.close_bug_as_fixed(bug_id, bug_comment) + + +# FIXME: Requires unit test. Blocking issue: too complex for now. 
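(MarkBugFixed above recovers the revision from the svn commit log with the regular expression "^r(?P<svn_revision>\d+) \|". Applied to a typical svn log header line, the sample below being an assumed format rather than output from this repository, it behaves like this:)

    import re

    sample_commit_log = "r12345 | someone@webkit.org | 2010-01-15 12:34:56 +0000 (Fri, 15 Jan 2010) | 2 lines"
    match = re.search(r"^r(?P<svn_revision>\d+) \|", sample_commit_log, re.MULTILINE)
    print(match.group("svn_revision"))  # prints "12345"
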
+class CreateBug(AbstractDeclarativeCommand): + name = "create-bug" + help_text = "Create a bug from local changes or local commits" + argument_names = "[COMMITISH]" + + def __init__(self): + options = [ + steps.Options.cc, + steps.Options.component, + make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."), + make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."), + make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def create_bug_from_commit(self, options, args, tool): + commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) + if len(commit_ids) > 3: + error("Are you sure you want to create one bug with %s patches?" % len(commit_ids)) + + commit_id = commit_ids[0] + + bug_title = "" + comment_text = "" + if options.prompt: + (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() + else: + commit_message = tool.scm().commit_message_for_local_commit(commit_id) + bug_title = commit_message.description(lstrip=True, strip_url=True) + comment_text = commit_message.body(lstrip=True) + comment_text += "---\n" + comment_text += tool.scm().files_changed_summary_for_commit(commit_id) + + diff = tool.scm().create_patch_from_local_commit(commit_id) + diff_file = StringIO.StringIO(diff) # create_bug expects a file-like object + bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + if bug_id and len(commit_ids) > 1: + options.bug_id = bug_id + options.obsolete_patches = False + # FIXME: We should pass through --no-comment switch as well. + PostCommits.execute(self, options, commit_ids[1:], tool) + + def create_bug_from_patch(self, options, args, tool): + bug_title = "" + comment_text = "" + if options.prompt: + (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() + else: + commit_message = tool.scm().commit_message_for_this_commit() + bug_title = commit_message.description(lstrip=True, strip_url=True) + comment_text = commit_message.body(lstrip=True) + + diff = tool.scm().create_patch() + diff_file = StringIO.StringIO(diff) # create_bug expects a file-like object + bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + def prompt_for_bug_title_and_comment(self): + bug_title = raw_input("Bug title: ") + print "Bug comment (hit ^D on blank line to end):" + lines = sys.stdin.readlines() + try: + sys.stdin.seek(0, os.SEEK_END) + except IOError: + # Cygwin raises an Illegal Seek (errno 29) exception when the above + # seek() call is made. Ignoring it seems to cause no harm. + # FIXME: Figure out a way to get avoid the exception in the first + # place. 
+ pass + comment_text = "".join(lines) + return (bug_title, comment_text) + + def execute(self, options, args, tool): + if len(args): + if (not tool.scm().supports_local_commits()): + error("Extra arguments not supported; patch is taken from working directory.") + self.create_bug_from_commit(options, args, tool) + else: + self.create_bug_from_patch(options, args, tool) diff --git a/WebKitTools/Scripts/webkitpy/commands/upload.pyc b/WebKitTools/Scripts/webkitpy/commands/upload.pyc Binary files differnew file mode 100644 index 0000000..a4bd81b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/upload.pyc diff --git a/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py b/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py new file mode 100644 index 0000000..33001ac --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py @@ -0,0 +1,84 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
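(prompt_for_bug_title_and_comment() above collects the comment body from stdin until end-of-file; a stripped-down version of the same input pattern, without the Cygwin seek workaround, for reference:)

    import sys

    def prompt_for_title_and_comment():
        title = raw_input("Bug title: ")
        print("Bug comment (hit ^D on a blank line to end):")
        comment = "".join(sys.stdin.readlines())
        return title, comment
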
+ +from webkitpy.commands.commandtest import CommandsTest +from webkitpy.commands.upload import * +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool + +class UploadCommandsTest(CommandsTest): + def test_commit_message_for_current_diff(self): + tool = MockBugzillaTool() + mock_commit_message_for_this_commit = Mock() + mock_commit_message_for_this_commit.message = lambda: "Mock message" + tool._scm.commit_message_for_this_commit = lambda: mock_commit_message_for_this_commit + expected_stdout = "Mock message\n" + self.assert_execute_outputs(CommitMessageForCurrentDiff(), [], expected_stdout=expected_stdout, tool=tool) + + def test_assign_to_committer(self): + tool = MockBugzillaTool() + expected_stderr = "Bug 77 is already assigned to foo@foo.com (None).\nBug 76 has no non-obsolete patches, ignoring.\n" + self.assert_execute_outputs(AssignToCommitter(), [], expected_stderr=expected_stderr, tool=tool) + tool.bugs.reassign_bug.assert_called_with(42, "eric@webkit.org", "Attachment 128 was posted by a committer and has review+, assigning to Eric Seidel for commit.") + + def test_obsolete_attachments(self): + expected_stderr = "Obsoleting 2 old patches on bug 42\n" + self.assert_execute_outputs(ObsoleteAttachments(), [42], expected_stderr=expected_stderr) + + def test_post(self): + expected_stderr = "Running check-webkit-style\nObsoleting 2 old patches on bug 42\n" + self.assert_execute_outputs(Post(), [42], expected_stderr=expected_stderr) + + def test_post(self): + expected_stderr = "Obsoleting 2 old patches on bug 42\n" + self.assert_execute_outputs(LandSafely(), [42], expected_stderr=expected_stderr) + + def test_prepare_diff_with_arg(self): + self.assert_execute_outputs(Prepare(), [42]) + + def test_prepare(self): + self.assert_execute_outputs(Prepare(), []) + + def test_upload(self): + expected_stderr = "Running check-webkit-style\nObsoleting 2 old patches on bug 42\nMOCK: user.open_url: http://example.com/42\n" + self.assert_execute_outputs(Upload(), [42], expected_stderr=expected_stderr) + + def test_mark_bug_fixed(self): + tool = MockBugzillaTool() + tool._scm.last_svn_commit_log = lambda: "r9876 |" + options = Mock() + options.bug_id = 42 + expected_stderr = """Bug: <http://example.com/42> Bug with two r+'d and cq+'d patches, one of which has an invalid commit-queue setter. +Revision: 9876 +MOCK: user.open_url: http://example.com/42 +Adding comment to Bug 42. +""" + self.assert_execute_outputs(MarkBugFixed(), [], expected_stderr=expected_stderr, tool=tool, options=options) + + def test_edit_changelog(self): + self.assert_execute_outputs(EditChangeLogs(), []) diff --git a/WebKitTools/Scripts/modules/comments.py b/WebKitTools/Scripts/webkitpy/comments.py index eeee655..77ad239 100755 --- a/WebKitTools/Scripts/modules/comments.py +++ b/WebKitTools/Scripts/webkitpy/comments.py @@ -4,7 +4,7 @@ # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -14,7 +14,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. 
-# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -27,12 +27,16 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -# A tool for automating dealing with bugzilla, posting patches, committing patches, etc. +# A tool for automating dealing with bugzilla, posting patches, committing +# patches, etc. + +from webkitpy.changelogs import view_source_url -from modules.changelogs import view_source_url def bug_comment_from_svn_revision(svn_revision): - return "Committed r%s: <%s>" % (svn_revision, view_source_url(svn_revision)) + return "Committed r%s: <%s>" % (svn_revision, + view_source_url(svn_revision)) + def bug_comment_from_commit_text(scm, commit_text): svn_revision = scm.svn_revision_from_commit_text(commit_text) diff --git a/WebKitTools/Scripts/webkitpy/comments.pyc b/WebKitTools/Scripts/webkitpy/comments.pyc Binary files differnew file mode 100644 index 0000000..ead9e58 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/comments.pyc diff --git a/WebKitTools/Scripts/modules/committers.py b/WebKitTools/Scripts/webkitpy/committers.py index d32a536..73e4172 100644 --- a/WebKitTools/Scripts/modules/committers.py +++ b/WebKitTools/Scripts/webkitpy/committers.py @@ -1,9 +1,9 @@ # Copyright (c) 2009, Google Inc. All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -13,7 +13,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -28,7 +28,9 @@ # # WebKit's Python module for committer and reviewer validation + class Committer: + def __init__(self, name, email_or_emails): self.full_name = name if isinstance(email_or_emails, str): @@ -37,34 +39,49 @@ class Committer: self.emails = email_or_emails self.can_review = False + def bugzilla_email(self): + # FIXME: We're assuming the first email is a valid bugzilla email, + # which might not be right. + return self.emails[0] + def __str__(self): return '"%s" <%s>' % (self.full_name, self.emails[0]) + class Reviewer(Committer): + def __init__(self, name, email_or_emails): Committer.__init__(self, name, email_or_emails) self.can_review = True -# This is intended as a canonical, machine-readable list of all non-reviewer committers for WebKit. -# If your name is missing here and you are a committer, please add it. No review needed. -# All reviewers are committers, so this list is only of committers who are not reviewers. + +# This is intended as a canonical, machine-readable list of all non-reviewer +# committers for WebKit. If your name is missing here and you are a committer, +# please add it. No review needed. All reviewers are committers, so this list +# is only of committers who are not reviewers. 
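(The committer and reviewer entries below are consumed through CommitterList, whose by-email lookups are what the queries and queue commands earlier in this patch rely on: committer_by_email(), reviewer_by_email(), and the new bugzilla_email() accessor. A condensed sketch of that lookup, simplified from the class further down rather than copied from it; it expects objects with the .emails and .can_review attributes of the Committer/Reviewer classes above:)

    class SimpleCommitterList(object):
        # Simplified illustration; the real CommitterList also merges reviewers
        # into the committer list and keeps a separate reviewer index.
        def __init__(self, committers):
            self._committers_by_email = {}
            for committer in committers:
                for email in committer.emails:
                    self._committers_by_email[email] = committer

        def committer_by_email(self, email):
            return self._committers_by_email.get(email)

        def reviewer_by_email(self, email):
            committer = self._committers_by_email.get(email)
            if committer and committer.can_review:
                return committer
            return None
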
+ + committers_unable_to_review = [ Committer("Aaron Boodman", "aa@chromium.org"), Committer("Adam Langley", "agl@chromium.org"), Committer("Albert J. Wong", "ajwong@chromium.org"), Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"]), + Committer("Alexander Pavlov", "apavlov@chromium.org"), Committer("Andre Boule", "aboule@apple.com"), Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"]), + Committer("Andras Becsi", "abecsi@webkit.org"), Committer("Anthony Ricaud", "rik@webkit.org"), Committer("Anton Muhin", "antonm@chromium.org"), Committer("Antonio Gomes", "tonikitoo@webkit.org"), Committer("Ben Murdoch", "benm@google.com"), Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org"]), + Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"]), Committer("Brent Fulgham", "bfulgham@webkit.org"), Committer("Brett Wilson", "brettw@chromium.org"), Committer("Brian Weinstein", "bweinstein@apple.com"), Committer("Cameron McCormack", "cam@webkit.org"), Committer("Chris Fleizach", "cfleizach@apple.com"), + Committer("Chris Jerdonek", "cjerdonek@webkit.org"), Committer("Chris Marrin", "cmarrin@apple.com"), Committer("Chris Petersen", "cpetersen@apple.com"), Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org"]), @@ -73,6 +90,7 @@ committers_unable_to_review = [ Committer("Daniel Bates", "dbates@webkit.org"), Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"]), Committer("Dean Jackson", "dino@apple.com"), + Committer("Dirk Pranke", "dpranke@chromium.org"), Committer("Drew Wilson", "atwilson@chromium.org"), Committer("Dumitru Daniliuc", "dumi@chromium.org"), Committer("Eli Fidler", "eli@staikos.net"), @@ -81,13 +99,14 @@ committers_unable_to_review = [ Committer("Eric Roman", "eroman@chromium.org"), Committer("Feng Qian", "feng@chromium.org"), Committer("Fumitoshi Ukai", "ukai@chromium.org"), + Committer("Gabor Loki", "loki@webkit.org"), Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"]), Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]), Committer("Greg Bolsinga", "bolsinga@apple.com"), Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]), + Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"]), Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]), Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"]), - Committer("Jeremy Orlow", "jorlow@chromium.org"), Committer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"]), Committer("Jian Li", "jianli@chromium.org"), Committer("John Abd-El-Malek", "jam@chromium.org"), @@ -98,45 +117,52 @@ committers_unable_to_review = [ Committer("Jungshik Shin", "jshin@chromium.org"), Committer("Keishi Hattori", "keishi@webkit.org"), Committer("Kelly Norton", "knorton@google.com"), + Committer("Kenneth Russell", "kbr@google.com"), Committer("Kent Tamura", "tkent@chromium.org"), Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"), - Committer("Laszlo Gombos", "laszlo.1.gombos@nokia.com"), Committer("Levi Weintraub", "lweintraub@apple.com"), Committer("Mads Ager", "ager@chromium.org"), Committer("Matt Lilek", ["webkit@mattlilek.com", "pewtermoose@webkit.org"]), Committer("Matt Perry", "mpcomplete@chromium.org"), Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]), Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"]), + 
Committer("Martin Robinson", ["mrobinson@webkit.org", "martin.james.robinson@gmail.com"]), Committer("Michelangelo De Simone", "michelangelo@webkit.org"), - Committer("Mike Belshe", "mike@belshe.com"), + Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]), Committer("Mike Fenton", ["mike.fenton@torchmobile.com", "mifenton@rim.com"]), Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]), Committer("Nate Chapin", "japhet@chromium.org"), Committer("Ojan Vafai", "ojan@chromium.org"), Committer("Pam Greene", "pam@chromium.org"), Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"]), + Committer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org"]), Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"]), Committer("Pierre-Olivier Latour", "pol@apple.com"), Committer("Roland Steiner", "rolandsteiner@chromium.org"), Committer("Ryosuke Niwa", "rniwa@webkit.org"), Committer("Scott Violet", "sky@chromium.org"), - Committer("Shinichiro Hamaji", "hamaji@chromium.org"), Committer("Stephen White", "senorblanco@chromium.org"), Committer("Steve Block", "steveblock@google.com"), Committer("Tony Chang", "tony@chromium.org"), Committer("Trey Matteson", "trey@usa.net"), Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]), + Committer("Victor Wang", "victorw@chromium.org"), Committer("William Siegrist", "wsiegrist@apple.com"), Committer("Yael Aharon", "yael.aharon@nokia.com"), Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]), Committer("Yong Li", ["yong.li@torchmobile.com", "yong.li.webkit@gmail.com"]), Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"), Committer("Yury Semikhatsky", "yurys@chromium.org"), + Committer("Zoltan Herczeg", "zherczeg@webkit.org"), Committer("Zoltan Horvath", "zoltan@webkit.org"), ] -# This is intended as a canonical, machine-readable list of all reviewers for WebKit. -# If your name is missing here and you are a reviewer, please add it. No review needed. + +# This is intended as a canonical, machine-readable list of all reviewers for +# WebKit. If your name is missing here and you are a reviewer, please add it. +# No review needed. 
+ + reviewers_list = [ Reviewer("Ada Chan", "adachan@apple.com"), Reviewer("Adam Barth", "abarth@webkit.org"), @@ -148,7 +174,7 @@ reviewers_list = [ Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"]), Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"]), Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com"]), - Reviewer("Ariya Hidayat", ["ariya.hidayat@trolltech.com", "ariya.hidayat@gmail.com", "ariya@webkit.org"]), + Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@webkit.org"]), Reviewer("Beth Dakin", "bdakin@apple.com"), Reviewer("Brady Eidson", "beidson@apple.com"), Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]), @@ -172,6 +198,7 @@ reviewers_list = [ Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org"]), Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"]), Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"]), + Reviewer("Jeremy Orlow", "jorlow@chromium.org"), Reviewer("John Sullivan", "sullivan@apple.com"), Reviewer("Jon Honeycutt", "jhoneycutt@apple.com"), Reviewer("Justin Garcia", "justin.garcia@apple.com"), @@ -181,6 +208,7 @@ reviewers_list = [ Reviewer("Kevin McCullough", "kmccullough@apple.com"), Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"]), Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org"]), + Reviewer("Laszlo Gombos", "laszlo.1.gombos@nokia.com"), Reviewer("Maciej Stachowiak", "mjs@apple.com"), Reviewer("Mark Rowe", "mrowe@apple.com"), Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org"]), @@ -189,8 +217,9 @@ reviewers_list = [ Reviewer("Richard Williamson", "rjw@apple.com"), Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org"]), Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"]), + Reviewer("Shinichiro Hamaji", "hamaji@chromium.org"), Reviewer("Simon Fraser", "simon.fraser@apple.com"), - Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org"]), + Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@nokia.com"]), Reviewer("Stephanie Lewis", "slewis@apple.com"), Reviewer("Steve Falkenburg", "sfalken@apple.com"), Reviewer("Tim Omernick", "timo@apple.com"), @@ -203,8 +232,12 @@ reviewers_list = [ class CommitterList: + # Committers and reviewers are passed in to allow easy testing - def __init__(self, committers=committers_unable_to_review, reviewers=reviewers_list): + + def __init__(self, + committers=committers_unable_to_review, + reviewers=reviewers_list): self._committers = committers + reviewers self._reviewers = reviewers self._committers_by_email = {} diff --git a/WebKitTools/Scripts/webkitpy/committers.pyc b/WebKitTools/Scripts/webkitpy/committers.pyc Binary files differnew file mode 100644 index 0000000..bce8c17 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/committers.pyc diff --git a/WebKitTools/Scripts/modules/committers_unittest.py b/WebKitTools/Scripts/webkitpy/committers_unittest.py index cf9f486..f5dc539 100644 --- a/WebKitTools/Scripts/modules/committers_unittest.py +++ b/WebKitTools/Scripts/webkitpy/committers_unittest.py @@ -43,6 +43,9 @@ class CommittersTest(unittest.TestCase): self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer) self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer) + # Test that the first email is assumed to be the Bugzilla email address 
(for now) + self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com') + # Test that a known committer is not returned during reviewer lookup self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None) diff --git a/WebKitTools/Scripts/webkitpy/credentials.py b/WebKitTools/Scripts/webkitpy/credentials.py new file mode 100644 index 0000000..a4d8e34 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/credentials.py @@ -0,0 +1,132 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Python module for reading stored web credentials from the OS. 
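(Credentials below asks git config for stored credentials before falling back to the keychain or prompting. A standalone sketch of that first step, assuming a prefix such as "bugzilla"; the actual prefix is supplied by the caller and is not visible in this hunk:)

    import subprocess

    def read_git_config_value(key, prefix="bugzilla"):
        # Mirrors _read_git_config() below; returns "" when the key is unset.
        process = subprocess.Popen(["git", "config", "--get", "%s.%s" % (prefix, key)],
                                   stdout=subprocess.PIPE, universal_newlines=True)
        output = process.communicate()[0]
        return output.rstrip("\n")

    username = read_git_config_value("username")
    password = read_git_config_value("password")
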
+ +import getpass +import os +import platform +import re + +from webkitpy.executive import Executive, ScriptError +from webkitpy.webkit_logging import log +from webkitpy.scm import Git + + +class Credentials(object): + + def __init__(self, host, git_prefix=None, executive=None, cwd=os.getcwd()): + self.host = host + self.git_prefix = git_prefix + self.executive = executive or Executive() + self.cwd = cwd + + def _credentials_from_git(self): + return [self._read_git_config("username"), + self._read_git_config("password")] + + def _read_git_config(self, key): + config_key = "%s.%s" % (self.git_prefix, key) if self.git_prefix \ + else key + return self.executive.run_command( + ["git", "config", "--get", config_key], + error_handler=Executive.ignore_error).rstrip('\n') + + def _keychain_value_with_label(self, label, source_text): + match = re.search("%s\"(?P<value>.+)\"" % label, + source_text, + re.MULTILINE) + if match: + return match.group('value') + + def _is_mac_os_x(self): + return platform.mac_ver()[0] + + def _parse_security_tool_output(self, security_output): + username = self._keychain_value_with_label("^\s*\"acct\"<blob>=", + security_output) + password = self._keychain_value_with_label("^password: ", + security_output) + return [username, password] + + def _run_security_tool(self, username=None): + security_command = [ + "/usr/bin/security", + "find-internet-password", + "-g", + "-s", + self.host, + ] + if username: + security_command += ["-a", username] + + log("Reading Keychain for %s account and password. " + "Click \"Allow\" to continue..." % self.host) + try: + return self.executive.run_command(security_command) + except ScriptError: + # Failed to either find a keychain entry or somekind of OS-related + # error occured (for instance, couldn't find the /usr/sbin/security + # command). + log("Could not find a keychain entry for %s." % self.host) + return None + + def _credentials_from_keychain(self, username=None): + if not self._is_mac_os_x(): + return [username, None] + + security_output = self._run_security_tool(username) + if security_output: + return self._parse_security_tool_output(security_output) + else: + return [None, None] + + def read_credentials(self): + username = None + password = None + + try: + if Git.in_working_directory(self.cwd): + (username, password) = self._credentials_from_git() + except OSError, e: + # Catch and ignore OSError exceptions such as "no such file + # or directory" (OSError errno 2), which imply that the Git + # command cannot be found/is not installed. + pass + + if not username or not password: + (username, password) = self._credentials_from_keychain(username) + + if not username: + username = raw_input("%s login: " % self.host) + if not password: + password = getpass.getpass("%s password for %s: " % (self.host, + username)) + + return [username, password] diff --git a/WebKitTools/Scripts/webkitpy/credentials.pyc b/WebKitTools/Scripts/webkitpy/credentials.pyc Binary files differnew file mode 100644 index 0000000..cd42568 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/credentials.pyc diff --git a/WebKitTools/Scripts/webkitpy/credentials_unittest.py b/WebKitTools/Scripts/webkitpy/credentials_unittest.py new file mode 100644 index 0000000..0bd5340 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/credentials_unittest.py @@ -0,0 +1,127 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import tempfile +import unittest +from webkitpy.credentials import Credentials +from webkitpy.executive import Executive +from webkitpy.outputcapture import OutputCapture +from webkitpy.mock import Mock + +class CredentialsTest(unittest.TestCase): + example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain" +class: "inet" +attributes: + 0x00000007 <blob>="bugs.webkit.org (test@webkit.org)" + 0x00000008 <blob>=<NULL> + "acct"<blob>="test@webkit.org" + "atyp"<blob>="form" + "cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000" + "crtr"<uint32>=<NULL> + "cusi"<sint32>=<NULL> + "desc"<blob>="Web form password" + "icmt"<blob>="default" + "invi"<sint32>=<NULL> + "mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000" + "nega"<sint32>=<NULL> + "path"<blob>=<NULL> + "port"<uint32>=0x00000000 + "prot"<blob>=<NULL> + "ptcl"<uint32>="htps" + "scrp"<sint32>=<NULL> + "sdmn"<blob>=<NULL> + "srvr"<blob>="bugs.webkit.org" + "type"<uint32>=<NULL> +password: "SECRETSAUCE" +""" + + def test_keychain_lookup_on_non_mac(self): + class FakeCredentials(Credentials): + def _is_mac_os_x(self): + return False + credentials = FakeCredentials("bugs.webkit.org") + self.assertEqual(credentials._is_mac_os_x(), False) + self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None]) + + def test_security_output_parse(self): + credentials = Credentials("bugs.webkit.org") + self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"]) + + def test_security_output_parse_entry_not_found(self): + credentials = Credentials("foo.example.com") + if not credentials._is_mac_os_x(): + return # This test does not run on a non-Mac. + + # Note, we ignore the captured output because it is already covered + # by the test case CredentialsTest._assert_security_call (below). 
+ outputCapture = OutputCapture() + outputCapture.capture_output() + self.assertEqual(credentials._run_security_tool(), None) + outputCapture.restore_output() + + def _assert_security_call(self, username=None): + executive_mock = Mock() + credentials = Credentials("example.com", executive=executive_mock) + + expected_stderr = "Reading Keychain for example.com account and password. Click \"Allow\" to continue...\n" + OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr) + + security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"] + if username: + security_args += ["-a", username] + executive_mock.run_command.assert_called_with(security_args) + + def test_security_calls(self): + self._assert_security_call() + self._assert_security_call(username="foo") + + def test_git_config_calls(self): + executive_mock = Mock() + credentials = Credentials("example.com", executive=executive_mock) + credentials._read_git_config("foo") + executive_mock.run_command.assert_called_with(["git", "config", "--get", "foo"], error_handler=Executive.ignore_error) + + credentials = Credentials("example.com", git_prefix="test_prefix", executive=executive_mock) + credentials._read_git_config("foo") + executive_mock.run_command.assert_called_with(["git", "config", "--get", "test_prefix.foo"], error_handler=Executive.ignore_error) + + def test_read_credentials_without_git_repo(self): + class FakeCredentials(Credentials): + def _is_mac_os_x(self): + return True + def _credentials_from_keychain(self, username): + return ["test@webkit.org", "SECRETSAUCE"] + + temp_dir_path = tempfile.mkdtemp(suffix="not_a_git_repo") + credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path) + self.assertEqual(credentials.read_credentials(), ["test@webkit.org", "SECRETSAUCE"]) + os.rmdir(temp_dir_path) + +if __name__ == '__main__': + unittest.main() diff --git a/WebKitTools/Scripts/modules/diff_parser.py b/WebKitTools/Scripts/webkitpy/diff_parser.py index 91898af..7dce7e8 100644 --- a/WebKitTools/Scripts/modules/diff_parser.py +++ b/WebKitTools/Scripts/webkitpy/diff_parser.py @@ -48,11 +48,11 @@ def git_diff_to_svn_diff(line): Args: line: A string representing a line of the diff. """ - conversion_patterns = (("^diff --git a/(.+) b/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"), + conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"), ("^new file.*", lambda matched: "\n"), ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"), - ("^--- a/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"), - ("^\+\+\+ b/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n")) + ("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"), + ("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n")) for pattern, conversion in conversion_patterns: matched = match(pattern, line) @@ -69,7 +69,7 @@ def get_diff_converter(first_diff_line): If this line is git formatted, we'll return a converter from git to SVN. 
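# A small sketch, not part of the patch, of what the relaxed "\w/" prefix in
# the conversion patterns above buys: git can emit mnemonic prefixes such as
# i/ and w/ (index / work tree) instead of a/ and b/, and the header is still
# mapped to an svn-style "Index:" line. The file name is illustrative.
import re

line = "diff --git c/WebCore/page/Page.cpp i/WebCore/page/Page.cpp\n"
matched = re.match(r"^diff --git \w/(.+) \w/(?P<FilePath>.+)", line)
print("Index: " + matched.group("FilePath"))  # -> Index: WebCore/page/Page.cpp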
""" - if match(r"^diff --git a/", first_diff_line): + if match(r"^diff --git \w/", first_diff_line): return git_diff_to_svn_diff return lambda input: input diff --git a/WebKitTools/Scripts/modules/diff_parser_unittest.py b/WebKitTools/Scripts/webkitpy/diff_parser_unittest.py index 1c806f0..7eb0eab 100644 --- a/WebKitTools/Scripts/modules/diff_parser_unittest.py +++ b/WebKitTools/Scripts/webkitpy/diff_parser_unittest.py @@ -28,6 +28,7 @@ import unittest import diff_parser +import re class DiffParserTest(unittest.TestCase): @@ -82,11 +83,11 @@ index 0000000..6db26bd @@ -0,0 +1 @@ +61a373ee739673a9dcd7bac62b9f182e \ No newline at end of file -'''.splitlines() +''' - - def test_diff_parser(self): - parser = diff_parser.DiffParser(self._PATCH) + def test_diff_parser(self, parser = None): + if not parser: + parser = diff_parser.DiffParser(self._PATCH.splitlines()) self.assertEquals(3, len(parser.files)) self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files) @@ -126,6 +127,20 @@ index 0000000..6db26bd self.assertEquals(1, len(diff.lines)) self.assertEquals((0, 1), diff.lines[0][0:2]) + def test_git_mnemonicprefix(self): + p = re.compile(r' ([a|b])/') + + prefixes = [ + { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree) + { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree) + { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex) + { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity) + { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2)) + ] + + for prefix in prefixes: + patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], self._PATCH) + self.test_diff_parser(diff_parser.DiffParser(patch.splitlines())) if __name__ == '__main__': unittest.main() diff --git a/WebKitTools/Scripts/modules/executive.py b/WebKitTools/Scripts/webkitpy/executive.py index b73e17d..50b119b 100644 --- a/WebKitTools/Scripts/modules/executive.py +++ b/WebKitTools/Scripts/webkitpy/executive.py @@ -1,10 +1,10 @@ # Copyright (c) 2009, Google Inc. All rights reserved. # Copyright (c) 2009 Apple Inc. All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -14,7 +14,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. 
-# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -32,11 +32,17 @@ import StringIO import subprocess import sys -from modules.logging import tee +from webkitpy.webkit_logging import tee class ScriptError(Exception): - def __init__(self, message=None, script_args=None, exit_code=None, output=None, cwd=None): + + def __init__(self, + message=None, + script_args=None, + exit_code=None, + output=None, + cwd=None): if not message: message = 'Failed to run "%s"' % script_args if exit_code: @@ -53,22 +59,33 @@ class ScriptError(Exception): def message_with_output(self, output_limit=500): if self.output: if output_limit and len(self.output) > output_limit: - return "%s\nLast %s characters of output:\n%s" % (self, output_limit, self.output[-output_limit:]) + return "%s\nLast %s characters of output:\n%s" % \ + (self, output_limit, self.output[-output_limit:]) return "%s\n%s" % (self, self.output) return str(self) + def command_name(self): + command_path = self.script_args + if type(command_path) is list: + command_path = command_path[0] + return os.path.basename(command_path) + -# FIXME: This should not be a global static. -# New code should use Executive.run_command directly instead def run_command(*args, **kwargs): + # FIXME: This should not be a global static. + # New code should use Executive.run_command directly instead return Executive().run_command(*args, **kwargs) class Executive(object): + def _run_command_with_teed_output(self, args, teed_output): - child_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + child_process = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) - # Use our own custom wait loop because Popen ignores a tee'd stderr/stdout. + # Use our own custom wait loop because Popen ignores a tee'd + # stderr/stdout. # FIXME: This could be improved not to flatten output to stdout. while True: output_line = child_process.stdout.readline() @@ -90,9 +107,24 @@ class Executive(object): child_out_file.close() if exit_code: - raise ScriptError(script_args=args, exit_code=exit_code, output=child_output) + raise ScriptError(script_args=args, + exit_code=exit_code, + output=child_output) + + @staticmethod + def cpu_count(): + # This API exists only in Python 2.6 and higher. :( + try: + import multiprocessing + return multiprocessing.cpu_count() + except (ImportError, NotImplementedError): + # This quantity is a lie but probably a reasonable guess for modern + # machines. + return 2 + + # Error handlers do not need to be static methods once all callers are + # updated to use an Executive object. - # Error handlers do not need to be static methods once all callers are updated to use an Executive object. @staticmethod def default_error_handler(error): raise error @@ -102,7 +134,14 @@ class Executive(object): pass # FIXME: This should be merged with run_and_throw_if_fail - def run_command(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True): + + def run_command(self, + args, + cwd=None, + input=None, + error_handler=None, + return_exit_code=False, + return_stderr=True): if hasattr(input, 'read'): # Check if the input is a file. 
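# A minimal usage sketch, not part of the patch, for the run_command API
# shown above: output is returned as a string, a non-zero exit raises
# ScriptError unless an error_handler such as Executive.ignore_error is
# supplied, and return_exit_code swaps the return value for the exit status.
# The commands below are only examples.
from webkitpy.executive import Executive

executive = Executive()
output = executive.run_command(["echo", "hello"])             # "hello\n"
status = executive.run_command(["false"],
                               error_handler=Executive.ignore_error,
                               return_exit_code=True)          # non-zero exit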
stdin = input string_to_communicate = None @@ -113,11 +152,19 @@ class Executive(object): stderr = subprocess.STDOUT else: stderr = None - process = subprocess.Popen(args, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr, cwd=cwd) + + process = subprocess.Popen(args, + stdin=stdin, + stdout=subprocess.PIPE, + stderr=stderr, + cwd=cwd) output = process.communicate(string_to_communicate)[0] exit_code = process.wait() if exit_code: - script_error = ScriptError(script_args=args, exit_code=exit_code, output=output, cwd=cwd) + script_error = ScriptError(script_args=args, + exit_code=exit_code, + output=output, + cwd=cwd) (error_handler or self.default_error_handler)(script_error) if return_exit_code: return exit_code diff --git a/WebKitTools/Scripts/webkitpy/executive.pyc b/WebKitTools/Scripts/webkitpy/executive.pyc Binary files differnew file mode 100644 index 0000000..190fabb --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/executive.pyc diff --git a/WebKitTools/Scripts/modules/commands/upload_unittest.py b/WebKitTools/Scripts/webkitpy/executive_unittest.py index 4d3f85c..f78e301 100644 --- a/WebKitTools/Scripts/modules/commands/upload_unittest.py +++ b/WebKitTools/Scripts/webkitpy/executive_unittest.py @@ -1,4 +1,5 @@ # Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,16 +28,14 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import unittest +from webkitpy.executive import Executive, run_command -from modules.commands.commandtest import CommandsTest -from modules.commands.upload import * +class ExecutiveTest(unittest.TestCase): -class UploadCommandsTest(CommandsTest): - def test_mark_fixed(self): - self.assert_execute_outputs(MarkFixed(), [43, "Test comment"]) + def test_run_command_with_bad_command(self): + def run_bad_command(): + run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True) + self.failUnlessRaises(OSError, run_bad_command) - def test_obsolete_attachments(self): - self.assert_execute_outputs(ObsoleteAttachments(), [42]) - - def test_post_diff(self): - self.assert_execute_outputs(PostDiff(), [42]) +if __name__ == '__main__': + unittest.main() diff --git a/WebKitTools/Scripts/modules/grammar.py b/WebKitTools/Scripts/webkitpy/grammar.py index dd2967a..78809e0 100644 --- a/WebKitTools/Scripts/modules/grammar.py +++ b/WebKitTools/Scripts/webkitpy/grammar.py @@ -5,7 +5,7 @@ # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -15,7 +15,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -30,13 +30,15 @@ import re + def plural(noun): - # This is a dumb plural() implementation which was just enough for our uses. 
+ # This is a dumb plural() implementation that is just enough for our uses. if re.search("h$", noun): return noun + "es" else: return noun + "s" + def pluralize(noun, count): if count != 1: noun = plural(noun) diff --git a/WebKitTools/Scripts/webkitpy/grammar.pyc b/WebKitTools/Scripts/webkitpy/grammar.pyc Binary files differnew file mode 100644 index 0000000..50edeeb --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/grammar.pyc diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py new file mode 100644 index 0000000..15f2065 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py @@ -0,0 +1,229 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to start/stop the apache http server used by layout tests.""" + +import logging +import optparse +import os +import re +import subprocess +import sys + +import http_server_base +import path_utils +import platform_utils + + +class LayoutTestApacheHttpd(http_server_base.HttpServerBase): + + def __init__(self, output_dir): + """Args: + output_dir: the absolute path to the layout test result directory + """ + self._output_dir = output_dir + self._httpd_proc = None + path_utils.maybe_make_directory(output_dir) + + self.mappings = [{'port': 8000}, + {'port': 8080}, + {'port': 8081}, + {'port': 8443, 'sslcert': True}] + + # The upstream .conf file assumed the existence of /tmp/WebKit for + # placing apache files like the lock file there. 
+ self._runtime_path = os.path.join("/tmp", "WebKit") + path_utils.maybe_make_directory(self._runtime_path) + + # The PID returned when Apache is started goes away (due to dropping + # privileges?). The proper controlling PID is written to a file in the + # apache runtime directory. + self._pid_file = os.path.join(self._runtime_path, 'httpd.pid') + + test_dir = path_utils.path_from_base('third_party', 'WebKit', + 'LayoutTests') + js_test_resources_dir = self._cygwin_safe_join(test_dir, "fast", "js", + "resources") + mime_types_path = self._cygwin_safe_join(test_dir, "http", "conf", + "mime.types") + cert_file = self._cygwin_safe_join(test_dir, "http", "conf", + "webkit-httpd.pem") + access_log = self._cygwin_safe_join(output_dir, "access_log.txt") + error_log = self._cygwin_safe_join(output_dir, "error_log.txt") + document_root = self._cygwin_safe_join(test_dir, "http", "tests") + + executable = platform_utils.apache_executable_path() + if self._is_cygwin(): + executable = self._get_cygwin_path(executable) + + cmd = [executable, + '-f', self._get_apache_config_file_path(test_dir, output_dir), + '-C', "\'DocumentRoot %s\'" % document_root, + '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir, + '-C', "\'Listen %s\'" % "127.0.0.1:8000", + '-C', "\'Listen %s\'" % "127.0.0.1:8081", + '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, + '-c', "\'CustomLog \"%s\" common\'" % access_log, + '-c', "\'ErrorLog \"%s\"\'" % error_log, + '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", + os.environ.get("USER", ""))] + + if self._is_cygwin(): + cygbin = path_utils.path_from_base('third_party', 'cygwin', 'bin') + # Not entirely sure why, but from cygwin we need to run the + # httpd command through bash. + self._start_cmd = [ + os.path.join(cygbin, 'bash.exe'), + '-c', + 'PATH=%s %s' % (self._get_cygwin_path(cygbin), " ".join(cmd)), + ] + else: + # TODO(ojan): When we get cygwin using Apache 2, use set the + # cert file for cygwin as well. + cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) + # Join the string here so that Cygwin/Windows and Mac/Linux + # can use the same code. Otherwise, we could remove the single + # quotes above and keep cmd as a sequence. + self._start_cmd = " ".join(cmd) + + def _is_cygwin(self): + return sys.platform in ("win32", "cygwin") + + def _cygwin_safe_join(self, *parts): + """Returns a platform appropriate path.""" + path = os.path.join(*parts) + if self._is_cygwin(): + return self._get_cygwin_path(path) + return path + + def _get_cygwin_path(self, path): + """Convert a Windows path to a cygwin path. + + The cygpath utility insists on converting paths that it thinks are + Cygwin root paths to what it thinks the correct roots are. So paths + such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin" + are converted to plain "/usr/bin". To avoid this, we + do the conversion manually. + + The path is expected to be an absolute path, on any drive. + """ + drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE) + + def lower_drive(matchobj): + return '/cygdrive/%s/' % matchobj.group(1).lower() + path = drive_regexp.sub(lower_drive, path) + return path.replace('\\', '/') + + def _get_apache_config_file_path(self, test_dir, output_dir): + """Returns the path to the apache config file to use. + Args: + test_dir: absolute path to the LayoutTests directory. + output_dir: absolute path to the layout test results directory. 
+ """ + httpd_config = platform_utils.apache_config_file_path() + httpd_config_copy = os.path.join(output_dir, "httpd.conf") + httpd_conf = open(httpd_config).read() + if self._is_cygwin(): + # This is a gross hack, but it lets us use the upstream .conf file + # and our checked in cygwin. This tells the server the root + # directory to look in for .so modules. It will use this path + # plus the relative paths to the .so files listed in the .conf + # file. We have apache/cygwin checked into our tree so + # people don't have to install it into their cygwin. + cygusr = path_utils.path_from_base('third_party', 'cygwin', 'usr') + httpd_conf = httpd_conf.replace('ServerRoot "/usr"', + 'ServerRoot "%s"' % self._get_cygwin_path(cygusr)) + + # TODO(ojan): Instead of writing an extra file, checkin a conf file + # upstream. Or, even better, upstream/delete all our chrome http + # tests so we don't need this special-cased DocumentRoot and then + # just use the upstream + # conf file. + chrome_document_root = path_utils.path_from_base('webkit', 'data', + 'layout_tests') + if self._is_cygwin(): + chrome_document_root = self._get_cygwin_path(chrome_document_root) + httpd_conf = (httpd_conf + + self._get_virtual_host_config(chrome_document_root, 8081)) + + f = open(httpd_config_copy, 'wb') + f.write(httpd_conf) + f.close() + + if self._is_cygwin(): + return self._get_cygwin_path(httpd_config_copy) + return httpd_config_copy + + def _get_virtual_host_config(self, document_root, port, ssl=False): + """Returns a <VirtualHost> directive block for an httpd.conf file. + It will listen to 127.0.0.1 on each of the given port. + """ + return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port, + 'DocumentRoot %s' % document_root, + ssl and 'SSLEngine On' or '', + '</VirtualHost>', '')) + + def _start_httpd_process(self): + """Starts the httpd process and returns whether there were errors.""" + # Use shell=True because we join the arguments into a string for + # the sake of Window/Cygwin and it needs quoting that breaks + # shell=False. + self._httpd_proc = subprocess.Popen(self._start_cmd, + stderr=subprocess.PIPE, + shell=True) + err = self._httpd_proc.stderr.read() + if len(err): + logging.debug(err) + return False + return True + + def start(self): + """Starts the apache http server.""" + # Stop any currently running servers. + self.stop() + + logging.debug("Starting apache http server") + server_started = self.wait_for_action(self._start_httpd_process) + if server_started: + logging.debug("Apache started. Testing ports") + server_started = self.wait_for_action( + self.is_server_running_on_all_ports) + + if server_started: + logging.debug("Server successfully started") + else: + raise Exception('Failed to start http server') + + def stop(self): + """Stops the apache http server.""" + logging.debug("Shutting down any running http servers") + httpd_pid = None + if os.path.exists(self._pid_file): + httpd_pid = int(open(self._pid_file).readline()) + path_utils.shut_down_http_server(httpd_pid) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py new file mode 100755 index 0000000..dfcb44f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py @@ -0,0 +1,279 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to help start/stop the lighttpd server used by layout tests.""" + + +import logging +import optparse +import os +import shutil +import subprocess +import sys +import tempfile +import time +import urllib + +import http_server_base +import path_utils + +class HttpdNotStarted(Exception): pass + +def remove_log_files(folder, starts_with): + files = os.listdir(folder) + for file in files: + if file.startswith(starts_with): + full_path = os.path.join(folder, file) + os.remove(full_path) + + +class Lighttpd(http_server_base.HttpServerBase): + # Webkit tests + try: + _webkit_tests = path_utils.path_from_base('third_party', 'WebKit', + 'LayoutTests', 'http', + 'tests') + _js_test_resource = path_utils.path_from_base('third_party', 'WebKit', + 'LayoutTests', 'fast', + 'js', 'resources') + except path_utils.PathNotFound: + _webkit_tests = None + _js_test_resource = None + + # Path where we can access all of the tests + _all_tests = path_utils.path_from_base('webkit', 'data', 'layout_tests') + # Self generated certificate for SSL server (for client cert get + # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt) + _pem_file = path_utils.path_from_base( + os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem') + # One mapping where we can get to everything + VIRTUALCONFIG = [{'port': 8081, 'docroot': _all_tests}] + + if _webkit_tests: + VIRTUALCONFIG.extend( + # Three mappings (one with SSL enabled) for LayoutTests http tests + [{'port': 8000, 'docroot': _webkit_tests}, + {'port': 8080, 'docroot': _webkit_tests}, + {'port': 8443, 'docroot': _webkit_tests, 'sslcert': _pem_file}]) + + def __init__(self, output_dir, background=False, port=None, + root=None, register_cygwin=None, run_background=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._register_cygwin = register_cygwin + self._run_background = run_background + if self._port: + self._port = int(self._port) + + def is_running(self): 
+ return self._process != None + + def start(self): + if self.is_running(): + raise 'Lighttpd already running' + + base_conf_file = path_utils.path_from_base('third_party', + 'WebKitTools', 'Scripts', 'webkitpy', 'layout_tests', + 'layout_package', 'lighttpd.conf') + out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf') + time_str = time.strftime("%d%b%Y-%H%M%S") + access_file_name = "access.log-" + time_str + ".txt" + access_log = os.path.join(self._output_dir, access_file_name) + log_file_name = "error.log-" + time_str + ".txt" + error_log = os.path.join(self._output_dir, log_file_name) + + # Remove old log files. We only need to keep the last ones. + remove_log_files(self._output_dir, "access.log-") + remove_log_files(self._output_dir, "error.log-") + + # Write out the config + f = file(base_conf_file, 'rb') + base_conf = f.read() + f.close() + + f = file(out_conf_file, 'wb') + f.write(base_conf) + + # Write out our cgi handlers. Run perl through env so that it + # processes the #! line and runs perl with the proper command + # line arguments. Emulate apache's mod_asis with a cat cgi handler. + f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n' + ' ".pl" => "/usr/bin/env",\n' + ' ".asis" => "/bin/cat",\n' + ' ".php" => "%s" )\n\n') % + path_utils.lighttpd_php_path()) + + # Setup log files + f.write(('server.errorlog = "%s"\n' + 'accesslog.filename = "%s"\n\n') % (error_log, access_log)) + + # Setup upload folders. Upload folder is to hold temporary upload files + # and also POST data. This is used to support XHR layout tests that + # does POST. + f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir)) + + # Setup a link to where the js test templates are stored + f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') % + (self._js_test_resource)) + + # dump out of virtual host config at the bottom. + if self._root: + if self._port: + # Have both port and root dir. + mappings = [{'port': self._port, 'docroot': self._root}] + else: + # Have only a root dir - set the ports as for LayoutTests. + # This is used in ui_tests to run http tests against a browser. + + # default set of ports as for LayoutTests but with a + # specified root. + mappings = [{'port': 8000, 'docroot': self._root}, + {'port': 8080, 'docroot': self._root}, + {'port': 8443, 'docroot': self._root, + 'sslcert': Lighttpd._pem_file}] + else: + mappings = self.VIRTUALCONFIG + for mapping in mappings: + ssl_setup = '' + if 'sslcert' in mapping: + ssl_setup = (' ssl.engine = "enable"\n' + ' ssl.pemfile = "%s"\n' % mapping['sslcert']) + + f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n' + ' server.document-root = "%s"\n' + + ssl_setup + + '}\n\n') % (mapping['port'], mapping['docroot'])) + f.close() + + executable = path_utils.lighttpd_executable_path() + module_path = path_utils.lighttpd_module_path() + start_cmd = [executable, + # Newly written config file + '-f', path_utils.path_from_base(self._output_dir, + 'lighttpd.conf'), + # Where it can find its module dynamic libraries + '-m', module_path] + + if not self._run_background: + start_cmd.append(# Don't background + '-D') + + # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the + # bug that mod_alias.so loads it from the hard coded path. 
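# For illustration only (not part of the patch): a mapping such as
# {'port': 8443, 'docroot': docroot, 'sslcert': pem} makes the loop above
# append a block of roughly this shape to the generated lighttpd.conf
# (paths are placeholders):
#
#   $SERVER["socket"] == "127.0.0.1:8443" {
#       server.document-root = "<docroot>"
#       ssl.engine = "enable"
#       ssl.pemfile = "<pem>"
#   }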
+ if sys.platform == 'darwin': + tmp_module_path = '/tmp/lighttpd/lib' + if not os.path.exists(tmp_module_path): + os.makedirs(tmp_module_path) + lib_file = 'liblightcomp.dylib' + shutil.copyfile(os.path.join(module_path, lib_file), + os.path.join(tmp_module_path, lib_file)) + + # Put the cygwin directory first in the path to find cygwin1.dll + env = os.environ + if sys.platform in ('cygwin', 'win32'): + env['PATH'] = '%s;%s' % ( + path_utils.path_from_base('third_party', 'cygwin', 'bin'), + env['PATH']) + + if sys.platform == 'win32' and self._register_cygwin: + setup_mount = path_utils.path_from_base('third_party', 'cygwin', + 'setup_mount.bat') + subprocess.Popen(setup_mount).wait() + + logging.debug('Starting http server') + self._process = subprocess.Popen(start_cmd, env=env) + + # Wait for server to start. + self.mappings = mappings + server_started = self.wait_for_action( + self.is_server_running_on_all_ports) + + # Our process terminated already + if not server_started or self._process.returncode != None: + raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.') + + logging.debug("Server successfully started") + + # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are + # probably not being flushed, etc... why doesn't our python have os.kill ? + + def stop(self, force=False): + if not force and not self.is_running(): + return + + httpd_pid = None + if self._process: + httpd_pid = self._process.pid + path_utils.shut_down_http_server(httpd_pid) + + if self._process: + self._process.wait() + self._process = None + +if '__main__' == __name__: + # Provide some command line params for starting/stopping the http server + # manually. Also used in ui_tests to run http layout tests in a browser. + option_parser = optparse.OptionParser() + option_parser.add_option('-k', '--server', + help='Server action (start|stop)') + option_parser.add_option('-p', '--port', + help='Port to listen on (overrides layout test ports)') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot (overrides layout test roots)') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", help='Register Cygwin paths (on Win try bots)') + option_parser.add_option('--run_background', action="store_true", + dest="run_background", + help='Run on background (for running as UI test)') + options, args = option_parser.parse_args() + + if not options.server: + print ('Usage: %s --server {start|stop} [--root=root_dir]' + ' [--port=port_number]' % sys.argv[0]) + else: + if (options.root is None) and (options.port is not None): + # specifying root but not port means we want httpd on default + # set of ports that LayoutTest use, but pointing to a different + # source of tests. Specifying port but no root does not seem + # meaningful. + raise 'Specifying port requires also a root.' + httpd = Lighttpd(tempfile.gettempdir(), + port=options.port, + root=options.root, + register_cygwin=options.register_cygwin, + run_background=options.run_background) + if 'start' == options.server: + httpd.start() + else: + httpd.stop(force=True) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py new file mode 100644 index 0000000..2720486 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. 
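# Command-line sketch for the __main__ block above (paths are placeholders):
#
#   python http_server.py --server start --root=/path/to/docroot --port=8080
#   python http_server.py --server stop
#
# Passing --port without --root is rejected; omitting both falls back to the
# built-in VIRTUALCONFIG port/docroot mappings.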
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Base class with common routines between the Apache and Lighttpd servers.""" + +import logging +import time +import urllib + + +class HttpServerBase(object): + + def wait_for_action(self, action): + """Repeat the action for 20 seconds or until it succeeds. Returns + whether it succeeded.""" + start_time = time.time() + while time.time() - start_time < 20: + if action(): + return True + time.sleep(1) + + return False + + def is_server_running_on_all_ports(self): + """Returns whether the server is running on all the desired ports.""" + for mapping in self.mappings: + if 'sslcert' in mapping: + http_suffix = 's' + else: + http_suffix = '' + + url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port']) + + try: + response = urllib.urlopen(url) + logging.debug("Server running at %s" % url) + except IOError: + logging.debug("Server NOT running at %s" % url) + return False + + return True diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem new file mode 100644 index 0000000..6349b78 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem @@ -0,0 +1,41 @@ +-----BEGIN CERTIFICATE----- +MIIEZDCCAkygAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMRAwDgYDVQQDEwdUZXN0 +IENBMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMN +TW91bnRhaW4gVmlldzESMBAGA1UEChMJQ2VydCBUZXN0MB4XDTA4MDcyODIyMzIy +OFoXDTEzMDcyNzIyMzIyOFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExEjAQBgNVBAoTCUNlcnQgVGVzdDESMBAGA1UEAxMJMTI3LjAuMC4xMIGf +MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU3 +3BdRCd67DFM44GRrsjDSH4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYG +qgNiV2ywxTlMj7NlN2C7SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a +3JPJe1TaIab5GwIDAQABo4HCMIG/MAkGA1UdEwQCMAAwHQYDVR0OBBYEFCYLBv5K +x5sLNVlpLh5FwTwhdDl7MIGSBgNVHSMEgYowgYeAFF3Of5nj1BlBMU/Gz7El9Vqv +45cxoWSkYjBgMRAwDgYDVQQDEwdUZXN0IENBMQswCQYDVQQGEwJVUzETMBEGA1UE +CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzESMBAGA1UEChMJ 
+Q2VydCBUZXN0ggkA1FGT1D/e2U4wDQYJKoZIhvcNAQEFBQADggIBAEtkVmLObUgk +b2cIA2S+QDtifq1UgVfBbytvR2lFmnADOR55mo0gHQG3HHqq4g034LmoVXDHhUk8 +Gb6aFiv4QubmVhLXcUelTRXwiNvGzkW7pC6Jrq105hdPjzXMKTcmiLaopm5Fqfc7 +hj5Cn1Sjspc8pdeQjrbeMdvca7KlFrGP8YkwCU2xOOX9PiN9G0966BWfjnr/fZZp ++OQVuUFHdiAZwthEMuDpAAXHqYXIsermgdOpgJaA53cf8NqBV2QGhtFgtsJCRoiu +7DKqhyRWBGyz19VIH2b7y+6qvQVxuHk19kKRM0nftw/yNcJnm7gtttespMUPsOMa +a2SD1G0hm0TND6vxaBhgR3cVqpl/qIpAdFi00Tm7hTyYE7I43zPW03t+/DpCt3Um +EMRZsQ90co5q+bcx/vQ7YAtwUh30uMb0wpibeyCwDp8cqNmSiRkEuc/FjTYes5t8 +5gR//WX1l0+qjrjusO9NmoLnq2Yk6UcioX+z+q6Z/dudGfqhLfeWD2Q0LWYA242C +d7km5Y3KAt1PJdVsof/aiVhVdddY/OIEKTRQhWEdDbosy2eh16BCKXT2FFvhNDg1 +AYFvn6I8nj9IldMJiIc3DdhacEAEzRMeRgPdzAa1griKUGknxsyTyRii8ru0WS6w +DCNrlDOVXdzYGEZooBI76BDVY0W0akjV +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU33BdRCd67DFM44GRrsjDS +H4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYGqgNiV2ywxTlMj7NlN2C7 +SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a3JPJe1TaIab5GwIDAQAB +AoGANHXu8z2YIzlhE+bwhGm8MGBpKL3qhRuKjeriqMA36tWezOw8lY4ymEAU+Ulv +BsCdaxqydQoTYou57m4TyUHEcxq9pq3H0zB0qL709DdHi/t4zbV9XIoAzC5v0/hG +9+Ca29TwC02FCw+qLkNrtwCpwOcQmc+bPxqvFu1iMiahURECQQD2I/Hi2413CMZz +TBjl8fMiVO9GhA2J0sc8Qi+YcgJakaLD9xcbaiLkTzPZDlA389C1b6Ia+poAr4YA +Ve0FFbxpAkEA2OobayyHE/QtPEqoy6NLR57jirmVBNmSWWd4lAyL5UIHIYVttJZg +8CLvbzaU/iDGwR+wKsM664rKPHEmtlyo4wJBAMeSqYO5ZOCJGu9NWjrHjM3fdAsG +8zs2zhiLya+fcU0iHIksBW5TBmt71Jw/wMc9R5J1K0kYvFml98653O5si1ECQBCk +RV4/mE1rmlzZzYFyEcB47DQkcM5ictvxGEsje0gnfKyRtAz6zI0f4QbDRUMJ+LWw +XK+rMsYHa+SfOb0b9skCQQCLdeonsIpFDv/Uv+flHISy0WA+AFkLXrRkBKh6G/OD +dMHaNevkJgUnpceVEnkrdenp5CcEoFTI17pd+nBgDm/B +-----END RSA PRIVATE KEY----- diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py new file mode 100644 index 0000000..b7b26e9 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os + +from layout_package import json_results_generator +from layout_package import path_utils +from layout_package import test_expectations +from layout_package import test_failures + + +class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator): + """A JSON results generator for layout tests.""" + + LAYOUT_TESTS_PATH = "LayoutTests" + + # Additional JSON fields. + WONTFIX = "wontfixCounts" + DEFERRED = "deferredCounts" + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_timings, expectations, result_summary, all_tests): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args: + result_summary: ResultsSummary object storing the summary of the test + results. + (see the comment of JSONResultsGenerator.__init__ for other Args) + """ + + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_file_path = os.path.join(results_file_base_path, + self.RESULTS_FILENAME) + self._expectations = expectations + + # We don't use self._skipped_tests and self._passed_tests as we + # override _InsertFailureSummaries. + + # We want relative paths to LayoutTest root for JSON output. + path_to_name = self._get_path_relative_to_layout_test_root + self._result_summary = result_summary + self._failures = dict( + (path_to_name(test), test_failures.determine_result_type(failures)) + for (test, failures) in result_summary.failures.iteritems()) + self._all_tests = [path_to_name(test) for test in all_tests] + self._test_timings = dict( + (path_to_name(test_tuple.filename), test_tuple.test_run_time) + for test_tuple in test_timings) + + self._generate_json_output() + + def _get_path_relative_to_layout_test_root(self, test): + """Returns the path of the test relative to the layout test root. + For example, for: + src/third_party/WebKit/LayoutTests/fast/forms/foo.html + We would return + fast/forms/foo.html + """ + index = test.find(self.LAYOUT_TESTS_PATH) + if index is not -1: + index += len(self.LAYOUT_TESTS_PATH) + + if index is -1: + # Already a relative path. + relativePath = test + else: + relativePath = test[index + 1:] + + # Make sure all paths are unix-style. 
+ return relativePath.replace('\\', '/') + + # override + def _convert_json_to_current_version(self, results_json): + archive_version = None + if self.VERSION_KEY in results_json: + archive_version = results_json[self.VERSION_KEY] + + super(JSONLayoutResultsGenerator, + self)._convert_json_to_current_version(results_json) + + # version 2->3 + if archive_version == 2: + for results_for_builder in results_json.itervalues(): + try: + test_results = results_for_builder[self.TESTS] + except: + continue + + for test in test_results: + # Make sure all paths are relative + test_path = self._get_path_relative_to_layout_test_root(test) + if test_path != test: + test_results[test_path] = test_results[test] + del test_results[test] + + # override + def _insert_failure_summaries(self, results_for_builder): + summary = self._result_summary + + self._insert_item_into_raw_list(results_for_builder, + len((set(summary.failures.keys()) | + summary.tests_by_expectation[test_expectations.SKIP]) & + summary.tests_by_timeline[test_expectations.NOW]), + self.FIXABLE_COUNT) + self._insert_item_into_raw_list(results_for_builder, + self._get_failure_summary_entry(test_expectations.NOW), + self.FIXABLE) + self._insert_item_into_raw_list(results_for_builder, + len(self._expectations.get_tests_with_timeline( + test_expectations.NOW)), self.ALL_FIXABLE_COUNT) + self._insert_item_into_raw_list(results_for_builder, + self._get_failure_summary_entry(test_expectations.DEFER), + self.DEFERRED) + self._insert_item_into_raw_list(results_for_builder, + self._get_failure_summary_entry(test_expectations.WONTFIX), + self.WONTFIX) + + # override + def _normalize_results_json(self, test, test_name, tests): + super(JSONLayoutResultsGenerator, self)._normalize_results_json( + test, test_name, tests) + + # Remove tests that don't exist anymore. + full_path = os.path.join(path_utils.layout_tests_dir(), test_name) + full_path = os.path.normpath(full_path) + if not os.path.exists(full_path): + del tests[test_name] + + def _get_failure_summary_entry(self, timeline): + """Creates a summary object to insert into the JSON. + + Args: + summary ResultSummary object with test results + timeline current test_expectations timeline to build entry for + (e.g., test_expectations.NOW, etc.) + """ + entry = {} + summary = self._result_summary + timeline_tests = summary.tests_by_timeline[timeline] + entry[self.SKIP_RESULT] = len( + summary.tests_by_expectation[test_expectations.SKIP] & + timeline_tests) + entry[self.PASS_RESULT] = len( + summary.tests_by_expectation[test_expectations.PASS] & + timeline_tests) + for failure_type in summary.tests_by_expectation.keys(): + if failure_type not in self.FAILURE_TO_CHAR: + continue + count = len(summary.tests_by_expectation[failure_type] & + timeline_tests) + entry[self.FAILURE_TO_CHAR[failure_type]] = count + return entry diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py new file mode 100644 index 0000000..596e1e4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py @@ -0,0 +1,418 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os +import subprocess +import sys +import time +import urllib2 +import xml.dom.minidom + +from layout_package import path_utils +from layout_package import test_expectations + +sys.path.append(path_utils.path_from_base('third_party', 'WebKit', + 'WebKitTools')) +import simplejson + + +class JSONResultsGenerator(object): + + MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 + # Min time (seconds) that will be added to the JSON. + MIN_TIME = 1 + JSON_PREFIX = "ADD_RESULTS(" + JSON_SUFFIX = ");" + PASS_RESULT = "P" + SKIP_RESULT = "X" + NO_DATA_RESULT = "N" + VERSION = 3 + VERSION_KEY = "version" + RESULTS = "results" + TIMES = "times" + BUILD_NUMBERS = "buildNumbers" + WEBKIT_SVN = "webkitRevision" + CHROME_SVN = "chromeRevision" + TIME = "secondsSinceEpoch" + TESTS = "tests" + + FIXABLE_COUNT = "fixableCount" + FIXABLE = "fixableCounts" + ALL_FIXABLE_COUNT = "allFixableCount" + + # Note that we omit test_expectations.FAIL from this list because + # it should never show up (it's a legacy input expectation, never + # an output expectation). + FAILURE_TO_CHAR = {test_expectations.CRASH: "C", + test_expectations.TIMEOUT: "T", + test_expectations.IMAGE: "I", + test_expectations.TEXT: "F", + test_expectations.MISSING: "O", + test_expectations.IMAGE_PLUS_TEXT: "Z"} + FAILURE_CHARS = FAILURE_TO_CHAR.values() + + RESULTS_FILENAME = "results.json" + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_timings, failures, passed_tests, skipped_tests, all_tests): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args + builder_name: the builder name (e.g. Webkit). + build_name: the build name (e.g. webkit-rel). + build_number: the build number. + results_file_base_path: Absolute path to the directory containing the + results json file. + builder_base_url: the URL where we have the archived test results. + test_timings: Map of test name to a test_run-time. 
+ failures: Map of test name to a failure type (of test_expectations). + passed_tests: A set containing all the passed tests. + skipped_tests: A set containing all the skipped tests. + all_tests: List of all the tests that were run. This should not + include skipped tests. + """ + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_file_path = os.path.join(results_file_base_path, + self.RESULTS_FILENAME) + self._test_timings = test_timings + self._failures = failures + self._passed_tests = passed_tests + self._skipped_tests = skipped_tests + self._all_tests = all_tests + + self._generate_json_output() + + def _generate_json_output(self): + """Generates the JSON output file.""" + json = self._get_json() + if json: + results_file = open(self._results_file_path, "w") + results_file.write(json) + results_file.close() + + def _get_svn_revision(self, in_directory=None): + """Returns the svn revision for the given directory. + + Args: + in_directory: The directory where svn is to be run. + """ + output = subprocess.Popen(["svn", "info", "--xml"], + cwd=in_directory, + shell=(sys.platform == 'win32'), + stdout=subprocess.PIPE).communicate()[0] + try: + dom = xml.dom.minidom.parseString(output) + return dom.getElementsByTagName('entry')[0].getAttribute( + 'revision') + except xml.parsers.expat.ExpatError: + return "" + + def _get_archived_json_results(self): + """Reads old results JSON file if it exists. + Returns (archived_results, error) tuple where error is None if results + were successfully read. + """ + results_json = {} + old_results = None + error = None + + if os.path.exists(self._results_file_path): + old_results_file = open(self._results_file_path, "r") + old_results = old_results_file.read() + elif self._builder_base_url: + # Check if we have the archived JSON file on the buildbot server. + results_file_url = (self._builder_base_url + + self._build_name + "/" + self.RESULTS_FILENAME) + logging.error("Local results.json file does not exist. Grabbing " + "it off the archive at " + results_file_url) + + try: + results_file = urllib2.urlopen(results_file_url) + info = results_file.info() + old_results = results_file.read() + except urllib2.HTTPError, http_error: + # A non-4xx status code means the bot is hosed for some reason + # and we can't grab the results.json file off of it. + if (http_error.code < 400 and http_error.code >= 500): + error = http_error + except urllib2.URLError, url_error: + error = url_error + + if old_results: + # Strip the prefix and suffix so we can get the actual JSON object. + old_results = old_results[len(self.JSON_PREFIX): + len(old_results) - len(self.JSON_SUFFIX)] + + try: + results_json = simplejson.loads(old_results) + except: + logging.debug("results.json was not valid JSON. Clobbering.") + # The JSON file is not valid JSON. Just clobber the results. + results_json = {} + else: + logging.debug('Old JSON results do not exist. Starting fresh.') + results_json = {} + + return results_json, error + + def _get_json(self): + """Gets the results for the results.json file.""" + results_json, error = self._get_archived_json_results() + if error: + # If there was an error don't write a results.json + # file at all as it would lose all the information on the bot. + logging.error("Archive directory is inaccessible. 
Not modifying " + "or clobbering the results.json file: " + str(error)) + return None + + builder_name = self._builder_name + if results_json and builder_name not in results_json: + logging.debug("Builder name (%s) is not in the results.json file." + % builder_name) + + self._convert_json_to_current_version(results_json) + + if builder_name not in results_json: + results_json[builder_name] = ( + self._create_results_for_builder_json()) + + results_for_builder = results_json[builder_name] + + self._insert_generic_metadata(results_for_builder) + + self._insert_failure_summaries(results_for_builder) + + # Update the all failing tests with result type and time. + tests = results_for_builder[self.TESTS] + all_failing_tests = set(self._failures.iterkeys()) + all_failing_tests.update(tests.iterkeys()) + for test in all_failing_tests: + self._insert_test_time_and_result(test, tests) + + # Specify separators in order to get compact encoding. + results_str = simplejson.dumps(results_json, separators=(',', ':')) + return self.JSON_PREFIX + results_str + self.JSON_SUFFIX + + def _insert_failure_summaries(self, results_for_builder): + """Inserts aggregate pass/failure statistics into the JSON. + This method reads self._skipped_tests, self._passed_tests and + self._failures and inserts FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT + entries. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + """ + # Insert the number of tests that failed. + self._insert_item_into_raw_list(results_for_builder, + len(set(self._failures.keys()) | self._skipped_tests), + self.FIXABLE_COUNT) + + # Create a pass/skip/failure summary dictionary. + entry = {} + entry[self.SKIP_RESULT] = len(self._skipped_tests) + entry[self.PASS_RESULT] = len(self._passed_tests) + get = entry.get + for failure_type in self._failures.values(): + failure_char = self.FAILURE_TO_CHAR[failure_type] + entry[failure_char] = get(failure_char, 0) + 1 + + # Insert the pass/skip/failure summary dictionary. + self._insert_item_into_raw_list(results_for_builder, entry, + self.FIXABLE) + + # Insert the number of all the tests that are supposed to pass. + self._insert_item_into_raw_list(results_for_builder, + len(self._skipped_tests | self._all_tests), + self.ALL_FIXABLE_COUNT) + + def _insert_item_into_raw_list(self, results_for_builder, item, key): + """Inserts the item into the list with the given key in the results for + this builder. Creates the list if no such list exists. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + item: Number or string to insert into the list. + key: Key in results_for_builder for the list to insert into. + """ + if key in results_for_builder: + raw_list = results_for_builder[key] + else: + raw_list = [] + + raw_list.insert(0, item) + raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] + results_for_builder[key] = raw_list + + def _insert_item_run_length_encoded(self, item, encoded_results): + """Inserts the item into the run-length encoded results. + + Args: + item: String or number to insert. + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + if len(encoded_results) and item == encoded_results[0][1]: + num_results = encoded_results[0][0] + if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + encoded_results[0][0] = num_results + 1 + else: + # Use a list instead of a class for the run-length encoding since + # we want the serialized form to be concise. 
+ encoded_results.insert(0, [1, item]) + + def _insert_generic_metadata(self, results_for_builder): + """ Inserts generic metadata (such as version number, current time etc) + into the JSON. + + Args: + results_for_builder: Dictionary containing the test results for + a single builder. + """ + self._insert_item_into_raw_list(results_for_builder, + self._build_number, self.BUILD_NUMBERS) + + path_to_webkit = path_utils.path_from_base('third_party', 'WebKit', + 'WebCore') + self._insert_item_into_raw_list(results_for_builder, + self._get_svn_revision(path_to_webkit), + self.WEBKIT_SVN) + + path_to_chrome_base = path_utils.path_from_base() + self._insert_item_into_raw_list(results_for_builder, + self._get_svn_revision(path_to_chrome_base), + self.CHROME_SVN) + + self._insert_item_into_raw_list(results_for_builder, + int(time.time()), + self.TIME) + + def _insert_test_time_and_result(self, test_name, tests): + """ Insert a test item with its results to the given tests dictionary. + + Args: + tests: Dictionary containing test result entries. + """ + + result = JSONResultsGenerator.PASS_RESULT + time = 0 + + if test_name not in self._all_tests: + result = JSONResultsGenerator.NO_DATA_RESULT + + if test_name in self._failures: + result = self.FAILURE_TO_CHAR[self._failures[test_name]] + + if test_name in self._test_timings: + # Floor for now to get time in seconds. + time = int(self._test_timings[test_name]) + + if test_name not in tests: + tests[test_name] = self._create_results_and_times_json() + + thisTest = tests[test_name] + self._insert_item_run_length_encoded(result, thisTest[self.RESULTS]) + self._insert_item_run_length_encoded(time, thisTest[self.TIMES]) + self._normalize_results_json(thisTest, test_name, tests) + + def _convert_json_to_current_version(self, results_json): + """If the JSON does not match the current version, converts it to the + current version and adds in the new version number. + """ + if (self.VERSION_KEY in results_json and + results_json[self.VERSION_KEY] == self.VERSION): + return + + results_json[self.VERSION_KEY] = self.VERSION + + def _create_results_and_times_json(self): + results_and_times = {} + results_and_times[self.RESULTS] = [] + results_and_times[self.TIMES] = [] + return results_and_times + + def _create_results_for_builder_json(self): + results_for_builder = {} + results_for_builder[self.TESTS] = {} + return results_for_builder + + def _remove_items_over_max_number_of_builds(self, encoded_list): + """Removes items from the run-length encoded list after the final + item that exceeds the max number of builds to track. + + Args: + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + num_builds = 0 + index = 0 + for result in encoded_list: + num_builds = num_builds + result[0] + index = index + 1 + if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + return encoded_list[:index] + return encoded_list + + def _normalize_results_json(self, test, test_name, tests): + """ Prune tests where all runs pass or tests that no longer exist and + truncate all results to maxNumberOfBuilds. + + Args: + test: ResultsAndTimes object for this test. + test_name: Name of the test. + tests: The JSON object with all the test results for this builder. 
+ """ + test[self.RESULTS] = self._remove_items_over_max_number_of_builds( + test[self.RESULTS]) + test[self.TIMES] = self._remove_items_over_max_number_of_builds( + test[self.TIMES]) + + is_all_pass = self._is_results_all_of_type(test[self.RESULTS], + self.PASS_RESULT) + is_all_no_data = self._is_results_all_of_type(test[self.RESULTS], + self.NO_DATA_RESULT) + max_time = max([time[1] for time in test[self.TIMES]]) + + # Remove all passes/no-data from the results to reduce noise and + # filesize. If a test passes every run, but takes > MIN_TIME to run, + # don't throw away the data. + if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): + del tests[test_name] + + def _is_results_all_of_type(self, results, type): + """Returns whether all the results are of the given type + (e.g. all passes).""" + return len(results) == 1 and results[0][1] == type diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf new file mode 100644 index 0000000..d3150dd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf @@ -0,0 +1,89 @@ +server.tag = "LightTPD/1.4.19 (Win32)" +server.modules = ( "mod_accesslog", + "mod_alias", + "mod_cgi", + "mod_rewrite" ) + +# default document root required +server.document-root = "." + +# files to check for if .../ is requested +index-file.names = ( "index.php", "index.pl", "index.cgi", + "index.html", "index.htm", "default.htm" ) +# mimetype mapping +mimetype.assign = ( + ".gif" => "image/gif", + ".jpg" => "image/jpeg", + ".jpeg" => "image/jpeg", + ".png" => "image/png", + ".svg" => "image/svg+xml", + ".css" => "text/css", + ".html" => "text/html", + ".htm" => "text/html", + ".xhtml" => "application/xhtml+xml", + ".js" => "text/javascript", + ".log" => "text/plain", + ".conf" => "text/plain", + ".text" => "text/plain", + ".txt" => "text/plain", + ".dtd" => "text/xml", + ".xml" => "text/xml", + ".manifest" => "text/cache-manifest", + ) + +# Use the "Content-Type" extended attribute to obtain mime type if possible +mimetype.use-xattr = "enable" + +## +# which extensions should not be handle via static-file transfer +# +# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi +static-file.exclude-extensions = ( ".php", ".pl", ".cgi" ) + +server.bind = "localhost" +server.port = 8001 + +## virtual directory listings +dir-listing.activate = "enable" +#dir-listing.encoding = "iso-8859-2" +#dir-listing.external-css = "style/oldstyle.css" + +## enable debugging +#debug.log-request-header = "enable" +#debug.log-response-header = "enable" +#debug.log-request-handling = "enable" +#debug.log-file-not-found = "enable" + +#### SSL engine +#ssl.engine = "enable" +#ssl.pemfile = "server.pem" + +# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html) +# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess +# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html. +# See the apache rewrite rule at +# LayoutTests/http/tests/appcache/resources/intercept/.htaccess +url.rewrite-once = ( + "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php", + "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php" +) + +# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess +# to override charset for reply2.txt, reply2.xml, and reply4.txt. 
+$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" { + mimetype.assign = ( + ".txt" => "text/plain; charset=windows-1251", + ".xml" => "text/xml; charset=windows-1251" + ) +} +$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" { + mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" ) +} + +# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess +# to override mime type for wrong-content-type.manifest. +$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" { + mimetype.assign = ( ".manifest" => "text/plain" ) +} + +# Autogenerated test-specific config follows. diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py new file mode 100644 index 0000000..6c094e3 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Package that implements a stream wrapper that has 'meters' as well as +regular output. A 'meter' is a single line of text that can be erased +and rewritten repeatedly, without producing multiple lines of output. It +can be used to produce effects like progress bars. +""" + + +class MeteredStream: + """This class is a wrapper around a stream that allows you to implement + meters. + + It can be used like a stream, but calling update() will print + the string followed by only a carriage return (instead of a carriage + return and a line feed). This can be used to implement progress bars and + other sorts of meters. 
Note that anything written by update() will be + erased by a subsequent update(), write(), or flush().""" + + def __init__(self, verbose, stream): + """ + Args: + verbose: whether update is a no-op + stream: output stream to write to + """ + self._dirty = False + self._verbose = verbose + self._stream = stream + self._last_update = "" + + def write(self, txt): + """Write text directly to the stream, overwriting and resetting the + meter.""" + if self._dirty: + self.update("") + self._dirty = False + self._stream.write(txt) + + def flush(self): + """Flush any buffered output.""" + self._stream.flush() + + def update(self, str): + """Write an update to the stream that will get overwritten by the next + update() or by a write(). + + This is used for progress updates that don't need to be preserved in + the log. Note that verbose disables this routine; we have this in + case we are logging lots of output and the update()s will get lost + or won't work properly (typically because verbose streams are + redirected to files. + + TODO(dpranke): figure out if there is a way to detect if we're writing + to a stream that handles CRs correctly (e.g., terminals). That might + be a cleaner way of handling this. + """ + if self._verbose: + return + + # Print the necessary number of backspaces to erase the previous + # message. + self._stream.write("\b" * len(self._last_update)) + self._stream.write(str) + num_remaining = len(self._last_update) - len(str) + if num_remaining > 0: + self._stream.write(" " * num_remaining + "\b" * num_remaining) + self._last_update = str + self._dirty = True diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py new file mode 100644 index 0000000..26d062b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py @@ -0,0 +1,395 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
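# Editor's aside (illustrative sketch, not part of this patch): a minimal
# example of driving the MeteredStream class added in metered_stream.py
# above. The import path mirrors how sibling modules import from
# layout_package, but it is an assumption here; any writable stream works.
import sys
from layout_package.metered_stream import MeteredStream

meter = MeteredStream(verbose=False, stream=sys.stderr)
for i in range(1, 101):
    meter.update("processed %d/100 tests" % i)  # rewrites a single meter line
meter.write("done\n")  # erases the meter, then emits a normal log line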
+ +"""This package contains utility methods for manipulating paths and +filenames for test results and baselines. It also contains wrappers +of a few routines in platform_utils.py so that platform_utils.py can +be considered a 'protected' package - i.e., this file should be +the only file that ever includes platform_utils. This leads to +us including a few things that don't really have anything to do + with paths, unfortunately.""" + +import errno +import os +import stat +import sys + +import platform_utils +import platform_utils_win +import platform_utils_mac +import platform_utils_linux + +# Cache some values so we don't have to recalculate them. _basedir is +# used by PathFromBase() and caches the full (native) path to the top +# of the source tree (/src). _baseline_search_path is used by +# ExpectedBaselines() and caches the list of native paths to search +# for baseline results. +_basedir = None +_baseline_search_path = None + + +class PathNotFound(Exception): + pass + + +def layout_tests_dir(): + """Returns the fully-qualified path to the directory containing the input + data for the specified layout test.""" + return path_from_base('third_party', 'WebKit', 'LayoutTests') + + +def chromium_baseline_path(platform=None): + """Returns the full path to the directory containing expected + baseline results from chromium ports. If |platform| is None, the + currently executing platform is used. + + Note: although directly referencing individual platform_utils_* files is + usually discouraged, we allow it here so that the rebaselining tool can + pull baselines for platforms other than the host platform.""" + + # Normalize the platform string. + platform = platform_name(platform) + if platform.startswith('chromium-mac'): + return platform_utils_mac.baseline_path(platform) + elif platform.startswith('chromium-win'): + return platform_utils_win.baseline_path(platform) + elif platform.startswith('chromium-linux'): + return platform_utils_linux.baseline_path(platform) + + return platform_utils.baseline_path() + + +def webkit_baseline_path(platform): + """Returns the full path to the directory containing expected + baseline results from WebKit ports.""" + return path_from_base('third_party', 'WebKit', 'LayoutTests', + 'platform', platform) + + +def baseline_search_path(platform=None): + """Returns the list of directories to search for baselines/results for a + given platform, in order of preference. Paths are relative to the top of + the source tree. If parameter platform is None, returns the list for the + current platform that the script is running on. + + Note: although directly referencing individual platform_utils_* files is + usually discouraged, we allow it here so that the rebaselining tool can + pull baselines for platforms other than the host platform.""" + + # Normalize the platform name. + platform = platform_name(platform) + if platform.startswith('chromium-mac'): + return platform_utils_mac.baseline_search_path(platform) + elif platform.startswith('chromium-win'): + return platform_utils_win.baseline_search_path(platform) + elif platform.startswith('chromium-linux'): + return platform_utils_linux.baseline_search_path(platform) + return platform_utils.baseline_search_path() + + +def expected_baselines(filename, suffix, platform=None, all_baselines=False): + """Given a test name, finds where the baseline results are located. + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. 
This should not be None, but may be an empty string. + platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to + the current platform. + all_baselines: If True, return an ordered list of all baseline paths + for the given platform. If False, return only the first + one. + Returns + a list of ( platform_dir, results_filename ), where + platform_dir - abs path to the top of the results tree (or test tree) + results_filename - relative path from top of tree to the results file + (os.path.join of the two gives you the full path to the file, + unless None was returned.) + Return values will be in the format appropriate for the current platform + (e.g., "\\" for path separators on Windows). If the results file is not + found, then None will be returned for the directory, but the expected + relative pathname will still be returned. + """ + global _baseline_search_path + global _search_path_platform + testname = os.path.splitext(relative_test_filename(filename))[0] + + baseline_filename = testname + '-expected' + suffix + + if (_baseline_search_path is None) or (_search_path_platform != platform): + _baseline_search_path = baseline_search_path(platform) + _search_path_platform = platform + + baselines = [] + for platform_dir in _baseline_search_path: + if os.path.exists(os.path.join(platform_dir, baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if not all_baselines and baselines: + return baselines + + # If it wasn't found in a platform directory, return the expected result + # in the test directory, even if no such file actually exists. + platform_dir = layout_tests_dir() + if os.path.exists(os.path.join(platform_dir, baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if baselines: + return baselines + + return [(None, baseline_filename)] + + +def expected_filename(filename, suffix): + """Given a test name, returns an absolute path to its expected results. + + If no expected results are found in any of the searched directories, the + directory in which the test itself is located will be returned. The return + value is in the format appropriate for the platform (e.g., "\\" for + path separators on windows). + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. This should not be None, but may be an empty string. 
+ platform: the most-specific directory name to use to build the + search list of directories, e.g., 'chromium-win', or + 'chromium-mac-leopard' (we follow the WebKit format) + """ + platform_dir, baseline_filename = expected_baselines(filename, suffix)[0] + if platform_dir: + return os.path.join(platform_dir, baseline_filename) + return os.path.join(layout_tests_dir(), baseline_filename) + + +def relative_test_filename(filename): + """Provide the filename of the test relative to the layout tests + directory as a unix style path (a/b/c).""" + return _win_path_to_unix(filename[len(layout_tests_dir()) + 1:]) + + +def _win_path_to_unix(path): + """Convert a windows path to use unix-style path separators (a/b/c).""" + return path.replace('\\', '/') + +# +# Routines that are arguably platform-specific but have been made +# generic for now (they used to be in platform_utils_*) +# + + +def filename_to_uri(full_path): + """Convert a test file to a URI.""" + LAYOUTTEST_HTTP_DIR = "http/tests/" + LAYOUTTEST_WEBSOCKET_DIR = "websocket/tests/" + + relative_path = _win_path_to_unix(relative_test_filename(full_path)) + port = None + use_ssl = False + + if relative_path.startswith(LAYOUTTEST_HTTP_DIR): + # http/tests/ run off port 8000 and ssl/ off 8443 + relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] + port = 8000 + elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR): + # websocket/tests/ run off port 8880 and 9323 + # Note: the root is /, not websocket/tests/ + port = 8880 + + # Make http/tests/local run as local files. This is to mimic the + # logic in run-webkit-tests. + # TODO(jianli): Consider extending this to "media/". + if port and not relative_path.startswith("local/"): + if relative_path.startswith("ssl/"): + port += 443 + protocol = "https" + else: + protocol = "http" + return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) + + if sys.platform in ('cygwin', 'win32'): + return "file:///" + get_absolute_path(full_path) + return "file://" + get_absolute_path(full_path) + + +def get_absolute_path(path): + """Returns an absolute UNIX path.""" + return _win_path_to_unix(os.path.abspath(path)) + + +def maybe_make_directory(*path): + """Creates the specified directory if it doesn't already exist.""" + try: + os.makedirs(os.path.join(*path)) + except OSError, e: + if e.errno != errno.EEXIST: + raise + + +def path_from_base(*comps): + """Returns an absolute filename from a set of components specified + relative to the top of the source tree. If the path does not exist, + the exception PathNotFound is raised.""" + global _basedir + if _basedir == None: + # We compute the top of the source tree by finding the absolute + # path of this source file, and then climbing up three directories + # as given in subpath. If we move this file, subpath needs to be + # updated. + path = os.path.abspath(__file__) + subpath = os.path.join('third_party', 'WebKit') + _basedir = path[:path.index(subpath)] + path = os.path.join(_basedir, *comps) + if not os.path.exists(path): + raise PathNotFound('could not find %s' % (path)) + return path + + +def remove_directory(*path): + """Recursively removes a directory, even if it's marked read-only. + + Remove the directory located at *path, if it exists. + + shutil.rmtree() doesn't work on Windows if any of the files or directories + are read-only, which svn repositories and some .svn files are. We need to + be able to force the files to be writable (i.e., deletable) as we traverse + the tree. 
+ + Even with all this, Windows still sometimes fails to delete a file, citing + a permission error (maybe something to do with antivirus scans or disk + indexing). The best suggestion any of the user forums had was to wait a + bit and try again, so we do that too. It's hand-waving, but sometimes it + works. :/ + """ + file_path = os.path.join(*path) + if not os.path.exists(file_path): + return + + win32 = False + if sys.platform == 'win32': + win32 = True + # Some people don't have the APIs installed. In that case we'll do + # without. + try: + win32api = __import__('win32api') + win32con = __import__('win32con') + except ImportError: + win32 = False + + def remove_with_retry(rmfunc, path): + os.chmod(path, stat.S_IWRITE) + if win32: + win32api.SetFileAttributes(path, + win32con.FILE_ATTRIBUTE_NORMAL) + try: + return rmfunc(path) + except EnvironmentError, e: + if e.errno != errno.EACCES: + raise + print 'Failed to delete %s: trying again' % repr(path) + time.sleep(0.1) + return rmfunc(path) + else: + + def remove_with_retry(rmfunc, path): + if os.path.islink(path): + return os.remove(path) + else: + return rmfunc(path) + + for root, dirs, files in os.walk(file_path, topdown=False): + # For POSIX: making the directory writable guarantees removability. + # Windows will ignore the non-read-only bits in the chmod value. + os.chmod(root, 0770) + for name in files: + remove_with_retry(os.remove, os.path.join(root, name)) + for name in dirs: + remove_with_retry(os.rmdir, os.path.join(root, name)) + + remove_with_retry(os.rmdir, file_path) + +# +# Wrappers around platform_utils +# + + +def platform_name(platform=None): + """Returns the appropriate chromium platform name for |platform|. If + |platform| is None, returns the name of the chromium platform on the + currently running system. If |platform| is of the form 'chromium-*', + it is returned unchanged, otherwise 'chromium-' is prepended.""" + if platform == None: + return platform_utils.platform_name() + if not platform.startswith('chromium-'): + platform = "chromium-" + platform + return platform + + +def platform_version(): + return platform_utils.platform_version() + + +def lighttpd_executable_path(): + return platform_utils.lighttpd_executable_path() + + +def lighttpd_module_path(): + return platform_utils.lighttpd_module_path() + + +def lighttpd_php_path(): + return platform_utils.lighttpd_php_path() + + +def wdiff_path(): + return platform_utils.wdiff_path() + + +def test_shell_path(target): + return platform_utils.test_shell_path(target) + + +def image_diff_path(target): + return platform_utils.image_diff_path(target) + + +def layout_test_helper_path(target): + return platform_utils.layout_test_helper_path(target) + + +def fuzzy_match_path(): + return platform_utils.fuzzy_match_path() + + +def shut_down_http_server(server_pid): + return platform_utils.shut_down_http_server(server_pid) + + +def kill_all_test_shells(): + platform_utils.kill_all_test_shells() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py new file mode 100644 index 0000000..09e7b4b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Platform-specific utilities and pseudo-constants + +Any functions whose implementations or values differ from one platform to +another should be defined in their respective platform_utils_<platform>.py +modules. The appropriate one of those will be imported into this module to +provide callers with a common, platform-independent interface. + +This file should only ever be imported by layout_package.path_utils. +""" + +import sys + +# We may not support the version of Python that a user has installed (Cygwin +# especially has had problems), but we'll allow the platform utils to be +# included in any case so we don't get an import error. +if sys.platform in ('cygwin', 'win32'): + from platform_utils_win import * +elif sys.platform == 'darwin': + from platform_utils_mac import * +elif sys.platform in ('linux', 'linux2', 'freebsd7', 'openbsd4'): + from platform_utils_linux import * diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py new file mode 100644 index 0000000..87b27c7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py @@ -0,0 +1,248 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This is the Linux implementation of the layout_package.platform_utils + package. This file should only be imported by that package.""" + +import os +import signal +import subprocess +import sys +import logging + +import path_utils +import platform_utils_win + + +def platform_name(): + """Returns the name of the platform we're currently running on.""" + return 'chromium-linux' + platform_version() + + +def platform_version(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. If the platform does not distinguish between + minor versions, it returns ''.""" + return '' + + +def get_num_cores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + num_cores = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(num_cores, int) and num_cores > 0: + return num_cores + return 1 + + +def baseline_path(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = platform_name() + return path_utils.path_from_base('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + + +def baseline_search_path(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. 
Paths are relative to the top of the source tree.""" + return [baseline_path(platform), + platform_utils_win.baseline_path('chromium-win'), + path_utils.webkit_baseline_path('win'), + path_utils.webkit_baseline_path('mac')] + + +def apache_executable_path(): + """Returns the executable path to start Apache""" + path = os.path.join("/usr", "sbin", "apache2") + if os.path.exists(path): + return path + print "Unable to fine Apache executable %s" % path + _missing_apache() + + +def apache_config_file_path(): + """Returns the path to Apache config file""" + return path_utils.path_from_base("third_party", "WebKit", "LayoutTests", + "http", "conf", "apache2-debian-httpd.conf") + + +def lighttpd_executable_path(): + """Returns the executable path to start LigHTTPd""" + binpath = "/usr/sbin/lighttpd" + if os.path.exists(binpath): + return binpath + print "Unable to find LigHTTPd executable %s" % binpath + _missing_lighttpd() + + +def lighttpd_module_path(): + """Returns the library module path for LigHTTPd""" + modpath = "/usr/lib/lighttpd" + if os.path.exists(modpath): + return modpath + print "Unable to find LigHTTPd modules %s" % modpath + _missing_lighttpd() + + +def lighttpd_php_path(): + """Returns the PHP executable path for LigHTTPd""" + binpath = "/usr/bin/php-cgi" + if os.path.exists(binpath): + return binpath + print "Unable to find PHP CGI executable %s" % binpath + _missing_lighttpd() + + +def wdiff_path(): + """Path to the WDiff executable, which we assume is already installed and + in the user's $PATH.""" + return 'wdiff' + + +def image_diff_path(target): + """Path to the image_diff binary. + + Args: + target: Build target mode (debug or release)""" + return _path_from_build_results(target, 'image_diff') + + +def layout_test_helper_path(target): + """Path to the layout_test helper binary, if needed, empty otherwise""" + return '' + + +def test_shell_path(target): + """Return the platform-specific binary path for our TestShell. + + Args: + target: Build target mode (debug or release) """ + if target in ('Debug', 'Release'): + try: + debug_path = _path_from_build_results('Debug', 'test_shell') + release_path = _path_from_build_results('Release', 'test_shell') + + debug_mtime = os.stat(debug_path).st_mtime + release_mtime = os.stat(release_path).st_mtime + + if debug_mtime > release_mtime and target == 'Release' or \ + release_mtime > debug_mtime and target == 'Debug': + logging.info('\x1b[31mWarning: you are not running the most ' + 'recent test_shell binary. You need to pass ' + '--debug or not to select between Debug and ' + 'Release.\x1b[0m') + # This will fail if we don't have both a debug and release binary. + # That's fine because, in this case, we must already be running the + # most up-to-date one. + except path_utils.PathNotFound: + pass + + return _path_from_build_results(target, 'test_shell') + + +def fuzzy_match_path(): + """Return the path to the fuzzy matcher binary.""" + return path_utils.path_from_base('third_party', 'fuzzymatch', 'fuzzymatch') + + +def shut_down_http_server(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # This isn't ideal, since it could conflict with web server processes + # not started by http_server.py, but good enough for now. 
+ kill_all_process('lighttpd') + kill_all_process('apache2') + else: + try: + os.kill(server_pid, signal.SIGTERM) + #TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), + # so if kill fails on the given PID, just try to 'killall' web + # servers. + shut_down_http_server(None) + + +def kill_process(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + os.kill(pid, signal.SIGKILL) + + +def kill_all_process(process_name): + null = open(os.devnull) + subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), + process_name], stderr=null) + null.close() + + +def kill_all_test_shells(): + """Kills all instances of the test_shell binary currently running.""" + kill_all_process('test_shell') + +# +# Private helper functions +# + + +def _missing_lighttpd(): + print 'Please install using: "sudo apt-get install lighttpd php5-cgi"' + print 'For complete Linux build requirements, please see:' + print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' + sys.exit(1) + + +def _missing_apache(): + print ('Please install using: "sudo apt-get install apache2 ' + 'libapache2-mod-php5"') + print 'For complete Linux build requirements, please see:' + print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions' + sys.exit(1) + + +def _path_from_build_results(*pathies): + # FIXME(dkegel): use latest or warn if more than one found? + for dir in ["sconsbuild", "out", "xcodebuild"]: + try: + return path_utils.path_from_base(dir, *pathies) + except: + pass + raise path_utils.PathNotFound("Unable to find %s in build tree" % + (os.path.join(*pathies))) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py new file mode 100644 index 0000000..1eaa10c --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This is the Mac implementation of the layout_package.platform_utils + package. This file should only be imported by that package.""" + +import os +import platform +import signal +import subprocess + +import path_utils + + +def platform_name(): + """Returns the name of the platform we're currently running on.""" + # At the moment all chromium mac results are version-independent. At some + # point we may need to return 'chromium-mac' + PlatformVersion() + return 'chromium-mac' + + +def platform_version(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. If the platform does not distinguish between + minor versions, it returns ''.""" + os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" + if not os_version_string: + return '-leopard' + + release_version = int(os_version_string.split('.')[1]) + + # we don't support 'tiger' or earlier releases + if release_version == 5: + return '-leopard' + elif release_version == 6: + return '-snowleopard' + + return '' + + +def get_num_cores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + return int(os.popen2("sysctl -n hw.ncpu")[1].read()) + + +def baseline_path(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = platform_name() + return path_utils.path_from_base('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + +# TODO: We should add leopard and snowleopard to the list of paths to check +# once we start running the tests from snowleopard. + + +def baseline_search_path(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. Paths are relative to the top of the source tree.""" + return [baseline_path(platform), + path_utils.webkit_baseline_path('mac' + platform_version()), + path_utils.webkit_baseline_path('mac')] + + +def wdiff_path(): + """Path to the WDiff executable, which we assume is already installed and + in the user's $PATH.""" + return 'wdiff' + + +def image_diff_path(target): + """Path to the image_diff executable + + Args: + target: build type - 'Debug','Release',etc.""" + return path_utils.path_from_base('xcodebuild', target, 'image_diff') + + +def layout_test_helper_path(target): + """Path to the layout_test_helper executable, if needed, empty otherwise + + Args: + target: build type - 'Debug','Release',etc.""" + return path_utils.path_from_base('xcodebuild', target, + 'layout_test_helper') + + +def test_shell_path(target): + """Path to the test_shell executable. + + Args: + target: build type - 'Debug','Release',etc.""" + # TODO(pinkerton): make |target| happy with case-sensitive file systems. 
+ return path_utils.path_from_base('xcodebuild', target, 'TestShell.app', + 'Contents', 'MacOS', 'TestShell') + + +def apache_executable_path(): + """Returns the executable path to start Apache""" + return os.path.join("/usr", "sbin", "httpd") + + +def apache_config_file_path(): + """Returns the path to Apache config file""" + return path_utils.path_from_base("third_party", "WebKit", "LayoutTests", + "http", "conf", "apache2-httpd.conf") + + +def lighttpd_executable_path(): + """Returns the executable path to start LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'mac', + 'bin', 'lighttpd') + + +def lighttpd_module_path(): + """Returns the library module path for LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'lib') + + +def lighttpd_php_path(): + """Returns the PHP executable path for LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'bin', + 'php-cgi') + + +def shut_down_http_server(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # TODO(mmoss) This isn't ideal, since it could conflict with lighttpd + # processes not started by http_server.py, but good enough for now. + kill_all_process('lighttpd') + kill_all_process('httpd') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid file), + # so if kill fails on the given PID, just try to 'killall' web + # servers. + shut_down_http_server(None) + + +def kill_process(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + os.kill(pid, signal.SIGKILL) + + +def kill_all_process(process_name): + # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or + # -SIGNALNUMBER must come first. Example problem: + # $ killall -u $USER -TERM lighttpd + # killall: illegal option -- T + # Use of the earlier -TERM placement is just fine on 10.5. + null = open(os.devnull) + subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'), + process_name], stderr=null) + null.close() + + +def kill_all_test_shells(): + """Kills all instances of the test_shell binary currently running.""" + kill_all_process('TestShell') diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py new file mode 100644 index 0000000..3cbbec3 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This is the Linux implementation of the layout_package.platform_utils + package. This file should only be imported by that package.""" + +import os +import path_utils +import subprocess +import sys + + +def platform_name(): + """Returns the name of the platform we're currently running on.""" + # We're not ready for version-specific results yet. When we uncomment + # this, we also need to add it to the BaselineSearchPath() + return 'chromium-win' + platform_version() + + +def platform_version(): + """Returns the version string for the platform, e.g. '-vista' or + '-snowleopard'. If the platform does not distinguish between + minor versions, it returns ''.""" + winver = sys.getwindowsversion() + if winver[0] == 6 and (winver[1] == 1): + return '-7' + if winver[0] == 6 and (winver[1] == 0): + return '-vista' + if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2): + return '-xp' + return '' + + +def get_num_cores(): + """Returns the number of cores on the machine. For hyperthreaded machines, + this will be double the number of actual processors.""" + return int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) + + +def baseline_path(platform=None): + """Returns the path relative to the top of the source tree for the + baselines for the specified platform version. If |platform| is None, + then the version currently in use is used.""" + if platform is None: + platform = platform_name() + return path_utils.path_from_base('webkit', 'data', 'layout_tests', + 'platform', platform, 'LayoutTests') + + +def baseline_search_path(platform=None): + """Returns the list of directories to search for baselines/results, in + order of preference. Paths are relative to the top of the source tree.""" + dirs = [] + if platform is None: + platform = platform_name() + + if platform == 'chromium-win-xp': + dirs.append(baseline_path(platform)) + if platform in ('chromium-win-xp', 'chromium-win-vista'): + dirs.append(baseline_path('chromium-win-vista')) + dirs.append(baseline_path('chromium-win')) + dirs.append(path_utils.webkit_baseline_path('win')) + dirs.append(path_utils.webkit_baseline_path('mac')) + return dirs + + +def wdiff_path(): + """Path to the WDiff executable, whose binary is checked in on Win""" + return path_utils.path_from_base('third_party', 'cygwin', 'bin', + 'wdiff.exe') + + +def image_diff_path(target): + """Return the platform-specific binary path for the image compare util. + We use this if we can't find the binary in the default location + in path_utils. 
+ + Args: + target: Build target mode (debug or release) + """ + return _find_binary(target, 'image_diff.exe') + + +def layout_test_helper_path(target): + """Return the platform-specific binary path for the layout test helper. + We use this if we can't find the binary in the default location + in path_utils. + + Args: + target: Build target mode (debug or release) + """ + return _find_binary(target, 'layout_test_helper.exe') + + +def test_shell_path(target): + """Return the platform-specific binary path for our TestShell. + We use this if we can't find the binary in the default location + in path_utils. + + Args: + target: Build target mode (debug or release) + """ + return _find_binary(target, 'test_shell.exe') + + +def apache_executable_path(): + """Returns the executable path to start Apache""" + path = path_utils.path_from_base('third_party', 'cygwin', "usr", "sbin") + # Don't return httpd.exe since we want to use this from cygwin. + return os.path.join(path, "httpd") + + +def apache_config_file_path(): + """Returns the path to Apache config file""" + return path_utils.path_from_base("third_party", "WebKit", "LayoutTests", + "http", "conf", "cygwin-httpd.conf") + + +def lighttpd_executable_path(): + """Returns the executable path to start LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'win', + 'LightTPD.exe') + + +def lighttpd_module_path(): + """Returns the library module path for LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'lib') + + +def lighttpd_php_path(): + """Returns the PHP executable path for LigHTTPd""" + return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'php5', + 'php-cgi.exe') + + +def shut_down_http_server(server_pid): + """Shut down the lighttpd web server. Blocks until it's fully shut down. + + Args: + server_pid: The process ID of the running server. + Unused in this implementation of the method. + """ + subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + + +def kill_process(pid): + """Forcefully kill the process. + + Args: + pid: The id of the process to be killed. + """ + subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + +def kill_all_test_shells(self): + """Kills all instances of the test_shell binary currently running.""" + subprocess.Popen(('taskkill.exe', '/f', '/im', 'test_shell.exe'), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).wait() + +# +# Private helper functions. 
+# + + +def _find_binary(target, binary): + """On Windows, we look for binaries that we compile in potentially + two places: src/webkit/$target (preferably, which we get if we + built using webkit_glue.gyp), or src/chrome/$target (if compiled some + other way).""" + try: + return path_utils.path_from_base('webkit', target, binary) + except path_utils.PathNotFound: + try: + return path_utils.path_from_base('chrome', target, binary) + except path_utils.PathNotFound: + return path_utils.path_from_base('build', target, binary) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py new file mode 100644 index 0000000..f1647f7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py @@ -0,0 +1,818 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A helper class for reading in and dealing with tests expectations +for layout tests. +""" + +import logging +import os +import re +import sys +import time +import path_utils + +sys.path.append(path_utils.path_from_base('third_party', 'WebKit', + 'WebKitTools')) +import simplejson + +# Test expectation and modifier constants. 
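# Editor's aside (illustrative, not part of this patch): the tuple-unpacking
# assignment just below is a common Python 2 idiom for enum-like integer
# constants. A tiny self-contained sketch of the same pattern, using
# hypothetical names:
(EXAMPLE_PASS, EXAMPLE_FAIL, EXAMPLE_CRASH) = range(3)
assert (EXAMPLE_PASS, EXAMPLE_FAIL, EXAMPLE_CRASH) == (0, 1, 2)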
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, + DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16) + +# Test expectation file update action constants +(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) + + +class TestExpectations: + TEST_LIST = "test_expectations.txt" + + def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode, + tests_are_present=True): + """Reads the test expectations files from the given directory.""" + path = os.path.join(directory, self.TEST_LIST) + self._expected_failures = TestExpectationsFile(path, tests, platform, + is_debug_mode, is_lint_mode, tests_are_present=tests_are_present) + + # TODO(ojan): Allow for removing skipped tests when getting the list of + # tests to run, but not when getting metrics. + # TODO(ojan): Replace the Get* calls here with the more sane API exposed + # by TestExpectationsFile below. Maybe merge the two classes entirely? + + def get_expectations_json_for_all_platforms(self): + return ( + self._expected_failures.get_expectations_json_for_all_platforms()) + + def get_rebaselining_failures(self): + return (self._expected_failures.get_test_set(REBASELINE, FAIL) | + self._expected_failures.get_test_set(REBASELINE, IMAGE) | + self._expected_failures.get_test_set(REBASELINE, TEXT) | + self._expected_failures.get_test_set(REBASELINE, + IMAGE_PLUS_TEXT)) + + def get_options(self, test): + return self._expected_failures.get_options(test) + + def get_expectations(self, test): + return self._expected_failures.get_expectations(test) + + def get_expectations_string(self, test): + """Returns the expectatons for the given test as an uppercase string. + If there are no expectations for the test, then "PASS" is returned.""" + expectations = self.get_expectations(test) + retval = [] + + for expectation in expectations: + for item in TestExpectationsFile.EXPECTATIONS.items(): + if item[1] == expectation: + retval.append(item[0]) + break + + return " ".join(retval).upper() + + def get_timeline_for_test(self, test): + return self._expected_failures.get_timeline_for_test(test) + + def get_tests_with_result_type(self, result_type): + return self._expected_failures.get_tests_with_result_type(result_type) + + def get_tests_with_timeline(self, timeline): + return self._expected_failures.get_tests_with_timeline(timeline) + + def matches_an_expected_result(self, test, result): + """Returns whether we got one of the expected results for this test.""" + return (result in self._expected_failures.get_expectations(test) or + (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and + FAIL in self._expected_failures.get_expectations(test)) or + result == MISSING and self.is_rebaselining(test) or + result == SKIP and self._expected_failures.has_modifier(test, + SKIP)) + + def is_rebaselining(self, test): + return self._expected_failures.has_modifier(test, REBASELINE) + + def has_modifier(self, test, modifier): + return self._expected_failures.has_modifier(test, modifier) + + def remove_platform_from_file(self, tests, platform, backup=False): + return self._expected_failures.remove_platform_from_file(tests, + platform, + backup) + + +def strip_comments(line): + """Strips comments from a line and return None if the line is empty + or else the contents of line with leading and trailing spaces removed + and all other whitespace collapsed""" + + commentIndex = line.find('//') + if commentIndex is -1: + commentIndex = len(line) + + line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) + if line == 
'': + return None + else: + return line + + +class ModifiersAndExpectations: + """A holder for modifiers and expectations on a test that serializes to + JSON.""" + + def __init__(self, modifiers, expectations): + self.modifiers = modifiers + self.expectations = expectations + + +class ExpectationsJsonEncoder(simplejson.JSONEncoder): + """JSON encoder that can handle ModifiersAndExpectations objects. + """ + + def default(self, obj): + if isinstance(obj, ModifiersAndExpectations): + return {"modifiers": obj.modifiers, + "expectations": obj.expectations} + else: + return JSONEncoder.default(self, obj) + + +class TestExpectationsFile: + """Test expectation files consist of lines with specifications of what + to expect from layout test cases. The test cases can be directories + in which case the expectations apply to all test cases in that + directory and any subdirectory. The format of the file is along the + lines of: + + LayoutTests/fast/js/fixme.js = FAIL + LayoutTests/fast/js/flaky.js = FAIL PASS + LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS + ... + + To add other options: + SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + + SKIP: Doesn't run the test. + SLOW: The test takes a long time to run, but does not timeout indefinitely. + WONTFIX: For tests that we never intend to pass on a given platform. + DEFER: Test does not count in our statistics for the current release. + DEBUG: Expectations apply only to the debug build. + RELEASE: Expectations apply only to release build. + LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these + platforms. + + Notes: + -A test cannot be both SLOW and TIMEOUT + -A test cannot be both DEFER and WONTFIX + -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is + a migratory state that currently means either IMAGE, TEXT, or + IMAGE+TEXT. Once we have finished migrating the expectations, we will + change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT + identifier. + -A test can be included twice, but not via the same path. + -If a test is included twice, then the more precise path wins. 
+ -CRASH tests cannot be DEFER or WONTFIX + """ + + EXPECTATIONS = {'pass': PASS, + 'fail': FAIL, + 'text': TEXT, + 'image': IMAGE, + 'image+text': IMAGE_PLUS_TEXT, + 'timeout': TIMEOUT, + 'crash': CRASH, + 'missing': MISSING} + + EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), + PASS: ('pass', 'passes'), + FAIL: ('failure', 'failures'), + TEXT: ('text diff mismatch', + 'text diff mismatch'), + IMAGE: ('image mismatch', 'image mismatch'), + IMAGE_PLUS_TEXT: ('image and text mismatch', + 'image and text mismatch'), + CRASH: ('test shell crash', + 'test shell crashes'), + TIMEOUT: ('test timed out', 'tests timed out'), + MISSING: ('no expected result found', + 'no expected results found')} + + EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, + TEXT, IMAGE, FAIL, SKIP) + + BASE_PLATFORMS = ('linux', 'mac', 'win') + PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7') + + BUILD_TYPES = ('debug', 'release') + + MODIFIERS = {'skip': SKIP, + 'wontfix': WONTFIX, + 'defer': DEFER, + 'slow': SLOW, + 'rebaseline': REBASELINE, + 'none': NONE} + + TIMELINES = {'wontfix': WONTFIX, + 'now': NOW, + 'defer': DEFER} + + RESULT_TYPES = {'skip': SKIP, + 'pass': PASS, + 'fail': FAIL, + 'flaky': FLAKY} + + def __init__(self, path, full_test_list, platform, is_debug_mode, + is_lint_mode, expectations_as_str=None, suppress_errors=False, + tests_are_present=True): + """ + path: The path to the expectation file. An error is thrown if a test is + listed more than once. + full_test_list: The list of all tests to be run pending processing of + the expections for those tests. + platform: Which platform from self.PLATFORMS to filter tests for. + is_debug_mode: Whether we testing a test_shell built debug mode. + is_lint_mode: Whether this is just linting test_expecatations.txt. + expectations_as_str: Contents of the expectations file. Used instead of + the path. This makes unittesting sane. + suppress_errors: Whether to suppress lint errors. + tests_are_present: Whether the test files are present in the local + filesystem. The LTTF Dashboard uses False here to avoid having to + keep a local copy of the tree. + """ + + self._path = path + self._expectations_as_str = expectations_as_str + self._is_lint_mode = is_lint_mode + self._tests_are_present = tests_are_present + self._full_test_list = full_test_list + self._suppress_errors = suppress_errors + self._errors = [] + self._non_fatal_errors = [] + self._platform = self.to_test_platform_name(platform) + if self._platform is None: + raise Exception("Unknown platform '%s'" % (platform)) + self._is_debug_mode = is_debug_mode + + # Maps relative test paths as listed in the expectations file to a + # list of maps containing modifiers and expectations for each time + # the test is listed in the expectations file. + self._all_expectations = {} + + # Maps a test to its list of expectations. + self._test_to_expectations = {} + + # Maps a test to its list of options (string values) + self._test_to_options = {} + + # Maps a test to its list of modifiers: the constants associated with + # the options minus any bug or platform strings + self._test_to_modifiers = {} + + # Maps a test to the base path that it was listed with in the list. 
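+ # A more specific path listed later in the file overrides an earlier
+ # directory-level entry; see _already_seen_test and
+ # _clear_expectations_for_test below.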
+ self._test_list_paths = {} + + self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS) + self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS) + self._timeline_to_tests = self._dict_of_sets(self.TIMELINES) + self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES) + + self._read(self._get_iterable_expectations()) + + def _dict_of_sets(self, strings_to_constants): + """Takes a dict of strings->constants and returns a dict mapping + each constant to an empty set.""" + d = {} + for c in strings_to_constants.values(): + d[c] = set() + return d + + def _get_iterable_expectations(self): + """Returns an object that can be iterated over. Allows for not caring + about whether we're iterating over a file or a new-line separated + string.""" + if self._expectations_as_str: + iterable = [x + "\n" for x in + self._expectations_as_str.split("\n")] + # Strip final entry if it's empty to avoid added in an extra + # newline. + if iterable[len(iterable) - 1] == "\n": + return iterable[:len(iterable) - 1] + return iterable + else: + return open(self._path) + + def to_test_platform_name(self, name): + """Returns the test expectation platform that will be used for a + given platform name, or None if there is no match.""" + chromium_prefix = 'chromium-' + name = name.lower() + if name.startswith(chromium_prefix): + name = name[len(chromium_prefix):] + if name in self.PLATFORMS: + return name + return None + + def get_test_set(self, modifier, expectation=None, include_skips=True): + if expectation is None: + tests = self._modifier_to_tests[modifier] + else: + tests = (self._expectation_to_tests[expectation] & + self._modifier_to_tests[modifier]) + + if not include_skips: + tests = tests - self.get_test_set(SKIP, expectation) + + return tests + + def get_tests_with_result_type(self, result_type): + return self._result_type_to_tests[result_type] + + def get_tests_with_timeline(self, timeline): + return self._timeline_to_tests[timeline] + + def get_options(self, test): + """This returns the entire set of options for the given test + (the modifiers plus the BUGXXXX identifier). This is used by the + LTTF dashboard.""" + return self._test_to_options[test] + + def has_modifier(self, test, modifier): + return test in self._modifier_to_tests[modifier] + + def get_expectations(self, test): + return self._test_to_expectations[test] + + def get_expectations_json_for_all_platforms(self): + # Specify separators in order to get compact encoding. + return ExpectationsJsonEncoder(separators=(',', ':')).encode( + self._all_expectations) + + def contains(self, test): + return test in self._test_to_expectations + + def remove_platform_from_file(self, tests, platform, backup=False): + """Remove the platform option from test expectations file. + + If a test is in the test list and has an option that matches the given + platform, remove the matching platform and save the updated test back + to the file. If no other platforms remaining after removal, delete the + test from the file. + + Args: + tests: list of tests that need to update.. + platform: which platform option to remove. 
+ backup: if true, the original test expectations file is saved as + [self.TEST_LIST].orig.YYYYMMDDHHMMSS + + Returns: + no + """ + + new_file = self._path + '.new' + logging.debug('Original file: "%s"', self._path) + logging.debug('New file: "%s"', new_file) + f_orig = self._get_iterable_expectations() + f_new = open(new_file, 'w') + + tests_removed = 0 + tests_updated = 0 + lineno = 0 + for line in f_orig: + lineno += 1 + action = self._get_platform_update_action(line, lineno, tests, + platform) + if action == NO_CHANGE: + # Save the original line back to the file + logging.debug('No change to test: %s', line) + f_new.write(line) + elif action == REMOVE_TEST: + tests_removed += 1 + logging.info('Test removed: %s', line) + elif action == REMOVE_PLATFORM: + parts = line.split(':') + new_options = parts[0].replace(platform.upper() + ' ', '', 1) + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.write(new_line) + tests_updated += 1 + logging.info('Test updated: ') + logging.info(' old: %s', line) + logging.info(' new: %s', new_line) + elif action == ADD_PLATFORMS_EXCEPT_THIS: + parts = line.split(':') + new_options = parts[0] + for p in self.PLATFORMS: + p = p.upper(); + # This is a temp solution for rebaselining tool. + # Do not add tags WIN-7 and WIN-VISTA to test expectations + # if the original line does not specify the platform option. + # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have + # reliable Win 7 and Win Vista buildbots setup. + if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'): + new_options += p + ' ' + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.write(new_line) + tests_updated += 1 + logging.info('Test updated: ') + logging.info(' old: %s', line) + logging.info(' new: %s', new_line) + else: + logging.error('Unknown update action: %d; line: %s', + action, line) + + logging.info('Total tests removed: %d', tests_removed) + logging.info('Total tests updated: %d', tests_updated) + + f_orig.close() + f_new.close() + + if backup: + date_suffix = time.strftime('%Y%m%d%H%M%S', + time.localtime(time.time())) + backup_file = ('%s.orig.%s' % (self._path, date_suffix)) + if os.path.exists(backup_file): + os.remove(backup_file) + logging.info('Saving original file to "%s"', backup_file) + os.rename(self._path, backup_file) + else: + os.remove(self._path) + + logging.debug('Saving new file to "%s"', self._path) + os.rename(new_file, self._path) + return True + + def parse_expectations_line(self, line, lineno): + """Parses a line from test_expectations.txt and returns a tuple + with the test path, options as a list, expectations as a list.""" + line = strip_comments(line) + if not line: + return (None, None, None) + + options = [] + if line.find(":") is -1: + test_and_expectation = line.split("=") + else: + parts = line.split(":") + options = self._get_options_list(parts[0]) + test_and_expectation = parts[1].split('=') + + test = test_and_expectation[0].strip() + if (len(test_and_expectation) is not 2): + self._add_error(lineno, "Missing expectations.", + test_and_expectation) + expectations = None + else: + expectations = self._get_options_list(test_and_expectation[1]) + + return (test, options, expectations) + + def _get_platform_update_action(self, line, lineno, tests, platform): + """Check the platform option and return the action needs to be taken. + + Args: + line: current line in test expectations file. + lineno: current line number of line + tests: list of tests that need to update.. + platform: which platform option to remove. 
+ + Returns: + NO_CHANGE: no change to the line (comments, test not in the list etc) + REMOVE_TEST: remove the test from file. + REMOVE_PLATFORM: remove this platform option from the test. + ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. + """ + test, options, expectations = self.parse_expectations_line(line, + lineno) + if not test or test not in tests: + return NO_CHANGE + + has_any_platform = False + for option in options: + if option in self.PLATFORMS: + has_any_platform = True + if not option == platform: + return REMOVE_PLATFORM + + # If there is no platform specified, then it means apply to all + # platforms. Return the action to add all the platforms except this + # one. + if not has_any_platform: + return ADD_PLATFORMS_EXCEPT_THIS + + return REMOVE_TEST + + def _has_valid_modifiers_for_current_platform(self, options, lineno, + test_and_expectations, modifiers): + """Returns true if the current platform is in the options list or if + no platforms are listed and if there are no fatal errors in the + options list. + + Args: + options: List of lowercase options. + lineno: The line in the file where the test is listed. + test_and_expectations: The path and expectations for the test. + modifiers: The set to populate with modifiers. + """ + has_any_platform = False + has_bug_id = False + for option in options: + if option in self.MODIFIERS: + modifiers.add(option) + elif option in self.PLATFORMS: + has_any_platform = True + elif option.startswith('bug'): + has_bug_id = True + elif option not in self.BUILD_TYPES: + self._add_error(lineno, 'Invalid modifier for test: %s' % + option, test_and_expectations) + + if has_any_platform and not self._match_platform(options): + return False + + if not has_bug_id and 'wontfix' not in options: + # TODO(ojan): Turn this into an AddError call once all the + # tests have BUG identifiers. + self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.', + test_and_expectations) + + if 'release' in options or 'debug' in options: + if self._is_debug_mode and 'debug' not in options: + return False + if not self._is_debug_mode and 'release' not in options: + return False + + if 'wontfix' in options and 'defer' in options: + self._add_error(lineno, 'Test cannot be both DEFER and WONTFIX.', + test_and_expectations) + + if self._is_lint_mode and 'rebaseline' in options: + self._add_error(lineno, + 'REBASELINE should only be used for running rebaseline.py. ' + 'Cannot be checked in.', test_and_expectations) + + return True + + def _match_platform(self, options): + """Match the list of options against our specified platform. If any + of the options prefix-match self._platform, return True. This handles + the case where a test is marked WIN and the platform is WIN-VISTA. + + Args: + options: list of options + """ + for opt in options: + if self._platform.startswith(opt): + return True + return False + + def _add_to_all_expectations(self, test, options, expectations): + # Make all paths unix-style so the dashboard doesn't need to. 
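+ # Each entry serializes via ExpectationsJsonEncoder to something like
+ # {"fast/js/foo.js": [{"modifiers": "BUG123 WIN", "expectations": "FAIL PASS"}]}
+ # (illustrative test path and options).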
+ test = test.replace('\\', '/') + if not test in self._all_expectations: + self._all_expectations[test] = [] + self._all_expectations[test].append( + ModifiersAndExpectations(options, expectations)) + + def _read(self, expectations): + """For each test in an expectations iterable, generate the + expectations for it.""" + lineno = 0 + for line in expectations: + lineno += 1 + + test_list_path, options, expectations = \ + self.parse_expectations_line(line, lineno) + if not expectations: + continue + + self._add_to_all_expectations(test_list_path, + " ".join(options).upper(), + " ".join(expectations).upper()) + + modifiers = set() + if options and not self._has_valid_modifiers_for_current_platform( + options, lineno, test_list_path, modifiers): + continue + + expectations = self._parse_expectations(expectations, lineno, + test_list_path) + + if 'slow' in options and TIMEOUT in expectations: + self._add_error(lineno, + 'A test can not be both slow and timeout. If it times out ' + 'indefinitely, then it should be just timeout.', + test_list_path) + + full_path = os.path.join(path_utils.layout_tests_dir(), + test_list_path) + full_path = os.path.normpath(full_path) + # WebKit's way of skipping tests is to add a -disabled suffix. + # So we should consider the path existing if the path or the + # -disabled version exists. + if (self._tests_are_present and not os.path.exists(full_path) + and not os.path.exists(full_path + '-disabled')): + # Log a non fatal error here since you hit this case any + # time you update test_expectations.txt without syncing + # the LayoutTests directory + self._log_non_fatal_error(lineno, 'Path does not exist.', + test_list_path) + continue + + if not self._full_test_list: + tests = [test_list_path] + else: + tests = self._expand_tests(test_list_path) + + self._add_tests(tests, expectations, test_list_path, lineno, + modifiers, options) + + if not self._suppress_errors and ( + len(self._errors) or len(self._non_fatal_errors)): + if self._is_debug_mode: + build_type = 'DEBUG' + else: + build_type = 'RELEASE' + print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \ + % (self._platform.upper(), build_type) + + for error in self._non_fatal_errors: + logging.error(error) + if len(self._errors): + raise SyntaxError('\n'.join(map(str, self._errors))) + + # Now add in the tests that weren't present in the expectations file + expectations = set([PASS]) + options = [] + modifiers = [] + if self._full_test_list: + for test in self._full_test_list: + if not test in self._test_list_paths: + self._add_test(test, modifiers, expectations, options) + + def _get_options_list(self, listString): + return [part.strip().lower() for part in listString.strip().split(' ')] + + def _parse_expectations(self, expectations, lineno, test_list_path): + result = set() + for part in expectations: + if not part in self.EXPECTATIONS: + self._add_error(lineno, 'Unsupported expectation: %s' % part, + test_list_path) + continue + expectation = self.EXPECTATIONS[part] + result.add(expectation) + return result + + def _expand_tests(self, test_list_path): + """Convert the test specification to an absolute, normalized + path and make sure directories end with the OS path separator.""" + path = os.path.join(path_utils.layout_tests_dir(), test_list_path) + path = os.path.normpath(path) + path = self._fix_dir(path) + + result = [] + for test in self._full_test_list: + if test.startswith(path): + result.append(test) + return result + + def _fix_dir(self, path): + """Check to see if the path points to a directory, 
and if so, append + the directory separator if necessary.""" + if self._tests_are_present: + if os.path.isdir(path): + path = os.path.join(path, '') + else: + # If we can't check the filesystem to see if this is a directory, + # we assume that files w/o an extension are directories. + # TODO(dpranke): What happens w/ LayoutTests/css2.1 ? + if os.path.splitext(path)[1] == '': + path = os.path.join(path, '') + return path + + def _add_tests(self, tests, expectations, test_list_path, lineno, + modifiers, options): + for test in tests: + if self._already_seen_test(test, test_list_path, lineno): + continue + + self._clear_expectations_for_test(test, test_list_path) + self._add_test(test, modifiers, expectations, options) + + def _add_test(self, test, modifiers, expectations, options): + """Sets the expected state for a given test. + + This routine assumes the test has not been added before. If it has, + use _ClearExpectationsForTest() to reset the state prior to + calling this. + + Args: + test: test to add + modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) + expectations: sequence of expectations (PASS, IMAGE, etc.) + options: sequence of keywords and bug identifiers.""" + self._test_to_expectations[test] = expectations + for expectation in expectations: + self._expectation_to_tests[expectation].add(test) + + self._test_to_options[test] = options + self._test_to_modifiers[test] = set() + for modifier in modifiers: + mod_value = self.MODIFIERS[modifier] + self._modifier_to_tests[mod_value].add(test) + self._test_to_modifiers[test].add(mod_value) + + if 'wontfix' in modifiers: + self._timeline_to_tests[WONTFIX].add(test) + elif 'defer' in modifiers: + self._timeline_to_tests[DEFER].add(test) + else: + self._timeline_to_tests[NOW].add(test) + + if 'skip' in modifiers: + self._result_type_to_tests[SKIP].add(test) + elif expectations == set([PASS]): + self._result_type_to_tests[PASS].add(test) + elif len(expectations) > 1: + self._result_type_to_tests[FLAKY].add(test) + else: + self._result_type_to_tests[FAIL].add(test) + + def _clear_expectations_for_test(self, test, test_list_path): + """Remove prexisting expectations for this test. + This happens if we are seeing a more precise path + than a previous listing. + """ + if test in self._test_list_paths: + self._test_to_expectations.pop(test, '') + self._remove_from_sets(test, self._expectation_to_tests) + self._remove_from_sets(test, self._modifier_to_tests) + self._remove_from_sets(test, self._timeline_to_tests) + self._remove_from_sets(test, self._result_type_to_tests) + + self._test_list_paths[test] = os.path.normpath(test_list_path) + + def _remove_from_sets(self, test, dict): + """Removes the given test from the sets in the dictionary. + + Args: + test: test to look for + dict: dict of sets of files""" + for set_of_tests in dict.itervalues(): + if test in set_of_tests: + set_of_tests.remove(test) + + def _already_seen_test(self, test, test_list_path, lineno): + """Returns true if we've already seen a more precise path for this test + than the test_list_path. + """ + if not test in self._test_list_paths: + return False + + prev_base_path = self._test_list_paths[test] + if (prev_base_path == os.path.normpath(test_list_path)): + self._add_error(lineno, 'Duplicate expectations.', test) + return True + + # Check if we've already seen a more precise path. + return prev_base_path.startswith(os.path.normpath(test_list_path)) + + def _add_error(self, lineno, msg, path): + """Reports an error that will prevent running the tests. 
Does not + immediately raise an exception because we'd like to aggregate all the + errors so they can all be printed out.""" + self._errors.append('\nLine:%s %s %s' % (lineno, msg, path)) + + def _log_non_fatal_error(self, lineno, msg, path): + """Reports an error that will not prevent running the tests. These are + still errors, but not bad enough to warrant breaking test running.""" + self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py new file mode 100644 index 0000000..6957dea --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Classes for failures that occur during tests.""" + +import os +import test_expectations + + +def determine_result_type(failure_list): + """Takes a set of test_failures and returns which result type best fits + the list of failures. "Best fits" means we use the worst type of failure. 
+ + Returns: + one of the test_expectations result types - PASS, TEXT, CRASH, etc.""" + + if not failure_list or len(failure_list) == 0: + return test_expectations.PASS + + failure_types = [type(f) for f in failure_list] + if FailureCrash in failure_types: + return test_expectations.CRASH + elif FailureTimeout in failure_types: + return test_expectations.TIMEOUT + elif (FailureMissingResult in failure_types or + FailureMissingImage in failure_types or + FailureMissingImageHash in failure_types): + return test_expectations.MISSING + else: + is_text_failure = FailureTextMismatch in failure_types + is_image_failure = (FailureImageHashIncorrect in failure_types or + FailureImageHashMismatch in failure_types) + if is_text_failure and is_image_failure: + return test_expectations.IMAGE_PLUS_TEXT + elif is_text_failure: + return test_expectations.TEXT + elif is_image_failure: + return test_expectations.IMAGE + else: + raise ValueError("unclassifiable set of failures: " + + str(failure_types)) + + +class TestFailure(object): + """Abstract base class that defines the failure interface.""" + + @staticmethod + def message(): + """Returns a string describing the failure in more detail.""" + raise NotImplemented + + def result_html_output(self, filename): + """Returns an HTML string to be included on the results.html page.""" + raise NotImplemented + + def should_kill_test_shell(self): + """Returns True if we should kill the test shell before the next + test.""" + return False + + def relative_output_filename(self, filename, modifier): + """Returns a relative filename inside the output dir that contains + modifier. + + For example, if filename is fast\dom\foo.html and modifier is + "-expected.txt", the return value is fast\dom\foo-expected.txt + + Args: + filename: relative filename to test file + modifier: a string to replace the extension of filename with + + Return: + The relative windows path to the output filename + """ + return os.path.splitext(filename)[0] + modifier + + +class FailureWithType(TestFailure): + """Base class that produces standard HTML output based on the test type. + + Subclasses may commonly choose to override the ResultHtmlOutput, but still + use the standard OutputLinks. + """ + + def __init__(self, test_type): + TestFailure.__init__(self) + # TODO(ojan): This class no longer needs to know the test_type. + self._test_type = test_type + + # Filename suffixes used by ResultHtmlOutput. + OUT_FILENAMES = [] + + def output_links(self, filename, out_names): + """Returns a string holding all applicable output file links. + + Args: + filename: the test filename, used to construct the result file names + out_names: list of filename suffixes for the files. If three or more + suffixes are in the list, they should be [actual, expected, diff, + wdiff]. Two suffixes should be [actual, expected], and a + single item is the [actual] filename suffix. + If out_names is empty, returns the empty string. + """ + links = [''] + uris = [self.relative_output_filename(filename, fn) for + fn in out_names] + if len(uris) > 1: + links.append("<a href='%s'>expected</a>" % uris[1]) + if len(uris) > 0: + links.append("<a href='%s'>actual</a>" % uris[0]) + if len(uris) > 2: + links.append("<a href='%s'>diff</a>" % uris[2]) + if len(uris) > 3: + links.append("<a href='%s'>wdiff</a>" % uris[3]) + return ' '.join(links) + + def result_html_output(self, filename): + return self.message() + self.output_links(filename, self.OUT_FILENAMES) + + +class FailureTimeout(TestFailure): + """Test timed out. 
We also want to restart the test shell if this + happens.""" + + @staticmethod + def message(): + return "Test timed out" + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() + + def should_kill_test_shell(self): + return True + + +class FailureCrash(TestFailure): + """Test shell crashed.""" + + @staticmethod + def message(): + return "Test shell crashed" + + def result_html_output(self, filename): + # TODO(tc): create a link to the minidump file + stack = self.relative_output_filename(filename, "-stack.txt") + return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(), + stack) + + def should_kill_test_shell(self): + return True + + +class FailureMissingResult(FailureWithType): + """Expected result was missing.""" + OUT_FILENAMES = ["-actual.txt"] + + @staticmethod + def message(): + return "No expected results found" + + def result_html_output(self, filename): + return ("<strong>%s</strong>" % self.message() + + self.output_links(filename, self.OUT_FILENAMES)) + + +class FailureTextMismatch(FailureWithType): + """Text diff output failed.""" + # Filename suffixes used by ResultHtmlOutput. + OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"] + OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt", + "-wdiff.html"] + + def __init__(self, test_type, has_wdiff): + FailureWithType.__init__(self, test_type) + if has_wdiff: + self.OUT_FILENAMES = self.OUT_FILENAMES_WDIFF + + @staticmethod + def message(): + return "Text diff mismatch" + + +class FailureMissingImageHash(FailureWithType): + """Actual result hash was missing.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. + OUT_FILENAMES = [] + + @staticmethod + def message(): + return "No expected image hash found" + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() + + +class FailureMissingImage(FailureWithType): + """Actual result image was missing.""" + OUT_FILENAMES = ["-actual.png"] + + @staticmethod + def message(): + return "No expected image found" + + def result_html_output(self, filename): + return ("<strong>%s</strong>" % self.message() + + self.output_links(filename, self.OUT_FILENAMES)) + + +class FailureImageHashMismatch(FailureWithType): + """Image hashes didn't match.""" + OUT_FILENAMES = ["-actual.png", "-expected.png", "-diff.png"] + + @staticmethod + def message(): + # We call this a simple image mismatch to avoid confusion, since + # we link to the PNGs rather than the checksums. + return "Image mismatch" + + +class FailureFuzzyFailure(FailureWithType): + """Image hashes didn't match.""" + OUT_FILENAMES = ["-actual.png", "-expected.png"] + + @staticmethod + def message(): + return "Fuzzy image match also failed" + + +class FailureImageHashIncorrect(FailureWithType): + """Actual result hash is incorrect.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. + OUT_FILENAMES = [] + + @staticmethod + def message(): + return "Images match, expected image hash incorrect. 
" + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py new file mode 100644 index 0000000..91fe136 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This module is used to find all of the layout test files used by Chromium +(across all platforms). It exposes one public function - GatherTestFiles() - +which takes an optional list of paths. If a list is passed in, the returned +list of test files is constrained to those found under the paths passed in, +i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files +under that directory.""" + +import glob +import os +import path_utils + +# When collecting test cases, we include any file with these extensions. +_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', + '.php', '.svg']) +# When collecting test cases, skip these directories +_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) + + +def gather_test_files(paths): + """Generate a set of test files and return them. + + Args: + paths: a list of command line paths relative to the webkit/tests + directory. glob patterns are ok. + """ + paths_to_walk = set() + # if paths is empty, provide a pre-defined list. + if paths: + for path in paths: + # If there's an * in the name, assume it's a glob pattern. 
+ path = os.path.join(path_utils.layout_tests_dir(), path) + if path.find('*') > -1: + filenames = glob.glob(path) + paths_to_walk.update(filenames) + else: + paths_to_walk.add(path) + else: + paths_to_walk.add(path_utils.layout_tests_dir()) + + # Now walk all the paths passed in on the command line and get filenames + test_files = set() + for path in paths_to_walk: + if os.path.isfile(path) and _has_supported_extension(path): + test_files.add(os.path.normpath(path)) + continue + + for root, dirs, files in os.walk(path): + # don't walk skipped directories and sub directories + if os.path.basename(root) in _skipped_directories: + del dirs[:] + continue + + for filename in files: + if _has_supported_extension(filename): + filename = os.path.join(root, filename) + filename = os.path.normpath(filename) + test_files.add(filename) + + return test_files + + +def _has_supported_extension(filename): + """Return true if filename is one of the file extensions we want to run a + test on.""" + extension = os.path.splitext(filename)[1] + return extension in _supported_file_extensions diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py new file mode 100644 index 0000000..10d0509 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py @@ -0,0 +1,511 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A Thread object for running the test shell and processing URLs from a +shared queue. + +Each thread runs a separate instance of the test_shell binary and validates +the output. When there are no more URLs to process in the shared queue, the +thread exits. 
+""" + +import copy +import logging +import os +import Queue +import signal +import subprocess +import sys +import thread +import threading +import time + +import path_utils +import test_failures + + +def process_output(proc, test_info, test_types, test_args, target, output_dir): + """Receives the output from a test_shell process, subjects it to a number + of tests, and returns a list of failure types the test produced. + + Args: + proc: an active test_shell process + test_info: Object containing the test filename, uri and timeout + test_types: list of test types to subject the output to + test_args: arguments to be passed to each test + target: Debug or Release + output_dir: directory to put crash stack traces into + + Returns: a list of failure objects and times for the test being processed + """ + outlines = [] + extra_lines = [] + failures = [] + crash = False + + # Some test args, such as the image hash, may be added or changed on a + # test-by-test basis. + local_test_args = copy.copy(test_args) + + start_time = time.time() + + line = proc.stdout.readline() + + # Only start saving output lines once we've loaded the URL for the test. + url = None + test_string = test_info.uri.strip() + + while line.rstrip() != "#EOF": + # Make sure we haven't crashed. + if line == '' and proc.poll() is not None: + failures.append(test_failures.FailureCrash()) + + # This is hex code 0xc000001d, which is used for abrupt + # termination. This happens if we hit ctrl+c from the prompt and + # we happen to be waiting on the test_shell. + # sdoyon: Not sure for which OS and in what circumstances the + # above code is valid. What works for me under Linux to detect + # ctrl+c is for the subprocess returncode to be negative SIGINT. + # And that agrees with the subprocess documentation. + if (-1073741510 == proc.returncode or + - signal.SIGINT == proc.returncode): + raise KeyboardInterrupt + crash = True + break + + # Don't include #URL lines in our output + if line.startswith("#URL:"): + url = line.rstrip()[5:] + if url != test_string: + logging.fatal("Test got out of sync:\n|%s|\n|%s|" % + (url, test_string)) + raise AssertionError("test out of sync") + elif line.startswith("#MD5:"): + local_test_args.hash = line.rstrip()[5:] + elif line.startswith("#TEST_TIMED_OUT"): + # Test timed out, but we still need to read until #EOF. + failures.append(test_failures.FailureTimeout()) + elif url: + outlines.append(line) + else: + extra_lines.append(line) + + line = proc.stdout.readline() + + end_test_time = time.time() + + if len(extra_lines): + extra = "".join(extra_lines) + if crash: + logging.debug("Stacktrace for %s:\n%s" % (test_string, extra)) + # Strip off "file://" since RelativeTestFilename expects + # filesystem paths. + filename = os.path.join(output_dir, + path_utils.relative_test_filename(test_string[7:])) + filename = os.path.splitext(filename)[0] + "-stack.txt" + path_utils.maybe_make_directory(os.path.split(filename)[0]) + open(filename, "wb").write(extra) + else: + logging.debug("Previous test output extra lines after dump:\n%s" % + extra) + + # Check the output and save the results. + time_for_diffs = {} + for test_type in test_types: + start_diff_time = time.time() + new_failures = test_type.compare_output(test_info.filename, + proc, ''.join(outlines), + local_test_args, target) + # Don't add any more failures if we already have a crash, so we don't + # double-report those tests. We do double-report for timeouts since + # we still want to see the text and image output. 
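+ # (crash is set above when test_shell exits before printing #EOF.)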
+ if not crash: + failures.extend(new_failures) + time_for_diffs[test_type.__class__.__name__] = ( + time.time() - start_diff_time) + + total_time_for_all_diffs = time.time() - end_test_time + test_run_time = end_test_time - start_time + return TestStats(test_info.filename, failures, test_run_time, + total_time_for_all_diffs, time_for_diffs) + + +def start_test_shell(command, args): + """Returns the process for a new test_shell started in layout-tests mode. + """ + cmd = [] + # Hook for injecting valgrind or other runtime instrumentation, + # used by e.g. tools/valgrind/valgrind_tests.py. + wrapper = os.environ.get("BROWSER_WRAPPER", None) + if wrapper != None: + cmd += [wrapper] + cmd += command + ['--layout-tests'] + args + return subprocess.Popen(cmd, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + + +class TestStats: + + def __init__(self, filename, failures, test_run_time, + total_time_for_all_diffs, time_for_diffs): + self.filename = filename + self.failures = failures + self.test_run_time = test_run_time + self.total_time_for_all_diffs = total_time_for_all_diffs + self.time_for_diffs = time_for_diffs + + +class SingleTestThread(threading.Thread): + """Thread wrapper for running a single test file.""" + + def __init__(self, test_shell_command, shell_args, test_info, test_types, + test_args, target, output_dir): + """ + Args: + test_info: Object containing the test filename, uri and timeout + output_dir: Directory to put crash stacks into. + See TestShellThread for documentation of the remaining arguments. + """ + + threading.Thread.__init__(self) + self._command = test_shell_command + self._shell_args = shell_args + self._test_info = test_info + self._test_types = test_types + self._test_args = test_args + self._target = target + self._output_dir = output_dir + + def run(self): + proc = start_test_shell(self._command, self._shell_args + + ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri]) + self._test_stats = process_output(proc, self._test_info, + self._test_types, self._test_args, self._target, self._output_dir) + + def get_test_stats(self): + return self._test_stats + + +class TestShellThread(threading.Thread): + + def __init__(self, filename_list_queue, result_queue, test_shell_command, + test_types, test_args, shell_args, options): + """Initialize all the local state for this test shell thread. + + Args: + filename_list_queue: A thread safe Queue class that contains lists + of tuples of (filename, uri) pairs. + result_queue: A thread safe Queue class that will contain tuples of + (test, failure lists) for the test results. + test_shell_command: A list specifying the command+args for + test_shell + test_types: A list of TestType objects to run the test output + against. + test_args: A TestArguments object to pass to each TestType. + shell_args: Any extra arguments to be passed to test_shell.exe. + options: A property dictionary as produced by optparse. 
The + command-line options should match those expected by + run_webkit_tests; they are typically passed via the + run_webkit_tests.TestRunner class.""" + threading.Thread.__init__(self) + self._filename_list_queue = filename_list_queue + self._result_queue = result_queue + self._filename_list = [] + self._test_shell_command = test_shell_command + self._test_types = test_types + self._test_args = test_args + self._test_shell_proc = None + self._shell_args = shell_args + self._options = options + self._canceled = False + self._exception_info = None + self._directory_timing_stats = {} + self._test_stats = [] + self._num_tests = 0 + self._start_time = 0 + self._stop_time = 0 + + # Current directory of tests we're running. + self._current_dir = None + # Number of tests in self._current_dir. + self._num_tests_in_current_dir = None + # Time at which we started running tests from self._current_dir. + self._current_dir_start_time = None + + def get_directory_timing_stats(self): + """Returns a dictionary mapping test directory to a tuple of + (number of tests in that directory, time to run the tests)""" + return self._directory_timing_stats + + def get_individual_test_stats(self): + """Returns a list of (test_filename, time_to_run_test, + total_time_for_all_diffs, time_for_diffs) tuples.""" + return self._test_stats + + def cancel(self): + """Set a flag telling this thread to quit.""" + self._canceled = True + + def get_exception_info(self): + """If run() terminated on an uncaught exception, return it here + ((type, value, traceback) tuple). + Returns None if run() terminated normally. Meant to be called after + joining this thread.""" + return self._exception_info + + def get_total_time(self): + return max(self._stop_time - self._start_time, 0.0) + + def get_num_tests(self): + return self._num_tests + + def run(self): + """Delegate main work to a helper method and watch for uncaught + exceptions.""" + self._start_time = time.time() + self._num_tests = 0 + try: + logging.debug('%s starting' % (self.getName())) + self._run(test_runner=None, result_summary=None) + logging.debug('%s done (%d tests)' % (self.getName(), + self.get_num_tests())) + except: + # Save the exception for our caller to see. + self._exception_info = sys.exc_info() + self._stop_time = time.time() + # Re-raise it and die. + logging.error('%s dying: %s' % (self.getName(), + self._exception_info)) + raise + self._stop_time = time.time() + + def run_in_main_thread(self, test_runner, result_summary): + """This hook allows us to run the tests from the main thread if + --num-test-shells==1, instead of having to always run two or more + threads. This allows us to debug the test harness without having to + do multi-threaded debugging.""" + self._run(test_runner, result_summary) + + def _run(self, test_runner, result_summary): + """Main work entry point of the thread. Basically we pull urls from the + filename queue and run the tests until we run out of urls. + + If test_runner is not None, then we call test_runner.UpdateSummary() + with the results of each test.""" + batch_size = 0 + batch_count = 0 + if self._options.batch_size: + try: + batch_size = int(self._options.batch_size) + except: + logging.info("Ignoring invalid batch size '%s'" % + self._options.batch_size) + + # Append tests we're running to the existing tests_run.txt file. + # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput. 
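+ # Append mode ('a') lets each TestShellThread write to the same file
+ # without truncating what other threads have already recorded.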
+ tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = open(tests_run_filename, "a") + + while True: + if self._canceled: + logging.info('Testing canceled') + tests_run_file.close() + return + + if len(self._filename_list) is 0: + if self._current_dir is not None: + self._directory_timing_stats[self._current_dir] = \ + (self._num_tests_in_current_dir, + time.time() - self._current_dir_start_time) + + try: + self._current_dir, self._filename_list = \ + self._filename_list_queue.get_nowait() + except Queue.Empty: + self._kill_test_shell() + tests_run_file.close() + return + + self._num_tests_in_current_dir = len(self._filename_list) + self._current_dir_start_time = time.time() + + test_info = self._filename_list.pop() + + # We have a url, run tests. + batch_count += 1 + self._num_tests += 1 + if self._options.run_singly: + failures = self._run_test_singly(test_info) + else: + failures = self._run_test(test_info) + + filename = test_info.filename + tests_run_file.write(filename + "\n") + if failures: + # Check and kill test shell if we need too. + if len([1 for f in failures if f.should_kill_test_shell()]): + self._kill_test_shell() + # Reset the batch count since the shell just bounced. + batch_count = 0 + # Print the error message(s). + error_str = '\n'.join([' ' + f.message() for f in failures]) + logging.debug("%s %s failed:\n%s" % (self.getName(), + path_utils.relative_test_filename(filename), + error_str)) + else: + logging.debug("%s %s passed" % (self.getName(), + path_utils.relative_test_filename(filename))) + self._result_queue.put((filename, failures)) + + if batch_size > 0 and batch_count > batch_size: + # Bounce the shell and reset count. + self._kill_test_shell() + batch_count = 0 + + if test_runner: + test_runner.update_summary(result_summary) + + def _run_test_singly(self, test_info): + """Run a test in a separate thread, enforcing a hard time limit. + + Since we can only detect the termination of a thread, not any internal + state or progress, we can only run per-test timeouts when running test + files singly. + + Args: + test_info: Object containing the test filename, uri and timeout + + Return: + A list of TestFailure objects describing the error. + """ + worker = SingleTestThread(self._test_shell_command, + self._shell_args, + test_info, + self._test_types, + self._test_args, + self._options.target, + self._options.results_directory) + + worker.start() + + # When we're running one test per test_shell process, we can enforce + # a hard timeout. the test_shell watchdog uses 2.5x the timeout + # We want to be larger than that. + worker.join(int(test_info.timeout) * 3.0 / 1000.0) + if worker.isAlive(): + # If join() returned with the thread still running, the + # test_shell.exe is completely hung and there's nothing + # more we can do with it. We have to kill all the + # test_shells to free it up. If we're running more than + # one test_shell thread, we'll end up killing the other + # test_shells too, introducing spurious crashes. We accept that + # tradeoff in order to avoid losing the rest of this thread's + # results. 
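+ # (worker.join() above waited 3x the per-test timeout, converted from
+ # milliseconds to seconds.)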
+ logging.error('Test thread hung: killing all test_shells') + path_utils.kill_all_test_shells() + + try: + stats = worker.get_test_stats() + self._test_stats.append(stats) + failures = stats.failures + except AttributeError, e: + failures = [] + logging.error('Cannot get results of test: %s' % + test_info.filename) + + return failures + + def _run_test(self, test_info): + """Run a single test file using a shared test_shell process. + + Args: + test_info: Object containing the test filename, uri and timeout + + Return: + A list of TestFailure objects describing the error. + """ + self._ensure_test_shell_is_running() + # Args to test_shell is a space-separated list of + # "uri timeout pixel_hash" + # The timeout and pixel_hash are optional. The timeout is used if this + # test has a custom timeout. The pixel_hash is used to avoid doing an + # image dump if the checksums match, so it should be set to a blank + # value if we are generating a new baseline. + # (Otherwise, an image from a previous run will be copied into + # the baseline.) + image_hash = test_info.image_hash + if image_hash and self._test_args.new_baseline: + image_hash = "" + self._test_shell_proc.stdin.write(("%s %s %s\n" % + (test_info.uri, test_info.timeout, image_hash))) + + # If the test shell is dead, the above may cause an IOError as we + # try to write onto the broken pipe. If this is the first test for + # this test shell process, than the test shell did not + # successfully start. If this is not the first test, then the + # previous tests have caused some kind of delayed crash. We don't + # try to recover here. + self._test_shell_proc.stdin.flush() + + stats = process_output(self._test_shell_proc, test_info, + self._test_types, self._test_args, + self._options.target, + self._options.results_directory) + + self._test_stats.append(stats) + return stats.failures + + def _ensure_test_shell_is_running(self): + """Start the shared test shell, if it's not running. Not for use when + running tests singly, since those each start a separate test shell in + their own thread. + """ + if (not self._test_shell_proc or + self._test_shell_proc.poll() is not None): + self._test_shell_proc = start_test_shell(self._test_shell_command, + self._shell_args) + + def _kill_test_shell(self): + """Kill the test shell process if it's running.""" + if self._test_shell_proc: + self._test_shell_proc.stdin.close() + self._test_shell_proc.stdout.close() + if self._test_shell_proc.stderr: + self._test_shell_proc.stderr.close() + if (sys.platform not in ('win32', 'cygwin') and + not self._test_shell_proc.poll()): + # Closing stdin/stdout/stderr hangs sometimes on OS X. + null = open(os.devnull, "w") + subprocess.Popen(["kill", "-9", + str(self._test_shell_proc.pid)], stderr=null) + null.close() + self._test_shell_proc = None diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py new file mode 100644 index 0000000..7fc47a0 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to help start/stop the PyWebSocket server used by layout tests.""" + + +import logging +import optparse +import os +import subprocess +import sys +import tempfile +import time +import urllib + +import path_utils +import platform_utils +import http_server + +_WS_LOG_PREFIX = 'pywebsocket.ws.log-' +_WSS_LOG_PREFIX = 'pywebsocket.wss.log-' + +_DEFAULT_WS_PORT = 8880 +_DEFAULT_WSS_PORT = 9323 + + +def url_is_alive(url): + """Checks to see if we get an http response from |url|. + We poll the url 5 times with a 1 second delay. If we don't + get a reply in that time, we give up and assume the httpd + didn't start properly. + + Args: + url: The URL to check. + Return: + True if the url is alive. + """ + wait_time = 5 + while wait_time > 0: + try: + response = urllib.urlopen(url) + # Server is up and responding. + return True + except IOError: + pass + wait_time -= 1 + # Wait a second and try again. 
+ time.sleep(1) + + return False + + +def remove_log_files(folder, starts_with): + files = os.listdir(folder) + for file in files: + if file.startswith(starts_with): + full_path = os.path.join(folder, file) + os.remove(full_path) + + +class PyWebSocketNotStarted(Exception): + pass + + +class PyWebSocketNotFound(Exception): + pass + + +class PyWebSocket(http_server.Lighttpd): + + def __init__(self, output_dir, port=_DEFAULT_WS_PORT, + root=None, + use_tls=False, + private_key=http_server.Lighttpd._pem_file, + certificate=http_server.Lighttpd._pem_file, + register_cygwin=None, + pidfile=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + http_server.Lighttpd.__init__(self, output_dir, + port=port, + root=root, + register_cygwin=register_cygwin) + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._use_tls = use_tls + self._private_key = private_key + self._certificate = certificate + if self._port: + self._port = int(self._port) + if self._use_tls: + self._server_name = 'PyWebSocket(Secure)' + else: + self._server_name = 'PyWebSocket' + self._pidfile = pidfile + self._wsout = None + + # Webkit tests + if self._root: + self._layout_tests = os.path.abspath(self._root) + self._web_socket_tests = os.path.abspath( + os.path.join(self._root, 'websocket', 'tests')) + else: + try: + self._web_socket_tests = path_utils.path_from_base( + 'third_party', 'WebKit', 'LayoutTests', 'websocket', + 'tests') + self._layout_tests = path_utils.path_from_base( + 'third_party', 'WebKit', 'LayoutTests') + except path_utils.PathNotFound: + self._web_socket_tests = None + + def start(self): + if not self._web_socket_tests: + logging.info('No need to start %s server.' % self._server_name) + return + if self.is_running(): + raise PyWebSocketNotStarted('%s is already running.' % + self._server_name) + + time_str = time.strftime('%d%b%Y-%H%M%S') + if self._use_tls: + log_prefix = _WSS_LOG_PREFIX + else: + log_prefix = _WS_LOG_PREFIX + log_file_name = log_prefix + time_str + + # Remove old log files. We only need to keep the last ones. 
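# A generic, standard-library version of the readiness check above: poll a URL
# a handful of times and report whether anything answered. The helper name and
# retry parameters are illustrative.
import time
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

def wait_until_alive(url, attempts=5, delay=1.0):
    for _ in range(attempts):
        try:
            urlopen(url)
            return True        # Server is up and responding.
        except IOError:
            time.sleep(delay)  # Not ready yet; wait and retry.
    return False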
+ remove_log_files(self._output_dir, log_prefix) + + error_log = os.path.join(self._output_dir, log_file_name + "-err.txt") + + output_log = os.path.join(self._output_dir, log_file_name + "-out.txt") + self._wsout = open(output_log, "w") + + python_interp = sys.executable + pywebsocket_base = path_utils.path_from_base( + 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket') + pywebsocket_script = path_utils.path_from_base( + 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket', + 'mod_pywebsocket', 'standalone.py') + start_cmd = [ + python_interp, pywebsocket_script, + '-p', str(self._port), + '-d', self._layout_tests, + '-s', self._web_socket_tests, + '-l', error_log, + ] + + handler_map_file = os.path.join(self._web_socket_tests, + 'handler_map.txt') + if os.path.exists(handler_map_file): + logging.debug('Using handler_map_file: %s' % handler_map_file) + start_cmd.append('-m') + start_cmd.append(handler_map_file) + else: + logging.warning('No handler_map_file found') + + if self._use_tls: + start_cmd.extend(['-t', '-k', self._private_key, + '-c', self._certificate]) + + # Put the cygwin directory first in the path to find cygwin1.dll + env = os.environ + if sys.platform in ('cygwin', 'win32'): + env['PATH'] = '%s;%s' % ( + path_utils.path_from_base('third_party', 'cygwin', 'bin'), + env['PATH']) + + if sys.platform == 'win32' and self._register_cygwin: + setup_mount = path_utils.path_from_base('third_party', 'cygwin', + 'setup_mount.bat') + subprocess.Popen(setup_mount).wait() + + env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + + env.get('PYTHONPATH', '')) + + logging.debug('Starting %s server on %d.' % ( + self._server_name, self._port)) + logging.debug('cmdline: %s' % ' '.join(start_cmd)) + self._process = subprocess.Popen(start_cmd, stdout=self._wsout, + stderr=subprocess.STDOUT, + env=env) + + # Wait a bit before checking the liveness of the server. + time.sleep(0.5) + + if self._use_tls: + url = 'https' + else: + url = 'http' + url = url + '://127.0.0.1:%d/' % self._port + if not url_is_alive(url): + fp = open(output_log) + try: + for line in fp: + logging.error(line) + finally: + fp.close() + raise PyWebSocketNotStarted( + 'Failed to start %s server on port %s.' % + (self._server_name, self._port)) + + # Our process terminated already + if self._process.returncode != None: + raise PyWebSocketNotStarted( + 'Failed to start %s server.' % self._server_name) + if self._pidfile: + f = open(self._pidfile, 'w') + f.write("%d" % self._process.pid) + f.close() + + def stop(self, force=False): + if not force and not self.is_running(): + return + + if self._process: + pid = self._process.pid + elif self._pidfile: + f = open(self._pidfile) + pid = int(f.read().strip()) + f.close() + + if not pid: + raise PyWebSocketNotFound( + 'Failed to find %s server pid.' % self._server_name) + + logging.debug('Shutting down %s server %d.' % (self._server_name, pid)) + platform_utils.kill_process(pid) + + if self._process: + self._process.wait() + self._process = None + + if self._wsout: + self._wsout.close() + self._wsout = None + + +if '__main__' == __name__: + # Provide some command line params for starting the PyWebSocket server + # manually. 
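# A condensed sketch of the launch sequence in start() above: build the
# standalone.py command line, make mod_pywebsocket importable via PYTHONPATH,
# and fold the server's stderr into a single output log. All paths here are
# placeholders.
import os
import subprocess
import sys

def launch_pywebsocket(script, port, doc_root, handler_root, error_log,
                       output_log, pywebsocket_base):
    cmd = [sys.executable, script,
           '-p', str(port),
           '-d', doc_root,
           '-s', handler_root,
           '-l', error_log]
    env = os.environ.copy()
    env['PYTHONPATH'] = pywebsocket_base + os.pathsep + env.get('PYTHONPATH', '')
    out = open(output_log, 'w')
    return subprocess.Popen(cmd, stdout=out, stderr=subprocess.STDOUT, env=env)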
+ option_parser = optparse.OptionParser() + option_parser.add_option('--server', type='choice', + choices=['start', 'stop'], default='start', + help='Server action (start|stop)') + option_parser.add_option('-p', '--port', dest='port', + default=None, help='Port to listen on') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot ' + '(overrides layout test roots)') + option_parser.add_option('-t', '--tls', dest='use_tls', + action='store_true', + default=False, help='use TLS (wss://)') + option_parser.add_option('-k', '--private_key', dest='private_key', + default='', help='TLS private key file.') + option_parser.add_option('-c', '--certificate', dest='certificate', + default='', help='TLS certificate file.') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", + help='Register Cygwin paths (on Win try bots)') + option_parser.add_option('--pidfile', help='path to pid file.') + options, args = option_parser.parse_args() + + if not options.port: + if options.use_tls: + options.port = _DEFAULT_WSS_PORT + else: + options.port = _DEFAULT_WS_PORT + + kwds = {'port': options.port, 'use_tls': options.use_tls} + if options.root: + kwds['root'] = options.root + if options.private_key: + kwds['private_key'] = options.private_key + if options.certificate: + kwds['certificate'] = options.certificate + kwds['register_cygwin'] = options.register_cygwin + if options.pidfile: + kwds['pidfile'] = options.pidfile + + pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds) + + if 'start' == options.server: + pywebsocket.start() + else: + pywebsocket.stop(force=True) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py new file mode 100644 index 0000000..1db811f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py @@ -0,0 +1,1028 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
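# The __main__ block above doubles as a usage example; a script driving the
# server programmatically would look roughly like this (PyWebSocket is the
# class defined above, and the temp directory and port are illustrative).
import tempfile

server = PyWebSocket(tempfile.gettempdir(), port=8880)
server.start()
try:
    pass  # ... run whatever needs the WebSocket server here ...
finally:
    server.stop(force=True)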
+ +"""Rebaselining tool that automatically produces baselines for all platforms. + +The script does the following for each platform specified: + 1. Compile a list of tests that need rebaselining. + 2. Download test result archive from buildbot for the platform. + 3. Extract baselines from the archive file for all identified files. + 4. Add new baselines to SVN repository. + 5. For each test that has been rebaselined, remove this platform option from + the test in test_expectation.txt. If no other platforms remain after + removal, delete the rebaselined test from the file. + +At the end, the script generates a html that compares old and new baselines. +""" + +import logging +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import urllib +import webbrowser +import zipfile + +from layout_package import path_utils +from layout_package import test_expectations +from test_types import image_diff +from test_types import text_diff + +# Repository type constants. +REPO_SVN, REPO_UNKNOWN = range(2) + +BASELINE_SUFFIXES = ['.txt', '.png', '.checksum'] +REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux'] +ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel', + 'win-vista': 'webkit-dbg-vista', + 'win-xp': 'webkit-rel', + 'mac': 'webkit-rel-mac5', + 'linux': 'webkit-rel-linux', + 'win-canary': 'webkit-rel-webkit-org', + 'win-vista-canary': 'webkit-dbg-vista', + 'win-xp-canary': 'webkit-rel-webkit-org', + 'mac-canary': 'webkit-rel-mac-webkit-org', + 'linux-canary': 'webkit-rel-linux-webkit-org'} + + +def run_shell_with_return_code(command, print_output=False): + """Executes a command and returns the output and process return code. + + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. + + Returns: + command output, return code + """ + + # Use a shell for subcommands on Windows to get a PATH search. + use_shell = sys.platform.startswith('win') + p = subprocess.Popen(command, stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, shell=use_shell) + if print_output: + output_array = [] + while True: + line = p.stdout.readline() + if not line: + break + if print_output: + print line.strip('\n') + output_array.append(line) + output = ''.join(output_array) + else: + output = p.stdout.read() + p.wait() + p.stdout.close() + + return output, p.returncode + + +def run_shell(command, print_output=False): + """Executes a command and returns the output. + + Args: + command: program and arguments. + print_output: if true, print the command results to standard output. + + Returns: + command output + """ + + output, return_code = run_shell_with_return_code(command, print_output) + return output + + +def log_dashed_string(text, platform, logging_level=logging.INFO): + """Log text message with dashes on both sides.""" + + msg = text + if platform: + msg += ': ' + platform + if len(msg) < 78: + dashes = '-' * ((78 - len(msg)) / 2) + msg = '%s %s %s' % (dashes, msg, dashes) + + if logging_level == logging.ERROR: + logging.error(msg) + elif logging_level == logging.WARNING: + logging.warn(msg) + else: + logging.info(msg) + + +def setup_html_directory(html_directory): + """Setup the directory to store html results. + + All html related files are stored in the "rebaseline_html" subdirectory. + + Args: + html_directory: parent directory that stores the rebaselining results. + If None, a temp directory is created. + + Returns: + the directory that stores the html related rebaselining results. 
+ """ + + if not html_directory: + html_directory = tempfile.mkdtemp() + elif not os.path.exists(html_directory): + os.mkdir(html_directory) + + html_directory = os.path.join(html_directory, 'rebaseline_html') + logging.info('Html directory: "%s"', html_directory) + + if os.path.exists(html_directory): + shutil.rmtree(html_directory, True) + logging.info('Deleted file at html directory: "%s"', html_directory) + + if not os.path.exists(html_directory): + os.mkdir(html_directory) + return html_directory + + +def get_result_file_fullpath(html_directory, baseline_filename, platform, + result_type): + """Get full path of the baseline result file. + + Args: + html_directory: directory that stores the html related files. + baseline_filename: name of the baseline file. + platform: win, linux or mac + result_type: type of the baseline result: '.txt', '.png'. + + Returns: + Full path of the baseline file for rebaselining result comparison. + """ + + base, ext = os.path.splitext(baseline_filename) + result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) + fullpath = os.path.join(html_directory, result_filename) + logging.debug(' Result file full path: "%s".', fullpath) + return fullpath + + +class Rebaseliner(object): + """Class to produce new baselines for a given platform.""" + + REVISION_REGEX = r'<a href=\"(\d+)/\">' + + def __init__(self, platform, options): + self._file_dir = path_utils.path_from_base('webkit', 'tools', + 'layout_tests') + self._platform = platform + self._options = options + self._rebaselining_tests = [] + self._rebaselined_tests = [] + + # Create tests and expectations helper which is used to: + # -. compile list of tests that need rebaselining. + # -. update the tests in test_expectations file after rebaseline + # is done. + self._test_expectations = \ + test_expectations.TestExpectations(None, + self._file_dir, + platform, + False, + False) + + self._repo_type = self._get_repo_type() + + def run(self, backup): + """Run rebaseline process.""" + + log_dashed_string('Compiling rebaselining tests', self._platform) + if not self._compile_rebaselining_tests(): + return True + + log_dashed_string('Downloading archive', self._platform) + archive_file = self._download_buildbot_archive() + logging.info('') + if not archive_file: + logging.error('No archive found.') + return False + + log_dashed_string('Extracting and adding new baselines', + self._platform) + if not self._extract_and_add_new_baselines(archive_file): + return False + + log_dashed_string('Updating rebaselined tests in file', + self._platform) + self._update_rebaselined_tests_in_file(backup) + logging.info('') + + if len(self._rebaselining_tests) != len(self._rebaselined_tests): + logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' + 'REBASELINED.') + logging.warning(' Total tests needing rebaselining: %d', + len(self._rebaselining_tests)) + logging.warning(' Total tests rebaselined: %d', + len(self._rebaselined_tests)) + return False + + logging.warning('All tests needing rebaselining were successfully ' + 'rebaselined.') + + return True + + def get_rebaselining_tests(self): + return self._rebaselining_tests + + def _get_repo_type(self): + """Get the repository type that client is using.""" + output, return_code = run_shell_with_return_code(['svn', 'info'], + False) + if return_code == 0: + return REPO_SVN + + return REPO_UNKNOWN + + def _compile_rebaselining_tests(self): + """Compile list of tests that need rebaselining for the platform. 
+ + Returns: + List of tests that need rebaselining or + None if there is no such test. + """ + + self._rebaselining_tests = \ + self._test_expectations.get_rebaselining_failures() + if not self._rebaselining_tests: + logging.warn('No tests found that need rebaselining.') + return None + + logging.info('Total number of tests needing rebaselining ' + 'for "%s": "%d"', self._platform, + len(self._rebaselining_tests)) + + test_no = 1 + for test in self._rebaselining_tests: + logging.info(' %d: %s', test_no, test) + test_no += 1 + + return self._rebaselining_tests + + def _get_latest_revision(self, url): + """Get the latest layout test revision number from buildbot. + + Args: + url: Url to retrieve layout test revision numbers. + + Returns: + latest revision or + None on failure. + """ + + logging.debug('Url to retrieve revision: "%s"', url) + + f = urllib.urlopen(url) + content = f.read() + f.close() + + revisions = re.findall(self.REVISION_REGEX, content) + if not revisions: + logging.error('Failed to find revision, content: "%s"', content) + return None + + revisions.sort(key=int) + logging.info('Latest revision: "%s"', revisions[len(revisions) - 1]) + return revisions[len(revisions) - 1] + + def _get_archive_dir_name(self, platform, webkit_canary): + """Get name of the layout test archive directory. + + Returns: + Directory name or + None on failure + """ + + if webkit_canary: + platform += '-canary' + + if platform in ARCHIVE_DIR_NAME_DICT: + return ARCHIVE_DIR_NAME_DICT[platform] + else: + logging.error('Cannot find platform key %s in archive ' + 'directory name dictionary', platform) + return None + + def _get_archive_url(self): + """Generate the url to download latest layout test archive. + + Returns: + Url to download archive or + None on failure + """ + + dir_name = self._get_archive_dir_name(self._platform, + self._options.webkit_canary) + if not dir_name: + return None + + logging.debug('Buildbot platform dir name: "%s"', dir_name) + + url_base = '%s/%s/' % (self._options.archive_url, dir_name) + latest_revision = self._get_latest_revision(url_base) + if latest_revision is None or latest_revision <= 0: + return None + + archive_url = ('%s%s/layout-test-results.zip' % (url_base, + latest_revision)) + logging.info('Archive url: "%s"', archive_url) + return archive_url + + def _download_buildbot_archive(self): + """Download layout test archive file from buildbot. + + Returns: + True if download succeeded or + False otherwise. + """ + + url = self._get_archive_url() + if url is None: + return None + + fn = urllib.urlretrieve(url)[0] + logging.info('Archive downloaded and saved to file: "%s"', fn) + return fn + + def _extract_and_add_new_baselines(self, archive_file): + """Extract new baselines from archive and add them to SVN repository. + + Args: + archive_file: full path to the archive file. + + Returns: + List of tests that have been rebaselined or + None on failure. 
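# A compact sketch of the buildbot scraping above: fetch the directory listing,
# collect the numeric revision links, and build the archive URL from the
# largest one. The url_base argument is a placeholder for the buildbot URL.
import re
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

def latest_archive_url(url_base):
    listing = urlopen(url_base).read().decode('utf-8', 'replace')
    revisions = re.findall(r'<a href="(\d+)/">', listing)
    if not revisions:
        return None
    return '%s%s/layout-test-results.zip' % (url_base, max(revisions, key=int))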
+ """ + + zip_file = zipfile.ZipFile(archive_file, 'r') + zip_namelist = zip_file.namelist() + + logging.debug('zip file namelist:') + for name in zip_namelist: + logging.debug(' ' + name) + + platform = path_utils.platform_name(self._platform) + logging.debug('Platform dir: "%s"', platform) + + test_no = 1 + self._rebaselined_tests = [] + for test in self._rebaselining_tests: + logging.info('Test %d: %s', test_no, test) + + found = False + svn_error = False + test_basename = os.path.splitext(test)[0] + for suffix in BASELINE_SUFFIXES: + archive_test_name = ('layout-test-results/%s-actual%s' % + (test_basename, suffix)) + logging.debug(' Archive test file name: "%s"', + archive_test_name) + if not archive_test_name in zip_namelist: + logging.info(' %s file not in archive.', suffix) + continue + + found = True + logging.info(' %s file found in archive.', suffix) + + # Extract new baseline from archive and save it to a temp file. + data = zip_file.read(archive_test_name) + temp_fd, temp_name = tempfile.mkstemp(suffix) + f = os.fdopen(temp_fd, 'wb') + f.write(data) + f.close() + + expected_filename = '%s-expected%s' % (test_basename, suffix) + expected_fullpath = os.path.join( + path_utils.chromium_baseline_path(platform), + expected_filename) + expected_fullpath = os.path.normpath(expected_fullpath) + logging.debug(' Expected file full path: "%s"', + expected_fullpath) + + # TODO(victorw): for now, the rebaselining tool checks whether + # or not THIS baseline is duplicate and should be skipped. + # We could improve the tool to check all baselines in upper + # and lower + # levels and remove all duplicated baselines. + if self._is_dup_baseline(temp_name, + expected_fullpath, + test, + suffix, + self._platform): + os.remove(temp_name) + self._delete_baseline(expected_fullpath) + continue + + # Create the new baseline directory if it doesn't already + # exist. + path_utils.maybe_make_directory( + os.path.dirname(expected_fullpath)) + + shutil.move(temp_name, expected_fullpath) + + if not self._svn_add(expected_fullpath): + svn_error = True + elif suffix != '.checksum': + self._create_html_baseline_files(expected_fullpath) + + if not found: + logging.warn(' No new baselines found in archive.') + else: + if svn_error: + logging.warn(' Failed to add baselines to SVN.') + else: + logging.info(' Rebaseline succeeded.') + self._rebaselined_tests.append(test) + + test_no += 1 + + zip_file.close() + os.remove(archive_file) + + return self._rebaselined_tests + + def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix, + platform): + """Check whether a baseline is duplicate and can fallback to same + baseline for another platform. For example, if a test has same + baseline on linux and windows, then we only store windows + baseline and linux baseline will fallback to the windows version. + + Args: + expected_filename: baseline expectation file name. + test: test name. + suffix: file suffix of the expected results, including dot; + e.g. '.txt' or '.png'. + platform: baseline platform 'mac', 'win' or 'linux'. + + Returns: + True if the baseline is unnecessary. + False otherwise. 
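# The extraction step above, reduced to its core: read one member out of the
# results archive, stage it in a temp file, then move it into the baseline
# directory (creating that directory if needed). Member and destination names
# are illustrative.
import os
import shutil
import tempfile
import zipfile

def extract_member(archive_path, member, destination):
    zf = zipfile.ZipFile(archive_path, 'r')
    try:
        data = zf.read(member)
    finally:
        zf.close()
    fd, temp_name = tempfile.mkstemp()
    with os.fdopen(fd, 'wb') as f:
        f.write(data)
    dest_dir = os.path.dirname(destination)
    if dest_dir and not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)  # plays the role of maybe_make_directory above
    shutil.move(temp_name, destination)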
+ """ + test_filepath = os.path.join(path_utils.layout_tests_dir(), test) + all_baselines = path_utils.expected_baselines(test_filepath, + suffix, platform, True) + for (fallback_dir, fallback_file) in all_baselines: + if fallback_dir and fallback_file: + fallback_fullpath = os.path.normpath( + os.path.join(fallback_dir, fallback_file)) + if fallback_fullpath.lower() != baseline_path.lower(): + if not self._diff_baselines(new_baseline, + fallback_fullpath): + logging.info(' Found same baseline at %s', + fallback_fullpath) + return True + else: + return False + + return False + + def _diff_baselines(self, file1, file2): + """Check whether two baselines are different. + + Args: + file1, file2: full paths of the baselines to compare. + + Returns: + True if two files are different or have different extensions. + False otherwise. + """ + + ext1 = os.path.splitext(file1)[1].upper() + ext2 = os.path.splitext(file2)[1].upper() + if ext1 != ext2: + logging.warn('Files to compare have different ext. ' + 'File1: %s; File2: %s', file1, file2) + return True + + if ext1 == '.PNG': + return image_diff.ImageDiff(self._platform, '').diff_files(file1, + file2) + else: + return text_diff.TestTextDiff(self._platform, '').diff_files(file1, + file2) + + def _delete_baseline(self, filename): + """Remove the file from repository and delete it from disk. + + Args: + filename: full path of the file to delete. + """ + + if not filename or not os.path.isfile(filename): + return + + if self._repo_type == REPO_SVN: + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + run_shell(['svn', 'delete', '--force', basename], False) + os.chdir(original_dir) + else: + os.remove(filename) + + def _update_rebaselined_tests_in_file(self, backup): + """Update the rebaselined tests in test expectations file. + + Args: + backup: if True, backup the original test expectations file. + + Returns: + no + """ + + if self._rebaselined_tests: + self._test_expectations.remove_platform_from_file( + self._rebaselined_tests, self._platform, backup) + else: + logging.info('No test was rebaselined so nothing to remove.') + + def _svn_add(self, filename): + """Add the file to SVN repository. + + Args: + filename: full path of the file to add. + + Returns: + True if the file already exists in SVN or is sucessfully added + to SVN. + False otherwise. 
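# The duplicate-baseline check above boils down to: if the platform-specific
# result is byte-identical to the baseline the test would fall back to anyway,
# storing another copy is pointless. A filecmp-based sketch (the real code
# defers to image_diff/text_diff so comparisons match how tests are scored):
import filecmp
import os

def baseline_is_redundant(new_baseline, fallback_baseline):
    if not os.path.isfile(fallback_baseline):
        return False
    return filecmp.cmp(new_baseline, fallback_baseline, shallow=False)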
+ """ + + if not filename: + return False + + parent_dir, basename = os.path.split(filename) + if self._repo_type != REPO_SVN or parent_dir == filename: + logging.info("No svn checkout found, skip svn add.") + return True + + original_dir = os.getcwd() + os.chdir(parent_dir) + status_output = run_shell(['svn', 'status', basename], False) + os.chdir(original_dir) + output = status_output.upper() + if output.startswith('A') or output.startswith('M'): + logging.info(' File already added to SVN: "%s"', filename) + return True + + if output.find('IS NOT A WORKING COPY') >= 0: + logging.info(' File is not a working copy, add its parent: "%s"', + parent_dir) + return self._svn_add(parent_dir) + + os.chdir(parent_dir) + add_output = run_shell(['svn', 'add', basename], True) + os.chdir(original_dir) + output = add_output.upper().rstrip() + if output.startswith('A') and output.find(basename.upper()) >= 0: + logging.info(' Added new file: "%s"', filename) + self._svn_prop_set(filename) + return True + + if (not status_output) and (add_output.upper().find( + 'ALREADY UNDER VERSION CONTROL') >= 0): + logging.info(' File already under SVN and has no change: "%s"', + filename) + return True + + logging.warn(' Failed to add file to SVN: "%s"', filename) + logging.warn(' Svn status output: "%s"', status_output) + logging.warn(' Svn add output: "%s"', add_output) + return False + + def _svn_prop_set(self, filename): + """Set the baseline property + + Args: + filename: full path of the file to add. + + Returns: + True if the file already exists in SVN or is sucessfully added + to SVN. + False otherwise. + """ + ext = os.path.splitext(filename)[1].upper() + if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM': + return + + parent_dir, basename = os.path.split(filename) + original_dir = os.getcwd() + os.chdir(parent_dir) + if ext == '.PNG': + cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename] + else: + cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename] + + logging.debug(' Set svn prop: %s', ' '.join(cmd)) + run_shell(cmd, False) + os.chdir(original_dir) + + def _create_html_baseline_files(self, baseline_fullpath): + """Create baseline files (old, new and diff) in html directory. + + The files are used to compare the rebaselining results. + + Args: + baseline_fullpath: full path of the expected baseline file. + """ + + if not baseline_fullpath or not os.path.exists(baseline_fullpath): + return + + # Copy the new baseline to html directory for result comparison. + baseline_filename = os.path.basename(baseline_fullpath) + new_file = get_result_file_fullpath(self._options.html_directory, + baseline_filename, self._platform, + 'new') + shutil.copyfile(baseline_fullpath, new_file) + logging.info(' Html: copied new baseline file from "%s" to "%s".', + baseline_fullpath, new_file) + + # Get the old baseline from SVN and save to the html directory. + output = run_shell(['svn', 'cat', '-r', 'BASE', baseline_fullpath]) + if (not output) or (output.upper().rstrip().endswith( + 'NO SUCH FILE OR DIRECTORY')): + logging.info(' No base file: "%s"', baseline_fullpath) + return + base_file = get_result_file_fullpath(self._options.html_directory, + baseline_filename, self._platform, + 'old') + f = open(base_file, 'wb') + f.write(output) + f.close() + logging.info(' Html: created old baseline file: "%s".', + base_file) + + # Get the diff between old and new baselines and save to the html dir. 
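# The property step above in isolation: newly added baselines get
# svn:mime-type=image/png for images and svn:eol-style=LF for text files, so
# they behave identically on every platform. Only the command is built here;
# running it is left to a helper such as run_shell above.
import os

def svn_prop_command(filename):
    ext = os.path.splitext(filename)[1].lower()
    if ext == '.png':
        return ['svn', 'pset', 'svn:mime-type', 'image/png', filename]
    if ext in ('.txt', '.checksum'):
        return ['svn', 'pset', 'svn:eol-style', 'LF', filename]
    return None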
+ if baseline_filename.upper().endswith('.TXT'): + # If the user specified a custom diff command in their svn config + # file, then it'll be used when we do svn diff, which we don't want + # to happen since we want the unified diff. Using --diff-cmd=diff + # doesn't always work, since they can have another diff executable + # in their path that gives different line endings. So we use a + # bogus temp directory as the config directory, which gets + # around these problems. + if sys.platform.startswith("win"): + parent_dir = tempfile.gettempdir() + else: + parent_dir = sys.path[0] # tempdir is not secure. + bogus_dir = os.path.join(parent_dir, "temp_svn_config") + logging.debug(' Html: temp config dir: "%s".', bogus_dir) + if not os.path.exists(bogus_dir): + os.mkdir(bogus_dir) + delete_bogus_dir = True + else: + delete_bogus_dir = False + + output = run_shell(["svn", "diff", "--config-dir", bogus_dir, + baseline_fullpath]) + if output: + diff_file = get_result_file_fullpath( + self._options.html_directory, baseline_filename, + self._platform, 'diff') + f = open(diff_file, 'wb') + f.write(output) + f.close() + logging.info(' Html: created baseline diff file: "%s".', + diff_file) + + if delete_bogus_dir: + shutil.rmtree(bogus_dir, True) + logging.debug(' Html: removed temp config dir: "%s".', + bogus_dir) + + +class HtmlGenerator(object): + """Class to generate rebaselining result comparison html.""" + + HTML_REBASELINE = ('<html>' + '<head>' + '<style>' + 'body {font-family: sans-serif;}' + '.mainTable {background: #666666;}' + '.mainTable td , .mainTable th {background: white;}' + '.detail {margin-left: 10px; margin-top: 3px;}' + '</style>' + '<title>Rebaselining Result Comparison (%(time)s)' + '</title>' + '</head>' + '<body>' + '<h2>Rebaselining Result Comparison (%(time)s)</h2>' + '%(body)s' + '</body>' + '</html>') + HTML_NO_REBASELINING_TESTS = ( + '<p>No tests found that need rebaselining.</p>') + HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>' + '%s</table><br>') + HTML_TR_TEST = ('<tr>' + '<th style="background-color: #CDECDE; border-bottom: ' + '1px solid black; font-size: 18pt; font-weight: bold" ' + 'colspan="5">' + '<a href="%s">%s</a>' + '</th>' + '</tr>') + HTML_TEST_DETAIL = ('<div class="detail">' + '<tr>' + '<th width="100">Baseline</th>' + '<th width="100">Platform</th>' + '<th width="200">Old</th>' + '<th width="200">New</th>' + '<th width="150">Difference</th>' + '</tr>' + '%s' + '</div>') + HTML_TD_NOLINK = '<td align=center><a>%s</a></td>' + HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>' + HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">' + '<img style="width: 200" src="%(uri)s" /></a></td>') + HTML_TR = '<tr>%s</tr>' + + def __init__(self, options, platforms, rebaselining_tests): + self._html_directory = options.html_directory + self._platforms = platforms + self._rebaselining_tests = rebaselining_tests + self._html_file = os.path.join(options.html_directory, + 'rebaseline.html') + + def generate_html(self): + """Generate html file for rebaselining result comparison.""" + + logging.info('Generating html file') + + html_body = '' + if not self._rebaselining_tests: + html_body += self.HTML_NO_REBASELINING_TESTS + else: + tests = list(self._rebaselining_tests) + tests.sort() + + test_no = 1 + for test in tests: + logging.info('Test %d: %s', test_no, test) + html_body += self._generate_html_for_one_test(test) + + html = self.HTML_REBASELINE % ({'time': time.asctime(), + 'body': html_body}) + logging.debug(html) + + f = 
open(self._html_file, 'w') + f.write(html) + f.close() + + logging.info('Baseline comparison html generated at "%s"', + self._html_file) + + def show_html(self): + """Launch the rebaselining html in brwoser.""" + + logging.info('Launching html: "%s"', self._html_file) + + html_uri = path_utils.filename_to_uri(self._html_file) + webbrowser.open(html_uri, 1) + + logging.info('Html launched.') + + def _generate_baseline_links(self, test_basename, suffix, platform): + """Generate links for baseline results (old, new and diff). + + Args: + test_basename: base filename of the test + suffix: baseline file suffixes: '.txt', '.png' + platform: win, linux or mac + + Returns: + html links for showing baseline results (old, new and diff) + """ + + baseline_filename = '%s-expected%s' % (test_basename, suffix) + logging.debug(' baseline filename: "%s"', baseline_filename) + + new_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, 'new') + logging.info(' New baseline file: "%s"', new_file) + if not os.path.exists(new_file): + logging.info(' No new baseline file: "%s"', new_file) + return '' + + old_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, 'old') + logging.info(' Old baseline file: "%s"', old_file) + if suffix == '.png': + html_td_link = self.HTML_TD_LINK_IMG + else: + html_td_link = self.HTML_TD_LINK + + links = '' + if os.path.exists(old_file): + links += html_td_link % { + 'uri': path_utils.filename_to_uri(old_file), + 'name': baseline_filename} + else: + logging.info(' No old baseline file: "%s"', old_file) + links += self.HTML_TD_NOLINK % '' + + links += html_td_link % {'uri': path_utils.filename_to_uri(new_file), + 'name': baseline_filename} + + diff_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, + 'diff') + logging.info(' Baseline diff file: "%s"', diff_file) + if os.path.exists(diff_file): + links += html_td_link % {'uri': path_utils.filename_to_uri( + diff_file), 'name': 'Diff'} + else: + logging.info(' No baseline diff file: "%s"', diff_file) + links += self.HTML_TD_NOLINK % '' + + return links + + def _generate_html_for_one_test(self, test): + """Generate html for one rebaselining test. + + Args: + test: layout test name + + Returns: + html that compares baseline results for the test. 
+ """ + + test_basename = os.path.basename(os.path.splitext(test)[0]) + logging.info(' basename: "%s"', test_basename) + rows = [] + for suffix in BASELINE_SUFFIXES: + if suffix == '.checksum': + continue + + logging.info(' Checking %s files', suffix) + for platform in self._platforms: + links = self._generate_baseline_links(test_basename, suffix, + platform) + if links: + row = self.HTML_TD_NOLINK % self._get_baseline_result_type( + suffix) + row += self.HTML_TD_NOLINK % platform + row += links + logging.debug(' html row: %s', row) + + rows.append(self.HTML_TR % row) + + if rows: + test_path = os.path.join(path_utils.layout_tests_dir(), test) + html = self.HTML_TR_TEST % (path_utils.filename_to_uri(test_path), + test) + html += self.HTML_TEST_DETAIL % ' '.join(rows) + + logging.debug(' html for test: %s', html) + return self.HTML_TABLE_TEST % html + + return '' + + def _get_baseline_result_type(self, suffix): + """Name of the baseline result type.""" + + if suffix == '.png': + return 'Pixel' + elif suffix == '.txt': + return 'Render Tree' + else: + return 'Other' + + +def main(): + """Main function to produce new baselines.""" + + option_parser = optparse.OptionParser() + option_parser.add_option('-v', '--verbose', + action='store_true', + default=False, + help='include debug-level logging.') + + option_parser.add_option('-p', '--platforms', + default='mac,win,win-xp,win-vista,linux', + help=('Comma delimited list of platforms ' + 'that need rebaselining.')) + + option_parser.add_option('-u', '--archive_url', + default=('http://build.chromium.org/buildbot/' + 'layout_test_results'), + help=('Url to find the layout test result archive' + ' file.')) + + option_parser.add_option('-w', '--webkit_canary', + action='store_true', + default=False, + help=('If True, pull baselines from webkit.org ' + 'canary bot.')) + + option_parser.add_option('-b', '--backup', + action='store_true', + default=False, + help=('Whether or not to backup the original test' + ' expectations file after rebaseline.')) + + option_parser.add_option('-d', '--html_directory', + default='', + help=('The directory that stores the results for' + ' rebaselining comparison.')) + + options = option_parser.parse_args()[0] + + # Set up our logging format. + log_level = logging.INFO + if options.verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, + format=('%(asctime)s %(filename)s:%(lineno)-3d ' + '%(levelname)s %(message)s'), + datefmt='%y%m%d %H:%M:%S') + + # Verify 'platforms' option is valid + if not options.platforms: + logging.error('Invalid "platforms" option. --platforms must be ' + 'specified in order to rebaseline.') + sys.exit(1) + platforms = [p.strip().lower() for p in options.platforms.split(',')] + for platform in platforms: + if not platform in REBASELINE_PLATFORM_ORDER: + logging.error('Invalid platform: "%s"' % (platform)) + sys.exit(1) + + # Adjust the platform order so rebaseline tool is running at the order of + # 'mac', 'win' and 'linux'. This is in same order with layout test baseline + # search paths. It simplifies how the rebaseline tool detects duplicate + # baselines. Check _IsDupBaseline method for details. 
+ rebaseline_platforms = [] + for platform in REBASELINE_PLATFORM_ORDER: + if platform in platforms: + rebaseline_platforms.append(platform) + + options.html_directory = setup_html_directory(options.html_directory) + + rebaselining_tests = set() + backup = options.backup + for platform in rebaseline_platforms: + rebaseliner = Rebaseliner(platform, options) + + logging.info('') + log_dashed_string('Rebaseline started', platform) + if rebaseliner.run(backup): + # Only need to backup one original copy of test expectation file. + backup = False + log_dashed_string('Rebaseline done', platform) + else: + log_dashed_string('Rebaseline failed', platform, logging.ERROR) + + rebaselining_tests |= set(rebaseliner.get_rebaselining_tests()) + + logging.info('') + log_dashed_string('Rebaselining result comparison started', None) + html_generator = HtmlGenerator(options, + rebaseline_platforms, + rebaselining_tests) + html_generator.generate_html() + html_generator.show_html() + log_dashed_string('Rebaselining result comparison done', None) + + sys.exit(0) + +if '__main__' == __name__: + main() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py new file mode 100755 index 0000000..88b97f8 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py @@ -0,0 +1,1697 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Run layout tests using the test_shell. + +This is a port of the existing webkit test script run-webkit-tests. + +The TestRunner class runs a series of tests (TestType interface) against a set +of test files. If a test file fails a TestType, it returns a list TestFailure +objects to the TestRunner. The TestRunner then aggregates the TestFailures to +create a final report. + +This script reads several files, if they exist in the test_lists subdirectory +next to this script itself. 
Each should contain a list of paths to individual +tests or entire subdirectories of tests, relative to the outermost test +directory. Entire lines starting with '//' (comments) will be ignored. + +For details of the files' contents and purposes, see test_lists/README. +""" + +import errno +import glob +import logging +import math +import optparse +import os +import Queue +import random +import re +import shutil +import subprocess +import sys +import time +import traceback + +from layout_package import apache_http_server +from layout_package import test_expectations +from layout_package import http_server +from layout_package import json_layout_results_generator +from layout_package import metered_stream +from layout_package import path_utils +from layout_package import platform_utils +from layout_package import test_failures +from layout_package import test_shell_thread +from layout_package import test_files +from layout_package import websocket_server +from test_types import fuzzy_image_diff +from test_types import image_diff +from test_types import test_type_base +from test_types import text_diff + +sys.path.append(path_utils.path_from_base('third_party')) +import simplejson + +# Indicates that we want detailed progress updates in the output (prints +# directory-by-directory feedback). +LOG_DETAILED_PROGRESS = 'detailed-progress' + +# Log any unexpected results while running (instead of just at the end). +LOG_UNEXPECTED = 'unexpected' + +# Builder base URL where we have the archived test results. +BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" + +TestExpectationsFile = test_expectations.TestExpectationsFile + + +class TestInfo: + """Groups information about a test for easy passing of data.""" + + def __init__(self, filename, timeout): + """Generates the URI and stores the filename and timeout for this test. + Args: + filename: Full path to the test. + timeout: Timeout for running the test in TestShell. + """ + self.filename = filename + self.uri = path_utils.filename_to_uri(filename) + self.timeout = timeout + expected_hash_file = path_utils.expected_filename(filename, + '.checksum') + try: + self.image_hash = open(expected_hash_file, "r").read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + self.image_hash = None + + +class ResultSummary(object): + """A class for partitioning the test results we get into buckets. + + This class is basically a glorified struct and it's private to this file + so we don't bother with any information hiding.""" + + def __init__(self, expectations, test_files): + self.total = len(test_files) + self.remaining = self.total + self.expectations = expectations + self.expected = 0 + self.unexpected = 0 + self.tests_by_expectation = {} + self.tests_by_timeline = {} + self.results = {} + self.unexpected_results = {} + self.failures = {} + self.tests_by_expectation[test_expectations.SKIP] = set() + for expectation in TestExpectationsFile.EXPECTATIONS.values(): + self.tests_by_expectation[expectation] = set() + for timeline in TestExpectationsFile.TIMELINES.values(): + self.tests_by_timeline[timeline] = ( + expectations.get_tests_with_timeline(timeline)) + + def add(self, test, failures, result, expected): + """Add a result into the appropriate bin. + + Args: + test: test file name + failures: list of failure objects from test execution + result: result of test (PASS, IMAGE, etc.). + expected: whether the result was what we expected it to be. 
+ """ + + self.tests_by_expectation[result].add(test) + self.results[test] = result + self.remaining -= 1 + if len(failures): + self.failures[test] = failures + if expected: + self.expected += 1 + else: + self.unexpected_results[test] = result + self.unexpected += 1 + + +class TestRunner: + """A class for managing running a series of tests on a series of layout + test files.""" + + HTTP_SUBDIR = os.sep.join(['', 'http', '']) + WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) + + # The per-test timeout in milliseconds, if no --time-out-ms option was + # given to run_webkit_tests. This should correspond to the default timeout + # in test_shell.exe. + DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 + + NUM_RETRY_ON_UNEXPECTED_FAILURE = 1 + + def __init__(self, options, meter): + """Initialize test runner data structures. + + Args: + options: a dictionary of command line options + meter: a MeteredStream object to record updates to. + """ + self._options = options + self._meter = meter + + if options.use_apache: + self._http_server = apache_http_server.LayoutTestApacheHttpd( + options.results_directory) + else: + self._http_server = http_server.Lighttpd(options.results_directory) + + self._websocket_server = websocket_server.PyWebSocket( + options.results_directory) + # disable wss server. need to install pyOpenSSL on buildbots. + # self._websocket_secure_server = websocket_server.PyWebSocket( + # options.results_directory, use_tls=True, port=9323) + + # a list of TestType objects + self._test_types = [] + + # a set of test files, and the same tests as a list + self._test_files = set() + self._test_files_list = None + self._file_dir = path_utils.path_from_base('webkit', 'tools', + 'layout_tests') + self._result_queue = Queue.Queue() + + # These are used for --log detailed-progress to track status by + # directory. + self._current_dir = None + self._current_progress_str = "" + self._current_test_number = 0 + + def __del__(self): + logging.debug("flushing stdout") + sys.stdout.flush() + logging.debug("flushing stderr") + sys.stderr.flush() + logging.debug("stopping http server") + # Stop the http server. + self._http_server.stop() + # Stop the Web Socket / Web Socket Secure servers. + self._websocket_server.stop() + # self._websocket_secure_server.Stop() + + def gather_file_paths(self, paths): + """Find all the files to test. + + Args: + paths: a list of globs to use instead of the defaults.""" + self._test_files = test_files.gather_test_files(paths) + + def parse_expectations(self, platform, is_debug_mode): + """Parse the expectations from the test_list files and return a data + structure holding them. Throws an error if the test_list files have + invalid syntax.""" + if self._options.lint_test_files: + test_files = None + else: + test_files = self._test_files + + try: + self._expectations = test_expectations.TestExpectations(test_files, + self._file_dir, platform, is_debug_mode, + self._options.lint_test_files) + return self._expectations + except Exception, err: + if self._options.lint_test_files: + print str(err) + else: + raise err + + def prepare_lists_and_print_output(self, write): + """Create appropriate subsets of test lists and returns a + ResultSummary object. Also prints expected test counts. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + """ + + # Remove skipped - both fixable and ignored - files from the + # top-level list of files to test. 
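# The optional-file read in TestInfo above follows a pattern worth isolating:
# a missing expected-checksum file is fine (the hash is simply None), but any
# other I/O error should still propagate. A standalone version:
import errno

def read_optional(path):
    try:
        with open(path, 'r') as f:
            return f.read()
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        return None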
+ num_all_test_files = len(self._test_files) + write("Found: %d tests" % (len(self._test_files))) + skipped = set() + if num_all_test_files > 1 and not self._options.force: + skipped = self._expectations.get_tests_with_result_type( + test_expectations.SKIP) + self._test_files -= skipped + + # Create a sorted list of test files so the subset chunk, + # if used, contains alphabetically consecutive tests. + self._test_files_list = list(self._test_files) + if self._options.randomize_order: + random.shuffle(self._test_files_list) + else: + self._test_files_list.sort() + + # If the user specifies they just want to run a subset of the tests, + # just grab a subset of the non-skipped tests. + if self._options.run_chunk or self._options.run_part: + chunk_value = self._options.run_chunk or self._options.run_part + test_files = self._test_files_list + try: + (chunk_num, chunk_len) = chunk_value.split(":") + chunk_num = int(chunk_num) + assert(chunk_num >= 0) + test_size = int(chunk_len) + assert(test_size > 0) + except: + logging.critical("invalid chunk '%s'" % chunk_value) + sys.exit(1) + + # Get the number of tests + num_tests = len(test_files) + + # Get the start offset of the slice. + if self._options.run_chunk: + chunk_len = test_size + # In this case chunk_num can be really large. We need + # to make the slave fit in the current number of tests. + slice_start = (chunk_num * chunk_len) % num_tests + else: + # Validate the data. + assert(test_size <= num_tests) + assert(chunk_num <= test_size) + + # To count the chunk_len, and make sure we don't skip + # some tests, we round to the next value that fits exactly + # all the parts. + rounded_tests = num_tests + if rounded_tests % test_size != 0: + rounded_tests = (num_tests + test_size - + (num_tests % test_size)) + + chunk_len = rounded_tests / test_size + slice_start = chunk_len * (chunk_num - 1) + # It does not mind if we go over test_size. + + # Get the end offset of the slice. + slice_end = min(num_tests, slice_start + chunk_len) + + files = test_files[slice_start:slice_end] + + tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( + (slice_end - slice_start), slice_start, slice_end, num_tests) + write(tests_run_msg) + + # If we reached the end and we don't have enough tests, we run some + # from the beginning. + if (self._options.run_chunk and + (slice_end - slice_start < chunk_len)): + extra = 1 + chunk_len - (slice_end - slice_start) + extra_msg = (' last chunk is partial, appending [0:%d]' % + extra) + write(extra_msg) + tests_run_msg += "\n" + extra_msg + files.extend(test_files[0:extra]) + tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = open(tests_run_filename, "w") + tests_run_file.write(tests_run_msg + "\n") + tests_run_file.close() + + len_skip_chunk = int(len(files) * len(skipped) / + float(len(self._test_files))) + skip_chunk_list = list(skipped)[0:len_skip_chunk] + skip_chunk = set(skip_chunk_list) + + # Update expectations so that the stats are calculated correctly. + # We need to pass a list that includes the right # of skipped files + # to ParseExpectations so that ResultSummary() will get the correct + # stats. So, we add in the subset of skipped files, and then + # subtract them back out. 
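# A worked example of the chunking above. With --run-chunk=3:40 over 100 tests,
# each chunk holds 40 tests and chunk 3 starts at (3 * 40) % 100 = 20, so the
# slice is tests[20:60]. With --run-part=2:3, the total is rounded up to 102,
# giving a chunk length of 34 and parts [0:34], [34:68] and [68:100]. The
# --run-chunk arithmetic on its own:
def chunk_slice(num_tests, chunk_num, chunk_len):
    slice_start = (chunk_num * chunk_len) % num_tests
    slice_end = min(num_tests, slice_start + chunk_len)
    return slice_start, slice_end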
+ self._test_files_list = files + skip_chunk_list + self._test_files = set(self._test_files_list) + + self._expectations = self.parse_expectations( + path_utils.platform_name(), self._options.target == 'Debug') + + self._test_files = set(files) + self._test_files_list = files + else: + skip_chunk = skipped + + result_summary = ResultSummary(self._expectations, + self._test_files | skip_chunk) + self._print_expected_results_of_type(write, result_summary, + test_expectations.PASS, "passes") + self._print_expected_results_of_type(write, result_summary, + test_expectations.FAIL, "failures") + self._print_expected_results_of_type(write, result_summary, + test_expectations.FLAKY, "flaky") + self._print_expected_results_of_type(write, result_summary, + test_expectations.SKIP, "skipped") + + + if self._options.force: + write('Running all tests, including skips (--force)') + else: + # Note that we don't actually run the skipped tests (they were + # subtracted out of self._test_files, above), but we stub out the + # results here so the statistics can remain accurate. + for test in skip_chunk: + result_summary.add(test, [], test_expectations.SKIP, + expected=True) + write("") + + return result_summary + + def add_test_type(self, test_type): + """Add a TestType to the TestRunner.""" + self._test_types.append(test_type) + + def _get_dir_for_test_file(self, test_file): + """Returns the highest-level directory by which to shard the given + test file.""" + index = test_file.rfind(os.sep + 'LayoutTests' + os.sep) + + test_file = test_file[index + len('LayoutTests/'):] + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + test_file = test_file_parts[1] + + # The http tests are very stable on mac/linux. + # TODO(ojan): Make the http server on Windows be apache so we can + # turn shard the http tests there as well. Switching to apache is + # what made them stable on linux/mac. + return_value = directory + while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) + and test_file.find(os.sep) >= 0): + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + return_value = os.path.join(return_value, directory) + test_file = test_file_parts[1] + + return return_value + + def _get_test_info_for_file(self, test_file): + """Returns the appropriate TestInfo object for the file. Mostly this + is used for looking up the timeout value (in ms) to use for the given + test.""" + if self._expectations.has_modifier(test_file, test_expectations.SLOW): + return TestInfo(test_file, self._options.slow_time_out_ms) + return TestInfo(test_file, self._options.time_out_ms) + + def _get_test_file_queue(self, test_files): + """Create the thread safe queue of lists of (test filenames, test URIs) + tuples. Each TestShellThread pulls a list from this queue and runs + those tests in order before grabbing the next available list. + + Shard the lists by directory. This helps ensure that tests that depend + on each other (aka bad tests!) continue to run together as most + cross-tests dependencies tend to occur within the same directory. + + Return: + The Queue of lists of TestInfo objects. 
+ """ + + if (self._options.experimental_fully_parallel or + self._is_single_threaded()): + filename_queue = Queue.Queue() + for test_file in test_files: + filename_queue.put( + ('.', [self._get_test_info_for_file(test_file)])) + return filename_queue + + tests_by_dir = {} + for test_file in test_files: + directory = self._get_dir_for_test_file(test_file) + tests_by_dir.setdefault(directory, []) + tests_by_dir[directory].append( + self._get_test_info_for_file(test_file)) + + # Sort by the number of tests in the dir so that the ones with the + # most tests get run first in order to maximize parallelization. + # Number of tests is a good enough, but not perfect, approximation + # of how long that set of tests will take to run. We can't just use + # a PriorityQueue until we move # to Python 2.6. + test_lists = [] + http_tests = None + for directory in tests_by_dir: + test_list = tests_by_dir[directory] + # Keep the tests in alphabetical order. + # TODO: Remove once tests are fixed so they can be run in any + # order. + test_list.reverse() + test_list_tuple = (directory, test_list) + if directory == 'LayoutTests' + os.sep + 'http': + http_tests = test_list_tuple + else: + test_lists.append(test_list_tuple) + test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) + + # Put the http tests first. There are only a couple hundred of them, + # but each http test takes a very long time to run, so sorting by the + # number of tests doesn't accurately capture how long they take to run. + if http_tests: + test_lists.insert(0, http_tests) + + filename_queue = Queue.Queue() + for item in test_lists: + filename_queue.put(item) + return filename_queue + + def _get_test_shell_args(self, index): + """Returns the tuple of arguments for tests and for test_shell.""" + shell_args = [] + test_args = test_type_base.TestArguments() + if not self._options.no_pixel_tests: + png_path = os.path.join(self._options.results_directory, + "png_result%s.png" % index) + shell_args.append("--pixel-tests=" + png_path) + test_args.png_path = png_path + + test_args.new_baseline = self._options.new_baseline + + test_args.show_sources = self._options.sources + + if self._options.startup_dialog: + shell_args.append('--testshell-startup-dialog') + + if self._options.gp_fault_error_box: + shell_args.append('--gp-fault-error-box') + + return (test_args, shell_args) + + def _contains_tests(self, subdir): + for test_file in self._test_files_list: + if test_file.find(subdir) >= 0: + return True + return False + + def _instantiate_test_shell_threads(self, test_shell_binary, test_files, + result_summary): + """Instantitates and starts the TestShellThread(s). + + Return: + The list of threads. + """ + test_shell_command = [test_shell_binary] + + if self._options.wrapper: + # This split() isn't really what we want -- it incorrectly will + # split quoted strings within the wrapper argument -- but in + # practice it shouldn't come up and the --help output warns + # about it anyway. + test_shell_command = (self._options.wrapper.split() + + test_shell_command) + + filename_queue = self._get_test_file_queue(test_files) + + # Instantiate TestShellThreads and start them. + threads = [] + for i in xrange(int(self._options.num_test_shells)): + # Create separate TestTypes instances for each thread. 
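# The shard ordering above, in miniature: largest directories first so threads
# stay busy, with the slow http shard forced to the front regardless of size.
# Here shards maps a directory name to its list of tests; the directory names
# are placeholders.
def order_shards(shards, http_dir='LayoutTests/http'):
    ordered = sorted(shards.items(), key=lambda item: len(item[1]), reverse=True)
    http = [s for s in ordered if s[0] == http_dir]
    rest = [s for s in ordered if s[0] != http_dir]
    return http + rest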
+ test_types = [] + for t in self._test_types: + test_types.append(t(self._options.platform, + self._options.results_directory)) + + test_args, shell_args = self._get_test_shell_args(i) + thread = test_shell_thread.TestShellThread(filename_queue, + self._result_queue, + test_shell_command, + test_types, + test_args, + shell_args, + self._options) + if self._is_single_threaded(): + thread.run_in_main_thread(self, result_summary) + else: + thread.start() + threads.append(thread) + + return threads + + def _stop_layout_test_helper(self, proc): + """Stop the layout test helper and closes it down.""" + if proc: + logging.debug("Stopping layout test helper") + proc.stdin.write("x\n") + proc.stdin.close() + proc.wait() + + def _is_single_threaded(self): + """Returns whether we should run all the tests in the main thread.""" + return int(self._options.num_test_shells) == 1 + + def _run_tests(self, test_shell_binary, file_list, result_summary): + """Runs the tests in the file_list. + + Return: A tuple (failures, thread_timings, test_timings, + individual_test_timings) + failures is a map from test to list of failure types + thread_timings is a list of dicts with the total runtime + of each thread with 'name', 'num_tests', 'total_time' properties + test_timings is a list of timings for each sharded subdirectory + of the form [time, directory_name, num_tests] + individual_test_timings is a list of run times for each test + in the form {filename:filename, test_run_time:test_run_time} + result_summary: summary object to populate with the results + """ + threads = self._instantiate_test_shell_threads(test_shell_binary, + file_list, + result_summary) + + # Wait for the threads to finish and collect test failures. + failures = {} + test_timings = {} + individual_test_timings = [] + thread_timings = [] + try: + for thread in threads: + while thread.isAlive(): + # Let it timeout occasionally so it can notice a + # KeyboardInterrupt. Actually, the timeout doesn't + # really matter: apparently it suffices to not use + # an indefinite blocking join for it to + # be interruptible by KeyboardInterrupt. + thread.join(0.1) + self.update_summary(result_summary) + thread_timings.append({'name': thread.getName(), + 'num_tests': thread.get_num_tests(), + 'total_time': thread.get_total_time()}) + test_timings.update(thread.get_directory_timing_stats()) + individual_test_timings.extend( + thread.get_individual_test_stats()) + except KeyboardInterrupt: + for thread in threads: + thread.cancel() + self._stop_layout_test_helper(layout_test_helper_proc) + raise + for thread in threads: + # Check whether a TestShellThread died before normal completion. + exception_info = thread.get_exception_info() + if exception_info is not None: + # Re-raise the thread's exception here to make it clear that + # testing was aborted. Otherwise, the tests that did not run + # would be assumed to have passed. + raise exception_info[0], exception_info[1], exception_info[2] + + # Make sure we pick up any remaining tests. + self.update_summary(result_summary) + return (thread_timings, test_timings, individual_test_timings) + + def run(self, result_summary): + """Run all our tests on all our test files. + + For each test file, we run each test type. If there are any failures, + we collect them for reporting. + + Args: + result_summary: a summary object tracking the test results. + + Return: + We return nonzero if there are regressions compared to the last run. 
+ """ + if not self._test_files: + return 0 + start_time = time.time() + test_shell_binary = path_utils.test_shell_path(self._options.target) + + # Start up any helper needed + layout_test_helper_proc = None + if not self._options.no_pixel_tests: + helper_path = path_utils.layout_test_helper_path( + self._options.target) + if len(helper_path): + logging.debug("Starting layout helper %s" % helper_path) + layout_test_helper_proc = subprocess.Popen( + [helper_path], stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=None) + is_ready = layout_test_helper_proc.stdout.readline() + if not is_ready.startswith('ready'): + logging.error("layout_test_helper failed to be ready") + + # Check that the system dependencies (themes, fonts, ...) are correct. + if not self._options.nocheck_sys_deps: + proc = subprocess.Popen([test_shell_binary, + "--check-layout-test-sys-deps"]) + if proc.wait() != 0: + logging.info("Aborting because system dependencies check " + "failed.\n To override, invoke with " + "--nocheck-sys-deps") + sys.exit(1) + + if self._contains_tests(self.HTTP_SUBDIR): + self._http_server.start() + + if self._contains_tests(self.WEBSOCKET_SUBDIR): + self._websocket_server.start() + # self._websocket_secure_server.Start() + + thread_timings, test_timings, individual_test_timings = ( + self._run_tests(test_shell_binary, self._test_files_list, + result_summary)) + + # We exclude the crashes from the list of results to retry, because + # we want to treat even a potentially flaky crash as an error. + failures = self._get_failures(result_summary, include_crashes=False) + retries = 0 + retry_summary = result_summary + while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and + len(failures)): + logging.debug("Retrying %d unexpected failure(s)" % len(failures)) + retries += 1 + retry_summary = ResultSummary(self._expectations, failures.keys()) + self._run_tests(test_shell_binary, failures.keys(), retry_summary) + failures = self._get_failures(retry_summary, include_crashes=True) + + self._stop_layout_test_helper(layout_test_helper_proc) + end_time = time.time() + + write = create_logging_writer(self._options, 'timing') + self._print_timing_statistics(write, end_time - start_time, + thread_timings, test_timings, + individual_test_timings, + result_summary) + + self._meter.update("") + + if self._options.verbose: + # We write this block to stdout for compatibility with the + # buildbot log parser, which only looks at stdout, not stderr :( + write = lambda s: sys.stdout.write("%s\n" % s) + else: + write = create_logging_writer(self._options, 'actual') + + self._print_result_summary(write, result_summary) + + sys.stdout.flush() + sys.stderr.flush() + + if (LOG_DETAILED_PROGRESS in self._options.log or + (LOG_UNEXPECTED in self._options.log and + result_summary.total != result_summary.expected)): + print + + # This summary data gets written to stdout regardless of log level + self._print_one_line_summary(result_summary.total, + result_summary.expected) + + unexpected_results = self._summarize_unexpected_results(result_summary, + retry_summary) + self._print_unexpected_results(unexpected_results) + + # Write the same data to log files. + self._write_json_files(unexpected_results, result_summary, + individual_test_timings) + + # Write the summary to disk (results.html) and maybe open the + # test_shell to this file. 
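# Illustrative sketch, not taken from the patch above: the retry policy used
# by run(), reduced to a standalone loop.  retry_unexpected_failures,
# run_tests_fn and the retry bound of 3 are placeholders (the class above
# defines its own NUM_RETRY_ON_UNEXPECTED_FAILURE); the crash handling
# mirrors the code, where crashes are excluded from the retry set so they
# always count as regressions.
NUM_RETRY_ON_UNEXPECTED_FAILURE = 3   # placeholder value for the sketch

def retry_unexpected_failures(run_tests_fn, initial_failures):
    """initial_failures: dict mapping test -> unexpected result, with
    crashes already filtered out (include_crashes=False above).
    run_tests_fn(tests) re-runs the given tests and returns the dict of
    tests that still failed unexpectedly."""
    failures = dict(initial_failures)
    retries = 0
    while retries < NUM_RETRY_ON_UNEXPECTED_FAILURE and failures:
        retries += 1
        failures = run_tests_fn(sorted(failures.keys()))
    # Anything still failing after the retries is reported as a regression;
    # tests that recovered along the way are treated as flaky.
    return failures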
+ wrote_results = self._write_results_html_file(result_summary) + if not self._options.noshow_results and wrote_results: + self._show_results_html_file() + + # Ignore flaky failures and unexpected passes so we don't turn the + # bot red for those. + return unexpected_results['num_regressions'] + + def update_summary(self, result_summary): + """Update the summary while running tests.""" + while True: + try: + (test, fail_list) = self._result_queue.get_nowait() + result = test_failures.determine_result_type(fail_list) + expected = self._expectations.matches_an_expected_result(test, + result) + result_summary.add(test, fail_list, result, expected) + if (LOG_DETAILED_PROGRESS in self._options.log and + (self._options.experimental_fully_parallel or + self._is_single_threaded())): + self._display_detailed_progress(result_summary) + else: + if not expected and LOG_UNEXPECTED in self._options.log: + self._print_unexpected_test_result(test, result) + self._display_one_line_progress(result_summary) + except Queue.Empty: + return + + def _display_one_line_progress(self, result_summary): + """Displays the progress through the test run.""" + self._meter.update("Testing: %d ran as expected, %d didn't, %d left" % + (result_summary.expected, result_summary.unexpected, + result_summary.remaining)) + + def _display_detailed_progress(self, result_summary): + """Display detailed progress output where we print the directory name + and one dot for each completed test. This is triggered by + "--log detailed-progress".""" + if self._current_test_number == len(self._test_files_list): + return + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname( + path_utils.relative_test_filename(next_test)) + if self._current_progress_str == "": + self._current_progress_str = "%s: " % (next_dir) + self._current_dir = next_dir + + while next_test in result_summary.results: + if next_dir != self._current_dir: + self._meter.write("%s\n" % (self._current_progress_str)) + self._current_progress_str = "%s: ." % (next_dir) + self._current_dir = next_dir + else: + self._current_progress_str += "." + + if (next_test in result_summary.unexpected_results and + LOG_UNEXPECTED in self._options.log): + result = result_summary.unexpected_results[next_test] + self._meter.write("%s\n" % self._current_progress_str) + self._print_unexpected_test_result(next_test, result) + self._current_progress_str = "%s: " % self._current_dir + + self._current_test_number += 1 + if self._current_test_number == len(self._test_files_list): + break + + next_test = self._test_files_list[self._current_test_number] + next_dir = os.path.dirname( + path_utils.relative_test_filename(next_test)) + + if result_summary.remaining: + remain_str = " (%d)" % (result_summary.remaining) + self._meter.update("%s%s" % + (self._current_progress_str, remain_str)) + else: + self._meter.write("%s\n" % (self._current_progress_str)) + + def _get_failures(self, result_summary, include_crashes): + """Filters a dict of results and returns only the failures. + + Args: + result_summary: the results of the test run + include_crashes: whether crashes are included in the output. + We use False when finding the list of failures to retry + to see if the results were flaky. Although the crashes may also be + flaky, we treat them as if they aren't so that they're not ignored. 
+ Returns: + a dict of files -> results + """ + failed_results = {} + for test, result in result_summary.unexpected_results.iteritems(): + if (result == test_expectations.PASS or + result == test_expectations.CRASH and not include_crashes): + continue + failed_results[test] = result + + return failed_results + + def _summarize_unexpected_results(self, result_summary, retry_summary): + """Summarize any unexpected results as a dict. + + TODO(dpranke): split this data structure into a separate class? + + Args: + result_summary: summary object from initial test runs + retry_summary: summary object from final test run of retried tests + Returns: + A dictionary containing a summary of the unexpected results from the + run, with the following fields: + 'version': a version indicator (1 in this version) + 'fixable': # of fixable tests (NOW - PASS) + 'skipped': # of skipped tests (NOW & SKIPPED) + 'num_regressions': # of non-flaky failures + 'num_flaky': # of flaky failures + 'num_passes': # of unexpected passes + 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} + """ + results = {} + results['version'] = 1 + + tbe = result_summary.tests_by_expectation + tbt = result_summary.tests_by_timeline + results['fixable'] = len(tbt[test_expectations.NOW] - + tbe[test_expectations.PASS]) + results['skipped'] = len(tbt[test_expectations.NOW] & + tbe[test_expectations.SKIP]) + + num_passes = 0 + num_flaky = 0 + num_regressions = 0 + keywords = {} + for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): + keywords[v] = k.upper() + + tests = {} + for filename, result in result_summary.unexpected_results.iteritems(): + # Note that if a test crashed in the original run, we ignore + # whether or not it crashed when we retried it (if we retried it), + # and always consider the result not flaky. + test = path_utils.relative_test_filename(filename) + expected = self._expectations.get_expectations_string(filename) + actual = [keywords[result]] + + if result == test_expectations.PASS: + num_passes += 1 + elif result == test_expectations.CRASH: + num_regressions += 1 + else: + if filename not in retry_summary.unexpected_results: + actual.extend( + self._expectations.get_expectations_string( + filename).split(" ")) + num_flaky += 1 + else: + retry_result = retry_summary.unexpected_results[filename] + if result != retry_result: + actual.append(keywords[retry_result]) + num_flaky += 1 + else: + num_regressions += 1 + + tests[test] = {} + tests[test]['expected'] = expected + tests[test]['actual'] = " ".join(actual) + + results['tests'] = tests + results['num_passes'] = num_passes + results['num_flaky'] = num_flaky + results['num_regressions'] = num_regressions + + return results + + def _write_json_files(self, unexpected_results, result_summary, + individual_test_timings): + """Writes the results of the test run as JSON files into the results + dir. + + There are three different files written into the results dir: + unexpected_results.json: A short list of any unexpected results. + This is used by the buildbots to display results. + expectations.json: This is used by the flakiness dashboard. + results.json: A full list of the results - used by the flakiness + dashboard and the aggregate results dashboard. + + Args: + unexpected_results: dict of unexpected results + result_summary: full summary object + individual_test_timings: list of test times (used by the flakiness + dashboard). + """ + logging.debug("Writing JSON files in %s." 
% + self._options.results_directory) + unexpected_file = open(os.path.join(self._options.results_directory, + "unexpected_results.json"), "w") + unexpected_file.write(simplejson.dumps(unexpected_results, + sort_keys=True, indent=2)) + unexpected_file.close() + + # Write a json file of the test_expectations.txt file for the layout + # tests dashboard. + expectations_file = open(os.path.join(self._options.results_directory, + "expectations.json"), "w") + expectations_json = \ + self._expectations.get_expectations_json_for_all_platforms() + expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");") + expectations_file.close() + + json_layout_results_generator.JSONLayoutResultsGenerator( + self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + BUILDER_BASE_URL, individual_test_timings, + self._expectations, result_summary, self._test_files_list) + + logging.debug("Finished writing JSON files.") + + def _print_expected_results_of_type(self, write, result_summary, + result_type, result_type_str): + """Print the number of the tests in a given result class. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary - the object containing all the results to report on + result_type - the particular result type to report in the summary. + result_type_str - a string description of the result_type. + """ + tests = self._expectations.get_tests_with_result_type(result_type) + now = result_summary.tests_by_timeline[test_expectations.NOW] + wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] + defer = result_summary.tests_by_timeline[test_expectations.DEFER] + + # We use a fancy format string in order to print the data out in a + # nicely-aligned table. + fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)" + % (self._num_digits(now), self._num_digits(defer), + self._num_digits(wontfix))) + write(fmtstr % (len(tests), result_type_str, len(tests & now), + len(tests & defer), len(tests & wontfix))) + + def _num_digits(self, num): + """Returns the number of digits needed to represent the length of a + sequence.""" + ndigits = 1 + if len(num): + ndigits = int(math.log10(len(num))) + 1 + return ndigits + + def _print_timing_statistics(self, write, total_time, thread_timings, + directory_test_timings, individual_test_timings, + result_summary): + """Record timing-specific information for the test run. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. 
+ total_time: total elapsed time (in seconds) for the test run + thread_timings: wall clock time each thread ran for + directory_test_timings: timing by directory + individual_test_timings: timing by file + result_summary: summary object for the test run + """ + write("Test timing:") + write(" %6.2f total testing time" % total_time) + write("") + write("Thread timing:") + cuml_time = 0 + for t in thread_timings: + write(" %10s: %5d tests, %6.2f secs" % + (t['name'], t['num_tests'], t['total_time'])) + cuml_time += t['total_time'] + write(" %6.2f cumulative, %6.2f optimal" % + (cuml_time, cuml_time / int(self._options.num_test_shells))) + write("") + + self._print_aggregate_test_statistics(write, individual_test_timings) + self._print_individual_test_times(write, individual_test_timings, + result_summary) + self._print_directory_timings(write, directory_test_timings) + + def _print_aggregate_test_statistics(self, write, individual_test_timings): + """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. + """ + test_types = individual_test_timings[0].time_for_diffs.keys() + times_for_test_shell = [] + times_for_diff_processing = [] + times_per_test_type = {} + for test_type in test_types: + times_per_test_type[test_type] = [] + + for test_stats in individual_test_timings: + times_for_test_shell.append(test_stats.test_run_time) + times_for_diff_processing.append( + test_stats.total_time_for_all_diffs) + time_for_diffs = test_stats.time_for_diffs + for test_type in test_types: + times_per_test_type[test_type].append( + time_for_diffs[test_type]) + + self._print_statistics_for_test_timings(write, + "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell) + self._print_statistics_for_test_timings(write, + "PER TEST DIFF PROCESSING TIMES (seconds):", + times_for_diff_processing) + for test_type in test_types: + self._print_statistics_for_test_timings(write, + "PER TEST TIMES BY TEST TYPE: %s" % test_type, + times_per_test_type[test_type]) + + def _print_individual_test_times(self, write, individual_test_timings, + result_summary): + """Prints the run times for slow, timeout and crash tests. + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + individual_test_timings: List of test_shell_thread.TestStats for all + tests. + result_summary: summary object for test run + """ + # Reverse-sort by the time spent in test_shell. 
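# Illustrative sketch, not taken from the patch above: the median, mean and
# standard-deviation arithmetic used by the timing report, written as a
# standalone helper (timing_statistics is a hypothetical name).  Note that
# the loop in _print_statistics_for_test_timings below reassigns
# sum_of_deviations on every iteration instead of accumulating it, and reads
# the odd-length median one index below the middle element; the helper shows
# the intended arithmetic.
import math

def timing_statistics(timings):
    """timings: non-empty list of per-test run times in seconds.
    Returns (median, mean, population standard deviation)."""
    values = sorted(timings)
    n = len(values)
    if n % 2 == 1:
        median = values[(n - 1) // 2]
    else:
        median = (values[n // 2 - 1] + values[n // 2]) / 2.0
    mean = sum(values) / float(n)
    sum_of_deviations = 0.0
    for value in values:
        sum_of_deviations += math.pow(value - mean, 2)
    std_deviation = math.sqrt(sum_of_deviations / n)
    return median, mean, std_deviation

# Example: timing_statistics([1.0, 2.0, 4.0]) -> (2.0, 2.333..., 1.247...)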
+ individual_test_timings.sort(lambda a, b: + cmp(b.test_run_time, a.test_run_time)) + + num_printed = 0 + slow_tests = [] + timeout_or_crash_tests = [] + unexpected_slow_tests = [] + for test_tuple in individual_test_timings: + filename = test_tuple.filename + is_timeout_crash_or_slow = False + if self._expectations.has_modifier(filename, + test_expectations.SLOW): + is_timeout_crash_or_slow = True + slow_tests.append(test_tuple) + + if filename in result_summary.failures: + result = result_summary.results[filename] + if (result == test_expectations.TIMEOUT or + result == test_expectations.CRASH): + is_timeout_crash_or_slow = True + timeout_or_crash_tests.append(test_tuple) + + if (not is_timeout_crash_or_slow and + num_printed < self._options.num_slow_tests_to_log): + num_printed = num_printed + 1 + unexpected_slow_tests.append(test_tuple) + + write("") + self._print_test_list_timing(write, "%s slowest tests that are not " + "marked as SLOW and did not timeout/crash:" % + self._options.num_slow_tests_to_log, unexpected_slow_tests) + write("") + self._print_test_list_timing(write, "Tests marked as SLOW:", + slow_tests) + write("") + self._print_test_list_timing(write, "Tests that timed out or crashed:", + timeout_or_crash_tests) + write("") + + def _print_test_list_timing(self, write, title, test_list): + """Print timing info for each test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: section heading + test_list: tests that fall in this section + """ + write(title) + for test_tuple in test_list: + filename = test_tuple.filename[len( + path_utils.layout_tests_dir()) + 1:] + filename = filename.replace('\\', '/') + test_run_time = round(test_tuple.test_run_time, 1) + write(" %s took %s seconds" % (filename, test_run_time)) + + def _print_directory_timings(self, write, directory_test_timings): + """Print timing info by directory for any directories that + take > 10 seconds to run. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + directory_test_timing: time info for each directory + """ + timings = [] + for directory in directory_test_timings: + num_tests, time_for_directory = directory_test_timings[directory] + timings.append((round(time_for_directory, 1), directory, + num_tests)) + timings.sort() + + write("Time to process slowest subdirectories:") + min_seconds_to_print = 10 + for timing in timings: + if timing[0] > min_seconds_to_print: + write(" %s took %s seconds to run %s tests." % (timing[1], + timing[0], timing[2])) + write("") + + def _print_statistics_for_test_timings(self, write, title, timings): + """Prints the median, mean and standard deviation of the values in + timings. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + title: Title for these timings. + timings: A list of floats representing times. 
+ """ + write(title) + timings.sort() + + num_tests = len(timings) + percentile90 = timings[int(.9 * num_tests)] + percentile99 = timings[int(.99 * num_tests)] + + if num_tests % 2 == 1: + median = timings[((num_tests - 1) / 2) - 1] + else: + lower = timings[num_tests / 2 - 1] + upper = timings[num_tests / 2] + median = (float(lower + upper)) / 2 + + mean = sum(timings) / num_tests + + for time in timings: + sum_of_deviations = math.pow(time - mean, 2) + + std_deviation = math.sqrt(sum_of_deviations / num_tests) + write(" Median: %6.3f" % median) + write(" Mean: %6.3f" % mean) + write(" 90th percentile: %6.3f" % percentile90) + write(" 99th percentile: %6.3f" % percentile99) + write(" Standard dev: %6.3f" % std_deviation) + write("") + + def _print_result_summary(self, write, result_summary): + """Print a short summary about how many tests passed. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: information to log + """ + failed = len(result_summary.failures) + skipped = len( + result_summary.tests_by_expectation[test_expectations.SKIP]) + total = result_summary.total + passed = total - failed - skipped + pct_passed = 0.0 + if total > 0: + pct_passed = float(passed) * 100 / total + + write("") + write("=> Results: %d/%d tests passed (%.1f%%)" % + (passed, total, pct_passed)) + write("") + self._print_result_summary_entry(write, result_summary, + test_expectations.NOW, "Tests to be fixed for the current release") + + write("") + self._print_result_summary_entry(write, result_summary, + test_expectations.DEFER, + "Tests we'll fix in the future if they fail (DEFER)") + + write("") + self._print_result_summary_entry(write, result_summary, + test_expectations.WONTFIX, + "Tests that will only be fixed if they crash (WONTFIX)") + + def _print_result_summary_entry(self, write, result_summary, timeline, + heading): + """Print a summary block of results for a particular timeline of test. + + Args: + write: A callback to write info to (e.g., a LoggingWriter) or + sys.stdout.write. + result_summary: summary to print results for + timeline: the timeline to print results for (NOT, WONTFIX, etc.) + heading: a textual description of the timeline + """ + total = len(result_summary.tests_by_timeline[timeline]) + not_passing = (total - + len(result_summary.tests_by_expectation[test_expectations.PASS] & + result_summary.tests_by_timeline[timeline])) + write("=> %s (%d):" % (heading, not_passing)) + + for result in TestExpectationsFile.EXPECTATION_ORDER: + if result == test_expectations.PASS: + continue + results = (result_summary.tests_by_expectation[result] & + result_summary.tests_by_timeline[timeline]) + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] + if not_passing and len(results): + pct = len(results) * 100.0 / not_passing + write(" %5d %-24s (%4.1f%%)" % (len(results), + desc[len(results) != 1], pct)) + + def _print_one_line_summary(self, total, expected): + """Print a one-line summary of the test run to stdout. + + Args: + total: total number of tests run + expected: number of expected results + """ + unexpected = total - expected + if unexpected == 0: + print "All %d tests ran as expected." 
% expected + elif expected == 1: + print "1 test ran as expected, %d didn't:" % unexpected + else: + print "%d tests ran as expected, %d didn't:" % (expected, + unexpected) + + def _print_unexpected_results(self, unexpected_results): + """Prints any unexpected results in a human-readable form to stdout.""" + passes = {} + flaky = {} + regressions = {} + + if len(unexpected_results['tests']): + print "" + + for test, results in unexpected_results['tests'].iteritems(): + actual = results['actual'].split(" ") + expected = results['expected'].split(" ") + if actual == ['PASS']: + if 'CRASH' in expected: + _add_to_dict_of_lists(passes, + 'Expected to crash, but passed', + test) + elif 'TIMEOUT' in expected: + _add_to_dict_of_lists(passes, + 'Expected to timeout, but passed', + test) + else: + _add_to_dict_of_lists(passes, + 'Expected to fail, but passed', + test) + elif len(actual) > 1: + # We group flaky tests by the first actual result we got. + _add_to_dict_of_lists(flaky, actual[0], test) + else: + _add_to_dict_of_lists(regressions, results['actual'], test) + + if len(passes): + for key, tests in passes.iteritems(): + print "%s: (%d)" % (key, len(tests)) + tests.sort() + for test in tests: + print " %s" % test + print + + if len(flaky): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in flaky.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Unexpected flakiness: %s (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + + for test in tests: + result = unexpected_results['tests'][test] + actual = result['actual'].split(" ") + expected = result['expected'].split(" ") + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + new_expectations_list = list(set(actual) | set(expected)) + print " %s = %s" % (test, " ".join(new_expectations_list)) + print + + if len(regressions): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in regressions.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + print "Regressions: Unexpected %s : (%d)" % ( + descriptions[result][1], len(tests)) + tests.sort() + for test in tests: + print " %s = %s" % (test, key) + print + + if len(unexpected_results['tests']) and self._options.verbose: + print "-" * 78 + + def _print_unexpected_test_result(self, test, result): + """Prints one unexpected test result line.""" + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0] + self._meter.write(" %s -> unexpected %s\n" % + (path_utils.relative_test_filename(test), desc)) + + def _write_results_html_file(self, result_summary): + """Write results.html which is a summary of tests that failed. 
+ + Args: + result_summary: a summary of the results :) + + Returns: + True if any results were written (since expected failures may be + omitted) + """ + # test failures + if self._options.full_results_html: + test_files = result_summary.failures.keys() + else: + unexpected_failures = self._get_failures(result_summary, + include_crashes=True) + test_files = unexpected_failures.keys() + if not len(test_files): + return False + + out_filename = os.path.join(self._options.results_directory, + "results.html") + out_file = open(out_filename, 'w') + # header + if self._options.full_results_html: + h2 = "Test Failures" + else: + h2 = "Unexpected Test Failures" + out_file.write("<html><head><title>Layout Test Results (%(time)s)" + "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n" + % {'h2': h2, 'time': time.asctime()}) + + test_files.sort() + for test_file in test_files: + test_failures = result_summary.failures.get(test_file, []) + out_file.write("<p><a href='%s'>%s</a><br />\n" + % (path_utils.filename_to_uri(test_file), + path_utils.relative_test_filename(test_file))) + for failure in test_failures: + out_file.write(" %s<br/>" + % failure.result_html_output( + path_utils.relative_test_filename(test_file))) + out_file.write("</p>\n") + + # footer + out_file.write("</body></html>\n") + return True + + def _show_results_html_file(self): + """Launches the test shell open to the results.html page.""" + results_filename = os.path.join(self._options.results_directory, + "results.html") + subprocess.Popen([path_utils.test_shell_path(self._options.target), + path_utils.filename_to_uri(results_filename)]) + + +def _add_to_dict_of_lists(dict, key, value): + dict.setdefault(key, []).append(value) + + +def read_test_files(files): + tests = [] + for file in files: + for line in open(file): + line = test_expectations.strip_comments(line) + if line: + tests.append(line) + return tests + + +def create_logging_writer(options, log_option): + """Returns a write() function that will write the string to logging.info() + if comp was specified in --log or if --verbose is true. Otherwise the + message is dropped. + + Args: + options: list of command line options from optparse + log_option: option to match in options.log in order for the messages + to be logged (e.g., 'actual' or 'expected') + """ + if options.verbose or log_option in options.log.split(","): + return logging.info + return lambda str: 1 + + +def main(options, args): + """Run the tests. Will call sys.exit when complete. + + Args: + options: a dictionary of command line options + args: a list of sub directories or files to test + """ + + if options.sources: + options.verbose = True + + # Set up our logging format. + meter = metered_stream.MeteredStream(options.verbose, sys.stderr) + log_fmt = '%(message)s' + log_datefmt = '%y%m%d %H:%M:%S' + log_level = logging.INFO + if options.verbose: + log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' + '%(message)s') + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt, + stream=meter) + + if not options.target: + if options.debug: + options.target = "Debug" + else: + options.target = "Release" + + if not options.use_apache: + options.use_apache = sys.platform in ('darwin', 'linux2') + + if options.results_directory.startswith("/"): + # Assume it's an absolute path and normalize. 
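# Illustrative sketch, not taken from the patch above: how the
# create_logging_writer helper defined earlier in this file is meant to be
# used.  FakeOptions is a hypothetical stand-in for the optparse options
# object, and the example assumes logging has been configured as in main().
class FakeOptions(object):
    verbose = False
    log = "config,expected"

fake_options = FakeOptions()
write_config = create_logging_writer(fake_options, 'config')  # logging.info
write_timing = create_logging_writer(fake_options, 'timing')  # no-op lambda
write_config("Running 4 test_shells in parallel")   # logged at INFO level
write_timing("Thread timing: ...")                  # silently dropped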
+ options.results_directory = path_utils.get_absolute_path( + options.results_directory) + else: + # If it's a relative path, make the output directory relative to + # Debug or Release. + basedir = path_utils.path_from_base('webkit') + options.results_directory = path_utils.get_absolute_path( + os.path.join(basedir, options.target, options.results_directory)) + + if options.clobber_old_results: + # Just clobber the actual test results directories since the other + # files in the results directory are explicitly used for cross-run + # tracking. + path = os.path.join(options.results_directory, 'LayoutTests') + if os.path.exists(path): + shutil.rmtree(path) + + # Ensure platform is valid and force it to the form 'chromium-<platform>'. + options.platform = path_utils.platform_name(options.platform) + + if not options.num_test_shells: + # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1. + options.num_test_shells = platform_utils.get_num_cores() + + write = create_logging_writer(options, 'config') + write("Running %s test_shells in parallel" % options.num_test_shells) + + if not options.time_out_ms: + if options.target == "Debug": + options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS) + else: + options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS) + + options.slow_time_out_ms = str(5 * int(options.time_out_ms)) + write("Regular timeout: %s, slow test timeout: %s" % + (options.time_out_ms, options.slow_time_out_ms)) + + # Include all tests if none are specified. + new_args = [] + for arg in args: + if arg and arg != '': + new_args.append(arg) + + paths = new_args + if not paths: + paths = [] + if options.test_list: + paths += read_test_files(options.test_list) + + # Create the output directory if it doesn't already exist. + path_utils.maybe_make_directory(options.results_directory) + meter.update("Gathering files ...") + + test_runner = TestRunner(options, meter) + test_runner.gather_file_paths(paths) + + if options.lint_test_files: + # Creating the expecations for each platform/target pair does all the + # test list parsing and ensures it's correct syntax (e.g. no dupes). + for platform in TestExpectationsFile.PLATFORMS: + test_runner.parse_expectations(platform, is_debug_mode=True) + test_runner.parse_expectations(platform, is_debug_mode=False) + print ("If there are no fail messages, errors or exceptions, then the " + "lint succeeded.") + sys.exit(0) + + try: + test_shell_binary_path = path_utils.test_shell_path(options.target) + except path_utils.PathNotFound: + print "\nERROR: test_shell is not found. Be sure that you have built" + print "it and that you are using the correct build. This script" + print "will run the Release one by default. 
Use --debug to use the" + print "Debug build.\n" + sys.exit(1) + + write = create_logging_writer(options, "config") + write("Using platform '%s'" % options.platform) + write("Placing test results in %s" % options.results_directory) + if options.new_baseline: + write("Placing new baselines in %s" % + path_utils.chromium_baseline_path(options.platform)) + write("Using %s build at %s" % (options.target, test_shell_binary_path)) + if options.no_pixel_tests: + write("Not running pixel tests") + write("") + + meter.update("Parsing expectations ...") + test_runner.parse_expectations(options.platform, options.target == 'Debug') + + meter.update("Preparing tests ...") + write = create_logging_writer(options, "expected") + result_summary = test_runner.prepare_lists_and_print_output(write) + + if 'cygwin' == sys.platform: + logging.warn("#" * 40) + logging.warn("# UNEXPECTED PYTHON VERSION") + logging.warn("# This script should be run using the version of python") + logging.warn("# in third_party/python_24/") + logging.warn("#" * 40) + sys.exit(1) + + # Delete the disk cache if any to ensure a clean test run. + cachedir = os.path.split(test_shell_binary_path)[0] + cachedir = os.path.join(cachedir, "cache") + if os.path.exists(cachedir): + shutil.rmtree(cachedir) + + test_runner.add_test_type(text_diff.TestTextDiff) + if not options.no_pixel_tests: + test_runner.add_test_type(image_diff.ImageDiff) + if options.fuzzy_pixel_tests: + test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff) + + meter.update("Starting ...") + has_new_failures = test_runner.run(result_summary) + + logging.debug("Exit status: %d" % has_new_failures) + sys.exit(has_new_failures) + + +def parse_args(args=None): + """Provides a default set of command line args. + + Returns a tuple of options, args from optparse""" + option_parser = optparse.OptionParser() + option_parser.add_option("", "--no-pixel-tests", action="store_true", + default=False, + help="disable pixel-to-pixel PNG comparisons") + option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true", + default=False, + help="Also use fuzzy matching to compare pixel " + "test outputs.") + option_parser.add_option("", "--results-directory", + default="layout-test-results", + help="Output results directory source dir," + " relative to Debug or Release") + option_parser.add_option("", "--new-baseline", action="store_true", + default=False, + help="save all generated results as new baselines" + " into the platform directory, overwriting " + "whatever's already there.") + option_parser.add_option("", "--noshow-results", action="store_true", + default=False, help="don't launch the test_shell" + " with results after the tests are done") + option_parser.add_option("", "--full-results-html", action="store_true", + default=False, help="show all failures in " + "results.html, rather than only regressions") + option_parser.add_option("", "--clobber-old-results", action="store_true", + default=False, help="Clobbers test results from " + "previous runs.") + option_parser.add_option("", "--lint-test-files", action="store_true", + default=False, help="Makes sure the test files " + "parse for all configurations. 
Does not run any " + "tests.") + option_parser.add_option("", "--force", action="store_true", + default=False, + help="Run all tests, even those marked SKIP " + "in the test list") + option_parser.add_option("", "--num-test-shells", + help="Number of testshells to run in parallel.") + option_parser.add_option("", "--use-apache", action="store_true", + default=False, + help="Whether to use apache instead of lighttpd.") + option_parser.add_option("", "--time-out-ms", default=None, + help="Set the timeout for each test") + option_parser.add_option("", "--run-singly", action="store_true", + default=False, + help="run a separate test_shell for each test") + option_parser.add_option("", "--debug", action="store_true", default=False, + help="use the debug binary instead of the release" + " binary") + option_parser.add_option("", "--num-slow-tests-to-log", default=50, + help="Number of slow tests whose timings " + "to print.") + option_parser.add_option("", "--platform", + help="Override the platform for expected results") + option_parser.add_option("", "--target", default="", + help="Set the build target configuration " + "(overrides --debug)") + option_parser.add_option("", "--log", action="store", + default="detailed-progress,unexpected", + help="log various types of data. The param should" + " be a comma-separated list of values from: " + "actual,config," + LOG_DETAILED_PROGRESS + + ",expected,timing," + LOG_UNEXPECTED + " " + "(defaults to " + + "--log detailed-progress,unexpected)") + option_parser.add_option("-v", "--verbose", action="store_true", + default=False, help="include debug-level logging") + option_parser.add_option("", "--sources", action="store_true", + help="show expected result file path for each " + "test (implies --verbose)") + option_parser.add_option("", "--startup-dialog", action="store_true", + default=False, + help="create a dialog on test_shell.exe startup") + option_parser.add_option("", "--gp-fault-error-box", action="store_true", + default=False, + help="enable Windows GP fault error box") + option_parser.add_option("", "--wrapper", + help="wrapper command to insert before " + "invocations of test_shell; option is split " + "on whitespace before running. (Example: " + "--wrapper='valgrind --smc-check=all')") + option_parser.add_option("", "--test-list", action="append", + help="read list of tests to run from file", + metavar="FILE") + option_parser.add_option("", "--nocheck-sys-deps", action="store_true", + default=False, + help="Don't check the system dependencies " + "(themes)") + option_parser.add_option("", "--randomize-order", action="store_true", + default=False, + help=("Run tests in random order (useful for " + "tracking down corruption)")) + option_parser.add_option("", "--run-chunk", + default=None, + help=("Run a specified chunk (n:l), the " + "nth of len l, of the layout tests")) + option_parser.add_option("", "--run-part", + default=None, + help=("Run a specified part (n:m), the nth of m" + " parts, of the layout tests")) + option_parser.add_option("", "--batch-size", + default=None, + help=("Run a the tests in batches (n), after " + "every n tests, the test shell is " + "relaunched.")) + option_parser.add_option("", "--builder-name", + default="DUMMY_BUILDER_NAME", + help=("The name of the builder shown on the " + "waterfall running this script e.g. " + "WebKit.")) + option_parser.add_option("", "--build-name", + default="DUMMY_BUILD_NAME", + help=("The name of the builder used in its path, " + "e.g. 
webkit-rel.")) + option_parser.add_option("", "--build-number", + default="DUMMY_BUILD_NUMBER", + help=("The build number of the builder running" + "this script.")) + option_parser.add_option("", "--experimental-fully-parallel", + action="store_true", default=False, + help="run all tests in parallel") + return option_parser.parse_args(args) + +if '__main__' == __name__: + options, args = parse_args() + main(options, args) diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py new file mode 100644 index 0000000..134b507 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Compares the image output of a test to the expected image output using +fuzzy matching. +""" + +import errno +import logging +import os +import shutil +import subprocess + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + + +class FuzzyImageDiff(test_type_base.TestTypeBase): + + def compare_output(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output image and + checksum against the expected files from the LayoutTest directory. + """ + failures = [] + + # If we didn't produce a hash file, this test must be text-only. + if test_args.hash is None: + return failures + + expected_png_file = path_utils.expected_filename(filename, '.png') + + if test_args.show_sources: + logging.debug('Using %s' % expected_png_file) + + # Also report a missing expected PNG file. 
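# Illustrative sketch, not taken from the patch above: the shape of the
# external fuzzy-matcher invocation made just below.  run_fuzzy_match is a
# hypothetical wrapper name; the subprocess.call usage and the
# zero-exit-status-means-match convention come from the code in this file.
import subprocess

def run_fuzzy_match(fuzzy_match_binary, actual_png, expected_png):
    """Return True when the two PNGs are considered a fuzzy match."""
    return subprocess.call([fuzzy_match_binary,
                            actual_png, expected_png]) == 0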
+ if not os.path.isfile(expected_png_file): + failures.append(test_failures.FailureMissingImage(self)) + + # Run the fuzzymatcher + r = subprocess.call([path_utils.fuzzy_match_path(), + test_args.png_path, expected_png_file]) + if r != 0: + failures.append(test_failures.FailureFuzzyFailure(self)) + + return failures diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py new file mode 100644 index 0000000..b0bf189 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Compares the image output of a test to the expected image output. + +Compares hashes for the generated and expected images. If the output doesn't +match, returns FailureImageHashMismatch and outputs both hashes into the layout +test results directory. +""" + +import errno +import logging +import os +import shutil +import subprocess + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + +# Cache whether we have the image_diff executable available. +_compare_available = True +_compare_msg_printed = False + + +class ImageDiff(test_type_base.TestTypeBase): + + def _copy_output_png(self, test_filename, source_image, extension): + """Copies result files into the output directory with appropriate + names. + + Args: + test_filename: the test filename + source_file: path to the image file (either actual or expected) + extension: extension to indicate -actual.png or -expected.png + """ + self._make_output_directory(test_filename) + dest_image = self.output_filename(test_filename, extension) + + try: + shutil.copyfile(source_image, dest_image) + except IOError, e: + # A missing expected PNG has already been recorded as an error. + if errno.ENOENT != e.errno: + raise + + def _save_baseline_files(self, filename, png_path, checksum): + """Saves new baselines for the PNG and checksum. 
+ + Args: + filename: test filename + png_path: path to the actual PNG result file + checksum: value of the actual checksum result + """ + png_file = open(png_path, "rb") + png_data = png_file.read() + png_file.close() + self._save_baseline_data(filename, png_data, ".png") + self._save_baseline_data(filename, checksum, ".checksum") + + def _create_image_diff(self, filename, target): + """Creates the visual diff of the expected/actual PNGs. + + Args: + filename: the name of the test + target: Debug or Release + """ + diff_filename = self.output_filename(filename, + self.FILENAME_SUFFIX_COMPARE) + actual_filename = self.output_filename(filename, + self.FILENAME_SUFFIX_ACTUAL + '.png') + expected_filename = self.output_filename(filename, + self.FILENAME_SUFFIX_EXPECTED + '.png') + + global _compare_available + cmd = '' + + try: + executable = path_utils.image_diff_path(target) + cmd = [executable, '--diff', actual_filename, expected_filename, + diff_filename] + except Exception, e: + _compare_available = False + + result = 1 + if _compare_available: + try: + result = subprocess.call(cmd) + except OSError, e: + if e.errno == errno.ENOENT or e.errno == errno.EACCES: + _compare_available = False + else: + raise e + except ValueError: + # work around a race condition in Python 2.4's implementation + # of subprocess.Popen + pass + + global _compare_msg_printed + + if not _compare_available and not _compare_msg_printed: + _compare_msg_printed = True + print('image_diff not found. Make sure you have a ' + target + + ' build of the image_diff executable.') + + return result + + def compare_output(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output image and + checksum against the expected files from the LayoutTest directory. + """ + failures = [] + + # If we didn't produce a hash file, this test must be text-only. + if test_args.hash is None: + return failures + + # If we're generating a new baseline, we pass. + if test_args.new_baseline: + self._save_baseline_files(filename, test_args.png_path, + test_args.hash) + return failures + + # Compare hashes. + expected_hash_file = path_utils.expected_filename(filename, + '.checksum') + expected_png_file = path_utils.expected_filename(filename, '.png') + + if test_args.show_sources: + logging.debug('Using %s' % expected_hash_file) + logging.debug('Using %s' % expected_png_file) + + try: + expected_hash = open(expected_hash_file, "r").read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + expected_hash = '' + + + if not os.path.isfile(expected_png_file): + # Report a missing expected PNG file. + self.write_output_files(filename, '', '.checksum', test_args.hash, + expected_hash, diff=False, wdiff=False) + self._copy_output_png(filename, test_args.png_path, '-actual.png') + failures.append(test_failures.FailureMissingImage(self)) + return failures + elif test_args.hash == expected_hash: + # Hash matched (no diff needed, okay to return). + return failures + + + self.write_output_files(filename, '', '.checksum', test_args.hash, + expected_hash, diff=False, wdiff=False) + self._copy_output_png(filename, test_args.png_path, '-actual.png') + self._copy_output_png(filename, expected_png_file, '-expected.png') + + # Even though we only use result in one codepath below but we + # still need to call CreateImageDiff for other codepaths. 
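# Illustrative sketch, not taken from the patch above: the decision applied
# just below, written as a standalone function.  classify_image_result and
# its string labels are hypothetical; the real code appends
# test_failures.Failure* objects, and the missing -expected.png case is
# handled separately above.
def classify_image_result(actual_hash, expected_hash, images_match):
    """images_match: True when image_diff reported the PNGs as identical."""
    if not expected_hash:
        return 'missing-image-hash'     # no -expected.checksum baseline
    if actual_hash == expected_hash:
        return 'pass'
    if images_match:
        # Pixels agree but the stored checksum does not: stale baseline hash.
        return 'image-hash-incorrect'
    return 'image-hash-mismatch'        # genuine pixel difference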
+ result = self._create_image_diff(filename, target) + if expected_hash == '': + failures.append(test_failures.FailureMissingImageHash(self)) + elif test_args.hash != expected_hash: + # Hashes don't match, so see if the images match. If they do, then + # the hash is wrong. + if result == 0: + failures.append(test_failures.FailureImageHashIncorrect(self)) + else: + failures.append(test_failures.FailureImageHashMismatch(self)) + + return failures + + def diff_files(self, file1, file2): + """Diff two image files. + + Args: + file1, file2: full paths of the files to compare. + + Returns: + True if two files are different. + False otherwise. + """ + + try: + executable = path_utils.image_diff_path('Debug') + except Exception, e: + logging.warn('Failed to find image diff executable.') + return True + + cmd = [executable, file1, file2] + result = 1 + try: + result = subprocess.call(cmd) + except OSError, e: + logging.warn('Failed to compare image diff: %s', e) + return True + + return result == 1 diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py new file mode 100644 index 0000000..334ae70 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines the interface TestTypeBase which other test types inherit from. + +Also defines the TestArguments "struct" to pass them additional arguments. +""" + +import cgi +import difflib +import errno +import logging +import os.path +import subprocess + +from layout_package import path_utils + + +class TestArguments(object): + """Struct-like wrapper for additional arguments needed by + specific tests.""" + # Whether to save new baseline results. + new_baseline = False + + # Path to the actual PNG file generated by pixel tests + png_path = None + + # Value of checksum generated by pixel tests. 
+ hash = None + + # Whether to use wdiff to generate by-word diffs. + wdiff = False + + # Whether to report the locations of the expected result files used. + show_sources = False + +# Python bug workaround. See the wdiff code in WriteOutputFiles for an +# explanation. +_wdiff_available = True + + +class TestTypeBase(object): + + # Filename pieces when writing failures to the test results directory. + FILENAME_SUFFIX_ACTUAL = "-actual" + FILENAME_SUFFIX_EXPECTED = "-expected" + FILENAME_SUFFIX_DIFF = "-diff" + FILENAME_SUFFIX_WDIFF = "-wdiff.html" + FILENAME_SUFFIX_COMPARE = "-diff.png" + + def __init__(self, platform, root_output_dir): + """Initialize a TestTypeBase object. + + Args: + platform: the platform (e.g., 'chromium-mac-leopard') + identifying the platform-specific results to be used. + root_output_dir: The unix style path to the output dir. + """ + self._root_output_dir = root_output_dir + self._platform = platform + + def _make_output_directory(self, filename): + """Creates the output directory (if needed) for a given test + filename.""" + output_filename = os.path.join(self._root_output_dir, + path_utils.relative_test_filename(filename)) + path_utils.maybe_make_directory(os.path.split(output_filename)[0]) + + def _save_baseline_data(self, filename, data, modifier): + """Saves a new baseline file into the platform directory. + + The file will be named simply "<test>-expected<modifier>", suitable for + use as the expected results in a later run. + + Args: + filename: path to the test file + data: result to be saved as the new baseline + modifier: type of the result file, e.g. ".txt" or ".png" + """ + relative_dir = os.path.dirname( + path_utils.relative_test_filename(filename)) + output_dir = os.path.join( + path_utils.chromium_baseline_path(self._platform), relative_dir) + output_file = os.path.basename(os.path.splitext(filename)[0] + + self.FILENAME_SUFFIX_EXPECTED + modifier) + + path_utils.maybe_make_directory(output_dir) + output_path = os.path.join(output_dir, output_file) + logging.debug('writing new baseline to "%s"' % (output_path)) + open(output_path, "wb").write(data) + + def output_filename(self, filename, modifier): + """Returns a filename inside the output dir that contains modifier. + + For example, if filename is c:/.../fast/dom/foo.html and modifier is + "-expected.txt", the return value is + c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt + + Args: + filename: absolute filename to test file + modifier: a string to replace the extension of filename with + + Return: + The absolute windows path to the output filename + """ + output_filename = os.path.join(self._root_output_dir, + path_utils.relative_test_filename(filename)) + return os.path.splitext(output_filename)[0] + modifier + + def compare_output(self, filename, proc, output, test_args, target): + """Method that compares the output from the test with the + expected value. + + This is an abstract method to be implemented by all sub classes. 
+ + Args: + filename: absolute filename to test file + proc: a reference to the test_shell process + output: a string containing the output of the test + test_args: a TestArguments object holding optional additional + arguments + target: Debug or Release + + Return: + a list of TestFailure objects, empty if the test passes + """ + raise NotImplemented + + def write_output_files(self, filename, test_type, file_type, output, + expected, diff=True, wdiff=False): + """Writes the test output, the expected output and optionally the diff + between the two to files in the results directory. + + The full output filename of the actual, for example, will be + <filename><test_type>-actual<file_type> + For instance, + my_test-simp-actual.txt + + Args: + filename: The test filename + test_type: A string describing the test type, e.g. "simp" + file_type: A string describing the test output file type, e.g. ".txt" + output: A string containing the test output + expected: A string containing the expected test output + diff: if True, write a file containing the diffs too. This should be + False for results that are not text + wdiff: if True, write an HTML file containing word-by-word diffs + """ + self._make_output_directory(filename) + actual_filename = self.output_filename(filename, + test_type + self.FILENAME_SUFFIX_ACTUAL + file_type) + expected_filename = self.output_filename(filename, + test_type + self.FILENAME_SUFFIX_EXPECTED + file_type) + if output: + open(actual_filename, "wb").write(output) + if expected: + open(expected_filename, "wb").write(expected) + + if not output or not expected: + return + + if diff: + diff = difflib.unified_diff(expected.splitlines(True), + output.splitlines(True), + expected_filename, + actual_filename) + + diff_filename = self.output_filename(filename, + test_type + self.FILENAME_SUFFIX_DIFF + file_type) + open(diff_filename, "wb").write(''.join(diff)) + + if wdiff: + # Shell out to wdiff to get colored inline diffs. + executable = path_utils.wdiff_path() + cmd = [executable, + '--start-delete=##WDIFF_DEL##', + '--end-delete=##WDIFF_END##', + '--start-insert=##WDIFF_ADD##', + '--end-insert=##WDIFF_END##', + expected_filename, + actual_filename] + filename = self.output_filename(filename, + test_type + self.FILENAME_SUFFIX_WDIFF) + + global _wdiff_available + + try: + # Python's Popen has a bug that causes any pipes opened to a + # process that can't be executed to be leaked. Since this + # code is specifically designed to tolerate exec failures + # to gracefully handle cases where wdiff is not installed, + # the bug results in a massive file descriptor leak. As a + # workaround, if an exec failure is ever experienced for + # wdiff, assume it's not available. This will leak one + # file descriptor but that's better than leaking each time + # wdiff would be run. + # + # http://mail.python.org/pipermail/python-list/ + # 2008-August/505753.html + # http://bugs.python.org/issue3210 + # + # It also has a threading bug, so we don't output wdiff if + # the Popen raises a ValueError. 
+ # http://bugs.python.org/issue1236 + if _wdiff_available: + wdiff = subprocess.Popen( + cmd, stdout=subprocess.PIPE).communicate()[0] + wdiff_failed = False + + except OSError, e: + if (e.errno == errno.ENOENT or e.errno == errno.EACCES or + e.errno == errno.ECHILD): + _wdiff_available = False + else: + raise e + except ValueError, e: + wdiff_failed = True + + out = open(filename, 'wb') + + if not _wdiff_available: + out.write( + "wdiff not installed.<br/> " + "If you're running OS X, you can install via macports." + "<br/>" + "If running Ubuntu linux, you can run " + "'sudo apt-get install wdiff'.") + elif wdiff_failed: + out.write('wdiff failed due to running with multiple ' + 'test_shells in parallel.') + else: + wdiff = cgi.escape(wdiff) + wdiff = wdiff.replace('##WDIFF_DEL##', '<span class=del>') + wdiff = wdiff.replace('##WDIFF_ADD##', '<span class=add>') + wdiff = wdiff.replace('##WDIFF_END##', '</span>') + out.write('<head><style>.del { background: #faa; } ') + out.write('.add { background: #afa; }</style></head>') + out.write('<pre>' + wdiff + '</pre>') + + out.close() diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py new file mode 100644 index 0000000..8cff9e6 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# Copyright (C) 2010 The Chromium Authors. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Chromium name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Compares the text output of a test to the expected text output. + +If the output doesn't match, returns FailureTextMismatch and outputs the diff +files into the layout test results directory. 
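+
+Before comparing, both the actual output and the expected text are
+normalized to "\n" line endings (see get_normalized_output_text() and
+get_normalized_text() below), so the diff reflects content differences
+rather than platform line-ending differences.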
+""" + +import errno +import logging +import os.path + +from layout_package import path_utils +from layout_package import test_failures +from test_types import test_type_base + + +def is_render_tree_dump(data): + """Returns true if data appears to be a render tree dump as opposed to a + plain text dump.""" + return data.find("RenderView at (0,0)") != -1 + + +class TestTextDiff(test_type_base.TestTypeBase): + + def get_normalized_output_text(self, output): + # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin) + # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n". + norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace( + "\r\n", "\n") + return norm + "\n" + + def get_normalized_expected_text(self, filename, show_sources): + """Given the filename of the test, read the expected output from a file + and normalize the text. Returns a string with the expected text, or '' + if the expected output file was not found.""" + # Read the platform-specific expected text. + expected_filename = path_utils.expected_filename(filename, '.txt') + if show_sources: + logging.debug('Using %s' % expected_filename) + + return self.get_normalized_text(expected_filename) + + def get_normalized_text(self, filename): + try: + text = open(filename).read() + except IOError, e: + if errno.ENOENT != e.errno: + raise + return '' + + # Normalize line endings + return text.strip("\r\n").replace("\r\n", "\n") + "\n" + + def compare_output(self, filename, proc, output, test_args, target): + """Implementation of CompareOutput that checks the output text against + the expected text from the LayoutTest directory.""" + failures = [] + + # If we're generating a new baseline, we pass. + if test_args.new_baseline: + self._save_baseline_data(filename, output, ".txt") + return failures + + # Normalize text to diff + output = self.get_normalized_output_text(output) + expected = self.get_normalized_expected_text(filename, + test_args.show_sources) + + # Write output files for new tests, too. + if output != expected: + # Text doesn't match, write output files. + self.write_output_files(filename, "", ".txt", output, expected, + diff=True, wdiff=True) + + if expected == '': + failures.append(test_failures.FailureMissingResult(self)) + else: + failures.append(test_failures.FailureTextMismatch(self, True)) + + return failures + + def diff_files(self, file1, file2): + """Diff two text files. + + Args: + file1, file2: full paths of the files to compare. + + Returns: + True if two files are different. + False otherwise. + """ + + return (self.get_normalized_text(file1) != + self.get_normalized_text(file2)) diff --git a/WebKitTools/Scripts/modules/mock.py b/WebKitTools/Scripts/webkitpy/mock.py index f6f328e..f6f328e 100644 --- a/WebKitTools/Scripts/modules/mock.py +++ b/WebKitTools/Scripts/webkitpy/mock.py diff --git a/WebKitTools/Scripts/webkitpy/mock.pyc b/WebKitTools/Scripts/webkitpy/mock.pyc Binary files differnew file mode 100644 index 0000000..c39d3f4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/mock.pyc diff --git a/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py b/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py new file mode 100644 index 0000000..1aff53a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py @@ -0,0 +1,367 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.bugzilla import Bug, Attachment +from webkitpy.committers import CommitterList, Reviewer +from webkitpy.mock import Mock +from webkitpy.scm import CommitMessage +from webkitpy.webkit_logging import log + + +def _id_to_object_dictionary(*objects): + dictionary = {} + for thing in objects: + dictionary[thing["id"]] = thing + return dictionary + + +# FIXME: The ids should be 1, 2, 3 instead of crazy numbers. + + +_patch1 = { + "id": 197, + "bug_id": 42, + "url": "http://example.com/197", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "commit-queue": "+", + "committer_email": "foo@bar.com", + "attacher_email": "Contributer1", +} + + +_patch2 = { + "id": 128, + "bug_id": 42, + "url": "http://example.com/128", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "commit-queue": "+", + "committer_email": "non-committer@example.com", + "attacher_email": "eric@webkit.org", +} + + +_patch3 = { + "id": 103, + "bug_id": 75, + "url": "http://example.com/103", + "is_obsolete": False, + "is_patch": True, + "review": "?", + "attacher_email": "eric@webkit.org", +} + + +_patch4 = { + "id": 104, + "bug_id": 77, + "url": "http://example.com/103", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "commit-queue": "?", + "reviewer_email": "foo@bar.com", + "attacher_email": "Contributer2", +} + + +_patch5 = { + "id": 105, + "bug_id": 77, + "url": "http://example.com/103", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +_patch6 = { # Valid committer, but no reviewer. + "id": 106, + "bug_id": 77, + "url": "http://example.com/103", + "is_obsolete": False, + "is_patch": True, + "commit-queue": "+", + "committer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +_patch7 = { # Valid review, patch is marked obsolete. 
+ "id": 107, + "bug_id": 76, + "url": "http://example.com/103", + "is_obsolete": True, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +# This must be defined before we define the bugs, thus we don't use +# MockBugzilla.unassigned_email directly. +_unassigned_email = "unassigned@example.com" + + +# FIXME: The ids should be 1, 2, 3 instead of crazy numbers. + + +_bug1 = { + "id": 42, + "title": "Bug with two r+'d and cq+'d patches, one of which has an " + "invalid commit-queue setter.", + "assigned_to_email": _unassigned_email, + "attachments": [_patch1, _patch2], +} + + +_bug2 = { + "id": 75, + "title": "Bug with a patch needing review.", + "assigned_to_email": "foo@foo.com", + "attachments": [_patch3], +} + + +_bug3 = { + "id": 76, + "title": "The third bug", + "assigned_to_email": _unassigned_email, + "attachments": [_patch7], +} + + +_bug4 = { + "id": 77, + "title": "The fourth bug", + "assigned_to_email": "foo@foo.com", + "attachments": [_patch4, _patch5, _patch6], +} + + +class MockBugzillaQueries(Mock): + + def __init__(self, bugzilla): + Mock.__init__(self) + self._bugzilla = bugzilla + + def _all_bugs(self): + return map(lambda bug_dictionary: Bug(bug_dictionary, self._bugzilla), + self._bugzilla.bug_cache.values()) + + def fetch_bug_ids_from_commit_queue(self): + bugs_with_commit_queued_patches = filter( + lambda bug: bug.commit_queued_patches(), + self._all_bugs()) + return map(lambda bug: bug.id(), bugs_with_commit_queued_patches) + + def fetch_attachment_ids_from_review_queue(self): + unreviewed_patches = sum([bug.unreviewed_patches() + for bug in self._all_bugs()], []) + return map(lambda patch: patch.id(), unreviewed_patches) + + def fetch_patches_from_commit_queue(self): + return sum([bug.commit_queued_patches() + for bug in self._all_bugs()], []) + + def fetch_bug_ids_from_pending_commit_list(self): + bugs_with_reviewed_patches = filter(lambda bug: bug.reviewed_patches(), + self._all_bugs()) + bug_ids = map(lambda bug: bug.id(), bugs_with_reviewed_patches) + # NOTE: This manual hack here is to allow testing logging in + # test_assign_to_committer the real pending-commit query on bugzilla + # will return bugs with patches which have r+, but are also obsolete. + return bug_ids + [76] + + def fetch_patches_from_pending_commit_list(self): + return sum([bug.reviewed_patches() for bug in self._all_bugs()], []) + + +# FIXME: Bugzilla is the wrong Mock-point. Once we have a BugzillaNetwork +# class we should mock that instead. +# Most of this class is just copy/paste from Bugzilla. + + +class MockBugzilla(Mock): + + bug_server_url = "http://example.com" + + unassigned_email = _unassigned_email + + bug_cache = _id_to_object_dictionary(_bug1, _bug2, _bug3, _bug4) + + attachment_cache = _id_to_object_dictionary(_patch1, + _patch2, + _patch3, + _patch4, + _patch5, + _patch6, + _patch7) + + def __init__(self): + Mock.__init__(self) + self.queries = MockBugzillaQueries(self) + self.committers = CommitterList(reviewers=[Reviewer("Foo Bar", + "foo@bar.com")]) + + def fetch_bug(self, bug_id): + return Bug(self.bug_cache.get(bug_id), self) + + def fetch_attachment(self, attachment_id): + # This could be changed to .get() if we wish to allow failed lookups. 
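+        # A minimal sketch of that alternative (not enabled here; returning
+        # None on a missed lookup is an assumption, not current behavior):
+        #
+        #   attachment_dictionary = self.attachment_cache.get(attachment_id)
+        #   if not attachment_dictionary:
+        #       return None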
+ attachment_dictionary = self.attachment_cache[attachment_id] + bug = self.fetch_bug(attachment_dictionary["bug_id"]) + for attachment in bug.attachments(include_obsolete=True): + if attachment.id() == int(attachment_id): + return attachment + + def bug_url_for_bug_id(self, bug_id): + return "%s/%s" % (self.bug_server_url, bug_id) + + def fetch_bug_dictionary(self, bug_id): + return self.bug_cache.get(bug_id) + + def attachment_url_for_id(self, attachment_id, action="view"): + action_param = "" + if action and action != "view": + action_param = "&action=%s" % action + return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param) + + +class MockBuildBot(Mock): + + def builder_statuses(self): + return [{ + "name": "Builder1", + "is_green": True, + }, { + "name": "Builder2", + "is_green": True, + }] + + def red_core_builders_names(self): + return [] + + +class MockSCM(Mock): + + def __init__(self): + Mock.__init__(self) + self.checkout_root = os.getcwd() + + def create_patch(self): + return "Patch1" + + def commit_ids_from_commitish_arguments(self, args): + return ["Commitish1", "Commitish2"] + + def commit_message_for_local_commit(self, commit_id): + if commit_id == "Commitish1": + return CommitMessage("CommitMessage1\n" \ + "https://bugs.example.org/show_bug.cgi?id=42\n") + if commit_id == "Commitish2": + return CommitMessage("CommitMessage2\n" \ + "https://bugs.example.org/show_bug.cgi?id=75\n") + raise Exception("Bogus commit_id in commit_message_for_local_commit.") + + def create_patch_from_local_commit(self, commit_id): + if commit_id == "Commitish1": + return "Patch1" + if commit_id == "Commitish2": + return "Patch2" + raise Exception("Bogus commit_id in commit_message_for_local_commit.") + + def diff_for_revision(self, revision): + return "DiffForRevision%s\n" \ + "http://bugs.webkit.org/show_bug.cgi?id=12345" % revision + + def svn_revision_from_commit_text(self, commit_text): + return "49824" + + def modified_changelogs(self): + # Ideally we'd return something more interesting here. The problem is + # that LandDiff will try to actually read the path from disk! 
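+        # A hedged sketch of "something more interesting" (the ChangeLog
+        # path below is hypothetical and would have to exist on disk, since
+        # LandDiff actually reads it):
+        #
+        #   return [os.path.join(self.checkout_root, "WebCore", "ChangeLog")]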
+ return [] + + +class MockUser(object): + + def prompt(self, message): + return "Mock user response" + + def edit(self, files): + pass + + def page(self, message): + pass + + def confirm(self, message=None): + return True + + def open_url(self, url): + log("MOCK: user.open_url: %s" % url) + pass + + +class MockStatusServer(object): + + def __init__(self): + self.host = "example.com" + + def patch_status(self, queue_name, patch_id): + return None + + def update_status(self, queue_name, status, patch=None, results_file=None): + return 187 + + +class MockBugzillaTool(): + + def __init__(self): + self.bugs = MockBugzilla() + self.buildbot = MockBuildBot() + self.executive = Mock() + self.user = MockUser() + self._scm = MockSCM() + self.status_server = MockStatusServer() + + def scm(self): + return self._scm + + def path(self): + return "echo" diff --git a/WebKitTools/Scripts/modules/multicommandtool.py b/WebKitTools/Scripts/webkitpy/multicommandtool.py index 0475cf1..10cf426 100644 --- a/WebKitTools/Scripts/modules/multicommandtool.py +++ b/WebKitTools/Scripts/webkitpy/multicommandtool.py @@ -35,20 +35,36 @@ import sys from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option -from modules.grammar import pluralize -from modules.logging import log +from webkitpy.grammar import pluralize +from webkitpy.webkit_logging import log + class Command(object): name = None - # show_in_main_help = False # Subclasses must define show_in_main_help, we leave it out here to enforce that. - def __init__(self, help_text, argument_names=None, options=None, requires_local_commits=False): + show_in_main_help = False + def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False): self.help_text = help_text + self.long_help = long_help self.argument_names = argument_names self.required_arguments = self._parse_required_arguments(argument_names) self.options = options - self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options) self.requires_local_commits = requires_local_commits self.tool = None + # option_parser can be overriden by the tool using set_option_parser + # This default parser will be used for standalone_help printing. + self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options) + + # This design is slightly awkward, but we need the + # the tool to be able to create and modify the option_parser + # before it knows what Command to run. + def set_option_parser(self, option_parser): + self.option_parser = option_parser + self._add_options_to_parser() + + def _add_options_to_parser(self): + options = self.options or [] + for option in options: + self.option_parser.add_option(option) # The tool calls bind_to_tool on each Command after adding it to its list. def bind_to_tool(self, tool): @@ -84,28 +100,44 @@ class Command(object): def parse_args(self, args): return self.option_parser.parse_args(args) - def check_arguments_and_execute(self, args_after_command_name, tool): - (command_options, command_args) = self.parse_args(args_after_command_name) - - if len(command_args) < len(self.required_arguments): + def check_arguments_and_execute(self, options, args, tool=None): + if len(args) < len(self.required_arguments): log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." 
% ( pluralize("argument", len(self.required_arguments)), - pluralize("argument", len(command_args)), - "'%s'" % " ".join(command_args), + pluralize("argument", len(args)), + "'%s'" % " ".join(args), " ".join(self.required_arguments), tool.name(), self.name)) return 1 - return self.execute(command_options, command_args, tool) or 0 + return self.execute(options, args, tool) or 0 def standalone_help(self): - help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n" + help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n" + if self.long_help: + help_text += "%s\n\n" % self.long_help help_text += self.option_parser.format_option_help(IndentedHelpFormatter()) return help_text def execute(self, options, args, tool): raise NotImplementedError, "subclasses must implement" + # main() exists so that Commands can be turned into stand-alone scripts. + # Other parts of the code will likely require modification to work stand-alone. + def main(self, args=sys.argv): + (options, args) = self.parse_args(args) + # Some commands might require a dummy tool + return self.check_arguments_and_execute(options, args) + + +# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables. +class AbstractDeclarativeCommand(Command): + help_text = None + argument_names = None + long_help = None + def __init__(self, options=None, **kwargs): + Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs) + class HelpPrintingOptionParser(OptionParser): def __init__(self, epilog_method=None, *args, **kwargs): @@ -115,6 +147,7 @@ class HelpPrintingOptionParser(OptionParser): def error(self, msg): self.print_usage(sys.stderr) error_message = "%s: error: %s\n" % (self.get_prog_name(), msg) + # This method is overriden to add this one line to the output: error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name() self.exit(1, error_message) @@ -126,15 +159,16 @@ class HelpPrintingOptionParser(OptionParser): return "" -class HelpCommand(Command): +class HelpCommand(AbstractDeclarativeCommand): name = "help" - show_in_main_help = False + help_text = "Display information about this program or its subcommands" + argument_names = "[COMMAND]" def __init__(self): options = [ make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"), ] - Command.__init__(self, "Display information about this program or its subcommands", "[COMMAND]", options=options) + AbstractDeclarativeCommand.__init__(self, options) self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser. def _help_epilog(self): @@ -151,7 +185,12 @@ class HelpCommand(Command): epilog += "%s\n" % "".join(command_help_texts) epilog += "See '%prog help --all-commands' to list all commands.\n" epilog += "See '%prog help COMMAND' for more information on a specific command.\n" - return self.tool.global_option_parser.expand_prog_name(epilog) + return epilog.replace("%prog", self.tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name(). 
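+        # (optparse's expand_prog_name() is essentially
+        #  "return s.replace('%prog', self.get_prog_name())", so replacing
+        #  against self.tool.name() directly is equivalent here.)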
+ + # FIXME: This is a hack so that we don't show --all-commands as a global option: + def _remove_help_options(self): + for option in self.options: + self.option_parser.remove_option(option.get_opt_string()) def execute(self, options, args, tool): if args: @@ -161,12 +200,16 @@ class HelpCommand(Command): return 0 self.show_all_commands = options.show_all_commands - tool.global_option_parser.print_help() + self._remove_help_options() + self.option_parser.print_help() return 0 class MultiCommandTool(object): + global_options = None + def __init__(self, name=None, commands=None): + self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name. # Allow the unit tests to disable command auto-discovery. self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name] self.help_command = self.command_by_name(HelpCommand.name) @@ -176,7 +219,6 @@ class MultiCommandTool(object): self.commands.append(self.help_command) for command in self.commands: command.bind_to_tool(self) - self.global_option_parser = HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=name, usage=self._usage_line()) @classmethod def _add_all_subclasses(cls, class_to_crawl, seen_classes): @@ -191,21 +233,15 @@ class MultiCommandTool(object): cls._add_all_subclasses(Command, commands) return sorted(commands) - @staticmethod - def _usage_line(): - return "Usage: %prog [options] COMMAND [ARGS]" - def name(self): - return self.global_option_parser.get_prog_name() + return self._name - def handle_global_args(self, args): - (options, args) = self.global_option_parser.parse_args(args) - # We should never hit this because _split_args splits at the first arg without a leading "-". - if args: - self.global_option_parser.error("Extra arguments before command: %s" % args) + def _create_option_parser(self): + usage = "Usage: %prog [options] COMMAND [ARGS]" + return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage) @staticmethod - def _split_args(args): + def _split_command_name_from_args(args): # Assume the first argument which doesn't start with "-" is the command name. 
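        # For example (this mirrors test_split_args in
        # multicommandtool_unittest.py):
        #   ["--global-option", "command", "--option", "arg"]
        #     -> ("command", ["--global-option", "--option", "arg"])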
command_index = 0 for arg in args: @@ -213,12 +249,10 @@ class MultiCommandTool(object): break command_index += 1 else: - return (args[:], None, []) + return (None, args[:]) - global_args = args[:command_index] command = args[command_index] - command_args = args[command_index + 1:] - return (global_args, command, command_args) + return (command, args[:command_index] + args[command_index + 1:]) def command_by_name(self, command_name): for command in self.commands: @@ -233,21 +267,33 @@ class MultiCommandTool(object): return command.show_in_main_help def should_execute_command(self, command): - raise NotImplementedError, "subclasses must implement" + return True + + def _add_global_options(self, option_parser): + global_options = self.global_options or [] + for option in global_options: + option_parser.add_option(option) + + def handle_global_options(self, options): + pass def main(self, argv=sys.argv): - (global_args, command_name, args_after_command_name) = self._split_args(argv[1:]) + (command_name, args) = self._split_command_name_from_args(argv[1:]) - # Handle --help, etc: - self.handle_global_args(global_args) + option_parser = self._create_option_parser() + self._add_global_options(option_parser) command = self.command_by_name(command_name) or self.help_command if not command: - self.global_option_parser.error("%s is not a recognized command" % command_name) + option_parser.error("%s is not a recognized command" % command_name) + + command.set_option_parser(option_parser) + (options, args) = command.parse_args(args) + self.handle_global_options(options) (should_execute, failure_reason) = self.should_execute_command(command) if not should_execute: log(failure_reason) - return 0 + return 0 # FIXME: Should this really be 0? - return command.check_arguments_and_execute(args_after_command_name, self) + return command.check_arguments_and_execute(options, args, self) diff --git a/WebKitTools/Scripts/webkitpy/multicommandtool.pyc b/WebKitTools/Scripts/webkitpy/multicommandtool.pyc Binary files differnew file mode 100644 index 0000000..4584643 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/multicommandtool.pyc diff --git a/WebKitTools/Scripts/modules/multicommandtool_unittest.py b/WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py index c71cc09..ae77e73 100644 --- a/WebKitTools/Scripts/modules/multicommandtool_unittest.py +++ b/WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py @@ -29,7 +29,7 @@ import sys import unittest from multicommandtool import MultiCommandTool, Command -from modules.outputcapture import OutputCapture +from webkitpy.outputcapture import OutputCapture from optparse import make_option @@ -63,14 +63,9 @@ class CommandTest(unittest.TestCase): def test_required_arguments(self): two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]") - capture = OutputCapture() - capture.capture_output() - exit_code = two_required_arguments.check_arguments_and_execute(["foo"], TrivialTool()) - (stdout_string, stderr_string) = capture.restore_output() expected_missing_args_error = "2 arguments required, 1 argument provided. 
Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n" + exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_stderr=expected_missing_args_error) self.assertEqual(exit_code, 1) - self.assertEqual(stdout_string, "") - self.assertEqual(stderr_string, expected_missing_args_error) class TrivialTool(MultiCommandTool): @@ -86,20 +81,20 @@ class TrivialTool(MultiCommandTool): class MultiCommandToolTest(unittest.TestCase): def _assert_split(self, args, expected_split): - self.assertEqual(MultiCommandTool._split_args(args), expected_split) + self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split) def test_split_args(self): - # MultiCommandToolTest._split_args returns: (global_args, command, command_args) + # MultiCommandToolTest._split_command_name_from_args returns: (command, args) full_args = ["--global-option", "command", "--option", "arg"] - full_args_expected = (["--global-option"], "command", ["--option", "arg"]) + full_args_expected = ("command", ["--global-option", "--option", "arg"]) self._assert_split(full_args, full_args_expected) full_args = [] - full_args_expected = ([], None, []) + full_args_expected = (None, []) self._assert_split(full_args, full_args_expected) full_args = ["command", "arg"] - full_args_expected = ([], "command", ["arg"]) + full_args_expected = ("command", ["arg"]) self._assert_split(full_args, full_args_expected) def test_command_by_name(self): @@ -108,13 +103,9 @@ class MultiCommandToolTest(unittest.TestCase): self.assertEqual(tool.command_by_name("trivial").name, "trivial") self.assertEqual(tool.command_by_name("bar"), None) - def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", exit_code=0): - capture = OutputCapture() - capture.capture_output() - exit_code = tool.main(main_args) - (stdout_string, stderr_string) = capture.restore_output() - self.assertEqual(stdout_string, expected_stdout) - self.assertEqual(expected_stderr, expected_stderr) + def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0): + exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr) + self.assertEqual(exit_code, expected_exit_code) def test_global_help(self): tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()]) @@ -130,6 +121,7 @@ See 'trivial-tool help --all-commands' to list all commands. See 'trivial-tool help COMMAND' for more information on a specific command. """ + self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help) self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help) expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS] @@ -146,11 +138,14 @@ See 'trivial-tool help COMMAND' for more information on a specific command. 
""" self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help) + # Test that arguments can be passed before commands as well + self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help) + def test_command_help(self): - command_with_options = TrivialCommand(options=[make_option("--my_option")]) + command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP") tool = TrivialTool(commands=[command_with_options]) - expected_subcommand_help = "trivial [options] help text\nOptions:\n --my_option=MY_OPTION\n\n" + expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n" self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help) diff --git a/WebKitTools/Scripts/webkitpy/networktransaction.py b/WebKitTools/Scripts/webkitpy/networktransaction.py new file mode 100644 index 0000000..65ea27d --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/networktransaction.py @@ -0,0 +1,63 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import time + +from mechanize import HTTPError +from webkitpy.webkit_logging import log + + +class NetworkTimeout(Exception): + pass + + +class NetworkTransaction(object): + def __init__(self, initial_backoff_seconds=10, grown_factor=1.1, timeout_seconds=5*60*60): + self._initial_backoff_seconds = initial_backoff_seconds + self._grown_factor = grown_factor + self._timeout_seconds = timeout_seconds + + def run(self, request): + self._total_sleep = 0 + self._backoff_seconds = self._initial_backoff_seconds + while True: + try: + return request() + except HTTPError, e: + self._check_for_timeout() + log("Received HTTP status %s from server. Retrying in %s seconds..." 
% (e.code, self._backoff_seconds)) + self._sleep() + + def _check_for_timeout(self): + if self._total_sleep + self._backoff_seconds > self._timeout_seconds: + raise NetworkTimeout() + + def _sleep(self): + time.sleep(self._backoff_seconds) + self._total_sleep += self._backoff_seconds + self._backoff_seconds *= self._grown_factor diff --git a/WebKitTools/Scripts/webkitpy/networktransaction.pyc b/WebKitTools/Scripts/webkitpy/networktransaction.pyc Binary files differnew file mode 100644 index 0000000..fb45bcb --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/networktransaction.pyc diff --git a/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py b/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py new file mode 100644 index 0000000..3cffe02 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py @@ -0,0 +1,80 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
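+
+# NetworkTransaction in brief: run() keeps calling the given callable,
+# retrying on mechanize's HTTPError with a growing backoff, until it either
+# succeeds or the accumulated sleep time would exceed timeout_seconds (at
+# which point NetworkTimeout is raised).  A minimal usage sketch, where
+# fetch_status is a hypothetical callable and not part of webkitpy:
+#
+#   from webkitpy.networktransaction import NetworkTransaction
+#
+#   def fetch_status():
+#       return urllib2.urlopen("http://example.com/status").read()
+#
+#   status = NetworkTransaction(initial_backoff_seconds=1).run(fetch_status)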
+ +import unittest + +from mechanize import HTTPError +from webkitpy.networktransaction import NetworkTransaction, NetworkTimeout + +class NetworkTransactionTest(unittest.TestCase): + exception = Exception("Test exception") + + def test_success(self): + transaction = NetworkTransaction() + self.assertEqual(transaction.run(lambda: 42), 42) + + def _raise_exception(self): + raise self.exception + + def test_exception(self): + transaction = NetworkTransaction() + did_process_exception = False + did_throw_exception = True + try: + transaction.run(lambda: self._raise_exception()) + did_throw_exception = False + except Exception, e: + did_process_exception = True + self.assertEqual(e, self.exception) + self.assertTrue(did_throw_exception) + self.assertTrue(did_process_exception) + + def _raise_http_error(self): + self._run_count += 1 + if self._run_count < 3: + raise HTTPError("http://example.com/", 500, "inteneral server error", None, None) + return 42 + + def test_retry(self): + self._run_count = 0 + transaction = NetworkTransaction(initial_backoff_seconds=0) + self.assertEqual(transaction.run(lambda: self._raise_http_error()), 42) + self.assertEqual(self._run_count, 3) + + def test_timeout(self): + self._run_count = 0 + transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60) + did_process_exception = False + did_throw_exception = True + try: + transaction.run(lambda: self._raise_http_error()) + did_throw_exception = False + except NetworkTimeout, e: + did_process_exception = True + self.assertTrue(did_throw_exception) + self.assertTrue(did_process_exception) diff --git a/WebKitTools/Scripts/modules/outputcapture.py b/WebKitTools/Scripts/webkitpy/outputcapture.py index f02fc5d..592a669 100644 --- a/WebKitTools/Scripts/modules/outputcapture.py +++ b/WebKitTools/Scripts/webkitpy/outputcapture.py @@ -51,3 +51,12 @@ class OutputCapture(object): def restore_output(self): return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr")) + + def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr=""): + self.capture_output() + return_value = function(*args, **kwargs) + (stdout_string, stderr_string) = self.restore_output() + testcase.assertEqual(stdout_string, expected_stdout) + testcase.assertEqual(stderr_string, expected_stderr) + # This is a little strange, but I don't know where else to return this information. 
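+        # Typical use from a unittest.TestCase, where run_main stands in for
+        # whatever callable the test exercises (a hypothetical example):
+        #
+        #   exit_code = OutputCapture().assert_outputs(
+        #       self, run_main, [["tool", "help"]],
+        #       expected_stdout=expected_help_text)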
+ return return_value diff --git a/WebKitTools/Scripts/modules/patchcollection.py b/WebKitTools/Scripts/webkitpy/patchcollection.py index add8129..7e8603c 100644 --- a/WebKitTools/Scripts/modules/patchcollection.py +++ b/WebKitTools/Scripts/webkitpy/patchcollection.py @@ -37,11 +37,11 @@ class PersistentPatchCollectionDelegate: def status_server(self): raise NotImplementedError, "subclasses must implement" + def is_terminal_status(self, status): + raise NotImplementedError, "subclasses must implement" + class PersistentPatchCollection: - _initial_status = "Pending" - _pass_status = "Pass" - _fail_status = "Fail" def __init__(self, delegate): self._delegate = delegate self._name = self._delegate.collection_name() @@ -53,7 +53,7 @@ class PersistentPatchCollection: if cached: return cached status = self._status.patch_status(self._name, patch_id) - if status: + if status and self._delegate.is_terminal_status(status): self._status_cache[patch_id] = status return status @@ -61,11 +61,5 @@ class PersistentPatchCollection: patch_ids = self._delegate.fetch_potential_patch_ids() for patch_id in patch_ids: status = self._cached_status(patch_id) - if not status: + if not status or not self._delegate.is_terminal_status(status): return patch_id - - def did_pass(self, patch): - self._status.update_status(self._name, self._pass_status, patch) - - def did_fail(self, patch): - self._status.update_status(self._name, self._fail_status, patch) diff --git a/WebKitTools/Scripts/webkitpy/patchcollection.pyc b/WebKitTools/Scripts/webkitpy/patchcollection.pyc Binary files differnew file mode 100644 index 0000000..18058d3 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/patchcollection.pyc diff --git a/WebKitTools/Scripts/run-webkit-unittests b/WebKitTools/Scripts/webkitpy/patchcollection_unittest.py index 3487299..811fed9 100755..100644 --- a/WebKitTools/Scripts/run-webkit-unittests +++ b/WebKitTools/Scripts/webkitpy/patchcollection_unittest.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009, Google Inc. All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -29,21 +29,25 @@ import unittest -from modules.bugzilla_unittest import * -from modules.buildbot_unittest import * -from modules.changelogs_unittest import * -from modules.commands.download_unittest import * -from modules.commands.upload_unittest import * -from modules.commands.queries_unittest import * -from modules.commands.queues_unittest import * -from modules.committers_unittest import * -from modules.cpp_style_unittest import * -from modules.diff_parser_unittest import * -from modules.logging_unittest import * -from modules.multicommandtool_unittest import * -from modules.scm_unittest import * -from modules.webkitport_unittest import * -from modules.workqueue_unittest import * - -if __name__ == "__main__": - unittest.main() +from webkitpy.mock import Mock +from webkitpy.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate + + +class TestPersistentPatchCollectionDelegate(PersistentPatchCollectionDelegate): + def collection_name(self): + return "test-collection" + + def fetch_potential_patch_ids(self): + return [42, 192, 87] + + def status_server(self): + return Mock() + + def is_terminal_status(self, status): + return False + + +class PersistentPatchCollectionTest(unittest.TestCase): + def test_next(self): + collection = PersistentPatchCollection(TestPersistentPatchCollectionDelegate()) + collection.next() diff --git a/WebKitTools/Scripts/modules/workqueue.py b/WebKitTools/Scripts/webkitpy/queueengine.py index f8cbba8..d14177d 100644 --- a/WebKitTools/Scripts/modules/workqueue.py +++ b/WebKitTools/Scripts/webkitpy/queueengine.py @@ -34,21 +34,15 @@ import traceback from datetime import datetime, timedelta -from modules.executive import ScriptError -from modules.logging import log, OutputTee -from modules.statusbot import StatusBot - -class WorkQueueDelegate: - def queue_name(self): - raise NotImplementedError, "subclasses must implement" +from webkitpy.executive import ScriptError +from webkitpy.webkit_logging import log, OutputTee +from webkitpy.statusserver import StatusServer +class QueueEngineDelegate: def queue_log_path(self): raise NotImplementedError, "subclasses must implement" - def work_logs_directory(self): - raise NotImplementedError, "subclasses must implement" - - def status_host(self): + def work_item_log_path(self, work_item): raise NotImplementedError, "subclasses must implement" def begin_work_queue(self): @@ -71,7 +65,7 @@ class WorkQueueDelegate: raise NotImplementedError, "subclasses must implement" -class WorkQueue: +class QueueEngine: def __init__(self, name, delegate): self._name = name self._delegate = delegate @@ -90,41 +84,38 @@ class WorkQueue: def run(self): self._begin_logging() - self.status_bot = StatusBot(host=self._delegate.status_host()) self._delegate.begin_work_queue() while (self._delegate.should_continue_work_queue()): - self._ensure_work_log_closed() try: + self._ensure_work_log_closed() work_item = self._delegate.next_work_item() if not work_item: - self._update_status_and_sleep("Empty queue.") + self._sleep("No work item.") continue - (safe_to_proceed, waiting_message, patch) = self._delegate.should_proceed_with_work_item(work_item) - if not safe_to_proceed: - self._update_status_and_sleep(waiting_message) + if not self._delegate.should_proceed_with_work_item(work_item): + self._sleep("Not proceeding with work item.") continue - 
self.status_bot.update_status(self._name, waiting_message, patch) + + # FIXME: Work logs should not depend on bug_id specificaly. + # This looks fixed, no? + self._open_work_log(work_item) + try: + self._delegate.process_work_item(work_item) + except ScriptError, e: + # Use a special exit code to indicate that the error was already + # handled in the child process and we should just keep looping. + if e.exit_code == self.handled_error_code: + continue + message = "Unexpected failure when landing patch! Please file a bug against webkit-patch.\n%s" % e.message_with_output() + self._delegate.handle_unexpected_error(work_item, message) except KeyboardInterrupt, e: log("\nUser terminated queue.") return 1 except Exception, e: traceback.print_exc() # Don't try tell the status bot, in case telling it causes an exception. - self._sleep("Exception while preparing queue: %s." % e) - continue - - # FIXME: Work logs should not depend on bug_id specificaly. - self._open_work_log(patch["bug_id"]) - try: - self._delegate.process_work_item(work_item) - except ScriptError, e: - # Use a special exit code to indicate that the error was already - # handled in the child process and we should just keep looping. - if e.exit_code == self.handled_error_code: - continue - message = "Unexpected failure when landing patch! Please file a bug against bugzilla-tool.\n%s" % e.message_with_output() - self._delegate.handle_unexpected_error(work_item, message) + self._sleep("Exception while preparing queue") # Never reached. self._ensure_work_log_closed() @@ -132,9 +123,9 @@ class WorkQueue: self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path()) self._work_log = None - def _open_work_log(self, bug_id): - work_log_path = os.path.join(self._delegate.work_logs_directory(), "%s.log" % bug_id) - self._work_log = self._output_tee.add_log(work_log_path) + def _open_work_log(self, work_item): + work_item_log_path = self._delegate.work_item_log_path(work_item) + self._work_log = self._output_tee.add_log(work_item_log_path) def _ensure_work_log_closed(self): # If we still have a bug log open, close it. 
@@ -151,9 +142,3 @@ class WorkQueue: def _sleep(cls, message): log(cls._sleep_message(message)) time.sleep(cls.seconds_to_sleep) - - def _update_status_and_sleep(self, message): - status_message = self._sleep_message(message) - self.status_bot.update_status(self._name, status_message) - log(status_message) - time.sleep(self.seconds_to_sleep) diff --git a/WebKitTools/Scripts/webkitpy/queueengine.pyc b/WebKitTools/Scripts/webkitpy/queueengine.pyc Binary files differnew file mode 100644 index 0000000..635bb57 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/queueengine.pyc diff --git a/WebKitTools/Scripts/modules/workqueue_unittest.py b/WebKitTools/Scripts/webkitpy/queueengine_unittest.py index ed77b5f..a4036ea 100644 --- a/WebKitTools/Scripts/modules/workqueue_unittest.py +++ b/WebKitTools/Scripts/webkitpy/queueengine_unittest.py @@ -32,10 +32,10 @@ import shutil import tempfile import unittest -from modules.executive import ScriptError -from modules.workqueue import WorkQueue, WorkQueueDelegate +from webkitpy.executive import ScriptError +from webkitpy.queueengine import QueueEngine, QueueEngineDelegate -class LoggingDelegate(WorkQueueDelegate): +class LoggingDelegate(QueueEngineDelegate): def __init__(self, test): self._test = test self._callbacks = [] @@ -43,12 +43,11 @@ class LoggingDelegate(WorkQueueDelegate): expected_callbacks = [ 'queue_log_path', - 'status_host', 'begin_work_queue', 'should_continue_work_queue', 'next_work_item', 'should_proceed_with_work_item', - 'work_logs_directory', + 'work_item_log_path', 'process_work_item', 'should_continue_work_queue' ] @@ -60,13 +59,9 @@ class LoggingDelegate(WorkQueueDelegate): self.record("queue_log_path") return os.path.join(self._test.temp_dir, "queue_log_path") - def work_logs_directory(self): - self.record("work_logs_directory") - return os.path.join(self._test.temp_dir, "work_log_path") - - def status_host(self): - self.record("status_host") - return None + def work_item_log_path(self, work_item): + self.record("work_item_log_path") + return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item) def begin_work_queue(self): self.record("begin_work_queue") @@ -111,33 +106,32 @@ class NotSafeToProceedDelegate(LoggingDelegate): def should_proceed_with_work_item(self, work_item): self.record("should_proceed_with_work_item") self._test.assertEquals(work_item, "work_item") - fake_patch = { 'bug_id' : 42 } - return (False, "waiting_message", fake_patch) + return False -class FastWorkQueue(WorkQueue): +class FastQueueEngine(QueueEngine): def __init__(self, delegate): - WorkQueue.__init__(self, "fast-queue", delegate) + QueueEngine.__init__(self, "fast-queue", delegate) # No sleep for the wicked. 
seconds_to_sleep = 0 - def _update_status_and_sleep(self, message): + def _sleep(self, message): pass -class WorkQueueTest(unittest.TestCase): +class QueueEngineTest(unittest.TestCase): def test_trivial(self): delegate = LoggingDelegate(self) - work_queue = WorkQueue("trivial-queue", delegate) + work_queue = QueueEngine("trivial-queue", delegate) work_queue.run() self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks) - self.assertTrue(os.path.exists(delegate.queue_log_path())) - self.assertTrue(os.path.exists(os.path.join(delegate.work_logs_directory(), "42.log"))) + self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path"))) + self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log"))) def test_unexpected_error(self): delegate = ThrowErrorDelegate(self, 3) - work_queue = WorkQueue("error-queue", delegate) + work_queue = QueueEngine("error-queue", delegate) work_queue.run() expected_callbacks = LoggingDelegate.expected_callbacks[:] work_item_index = expected_callbacks.index('process_work_item') @@ -147,14 +141,14 @@ class WorkQueueTest(unittest.TestCase): self.assertEquals(delegate._callbacks, expected_callbacks) def test_handled_error(self): - delegate = ThrowErrorDelegate(self, WorkQueue.handled_error_code) - work_queue = WorkQueue("handled-error-queue", delegate) + delegate = ThrowErrorDelegate(self, QueueEngine.handled_error_code) + work_queue = QueueEngine("handled-error-queue", delegate) work_queue.run() self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks) def test_not_safe_to_proceed(self): delegate = NotSafeToProceedDelegate(self) - work_queue = FastWorkQueue(delegate) + work_queue = FastQueueEngine(delegate) work_queue.run() expected_callbacks = LoggingDelegate.expected_callbacks[:] next_work_item_index = expected_callbacks.index('next_work_item') diff --git a/WebKitTools/Scripts/modules/scm.py b/WebKitTools/Scripts/webkitpy/scm.py index ff26693..743f3fe 100644 --- a/WebKitTools/Scripts/modules/scm.py +++ b/WebKitTools/Scripts/webkitpy/scm.py @@ -34,9 +34,9 @@ import re import subprocess # Import WebKit-specific modules. -from modules.changelogs import ChangeLog -from modules.executive import Executive, run_command, ScriptError -from modules.logging import error, log +from webkitpy.changelogs import ChangeLog +from webkitpy.executive import Executive, run_command, ScriptError +from webkitpy.webkit_logging import error, log def detect_scm_system(path): if SVN.in_working_directory(path): @@ -123,10 +123,11 @@ class SCM: def apply_patch(self, patch, force=False): # It's possible that the patch was not made from the root directory. # We should detect and handle that case. - curl_process = subprocess.Popen(['curl', '--location', '--silent', '--show-error', patch['url']], stdout=subprocess.PIPE) + # FIXME: scm.py should not deal with fetching Attachment data. Attachment should just have a .data() accessor. 
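+        # A sketch of the direction that FIXME points in; Attachment.data()
+        # is hypothetical and does not exist yet:
+        #
+        #   patch_data = patch.data()
+        #   svn_apply = subprocess.Popen(args, stdin=subprocess.PIPE)
+        #   svn_apply.communicate(patch_data)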
+ curl_process = subprocess.Popen(['curl', '--location', '--silent', '--show-error', patch.url()], stdout=subprocess.PIPE) args = [self.script_path('svn-apply')] - if patch.get('reviewer'): - args += ['--reviewer', patch['reviewer']] + if patch.reviewer(): + args += ['--reviewer', patch.reviewer().full_name] if force: args.append('--force') diff --git a/WebKitTools/Scripts/webkitpy/scm.pyc b/WebKitTools/Scripts/webkitpy/scm.pyc Binary files differnew file mode 100644 index 0000000..520f611 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/scm.pyc diff --git a/WebKitTools/Scripts/modules/scm_unittest.py b/WebKitTools/Scripts/webkitpy/scm_unittest.py index 8e82f3c..73faf40 100644 --- a/WebKitTools/Scripts/modules/scm_unittest.py +++ b/WebKitTools/Scripts/webkitpy/scm_unittest.py @@ -38,8 +38,9 @@ import unittest import urllib from datetime import date -from modules.executive import Executive, run_command, ScriptError -from modules.scm import detect_scm_system, SCM, CheckoutNeedsUpdate, commit_error_handler +from webkitpy.executive import Executive, run_command, ScriptError +from webkitpy.scm import detect_scm_system, SCM, CheckoutNeedsUpdate, commit_error_handler +from webkitpy.bugzilla import Attachment # FIXME: This should not be needed # Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.) # Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from. @@ -168,7 +169,7 @@ class SCMTest(unittest.TestCase): patch['reviewer'] = 'Joe Cool' patch['bug_id'] = '12345' patch['url'] = 'file://%s' % urllib.pathname2url(patch_path) - return patch + return Attachment(patch, None) # FIXME: This is a hack, scm.py shouldn't be fetching attachment data. def _setup_webkittools_scripts_symlink(self, local_scm): webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__))) diff --git a/WebKitTools/Scripts/modules/statusbot.py b/WebKitTools/Scripts/webkitpy/statusserver.py index 350aebf..ff0ddfa 100644 --- a/WebKitTools/Scripts/modules/statusbot.py +++ b/WebKitTools/Scripts/webkitpy/statusserver.py @@ -25,31 +25,19 @@ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# -# WebKit's Python module for interacting with the Commit Queue status page. -# WebKit includes a built copy of BeautifulSoup in Scripts/modules +from webkitpy.networktransaction import NetworkTransaction +from webkitpy.webkit_logging import log +from mechanize import Browser + +# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy # so this import should always succeed. from .BeautifulSoup import BeautifulSoup -try: - from mechanize import Browser -except ImportError, e: - print """ -mechanize is required. 
- -To install: -sudo easy_install mechanize - -Or from the web: -http://wwwsearch.sourceforge.net/mechanize/ -""" - exit(1) - import urllib2 -class StatusBot: +class StatusServer: default_host = "webkit-commit-queue.appspot.com" def __init__(self, host=default_host): @@ -57,30 +45,49 @@ class StatusBot: self.browser = Browser() def set_host(self, host): - self.statusbot_host = host - self.statusbot_server_url = "http://%s" % self.statusbot_host + self.host = host + self.url = "http://%s" % self.host - def update_status(self, queue_name, status, patch=None, results_file=None): - # During unit testing, statusbot_host is None - if not self.statusbot_host: + def results_url_for_status(self, status_id): + return "%s/results/%s" % (self.url, status_id) + + def _add_patch(self, patch): + if not patch: + return + if patch.bug_id(): + self.browser["bug_id"] = str(patch.bug_id()) + if patch.id(): + self.browser["patch_id"] = str(patch.id()) + + def _add_results_file(self, results_file): + if not results_file: return + self.browser.add_file(results_file, "text/plain", "results.txt", 'results_file') - update_status_url = "%s/update-status" % self.statusbot_server_url + def _post_to_server(self, queue_name, status, patch, results_file): + if results_file: + # We might need to re-wind the file if we've already tried to post it. + results_file.seek(0) + + update_status_url = "%s/update-status" % self.url self.browser.open(update_status_url) self.browser.select_form(name="update_status") self.browser['queue_name'] = queue_name - if patch: - if patch.get('bug_id'): - self.browser['bug_id'] = str(patch['bug_id']) - if patch.get('id'): - self.browser['patch_id'] = str(patch['id']) + self._add_patch(patch) self.browser['status'] = status - if results_file: - self.browser.add_file(results_file, "text/plain", "results.txt", 'results_file') - self.browser.submit() + self._add_results_file(results_file) + return self.browser.submit().read() # This is the id of the newly created status object. + + def update_status(self, queue_name, status, patch=None, results_file=None): + # During unit testing, host is None + if not self.host: + return + + log(status) + return NetworkTransaction().run(lambda: self._post_to_server(queue_name, status, patch, results_file)) def patch_status(self, queue_name, patch_id): - update_status_url = "%s/patch-status/%s/%s" % (self.statusbot_server_url, queue_name, patch_id) + update_status_url = "%s/patch-status/%s/%s" % (self.url, queue_name, patch_id) try: return urllib2.urlopen(update_status_url).read() except urllib2.HTTPError, e: diff --git a/WebKitTools/Scripts/webkitpy/statusserver.pyc b/WebKitTools/Scripts/webkitpy/statusserver.pyc Binary files differnew file mode 100644 index 0000000..2ba11b0 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/statusserver.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/__init__.py b/WebKitTools/Scripts/webkitpy/steps/__init__.py new file mode 100644 index 0000000..5ae4bea --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/__init__.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# FIXME: Is this the right way to do this? +from webkitpy.steps.applypatch import ApplyPatch +from webkitpy.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit +from webkitpy.steps.build import Build +from webkitpy.steps.checkstyle import CheckStyle +from webkitpy.steps.cleanworkingdirectory import CleanWorkingDirectory +from webkitpy.steps.cleanworkingdirectorywithlocalcommits import CleanWorkingDirectoryWithLocalCommits +from webkitpy.steps.closebug import CloseBug +from webkitpy.steps.closebugforlanddiff import CloseBugForLandDiff +from webkitpy.steps.closepatch import ClosePatch +from webkitpy.steps.commit import Commit +from webkitpy.steps.completerollout import CompleteRollout +from webkitpy.steps.confirmdiff import ConfirmDiff +from webkitpy.steps.createbug import CreateBug +from webkitpy.steps.editchangelog import EditChangeLog +from webkitpy.steps.ensurebuildersaregreen import EnsureBuildersAreGreen +from webkitpy.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded +from webkitpy.steps.obsoletepatches import ObsoletePatches +from webkitpy.steps.options import Options +from webkitpy.steps.postdiff import PostDiff +from webkitpy.steps.postdiffforcommit import PostDiffForCommit +from webkitpy.steps.preparechangelogforrevert import PrepareChangeLogForRevert +from webkitpy.steps.preparechangelog import PrepareChangeLog +from webkitpy.steps.promptforbugortitle import PromptForBugOrTitle +from webkitpy.steps.revertrevision import RevertRevision +from webkitpy.steps.runtests import RunTests +from webkitpy.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer +from webkitpy.steps.update import Update diff --git a/WebKitTools/Scripts/webkitpy/steps/__init__.pyc b/WebKitTools/Scripts/webkitpy/steps/__init__.pyc Binary files differnew file mode 100644 index 0000000..ccf513b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/__init__.pyc diff --git a/WebKitTools/Scripts/modules/commands/early_warning_system.py b/WebKitTools/Scripts/webkitpy/steps/abstractstep.py index e8ef408..639cf55 100644 --- a/WebKitTools/Scripts/modules/commands/early_warning_system.py +++ b/WebKitTools/Scripts/webkitpy/steps/abstractstep.py @@ -1,6 +1,5 @@ -#!/usr/bin/env python -# Copyright (c) 2009, Google Inc. All rights reserved. -# +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: @@ -27,40 +26,44 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from modules.commands.queues import AbstractReviewQueue -from modules.executive import ScriptError -from modules.webkitport import WebKitPort +from webkitpy.webkit_logging import log +from webkitpy.webkitport import WebKitPort + -class AbstractEarlyWarningSystem(AbstractReviewQueue): - def __init__(self): - AbstractReviewQueue.__init__(self) - self.port = WebKitPort.port(self.port_name) +class AbstractStep(object): + def __init__(self, tool, options): + self._tool = tool + self._options = options + self._port = None - def should_proceed_with_work_item(self, patch): - try: - self.run_bugzilla_tool(["build", self.port.flag(), "--force-clean", "--quiet"]) - except ScriptError, e: - return (False, "Unable to perform a build.", None) - return (True, "Building patch %s on bug %s." % (patch["id"], patch["bug_id"]), patch) + def _run_script(self, script_name, quiet=False, port=WebKitPort): + log("Running %s" % script_name) + # FIXME: This should use self.port() + self._tool.executive.run_and_throw_if_fail(port.script_path(script_name), quiet) - def process_work_item(self, patch): - self.run_bugzilla_tool([ - "build-attachment", - self.port.flag(), - "--force-clean", - "--quiet", - "--non-interactive", - "--parent-command=%s" % self.name, - "--no-update", - patch["id"]]) - self._patches.did_pass(patch) + # FIXME: The port should live on the tool. + def port(self): + if self._port: + return self._port + self._port = WebKitPort.port(self._options.port) + return self._port + _well_known_keys = { + "diff" : lambda self: self._tool.scm().create_patch(), + "changelogs" : lambda self: self._tool.scm().modified_changelogs(), + } -class QtEWS(AbstractEarlyWarningSystem): - name = "qt-ews" - port_name = "qt" + def cached_lookup(self, state, key, promise=None): + if state.get(key): + return state[key] + if not promise: + promise = self._well_known_keys.get(key) + state[key] = promise(self) + return state[key] + @classmethod + def options(cls): + return [] -class ChromiumEWS(AbstractEarlyWarningSystem): - name = "chromium-ews" - port_name = "chromium" + def run(self, state): + raise NotImplementedError, "subclasses must implement" diff --git a/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc b/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc Binary files differnew file mode 100644 index 0000000..d172c92 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatch.py b/WebKitTools/Scripts/webkitpy/steps/applypatch.py new file mode 100644 index 0000000..aba81ae --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/applypatch.py @@ -0,0 +1,42 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + +class ApplyPatch(AbstractStep): + @classmethod + def options(cls): + return [ + Options.non_interactive, + ] + + def run(self, state): + log("Processing patch %s from bug %s." % (state["patch"].id(), state["patch"].bug_id())) + self._tool.scm().apply_patch(state["patch"], force=self._options.non_interactive) diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc b/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc Binary files differnew file mode 100644 index 0000000..0c5212d --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py new file mode 100644 index 0000000..bfaf52a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.steps.applypatch import ApplyPatch +from webkitpy.steps.options import Options + +class ApplyPatchWithLocalCommit(ApplyPatch): + @classmethod + def options(cls): + return [ + Options.local_commit, + ] + ApplyPatch.options() + + def run(self, state): + ApplyPatch.run(self, state) + if self._options.local_commit: + commit_message = self._tool.scm().commit_message_for_this_commit() + self._tool.scm().commit_locally_with_message(commit_message.message() or state["patch"].name()) diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc Binary files differnew file mode 100644 index 0000000..67afd10 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/build.py b/WebKitTools/Scripts/webkitpy/steps/build.py new file mode 100644 index 0000000..1823cff --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/build.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class Build(AbstractStep): + @classmethod + def options(cls): + return [ + Options.build, + Options.quiet, + Options.build_style, + ] + + def build(self, build_style): + self._tool.executive.run_and_throw_if_fail(self.port().build_webkit_command(build_style=build_style), self._options.quiet) + + def run(self, state): + if not self._options.build: + return + log("Building WebKit") + if self._options.build_style == "both": + self.build("debug") + self.build("release") + else: + self.build(self._options.build_style) diff --git a/WebKitTools/Scripts/webkitpy/steps/build.pyc b/WebKitTools/Scripts/webkitpy/steps/build.pyc Binary files differnew file mode 100644 index 0000000..8e9e5ee --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/build.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/checkstyle.py b/WebKitTools/Scripts/webkitpy/steps/checkstyle.py new file mode 100644 index 0000000..c8e20f8 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/checkstyle.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.executive import ScriptError +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import error + +class CheckStyle(AbstractStep): + @classmethod + def options(cls): + return [ + Options.non_interactive, + Options.check_style, + ] + + def run(self, state): + if not self._options.check_style: + return + os.chdir(self._tool.scm().checkout_root) + try: + self._run_script("check-webkit-style") + except ScriptError, e: + if self._options.non_interactive: + # We need to re-raise the exception here to have the + # style-queue do the right thing. 
+ raise e + if not self._tool.user.confirm("Are you sure you want to continue?"): + exit(1) diff --git a/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc b/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc Binary files differnew file mode 100644 index 0000000..561036b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py new file mode 100644 index 0000000..88e38f5 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options + + +class CleanWorkingDirectory(AbstractStep): + def __init__(self, tool, options, allow_local_commits=False): + AbstractStep.__init__(self, tool, options) + self._allow_local_commits = allow_local_commits + + @classmethod + def options(cls): + return [ + Options.force_clean, + Options.clean, + ] + + def run(self, state): + os.chdir(self._tool.scm().checkout_root) + if not self._allow_local_commits: + self._tool.scm().ensure_no_local_commits(self._options.force_clean) + if self._options.clean: + self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean) diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc Binary files differnew file mode 100644 index 0000000..11383f2 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py new file mode 100644 index 0000000..cabeba2 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py @@ -0,0 +1,34 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.cleanworkingdirectory import CleanWorkingDirectory + +class CleanWorkingDirectoryWithLocalCommits(CleanWorkingDirectory): + def __init__(self, tool, options): + # FIXME: This a bit of a hack. Consider doing this more cleanly. + CleanWorkingDirectory.__init__(self, tool, options, allow_local_commits=True) diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc Binary files differnew file mode 100644 index 0000000..757d19a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/closebug.py b/WebKitTools/Scripts/webkitpy/steps/closebug.py new file mode 100644 index 0000000..2640ee3 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closebug.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class CloseBug(AbstractStep): + @classmethod + def options(cls): + return [ + Options.close_bug, + ] + + def run(self, state): + if not self._options.close_bug: + return + # Check to make sure there are no r? or r+ patches on the bug before closing. + # Assume that r- patches are just previous patches someone forgot to obsolete. + patches = self._tool.bugs.fetch_bug(state["patch"].bug_id()).patches() + for patch in patches: + if patch.review() == "?" or patch.review() == "+": + log("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." % (patch.bug_id(), patch.id(), patch.review())) + return + self._tool.bugs.close_bug_as_fixed(state["patch"].bug_id(), "All reviewed patches have been landed. Closing bug.") diff --git a/WebKitTools/Scripts/webkitpy/steps/closebug.pyc b/WebKitTools/Scripts/webkitpy/steps/closebug.pyc Binary files differnew file mode 100644 index 0000000..356f430 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closebug.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py new file mode 100644 index 0000000..43a0c66 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py @@ -0,0 +1,58 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.comments import bug_comment_from_commit_text +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class CloseBugForLandDiff(AbstractStep): + @classmethod + def options(cls): + return [ + Options.close_bug, + ] + + def run(self, state): + comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + bug_id = state.get("bug_id") + if not bug_id and state.get("patch"): + bug_id = state.get("patch").bug_id() + + if bug_id: + log("Updating bug %s" % bug_id) + if self._options.close_bug: + self._tool.bugs.close_bug_as_fixed(bug_id, comment_text) + else: + # FIXME: We should a smart way to figure out if the patch is attached + # to the bug, and if so obsolete it. + self._tool.bugs.post_comment_to_bug(bug_id, comment_text) + else: + log(comment_text) + log("No bug id provided.") diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc Binary files differnew file mode 100644 index 0000000..2dd3814 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py new file mode 100644 index 0000000..73561ab --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py @@ -0,0 +1,41 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest + +from webkitpy.steps.closebugforlanddiff import CloseBugForLandDiff +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture + +class CloseBugForLandDiffTest(unittest.TestCase): + def test_empty_state(self): + capture = OutputCapture() + step = CloseBugForLandDiff(MockBugzillaTool(), Mock()) + expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n" + capture.assert_outputs(self, step.run, [{"commit_text" : "Mock commit text"}], expected_stderr=expected_stderr) diff --git a/WebKitTools/Scripts/webkitpy/steps/closepatch.py b/WebKitTools/Scripts/webkitpy/steps/closepatch.py new file mode 100644 index 0000000..f20fe7e --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closepatch.py @@ -0,0 +1,36 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.comments import bug_comment_from_commit_text +from webkitpy.steps.abstractstep import AbstractStep + + +class ClosePatch(AbstractStep): + def run(self, state): + comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + self._tool.bugs.clear_attachment_flags(state["patch"].id(), comment_text) diff --git a/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc b/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc Binary files differnew file mode 100644 index 0000000..3a2a75b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/commit.py b/WebKitTools/Scripts/webkitpy/steps/commit.py new file mode 100644 index 0000000..dd1fed7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/commit.py @@ -0,0 +1,35 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep + + +class Commit(AbstractStep): + def run(self, state): + commit_message = self._tool.scm().commit_message_for_this_commit() + state["commit_text"] = self._tool.scm().commit_with_message(commit_message.message()) diff --git a/WebKitTools/Scripts/webkitpy/steps/commit.pyc b/WebKitTools/Scripts/webkitpy/steps/commit.pyc Binary files differnew file mode 100644 index 0000000..fe9ef1a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/commit.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/completerollout.py b/WebKitTools/Scripts/webkitpy/steps/completerollout.py new file mode 100644 index 0000000..8534956 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/completerollout.py @@ -0,0 +1,66 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.comments import bug_comment_from_commit_text +from webkitpy.steps.build import Build +from webkitpy.steps.commit import Commit +from webkitpy.steps.metastep import MetaStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class CompleteRollout(MetaStep): + substeps = [ + Build, + Commit, + ] + + @classmethod + def options(cls): + collected_options = cls._collect_options_from_steps(cls.substeps) + collected_options.append(Options.complete_rollout) + return collected_options + + def run(self, state): + bug_id = state["bug_id"] + # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout. + # Once we trust rollout we will remove this option. + if not self._options.complete_rollout: + log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"webkit-patch land %s\" to commit the rollout." % bug_id) + return + + MetaStep.run(self, state) + + commit_comment = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + comment_text = "Reverted r%s for reason:\n\n%s\n\n%s" % (state["revision"], state["reason"], commit_comment) + + if not bug_id: + log(comment_text) + log("No bugs were updated.") + return + self._tool.bugs.reopen_bug(bug_id, comment_text) diff --git a/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc b/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc Binary files differnew file mode 100644 index 0000000..47312b8 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py new file mode 100644 index 0000000..fc28f8f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py @@ -0,0 +1,47 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import error + + +class ConfirmDiff(AbstractStep): + @classmethod + def options(cls): + return [ + Options.confirm, + ] + + def run(self, state): + if not self._options.confirm: + return + diff = self.cached_lookup(state, "diff") + self._tool.user.page(diff) + if not self._tool.user.confirm("Was that diff correct?"): + exit(1) diff --git a/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc Binary files differnew file mode 100644 index 0000000..d3fc1d4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/createbug.py b/WebKitTools/Scripts/webkitpy/steps/createbug.py new file mode 100644 index 0000000..75bf17f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/createbug.py @@ -0,0 +1,45 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options + + +class CreateBug(AbstractStep): + @classmethod + def options(cls): + return [ + Options.cc, + Options.component, + ] + + def run(self, state): + # No need to create a bug if we already have one. 
+ if state.get("bug_id"): + return + state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], component=self._options.component, cc=self._options.cc) diff --git a/WebKitTools/Scripts/webkitpy/steps/createbug.pyc b/WebKitTools/Scripts/webkitpy/steps/createbug.pyc Binary files differnew file mode 100644 index 0000000..e27f5ec --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/createbug.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/editchangelog.py b/WebKitTools/Scripts/webkitpy/steps/editchangelog.py new file mode 100644 index 0000000..d545c72 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/editchangelog.py @@ -0,0 +1,37 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.steps.abstractstep import AbstractStep + + +class EditChangeLog(AbstractStep): + def run(self, state): + os.chdir(self._tool.scm().checkout_root) + self._tool.user.edit(self.cached_lookup(state, "changelogs")) diff --git a/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc b/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc Binary files differnew file mode 100644 index 0000000..2ca5dbf --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py new file mode 100644 index 0000000..96f265a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py @@ -0,0 +1,48 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import error + + +class EnsureBuildersAreGreen(AbstractStep): + @classmethod + def options(cls): + return [ + Options.check_builders, + ] + + def run(self, state): + if not self._options.check_builders: + return + red_builders_names = self._tool.buildbot.red_core_builders_names() + if not red_builders_names: + return + red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names. + error("Builders [%s] are red, please do not commit.\nSee http://%s.\nPass --ignore-builders to bypass this check." % (", ".join(red_builders_names), self._tool.buildbot.buildbot_host)) diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc Binary files differnew file mode 100644 index 0000000..dd98935 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py new file mode 100644 index 0000000..cecf891 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import error + + +class EnsureLocalCommitIfNeeded(AbstractStep): + @classmethod + def options(cls): + return [ + Options.local_commit, + ] + + def run(self, state): + if self._options.local_commit and not self._tool.scm().supports_local_commits(): + error("--local-commit passed, but %s does not support local commits" % self._tool.scm.display_name()) diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc Binary files differnew file mode 100644 index 0000000..18ce98a --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/metastep.py b/WebKitTools/Scripts/webkitpy/steps/metastep.py new file mode 100644 index 0000000..9f368de --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/metastep.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep + + +# FIXME: Unify with StepSequence? I'm not sure yet which is the better design. 
+class MetaStep(AbstractStep): + substeps = [] # Override in subclasses + def __init__(self, tool, options): + AbstractStep.__init__(self, tool, options) + self._step_instances = [] + for step_class in self.substeps: + self._step_instances.append(step_class(tool, options)) + + @staticmethod + def _collect_options_from_steps(steps): + collected_options = [] + for step in steps: + collected_options = collected_options + step.options() + return collected_options + + @classmethod + def options(cls): + return cls._collect_options_from_steps(cls.substeps) + + def run(self, state): + for step in self._step_instances: + step.run(state) diff --git a/WebKitTools/Scripts/webkitpy/steps/metastep.pyc b/WebKitTools/Scripts/webkitpy/steps/metastep.pyc Binary files differnew file mode 100644 index 0000000..21d2bf6 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/metastep.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py new file mode 100644 index 0000000..dbdbabd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.grammar import pluralize +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class ObsoletePatches(AbstractStep): + @classmethod + def options(cls): + return [ + Options.obsolete_patches, + ] + + def run(self, state): + if not self._options.obsolete_patches: + return + bug_id = state["bug_id"] + patches = self._tool.bugs.fetch_bug(bug_id).patches() + if not patches: + return + log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id)) + for patch in patches: + self._tool.bugs.obsolete_attachment(patch.id()) diff --git a/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc Binary files differnew file mode 100644 index 0000000..4586950 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/options.py b/WebKitTools/Scripts/webkitpy/steps/options.py new file mode 100644 index 0000000..8b28f27 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/options.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
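+# Shared optparse option definitions for the step classes. Each step declares +# the subset it uses via its options() classmethod so that callers can collect +# exactly the flags a given sequence of steps understands.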
+ +from optparse import make_option + +class Options(object): + build = make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test.") + build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.") + cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.") + check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing.") + check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.") + clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches") + close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.") + complete_rollout = make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Commit the revert and re-open the original bug.") + component = make_option("--component", action="store", type="string", dest="component", help="Component for the new bug.") + confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.") + description = make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: \"patch\")") + email = make_option("--email", action="store", type="string", dest="email", help="Email address to use in ChangeLogs.") + force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)") + local_commit = make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch") + non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.") + obsolete_patches = make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one.") + open_bug = make_option("--open-bug", action="store_true", dest="open_bug", default=False, help="Opens the associated bug in a browser.") + parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.") + port = make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...).") + quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.") + request_commit = make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review.") + review = make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review.") + reviewer = make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER.") + test = 
make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests.") + update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.") diff --git a/WebKitTools/Scripts/webkitpy/steps/options.pyc b/WebKitTools/Scripts/webkitpy/steps/options.pyc Binary files differnew file mode 100644 index 0000000..7634605 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/options.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiff.py b/WebKitTools/Scripts/webkitpy/steps/postdiff.py new file mode 100644 index 0000000..a5ba2a4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/postdiff.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import StringIO + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options + + +class PostDiff(AbstractStep): + @classmethod + def options(cls): + return [ + Options.description, + Options.review, + Options.request_commit, + Options.open_bug, + ] + + def run(self, state): + diff = self.cached_lookup(state, "diff") + diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object + description = self._options.description or "Patch" + self._tool.bugs.add_patch_to_bug(state["bug_id"], diff_file, description, mark_for_review=self._options.review, mark_for_commit_queue=self._options.request_commit) + if self._options.open_bug: + self._tool.user.open_url(self._tool.bugs.bug_url_for_bug_id(state["bug_id"])) diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc b/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc Binary files differnew file mode 100644 index 0000000..82f1c09 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py b/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py new file mode 100644 index 0000000..449381c --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py @@ -0,0 +1,41 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import StringIO + +from webkitpy.steps.abstractstep import AbstractStep + + +class PostDiffForCommit(AbstractStep): + def run(self, state): + self._tool.bugs.add_patch_to_bug( + state["bug_id"], + StringIO.StringIO(self.cached_lookup(state, "diff")), + "Patch for landing", + mark_for_review=False, + mark_for_landing=True) diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py new file mode 100644 index 0000000..bd41f0b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py @@ -0,0 +1,59 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.executive import ScriptError +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import error + + +class PrepareChangeLog(AbstractStep): + @classmethod + def options(cls): + return [ + Options.port, + Options.quiet, + Options.email, + ] + + def run(self, state): + if self.cached_lookup(state, "changelogs"): + return + os.chdir(self._tool.scm().checkout_root) + args = [self.port().script_path("prepare-ChangeLog")] + if state["bug_id"]: + args.append("--bug=%s" % state["bug_id"]) + if self._options.email: + args.append("--email=%s" % self._options.email) + try: + self._tool.executive.run_and_throw_if_fail(args, self._options.quiet) + except ScriptError, e: + error("Unable to prepare ChangeLogs.") + state["diff"] = None # We've changed the diff diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc Binary files differnew file mode 100644 index 0000000..2f6edbd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py new file mode 100644 index 0000000..88e5134 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py @@ -0,0 +1,49 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.changelogs import ChangeLog +from webkitpy.steps.abstractstep import AbstractStep + + +class PrepareChangeLogForRevert(AbstractStep): + def run(self, state): + # First, discard the ChangeLog changes from the rollout. + os.chdir(self._tool.scm().checkout_root) + changelog_paths = self._tool.scm().modified_changelogs() + self._tool.scm().revert_files(changelog_paths) + + # Second, make new ChangeLog entries for this rollout. + # This could move to prepare-ChangeLog by adding a --revert= option. + self._run_script("prepare-ChangeLog") + bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None + for changelog_path in changelog_paths: + # FIXME: Seems we should prepare the message outside of changelogs.py and then just pass in + # text that we want to use to replace the reviewed by line. + ChangeLog(changelog_path).update_for_revert(state["revision"], state["reason"], bug_url) diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc Binary files differnew file mode 100644 index 0000000..c1f0ca4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py new file mode 100644 index 0000000..fb2f877 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py @@ -0,0 +1,45 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep + + +class PromptForBugOrTitle(AbstractStep): + def run(self, state): + # No need to prompt if we already have the bug_id. + if state.get("bug_id"): + return + user_response = self._tool.user.prompt("Please enter a bug number or a title for a new bug:\n") + # If the user responds with a number, we assume it's a bug number. + # Otherwise we assume it's a bug subject. + try: + state["bug_id"] = int(user_response) + except (ValueError, TypeError): + state["bug_title"] = user_response + # FIXME: This is kind of a lame description. + state["bug_description"] = user_response diff --git a/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc Binary files differnew file mode 100644 index 0000000..fdca409 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/revertrevision.py b/WebKitTools/Scripts/webkitpy/steps/revertrevision.py new file mode 100644 index 0000000..ce6c263 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/revertrevision.py @@ -0,0 +1,34 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
+ +from webkitpy.steps.abstractstep import AbstractStep + + +class RevertRevision(AbstractStep): + def run(self, state): + self._tool.scm().apply_reverse_diff(state["revision"]) diff --git a/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc b/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc Binary files differnew file mode 100644 index 0000000..ec08b1f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/runtests.py b/WebKitTools/Scripts/webkitpy/steps/runtests.py new file mode 100644 index 0000000..ebe809f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/runtests.py @@ -0,0 +1,66 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + +class RunTests(AbstractStep): + @classmethod + def options(cls): + return [ + Options.build, + Options.test, + Options.non_interactive, + Options.quiet, + Options.port, + ] + + def run(self, state): + if not self._options.build: + return + if not self._options.test: + return + + # Run the scripting unit tests first because they're quickest. 
+ log("Running Python unit tests") + self._tool.executive.run_and_throw_if_fail(self.port().run_python_unittests_command()) + log("Running Perl unit tests") + self._tool.executive.run_and_throw_if_fail(self.port().run_perl_unittests_command()) + log("Running JavaScriptCore tests") + self._tool.executive.run_and_throw_if_fail(self.port().run_javascriptcore_tests_command(), quiet=True) + + log("Running run-webkit-tests") + args = self.port().run_webkit_tests_command() + if self._options.non_interactive: + args.append("--no-launch-safari") + args.append("--exit-after-n-failures=1") + if self._options.quiet: + args.append("--quiet") + self._tool.executive.run_and_throw_if_fail(args) + diff --git a/WebKitTools/Scripts/webkitpy/steps/runtests.pyc b/WebKitTools/Scripts/webkitpy/steps/runtests.pyc Binary files differnew file mode 100644 index 0000000..16908fb --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/runtests.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py b/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py new file mode 100644 index 0000000..3e6a032 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest + +from webkitpy.steps.update import Update +from webkitpy.steps.promptforbugortitle import PromptForBugOrTitle +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture +from webkitpy.mock import Mock + + +class StepsTest(unittest.TestCase): + def _run_step(self, step, tool=None, options=None, state=None): + if not tool: + tool = MockBugzillaTool() + if not options: + options = Mock() + if not state: + state = {} + step(tool, options).run(state) + + def test_update_step(self): + options = Mock() + options.update = True + self._run_step(Update, options) + + def test_prompt_for_bug_or_title_step(self): + tool = MockBugzillaTool() + tool.user.prompt = lambda message: 42 + self._run_step(PromptForBugOrTitle, tool=tool) diff --git a/WebKitTools/Scripts/webkitpy/steps/update.py b/WebKitTools/Scripts/webkitpy/steps/update.py new file mode 100644 index 0000000..0f45671 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/update.py @@ -0,0 +1,46 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log + + +class Update(AbstractStep): + @classmethod + def options(cls): + return [ + Options.update, + Options.port, + ] + + def run(self, state): + if not self._options.update: + return + log("Updating working directory") + self._tool.executive.run_and_throw_if_fail(self.port().update_webkit_command(), quiet=True) diff --git a/WebKitTools/Scripts/webkitpy/steps/update.pyc b/WebKitTools/Scripts/webkitpy/steps/update.pyc Binary files differnew file mode 100644 index 0000000..0b9e7e9 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/update.pyc diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py new file mode 100644 index 0000000..102a454 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py @@ -0,0 +1,46 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest + +from webkitpy.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer +from webkitpy.mock import Mock +from webkitpy.mock_bugzillatool import MockBugzillaTool +from webkitpy.outputcapture import OutputCapture + +class UpdateChangeLogsWithReviewerTest(unittest.TestCase): + def test_guess_reviewer_from_bug(self): + capture = OutputCapture() + step = UpdateChangeLogsWithReviewer(MockBugzillaTool(), Mock()) + expected_stderr = "0 reviewed patches on bug 75, cannot infer reviewer.\n" + capture.assert_outputs(self, step._guess_reviewer_from_bug, [75], expected_stderr=expected_stderr) + + def test_empty_state(self): + capture = OutputCapture() + step = UpdateChangeLogsWithReviewer(MockBugzillaTool(), Mock()) + capture.assert_outputs(self, step.run, [{}]) diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py new file mode 100644 index 0000000..90fdc35 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py @@ -0,0 +1,71 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.changelogs import ChangeLog +from webkitpy.grammar import pluralize +from webkitpy.steps.abstractstep import AbstractStep +from webkitpy.steps.options import Options +from webkitpy.webkit_logging import log, error + +class UpdateChangeLogsWithReviewer(AbstractStep): + @classmethod + def options(cls): + return [ + Options.reviewer, + ] + + def _guess_reviewer_from_bug(self, bug_id): + patches = self._tool.bugs.fetch_bug(bug_id).reviewed_patches() + if len(patches) != 1: + log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id)) + return None + patch = patches[0] + log("Guessing \"%s\" as reviewer from attachment %s on bug %s." 
% (patch.reviewer().full_name, patch.id(), bug_id)) + return patch.reviewer().full_name + + def run(self, state): + bug_id = state.get("bug_id") + if not bug_id and state.get("patch"): + bug_id = state.get("patch").bug_id() + + reviewer = self._options.reviewer + if not reviewer: + if not bug_id: + log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.") + return + reviewer = self._guess_reviewer_from_bug(bug_id) + + if not reviewer: + log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id) + return + + os.chdir(self._tool.scm().checkout_root) + for changelog_path in self._tool.scm().modified_changelogs(): + ChangeLog(changelog_path).set_reviewer(reviewer) diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc Binary files differnew file mode 100644 index 0000000..6c9b7fd --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc diff --git a/WebKitTools/Scripts/modules/stepsequence.py b/WebKitTools/Scripts/webkitpy/stepsequence.py index 6f085c9..008b366 100644 --- a/WebKitTools/Scripts/modules/stepsequence.py +++ b/WebKitTools/Scripts/webkitpy/stepsequence.py @@ -26,21 +26,28 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from modules.buildsteps import CommandOptions -from modules.executive import ScriptError -from modules.logging import log -from modules.scm import CheckoutNeedsUpdate -from modules.workqueue import WorkQueue +import webkitpy.steps as steps + +from webkitpy.executive import ScriptError +from webkitpy.webkit_logging import log +from webkitpy.scm import CheckoutNeedsUpdate +from webkitpy.queueengine import QueueEngine + + +class StepSequenceErrorHandler(): + @classmethod + def handle_script_error(cls, tool, patch, script_error): + raise NotImplementedError, "subclasses must implement" class StepSequence(object): def __init__(self, steps): - self._steps = steps + self._steps = steps or [] def options(self): collected_options = [ - CommandOptions.parent_command, - CommandOptions.quiet, + steps.Options.parent_command, + steps.Options.quiet, ] for step in self._steps: collected_options = collected_options + step.options() @@ -48,21 +55,23 @@ class StepSequence(object): collected_options = sorted(set(collected_options)) return collected_options - def _run(self, tool, options, patch): + def _run(self, tool, options, state): for step in self._steps: - step(tool, options, patch).run() + step(tool, options).run(state) - def run_and_handle_errors(self, tool, options, patch=None): + def run_and_handle_errors(self, tool, options, state=None): + if not state: + state = {} try: - self._run(tool, options, patch) + self._run(tool, options, state) except CheckoutNeedsUpdate, e: log("Commit failed because the checkout is out of date. 
Please update and try again.") log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.") - WorkQueue.exit_after_handled_error(e) + QueueEngine.exit_after_handled_error(e) except ScriptError, e: if not options.quiet: log(e.message_with_output()) if options.parent_command: command = tool.command_by_name(options.parent_command) - command.handle_script_error(tool, patch, e) - WorkQueue.exit_after_handled_error(e) + command.handle_script_error(tool, state, e) + QueueEngine.exit_after_handled_error(e) diff --git a/WebKitTools/Scripts/webkitpy/stepsequence.pyc b/WebKitTools/Scripts/webkitpy/stepsequence.pyc Binary files differnew file mode 100644 index 0000000..4b3505e --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/stepsequence.pyc diff --git a/WebKitTools/Scripts/modules/commands/__init__.py b/WebKitTools/Scripts/webkitpy/style/__init__.py index ef65bee..ef65bee 100644 --- a/WebKitTools/Scripts/modules/commands/__init__.py +++ b/WebKitTools/Scripts/webkitpy/style/__init__.py diff --git a/WebKitTools/Scripts/webkitpy/style/checker.py b/WebKitTools/Scripts/webkitpy/style/checker.py new file mode 100644 index 0000000..faf954f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/checker.py @@ -0,0 +1,809 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Front end of some style-checker modules.""" + +import codecs +import getopt +import os.path +import sys + +from .. style_references import parse_patch +from error_handlers import DefaultStyleErrorHandler +from error_handlers import PatchStyleErrorHandler +from processors.cpp import CppProcessor +from processors.text import TextProcessor + + +# These defaults are used by check-webkit-style. +WEBKIT_DEFAULT_VERBOSITY = 1 +WEBKIT_DEFAULT_OUTPUT_FORMAT = 'emacs' + + +# FIXME: For style categories we will never want to have, remove them. 
+# For categories for which we want to have similar functionality, +# modify the implementation and enable them. +# +# Throughout this module, we use "filter rule" rather than "filter" +# for an individual boolean filter flag like "+foo". This allows us to +# reserve "filter" for what one gets by collectively applying all of +# the filter rules. +# +# The _WEBKIT_FILTER_RULES are prepended to any user-specified filter +# rules. Since by default all errors are on, only include rules that +# begin with a - sign. +WEBKIT_DEFAULT_FILTER_RULES = [ + '-build/endif_comment', + '-build/include_what_you_use', # <string> for std::string + '-build/storage_class', # const static + '-legal/copyright', + '-readability/multiline_comment', + '-readability/braces', # int foo() {}; + '-readability/fn_size', + '-readability/casting', + '-readability/function', + '-runtime/arrays', # variable length array + '-runtime/casting', + '-runtime/sizeof', + '-runtime/explicit', # explicit + '-runtime/virtual', # virtual dtor + '-runtime/printf', + '-runtime/threadsafe_fn', + '-runtime/rtti', + '-whitespace/blank_line', + '-whitespace/end_of_line', + '-whitespace/labels', + ] + + +# Some files should be skipped when checking style. For example, +# WebKit maintains some files in Mozilla style on purpose to ease +# future merges. +# +# Include a warning for skipped files that are less obvious. +SKIPPED_FILES_WITH_WARNING = [ + # The Qt API and tests do not follow WebKit style. + # They follow Qt style. :) + "gtk2drawing.c", # WebCore/platform/gtk/gtk2drawing.c + "gtk2drawing.h", # WebCore/platform/gtk/gtk2drawing.h + "JavaScriptCore/qt/api/", + "WebKit/gtk/tests/", + "WebKit/qt/Api/", + "WebKit/qt/tests/", + ] + + +# Don't include a warning for skipped files that are more common +# and more obvious. +SKIPPED_FILES_WITHOUT_WARNING = [ + "LayoutTests/" + ] + + +def style_categories(): + """Return the set of all categories used by check-webkit-style.""" + # If other processors had categories, we would take their union here. + return CppProcessor.categories + + +def webkit_argument_defaults(): + """Return the DefaultArguments instance for use by check-webkit-style.""" + return ArgumentDefaults(WEBKIT_DEFAULT_OUTPUT_FORMAT, + WEBKIT_DEFAULT_VERBOSITY, + WEBKIT_DEFAULT_FILTER_RULES) + + +def _create_usage(defaults): + """Return the usage string to display for command help. + + Args: + defaults: An ArgumentDefaults instance. + + """ + usage = """ +Syntax: %(program_name)s [--verbose=#] [--git-commit=<SingleCommit>] [--output=vs7] + [--filter=-x,+y,...] [file] ... + + The style guidelines this tries to follow are here: + http://webkit.org/coding/coding-style.html + + Every style error is given a confidence score from 1-5, with 5 meaning + we are certain of the problem, and 1 meaning it could be a legitimate + construct. This can miss some errors and does not substitute for + code review. + + To prevent specific lines from being linted, add a '// NOLINT' comment to the + end of the line. + + Linted extensions are .cpp, .c and .h. Other file types are ignored. + + The file parameter is optional and accepts multiple files. Leaving + out the file parameter applies the check to all files considered changed + by your source control management system. + + Flags: + + verbose=# + A number 1-5 that restricts output to errors with a confidence + score at or above this value. In particular, the value 1 displays + all errors. The default is %(default_verbosity)s. 
+ + git-commit=<SingleCommit> + Checks the style of everything from the given commit to the local tree. + + output=vs7 + The output format, which may be one of + emacs : to ease emacs parsing + vs7 : compatible with Visual Studio + Defaults to "%(default_output_format)s". Other formats are unsupported. + + filter=-x,+y,... + A comma-separated list of boolean filter rules used to filter + which categories of style guidelines to check. The script checks + a category if the category passes the filter rules, as follows. + + Any webkit category starts out passing. All filter rules are then + evaluated left to right, with later rules taking precedence. For + example, the rule "+foo" passes any category that starts with "foo", + and "-foo" fails any such category. The filter input "-whitespace, + +whitespace/braces" fails the category "whitespace/tab" and passes + "whitespace/braces". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=-whitespace,-runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + Category names appear in error messages in brackets, for example + [whitespace/indent]. To see a list of all categories available to + %(program_name)s, along with which are enabled by default, pass + the empty filter as follows: + --filter= +""" % {'program_name': os.path.basename(sys.argv[0]), + 'default_verbosity': defaults.verbosity, + 'default_output_format': defaults.output_format} + + return usage + + +class CategoryFilter(object): + + """Filters whether to check style categories.""" + + def __init__(self, filter_rules=None): + """Create a category filter. + + This method performs argument validation but does not strip + leading or trailing white space. + + Args: + filter_rules: A list of strings that are filter rules, which + are strings beginning with the plus or minus + symbol (+/-). The list should include any + default filter rules at the beginning. + Defaults to the empty list. + + Raises: + ValueError: Invalid filter rule if a rule does not start with + plus ("+") or minus ("-"). + + """ + if filter_rules is None: + filter_rules = [] + + for rule in filter_rules: + if not (rule.startswith('+') or rule.startswith('-')): + raise ValueError('Invalid filter rule "%s": every rule ' + 'in the --filter flag must start ' + 'with + or -.' % rule) + + self._filter_rules = filter_rules + self._should_check_category = {} # Cached dictionary of category to True/False + + def __str__(self): + return ",".join(self._filter_rules) + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this CategoryFilter instance is equal to another.""" + return self._filter_rules == other._filter_rules + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce from __eq__(). + return not (self == other) + + def should_check(self, category): + """Return whether the category should be checked. + + The rules for determining whether a category should be checked + are as follows. By default all categories should be checked. + Then apply the filter rules in order from first to last, with + later flags taking precedence. + + A filter rule applies to a category if the string after the + leading plus/minus (+/-) matches the beginning of the category + name. A plus (+) means the category should be checked, while a + minus (-) means the category should not be checked.
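+ + For example, given the filter rules ["-whitespace", "+whitespace/braces"], + the category "whitespace/tab" is not checked, while the category + "whitespace/braces" is checked.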
+ + """ + if category in self._should_check_category: + return self._should_check_category[category] + + should_check = True # All categories checked by default. + for rule in self._filter_rules: + if not category.startswith(rule[1:]): + continue + should_check = rule.startswith('+') + self._should_check_category[category] = should_check # Update cache. + return should_check + + +# This class should not have knowledge of the flag key names. +class ProcessorOptions(object): + + """A container to store options to use when checking style. + + Attributes: + output_format: A string that is the output format. The supported + output formats are "emacs" which emacs can parse + and "vs7" which Microsoft Visual Studio 7 can parse. + + verbosity: An integer between 1-5 inclusive that restricts output + to errors with a confidence score at or above this value. + The default is 1, which displays all errors. + + filter: A CategoryFilter instance. The default is the empty filter, + which means that all categories should be checked. + + git_commit: A string representing the git commit to check. + The default is None. + + extra_flag_values: A string-string dictionary of all flag key-value + pairs that are not otherwise represented by this + class. The default is the empty dictionary. + + """ + + def __init__(self, output_format="emacs", verbosity=1, filter=None, + git_commit=None, extra_flag_values=None): + if filter is None: + filter = CategoryFilter() + if extra_flag_values is None: + extra_flag_values = {} + + if output_format not in ("emacs", "vs7"): + raise ValueError('Invalid "output_format" parameter: ' + 'value must be "emacs" or "vs7". ' + 'Value given: "%s".' % output_format) + + if (verbosity < 1) or (verbosity > 5): + raise ValueError('Invalid "verbosity" parameter: ' + "value must be an integer between 1-5 inclusive. " + 'Value given: "%s".' % verbosity) + + self.output_format = output_format + self.verbosity = verbosity + self.filter = filter + self.git_commit = git_commit + self.extra_flag_values = extra_flag_values + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this ProcessorOptions instance is equal to another.""" + if self.output_format != other.output_format: + return False + if self.verbosity != other.verbosity: + return False + if self.filter != other.filter: + return False + if self.git_commit != other.git_commit: + return False + if self.extra_flag_values != other.extra_flag_values: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce from __eq__(). + return not (self == other) + + def is_reportable(self, category, confidence_in_error): + """Return whether an error is reportable. + + An error is reportable if the confidence in the error + is at least the current verbosity level, and if the current + filter says that the category should be checked. + + Args: + category: A string that is a style category. + confidence_in_error: An integer between 1 and 5, inclusive, that + represents the application's confidence in + the error. A higher number signifies greater + confidence. + + """ + if confidence_in_error < self.verbosity: + return False + + if self.filter is None: + return True # All categories should be checked by default. + + return self.filter.should_check(category) + + +# This class should not have knowledge of the flag key names. +class ArgumentDefaults(object): + + """A container to store default argument values. 
+ + Attributes: + output_format: A string that is the default output format. + verbosity: An integer that is the default verbosity level. + filter_rules: A list of strings that are boolean filter rules + to prepend to any user-specified rules. + + """ + + def __init__(self, default_output_format, default_verbosity, + default_filter_rules): + self.output_format = default_output_format + self.verbosity = default_verbosity + self.filter_rules = default_filter_rules + + +class ArgumentPrinter(object): + + """Supports the printing of check-webkit-style command arguments.""" + + def _flag_pair_to_string(self, flag_key, flag_value): + return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value } + + def to_flag_string(self, options): + """Return a flag string yielding the given ProcessorOptions instance. + + This method orders the flag values alphabetically by the flag key. + + Args: + options: A ProcessorOptions instance. + + """ + flags = options.extra_flag_values.copy() + + flags['output'] = options.output_format + flags['verbose'] = options.verbosity + if options.filter: + # Only include the filter flag if rules are present. + filter_string = str(options.filter) + if filter_string: + flags['filter'] = filter_string + if options.git_commit: + flags['git-commit'] = options.git_commit + + flag_string = '' + # Alphabetizing lets us unit test this method. + for key in sorted(flags.keys()): + flag_string += self._flag_pair_to_string(key, flags[key]) + ' ' + + return flag_string.strip() + + +class ArgumentParser(object): + + """Supports the parsing of check-webkit-style command arguments. + + Attributes: + defaults: An ArgumentDefaults instance. + create_usage: A function that accepts an ArgumentDefaults instance + and returns a string of usage instructions. + This defaults to the function used to generate the + usage string for check-webkit-style. + doc_print: A function that accepts a string parameter and that is + called to display help messages. This defaults to + sys.stderr.write(). + + """ + + def __init__(self, argument_defaults, create_usage=None, doc_print=None): + if create_usage is None: + create_usage = _create_usage + if doc_print is None: + doc_print = sys.stderr.write + + self.defaults = argument_defaults + self.create_usage = create_usage + self.doc_print = doc_print + + def _exit_with_usage(self, error_message=''): + """Exit and print a usage string with an optional error message. + + Args: + error_message: A string that is an error message to print. + + """ + usage = self.create_usage(self.defaults) + self.doc_print(usage) + if error_message: + sys.exit('\nFATAL ERROR: ' + error_message) + else: + sys.exit(1) + + def _exit_with_categories(self): + """Exit and print the style categories and default filter rules.""" + self.doc_print('\nAll categories:\n') + categories = style_categories() + for category in sorted(categories): + self.doc_print(' ' + category + '\n') + + self.doc_print('\nDefault filter rules**:\n') + for filter_rule in sorted(self.defaults.filter_rules): + self.doc_print(' ' + filter_rule + '\n') + self.doc_print('\n**The command always evaluates the above rules, ' + 'and before any --filter flag.\n\n') + + sys.exit(0) + + def _parse_filter_flag(self, flag_value): + """Parse the value of the --filter flag. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + flag_value: A string of comma-separated filter rules, for + example "-whitespace,+whitespace/indent". 
+ + """ + filters = [] + for uncleaned_filter in flag_value.split(','): + filter = uncleaned_filter.strip() + if not filter: + continue + filters.append(filter) + return filters + + def parse(self, args, extra_flags=None): + """Parse the command line arguments to check-webkit-style. + + Args: + args: A list of command-line arguments as returned by sys.argv[1:]. + extra_flags: A list of flags whose values we want to extract, but + are not supported by the ProcessorOptions class. + An example flag "new_flag=". This defaults to the + empty list. + + Returns: + A tuple of (filenames, options) + + filenames: The list of filenames to check. + options: A ProcessorOptions instance. + + """ + if extra_flags is None: + extra_flags = [] + + output_format = self.defaults.output_format + verbosity = self.defaults.verbosity + filter_rules = self.defaults.filter_rules + + # The flags already supported by the ProcessorOptions class. + flags = ['help', 'output=', 'verbose=', 'filter=', 'git-commit='] + + for extra_flag in extra_flags: + if extra_flag in flags: + raise ValueError('Flag \'%(extra_flag)s is duplicated ' + 'or already supported.' % + {'extra_flag': extra_flag}) + flags.append(extra_flag) + + try: + (opts, filenames) = getopt.getopt(args, '', flags) + except getopt.GetoptError: + # FIXME: Settle on an error handling approach: come up + # with a consistent guideline as to when and whether + # a ValueError should be raised versus calling + # sys.exit when needing to interrupt execution. + self._exit_with_usage('Invalid arguments.') + + extra_flag_values = {} + git_commit = None + + for (opt, val) in opts: + if opt == '--help': + self._exit_with_usage() + elif opt == '--output': + output_format = val + elif opt == '--verbose': + verbosity = val + elif opt == '--git-commit': + git_commit = val + elif opt == '--filter': + if not val: + self._exit_with_categories() + # Prepend the defaults. + filter_rules = filter_rules + self._parse_filter_flag(val) + else: + extra_flag_values[opt] = val + + # Check validity of resulting values. + if filenames and (git_commit != None): + self._exit_with_usage('It is not possible to check files and a ' + 'specific commit at the same time.') + + if output_format not in ('emacs', 'vs7'): + raise ValueError('Invalid --output value "%s": The only ' + 'allowed output formats are emacs and vs7.' % + output_format) + + verbosity = int(verbosity) + if (verbosity < 1) or (verbosity > 5): + raise ValueError('Invalid --verbose value %s: value must ' + 'be between 1-5.' 
% verbosity) + + filter = CategoryFilter(filter_rules) + + options = ProcessorOptions(output_format, verbosity, filter, + git_commit, extra_flag_values) + + return (filenames, options) + + +# Enum-like idiom +class FileType: + + NONE = 1 + # Alphabetize remaining types + CPP = 2 + TEXT = 3 + + +class ProcessorDispatcher(object): + + """Supports determining whether and how to check style, based on path.""" + + cpp_file_extensions = ( + 'c', + 'cpp', + 'h', + ) + + text_file_extensions = ( + 'css', + 'html', + 'idl', + 'js', + 'mm', + 'php', + 'pm', + 'py', + 'txt', + ) + + def _file_extension(self, file_path): + """Return the file extension without the leading dot.""" + return os.path.splitext(file_path)[1].lstrip(".") + + def should_skip_with_warning(self, file_path): + """Return whether the given file should be skipped with a warning.""" + for skipped_file in SKIPPED_FILES_WITH_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def should_skip_without_warning(self, file_path): + """Return whether the given file should be skipped without a warning.""" + for skipped_file in SKIPPED_FILES_WITHOUT_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def _file_type(self, file_path): + """Return the file type corresponding to the given file.""" + file_extension = self._file_extension(file_path) + + if (file_extension in self.cpp_file_extensions) or (file_path == '-'): + # FIXME: Do something about the comment below and the issue it + # raises since cpp_style already relies on the extension. + # + # Treat stdin as C++. Since the extension is unknown when + # reading from stdin, cpp_style tests should not rely on + # the extension. + return FileType.CPP + elif ("ChangeLog" in file_path + or "WebKitTools/Scripts/" in file_path + or file_extension in self.text_file_extensions): + return FileType.TEXT + else: + return FileType.NONE + + def _create_processor(self, file_type, file_path, handle_style_error, verbosity): + """Instantiate and return a style processor based on file type.""" + if file_type == FileType.NONE: + processor = None + elif file_type == FileType.CPP: + file_extension = self._file_extension(file_path) + processor = CppProcessor(file_path, file_extension, handle_style_error, verbosity) + elif file_type == FileType.TEXT: + processor = TextProcessor(file_path, handle_style_error) + else: + raise ValueError('Invalid file type "%(file_type)s": the only valid file types ' + "are %(NONE)s, %(CPP)s, and %(TEXT)s." + % {"file_type": file_type, + "NONE": FileType.NONE, + "CPP": FileType.CPP, + "TEXT": FileType.TEXT}) + + return processor + + def dispatch_processor(self, file_path, handle_style_error, verbosity): + """Instantiate and return a style processor based on file path.""" + file_type = self._file_type(file_path) + + processor = self._create_processor(file_type, + file_path, + handle_style_error, + verbosity) + return processor + + +class StyleChecker(object): + + """Supports checking style in files and patches. + + Attributes: + error_count: An integer that is the total number of reported + errors for the lifetime of this StyleChecker + instance. + options: A ProcessorOptions instance that controls the behavior + of style checking. + + """ + + def __init__(self, options, stderr_write=None): + """Create a StyleChecker instance. + + Args: + options: See options attribute. + stderr_write: A function that takes a string as a parameter + and that is called when a style error occurs. + Defaults to sys.stderr.write. 
This should be + used only for unit tests. + + """ + if stderr_write is None: + stderr_write = sys.stderr.write + + self._stderr_write = stderr_write + self.error_count = 0 + self.options = options + + def _increment_error_count(self): + """Increment the total count of reported errors.""" + self.error_count += 1 + + def _process_file(self, processor, file_path, handle_style_error): + """Process the file using the given processor.""" + try: + # Support the UNIX convention of using "-" for stdin. Note that + # we are not opening the file with universal newline support + # (which codecs doesn't support anyway), so the resulting lines do + # contain trailing '\r' characters if we are reading a file that + # has CRLF endings. + # If after the split a trailing '\r' is present, it is removed + # below. If it is not expected to be present (i.e. os.linesep != + # '\r\n' as in Windows), a warning is issued below if this file + # is processed. + if file_path == '-': + lines = codecs.StreamReaderWriter(sys.stdin, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace').read().split('\n') + else: + lines = codecs.open(file_path, 'r', 'utf8', 'replace').read().split('\n') + + carriage_return_found = False + # Remove trailing '\r'. + for line_number in range(len(lines)): + if lines[line_number].endswith('\r'): + lines[line_number] = lines[line_number].rstrip('\r') + carriage_return_found = True + + except IOError: + self._stderr_write("Skipping input '%s': Can't open for reading\n" % file_path) + return + + processor.process(lines) + + if carriage_return_found and os.linesep != '\r\n': + # FIXME: Make sure this error also shows up when checking + # patches, if appropriate. + # + # Use 0 for line_number since outputting only one error for + # potentially several lines. + handle_style_error(file_path, 0, 'whitespace/newline', 1, + 'One or more unexpected \\r (^M) found;' + 'better to use only a \\n') + + def check_file(self, file_path, handle_style_error=None, process_file=None): + """Check style in the given file. + + Args: + file_path: A string that is the path of the file to process. + handle_style_error: The function to call when a style error + occurs. This parameter is meant for internal + use within this class. Defaults to a + DefaultStyleErrorHandler instance. + process_file: The function to call to process the file. This + parameter should be used only for unit tests. + Defaults to the file processing method of this class. + + """ + if handle_style_error is None: + handle_style_error = DefaultStyleErrorHandler(file_path, + self.options, + self._increment_error_count, + self._stderr_write) + if process_file is None: + process_file = self._process_file + + dispatcher = ProcessorDispatcher() + + if dispatcher.should_skip_without_warning(file_path): + return + if dispatcher.should_skip_with_warning(file_path): + self._stderr_write('Ignoring "%s": this file is exempt from the ' + "style guide.\n" % file_path) + return + + verbosity = self.options.verbosity + processor = dispatcher.dispatch_processor(file_path, + handle_style_error, + verbosity) + if processor is None: + return + + process_file(processor, file_path, handle_style_error) + + def check_patch(self, patch_string): + """Check style in the given patch. + + Args: + patch_string: A string that is a patch string. 
+ + """ + patch_files = parse_patch(patch_string) + for file_path, diff in patch_files.iteritems(): + style_error_handler = PatchStyleErrorHandler(diff, + file_path, + self.options, + self._increment_error_count, + self._stderr_write) + + self.check_file(file_path, style_error_handler) + diff --git a/WebKitTools/Scripts/webkitpy/style/checker_unittest.py b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py new file mode 100755 index 0000000..4d6b2e7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py @@ -0,0 +1,677 @@ +#!/usr/bin/python +# -*- coding: utf-8; -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for style.py.""" + +import unittest + +import checker as style +from checker import CategoryFilter +from checker import ProcessorDispatcher +from checker import ProcessorOptions +from checker import StyleChecker +from processors.cpp import CppProcessor +from processors.text import TextProcessor + +class CategoryFilterTest(unittest.TestCase): + + """Tests CategoryFilter class.""" + + def test_init(self): + """Test __init__ constructor.""" + self.assertRaises(ValueError, CategoryFilter, ["no_prefix"]) + CategoryFilter() # No ValueError: works + CategoryFilter(["+"]) # No ValueError: works + CategoryFilter(["-"]) # No ValueError: works + + def test_str(self): + """Test __str__ "to string" operator.""" + filter = CategoryFilter(["+a", "-b"]) + self.assertEquals(str(filter), "+a,-b") + + def test_eq(self): + """Test __eq__ equality function.""" + filter1 = CategoryFilter(["+a", "+b"]) + filter2 = CategoryFilter(["+a", "+b"]) + filter3 = CategoryFilter(["+b", "+a"]) + + # == calls __eq__. + self.assertTrue(filter1 == filter2) + self.assertFalse(filter1 == filter3) # Cannot test with assertNotEqual. + + def test_ne(self): + """Test __ne__ inequality function.""" + # != calls __ne__. 
+ # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + self.assertFalse(CategoryFilter() != CategoryFilter()) + + def test_should_check(self): + """Test should_check() method.""" + filter = CategoryFilter() + self.assertTrue(filter.should_check("everything")) + # Check a second time to exercise cache. + self.assertTrue(filter.should_check("everything")) + + filter = CategoryFilter(["-"]) + self.assertFalse(filter.should_check("anything")) + # Check a second time to exercise cache. + self.assertFalse(filter.should_check("anything")) + + filter = CategoryFilter(["-", "+ab"]) + self.assertTrue(filter.should_check("abc")) + self.assertFalse(filter.should_check("a")) + + filter = CategoryFilter(["+", "-ab"]) + self.assertFalse(filter.should_check("abc")) + self.assertTrue(filter.should_check("a")) + + +class ProcessorOptionsTest(unittest.TestCase): + + """Tests ProcessorOptions class.""" + + def test_init(self): + """Test __init__ constructor.""" + # Check default parameters. + options = ProcessorOptions() + self.assertEquals(options.extra_flag_values, {}) + self.assertEquals(options.filter, CategoryFilter()) + self.assertEquals(options.git_commit, None) + self.assertEquals(options.output_format, "emacs") + self.assertEquals(options.verbosity, 1) + + # Check argument validation. + self.assertRaises(ValueError, ProcessorOptions, output_format="bad") + ProcessorOptions(output_format="emacs") # No ValueError: works + ProcessorOptions(output_format="vs7") # works + self.assertRaises(ValueError, ProcessorOptions, verbosity=0) + self.assertRaises(ValueError, ProcessorOptions, verbosity=6) + ProcessorOptions(verbosity=1) # works + ProcessorOptions(verbosity=5) # works + + # Check attributes. + options = ProcessorOptions(extra_flag_values={"extra_value" : 2}, + filter=CategoryFilter(["+"]), + git_commit="commit", + output_format="vs7", + verbosity=3) + self.assertEquals(options.extra_flag_values, {"extra_value" : 2}) + self.assertEquals(options.filter, CategoryFilter(["+"])) + self.assertEquals(options.git_commit, "commit") + self.assertEquals(options.output_format, "vs7") + self.assertEquals(options.verbosity, 3) + + def test_eq(self): + """Test __eq__ equality function.""" + # == calls __eq__. + self.assertTrue(ProcessorOptions() == ProcessorOptions()) + + # Verify that a difference in any argument cause equality to fail. + options = ProcessorOptions(extra_flag_values={"extra_value" : 1}, + filter=CategoryFilter(["+"]), + git_commit="commit", + output_format="vs7", + verbosity=1) + self.assertFalse(options == ProcessorOptions(extra_flag_values={"extra_value" : 2})) + self.assertFalse(options == ProcessorOptions(filter=CategoryFilter(["-"]))) + self.assertFalse(options == ProcessorOptions(git_commit="commit2")) + self.assertFalse(options == ProcessorOptions(output_format="emacs")) + self.assertFalse(options == ProcessorOptions(verbosity=2)) + + def test_ne(self): + """Test __ne__ inequality function.""" + # != calls __ne__. + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. 
+ self.assertFalse(ProcessorOptions() != ProcessorOptions()) + + def test_is_reportable(self): + """Test is_reportable().""" + filter = CategoryFilter(["-xyz"]) + options = ProcessorOptions(filter=filter, verbosity=3) + + # Test verbosity + self.assertTrue(options.is_reportable("abc", 3)) + self.assertFalse(options.is_reportable("abc", 2)) + + # Test filter + self.assertTrue(options.is_reportable("xy", 3)) + self.assertFalse(options.is_reportable("xyz", 3)) + + +class WebKitArgumentDefaultsTest(unittest.TestCase): + + """Tests validity of default arguments used by check-webkit-style.""" + + def defaults(self): + return style.webkit_argument_defaults() + + def test_filter_rules(self): + defaults = self.defaults() + already_seen = [] + all_categories = style.style_categories() + for rule in defaults.filter_rules: + # Check no leading or trailing white space. + self.assertEquals(rule, rule.strip()) + # All categories are on by default, so defaults should + # begin with -. + self.assertTrue(rule.startswith('-')) + self.assertTrue(rule[1:] in all_categories) + # Check no rule occurs twice. + self.assertFalse(rule in already_seen) + already_seen.append(rule) + + def test_defaults(self): + """Check that default arguments are valid.""" + defaults = self.defaults() + + # FIXME: We should not need to call parse() to determine + # whether the default arguments are valid. + parser = style.ArgumentParser(defaults) + # No need to test the return value here since we test parse() + # on valid arguments elsewhere. + parser.parse([]) # arguments valid: no error or SystemExit + + +class ArgumentPrinterTest(unittest.TestCase): + + """Tests the ArgumentPrinter class.""" + + _printer = style.ArgumentPrinter() + + def _create_options(self, output_format='emacs', verbosity=3, + filter_rules=[], git_commit=None, + extra_flag_values={}): + filter = CategoryFilter(filter_rules) + return style.ProcessorOptions(output_format, verbosity, filter, + git_commit, extra_flag_values) + + def test_to_flag_string(self): + options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git', + {'a': 0, 'z': 1}) + self.assertEquals('--a=0 --filter=+foo,-bar --git-commit=git ' + '--output=vs7 --verbose=5 --z=1', + self._printer.to_flag_string(options)) + + # This is to check that --filter and --git-commit do not + # show up when not user-specified. + options = self._create_options() + self.assertEquals('--output=emacs --verbose=3', + self._printer.to_flag_string(options)) + + +class ArgumentParserTest(unittest.TestCase): + + """Test the ArgumentParser class.""" + + def _parse(self): + """Return a default parse() function for testing.""" + return self._create_parser().parse + + def _create_defaults(self, default_output_format='vs7', + default_verbosity=3, + default_filter_rules=['-', '+whitespace']): + """Return a default ArgumentDefaults instance for testing.""" + return style.ArgumentDefaults(default_output_format, + default_verbosity, + default_filter_rules) + + def _create_parser(self, defaults=None): + """Return an ArgumentParser instance for testing.""" + def create_usage(_defaults): + """Return a usage string for testing.""" + return "usage" + + def doc_print(message): + # We do not want the usage string or style categories + # to print during unit tests, so print nothing. 
+ return + + if defaults is None: + defaults = self._create_defaults() + + return style.ArgumentParser(defaults, create_usage, doc_print) + + def test_parse_documentation(self): + parse = self._parse() + + # FIXME: Test both the printing of the usage string and the + # filter categories help. + + # Request the usage string. + self.assertRaises(SystemExit, parse, ['--help']) + # Request default filter rules and available style categories. + self.assertRaises(SystemExit, parse, ['--filter=']) + + def test_parse_bad_values(self): + parse = self._parse() + + # Pass an unsupported argument. + self.assertRaises(SystemExit, parse, ['--bad']) + + self.assertRaises(ValueError, parse, ['--verbose=bad']) + self.assertRaises(ValueError, parse, ['--verbose=0']) + self.assertRaises(ValueError, parse, ['--verbose=6']) + parse(['--verbose=1']) # works + parse(['--verbose=5']) # works + + self.assertRaises(ValueError, parse, ['--output=bad']) + parse(['--output=vs7']) # works + + # Pass a filter rule not beginning with + or -. + self.assertRaises(ValueError, parse, ['--filter=foo']) + parse(['--filter=+foo']) # works + # Pass files and git-commit at the same time. + self.assertRaises(SystemExit, parse, ['--git-commit=', 'file.txt']) + # Pass an extra flag already supported. + self.assertRaises(ValueError, parse, [], ['filter=']) + parse([], ['extra=']) # works + # Pass an extra flag with typo. + self.assertRaises(SystemExit, parse, ['--extratypo='], ['extra=']) + parse(['--extra='], ['extra=']) # works + self.assertRaises(ValueError, parse, [], ['extra=', 'extra=']) + + + def test_parse_default_arguments(self): + parse = self._parse() + + (files, options) = parse([]) + + self.assertEquals(files, []) + + self.assertEquals(options.output_format, 'vs7') + self.assertEquals(options.verbosity, 3) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace"])) + self.assertEquals(options.git_commit, None) + + def test_parse_explicit_arguments(self): + parse = self._parse() + + # Pass non-default explicit values. + (files, options) = parse(['--output=emacs']) + self.assertEquals(options.output_format, 'emacs') + (files, options) = parse(['--verbose=4']) + self.assertEquals(options.verbosity, 4) + (files, options) = parse(['--git-commit=commit']) + self.assertEquals(options.git_commit, 'commit') + (files, options) = parse(['--filter=+foo,-bar']) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace", "+foo", "-bar"])) + # Spurious white space in filter rules. + (files, options) = parse(['--filter=+foo ,-bar']) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace", "+foo", "-bar"])) + + # Pass extra flag values. + (files, options) = parse(['--extra'], ['extra']) + self.assertEquals(options.extra_flag_values, {'--extra': ''}) + (files, options) = parse(['--extra='], ['extra=']) + self.assertEquals(options.extra_flag_values, {'--extra': ''}) + (files, options) = parse(['--extra=x'], ['extra=']) + self.assertEquals(options.extra_flag_values, {'--extra': 'x'}) + + def test_parse_files(self): + parse = self._parse() + + (files, options) = parse(['foo.cpp']) + self.assertEquals(files, ['foo.cpp']) + + # Pass multiple files. 
+ (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp']) + self.assertEquals(files, ['foo.cpp', 'bar.cpp']) + + +class ProcessorDispatcherSkipTest(unittest.TestCase): + + """Tests the "should skip" methods of the ProcessorDispatcher class.""" + + def test_should_skip_with_warning(self): + """Test should_skip_with_warning().""" + dispatcher = ProcessorDispatcher() + + # Check a non-skipped file. + self.assertFalse(dispatcher.should_skip_with_warning("foo.txt")) + + # Check skipped files. + paths_to_skip = [ + "gtk2drawing.c", + "gtk2drawing.h", + "JavaScriptCore/qt/api/qscriptengine_p.h", + "WebCore/platform/gtk/gtk2drawing.c", + "WebCore/platform/gtk/gtk2drawing.h", + "WebKit/gtk/tests/testatk.c", + "WebKit/qt/Api/qwebpage.h", + "WebKit/qt/tests/qwebsecurityorigin/tst_qwebsecurityorigin.cpp", + ] + + for path in paths_to_skip: + self.assertTrue(dispatcher.should_skip_with_warning(path), + "Checking: " + path) + + def test_should_skip_without_warning(self): + """Test should_skip_without_warning().""" + dispatcher = ProcessorDispatcher() + + # Check a non-skipped file. + self.assertFalse(dispatcher.should_skip_without_warning("foo.txt")) + + # Check skipped files. + paths_to_skip = [ + # LayoutTests folder + "LayoutTests/foo.txt", + ] + + for path in paths_to_skip: + self.assertTrue(dispatcher.should_skip_without_warning(path), + "Checking: " + path) + + +class ProcessorDispatcherDispatchTest(unittest.TestCase): + + """Tests dispatch_processor() method of ProcessorDispatcher class.""" + + def mock_handle_style_error(self): + pass + + def dispatch_processor(self, file_path): + """Call dispatch_processor() with the given file path.""" + dispatcher = ProcessorDispatcher() + processor = dispatcher.dispatch_processor(file_path, + self.mock_handle_style_error, + verbosity=3) + return processor + + def assert_processor_none(self, file_path): + """Assert that the dispatched processor is None.""" + processor = self.dispatch_processor(file_path) + self.assertTrue(processor is None, 'Checking: "%s"' % file_path) + + def assert_processor(self, file_path, expected_class): + """Assert the type of the dispatched processor.""" + processor = self.dispatch_processor(file_path) + got_class = processor.__class__ + self.assertEquals(got_class, expected_class, + 'For path "%(file_path)s" got %(got_class)s when ' + "expecting %(expected_class)s." + % {"file_path": file_path, + "got_class": got_class, + "expected_class": expected_class}) + + def assert_processor_cpp(self, file_path): + """Assert that the dispatched processor is a CppProcessor.""" + self.assert_processor(file_path, CppProcessor) + + def assert_processor_text(self, file_path): + """Assert that the dispatched processor is a TextProcessor.""" + self.assert_processor(file_path, TextProcessor) + + def test_cpp_paths(self): + """Test paths that should be checked as C++.""" + paths = [ + "-", + "foo.c", + "foo.cpp", + "foo.h", + ] + + for path in paths: + self.assert_processor_cpp(path) + + # Check processor attributes on a typical input. + file_base = "foo" + file_extension = "c" + file_path = file_base + "." + file_extension + self.assert_processor_cpp(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_extension, file_extension) + self.assertEquals(processor.file_path, file_path) + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + self.assertEquals(processor.verbosity, 3) + # Check "-" for good measure. 
+ file_base = "-" + file_extension = "" + file_path = file_base + self.assert_processor_cpp(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_extension, file_extension) + self.assertEquals(processor.file_path, file_path) + + def test_text_paths(self): + """Test paths that should be checked as text.""" + paths = [ + "ChangeLog", + "foo.css", + "foo.html", + "foo.idl", + "foo.js", + "foo.mm", + "foo.php", + "foo.pm", + "foo.py", + "foo.txt", + "FooChangeLog.bak", + "WebCore/ChangeLog", + "WebCore/inspector/front-end/inspector.js", + "WebKitTools/Scripts/check-webkit=style", + "WebKitTools/Scripts/modules/text_style.py", + ] + + for path in paths: + self.assert_processor_text(path) + + # Check processor attributes on a typical input. + file_base = "foo" + file_extension = "css" + file_path = file_base + "." + file_extension + self.assert_processor_text(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_path, file_path) + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + + def test_none_paths(self): + """Test paths that have no file type..""" + paths = [ + "Makefile", + "foo.png", + "foo.exe", + ] + + for path in paths: + self.assert_processor_none(path) + + +class StyleCheckerTest(unittest.TestCase): + + """Test the StyleChecker class. + + Attributes: + error_messages: A string containing all of the warning messages + written to the mock_stderr_write method of + this class. + + """ + + def _mock_stderr_write(self, message): + pass + + def _style_checker(self, options): + return StyleChecker(options, self._mock_stderr_write) + + def test_init(self): + """Test __init__ constructor.""" + options = ProcessorOptions() + style_checker = self._style_checker(options) + + self.assertEquals(style_checker.error_count, 0) + self.assertEquals(style_checker.options, options) + + +class StyleCheckerCheckFileTest(unittest.TestCase): + + """Test the check_file() method of the StyleChecker class. + + The check_file() method calls its process_file parameter when + given a file that should not be skipped. + + The "got_*" attributes of this class are the parameters passed + to process_file by calls to check_file() made by this test + class. These attributes allow us to check the parameter values + passed internally to the process_file function. + + Attributes: + got_file_path: The file_path parameter passed by check_file() + to its process_file parameter. + got_handle_style_error: The handle_style_error parameter passed + by check_file() to its process_file + parameter. + got_processor: The processor parameter passed by check_file() to + its process_file parameter. + warning_messages: A string containing all of the warning messages + written to the mock_stderr_write method of + this class. + + """ + def setUp(self): + self.got_file_path = None + self.got_handle_style_error = None + self.got_processor = None + self.warning_messages = "" + + def mock_stderr_write(self, warning_message): + self.warning_messages += warning_message + + def mock_handle_style_error(self): + pass + + def mock_process_file(self, processor, file_path, handle_style_error): + """A mock _process_file(). + + See the documentation for this class for more information + on this function. 
+ + """ + self.got_file_path = file_path + self.got_handle_style_error = handle_style_error + self.got_processor = processor + + def assert_attributes(self, + expected_file_path, + expected_handle_style_error, + expected_processor, + expected_warning_messages): + """Assert that the attributes of this class equal the given values.""" + self.assertEquals(self.got_file_path, expected_file_path) + self.assertEquals(self.got_handle_style_error, expected_handle_style_error) + self.assertEquals(self.got_processor, expected_processor) + self.assertEquals(self.warning_messages, expected_warning_messages) + + def call_check_file(self, file_path): + """Call the check_file() method of a test StyleChecker instance.""" + # Confirm that the attributes are reset. + self.assert_attributes(None, None, None, "") + + # Create a test StyleChecker instance. + # + # The verbosity attribute is the only ProcessorOptions + # attribute that needs to be checked in this test. + # This is because it is the only option is directly + # passed to the constructor of a style processor. + options = ProcessorOptions(verbosity=3) + + style_checker = StyleChecker(options, self.mock_stderr_write) + + style_checker.check_file(file_path, + self.mock_handle_style_error, + self.mock_process_file) + + def test_check_file_on_skip_without_warning(self): + """Test check_file() for a skipped-without-warning file.""" + + file_path = "LayoutTests/foo.txt" + + dispatcher = ProcessorDispatcher() + # Confirm that the input file is truly a skipped-without-warning file. + self.assertTrue(dispatcher.should_skip_without_warning(file_path)) + + # Check the outcome. + self.call_check_file(file_path) + self.assert_attributes(None, None, None, "") + + def test_check_file_on_skip_with_warning(self): + """Test check_file() for a skipped-with-warning file.""" + + file_path = "gtk2drawing.c" + + dispatcher = ProcessorDispatcher() + # Check that the input file is truly a skipped-with-warning file. + self.assertTrue(dispatcher.should_skip_with_warning(file_path)) + + # Check the outcome. + self.call_check_file(file_path) + self.assert_attributes(None, None, None, + 'Ignoring "gtk2drawing.c": this file is exempt from the style guide.\n') + + def test_check_file_on_non_skipped(self): + + # We use a C++ file since by using a CppProcessor, we can check + # that all of the possible information is getting passed to + # process_file (in particular, the verbosity). + file_base = "foo" + file_extension = "cpp" + file_path = file_base + "." + file_extension + + dispatcher = ProcessorDispatcher() + # Check that the input file is truly a C++ file. + self.assertEquals(dispatcher._file_type(file_path), style.FileType.CPP) + + # Check the outcome. + self.call_check_file(file_path) + + expected_processor = CppProcessor(file_path, file_extension, self.mock_handle_style_error, 3) + + self.assert_attributes(file_path, + self.mock_handle_style_error, + expected_processor, + "") + + +if __name__ == '__main__': + import sys + + unittest.main() + diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers.py b/WebKitTools/Scripts/webkitpy/style/error_handlers.py new file mode 100644 index 0000000..54b1d76 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/error_handlers.py @@ -0,0 +1,154 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines style error handler classes. + +A style error handler is a function to call when a style error is +found. Style error handlers can also have state. A class that represents +a style error handler should implement the following methods. + +Methods: + + __call__(self, line_number, category, confidence, message): + + Handle the occurrence of a style error. + + Check whether the error is reportable. If so, report the details. + + Args: + line_number: The integer line number of the line containing the error. + category: The name of the category of the error, for example + "whitespace/newline". + confidence: An integer between 1-5 that represents the level of + confidence in the error. The value 5 means that we are + certain of the problem, and the value 1 means that it + could be a legitimate construct. + message: The error message to report. + +""" + + +import sys + + +class DefaultStyleErrorHandler(object): + + """The default style error handler.""" + + def __init__(self, file_path, options, increment_error_count, + stderr_write=None): + """Create a default style error handler. + + Args: + file_path: The path to the file containing the error. This + is used for reporting to the user. + options: A ProcessorOptions instance. + increment_error_count: A function that takes no arguments and + increments the total count of reportable + errors. + stderr_write: A function that takes a string as a parameter + and that is called when a style error occurs. + Defaults to sys.stderr.write. This should be + used only for unit tests. + + """ + if stderr_write is None: + stderr_write = sys.stderr.write + + self._file_path = file_path + self._increment_error_count = increment_error_count + self._options = options + self._stderr_write = stderr_write + + def __call__(self, line_number, category, confidence, message): + """Handle the occurrence of a style error. + + See the docstring of this module for more information. 
+ + """ + if not self._options.is_reportable(category, confidence): + return + + self._increment_error_count() + + if self._options.output_format == 'vs7': + format_string = "%s(%s): %s [%s] [%d]\n" + else: + format_string = "%s:%s: %s [%s] [%d]\n" + + self._stderr_write(format_string % (self._file_path, + line_number, + message, + category, + confidence)) + + +class PatchStyleErrorHandler(object): + + """The style error function for patch files.""" + + def __init__(self, diff, file_path, options, increment_error_count, + stderr_write): + """Create a patch style error handler for the given path. + + Args: + diff: A DiffFile instance. + Other arguments: see the DefaultStyleErrorHandler.__init__() + documentation for the other arguments. + + """ + self._diff = diff + self._default_error_handler = DefaultStyleErrorHandler(file_path, + options, + increment_error_count, + stderr_write) + + # The line numbers of the modified lines. This is set lazily. + self._line_numbers = set() + + def _get_line_numbers(self): + """Return the line numbers of the modified lines.""" + if not self._line_numbers: + for line in self._diff.lines: + # When deleted line is not set, it means that + # the line is newly added. + if not line[0]: + self._line_numbers.add(line[1]) + + return self._line_numbers + + def __call__(self, line_number, category, confidence, message): + """Handle the occurrence of a style error. + + This function does not report errors occurring in lines not + modified or added. + + Args: see the DefaultStyleErrorHandler.__call__() documentation. + + """ + if line_number not in self._get_line_numbers(): + # Then the error is not reportable. + return + + self._default_error_handler(line_number, category, confidence, + message) + diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py new file mode 100644 index 0000000..6a91ff2 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py @@ -0,0 +1,163 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for error_handlers.py.""" + + +import unittest + +from .. 
style_references import parse_patch +from checker import ProcessorOptions +from error_handlers import DefaultStyleErrorHandler +from error_handlers import PatchStyleErrorHandler + + +class StyleErrorHandlerTestBase(unittest.TestCase): + + def setUp(self): + self._error_messages = "" + self._error_count = 0 + + def _mock_increment_error_count(self): + self._error_count += 1 + + def _mock_stderr_write(self, message): + self._error_messages += message + + +class DefaultStyleErrorHandlerTest(StyleErrorHandlerTestBase): + + """Tests DefaultStyleErrorHandler class.""" + + _category = "whitespace/tab" + + def _options(self, output_format): + return ProcessorOptions(verbosity=3, output_format=output_format) + + def _error_handler(self, options): + file_path = "foo.h" + return DefaultStyleErrorHandler(file_path, + options, + self._mock_increment_error_count, + self._mock_stderr_write) + + def _prepare_call(self, output_format="emacs"): + """Return options after initializing.""" + options = self._options(output_format) + + # Test that count is initialized to zero. + self.assertEquals(0, self._error_count) + self.assertEquals("", self._error_messages) + + return options + + def _call_error_handler(self, options, confidence): + """Handle an error with given confidence.""" + handle_error = self._error_handler(options) + + line_number = 100 + message = "message" + + handle_error(line_number, self._category, confidence, message) + + def test_call_non_reportable(self): + """Test __call__() method with a non-reportable error.""" + confidence = 1 + options = self._prepare_call() + + # Confirm the error is not reportable. + self.assertFalse(options.is_reportable(self._category, confidence)) + + self._call_error_handler(options, confidence) + + self.assertEquals(0, self._error_count) + self.assertEquals("", self._error_messages) + + def test_call_reportable_emacs(self): + """Test __call__() method with a reportable error and emacs format.""" + confidence = 5 + options = self._prepare_call("emacs") + + self._call_error_handler(options, confidence) + + self.assertEquals(1, self._error_count) + self.assertEquals(self._error_messages, + "foo.h:100: message [whitespace/tab] [5]\n") + + def test_call_reportable_vs7(self): + """Test __call__() method with a reportable error and vs7 format.""" + confidence = 5 + options = self._prepare_call("vs7") + + self._call_error_handler(options, confidence) + + self.assertEquals(1, self._error_count) + self.assertEquals(self._error_messages, + "foo.h(100): message [whitespace/tab] [5]\n") + + +class PatchStyleErrorHandlerTest(StyleErrorHandlerTestBase): + + """Tests PatchStyleErrorHandler class.""" + + file_path = "__init__.py" + + patch_string = """diff --git a/__init__.py b/__init__.py +index ef65bee..e3db70e 100644 +--- a/__init__.py ++++ b/__init__.py +@@ -1 +1,2 @@ + # Required for Python to search this directory for module files ++# New line + +""" + + def test_call(self): + patch_files = parse_patch(self.patch_string) + diff = patch_files[self.file_path] + + options = ProcessorOptions(verbosity=3) + + handle_error = PatchStyleErrorHandler(diff, + self.file_path, + options, + self._mock_increment_error_count, + self._mock_stderr_write) + + category = "whitespace/tab" + confidence = 5 + message = "message" + + # Confirm error is reportable. + self.assertTrue(options.is_reportable(category, confidence)) + + # Confirm error count initialized to zero. + self.assertEquals(0, self._error_count) + + # Test error in unmodified line (error count does not increment). 
+ handle_error(1, category, confidence, message) + self.assertEquals(0, self._error_count) + + # Test error in modified line (error count increments). + handle_error(2, category, confidence, message) + self.assertEquals(1, self._error_count) + diff --git a/WebKitTools/Scripts/webkitpy/style/processors/__init__.py b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/WebKitTools/Scripts/modules/cpp_style.py b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py index d8ca8d1..e1f41a4 100644 --- a/WebKitTools/Scripts/modules/cpp_style.py +++ b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py @@ -4,6 +4,7 @@ # Copyright (C) 2009 Google Inc. All rights reserved. # Copyright (C) 2009 Torch Mobile Inc. # Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -34,21 +35,9 @@ # This is the modified version of Google's cpplint. The original code is # http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py -"""Does WebKit-lint on c++ files. - -The goal of this script is to identify places in the code that *may* -be in non-compliance with WebKit style. It does not attempt to fix -up these problems -- the point is to educate. It does also not -attempt to find all problems, or to ensure that everything it does -find is legitimately a problem. - -In particular, we can get very confused by /* and // inside strings! -We do a small hack, which is to ignore //'s with "'s after them on the -same line, but it is far from perfect (in either direction). -""" +"""Support for check-webkit-style.""" import codecs -import getopt import math # for log import os import os.path @@ -59,122 +48,6 @@ import sys import unicodedata -_USAGE = """ -Syntax: %(program_name)s [--verbose=#] [--output=vs7] [--filter=-x,+y,...] - <file> [file] ... - - The style guidelines this tries to follow are those in - http://webkit.org/coding/coding-style.html - - Every problem is given a confidence score from 1-5, with 5 meaning we are - certain of the problem, and 1 meaning it could be a legitimate construct. - This will miss some errors, and is not a substitute for a code review. - - To prevent specific lines from being linted, add a '// NOLINT' comment to the - end of the line. - - The files passed in will be linted; at least one file must be provided. - Linted extensions are .cpp, .c and .h. Other file types will be ignored. - - Flags: - - output=vs7 - By default, the output is formatted to ease emacs parsing. Visual Studio - compatible output (vs7) may also be used. Other formats are unsupported. - - verbose=# - Specify a number 0-5 to restrict errors to certain verbosity levels. - - filter=-x,+y,... - Specify a comma-separated list of category-filters to apply: only - error messages whose category names pass the filters will be printed. - (Category names are printed with the message and look like - "[whitespace/indent]".) Filters are evaluated left to right. - "-FOO" and "FOO" means "do not print categories that start with FOO". - "+FOO" means "do print categories that start with FOO". 
- - Examples: --filter=-whitespace,+whitespace/braces - --filter=whitespace,runtime/printf,+runtime/printf_format - --filter=-,+build/include_what_you_use - - To see a list of all the categories used in %(program_name)s, pass no arg: - --filter= -""" % {'program_name': sys.argv[0]} - -# We categorize each error message we print. Here are the categories. -# We want an explicit list so we can list them all in cpp_style --filter=. -# If you add a new error message with a new category, add it to the list -# here! cpp_style_unittest.py should tell you if you forget to do this. -# \ used for clearer layout -- pylint: disable-msg=C6013 -_ERROR_CATEGORIES = '''\ - build/class - build/deprecated - build/endif_comment - build/forward_decl - build/header_guard - build/include - build/include_order - build/include_what_you_use - build/namespaces - build/printf_format - build/storage_class - build/using_std - legal/copyright - readability/braces - readability/casting - readability/check - readability/comparison_to_zero - readability/constructors - readability/control_flow - readability/fn_size - readability/function - readability/multiline_comment - readability/multiline_string - readability/naming - readability/null - readability/streams - readability/todo - readability/utf8 - runtime/arrays - runtime/casting - runtime/explicit - runtime/int - runtime/init - runtime/invalid_increment - runtime/max_min_macros - runtime/memset - runtime/printf - runtime/printf_format - runtime/references - runtime/rtti - runtime/sizeof - runtime/string - runtime/threadsafe_fn - runtime/virtual - whitespace/blank_line - whitespace/braces - whitespace/comma - whitespace/comments - whitespace/declaration - whitespace/end_of_line - whitespace/ending_newline - whitespace/indent - whitespace/labels - whitespace/line_length - whitespace/newline - whitespace/operators - whitespace/parens - whitespace/semicolon - whitespace/tab - whitespace/todo -''' - -# The default state of the category filter. This is overrided by the --filter= -# flag. By default all errors are on, so only add here categories that should be -# off by default (i.e., categories that must be enabled by the --filter= flags). -# All entries here should start with a '-' or '+', as in the --filter= flag. -_DEFAULT_FILTERS = [] - # Headers that we consider STL headers. _STL_HEADERS = frozenset([ 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception', @@ -242,6 +115,7 @@ for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), _CONFIG_HEADER = 0 _PRIMARY_HEADER = 1 _OTHER_HEADER = 2 +_MOC_HEADER = 3 # The regexp compilation caching is inlined in all regexp functions for @@ -278,6 +152,31 @@ def subn(pattern, replacement, s): return _regexp_compile_cache[pattern].subn(replacement, s) +def up_to_unmatched_closing_paren(s): + """Splits a string into two parts up to first unmatched ')'. + + Args: + s: a string which is a substring of line after '(' + (e.g., "a == (b + c))"). + + Returns: + A pair of strings (prefix before first unmatched ')', + reminder of s after first unmatched ')'), e.g., + up_to_unmatched_closing_paren("a == (b + c)) { ") + returns "a == (b + c)", " {". + Returns None, None if there is no unmatched ')' + + """ + i = 1 + for pos, c in enumerate(s): + if c == '(': + i += 1 + elif c == ')': + i -= 1 + if i == 0: + return s[:pos], s[pos + 1:] + return None, None + class _IncludeState(dict): """Tracks line numbers for includes, and the order in which includes appear. 
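# Illustrative aside, not part of the original patch: a minimal usage sketch of the
# up_to_unmatched_closing_paren() helper added above. The expected values follow the
# helper's own docstring; the import path is an assumption based on the new file
# location (webkitpy/style/processors/cpp.py) and depends on how sys.path is set up.
from webkitpy.style.processors.cpp import up_to_unmatched_closing_paren  # assumed import path

prefix, remainder = up_to_unmatched_closing_paren("a == (b + c)) {")
assert prefix == "a == (b + c)"      # text before the first unmatched ')'
assert remainder == " {"             # text after that ')'
assert up_to_unmatched_closing_paren("f(a, b)") == (None, None)  # no unmatched ')'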
@@ -300,6 +199,7 @@ class _IncludeState(dict): _CONFIG_HEADER: 'WebCore config.h', _PRIMARY_HEADER: 'header this file implements', _OTHER_HEADER: 'other header', + _MOC_HEADER: 'moc file', } _SECTION_NAMES = { _INITIAL_SECTION: "... nothing.", @@ -336,6 +236,8 @@ class _IncludeState(dict): return 'Header file should not contain WebCore config.h.' if header_type == _PRIMARY_HEADER and file_is_header: return 'Header file should not contain itself.' + if header_type == _MOC_HEADER: + return '' error_message = '' if self._section != self._OTHER_SECTION: @@ -366,117 +268,19 @@ class _IncludeState(dict): return error_message -class _CppStyleState(object): - """Maintains module-wide state..""" - - def __init__(self): - self.verbose_level = 1 # global setting. - self.error_count = 0 # global count of reported errors - # filters to apply when emitting error messages - self.filters = _DEFAULT_FILTERS[:] - - # output format: - # "emacs" - format that emacs can parse (default) - # "vs7" - format that Microsoft Visual Studio 7 can parse - self.output_format = 'emacs' - - def set_output_format(self, output_format): - """Sets the output format for errors.""" - self.output_format = output_format - - def set_verbose_level(self, level): - """Sets the module's verbosity, and returns the previous setting.""" - last_verbose_level = self.verbose_level - self.verbose_level = level - return last_verbose_level - - def set_filters(self, filters): - """Sets the error-message filters. - - These filters are applied when deciding whether to emit a given - error message. - - Args: - filters: A string of comma-separated filters (eg "+whitespace/indent"). - Each filter should start with + or -; else we die. - - Raises: - ValueError: The comma-separated filters did not all start with '+' or '-'. - E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" - """ - # Default filters always have less priority than the flag ones. - self.filters = _DEFAULT_FILTERS[:] - for filter in filters.split(','): - clean_filter = filter.strip() - if clean_filter: - self.filters.append(clean_filter) - for filter in self.filters: - if not (filter.startswith('+') or filter.startswith('-')): - raise ValueError('Every filter in --filter must start with ' - '+ or - (%s does not)' % filter) - - def reset_error_count(self): - """Sets the module's error statistic back to zero.""" - self.error_count = 0 - - def increment_error_count(self): - """Bumps the module's error statistic.""" - self.error_count += 1 - - -_cpp_style_state = _CppStyleState() - - -def _output_format(): - """Gets the module's output format.""" - return _cpp_style_state.output_format - - -def _set_output_format(output_format): - """Sets the module's output format.""" - _cpp_style_state.set_output_format(output_format) - - -def _verbose_level(): - """Returns the module's verbosity setting.""" - return _cpp_style_state.verbose_level - - -def _set_verbose_level(level): - """Sets the module's verbosity, and returns the previous setting.""" - return _cpp_style_state.set_verbose_level(level) - - -def _filters(): - """Returns the module's list of output filters, as a list.""" - return _cpp_style_state.filters - - -def _set_filters(filters): - """Sets the module's error-message filters. +class _FunctionState(object): + """Tracks current function name and the number of lines in its body. - These filters are applied when deciding whether to emit a given - error message. + Attributes: + verbosity: The verbosity level to use while checking style. 
- Args: - filters: A string of comma-separated filters (eg "whitespace/indent"). - Each filter should start with + or -; else we die. """ - _cpp_style_state.set_filters(filters) - - -def error_count(): - """Returns the global count of reported errors.""" - return _cpp_style_state.error_count - - -class _FunctionState(object): - """Tracks current function name and the number of lines in its body.""" _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. - def __init__(self): + def __init__(self, verbosity): + self.verbosity = verbosity self.in_a_function = False self.lines_in_function = 0 self.current_function = '' @@ -496,26 +300,25 @@ class _FunctionState(object): if self.in_a_function: self.lines_in_function += 1 - def check(self, error, filename, line_number): + def check(self, error, line_number): """Report if too many lines in function body. Args: error: The function to call with any errors found. - filename: The name of the current file. line_number: The number of the line to check. """ if match(r'T(EST|est)', self.current_function): base_trigger = self._TEST_TRIGGER else: base_trigger = self._NORMAL_TRIGGER - trigger = base_trigger * 2 ** _verbose_level() + trigger = base_trigger * 2 ** self.verbosity if self.lines_in_function > trigger: error_level = int(math.log(self.lines_in_function / base_trigger, 2)) # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... if error_level > 5: error_level = 5 - error(filename, line_number, 'readability/fn_size', error_level, + error(line_number, 'readability/fn_size', error_level, 'Small and focused functions are preferred:' ' %s has %d non-comment lines' ' (error triggered by exceeding %d lines).' % ( @@ -531,6 +334,16 @@ class _IncludeError(Exception): pass +def is_c_or_objective_c(file_extension): + """Return whether the file extension corresponds to C or Objective-C. + + Args: + file_extension: The file extension without the leading dot. + + """ + return file_extension in ['c', 'm'] + + class FileInfo: """Provides utility functions for filenames. @@ -617,59 +430,6 @@ class FileInfo: return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx') -def _should_print_error(category, confidence): - """Returns true iff confidence >= verbose, and category passes filter.""" - # There are two ways we might decide not to print an error message: - # the verbosity level isn't high enough, or the filters filter it out. - if confidence < _cpp_style_state.verbose_level: - return False - - is_filtered = False - for one_filter in _filters(): - if one_filter.startswith('-'): - if category.startswith(one_filter[1:]): - is_filtered = True - elif one_filter.startswith('+'): - if category.startswith(one_filter[1:]): - is_filtered = False - else: - assert False # should have been checked for in set_filter. - if is_filtered: - return False - - return True - - -def error(filename, line_number, category, confidence, message): - """Logs the fact we've found a lint error. - - We log where the error was found, and also our confidence in the error, - that is, how certain we are this is a legitimate style regression, and - not a misidentification or a use that's sometimes justified. - - Args: - filename: The name of the file containing the error. - line_number: The number of the line containing the error. - category: A string used to describe the "category" this bug - falls under: "whitespace", say, or "runtime". Categories - may have a hierarchy separated by slashes: "whitespace/indent". 
- confidence: A number from 1-5 representing a confidence score for - the error, with 5 meaning that we are certain of the problem, - and 1 meaning that it could be a legitimate construct. - message: The error message. - """ - # There are two ways we might decide not to print an error message: - # the verbosity level isn't high enough, or the filters filter it out. - if _should_print_error(category, confidence): - _cpp_style_state.increment_error_count() - if _cpp_style_state.output_format == 'vs7': - sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( - filename, line_number, message, category, confidence)) - else: - sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( - filename, line_number, message, category, confidence)) - - # Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') @@ -737,7 +497,7 @@ def remove_multi_line_comments_from_range(lines, begin, end): lines[i] = '// dummy' -def remove_multi_line_comments(filename, lines, error): +def remove_multi_line_comments(lines, error): """Removes multiline (c-style) comments from lines.""" line_index = 0 while line_index < len(lines): @@ -746,7 +506,7 @@ def remove_multi_line_comments(filename, lines, error): return line_index_end = find_next_multi_line_comment_end(lines, line_index_begin) if line_index_end >= len(lines): - error(filename, line_index_begin + 1, 'readability/multiline_comment', 5, + error(line_index_begin + 1, 'readability/multiline_comment', 5, 'Could not find end of multi-line comment') return remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1) @@ -856,7 +616,7 @@ def close_expression(clean_lines, line_number, pos): return (line, line_number, endpos + 1) -def check_for_copyright(filename, lines, error): +def check_for_copyright(lines, error): """Logs an error if no Copyright message appears at the top of the file.""" # We'll say it should occur by line 10. Don't forget there's a @@ -865,7 +625,7 @@ def check_for_copyright(filename, lines, error): if re.search(r'Copyright', lines[line], re.I): break else: # means no copyright line was found - error(filename, 0, 'legal/copyright', 5, + error(0, 'legal/copyright', 5, 'No copyright message found. ' 'You should have a line: "Copyright [year] <Copyright Owner>"') @@ -882,8 +642,7 @@ def get_header_guard_cpp_variable(filename): """ - fileinfo = FileInfo(filename) - return sub(r'[-./\s]', '_', fileinfo.repository_name()).upper() + '_' + return sub(r'[-.\s]', '_', os.path.basename(filename)) def check_for_header_guard(filename, lines, error): @@ -903,8 +662,6 @@ def check_for_header_guard(filename, lines, error): ifndef = None ifndef_line_number = 0 define = None - endif = None - endif_line_number = 0 for line_number, line in enumerate(lines): line_split = line.split() if len(line_split) >= 2: @@ -915,37 +672,22 @@ def check_for_header_guard(filename, lines, error): ifndef_line_number = line_number if not define and line_split[0] == '#define': define = line_split[1] - # find the last occurrence of #endif, save entire line - if line.startswith('#endif'): - endif = line - endif_line_number = line_number + if define and ifndef: + break if not ifndef or not define or ifndef != define: - error(filename, 0, 'build/header_guard', 5, + error(0, 'build/header_guard', 5, 'No #ifndef header guard found, suggested CPP variable is: %s' % cppvar) return - # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ - # for backward compatibility. 
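# Illustrative aside, not part of the original patch: the rewritten
# get_header_guard_cpp_variable() above derives the expected guard from the file's
# basename only, mapping '-', '.' and whitespace to '_'. The path below is just an
# example; the import path is an assumption based on the new module location.
from webkitpy.style.processors.cpp import get_header_guard_cpp_variable  # assumed import path

assert get_header_guard_cpp_variable("WebCore/html/HTMLDocument.h") == "HTMLDocument_h"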
+ # The guard should be File_h. if ifndef != cppvar: - error_level = 0 - if ifndef != cppvar + '_': - error_level = 5 - - error(filename, ifndef_line_number, 'build/header_guard', error_level, + error(ifndef_line_number, 'build/header_guard', 5, '#ifndef header guard has wrong style, please use: %s' % cppvar) - if endif != ('#endif // %s' % cppvar): - error_level = 0 - if endif != ('#endif // %s' % (cppvar + '_')): - error_level = 5 - - error(filename, endif_line_number, 'build/header_guard', error_level, - '#endif line should be "#endif // %s"' % cppvar) - -def check_for_unicode_replacement_characters(filename, lines, error): +def check_for_unicode_replacement_characters(lines, error): """Logs an error for each line containing Unicode replacement characters. These indicate that either the file contained invalid UTF-8 (likely) @@ -954,21 +696,19 @@ def check_for_unicode_replacement_characters(filename, lines, error): UTF-8 occurred adjacent to a newline. Args: - filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ for line_number, line in enumerate(lines): if u'\ufffd' in line: - error(filename, line_number, 'readability/utf8', 5, + error(line_number, 'readability/utf8', 5, 'Line contains invalid UTF-8 (or Unicode replacement character).') -def check_for_new_line_at_eof(filename, lines, error): +def check_for_new_line_at_eof(lines, error): """Logs an error if there is no newline char at the end of the file. Args: - filename: The name of the current file. lines: An array of strings, each representing a line of the file. error: The function to call with any errors found. """ @@ -978,11 +718,11 @@ def check_for_new_line_at_eof(filename, lines, error): # To verify that the file ends in \n, we just have to make sure the # last-but-two element of lines() exists and is empty. if len(lines) < 3 or lines[-2]: - error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, + error(len(lines) - 2, 'whitespace/ending_newline', 5, 'Could not find a newline character at the end of the file.') -def check_for_multiline_comments_and_strings(filename, clean_lines, line_number, error): +def check_for_multiline_comments_and_strings(clean_lines, line_number, error): """Logs an error if we see /* ... */ or "..." that extend past one line. /* ... */ comments are legit inside macros, for one line. @@ -994,7 +734,6 @@ def check_for_multiline_comments_and_strings(filename, clean_lines, line_number, in this lint program, so we warn about both. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1006,7 +745,7 @@ def check_for_multiline_comments_and_strings(filename, clean_lines, line_number, line = line.replace('\\\\', '') if line.count('/*') > line.count('*/'): - error(filename, line_number, 'readability/multiline_comment', 5, + error(line_number, 'readability/multiline_comment', 5, 'Complex multi-line /*...*/-style comment found. ' 'Lint may give bogus warnings. 
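[Editor's note on the header guard hunk above: the suggested guard is now derived from the basename alone (the "File_h" style) rather than the old path-based PATH_FILE_H_ form, and the #endif comment check is dropped. A minimal sketch of the new naming, with an assumed example path:

    import os
    import re

    def header_guard_variable(filename):
        # Same substitution as the rewritten get_header_guard_cpp_variable().
        return re.sub(r'[-.\s]', '_', os.path.basename(filename))

    print(header_guard_variable('WebCore/dom/Document.h'))  # Document_h
]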
' 'Consider replacing these with //-style comments, ' @@ -1014,7 +753,7 @@ def check_for_multiline_comments_and_strings(filename, clean_lines, line_number, 'or with more clearly structured multi-line comments.') if (line.count('"') - line.count('\\"')) % 2: - error(filename, line_number, 'readability/multiline_string', 5, + error(line_number, 'readability/multiline_string', 5, 'Multi-line string ("...") found. This lint script doesn\'t ' 'do well with such strings, and may give bogus warnings. They\'re ' 'ugly and unnecessary, and you should use concatenation instead".') @@ -1037,7 +776,7 @@ _THREADING_LIST = ( ) -def check_posix_threading(filename, clean_lines, line_number, error): +def check_posix_threading(clean_lines, line_number, error): """Checks for calls to thread-unsafe functions. Much code has been originally written without consideration of @@ -1047,7 +786,6 @@ def check_posix_threading(filename, clean_lines, line_number, error): posix directly). Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1058,7 +796,7 @@ def check_posix_threading(filename, clean_lines, line_number, error): # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 if index >= 0 and (index == 0 or (not line[index - 1].isalnum() and line[index - 1] not in ('_', '.', '>'))): - error(filename, line_number, 'runtime/threadsafe_fn', 2, + error(line_number, 'runtime/threadsafe_fn', 2, 'Consider using ' + multithread_safe_function + '...) instead of ' + single_thread_function + '...) for improved thread safety.') @@ -1070,7 +808,7 @@ _RE_PATTERN_INVALID_INCREMENT = re.compile( r'^\s*\*\w+(\+\+|--);') -def check_invalid_increment(filename, clean_lines, line_number, error): +def check_invalid_increment(clean_lines, line_number, error): """Checks for invalid increment *count++. For example following function: @@ -1081,14 +819,13 @@ def check_invalid_increment(filename, clean_lines, line_number, error): be replaced with ++*count, (*count)++ or *count += 1. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. """ line = clean_lines.elided[line_number] if _RE_PATTERN_INVALID_INCREMENT.match(line): - error(filename, line_number, 'runtime/invalid_increment', 5, + error(line_number, 'runtime/invalid_increment', 5, 'Changing pointer instead of value (or unused value of operator*).') @@ -1117,19 +854,18 @@ class _ClassState(object): def __init__(self): self.classinfo_stack = [] - def check_finished(self, filename, error): + def check_finished(self, error): """Checks that all classes have been completely parsed. Call this when all lines in a file have been processed. Args: - filename: The name of the current file. error: The function to call with any errors found. """ if self.classinfo_stack: # Note: This test can result in false positives if #ifdef constructs # get in the way of brace matching. See the testBuildClass test in # cpp_style_unittest.py for an example of this. 
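[Editor's note: check_invalid_increment() above is unchanged apart from losing the filename argument. For context, this is how its pattern behaves on a few assumed sample lines:

    import re

    _INVALID_INCREMENT = re.compile(r'^\s*\*\w+(\+\+|--);')

    # "*count++;" increments the pointer and discards the dereferenced value;
    # the accepted spellings do not match the pattern.
    for sample in ['*count++;', '(*count)++;', '++*count;', '*count += 1;']:
        print(sample, bool(_INVALID_INCREMENT.match(sample)))
]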
- error(filename, self.classinfo_stack[0].line_number, 'build/class', 5, + error(self.classinfo_stack[0].line_number, 'build/class', 5, 'Failed to find complete declaration of class %s' % self.classinfo_stack[0].name) @@ -1144,7 +880,7 @@ class _FileState(object): def did_inside_namespace_indent_warning(self): return self._did_inside_namespace_indent_warning -def check_for_non_standard_constructs(filename, clean_lines, line_number, +def check_for_non_standard_constructs(clean_lines, line_number, class_state, error): """Logs an error if we see certain non-ANSI constructs ignored by gcc-2. @@ -1165,31 +901,30 @@ def check_for_non_standard_constructs(filename, clean_lines, line_number, is very convenient to do so while checking for gcc-2 compliance. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. class_state: A _ClassState instance which maintains information about the current stack of nested class declarations being parsed. - error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message + error: A callable to which errors are reported, which takes parameters: + line number, error level, and message """ # Remove comments from the line, but leave in strings for now. line = clean_lines.lines[line_number] if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): - error(filename, line_number, 'runtime/printf_format', 3, + error(line_number, 'runtime/printf_format', 3, '%q in format strings is deprecated. Use %ll instead.') if search(r'printf\s*\(.*".*%\d+\$', line): - error(filename, line_number, 'runtime/printf_format', 2, + error(line_number, 'runtime/printf_format', 2, '%N$ formats are unconventional. Try rewriting to avoid them.') # Remove escaped backslashes before looking for undefined escapes. line = line.replace('\\\\', '') if search(r'("|\').*\\(%|\[|\(|{)', line): - error(filename, line_number, 'build/printf_format', 3, + error(line_number, 'build/printf_format', 3, '%, [, (, and { are undefined character escapes. Unescape them.') # For the rest, work with both comments and strings removed. @@ -1200,19 +935,19 @@ def check_for_non_standard_constructs(filename, clean_lines, line_number, r'|schar|u?int8|u?int16|u?int32|u?int64)' r'\s+(auto|register|static|extern|typedef)\b', line): - error(filename, line_number, 'build/storage_class', 5, + error(line_number, 'build/storage_class', 5, 'Storage class (static, extern, typedef, etc) should be first.') if match(r'\s*#\s*endif\s*[^/\s]+', line): - error(filename, line_number, 'build/endif_comment', 5, + error(line_number, 'build/endif_comment', 5, 'Uncommented text after #endif is non-standard. Use a comment.') if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): - error(filename, line_number, 'build/forward_decl', 5, + error(line_number, 'build/forward_decl', 5, 'Inner-style forward declarations are invalid. Remove this line.') if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): - error(filename, line_number, 'build/deprecated', 3, + error(line_number, 'build/deprecated', 3, '>? and <? 
(max and min) operators are non-standard and deprecated.') # Track class entry and exit, and attempt to find cases within the @@ -1262,7 +997,7 @@ def check_for_non_standard_constructs(filename, clean_lines, line_number, and args.group(1) != 'void' and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname), args.group(1).strip())): - error(filename, line_number, 'runtime/explicit', 5, + error(line_number, 'runtime/explicit', 5, 'Single-argument constructors should be marked explicit.') # Look for methods declared virtual. @@ -1287,7 +1022,7 @@ def check_for_non_standard_constructs(filename, clean_lines, line_number, if ((classinfo.virtual_method_line_number is not None) and (not classinfo.has_virtual_destructor) and (not classinfo.is_derived)): # Only warn for base classes - error(filename, classinfo.line_number, 'runtime/virtual', 4, + error(classinfo.line_number, 'runtime/virtual', 4, 'The class %s probably needs a virtual destructor due to ' 'having virtual method(s), one declared at line %d.' % (classinfo.name, classinfo.virtual_method_line_number)) @@ -1295,11 +1030,10 @@ def check_for_non_standard_constructs(filename, clean_lines, line_number, classinfo.brace_depth = brace_depth -def check_spacing_for_function_call(filename, line, line_number, error): +def check_spacing_for_function_call(line, line_number, error): """Checks for the correctness of various spacing around function calls. Args: - filename: The name of the current file. line: The text of the line to check. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1340,19 +1074,19 @@ def check_spacing_for_function_call(filename, line, line_number, error): # Ignore pointers/references to arrays. and not search(r' \([^)]+\)\[[^\]]+\]', function_call)): if search(r'\w\s*\([ \t](?!\s*\\$)', function_call): # a ( used for a fn call - error(filename, line_number, 'whitespace/parens', 4, + error(line_number, 'whitespace/parens', 4, 'Extra space after ( in function call') elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call): - error(filename, line_number, 'whitespace/parens', 2, + error(line_number, 'whitespace/parens', 2, 'Extra space after (') if (search(r'\w\s+\(', function_call) and not search(r'#\s*define|typedef', function_call)): - error(filename, line_number, 'whitespace/parens', 4, + error(line_number, 'whitespace/parens', 4, 'Extra space before ( in function call') # If the ) is followed only by a newline or a { + newline, assume it's # part of a control statement (if/while/etc), and don't complain if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call): - error(filename, line_number, 'whitespace/parens', 2, + error(line_number, 'whitespace/parens', 2, 'Extra space before )') @@ -1371,8 +1105,7 @@ def is_blank_line(line): return not line or line.isspace() -def check_for_function_lengths(filename, clean_lines, line_number, - function_state, error): +def check_for_function_lengths(clean_lines, line_number, function_state, error): """Reports for long function bodies. For an overview why this is done, see: @@ -1388,7 +1121,6 @@ def check_for_function_lengths(filename, clean_lines, line_number, NOLINT *on the last line of a function* disables this check. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. function_state: Current function name and lines in body so far. 
@@ -1431,17 +1163,17 @@ def check_for_function_lengths(filename, clean_lines, line_number, break if not body_found: # No body for the function (or evidence of a non-function) was found. - error(filename, line_number, 'readability/fn_size', 5, + error(line_number, 'readability/fn_size', 5, 'Lint failed to find start of function body.') elif match(r'^\}\s*$', line): # function end if not search(r'\bNOLINT\b', raw_line): - function_state.check(error, filename, line_number) + function_state.check(error, line_number) function_state.end() elif not match(r'^\s*$', line): function_state.count() # Count non-blank/non-comment lines. -def check_spacing(filename, clean_lines, line_number, error): +def check_spacing(file_extension, clean_lines, line_number, error): """Checks for the correctness of various spacing issues in the code. Things we check for: spaces around operators, spaces after @@ -1451,7 +1183,7 @@ def check_spacing(filename, clean_lines, line_number, error): blank lines in a row. Args: - filename: The name of the current file. + file_extension: The current file extension, without the leading dot. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1502,7 +1234,7 @@ def check_spacing(filename, clean_lines, line_number, error): or match(r' {4}:', previous_line)) if not exception: - error(filename, line_number, 'whitespace/blank_line', 2, + error(line_number, 'whitespace/blank_line', 2, 'Blank line at the start of a code block. Is this needed?') # This doesn't ignore whitespace at the end of a namespace block # because that is too hard without pairing open/close braces; @@ -1523,7 +1255,7 @@ def check_spacing(filename, clean_lines, line_number, error): and match(r'\s*}', next_line) and next_line.find('namespace') == -1 and next_line.find('} else ') == -1): - error(filename, line_number, 'whitespace/blank_line', 3, + error(line_number, 'whitespace/blank_line', 3, 'Blank line at the end of a code block. Is this needed?') # Next, we complain if there's a comment too near the text @@ -1532,14 +1264,14 @@ def check_spacing(filename, clean_lines, line_number, error): # Check if the // may be in quotes. If so, ignore it # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes - # Allow one space for new scopes, two spaces otherwise: - if (not match(r'^\s*{ //', line) - and ((comment_position >= 1 - and line[comment_position-1] not in string.whitespace) + # Allow one space before end of line comment. 
+ if (not match(r'^\s*$', line[:comment_position]) + and (comment_position >= 1 + and ((line[comment_position - 1] not in string.whitespace) or (comment_position >= 2 - and line[comment_position-2] not in string.whitespace))): - error(filename, line_number, 'whitespace/comments', 2, - 'At least two spaces is best between code and comments') + and line[comment_position - 2] in string.whitespace)))): + error(line_number, 'whitespace/comments', 5, + 'One space before end of line comments') # There should always be a space between the // and the comment commentend = comment_position + 2 if commentend < len(line) and not line[commentend] == ' ': @@ -1551,7 +1283,7 @@ def check_spacing(filename, clean_lines, line_number, error): matched = (search(r'[=/-]{4,}\s*$', line[commentend:]) or search(r'^/+ ', line[commentend:])) if not matched: - error(filename, line_number, 'whitespace/comments', 4, + error(line_number, 'whitespace/comments', 4, 'Should have a space between // and comment') line = clean_lines.elided[line_number] # get rid of comments and strings @@ -1563,7 +1295,7 @@ def check_spacing(filename, clean_lines, line_number, error): if match(r'\s*#\s*(?:include|import)', line): return if search(r'[\w.]=[\w.]', line): - error(filename, line_number, 'whitespace/operators', 4, + error(line_number, 'whitespace/operators', 4, 'Missing spaces around =') # FIXME: It's not ok to have spaces around binary operators like . @@ -1581,19 +1313,19 @@ def check_spacing(filename, clean_lines, line_number, error): if not search(r'<[^<]*,\s*$', line): # template params spill matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line) if matched: - error(filename, line_number, 'whitespace/operators', 3, + error(line_number, 'whitespace/operators', 3, 'Missing spaces around %s' % matched.group(1)) # There shouldn't be space around unary operators matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) if matched: - error(filename, line_number, 'whitespace/operators', 4, + error(line_number, 'whitespace/operators', 4, 'Extra space for operator %s' % matched.group(1)) # A pet peeve of mine: no spaces after an if, while, switch, or for matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line) if matched: - error(filename, line_number, 'whitespace/parens', 5, + error(line_number, 'whitespace/parens', 5, 'Missing space before ( in %s' % matched.group(1)) # For if/for/foreach/while/switch, the left and right parens should be @@ -1601,79 +1333,92 @@ def check_spacing(filename, clean_lines, line_number, error): # there should either be zero or one spaces inside the parens. # We don't want: "if ( foo)" or "if ( foo )". # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. 
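[Editor's note: the comment-spacing hunk above replaces the old "at least two spaces before //" rule with a WebKit-style "exactly one space" rule. A condensed sketch of the new condition; handling of "//" inside quoted strings is omitted and the helper name is mine.

    import re
    import string

    def bad_end_of_line_comment_spacing(line):
        # Comments that start a line are exempt; otherwise exactly one space
        # must separate the code from "//".
        comment_position = line.find('//')
        if comment_position == -1 or re.match(r'^\s*$', line[:comment_position]):
            return False
        return (line[comment_position - 1] not in string.whitespace
                or (comment_position >= 2
                    and line[comment_position - 2] in string.whitespace))

    for sample in ['return 0; // fine', 'return 0;// no space', 'return 0;  // two spaces']:
        print(sample, bad_end_of_line_comment_spacing(sample))
]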
- matched = search(r'\b(if|for|foreach|while|switch)\s*\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$', - line) + matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<reminder>.*)$', line) if matched: - if len(matched.group(2)) != len(matched.group(4)): - if not (matched.group(3) == ';' - and len(matched.group(2)) == 1 + len(matched.group(4)) - or not matched.group(2) and search(r'\bfor\s*\(.*; \)', line)): - error(filename, line_number, 'whitespace/parens', 5, - 'Mismatching spaces inside () in %s' % matched.group(1)) - if not len(matched.group(2)) in [0, 1]: - error(filename, line_number, 'whitespace/parens', 5, - 'Should have zero or one spaces inside ( and ) in %s' % - matched.group(1)) + statement = matched.group('statement') + condition, rest = up_to_unmatched_closing_paren(matched.group('reminder')) + if condition is not None: + condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition) + if condition_match: + n_leading = len(condition_match.group('leading')) + n_trailing = len(condition_match.group('trailing')) + if n_leading != n_trailing: + for_exception = statement == 'for' and ( + (condition.startswith(' ;') and n_trailing == 0) or + (condition.endswith('; ') and n_leading == 0)) + if not for_exception: + error(line_number, 'whitespace/parens', 5, + 'Mismatching spaces inside () in %s' % statement) + if n_leading > 1: + error(line_number, 'whitespace/parens', 5, + 'Should have zero or one spaces inside ( and ) in %s' % + statement) + + # Do not check for more than one command in macros + in_macro = match(r'\s*#define', line) + if not in_macro and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest): + error(line_number, 'whitespace/parens', 4, + 'More than one command on the same line in %s' % statement) # You should always have a space after a comma (either as fn arg or operator) if search(r',[^\s]', line): - error(filename, line_number, 'whitespace/comma', 3, + error(line_number, 'whitespace/comma', 3, 'Missing space after ,') - if filename.endswith('.cpp'): + if file_extension == 'cpp': # C++ should have the & or * beside the type not the variable name. matched = match(r'\s*\w+(?<!\breturn)\s+(?P<pointer_operator>\*|\&)\w+', line) if matched: - error(filename, line_number, 'whitespace/declaration', 3, + error(line_number, 'whitespace/declaration', 3, 'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip())) - elif filename.endswith('.c'): + elif file_extension == 'c': # C Pointer declaration should have the * beside the variable not the type name. matched = search(r'^\s*\w+\*\s+\w+', line) if matched: - error(filename, line_number, 'whitespace/declaration', 3, + error(line_number, 'whitespace/declaration', 3, 'Declaration has space between * and variable name in %s' % matched.group(0).strip()) # Next we will look for issues with function calls. - check_spacing_for_function_call(filename, line, line_number, error) + check_spacing_for_function_call(line, line_number, error) # Except after an opening paren, you should have spaces before your braces. # And since you should never have braces at the beginning of a line, this is # an easy test. if search(r'[^ ({]{', line): - error(filename, line_number, 'whitespace/braces', 5, + error(line_number, 'whitespace/braces', 5, 'Missing space before {') # Make sure '} else {' has spaces. 
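[Editor's note: the declaration-spacing hunk above now keys off the file extension rather than the filename suffix. A reduced sketch of the two-sided rule, exercised on assumed sample declarations:

    import re

    def pointer_declaration_problem(line, file_extension):
        # C++ files want "Type* name"; plain C files want "Type *name".
        if file_extension == 'cpp':
            matched = re.match(r'\s*\w+(?<!\breturn)\s+(?P<pointer_operator>\*|\&)\w+', line)
            if matched:
                return 'space between type name and ' + matched.group('pointer_operator')
        elif file_extension == 'c':
            if re.search(r'^\s*\w+\*\s+\w+', line):
                return 'space between * and variable name'
        return None

    print(pointer_declaration_problem('int *count;', 'cpp'))  # flagged
    print(pointer_declaration_problem('int* count;', 'c'))    # flagged
]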
if search(r'}else', line): - error(filename, line_number, 'whitespace/braces', 5, + error(line_number, 'whitespace/braces', 5, 'Missing space before else') # You shouldn't have spaces before your brackets, except maybe after # 'delete []' or 'new char * []'. if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line): - error(filename, line_number, 'whitespace/braces', 5, + error(line_number, 'whitespace/braces', 5, 'Extra space before [') # You shouldn't have a space before a semicolon at the end of the line. # There's a special case for "for" since the style guide allows space before # the semicolon there. if search(r':\s*;\s*$', line): - error(filename, line_number, 'whitespace/semicolon', 5, + error(line_number, 'whitespace/semicolon', 5, 'Semicolon defining empty statement. Use { } instead.') elif search(r'^\s*;\s*$', line): - error(filename, line_number, 'whitespace/semicolon', 5, + error(line_number, 'whitespace/semicolon', 5, 'Line contains only semicolon. If this should be an empty statement, ' 'use { } instead.') elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)): - error(filename, line_number, 'whitespace/semicolon', 5, + error(line_number, 'whitespace/semicolon', 5, 'Extra space before last semicolon. If this should be an empty ' 'statement, use { } instead.') elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line) and line.count('(') == line.count(')') # Allow do {} while(); and not search(r'}\s*while', line)): - error(filename, line_number, 'whitespace/semicolon', 5, + error(line_number, 'whitespace/semicolon', 5, 'Semicolon defining empty statement for this loop. Use { } instead.') @@ -1700,11 +1445,10 @@ def get_previous_non_blank_line(clean_lines, line_number): return ('', -1) -def check_namespace_indentation(filename, clean_lines, line_number, file_extension, file_state, error): +def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error): """Looks for indentation errors inside of namespaces. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. file_extension: The extension (dot not included) of the file. @@ -1723,7 +1467,7 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi if current_indentation_level > 0: # Don't warn about an indented namespace if we already warned about indented code. if not file_state.did_inside_namespace_indent_warning(): - error(filename, line_number, 'whitespace/indent', 4, + error(line_number, 'whitespace/indent', 4, 'namespace should never be indented.') return looking_for_semicolon = False; @@ -1737,7 +1481,7 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi if not (in_preprocessor_directive or looking_for_semicolon): if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning(): file_state.set_did_inside_namespace_indent_warning() - error(filename, line_number + line_offset, 'whitespace/indent', 4, + error(line_number + line_offset, 'whitespace/indent', 4, 'Code inside a namespace should not be indented.') if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax. 
in_preprocessor_directive = current_line[-1] == '\\' @@ -1749,18 +1493,18 @@ def check_namespace_indentation(filename, clean_lines, line_number, file_extensi if current_indentation_level < 0: break; -def check_using_std(filename, clean_lines, line_number, error): +def check_using_std(file_extension, clean_lines, line_number, error): """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'. Args: - filename: The name of the current file. + file_extension: The extension of the current file, without the leading dot. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. """ # This check doesn't apply to C or Objective-C implementation files. - if filename.endswith('.c') or filename.endswith('.m'): + if is_c_or_objective_c(file_extension): return line = clean_lines.elided[line_number] # Get rid of comments and strings. @@ -1770,22 +1514,22 @@ def check_using_std(filename, clean_lines, line_number, error): return method_name = using_std_match.group('method_name') - error(filename, line_number, 'build/using_std', 4, + error(line_number, 'build/using_std', 4, "Use 'using namespace std;' instead of 'using std::%s;'." % method_name) -def check_max_min_macros(filename, clean_lines, line_number, error): +def check_max_min_macros(file_extension, clean_lines, line_number, error): """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min(). Args: - filename: The name of the current file. + file_extension: The extension of the current file, without the leading dot. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. """ # This check doesn't apply to C or Objective-C implementation files. - if filename.endswith('.c') or filename.endswith('.m'): + if is_c_or_objective_c(file_extension): return line = clean_lines.elided[line_number] # Get rid of comments and strings. @@ -1796,16 +1540,15 @@ def check_max_min_macros(filename, clean_lines, line_number, error): max_min_macro = max_min_macros_search.group('max_min_macro') max_min_macro_lower = max_min_macro.lower() - error(filename, line_number, 'runtime/max_min_macros', 4, + error(line_number, 'runtime/max_min_macros', 4, 'Use std::%s() or std::%s<type>() instead of the %s() macro.' % (max_min_macro_lower, max_min_macro_lower, max_min_macro)) -def check_switch_indentation(filename, clean_lines, line_number, error): +def check_switch_indentation(clean_lines, line_number, error): """Looks for indentation errors inside of switch statements. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1851,7 +1594,7 @@ def check_switch_indentation(filename, clean_lines, line_number, error): # on stuff like "Document::Foo();". elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line): if current_indentation != switch_indentation: - error(filename, line_number + line_offset, 'whitespace/indent', 4, + error(line_number + line_offset, 'whitespace/indent', 4, 'A case label should not be indented, but line up with its switch statement.') # Don't throw an error for multiple badly indented labels, # one should be enough to figure out the problem. 
@@ -1862,7 +1605,7 @@ def check_switch_indentation(filename, clean_lines, line_number, error): # It's not a goto label, so check if it's indented at least as far as # the switch statement plus one more level of indentation. elif not current_indentation.startswith(inner_indentation): - error(filename, line_number + line_offset, 'whitespace/indent', 4, + error(line_number + line_offset, 'whitespace/indent', 4, 'Non-label code inside switch statements should be indented.') # Don't throw an error for multiple badly indented statements, # one should be enough to figure out the problem. @@ -1872,11 +1615,10 @@ def check_switch_indentation(filename, clean_lines, line_number, error): break -def check_braces(filename, clean_lines, line_number, error): +def check_braces(clean_lines, line_number, error): """Looks for misplaced braces (e.g. at the end of line). Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -1897,13 +1639,13 @@ def check_braces(filename, clean_lines, line_number, error): if ((not search(r'[;:}{)=]\s*$|\)\s*const\s*$', previous_line) or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line)) and previous_line.find('#') < 0): - error(filename, line_number, 'whitespace/braces', 4, + error(line_number, 'whitespace/braces', 4, 'This { should be at the end of the previous line') elif (search(r'\)\s*(const\s*)?{\s*$', line) and line.count('(') == line.count(')') and not search(r'\b(if|for|foreach|while|switch)\b', line) and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)): - error(filename, line_number, 'whitespace/braces', 4, + error(line_number, 'whitespace/braces', 4, 'Place brace on its own line for function definitions.') if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1): @@ -1912,24 +1654,24 @@ def check_braces(filename, clean_lines, line_number, error): previous_line = clean_lines.elided[line_number - 2] if (previous_line.find('{') > 0 and search(r'\b(if|for|foreach|while|else)\b', previous_line)): - error(filename, line_number, 'whitespace/braces', 4, + error(line_number, 'whitespace/braces', 4, 'One line control clauses should not use braces.') # An else clause should be on the same line as the preceding closing brace. 
if match(r'\s*else\s*', line): previous_line = get_previous_non_blank_line(clean_lines, line_number)[0] if match(r'\s*}\s*$', previous_line): - error(filename, line_number, 'whitespace/newline', 4, + error(line_number, 'whitespace/newline', 4, 'An else should appear on the same line as the preceding }') # Likewise, an else should never have the else clause on the same line if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line): - error(filename, line_number, 'whitespace/newline', 4, + error(line_number, 'whitespace/newline', 4, 'Else clause should never be on same line as else (use 2 lines)') # In the same way, a do/while should never be on one line if match(r'\s*do [^\s{]', line): - error(filename, line_number, 'whitespace/newline', 4, + error(line_number, 'whitespace/newline', 4, 'do/while clauses should not be on a single line') # Braces shouldn't be followed by a ; unless they're defining a struct @@ -1945,17 +1687,16 @@ def check_braces(filename, clean_lines, line_number, error): if (search(r'{.*}\s*;', line) and line.count('{') == line.count('}') and not search(r'struct|class|enum|\s*=\s*{', line)): - error(filename, line_number, 'readability/braces', 4, + error(line_number, 'readability/braces', 4, "You don't need a ; after a }") -def check_exit_statement_simplifications(filename, clean_lines, line_number, error): +def check_exit_statement_simplifications(clean_lines, line_number, error): """Looks for else or else-if statements that should be written as an if statement when the prior if concludes with a return, break, continue or goto statement. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -2028,11 +1769,11 @@ def check_exit_statement_simplifications(filename, clean_lines, line_number, err # Whatever the outcome, this is the end of our loop. if match(r'if\s*\(', remaining_line): if else_match.start('else') != -1: - error(filename, line_number + line_offset, 'readability/control_flow', 4, + error(line_number + line_offset, 'readability/control_flow', 4, 'An else statement can be removed when the prior "if" ' 'concludes with a return, break, continue or goto statement.') else: - error(filename, line_number + line_offset, 'readability/control_flow', 4, + error(line_number + line_offset, 'readability/control_flow', 4, 'An else if statement should be written as an if statement ' 'when the prior "if" concludes with a return, break, ' 'continue or goto statement.') @@ -2074,11 +1815,10 @@ def replaceable_check(operator, macro, line): return match(match_this, line) and not search(r'NULL|&&|\|\|', line) -def check_check(filename, clean_lines, line_number, error): +def check_check(clean_lines, line_number, error): """Checks the use of CHECK and EXPECT macros. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. error: The function to call with any errors found. @@ -2100,31 +1840,36 @@ def check_check(filename, clean_lines, line_number, error): # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc. 
for operator in ['==', '!=', '>=', '>', '<=', '<']: if replaceable_check(operator, current_macro, line): - error(filename, line_number, 'readability/check', 2, + error(line_number, 'readability/check', 2, 'Consider using %s instead of %s(a %s b)' % ( _CHECK_REPLACEMENT[current_macro][operator], current_macro, operator)) break -def check_for_comparisons_to_zero(filename, clean_lines, line_number, error): +def check_for_comparisons_to_zero(clean_lines, line_number, error): # Get the line without comments and strings. line = clean_lines.elided[line_number] # Include NULL here so that users don't have to convert NULL to 0 first and then get this error. if search(r'[=!]=\s*(NULL|0|true|false)\W', line) or search(r'\W(NULL|0|true|false)\s*[=!]=', line): - error(filename, line_number, 'readability/comparison_to_zero', 5, + error(line_number, 'readability/comparison_to_zero', 5, 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.') -def check_for_null(filename, clean_lines, line_number, error): +def check_for_null(file_extension, clean_lines, line_number, error): # This check doesn't apply to C or Objective-C implementation files. - if filename.endswith('.c') or filename.endswith('.m'): + if is_c_or_objective_c(file_extension): return line = clean_lines.elided[line_number] + + # Don't warn about NULL usage in g_object_{get,set}(). See Bug 32858 + if search(r'\bg_object_[sg]et\b', line): + return + if search(r'\bNULL\b', line): - error(filename, line_number, 'readability/null', 5, 'Use 0 instead of NULL.') + error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.') return line = clean_lines.raw_lines[line_number] @@ -2132,7 +1877,7 @@ def check_for_null(filename, clean_lines, line_number, error): # matches, then do the check with strings collapsed to avoid giving errors for # NULLs occurring in strings. if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)): - error(filename, line_number, 'readability/null', 4, 'Use 0 instead of NULL.') + error(line_number, 'readability/null', 4, 'Use 0 instead of NULL.') def get_line_width(line): """Determines the width of the line in column positions. @@ -2155,7 +1900,7 @@ def get_line_width(line): return len(line) -def check_style(filename, clean_lines, line_number, file_extension, file_state, error): +def check_style(clean_lines, line_number, file_extension, file_state, error): """Checks rules from the 'C++ style rules' section of cppguide.html. Most of these rules are hard to test (naming, comment style), but we @@ -2163,7 +1908,6 @@ def check_style(filename, clean_lines, line_number, file_extension, file_state, tab usage, spaces inside code, etc. Args: - filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. file_extension: The extension (without the dot) of the filename. 
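[Editor's note on the check_for_null() hunk above: the new early return exempts GLib's g_object_get()/g_object_set(), whose varargs lists are NULL-terminated (the Bug 32858 referenced in the comment). A small sketch of the resulting behaviour on assumed inputs:

    import re

    def wants_null_warning(line):
        # NULL is still flagged in favour of 0, except on g_object_{get,set} lines.
        if re.search(r'\bg_object_[sg]et\b', line):
            return False
        return bool(re.search(r'\bNULL\b', line))

    print(wants_null_warning('if (widget == NULL)'))                       # True
    print(wants_null_warning('g_object_set(obj, "visible", TRUE, NULL);')) # False
]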
@@ -2176,7 +1920,7 @@ def check_style(filename, clean_lines, line_number, file_extension, file_state, line = raw_lines[line_number] if line.find('\t') != -1: - error(filename, line_number, 'whitespace/tab', 1, + error(line_number, 'whitespace/tab', 1, 'Tab found; better to use spaces') # One or three blank spaces at the beginning of the line is weird; it's @@ -2196,12 +1940,12 @@ def check_style(filename, clean_lines, line_number, file_extension, file_state, while initial_spaces < len(line) and line[initial_spaces] == ' ': initial_spaces += 1 if line and line[-1].isspace(): - error(filename, line_number, 'whitespace/end_of_line', 4, + error(line_number, 'whitespace/end_of_line', 4, 'Line ends in whitespace. Consider deleting these extra spaces.') # There are certain situations we allow one space, notably for labels elif ((initial_spaces >= 1 and initial_spaces <= 3) and not match(r'\s*\w+\s*:\s*$', cleansed_line)): - error(filename, line_number, 'whitespace/indent', 3, + error(line_number, 'whitespace/indent', 3, 'Weird number of spaces at line-start. ' 'Are you using a 4-space indent?') # Labels should always be indented at least one space. @@ -2213,7 +1957,7 @@ def check_style(filename, clean_lines, line_number, file_extension, file_state, # Only throw errors for stuff that is definitely not a goto label, # because goto labels can in fact occur at the start of the line. if label in ['public', 'private', 'protected'] or label.find(' ') != -1: - error(filename, line_number, 'whitespace/labels', 4, + error(line_number, 'whitespace/labels', 4, 'Labels should always be indented at least one space. ' 'If this is a member-initializer list in a constructor, ' 'the colon should be on the line after the definition header.') @@ -2226,26 +1970,27 @@ def check_style(filename, clean_lines, line_number, file_extension, file_state, # It's ok to have many commands in a switch case that fits in 1 line and not ((cleansed_line.find('case ') != -1 or cleansed_line.find('default:') != -1) - and cleansed_line.find('break;') != -1)): - error(filename, line_number, 'whitespace/newline', 4, + and cleansed_line.find('break;') != -1) + and not cleansed_line.startswith('#define ')): + error(line_number, 'whitespace/newline', 4, 'More than one command on the same line') if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'): - error(filename, line_number, 'whitespace/operators', 4, + error(line_number, 'whitespace/operators', 4, 'Boolean expressions that span multiple lines should have their ' 'operators on the left side of the line instead of the right side.') # Some more style checks - check_namespace_indentation(filename, clean_lines, line_number, file_extension, file_state, error) - check_using_std(filename, clean_lines, line_number, error) - check_max_min_macros(filename, clean_lines, line_number, error) - check_switch_indentation(filename, clean_lines, line_number, error) - check_braces(filename, clean_lines, line_number, error) - check_exit_statement_simplifications(filename, clean_lines, line_number, error) - check_spacing(filename, clean_lines, line_number, error) - check_check(filename, clean_lines, line_number, error) - check_for_comparisons_to_zero(filename, clean_lines, line_number, error) - check_for_null(filename, clean_lines, line_number, error) + check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error) + check_using_std(file_extension, clean_lines, line_number, error) + check_max_min_macros(file_extension, clean_lines, line_number, error) + 
check_switch_indentation(clean_lines, line_number, error) + check_braces(clean_lines, line_number, error) + check_exit_statement_simplifications(clean_lines, line_number, error) + check_spacing(file_extension, clean_lines, line_number, error) + check_check(clean_lines, line_number, error) + check_for_comparisons_to_zero(clean_lines, line_number, error) + check_for_null(file_extension, clean_lines, line_number, error) _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') @@ -2336,6 +2081,13 @@ def _classify_include(filename, include, is_system, include_state): if filename.endswith('.h') and filename != include: return _OTHER_HEADER; + # Qt's moc files do not follow the naming and ordering rules, so they should be skipped + if include.startswith('moc_') and include.endswith('.cpp'): + return _MOC_HEADER + + if include.endswith('.moc'): + return _MOC_HEADER + # If the target file basename starts with the include we're checking # then we consider it the primary header. target_base = FileInfo(filename).base_name() @@ -2348,13 +2100,18 @@ def _classify_include(filename, include, is_system, include_state): # In case the two filename bases are the same then the above lenient check # probably was a false positive. elif include_state.visited_primary_section() and target_base == include_base: + if include == "ResourceHandleWin.h": + # FIXME: Thus far, we've only seen one example of these, but if we + # start to see more, please consider generalizing this check + # somehow. + return _OTHER_HEADER return _PRIMARY_HEADER return _OTHER_HEADER -def check_include_line(filename, clean_lines, line_number, include_state, error): +def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error): """Check rules that are applicable to #include lines. Strings on #include lines are NOT removed from elided line, to make @@ -2363,12 +2120,20 @@ def check_include_line(filename, clean_lines, line_number, include_state, error) Args: filename: The name of the current file. + file_extension: The current file extension, without the leading dot. clean_lines: A CleansedLines instance containing the file. line_number: The number of the line to check. include_state: An _IncludeState instance in which the headers are inserted. error: The function to call with any errors found. """ + if (filename.find('WebKitTools/WebKitAPITest/') >= 0 + or filename.find('WebKit/qt/QGVLauncher/') >= 0): + # Files in this directory are consumers of the WebKit API and + # therefore do not follow the same header including discipline as + # WebCore. + return + line = clean_lines.lines[line_number] matched = _RE_PATTERN_INCLUDE.search(line) @@ -2382,17 +2147,17 @@ def check_include_line(filename, clean_lines, line_number, include_state, error) if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): # Many unit tests use cout, so we exempt them. if not _is_test_filename(filename): - error(filename, line_number, 'readability/streams', 3, + error(line_number, 'readability/streams', 3, 'Streams are highly discouraged.') # Look for specific includes to fix. 
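[Editor's note on the _classify_include() hunk above: Qt's generated moc files are now bucketed as _MOC_HEADER so they escape the include-ordering rules, and ResourceHandleWin.h gets a one-off demotion to _OTHER_HEADER. A reduced sketch of just the moc test; the helper name is mine.

    def is_moc_include(include):
        # Matches the two new cases added to _classify_include().
        return ((include.startswith('moc_') and include.endswith('.cpp'))
                or include.endswith('.moc'))

    print(is_moc_include('moc_MainWindow.cpp'))  # True
    print(is_moc_include('MainWindow.moc'))      # True
    print(is_moc_include('MainWindow.h'))        # False
]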
if include.startswith('wtf/') and not is_system: - error(filename, line_number, 'build/include', 4, + error(line_number, 'build/include', 4, 'wtf includes should be <wtf/file.h> instead of "wtf/file.h".') duplicate_header = include in include_state if duplicate_header: - error(filename, line_number, 'build/include', 4, + error(line_number, 'build/include', 4, '"%s" already included at %s:%s' % (include, filename, include_state[include])) else: @@ -2410,17 +2175,17 @@ def check_include_line(filename, clean_lines, line_number, include_state, error) # 2) for header files: alphabetically sorted # The include_state object keeps track of the last type seen # and complains if the header types are out of order or missing. - error_message = include_state.check_next_include_order(header_type, filename.endswith('.h')) + error_message = include_state.check_next_include_order(header_type, file_extension == "h") # Check to make sure we have a blank line after primary header. if not error_message and header_type == _PRIMARY_HEADER: next_line = clean_lines.raw_lines[line_number + 1] if not is_blank_line(next_line): - error(filename, line_number, 'build/include_order', 4, + error(line_number, 'build/include_order', 4, 'You should add a blank line after implementation file\'s own header.') # Check to make sure all headers besides config.h and the primary header are - # alphabetically sorted. + # alphabetically sorted. Skip Qt's moc files. if not error_message and header_type == _OTHER_HEADER: previous_line_number = line_number - 1; previous_line = clean_lines.lines[previous_line_number] @@ -2433,16 +2198,16 @@ def check_include_line(filename, clean_lines, line_number, include_state, error) if previous_match: previous_header_type = include_state.header_types[previous_line_number] if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip(): - error(filename, line_number, 'build/include_order', 4, + error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.') if error_message: - if filename.endswith('.h'): - error(filename, line_number, 'build/include_order', 4, + if file_extension == 'h': + error(line_number, 'build/include_order', 4, '%s Should be: alphabetically sorted.' % error_message) else: - error(filename, line_number, 'build/include_order', 4, + error(line_number, 'build/include_order', 4, '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' % error_message) @@ -2470,7 +2235,7 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s matched = _RE_PATTERN_INCLUDE.search(line) if matched: - check_include_line(filename, clean_lines, line_number, include_state, error) + check_include_line(filename, file_extension, clean_lines, line_number, include_state, error) return # FIXME: figure out if they're using default arguments in fn proto. @@ -2486,17 +2251,17 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s # where type may be float(), int(string), etc. Without context they are # virtually indistinguishable from int(x) casts. if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line): - error(filename, line_number, 'readability/casting', 4, + error(line_number, 'readability/casting', 4, 'Using deprecated casting style. ' 'Use static_cast<%s>(...) 
instead' % matched.group(1)) - check_c_style_cast(filename, line_number, line, clean_lines.raw_lines[line_number], + check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number], 'static_cast', r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) # This doesn't catch all cases. Consider (const char * const)"hello". - check_c_style_cast(filename, line_number, line, clean_lines.raw_lines[line_number], + check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number], 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) # In addition, we look for people taking the address of a cast. This @@ -2504,7 +2269,7 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s # point where you think. if search( r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line): - error(filename, line_number, 'runtime/casting', 4, + error(line_number, 'runtime/casting', 4, ('Are you taking an address of a cast? ' 'This is dangerous: could be a temp var. ' 'Take the address before doing the cast, rather than after')) @@ -2520,20 +2285,20 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s # Class template definitions look like: "string Foo<Type>::Method(...". if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', matched.group(3)): - error(filename, line_number, 'runtime/string', 4, + error(line_number, 'runtime/string', 4, 'For a static/global string constant, use a C style string instead: ' '"%schar %s[]".' % (matched.group(1), matched.group(2))) # Check that we're not using RTTI outside of testing code. if search(r'\bdynamic_cast<', line) and not _is_test_filename(filename): - error(filename, line_number, 'runtime/rtti', 5, + error(line_number, 'runtime/rtti', 5, 'Do not use dynamic_cast<>. If you need to cast within a class ' "hierarchy, use static_cast<> to upcast. Google doesn't support " 'RTTI.') if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): - error(filename, line_number, 'runtime/init', 4, + error(line_number, 'runtime/init', 4, 'You seem to be initializing a member variable with itself.') if file_extension == 'h': @@ -2546,33 +2311,33 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s # we regularly allow is "unsigned short port" for port. if search(r'\bshort port\b', line): if not search(r'\bunsigned short port\b', line): - error(filename, line_number, 'runtime/int', 4, + error(line_number, 'runtime/int', 4, 'Use "unsigned short" for ports, not "short"') # When snprintf is used, the second argument shouldn't be a literal. matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) if matched: - error(filename, line_number, 'runtime/printf', 3, + error(line_number, 'runtime/printf', 3, 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 'to snprintf.' % (matched.group(1), matched.group(2))) # Check if some verboten C functions are being used. if search(r'\bsprintf\b', line): - error(filename, line_number, 'runtime/printf', 5, + error(line_number, 'runtime/printf', 5, 'Never use sprintf. 
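[Editor's note: the memset hunk above is unchanged apart from the error signature. A stand-alone sketch of the swapped-argument heuristic, with assumed sample calls; calls whose second argument is a numeric literal are not flagged.

    import re

    def suspicious_memset(line):
        # memset(buf, sizeof(buf), 0) almost always has its last two
        # arguments reversed.
        matched = re.search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
        if matched and not re.match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)):
            return 'Did you mean "memset(%s, 0, %s)"?' % (matched.group(1), matched.group(2))
        return None

    print(suspicious_memset('memset(buffer, sizeof(buffer), 0);'))
    print(suspicious_memset('memset(buffer, 0, sizeof(buffer));'))  # None
]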
Use snprintf instead.') matched = search(r'\b(strcpy|strcat)\b', line) if matched: - error(filename, line_number, 'runtime/printf', 4, + error(line_number, 'runtime/printf', 4, 'Almost always, snprintf is better than %s' % matched.group(1)) if search(r'\bsscanf\b', line): - error(filename, line_number, 'runtime/printf', 1, + error(line_number, 'runtime/printf', 1, 'sscanf can be ok, but is slow and can overflow buffers.') # Check for suspicious usage of "if" like # } if (a == b) { if search(r'\}\s*if\s*\(', line): - error(filename, line_number, 'readability/braces', 4, + error(line_number, 'readability/braces', 4, 'Did you mean "else if"? If not, start a new line for "if".') # Check for potential format string bugs like printf(foo). @@ -2580,14 +2345,14 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I) if matched: - error(filename, line_number, 'runtime/printf', 4, + error(line_number, 'runtime/printf', 4, 'Potential format string bug. Do %s("%%s", %s) instead.' % (matched.group(1), matched.group(2))) # Check for potential memset bugs like memset(buf, sizeof(buf), 0). matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)): - error(filename, line_number, 'runtime/memset', 4, + error(line_number, 'runtime/memset', 4, 'Did you mean "memset(%s, 0, %s)"?' % (matched.group(1), matched.group(2))) @@ -2634,7 +2399,7 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s is_const = False break if not is_const: - error(filename, line_number, 'runtime/arrays', 1, + error(line_number, 'runtime/arrays', 1, 'Do not use variable-length arrays. Use an appropriately named ' "('k' followed by CamelCase) compile-time constant for the size.") @@ -2644,7 +2409,7 @@ def check_language(filename, clean_lines, line_number, file_extension, include_s if (file_extension == 'h' and search(r'\bnamespace\s*{', line) and line[-1] != '\\'): - error(filename, line_number, 'build/namespaces', 4, + error(line_number, 'build/namespaces', 4, 'Do not use unnamed namespaces in header files. See ' 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' ' for more information.') @@ -2666,7 +2431,7 @@ def check_identifier_name_in_declaration(filename, line_number, line, error): error: The function to call with any errors found. """ # We don't check a return statement. - if match(r'\s*return\b', line): + if match(r'\s*(return|delete)\b', line): return # Basically, a declaration is a type name followed by whitespaces @@ -2737,8 +2502,13 @@ def check_identifier_name_in_declaration(filename, line_number, line, error): if modified_identifier.find('_') >= 0: # Various exceptions to the rule: JavaScript op codes functions, const_iterator. if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('_op_') >= 0) + and not filename.find('WebKit/gtk/webkit/') >= 0 + and not modified_identifier.startswith('tst_') + and not modified_identifier.startswith('webkit_dom_object_') + and not modified_identifier.startswith('qt_') + and not modified_identifier.find('::qt_') >= 0 and not modified_identifier == "const_iterator"): - error(filename, line_number, 'readability/naming', 4, identifier + " is incorrectly named. 
Don't use underscores in your identifier names.") + error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.") # There can be only one declaration in non-for-control statements. if control_statement: @@ -2753,14 +2523,13 @@ def check_identifier_name_in_declaration(filename, line_number, line, error): line = line[matched.end():] -def check_c_style_cast(filename, line_number, line, raw_line, cast_type, pattern, +def check_c_style_cast(line_number, line, raw_line, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. This also handles sizeof(type) warnings, due to similarity of content. Args: - filename: The name of the current file. line_number: The number of the line to check. line: The line of code to check. raw_line: The raw line of code to check, with comments. @@ -2776,7 +2545,7 @@ def check_c_style_cast(filename, line_number, line, raw_line, cast_type, pattern # e.g., sizeof(int) sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1]) if sizeof_match: - error(filename, line_number, 'runtime/sizeof', 1, + error(line_number, 'runtime/sizeof', 1, 'Using sizeof(type). Use sizeof(varname) instead if possible') return @@ -2798,12 +2567,12 @@ def check_c_style_cast(filename, line_number, line, raw_line, cast_type, pattern if (not function_match.group(3) or function_match.group(3) == ';' or raw_line.find('/*') < 0): - error(filename, line_number, 'readability/function', 3, + error(line_number, 'readability/function', 3, 'All parameters should be named in a function') return # At this point, all that should be left is actual casts. - error(filename, line_number, 'readability/casting', 4, + error(line_number, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, matched.group(1))) @@ -3043,7 +2812,7 @@ def check_for_include_what_you_use(filename, clean_lines, include_state, error, if [True for header in headers if header in include_state]: continue if required_header_unstripped.strip('<>"') not in include_state: - error(filename, required[required_header_unstripped][0], + error(required[required_header_unstripped][0], 'build/include_what_you_use', 4, 'Add #include ' + required_header_unstripped + ' for ' + template) @@ -3065,25 +2834,24 @@ def process_line(filename, file_extension, the current stack of nested class declarations being parsed. file_state: A _FileState instance which maintains information about the state of things in the file. 
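[Editor's note: the naming hunk above widens the exemptions to the no-underscores rule. A condensed restatement of the new allow-list; the helper name and sample calls are mine, and the real check applies this to the identifier after stripping declaration prefixes.

    def underscore_identifier_allowed(identifier, filename):
        # Mirrors the exemptions in check_identifier_name_in_declaration():
        # JavaScriptCore opcode helpers, the GTK API directory, tst_/qt_/
        # webkit_dom_object_ entry points, and const_iterator stay legal.
        return (('JavaScriptCore' in filename and '_op_' in identifier)
                or 'WebKit/gtk/webkit/' in filename
                or identifier.startswith(('tst_', 'webkit_dom_object_', 'qt_'))
                or '::qt_' in identifier
                or identifier == 'const_iterator')

    print(underscore_identifier_allowed('webkit_dom_object_ref', 'WebKit/gtk/webkit/webkitobject.cpp'))  # True
    print(underscore_identifier_allowed('frame_loader', 'WebCore/loader/FrameLoader.cpp'))               # False
]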
- error: A callable to which errors are reported, which takes 4 arguments: - filename, line number, error level, and message + error: A callable to which errors are reported, which takes arguments: + line number, error level, and message """ raw_lines = clean_lines.raw_lines - check_for_function_lengths(filename, clean_lines, line, function_state, error) + check_for_function_lengths(clean_lines, line, function_state, error) if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines return - check_for_multiline_comments_and_strings(filename, clean_lines, line, error) - check_style(filename, clean_lines, line, file_extension, file_state, error) + check_for_multiline_comments_and_strings(clean_lines, line, error) + check_style(clean_lines, line, file_extension, file_state, error) check_language(filename, clean_lines, line, file_extension, include_state, error) - check_for_non_standard_constructs(filename, clean_lines, line, - class_state, error) - check_posix_threading(filename, clean_lines, line, error) - check_invalid_increment(filename, clean_lines, line, error) + check_for_non_standard_constructs(clean_lines, line, class_state, error) + check_posix_threading(clean_lines, line, error) + check_invalid_increment(clean_lines, line, error) -def process_file_data(filename, file_extension, lines, error): +def _process_lines(filename, file_extension, lines, error, verbosity): """Performs lint checks and reports any errors to the given error function. Args: @@ -3097,221 +2865,143 @@ def process_file_data(filename, file_extension, lines, error): ['// marker so line numbers end in a known way']) include_state = _IncludeState() - function_state = _FunctionState() + function_state = _FunctionState(verbosity) class_state = _ClassState() file_state = _FileState() - check_for_copyright(filename, lines, error) + check_for_copyright(lines, error) if file_extension == 'h': check_for_header_guard(filename, lines, error) - remove_multi_line_comments(filename, lines, error) + remove_multi_line_comments(lines, error) clean_lines = CleansedLines(lines) for line in xrange(clean_lines.num_lines()): process_line(filename, file_extension, clean_lines, line, include_state, function_state, class_state, file_state, error) - class_state.check_finished(filename, error) + class_state.check_finished(error) check_for_include_what_you_use(filename, clean_lines, include_state, error) # We check here rather than inside process_line so that we see raw # lines rather than "cleaned" lines. - check_for_unicode_replacement_characters(filename, lines, error) + check_for_unicode_replacement_characters(lines, error) - check_for_new_line_at_eof(filename, lines, error) + check_for_new_line_at_eof(lines, error) -def process_file(filename, error=error): - """Performs cpp_style on a single file. +class CppProcessor(object): - Args: - filename: The name of the file to parse. - error: The function to call with any errors found. - """ - try: - # Support the UNIX convention of using "-" for stdin. Note that - # we are not opening the file with universal newline support - # (which codecs doesn't support anyway), so the resulting lines do - # contain trailing '\r' characters if we are reading a file that - # has CRLF endings. - # If after the split a trailing '\r' is present, it is removed - # below. If it is not expected to be present (i.e. os.linesep != - # '\r\n' as in Windows), a warning is issued below if this file - # is processed. 
- - if filename == '-': - lines = codecs.StreamReaderWriter(sys.stdin, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace').read().split('\n') - else: - lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') - - carriage_return_found = False - # Remove trailing '\r'. - for line_number in range(len(lines)): - if lines[line_number].endswith('\r'): - lines[line_number] = lines[line_number].rstrip('\r') - carriage_return_found = True + """Processes C++ lines for checking style.""" - except IOError: - sys.stderr.write( - "Skipping input '%s': Can't open for reading\n" % filename) - return - - # Note, if no dot is found, this will give the entire filename as the ext. - file_extension = filename[filename.rfind('.') + 1:] - - # When reading from stdin, the extension is unknown, so no cpp_style tests - # should rely on the extension. - if (filename != '-' and file_extension != 'h' and file_extension != 'cpp' - and file_extension != 'c'): - sys.stderr.write('Ignoring %s; not a .cpp, .c or .h file\n' % filename) - else: - process_file_data(filename, file_extension, lines, error) - if carriage_return_found and os.linesep != '\r\n': - # Use 0 for line_number since outputing only one error for potentially - # several lines. - error(filename, 0, 'whitespace/newline', 1, - 'One or more unexpected \\r (^M) found;' - 'better to use only a \\n') - - -def print_usage(message): - """Prints a brief usage string and exits, optionally with an error message. - - Args: - message: The optional error message. - """ - sys.stderr.write(_USAGE) - if message: - sys.exit('\nFATAL ERROR: ' + message) - else: - sys.exit(1) - - -def print_categories(): - """Prints a list of all the error-categories used by error messages. - - These are the categories used to filter messages via --filter. - """ - sys.stderr.write(_ERROR_CATEGORIES) - sys.exit(0) - - -def parse_arguments(args, additional_flags=[]): - """Parses the command line arguments. - - This may set the output format and verbosity level as side-effects. - - Args: - args: The command line arguments: - additional_flags: A list of strings which specifies flags we allow. - - Returns: - A tuple of (filenames, flags) - - filenames: The list of filenames to lint. - flags: The dict of the flag names and the flag values. - """ - flags = ['help', 'output=', 'verbose=', 'filter='] + additional_flags - additional_flag_values = {} - try: - (opts, filenames) = getopt.getopt(args, '', flags) - except getopt.GetoptError: - print_usage('Invalid arguments.') - - verbosity = _verbose_level() - output_format = _output_format() - filters = '' - - for (opt, val) in opts: - if opt == '--help': - print_usage(None) - elif opt == '--output': - if not val in ('emacs', 'vs7'): - print_usage('The only allowed output formats are emacs and vs7.') - output_format = val - elif opt == '--verbose': - verbosity = int(val) - elif opt == '--filter': - filters = val - if not filters: - print_categories() - else: - additional_flag_values[opt] = val - - _set_output_format(output_format) - _set_verbose_level(verbosity) - _set_filters(filters) - - return (filenames, additional_flag_values) - - -def use_webkit_styles(): - """Disables some features which are not suitable for WebKit.""" - # FIXME: For filters we will never want to have, remove them. - # For filters we want to have similar functionalities, - # modify the implementation and enable them. 
- global _DEFAULT_FILTERS - _DEFAULT_FILTERS = [ - '-whitespace/end_of_line', - '-whitespace/comments', - '-whitespace/blank_line', - '-runtime/explicit', # explicit - '-runtime/virtual', # virtual dtor - '-runtime/printf', - '-runtime/threadsafe_fn', - '-runtime/rtti', - '-build/include_what_you_use', # <string> for std::string - '-legal/copyright', - '-readability/multiline_comment', - '-readability/braces', # int foo() {}; - '-readability/fn_size', - '-build/storage_class', # const static - '-build/endif_comment', - '-whitespace/labels', - '-runtime/arrays', # variable length array - '-build/header_guard', - '-readability/casting', - '-readability/function', - '-runtime/casting', - '-runtime/sizeof', - ] - - -def main(): - sys.stderr.write( - '''********************* WARNING WARNING WARNING ********************* - -This tool is in the process of development and may give inaccurate -results at present. Please file bugs (and/or patches) for things -that you notice that it flags incorrectly. + # This list is used to-- + # + # (1) generate an explicit list of all possible categories, + # (2) unit test that all checked categories have valid names, and + # (3) unit test that all categories are getting unit tested. + # + categories = set([ + 'build/class', + 'build/deprecated', + 'build/endif_comment', + 'build/forward_decl', + 'build/header_guard', + 'build/include', + 'build/include_order', + 'build/include_what_you_use', + 'build/namespaces', + 'build/printf_format', + 'build/storage_class', + 'build/using_std', + 'legal/copyright', + 'readability/braces', + 'readability/casting', + 'readability/check', + 'readability/comparison_to_zero', + 'readability/constructors', + 'readability/control_flow', + 'readability/fn_size', + 'readability/function', + 'readability/multiline_comment', + 'readability/multiline_string', + 'readability/naming', + 'readability/null', + 'readability/streams', + 'readability/todo', + 'readability/utf8', + 'runtime/arrays', + 'runtime/casting', + 'runtime/explicit', + 'runtime/init', + 'runtime/int', + 'runtime/invalid_increment', + 'runtime/max_min_macros', + 'runtime/memset', + 'runtime/printf', + 'runtime/printf_format', + 'runtime/references', + 'runtime/rtti', + 'runtime/sizeof', + 'runtime/string', + 'runtime/threadsafe_fn', + 'runtime/virtual', + 'whitespace/blank_line', + 'whitespace/braces', + 'whitespace/comma', + 'whitespace/comments', + 'whitespace/declaration', + 'whitespace/end_of_line', + 'whitespace/ending_newline', + 'whitespace/indent', + 'whitespace/labels', + 'whitespace/line_length', + 'whitespace/newline', + 'whitespace/operators', + 'whitespace/parens', + 'whitespace/semicolon', + 'whitespace/tab', + 'whitespace/todo', + ]) + + def __init__(self, file_path, file_extension, handle_style_error, verbosity): + """Create a CppProcessor instance. -********************* WARNING WARNING WARNING ********************* + Args: + file_extension: A string that is the file extension, without + the leading dot. -''') + """ + self.file_extension = file_extension + self.file_path = file_path + self.handle_style_error = handle_style_error + self.verbosity = verbosity + + # Useful for unit testing. 
+ def __eq__(self, other): + """Return whether this CppProcessor instance is equal to another.""" + if self.file_extension != other.file_extension: + return False + if self.file_path != other.file_path: + return False + if self.handle_style_error != other.handle_style_error: + return False + if self.verbosity != other.verbosity: + return False - use_webkit_styles() + return True - (filenames, flags) = parse_arguments(sys.argv[1:]) - if not filenames: - print_usage('No files were specified.') + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce __ne__() from __eq__(). + return not self.__eq__(other) - # Change stderr to write with replacement characters so we don't die - # if we try to print something containing non-ASCII characters. - sys.stderr = codecs.StreamReaderWriter(sys.stderr, - codecs.getreader('utf8'), - codecs.getwriter('utf8'), - 'replace') + def process(self, lines): + _process_lines(self.file_path, self.file_extension, lines, + self.handle_style_error, self.verbosity) - _cpp_style_state.reset_error_count() - for filename in filenames: - process_file(filename) - sys.stderr.write('Total errors found: %d\n' % _cpp_style_state.error_count) - sys.exit(_cpp_style_state.error_count > 0) +# FIXME: Remove this function (requires refactoring unit tests). +def process_file_data(filename, file_extension, lines, error, verbosity): + processor = CppProcessor(filename, file_extension, error, verbosity) + processor.process(lines) -if __name__ == '__main__': - main() diff --git a/WebKitTools/Scripts/modules/cpp_style_unittest.py b/WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py index 75dd47e..e556cd3 100644 --- a/WebKitTools/Scripts/modules/cpp_style_unittest.py +++ b/WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py @@ -4,6 +4,7 @@ # Copyright (C) 2009 Google Inc. All rights reserved. # Copyright (C) 2009 Torch Mobile Inc. # Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -40,31 +41,28 @@ import os import random import re import unittest -import cpp_style - +import cpp as cpp_style +from cpp import CppProcessor # This class works as an error collector and replaces cpp_style.Error # function for the unit tests. We also verify each category we see -# is in cpp_style._ERROR_CATEGORIES, to help keep that list up to date. +# is in STYLE_CATEGORIES, to help keep that list up to date. class ErrorCollector: - # These are a global list, covering all categories seen ever. - _ERROR_CATEGORIES = [x.strip() # get rid of leading whitespace - for x in cpp_style._ERROR_CATEGORIES.split()] - _SEEN_ERROR_CATEGORIES = {} + _all_style_categories = CppProcessor.categories + # This is a list including all categories seen in any unit test. 
+ _seen_style_categories = {} def __init__(self, assert_fn): """assert_fn: a function to call when we notice a problem.""" self._assert_fn = assert_fn self._errors = [] - def __call__(self, unused_filename, unused_linenum, - category, confidence, message): - self._assert_fn(category in self._ERROR_CATEGORIES, + def __call__(self, unused_linenum, category, confidence, message): + self._assert_fn(category in self._all_style_categories, 'Message "%s" has category "%s",' - ' which is not in _ERROR_CATEGORIES' % (message, category)) - self._SEEN_ERROR_CATEGORIES[category] = 1 - if cpp_style._should_print_error(category, confidence): - self._errors.append('%s [%s] [%d]' % (message, category, confidence)) + ' which is not in STYLE_CATEGORIES' % (message, category)) + self._seen_style_categories[category] = 1 + self._errors.append('%s [%s] [%d]' % (message, category, confidence)) def results(self): if len(self._errors) < 2: @@ -76,16 +74,16 @@ class ErrorCollector: return self._errors def verify_all_categories_are_seen(self): - """Fails if there's a category in _ERROR_CATEGORIES - _SEEN_ERROR_CATEGORIES. + """Fails if there's a category in _all_style_categories - _seen_style_categories. This should only be called after all tests are run, so - _SEEN_ERROR_CATEGORIES has had a chance to fully populate. Since + _seen_style_categories has had a chance to fully populate. Since this isn't called from within the normal unittest framework, we can't use the normal unittest assert macros. Instead we just exit when we see an error. Good thing this test is always run last! """ - for category in self._ERROR_CATEGORIES: - if category not in self._SEEN_ERROR_CATEGORIES: + for category in self._all_style_categories: + if category not in self._seen_style_categories: import sys sys.exit('FATAL ERROR: There are no tests for category "%s"' % category) @@ -107,17 +105,43 @@ class MockIo: return self.mock_file +class CppFunctionsTest(unittest.TestCase): + + """Supports testing functions that do not need CppStyleTestBase.""" + + def test_is_c_or_objective_c(self): + self.assertTrue(cpp_style.is_c_or_objective_c("c")) + self.assertTrue(cpp_style.is_c_or_objective_c("m")) + self.assertFalse(cpp_style.is_c_or_objective_c("cpp")) + + class CppStyleTestBase(unittest.TestCase): - """Provides some useful helper functions for cpp_style tests.""" + """Provides some useful helper functions for cpp_style tests. + + Attributes: + verbosity: An integer that is the current verbosity level for + the tests. + + """ + + # FIXME: Refactor the unit tests so the verbosity level is passed + # explicitly, just like it is in the real code. + verbosity = 1; + + # Helper function to avoid needing to explicitly pass verbosity + # in all the unit test calls to cpp_style.process_file_data(). + def process_file_data(self, filename, file_extension, lines, error): + """Call cpp_style.process_file_data() with the current verbosity.""" + return cpp_style.process_file_data(filename, file_extension, lines, error, self.verbosity) # Perform lint on single line of input and return the error message. 
def perform_single_line_lint(self, code, file_name): error_collector = ErrorCollector(self.assert_) lines = code.split('\n') - cpp_style.remove_multi_line_comments(file_name, lines, error_collector) + cpp_style.remove_multi_line_comments(lines, error_collector) clean_lines = cpp_style.CleansedLines(lines) include_state = cpp_style._IncludeState() - function_state = cpp_style._FunctionState() + function_state = cpp_style._FunctionState(self.verbosity) ext = file_name[file_name.rfind('.') + 1:] class_state = cpp_style._ClassState() file_state = cpp_style._FileState() @@ -131,19 +155,18 @@ class CppStyleTestBase(unittest.TestCase): return error_collector.results() # Perform lint over multiple lines and return the error message. - def perform_multi_line_lint(self, code, file_name): + def perform_multi_line_lint(self, code, file_extension): error_collector = ErrorCollector(self.assert_) lines = code.split('\n') - cpp_style.remove_multi_line_comments(file_name, lines, error_collector) + cpp_style.remove_multi_line_comments(lines, error_collector) lines = cpp_style.CleansedLines(lines) - ext = file_name[file_name.rfind('.') + 1:] class_state = cpp_style._ClassState() file_state = cpp_style._FileState() for i in xrange(lines.num_lines()): - cpp_style.check_style(file_name, lines, i, ext, file_state, error_collector) - cpp_style.check_for_non_standard_constructs(file_name, lines, i, class_state, + cpp_style.check_style(lines, i, file_extension, file_state, error_collector) + cpp_style.check_for_non_standard_constructs(lines, i, class_state, error_collector) - class_state.check_finished(file_name, error_collector) + class_state.check_finished(error_collector) return error_collector.results() # Similar to perform_multi_line_lint, but calls check_language instead of @@ -152,7 +175,7 @@ class CppStyleTestBase(unittest.TestCase): error_collector = ErrorCollector(self.assert_) include_state = cpp_style._IncludeState() lines = code.split('\n') - cpp_style.remove_multi_line_comments(file_name, lines, error_collector) + cpp_style.remove_multi_line_comments(lines, error_collector) lines = cpp_style.CleansedLines(lines) ext = file_name[file_name.rfind('.') + 1:] for i in xrange(lines.num_lines()): @@ -175,14 +198,13 @@ class CppStyleTestBase(unittest.TestCase): Returns: The accumulated errors. 
""" - file_name = 'foo.cpp' error_collector = ErrorCollector(self.assert_) - function_state = cpp_style._FunctionState() + function_state = cpp_style._FunctionState(self.verbosity) lines = code.split('\n') - cpp_style.remove_multi_line_comments(file_name, lines, error_collector) + cpp_style.remove_multi_line_comments(lines, error_collector) lines = cpp_style.CleansedLines(lines) for i in xrange(lines.num_lines()): - cpp_style.check_for_function_lengths(file_name, lines, i, + cpp_style.check_for_function_lengths(lines, i, function_state, error_collector) return error_collector.results() @@ -191,10 +213,11 @@ class CppStyleTestBase(unittest.TestCase): error_collector = ErrorCollector(self.assert_) include_state = cpp_style._IncludeState() lines = code.split('\n') - cpp_style.remove_multi_line_comments(filename, lines, error_collector) + cpp_style.remove_multi_line_comments(lines, error_collector) lines = cpp_style.CleansedLines(lines) + file_extension = filename[filename.rfind('.') + 1:] for i in xrange(lines.num_lines()): - cpp_style.check_language(filename, lines, i, '.h', include_state, + cpp_style.check_language(filename, lines, i, file_extension, include_state, error_collector) # We could clear the error_collector here, but this should # also be fine, since our IncludeWhatYouUse unittests do not @@ -218,10 +241,12 @@ class CppStyleTestBase(unittest.TestCase): self.assertEquals(expected_message, messages) def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'): - self.assertEquals(expected_message, self.perform_multi_line_lint(code, file_name)) + file_extension = file_name[file_name.rfind('.') + 1:] + self.assertEquals(expected_message, self.perform_multi_line_lint(code, file_extension)) def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'): - message = self.perform_multi_line_lint(code, file_name) + file_extension = file_name[file_name.rfind('.') + 1:] + message = self.perform_multi_line_lint(code, file_extension) if not re.search(expected_message_re, message): self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"') @@ -235,7 +260,7 @@ class CppStyleTestBase(unittest.TestCase): def assert_blank_lines_check(self, lines, start_errors, end_errors): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data('foo.cpp', 'cpp', lines, error_collector) + self.process_file_data('foo.cpp', 'cpp', lines, error_collector) self.assertEquals( start_errors, error_collector.results().count( @@ -400,13 +425,13 @@ class CppStyleTest(CppStyleTestBase): ' [readability/casting] [4]') # Checks for false positives... self.assert_lint( - 'int a = int(); // Constructor, o.k.', + 'int a = int(); // Constructor, o.k.', '') self.assert_lint( - 'X::X() : a(int()) {} // default Constructor, o.k.', + 'X::X() : a(int()) {} // default Constructor, o.k.', '') self.assert_lint( - 'operator bool(); // Conversion operator, o.k.', + 'operator bool(); // Conversion operator, o.k.', '') # The second parameter to a gMock method definition is a function signature @@ -693,10 +718,10 @@ class CppStyleTest(CppStyleTestBase): file_path = 'mydir/foo.cpp' error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'cpp', - ['const char* str = "This is a\\', - ' multiline string.";'], - error_collector) + self.process_file_data(file_path, 'cpp', + ['const char* str = "This is a\\', + ' multiline string.";'], + error_collector) self.assertEquals( 2, # One per line. 
error_collector.result_list().count(multiline_string_error_message)) @@ -721,7 +746,7 @@ class CppStyleTest(CppStyleTestBase): # missing explicit, with distracting comment, is still bad self.assert_multi_line_lint( '''class Foo { - Foo(int f); // simpler than Foo(blargh, blarg) + Foo(int f); // simpler than Foo(blargh, blarg) };''', 'Single-argument constructors should be marked explicit.' ' [runtime/explicit] [5]') @@ -1089,7 +1114,7 @@ class CppStyleTest(CppStyleTestBase): ' [readability/check] [2]') self.assert_lint( - ' EXPECT_TRUE(42 < x) // Random comment.', + ' EXPECT_TRUE(42 < x) // Random comment.', 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)' ' [readability/check] [2]') self.assert_lint( @@ -1148,10 +1173,16 @@ class CppStyleTest(CppStyleTestBase): ' [whitespace/parens] [5]') self.assert_lint('for (foo; ba; bar ) {', 'Mismatching spaces inside () in for' ' [whitespace/parens] [5]') + self.assert_lint('for ((foo); (ba); (bar) ) {', 'Mismatching spaces inside () in for' + ' [whitespace/parens] [5]') self.assert_lint('for (; foo; bar) {', '') + self.assert_lint('for (; (foo); (bar)) {', '') self.assert_lint('for ( ; foo; bar) {', '') + self.assert_lint('for ( ; (foo); (bar)) {', '') self.assert_lint('for ( ; foo; bar ) {', '') + self.assert_lint('for ( ; (foo); (bar) ) {', '') self.assert_lint('for (foo; bar; ) {', '') + self.assert_lint('for ((foo); (bar); ) {', '') self.assert_lint('foreach (foo, foos ) {', 'Mismatching spaces inside () in foreach' ' [whitespace/parens] [5]') self.assert_lint('foreach ( foo, foos) {', 'Mismatching spaces inside () in foreach' @@ -1249,30 +1280,31 @@ class CppStyleTest(CppStyleTestBase): ' [whitespace/operators] [3]') self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /' ' [whitespace/operators] [3]') - self.assert_lint('a<Foo*> t <<= b/c; //Test', ['At least two spaces' - ' is best between code and comments [whitespace/' - 'comments] [2]', 'Should have a space between // ' - 'and comment [whitespace/comments] [4]', 'Missing' + self.assert_lint('a<Foo*> t <<= b/c; //Test', [ + 'Should have a space between // and comment ' + '[whitespace/comments] [4]', 'Missing' ' spaces around / [whitespace/operators] [3]']) - self.assert_lint('a<Foo*> t <<= b||c; //Test', ['Should have a space' - ' between // and comment [whitespace/comments] [4]', + self.assert_lint('a<Foo*> t <<= b||c; //Test', ['One space before end' + ' of line comments [whitespace/comments] [5]', + 'Should have a space between // and comment ' + '[whitespace/comments] [4]', 'Missing spaces around || [whitespace/operators] [3]']) - self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around' + self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around' ' && [whitespace/operators] [3]') - self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around' + self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around' ' && [whitespace/operators] [3]') - self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around' + self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around' ' && [whitespace/operators] [3]') - self.assert_lint('a<Foo*> t <<= b && *c; // Test', '') - self.assert_lint('a<Foo*> t <<= b && &c; // Test', '') + self.assert_lint('a<Foo*> t <<= b && *c; // Test', '') + self.assert_lint('a<Foo*> t <<= b && &c; // Test', '') self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line ' '/*...*/-style comment found. Lint may give bogus ' 'warnings. 
Consider replacing these with //-style' ' comments, with #if 0...#endif, or with more clearly' ' structured multi-line comments. [readability/multiline_comment] [5]') self.assert_lint('a<Foo&> t <<= &b | &c;', '') - self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '') - self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '') + self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '') + self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '') self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]') self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]') self.assert_lint('if (a = b == 1)', '') @@ -1306,7 +1338,7 @@ class CppStyleTest(CppStyleTestBase): 'For a static/global string constant, use a C style ' 'string instead: "char foo[]".' ' [runtime/string] [4]') - self.assert_lint('string kFoo = "hello"; // English', + self.assert_lint('string kFoo = "hello"; // English', 'For a static/global string constant, use a C style ' 'string instead: "char kFoo[]".' ' [runtime/string] [4]') @@ -1365,25 +1397,27 @@ class CppStyleTest(CppStyleTestBase): def test_two_spaces_between_code_and_comments(self): self.assert_lint('} // namespace foo', - 'At least two spaces is best between code and comments' - ' [whitespace/comments] [2]') + '') self.assert_lint('}// namespace foo', - 'At least two spaces is best between code and comments' - ' [whitespace/comments] [2]') + 'One space before end of line comments' + ' [whitespace/comments] [5]') self.assert_lint('printf("foo"); // Outside quotes.', - 'At least two spaces is best between code and comments' - ' [whitespace/comments] [2]') - self.assert_lint('int i = 0; // Having two spaces is fine.', '') - self.assert_lint('int i = 0; // Having three spaces is OK.', '') + '') + self.assert_lint('int i = 0; // Having one space is fine.','') + self.assert_lint('int i = 0; // Having two spaces is bad.', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + self.assert_lint('int i = 0; // Having three spaces is bad.', + 'One space before end of line comments' + ' [whitespace/comments] [5]') self.assert_lint('// Top level comment', '') self.assert_lint(' // Line starts with four spaces.', '') self.assert_lint('foo();\n' '{ // A scope is opening.', '') self.assert_lint(' foo();\n' ' { // An indented scope is opening.', '') - self.assert_lint('if (foo) { // not a pure scope; comment is too close!', - 'At least two spaces is best between code and comments' - ' [whitespace/comments] [2]') + self.assert_lint('if (foo) { // not a pure scope', + '') self.assert_lint('printf("// In quotes.")', '') self.assert_lint('printf("\\"%s // In quotes.")', '') self.assert_lint('printf("%s", "// In quotes.")', '') @@ -1404,8 +1438,8 @@ class CppStyleTest(CppStyleTestBase): def test_newline_at_eof(self): def do_test(self, data, is_missing_eof): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data('foo.cpp', 'cpp', data.split('\n'), - error_collector) + self.process_file_data('foo.cpp', 'cpp', data.split('\n'), + error_collector) # The warning appears only once. 
self.assertEquals( int(is_missing_eof), @@ -1419,10 +1453,9 @@ class CppStyleTest(CppStyleTestBase): def test_invalid_utf8(self): def do_test(self, raw_bytes, has_invalid_utf8): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data( - 'foo.cpp', 'cpp', - unicode(raw_bytes, 'utf8', 'replace').split('\n'), - error_collector) + self.process_file_data('foo.cpp', 'cpp', + unicode(raw_bytes, 'utf8', 'replace').split('\n'), + error_collector) # The warning appears only once. self.assertEquals( int(has_invalid_utf8), @@ -1455,40 +1488,40 @@ class CppStyleTest(CppStyleTestBase): def test_allow_blank_line_before_closing_namespace(self): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data('foo.cpp', 'cpp', - ['namespace {', '', '} // namespace'], - error_collector) + self.process_file_data('foo.cpp', 'cpp', + ['namespace {', '', '} // namespace'], + error_collector) self.assertEquals(0, error_collector.results().count( 'Blank line at the end of a code block. Is this needed?' ' [whitespace/blank_line] [3]')) def test_allow_blank_line_before_if_else_chain(self): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data('foo.cpp', 'cpp', - ['if (hoge) {', - '', # No warning - '} else if (piyo) {', - '', # No warning - '} else if (piyopiyo) {', - ' hoge = true;', # No warning - '} else {', - '', # Warning on this line - '}'], - error_collector) + self.process_file_data('foo.cpp', 'cpp', + ['if (hoge) {', + '', # No warning + '} else if (piyo) {', + '', # No warning + '} else if (piyopiyo) {', + ' hoge = true;', # No warning + '} else {', + '', # Warning on this line + '}'], + error_collector) self.assertEquals(1, error_collector.results().count( 'Blank line at the end of a code block. Is this needed?' 
' [whitespace/blank_line] [3]')) def test_else_on_same_line_as_closing_braces(self): error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data('foo.cpp', 'cpp', - ['if (hoge) {', - '', - '}', - ' else {' # Warning on this line - '', - '}'], - error_collector) + self.process_file_data('foo.cpp', 'cpp', + ['if (hoge) {', + '', + '}', + ' else {' # Warning on this line + '', + '}'], + error_collector) self.assertEquals(1, error_collector.results().count( 'An else should appear on the same line as the preceding }' ' [whitespace/newline] [4]')) @@ -1562,112 +1595,9 @@ class CppStyleTest(CppStyleTestBase): def test_tab(self): self.assert_lint('\tint a;', 'Tab found; better to use spaces [whitespace/tab] [1]') - self.assert_lint('int a = 5;\t\t// set a to 5', + self.assert_lint('int a = 5;\t// set a to 5', 'Tab found; better to use spaces [whitespace/tab] [1]') - def test_parse_arguments(self): - old_usage = cpp_style._USAGE - old_error_categories = cpp_style._ERROR_CATEGORIES - old_output_format = cpp_style._cpp_style_state.output_format - old_verbose_level = cpp_style._cpp_style_state.verbose_level - old_filters = cpp_style._cpp_style_state.filters - try: - # Don't print usage during the tests, or filter categories - cpp_style._USAGE = '' - cpp_style._ERROR_CATEGORIES = '' - - self.assertRaises(SystemExit, cpp_style.parse_arguments, ['--badopt']) - self.assertRaises(SystemExit, cpp_style.parse_arguments, ['--help']) - self.assertRaises(SystemExit, cpp_style.parse_arguments, ['--filter=']) - # This is illegal because all filters must start with + or - - self.assertRaises(ValueError, cpp_style.parse_arguments, ['--filter=foo']) - self.assertRaises(ValueError, cpp_style.parse_arguments, - ['--filter=+a,b,-c']) - - self.assertEquals((['foo.cpp'], {}), cpp_style.parse_arguments(['foo.cpp'])) - self.assertEquals(old_output_format, cpp_style._cpp_style_state.output_format) - self.assertEquals(old_verbose_level, cpp_style._cpp_style_state.verbose_level) - - self.assertEquals(([], {}), cpp_style.parse_arguments([])) - self.assertEquals(([], {}), cpp_style.parse_arguments(['--v=0'])) - - self.assertEquals((['foo.cpp'], {}), - cpp_style.parse_arguments(['--v=1', 'foo.cpp'])) - self.assertEquals(1, cpp_style._cpp_style_state.verbose_level) - self.assertEquals((['foo.h'], {}), - cpp_style.parse_arguments(['--v=3', 'foo.h'])) - self.assertEquals(3, cpp_style._cpp_style_state.verbose_level) - self.assertEquals((['foo.cpp'], {}), - cpp_style.parse_arguments(['--verbose=5', 'foo.cpp'])) - self.assertEquals(5, cpp_style._cpp_style_state.verbose_level) - self.assertRaises(ValueError, - cpp_style.parse_arguments, ['--v=f', 'foo.cpp']) - - self.assertEquals((['foo.cpp'], {}), - cpp_style.parse_arguments(['--output=emacs', 'foo.cpp'])) - self.assertEquals('emacs', cpp_style._cpp_style_state.output_format) - self.assertEquals((['foo.h'], {}), - cpp_style.parse_arguments(['--output=vs7', 'foo.h'])) - self.assertEquals('vs7', cpp_style._cpp_style_state.output_format) - self.assertRaises(SystemExit, - cpp_style.parse_arguments, ['--output=blah', 'foo.cpp']) - - filt = '-,+whitespace,-whitespace/indent' - self.assertEquals((['foo.h'], {}), - cpp_style.parse_arguments(['--filter='+filt, 'foo.h'])) - self.assertEquals(['-', '+whitespace', '-whitespace/indent'], - cpp_style._cpp_style_state.filters) - - self.assertEquals((['foo.cpp', 'foo.h'], {}), - cpp_style.parse_arguments(['foo.cpp', 'foo.h'])) - - self.assertEquals((['foo.cpp'], {'--foo': ''}), - cpp_style.parse_arguments(['--foo', 'foo.cpp'], 
['foo'])) - self.assertEquals((['foo.cpp'], {'--foo': 'bar'}), - cpp_style.parse_arguments(['--foo=bar', 'foo.cpp'], ['foo='])) - self.assertEquals((['foo.cpp'], {}), - cpp_style.parse_arguments(['foo.cpp'], ['foo='])) - self.assertRaises(SystemExit, - cpp_style.parse_arguments, - ['--footypo=bar', 'foo.cpp'], ['foo=']) - finally: - cpp_style._USAGE = old_usage - cpp_style._ERROR_CATEGORIES = old_error_categories - cpp_style._cpp_style_state.output_format = old_output_format - cpp_style._cpp_style_state.verbose_level = old_verbose_level - cpp_style._cpp_style_state.filters = old_filters - - def test_filter(self): - old_filters = cpp_style._cpp_style_state.filters - try: - cpp_style._cpp_style_state.set_filters('-,+whitespace,-whitespace/indent') - self.assert_lint( - '// Hello there ', - 'Line ends in whitespace. Consider deleting these extra spaces.' - ' [whitespace/end_of_line] [4]') - self.assert_lint('int a = (int)1.0;', '') - self.assert_lint(' weird opening space', '') - finally: - cpp_style._cpp_style_state.filters = old_filters - - def test_default_filter(self): - default_filters = cpp_style._DEFAULT_FILTERS - old_filters = cpp_style._cpp_style_state.filters - cpp_style._DEFAULT_FILTERS = [ '-whitespace' ] - try: - # Reset filters - cpp_style._cpp_style_state.set_filters('') - self.assert_lint('// Hello there ', '') - cpp_style._cpp_style_state.set_filters('+whitespace/end_of_line') - self.assert_lint( - '// Hello there ', - 'Line ends in whitespace. Consider deleting these extra spaces.' - ' [whitespace/end_of_line] [4]') - self.assert_lint(' weird opening space', '') - finally: - cpp_style._cpp_style_state.filters = old_filters - cpp_style._DEFAULT_FILTERS = default_filters - def test_unnamed_namespaces_in_headers(self): self.assert_language_rules_check( 'foo.h', 'namespace {', @@ -1728,17 +1658,17 @@ class CppStyleTest(CppStyleTestBase): ' [build/forward_decl] [5]') def test_build_header_guard(self): - file_path = 'mydir/foo.h' + file_path = 'mydir/Foo.h' # We can't rely on our internal stuff to get a sane path on the open source # side of things, so just parse out the suggested header guard. This # doesn't allow us to test the suggested header guard, but it does let us # test all the other header tests. 
error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', [], error_collector) + self.process_file_data(file_path, 'h', [], error_collector) expected_guard = '' matcher = re.compile( - 'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Z_0-9]+) ') + 'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ') for error in error_collector.result_list(): matches = matcher.match(error) if matches: @@ -1750,8 +1680,8 @@ class CppStyleTest(CppStyleTestBase): # Wrong guard error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef FOO_H', '#define FOO_H'], error_collector) + self.process_file_data(file_path, 'h', + ['#ifndef FOO_H', '#define FOO_H'], error_collector) self.assertEquals( 1, error_collector.result_list().count( @@ -1761,8 +1691,8 @@ class CppStyleTest(CppStyleTestBase): # No define error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s' % expected_guard], error_collector) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard], error_collector) self.assertEquals( 1, error_collector.result_list().count( @@ -1772,10 +1702,10 @@ class CppStyleTest(CppStyleTestBase): # Mismatched define error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s' % expected_guard, - '#define FOO_H'], - error_collector) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard, + '#define FOO_H'], + error_collector) self.assertEquals( 1, error_collector.result_list().count( @@ -1783,107 +1713,30 @@ class CppStyleTest(CppStyleTestBase): ' [build/header_guard] [5]' % expected_guard), error_collector.result_list()) - # No endif - error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s' % expected_guard, - '#define %s' % expected_guard], - error_collector) - self.assertEquals( - 1, - error_collector.result_list().count( - '#endif line should be "#endif // %s"' - ' [build/header_guard] [5]' % expected_guard), - error_collector.result_list()) - - # Commentless endif - error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s' % expected_guard, - '#define %s' % expected_guard, - '#endif'], - error_collector) - self.assertEquals( - 1, - error_collector.result_list().count( - '#endif line should be "#endif // %s"' - ' [build/header_guard] [5]' % expected_guard), - error_collector.result_list()) - - # Commentless endif for old-style guard - error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s_' % expected_guard, - '#define %s_' % expected_guard, - '#endif'], - error_collector) - self.assertEquals( - 1, - error_collector.result_list().count( - '#endif line should be "#endif // %s"' - ' [build/header_guard] [5]' % expected_guard), - error_collector.result_list()) - # No header guard errors error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s' % expected_guard, - '#define %s' % expected_guard, - '#endif // %s' % expected_guard], - error_collector) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard, + '#define %s' % expected_guard, + '#endif // %s' % expected_guard], + error_collector) for line in error_collector.result_list(): if line.find('build/header_guard') != -1: self.fail('Unexpected error: %s' % line) - # No header guard errors for 
old-style guard - error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s_' % expected_guard, - '#define %s_' % expected_guard, - '#endif // %s_' % expected_guard], - error_collector) - for line in error_collector.result_list(): - if line.find('build/header_guard') != -1: - self.fail('Unexpected error: %s' % line) - - old_verbose_level = cpp_style._cpp_style_state.verbose_level - try: - cpp_style._cpp_style_state.verbose_level = 0 - # Warn on old-style guard if verbosity is 0. - error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef %s_' % expected_guard, - '#define %s_' % expected_guard, - '#endif // %s_' % expected_guard], - error_collector) - self.assertEquals( - 1, - error_collector.result_list().count( - '#ifndef header guard has wrong style, please use: %s' - ' [build/header_guard] [0]' % expected_guard), - error_collector.result_list()) - finally: - cpp_style._cpp_style_state.verbose_level = old_verbose_level - # Completely incorrect header guard error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'h', - ['#ifndef FOO', - '#define FOO', - '#endif // FOO'], - error_collector) + self.process_file_data(file_path, 'h', + ['#ifndef FOO', + '#define FOO', + '#endif // FOO'], + error_collector) self.assertEquals( 1, error_collector.result_list().count( '#ifndef header guard has wrong style, please use: %s' ' [build/header_guard] [5]' % expected_guard), error_collector.result_list()) - self.assertEquals( - 1, - error_collector.result_list().count( - '#endif line should be "#endif // %s"' - ' [build/header_guard] [5]' % expected_guard), - error_collector.result_list()) def test_build_printf_format(self): self.assert_lint( @@ -2019,13 +1872,13 @@ class CppStyleTest(CppStyleTestBase): # There should be a copyright message in the first 10 lines error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'cpp', [], error_collector) + self.process_file_data(file_path, 'cpp', [], error_collector) self.assertEquals( 1, error_collector.result_list().count(legal_copyright_message)) error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data( + self.process_file_data( file_path, 'cpp', ['' for unused_i in range(10)] + [copyright_line], error_collector) @@ -2035,13 +1888,13 @@ class CppStyleTest(CppStyleTestBase): # Test that warning isn't issued if Copyright line appears early enough. 
error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data(file_path, 'cpp', [copyright_line], error_collector) + self.process_file_data(file_path, 'cpp', [copyright_line], error_collector) for message in error_collector.result_list(): if message.find('legal/copyright') != -1: self.fail('Unexpected error: %s' % message) error_collector = ErrorCollector(self.assert_) - cpp_style.process_file_data( + self.process_file_data( file_path, 'cpp', ['' for unused_i in range(9)] + [copyright_line], error_collector) @@ -2203,6 +2056,16 @@ class OrderOfIncludesTest(CppStyleTestBase): '#include <assert.h>\n', '') + def test_webkit_api_test_excluded(self): + self.assert_language_rules_check('WebKitTools/WebKitAPITest/Test.h', + '#include "foo.h"\n', + '') + + def test_webkit_api_test_excluded(self): + self.assert_language_rules_check('WebKit/qt/QGVLauncher/main.cpp', + '#include "foo.h"\n', + '') + def test_check_line_break_after_own_header(self): self.assert_language_rules_check('foo.cpp', '#include "config.h"\n' @@ -2320,6 +2183,14 @@ class OrderOfIncludesTest(CppStyleTestBase): classify_include('PrefixFooCustom.cpp', 'Foo.h', False, include_state)) + self.assertEqual(cpp_style._MOC_HEADER, + classify_include('foo.cpp', + 'foo.moc', + False, include_state)) + self.assertEqual(cpp_style._MOC_HEADER, + classify_include('foo.cpp', + 'moc_foo.cpp', + False, include_state)) # Tricky example where both includes might be classified as primary. self.assert_language_rules_check('ScrollbarThemeWince.cpp', '#include "config.h"\n' @@ -2335,6 +2206,12 @@ class OrderOfIncludesTest(CppStyleTestBase): 'Found header this file implements after a header this file implements.' ' Should be: config.h, primary header, blank line, and then alphabetically sorted.' ' [build/include_order] [4]') + self.assert_language_rules_check('ResourceHandleWin.cpp', + '#include "config.h"\n' + '#include "ResourceHandle.h"\n' + '\n' + '#include "ResourceHandleWin.h"\n', + '') def test_try_drop_common_suffixes(self): self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h')) @@ -2363,6 +2240,13 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger + # FIXME: Eliminate the need for this function. + def set_verbosity(self, verbosity): + """Set new test verbosity and return old test verbosity.""" + old_verbosity = self.verbosity + self.verbosity = verbosity + return old_verbosity + def assert_function_lengths_check(self, code, expected_message): """Check warnings for long function bodies are as expected. @@ -2402,7 +2286,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): lines: Number of lines to generate. error_level: --v setting for cpp_style. 
""" - trigger_level = self.trigger_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_lines(self.verbosity) self.assert_function_lengths_check( 'void test(int x)' + self.function_body(lines), ('Small and focused functions are preferred: ' @@ -2485,29 +2369,29 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): '') def test_function_length_check_definition_below_severity0(self): - old_verbosity = cpp_style._set_verbose_level(0) + old_verbosity = self.set_verbosity(0) self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1) - cpp_style._set_verbose_level(old_verbosity) + self.set_verbosity(old_verbosity) def test_function_length_check_definition_at_severity0(self): - old_verbosity = cpp_style._set_verbose_level(0) + old_verbosity = self.set_verbosity(0) self.assert_function_length_check_definition_ok(self.trigger_lines(0)) - cpp_style._set_verbose_level(old_verbosity) + self.set_verbosity(old_verbosity) def test_function_length_check_definition_above_severity0(self): - old_verbosity = cpp_style._set_verbose_level(0) + old_verbosity = self.set_verbosity(0) self.assert_function_length_check_above_error_level(0) - cpp_style._set_verbose_level(old_verbosity) + self.set_verbosity(old_verbosity) def test_function_length_check_definition_below_severity1v0(self): - old_verbosity = cpp_style._set_verbose_level(0) + old_verbosity = self.set_verbosity(0) self.assert_function_length_check_below_error_level(1) - cpp_style._set_verbose_level(old_verbosity) + self.set_verbosity(old_verbosity) def test_function_length_check_definition_at_severity1v0(self): - old_verbosity = cpp_style._set_verbose_level(0) + old_verbosity = self.set_verbosity(0) self.assert_function_length_check_at_error_level(1) - cpp_style._set_verbose_level(old_verbosity) + self.set_verbosity(old_verbosity) def test_function_length_check_definition_below_severity1(self): self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1) @@ -2521,7 +2405,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def test_function_length_check_definition_severity1_plus_blanks(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 - trigger_level = self.trigger_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_lines(self.verbosity) self.assert_function_lengths_check( 'void test_blanks(int x)' + self.function_body(error_lines), ('Small and focused functions are preferred: ' @@ -2533,7 +2417,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def test_function_length_check_complex_definition_severity1(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 - trigger_level = self.trigger_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_lines(self.verbosity) self.assert_function_lengths_check( ('my_namespace::my_other_namespace::MyVeryLongTypeName*\n' 'my_namespace::my_other_namespace::MyFunction(int arg1, char* arg2)' @@ -2548,7 +2432,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def test_function_length_check_definition_severity1_for_test(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 - trigger_level = self.trigger_test_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_test_lines(self.verbosity) self.assert_function_lengths_check( 'TEST_F(Test, Mutator)' + self.function_body(error_lines), ('Small and focused functions are preferred: ' @@ -2560,7 +2444,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def 
test_function_length_check_definition_severity1_for_split_line_test(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 - trigger_level = self.trigger_test_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_test_lines(self.verbosity) self.assert_function_lengths_check( ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n' ' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces @@ -2575,7 +2459,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self): error_level = 1 error_lines = self.trigger_test_lines(error_level) + 1 - trigger_level = self.trigger_test_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_test_lines(self.verbosity) self.assert_function_lengths_check( ('TEST_F(' + self.function_body(error_lines)), @@ -2588,7 +2472,7 @@ class CheckForFunctionLengthsTest(CppStyleTestBase): def test_function_length_check_definition_severity1_with_embedded_no_lints(self): error_level = 1 error_lines = self.trigger_lines(error_level) + 1 - trigger_level = self.trigger_lines(cpp_style._verbose_level()) + trigger_level = self.trigger_lines(self.verbosity) self.assert_function_lengths_check( 'void test(int x)' + self.function_body_with_no_lints(error_lines), ('Small and focused functions are preferred: ' @@ -2787,16 +2671,6 @@ class NoNonVirtualDestructorsTest(CppStyleTestBase): 'virtual method(s), one declared at line 2. [runtime/virtual] [4]']) -class CppStyleStateTest(unittest.TestCase): - def test_error_count(self): - self.assertEquals(0, cpp_style.error_count()) - cpp_style._cpp_style_state.increment_error_count() - cpp_style._cpp_style_state.increment_error_count() - self.assertEquals(2, cpp_style.error_count()) - cpp_style._cpp_style_state.reset_error_count() - self.assertEquals(0, cpp_style.error_count()) - - class WebKitStyleTest(CppStyleTestBase): # for http://webkit.org/coding/coding-style.html @@ -2910,7 +2784,7 @@ class WebKitStyleTest(CppStyleTestBase): 'foo.cpp') self.assert_multi_line_lint( 'namespace WebCore {\n\n' - 'const char* foo(void* a = ";", // ;\n' + 'const char* foo(void* a = ";", // ;\n' ' void* b);\n' ' void* p;\n' '}\n', @@ -2919,7 +2793,7 @@ class WebKitStyleTest(CppStyleTestBase): self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'const char* foo[] = {\n' - ' "void* b);", // ;\n' + ' "void* b);", // ;\n' ' "asfdf",\n' ' }\n' ' void* p;\n' @@ -2929,7 +2803,7 @@ class WebKitStyleTest(CppStyleTestBase): self.assert_multi_line_lint( 'namespace WebCore {\n\n' 'const char* foo[] = {\n' - ' "void* b);", // }\n' + ' "void* b);", // }\n' ' "asfdf",\n' ' }\n' '}\n', @@ -2939,7 +2813,7 @@ class WebKitStyleTest(CppStyleTestBase): ' namespace WebCore {\n\n' ' void Document::Foo()\n' ' {\n' - 'start: // infinite loops are fun!\n' + 'start: // infinite loops are fun!\n' ' goto start;\n' ' }', 'namespace should never be indented. [whitespace/indent] [4]', @@ -3136,12 +3010,15 @@ class WebKitStyleTest(CppStyleTestBase): ' doIt();\n', '') self.assert_multi_line_lint( + ' if (condition) \\\n' + ' doIt();\n', + '') + self.assert_multi_line_lint( ' x++; y++;', 'More than one command on the same line [whitespace/newline] [4]') - # FIXME: Make this fail. - # self.assert_multi_line_lint( - # ' if (condition) doIt();\n', - # '') + self.assert_multi_line_lint( + ' if (condition) doIt();\n', + 'More than one command on the same line in if [whitespace/parens] [4]') # 2. 
An else statement should go on the same line as a preceding # close brace if one is present, else it should line up with the @@ -3169,7 +3046,16 @@ class WebKitStyleTest(CppStyleTestBase): ' doSomethingElseAgain();\n' '}\n', '') - + self.assert_multi_line_lint( + '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', + '') + self.assert_multi_line_lint( + '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', + 'Mismatching spaces inside () in if [whitespace/parens] [5]') + # FIXME: currently we only check first conditional, so we cannot detect errors in next ones. + # self.assert_multi_line_lint( + # '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n', + # 'Mismatching spaces inside () in if [whitespace/parens] [5]') self.assert_multi_line_lint( 'if (condition) {\n' ' doSomething();\n' @@ -3183,13 +3069,14 @@ class WebKitStyleTest(CppStyleTestBase): self.assert_multi_line_lint( 'if (condition) doSomething(); else doSomethingElse();\n', ['More than one command on the same line [whitespace/newline] [4]', - 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]']) - # FIXME: Make this fail. - # self.assert_multi_line_lint( - # 'if (condition) doSomething(); else {\n' - # ' doSomethingElse();\n' - # '}\n', - # '') + 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', + 'More than one command on the same line in if [whitespace/parens] [4]']) + self.assert_multi_line_lint( + 'if (condition) doSomething(); else {\n' + ' doSomethingElse();\n' + '}\n', + ['More than one command on the same line in if [whitespace/parens] [4]', + 'One line control clauses should not use braces. [whitespace/braces] [4]']) # 3. An else if statement should be written as an if statement # when the prior if concludes with a return statement. @@ -3493,7 +3380,7 @@ class WebKitStyleTest(CppStyleTestBase): ' [readability/null] [4]', 'foo.cpp') self.assert_lint( - '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.', + '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.', 'Use 0 instead of NULL.' ' [readability/null] [4]', 'foo.cpp') @@ -3519,6 +3406,14 @@ class WebKitStyleTest(CppStyleTestBase): '', 'foo.m') + # Make sure that the NULL check does not apply to g_object_{set,get} + self.assert_lint( + 'g_object_get(foo, "prop", &bar, NULL);', + '') + self.assert_lint( + 'g_object_set(foo, "prop", bar, NULL);', + '') + # 2. C++ and C bool values should be written as true and # false. Objective-C BOOL values should be written as YES and NO. # FIXME: Implement this. @@ -3683,6 +3578,7 @@ class WebKitStyleTest(CppStyleTestBase): 'under_score' + name_error_message) self.assert_lint('goto under_score;', 'under_score' + name_error_message) + self.assert_lint('delete static_cast<Foo*>(p);', '') # Multiple variables in one line. 
self.assert_lint('void myFunction(int variable1, int another_variable);', @@ -3703,15 +3599,86 @@ class WebKitStyleTest(CppStyleTestBase): self.assert_lint('void this_op_code(int var1, int var2)', '', 'JavaScriptCore/foo.cpp') self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_error_message) + # GObject requires certain magical names in class declarations. + self.assert_lint('void webkit_dom_object_init();', '') + self.assert_lint('void webkit_dom_object_class_init();', '') + + # The GTK+ APIs use GTK+ naming style, which includes lower-cased, _-separated values. + self.assert_lint('void this_is_a_gtk_style_name(int var1, int var2)', '', 'WebKit/gtk/webkit/foo.cpp') + + # There is an exception for some unit tests that begin with "tst_". + self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '') + + # The Qt API uses names that begin with "qt_". + self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '') + self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '') + # const_iterator is allowed as well. self.assert_lint('typedef VectorType::const_iterator const_iterator;', '') + def test_comments(self): + # A comment at the beginning of a line is ok. + self.assert_lint('// comment', '') + self.assert_lint(' // comment', '') + + self.assert_lint('} // namespace WebCore', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + def test_other(self): # FIXME: Implement this. pass +class CppProcessorTest(unittest.TestCase): + + """Tests CppProcessor class.""" + + def mock_handle_style_error(self): + pass + + def _processor(self): + return CppProcessor("foo", "h", self.mock_handle_style_error, 3) + + def test_init(self): + """Test __init__ constructor.""" + processor = self._processor() + self.assertEquals(processor.file_extension, "h") + self.assertEquals(processor.file_path, "foo") + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + self.assertEquals(processor.verbosity, 3) + + def test_eq(self): + """Test __eq__ equality function.""" + processor1 = self._processor() + processor2 = self._processor() + + # == calls __eq__. + self.assertTrue(processor1 == processor2) + + def mock_handle_style_error2(self): + pass + + # Verify that a difference in any argument cause equality to fail. + processor = CppProcessor("foo", "h", self.mock_handle_style_error, 3) + self.assertFalse(processor == CppProcessor("bar", "h", self.mock_handle_style_error, 3)) + self.assertFalse(processor == CppProcessor("foo", "c", self.mock_handle_style_error, 3)) + self.assertFalse(processor == CppProcessor("foo", "h", mock_handle_style_error2, 3)) + self.assertFalse(processor == CppProcessor("foo", "h", self.mock_handle_style_error, 4)) + + def test_ne(self): + """Test __ne__ inequality function.""" + processor1 = self._processor() + processor2 = self._processor() + + # != calls __ne__. + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + self.assertFalse(processor1 != processor2) + + def tearDown(): """A global check to make sure all error-categories have been tested. diff --git a/WebKitTools/Scripts/webkitpy/style/processors/text.py b/WebKitTools/Scripts/webkitpy/style/processors/text.py new file mode 100644 index 0000000..307e5b8 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/processors/text.py @@ -0,0 +1,56 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Checks WebKit style for text files.""" + + +class TextProcessor(object): + + """Processes text lines for checking style.""" + + def __init__(self, file_path, handle_style_error): + self.file_path = file_path + self.handle_style_error = handle_style_error + + def process(self, lines): + lines = (["// adjust line numbers to make the first line 1."] + lines) + + # FIXME: share with cpp_style. + for line_number, line in enumerate(lines): + if "\t" in line: + self.handle_style_error(line_number, + "whitespace/tab", 5, + "Line contains tab character.") + + +# FIXME: Remove this function (requires refactoring unit tests). +def process_file_data(filename, lines, error): + processor = TextProcessor(filename, error) + processor.process(lines) + diff --git a/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py b/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py new file mode 100644 index 0000000..62f825b --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for text_style.py.""" + +import unittest + +import text as text_style +from text import TextProcessor + +class TextStyleTestCase(unittest.TestCase): + """TestCase for text_style.py""" + + def assertNoError(self, lines): + """Asserts that the specified lines has no errors.""" + self.had_error = False + + def error_for_test(line_number, category, confidence, message): + """Records if an error occurs.""" + self.had_error = True + + text_style.process_file_data('', lines, error_for_test) + self.assert_(not self.had_error, '%s should not have any errors.' % lines) + + def assertError(self, lines, expected_line_number): + """Asserts that the specified lines has an error.""" + self.had_error = False + + def error_for_test(line_number, category, confidence, message): + """Checks if the expected error occurs.""" + self.assertEquals(expected_line_number, line_number) + self.assertEquals('whitespace/tab', category) + self.had_error = True + + text_style.process_file_data('', lines, error_for_test) + self.assert_(self.had_error, '%s should have an error [whitespace/tab].' % lines) + + + def test_no_error(self): + """Tests for no error cases.""" + self.assertNoError(['']) + self.assertNoError(['abc def', 'ggg']) + + + def test_error(self): + """Tests for error cases.""" + self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1) + self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>', + '', + '\tReviewed by NOBODY.'], 3) + + +class TextProcessorTest(unittest.TestCase): + + """Tests TextProcessor class.""" + + def mock_handle_style_error(self): + pass + + def test_init(self): + """Test __init__ constructor.""" + processor = TextProcessor("foo.txt", self.mock_handle_style_error) + self.assertEquals(processor.file_path, "foo.txt") + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + + +if __name__ == '__main__': + unittest.main() diff --git a/WebKitTools/Scripts/webkitpy/style/unittests.py b/WebKitTools/Scripts/webkitpy/style/unittests.py new file mode 100644 index 0000000..11c10e7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/unittests.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Runs style package unit tests.""" + +# This module is imported by test-webkitpy. + +import sys +import unittest + +from checker_unittest import * +from error_handlers_unittest import * +from processors.cpp_unittest import * +from processors.text_unittest import * diff --git a/WebKitTools/Scripts/webkitpy/style_references.py b/WebKitTools/Scripts/webkitpy/style_references.py new file mode 100644 index 0000000..2528c4d --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style_references.py @@ -0,0 +1,72 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""References to non-style modules used by the style package.""" + +# This module is a simple facade to the functionality used by the +# style package that comes from WebKit modules outside the style +# package. +# +# With this module, the only intra-package references (i.e. +# references to webkitpy modules outside the style folder) that +# the style package needs to make are relative references to +# this module. For example-- +# +# > from .. style_references import parse_patch +# +# Similarly, people maintaining non-style code are not beholden +# to the contents of the style package when refactoring or +# otherwise changing non-style code. They only have to be aware +# of this module. 
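As an aside on the facade pattern described in the comment block above: a minimal sketch of a hypothetical caller inside the style package, assuming the layout this patch introduces (the module and function names below are illustrative only and are not part of the patch):

    # Hypothetical style-package module; it reaches non-style webkitpy code
    # only through the style_references facade, as the comment above describes.
    from .. style_references import parse_patch, SimpleScm

    def files_changed_in_working_copy():
        scm = SimpleScm()               # wraps detect_scm_system() on the current directory
        patch = scm.create_patch()      # unified diff of local modifications
        return parse_patch(patch)       # the files affected by that diff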
+ +import os + +from diff_parser import DiffParser +from scm import detect_scm_system + + +def parse_patch(patch_string): + + """Parse a patch string and return the affected files.""" + + patch = DiffParser(patch_string.splitlines()) + return patch.files + + +class SimpleScm(object): + + """Simple facade to SCM for use by style package.""" + + def __init__(self): + cwd = os.path.abspath('.') + self._scm = detect_scm_system(cwd) + + def checkout_root(self): + """Return the source control root as an absolute path.""" + return self._scm.checkout_root + + def create_patch(self): + return self._scm.create_patch() + + def create_patch_since_local_commit(self, commit): + return self._scm.create_patch_since_local_commit(commit) + diff --git a/WebKitTools/Scripts/webkitpy/user.py b/WebKitTools/Scripts/webkitpy/user.py new file mode 100644 index 0000000..8dbf74c --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/user.py @@ -0,0 +1,58 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import shlex +import subprocess +import webbrowser + +class User(object): + def prompt(self, message): + return raw_input(message) + + def edit(self, files): + editor = os.environ.get("EDITOR") or "vi" + args = shlex.split(editor) + subprocess.call(args + files) + + def page(self, message): + pager = os.environ.get("PAGER") or "less" + try: + child_process = subprocess.Popen([pager], stdin=subprocess.PIPE) + child_process.communicate(input=message) + except IOError, e: + pass + + def confirm(self, message=None): + if not message: + message = "Continue?" 
+ response = raw_input("%s [Y/n]: " % message) + return not response or response.lower() == "y" + + def open_url(self, url): + webbrowser.open(url) diff --git a/WebKitTools/Scripts/webkitpy/user.pyc b/WebKitTools/Scripts/webkitpy/user.pyc Binary files differnew file mode 100644 index 0000000..7d6b687 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/user.pyc diff --git a/WebKitTools/Scripts/modules/logging.py b/WebKitTools/Scripts/webkitpy/webkit_logging.py index 7b7cec5..ba1c5eb 100644 --- a/WebKitTools/Scripts/modules/logging.py +++ b/WebKitTools/Scripts/webkitpy/webkit_logging.py @@ -28,6 +28,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # WebKit's Python module for logging +# This module is now deprecated in favor of python's built-in logging.py. import os import sys diff --git a/WebKitTools/Scripts/webkitpy/webkit_logging.pyc b/WebKitTools/Scripts/webkitpy/webkit_logging.pyc Binary files differnew file mode 100644 index 0000000..137f042 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/webkit_logging.pyc diff --git a/WebKitTools/Scripts/modules/logging_unittest.py b/WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py index b09a563..b940a4d 100644 --- a/WebKitTools/Scripts/modules/logging_unittest.py +++ b/WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py @@ -32,8 +32,8 @@ import StringIO import tempfile import unittest -from modules.executive import ScriptError -from modules.logging import * +from webkitpy.executive import ScriptError +from webkitpy.webkit_logging import * class LoggingTest(unittest.TestCase): diff --git a/WebKitTools/Scripts/modules/webkitport.py b/WebKitTools/Scripts/webkitpy/webkitport.py index 849ac4b..cd60a54 100644 --- a/WebKitTools/Scripts/modules/webkitport.py +++ b/WebKitTools/Scripts/webkitpy/webkitport.py @@ -1,9 +1,9 @@ # Copyright (C) 2009, Google Inc. All rights reserved. -# +# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: -# +# # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above @@ -13,7 +13,7 @@ # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. -# +# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR @@ -31,8 +31,11 @@ import os from optparse import make_option +from webkitpy.executive import Executive + + +class WebKitPort(object): -class WebKitPort(): # We might need to pass scm into this function for scm.checkout_root @classmethod def script_path(cls, script_name): @@ -40,37 +43,55 @@ class WebKitPort(): @staticmethod def port(port_name): - if port_name == "mac": - return MacPort - if port_name == "qt": - return QtPort - if port_name == "chromium": - return ChromiumPort + ports = { + "chromium": ChromiumPort, + "gtk": GtkPort, + "mac": MacPort, + "qt": QtPort, + } # FIXME: We should default to WinPort on Windows. 
- return MacPort + return ports.get(port_name, MacPort) @classmethod def name(cls): - raise NotImplementedError, "subclasses must implement" + raise NotImplementedError("subclasses must implement") @classmethod def flag(cls): - raise NotImplementedError, "subclasses must implement" + raise NotImplementedError("subclasses must implement") @classmethod def update_webkit_command(cls): return [cls.script_path("update-webkit")] @classmethod - def build_webkit_command(cls): - return [cls.script_path("build-webkit")] + def build_webkit_command(cls, build_style=None): + command = [cls.script_path("build-webkit")] + if build_style == "debug": + command.append("--debug") + if build_style == "release": + command.append("--release") + return command + + @classmethod + def run_javascriptcore_tests_command(cls): + return [cls.script_path("run-javascriptcore-tests")] @classmethod def run_webkit_tests_command(cls): return [cls.script_path("run-webkit-tests")] + @classmethod + def run_python_unittests_command(cls): + return [cls.script_path("test-webkitpy")] + + @classmethod + def run_perl_unittests_command(cls): + return [cls.script_path("test-webkitperl")] + class MacPort(WebKitPort): + @classmethod def name(cls): return "Mac" @@ -80,7 +101,32 @@ class MacPort(WebKitPort): return "--port=mac" +class GtkPort(WebKitPort): + + @classmethod + def name(cls): + return "Gtk" + + @classmethod + def flag(cls): + return "--port=gtk" + + @classmethod + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) + command.append("--gtk") + command.append('--makeargs="-j%s"' % Executive.cpu_count()) + return command + + @classmethod + def run_webkit_tests_command(cls): + command = WebKitPort.run_webkit_tests_command() + command.append("--gtk") + return command + + class QtPort(WebKitPort): + @classmethod def name(cls): return "Qt" @@ -90,13 +136,15 @@ class QtPort(WebKitPort): return "--port=qt" @classmethod - def build_webkit_command(cls): - command = WebKitPort.build_webkit_command() + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) command.append("--qt") + command.append('--makeargs="-j%s"' % Executive.cpu_count()) return command class ChromiumPort(WebKitPort): + @classmethod def name(cls): return "Chromium" @@ -112,7 +160,7 @@ class ChromiumPort(WebKitPort): return command @classmethod - def build_webkit_command(cls): - command = WebKitPort.build_webkit_command() + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) command.append("--chromium") return command diff --git a/WebKitTools/Scripts/webkitpy/webkitport.pyc b/WebKitTools/Scripts/webkitpy/webkitport.pyc Binary files differnew file mode 100644 index 0000000..e344aca --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/webkitport.pyc diff --git a/WebKitTools/Scripts/modules/webkitport_unittest.py b/WebKitTools/Scripts/webkitpy/webkitport_unittest.py index c713e83..202234f 100644 --- a/WebKitTools/Scripts/modules/webkitport_unittest.py +++ b/WebKitTools/Scripts/webkitpy/webkitport_unittest.py @@ -29,7 +29,9 @@ import unittest -from modules.webkitport import WebKitPort, MacPort, QtPort, ChromiumPort +from webkitpy.executive import Executive +from webkitpy.webkitport import WebKitPort, MacPort, GtkPort, QtPort, ChromiumPort + class WebKitPortTest(unittest.TestCase): def test_mac_port(self): @@ -37,18 +39,29 @@ class WebKitPortTest(unittest.TestCase): 
self.assertEquals(MacPort.flag(), "--port=mac") self.assertEquals(MacPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")]) self.assertEquals(MacPort.build_webkit_command(), [WebKitPort.script_path("build-webkit")]) + self.assertEquals(MacPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug"]) + self.assertEquals(MacPort.build_webkit_command(build_style="release"), [WebKitPort.script_path("build-webkit"), "--release"]) + + def test_gtk_port(self): + self.assertEquals(GtkPort.name(), "Gtk") + self.assertEquals(GtkPort.flag(), "--port=gtk") + self.assertEquals(GtkPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests"), "--gtk"]) + self.assertEquals(GtkPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--gtk", '--makeargs="-j%s"' % Executive.cpu_count()]) + self.assertEquals(GtkPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--gtk", '--makeargs="-j%s"' % Executive.cpu_count()]) def test_qt_port(self): self.assertEquals(QtPort.name(), "Qt") self.assertEquals(QtPort.flag(), "--port=qt") self.assertEquals(QtPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")]) - self.assertEquals(QtPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--qt"]) + self.assertEquals(QtPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--qt", '--makeargs="-j%s"' % Executive.cpu_count()]) + self.assertEquals(QtPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--qt", '--makeargs="-j%s"' % Executive.cpu_count()]) def test_chromium_port(self): self.assertEquals(ChromiumPort.name(), "Chromium") self.assertEquals(ChromiumPort.flag(), "--port=chromium") self.assertEquals(ChromiumPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")]) self.assertEquals(ChromiumPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--chromium"]) + self.assertEquals(ChromiumPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--chromium"]) self.assertEquals(ChromiumPort.update_webkit_command(), [WebKitPort.script_path("update-webkit"), "--chromium"])
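For reference, a short sketch of how the new port lookup and the build_style argument are intended to be used by callers, consistent with the unit tests above (the calling code is illustrative and not part of the patch; the script path and CPU count in the comment are placeholders):

    from webkitpy.webkitport import WebKitPort

    port = WebKitPort.port("gtk")     # unknown port names fall back to MacPort
    command = port.build_webkit_command(build_style="debug")
    # For GtkPort this is roughly:
    #   [".../build-webkit", "--debug", "--gtk", '--makeargs="-j<cpu count>"']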