| author    | Ben Murdoch <benm@google.com>                           | 2011-05-05 14:36:32 +0100 |
|-----------|---------------------------------------------------------|---------------------------|
| committer | Ben Murdoch <benm@google.com>                           | 2011-05-10 15:38:30 +0100 |
| commit    | f05b935882198ccf7d81675736e3aeb089c5113a (patch)        |                           |
| tree      | 4ea0ca838d9ef1b15cf17ddb3928efb427c7e5a1 /Tools/Scripts |                           |
| parent    | 60fbdcc62bced8db2cb1fd233cc4d1e4ea17db1b (diff)         |                           |
Merge WebKit at r74534: Initial merge by git.
Change-Id: I6ccd1154fa1b19c2ec2a66878eb675738735f1eb
Diffstat (limited to 'Tools/Scripts')
427 files changed, 83017 insertions, 0 deletions
diff --git a/Tools/Scripts/SpacingHeuristics.pm b/Tools/Scripts/SpacingHeuristics.pm new file mode 100644 index 0000000..7de0172 --- /dev/null +++ b/Tools/Scripts/SpacingHeuristics.pm @@ -0,0 +1,101 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Used for helping remove extra blank lines from files when processing. +# see split-class for an example usage (or other scripts in bugzilla) + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw(&resetSpacingHeuristics &isOnlyWhiteSpace &applySpacingHeuristicsAndPrint &setPreviousAllowedLine &setPreviousAllowedLine &printPendingEmptyLines &ignoringLine); + %EXPORT_TAGS = (); + @EXPORT_OK = (); +} + +our @EXPORT_OK; + +my $justFoundEmptyLine = 0; +my $previousLineWasDisallowed = 0; +my $previousAllowedLine = ""; +my $pendingEmptyLines = ""; + +sub resetSpacingHeuristics +{ + $justFoundEmptyLine = 0; + $previousLineWasDisallowed = 0; + $previousAllowedLine = ""; + $pendingEmptyLines = ""; +} + +sub isOnlyWhiteSpace +{ + my $line = shift; + my $isOnlyWhiteSpace = ($line =~ m/^\s+$/); + $pendingEmptyLines .= $line if ($isOnlyWhiteSpace); + return $isOnlyWhiteSpace; +} + +sub applySpacingHeuristicsAndPrint +{ + my ($out, $line) = @_; + + printPendingEmptyLines($out, $line); + $previousLineWasDisallowed = 0; + print $out $line; +} + +sub setPreviousAllowedLine +{ + my $line = shift; + $previousAllowedLine = $line; +} + +sub printPendingEmptyLines +{ + my $out = shift; + my $line = shift; + if ($previousLineWasDisallowed) { + if (!($pendingEmptyLines eq "") && !($previousAllowedLine =~ m/{\s*$/) && !($line =~ m/^\s*}/)) { + $pendingEmptyLines = "\n"; + } else { + $pendingEmptyLines = ""; + } + } + print $out $pendingEmptyLines; + $pendingEmptyLines = ""; +} + +sub ignoringLine +{ + # my $line = shift; # ignoring input argument + $previousLineWasDisallowed = 1; +} + +1;
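SpacingHeuristics.pm only does useful work when a caller drives its exported helpers in a filter loop, as split-class does. The sketch below is a minimal, hypothetical driver and is not part of this commit: the `use lib` path and the FIXME-comment rule for "disallowed" lines are illustrative assumptions.

```perl
#!/usr/bin/perl -w
# Hypothetical filter showing how the SpacingHeuristics helpers combine.
use strict;
use lib "Tools/Scripts";    # assumed path to the module inside a WebKit checkout
use SpacingHeuristics;

resetSpacingHeuristics();
while (my $line = <STDIN>) {
    # Whitespace-only lines are buffered by the module; printPendingEmptyLines()
    # later decides whether to emit one collapsed blank line or none.
    next if isOnlyWhiteSpace($line);
    if ($line =~ /^\s*#\s*FIXME/) {    # illustrative "disallowed line" rule
        ignoringLine($line);           # record that this (skipped) line was disallowed
        next;
    }
    applySpacingHeuristicsAndPrint(\*STDOUT, $line);
    setPreviousAllowedLine($line);
}
printPendingEmptyLines(\*STDOUT, "");  # flush any blank lines still buffered
```

When a disallowed line has just been dropped, printPendingEmptyLines() keeps at most one blank line, and none at all if the gap sits directly inside a brace pair, which is what removes the stray blank lines left behind by the deletion.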
\ No newline at end of file diff --git a/Tools/Scripts/VCSUtils.pm b/Tools/Scripts/VCSUtils.pm new file mode 100644 index 0000000..faed7ed --- /dev/null +++ b/Tools/Scripts/VCSUtils.pm @@ -0,0 +1,1768 @@ +# Copyright (C) 2007, 2008, 2009 Apple Inc. All rights reserved. +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) Research In Motion Limited 2010. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Module to share code to work with various version control systems. +package VCSUtils; + +use strict; +use warnings; + +use Cwd qw(); # "qw()" prevents warnings about redefining getcwd() with "use POSIX;" +use English; # for $POSTMATCH, etc. +use File::Basename; +use File::Spec; +use POSIX; + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw( + &callSilently + &canonicalizePath + &changeLogEmailAddress + &changeLogName + &chdirReturningRelativePath + &decodeGitBinaryPatch + &determineSVNRoot + &determineVCSRoot + &exitStatus + &fixChangeLogPatch + &gitBranch + &gitdiff2svndiff + &isGit + &isGitBranchBuild + &isGitDirectory + &isSVN + &isSVNDirectory + &isSVNVersion16OrNewer + &makeFilePathRelative + &mergeChangeLogs + &normalizePath + &parsePatch + &pathRelativeToSVNRepositoryRootForPath + &prepareParsedPatch + &removeEOL + &runPatchCommand + &scmMoveOrRenameFile + &scmToggleExecutableBit + &setChangeLogDateAndReviewer + &svnRevisionForDirectory + &svnStatus + &toWindowsLineEndings + ); + %EXPORT_TAGS = ( ); + @EXPORT_OK = (); +} + +our @EXPORT_OK; + +my $gitBranch; +my $gitRoot; +my $isGit; +my $isGitBranchBuild; +my $isSVN; +my $svnVersion; + +# Project time zone for Cupertino, CA, US +my $changeLogTimeZone = "PST8PDT"; + +my $gitDiffStartRegEx = qr#^diff --git (\w/)?(.+) (\w/)?([^\r\n]+)#; +my $svnDiffStartRegEx = qr#^Index: ([^\r\n]+)#; +my $svnPropertiesStartRegEx = qr#^Property changes on: ([^\r\n]+)#; # $1 is normally the same as the index path. 
+my $svnPropertyStartRegEx = qr#^(Modified|Name|Added|Deleted): ([^\r\n]+)#; # $2 is the name of the property. +my $svnPropertyValueStartRegEx = qr#^ (\+|-|Merged|Reverse-merged) ([^\r\n]+)#; # $2 is the start of the property's value (which may span multiple lines). + +# This method is for portability. Return the system-appropriate exit +# status of a child process. +# +# Args: pass the child error status returned by the last pipe close, +# for example "$?". +sub exitStatus($) +{ + my ($returnvalue) = @_; + if ($^O eq "MSWin32") { + return $returnvalue >> 8; + } + return WEXITSTATUS($returnvalue); +} + +# Call a function while suppressing STDERR, and return the return values +# as an array. +sub callSilently($@) { + my ($func, @args) = @_; + + # The following pattern was taken from here: + # http://www.sdsc.edu/~moreland/courses/IntroPerl/docs/manual/pod/perlfunc/open.html + # + # Also see this Perl documentation (search for "open OLDERR"): + # http://perldoc.perl.org/functions/open.html + open(OLDERR, ">&STDERR"); + close(STDERR); + my @returnValue = &$func(@args); + open(STDERR, ">&OLDERR"); + close(OLDERR); + + return @returnValue; +} + +sub toWindowsLineEndings +{ + my ($text) = @_; + $text =~ s/\n/\r\n/g; + return $text; +} + +# Note, this method will not error if the file corresponding to the $source path does not exist. +sub scmMoveOrRenameFile +{ + my ($source, $destination) = @_; + return if ! -e $source; + if (isSVN()) { + system("svn", "move", $source, $destination); + } elsif (isGit()) { + system("git", "mv", $source, $destination); + } +} + +# Note, this method will not error if the file corresponding to the path does not exist. +sub scmToggleExecutableBit +{ + my ($path, $executableBitDelta) = @_; + return if ! -e $path; + if ($executableBitDelta == 1) { + scmAddExecutableBit($path); + } elsif ($executableBitDelta == -1) { + scmRemoveExecutableBit($path); + } +} + +sub scmAddExecutableBit($) +{ + my ($path) = @_; + + if (isSVN()) { + system("svn", "propset", "svn:executable", "on", $path) == 0 or die "Failed to run 'svn propset svn:executable on $path'."; + } elsif (isGit()) { + chmod(0755, $path); + } +} + +sub scmRemoveExecutableBit($) +{ + my ($path) = @_; + + if (isSVN()) { + system("svn", "propdel", "svn:executable", $path) == 0 or die "Failed to run 'svn propdel svn:executable $path'."; + } elsif (isGit()) { + chmod(0664, $path); + } +} + +sub isGitDirectory($) +{ + my ($dir) = @_; + return system("cd $dir && git rev-parse > " . File::Spec->devnull() . 
" 2>&1") == 0; +} + +sub isGit() +{ + return $isGit if defined $isGit; + + $isGit = isGitDirectory("."); + return $isGit; +} + +sub gitBranch() +{ + unless (defined $gitBranch) { + chomp($gitBranch = `git symbolic-ref -q HEAD`); + $gitBranch = "" if exitStatus($?); + $gitBranch =~ s#^refs/heads/##; + $gitBranch = "" if $gitBranch eq "master"; + } + + return $gitBranch; +} + +sub isGitBranchBuild() +{ + my $branch = gitBranch(); + chomp(my $override = `git config --bool branch.$branch.webKitBranchBuild`); + return 1 if $override eq "true"; + return 0 if $override eq "false"; + + unless (defined $isGitBranchBuild) { + chomp(my $gitBranchBuild = `git config --bool core.webKitBranchBuild`); + $isGitBranchBuild = $gitBranchBuild eq "true"; + } + + return $isGitBranchBuild; +} + +sub isSVNDirectory($) +{ + my ($dir) = @_; + + return -d File::Spec->catdir($dir, ".svn"); +} + +sub isSVN() +{ + return $isSVN if defined $isSVN; + + $isSVN = isSVNDirectory("."); + return $isSVN; +} + +sub svnVersion() +{ + return $svnVersion if defined $svnVersion; + + if (!isSVN()) { + $svnVersion = 0; + } else { + chomp($svnVersion = `svn --version --quiet`); + } + return $svnVersion; +} + +sub isSVNVersion16OrNewer() +{ + my $version = svnVersion(); + return eval "v$version" ge v1.6; +} + +sub chdirReturningRelativePath($) +{ + my ($directory) = @_; + my $previousDirectory = Cwd::getcwd(); + chdir $directory; + my $newDirectory = Cwd::getcwd(); + return "." if $newDirectory eq $previousDirectory; + return File::Spec->abs2rel($previousDirectory, $newDirectory); +} + +sub determineGitRoot() +{ + chomp(my $gitDir = `git rev-parse --git-dir`); + return dirname($gitDir); +} + +sub determineSVNRoot() +{ + my $last = ''; + my $path = '.'; + my $parent = '..'; + my $repositoryRoot; + my $repositoryUUID; + while (1) { + my $thisRoot; + my $thisUUID; + # Ignore error messages in case we've run past the root of the checkout. + open INFO, "svn info '$path' 2> " . File::Spec->devnull() . " |" or die; + while (<INFO>) { + if (/^Repository Root: (.+)/) { + $thisRoot = $1; + } + if (/^Repository UUID: (.+)/) { + $thisUUID = $1; + } + if ($thisRoot && $thisUUID) { + local $/ = undef; + <INFO>; # Consume the rest of the input. + } + } + close INFO; + + # It's possible (e.g. for developers of some ports) to have a WebKit + # checkout in a subdirectory of another checkout. So abort if the + # repository root or the repository UUID suddenly changes. + last if !$thisUUID; + $repositoryUUID = $thisUUID if !$repositoryUUID; + last if $thisUUID ne $repositoryUUID; + + last if !$thisRoot; + $repositoryRoot = $thisRoot if !$repositoryRoot; + last if $thisRoot ne $repositoryRoot; + + $last = $path; + $path = File::Spec->catdir($parent, $path); + } + + return File::Spec->rel2abs($last); +} + +sub determineVCSRoot() +{ + if (isGit()) { + return determineGitRoot(); + } + + if (!isSVN()) { + # Some users have a workflow where svn-create-patch, svn-apply and + # svn-unapply are used outside of multiple svn working directores, + # so warn the user and assume Subversion is being used in this case. 
+ warn "Unable to determine VCS root; assuming Subversion"; + $isSVN = 1; + } + + return determineSVNRoot(); +} + +sub svnRevisionForDirectory($) +{ + my ($dir) = @_; + my $revision; + + if (isSVNDirectory($dir)) { + my $svnInfo = `LC_ALL=C svn info $dir | grep Revision:`; + ($revision) = ($svnInfo =~ m/Revision: (\d+).*/g); + } elsif (isGitDirectory($dir)) { + my $gitLog = `cd $dir && LC_ALL=C git log --grep='git-svn-id: ' -n 1 | grep git-svn-id:`; + ($revision) = ($gitLog =~ m/ +git-svn-id: .+@(\d+) /g); + } + die "Unable to determine current SVN revision in $dir" unless (defined $revision); + return $revision; +} + +sub pathRelativeToSVNRepositoryRootForPath($) +{ + my ($file) = @_; + my $relativePath = File::Spec->abs2rel($file); + + my $svnInfo; + if (isSVN()) { + $svnInfo = `LC_ALL=C svn info $relativePath`; + } elsif (isGit()) { + $svnInfo = `LC_ALL=C git svn info $relativePath`; + } + + $svnInfo =~ /.*^URL: (.*?)$/m; + my $svnURL = $1; + + $svnInfo =~ /.*^Repository Root: (.*?)$/m; + my $repositoryRoot = $1; + + $svnURL =~ s/$repositoryRoot\///; + return $svnURL; +} + +sub makeFilePathRelative($) +{ + my ($path) = @_; + return $path unless isGit(); + + unless (defined $gitRoot) { + chomp($gitRoot = `git rev-parse --show-cdup`); + } + return $gitRoot . $path; +} + +sub normalizePath($) +{ + my ($path) = @_; + $path =~ s/\\/\//g; + return $path; +} + +sub canonicalizePath($) +{ + my ($file) = @_; + + # Remove extra slashes and '.' directories in path + $file = File::Spec->canonpath($file); + + # Remove '..' directories in path + my @dirs = (); + foreach my $dir (File::Spec->splitdir($file)) { + if ($dir eq '..' && $#dirs >= 0 && $dirs[$#dirs] ne '..') { + pop(@dirs); + } else { + push(@dirs, $dir); + } + } + return ($#dirs >= 0) ? File::Spec->catdir(@dirs) : "."; +} + +sub removeEOL($) +{ + my ($line) = @_; + return "" unless $line; + + $line =~ s/[\r\n]+$//g; + return $line; +} + +sub svnStatus($) +{ + my ($fullPath) = @_; + my $svnStatus; + open SVN, "svn status --non-interactive --non-recursive '$fullPath' |" or die; + if (-d $fullPath) { + # When running "svn stat" on a directory, we can't assume that only one + # status will be returned (since any files with a status below the + # directory will be returned), and we can't assume that the directory will + # be first (since any files with unknown status will be listed first). + my $normalizedFullPath = File::Spec->catdir(File::Spec->splitdir($fullPath)); + while (<SVN>) { + # Input may use a different EOL sequence than $/, so avoid chomp. + $_ = removeEOL($_); + my $normalizedStatPath = File::Spec->catdir(File::Spec->splitdir(substr($_, 7))); + if ($normalizedFullPath eq $normalizedStatPath) { + $svnStatus = "$_\n"; + last; + } + } + # Read the rest of the svn command output to avoid a broken pipe warning. + local $/ = undef; + <SVN>; + } + else { + # Files will have only one status returned. + $svnStatus = removeEOL(<SVN>) . "\n"; + } + close SVN; + return $svnStatus; +} + +# Return whether the given file mode is executable in the source control +# sense. We make this determination based on whether the executable bit +# is set for "others" rather than the stronger condition that it be set +# for the user, group, and others. This is sufficient for distinguishing +# the default behavior in Git and SVN. +# +# Args: +# $fileMode: A number or string representing a file mode in octal notation. 
+sub isExecutable($) +{ + my $fileMode = shift; + + return $fileMode % 2; +} + +# Parse the next Git diff header from the given file handle, and advance +# the handle so the last line read is the first line after the header. +# +# This subroutine dies if given leading junk. +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the header to parse. This should be a line +# beginning with "diff --git". +# $line: the line last read from $fileHandle +# +# Returns ($headerHashRef, $lastReadLine): +# $headerHashRef: a hash reference representing a diff header, as follows-- +# copiedFromPath: the path from which the file was copied or moved if +# the diff is a copy or move. +# executableBitDelta: the value 1 or -1 if the executable bit was added or +# removed, respectively. New and deleted files have +# this value only if the file is executable, in which +# case the value is 1 and -1, respectively. +# indexPath: the path of the target file. +# isBinary: the value 1 if the diff is for a binary file. +# isDeletion: the value 1 if the diff is a file deletion. +# isCopyWithChanges: the value 1 if the file was copied or moved and +# the target file was changed in some way after being +# copied or moved (e.g. if its contents or executable +# bit were changed). +# isNew: the value 1 if the diff is for a new file. +# shouldDeleteSource: the value 1 if the file was copied or moved and +# the source file was deleted -- i.e. if the copy +# was actually a move. +# svnConvertedText: the header text with some lines converted to SVN +# format. Git-specific lines are preserved. +# $lastReadLine: the line last read from $fileHandle. +sub parseGitDiffHeader($$) +{ + my ($fileHandle, $line) = @_; + + $_ = $line; + + my $indexPath; + if (/$gitDiffStartRegEx/) { + # The first and second paths can differ in the case of copies + # and renames. We use the second file path because it is the + # destination path. + $indexPath = $4; + # Use $POSTMATCH to preserve the end-of-line character. + $_ = "Index: $indexPath$POSTMATCH"; # Convert to SVN format. + } else { + die("Could not parse leading \"diff --git\" line: \"$line\"."); + } + + my $copiedFromPath; + my $foundHeaderEnding; + my $isBinary; + my $isDeletion; + my $isNew; + my $newExecutableBit = 0; + my $oldExecutableBit = 0; + my $shouldDeleteSource = 0; + my $similarityIndex = 0; + my $svnConvertedText; + while (1) { + # Temporarily strip off any end-of-line characters to simplify + # regex matching below. + s/([\n\r]+)$//; + my $eol = $1; + + if (/^(deleted file|old) mode (\d+)/) { + $oldExecutableBit = (isExecutable($2) ? 1 : 0); + $isDeletion = 1 if $1 eq "deleted file"; + } elsif (/^new( file)? mode (\d+)/) { + $newExecutableBit = (isExecutable($2) ? 1 : 0); + $isNew = 1 if $1; + } elsif (/^similarity index (\d+)%/) { + $similarityIndex = $1; + } elsif (/^copy from (\S+)/) { + $copiedFromPath = $1; + } elsif (/^rename from (\S+)/) { + # FIXME: Record this as a move rather than as a copy-and-delete. + # This will simplify adding rename support to svn-unapply. + # Otherwise, the hash for a deletion would have to know + # everything about the file being deleted in order to + # support undoing itself. Recording as a move will also + # permit us to use "svn move" and "git move". + $copiedFromPath = $1; + $shouldDeleteSource = 1; + } elsif (/^--- \S+/) { + $_ = "--- $indexPath"; # Convert to SVN format. + } elsif (/^\+\+\+ \S+/) { + $_ = "+++ $indexPath"; # Convert to SVN format. 
+ $foundHeaderEnding = 1; + } elsif (/^GIT binary patch$/ ) { + $isBinary = 1; + $foundHeaderEnding = 1; + # The "git diff" command includes a line of the form "Binary files + # <path1> and <path2> differ" if the --binary flag is not used. + } elsif (/^Binary files / ) { + die("Error: the Git diff contains a binary file without the binary data in ". + "line: \"$_\". Be sure to use the --binary flag when invoking \"git diff\" ". + "with diffs containing binary files."); + } + + $svnConvertedText .= "$_$eol"; # Also restore end-of-line characters. + + $_ = <$fileHandle>; # Not defined if end-of-file reached. + + last if (!defined($_) || /$gitDiffStartRegEx/ || $foundHeaderEnding); + } + + my $executableBitDelta = $newExecutableBit - $oldExecutableBit; + + my %header; + + $header{copiedFromPath} = $copiedFromPath if $copiedFromPath; + $header{executableBitDelta} = $executableBitDelta if $executableBitDelta; + $header{indexPath} = $indexPath; + $header{isBinary} = $isBinary if $isBinary; + $header{isCopyWithChanges} = 1 if ($copiedFromPath && ($similarityIndex != 100 || $executableBitDelta)); + $header{isDeletion} = $isDeletion if $isDeletion; + $header{isNew} = $isNew if $isNew; + $header{shouldDeleteSource} = $shouldDeleteSource if $shouldDeleteSource; + $header{svnConvertedText} = $svnConvertedText; + + return (\%header, $_); +} + +# Parse the next SVN diff header from the given file handle, and advance +# the handle so the last line read is the first line after the header. +# +# This subroutine dies if given leading junk or if it could not detect +# the end of the header block. +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the header to parse. This should be a line +# beginning with "Index:". +# $line: the line last read from $fileHandle +# +# Returns ($headerHashRef, $lastReadLine): +# $headerHashRef: a hash reference representing a diff header, as follows-- +# copiedFromPath: the path from which the file was copied if the diff +# is a copy. +# indexPath: the path of the target file, which is the path found in +# the "Index:" line. +# isBinary: the value 1 if the diff is for a binary file. +# isNew: the value 1 if the diff is for a new file. +# sourceRevision: the revision number of the source, if it exists. This +# is the same as the revision number the file was copied +# from, in the case of a file copy. +# svnConvertedText: the header text converted to a header with the paths +# in some lines corrected. +# $lastReadLine: the line last read from $fileHandle. +sub parseSvnDiffHeader($$) +{ + my ($fileHandle, $line) = @_; + + $_ = $line; + + my $indexPath; + if (/$svnDiffStartRegEx/) { + $indexPath = $1; + } else { + die("First line of SVN diff does not begin with \"Index \": \"$_\""); + } + + my $copiedFromPath; + my $foundHeaderEnding; + my $isBinary; + my $isNew; + my $sourceRevision; + my $svnConvertedText; + while (1) { + # Temporarily strip off any end-of-line characters to simplify + # regex matching below. + s/([\n\r]+)$//; + my $eol = $1; + + # Fix paths on ""---" and "+++" lines to match the leading + # index line. + if (s/^--- \S+/--- $indexPath/) { + # --- + if (/^--- .+\(revision (\d+)\)/) { + $sourceRevision = $1; + $isNew = 1 if !$sourceRevision; # if revision 0. + if (/\(from (\S+):(\d+)\)$/) { + # The "from" clause is created by svn-create-patch, in + # which case there is always also a "revision" clause. + $copiedFromPath = $1; + die("Revision number \"$2\" in \"from\" clause does not match " . 
+ "source revision number \"$sourceRevision\".") if ($2 != $sourceRevision); + } + } + } elsif (s/^\+\+\+ \S+/+++ $indexPath/) { + $foundHeaderEnding = 1; + } elsif (/^Cannot display: file marked as a binary type.$/) { + $isBinary = 1; + $foundHeaderEnding = 1; + } + + $svnConvertedText .= "$_$eol"; # Also restore end-of-line characters. + + $_ = <$fileHandle>; # Not defined if end-of-file reached. + + last if (!defined($_) || /$svnDiffStartRegEx/ || $foundHeaderEnding); + } + + if (!$foundHeaderEnding) { + die("Did not find end of header block corresponding to index path \"$indexPath\"."); + } + + my %header; + + $header{copiedFromPath} = $copiedFromPath if $copiedFromPath; + $header{indexPath} = $indexPath; + $header{isBinary} = $isBinary if $isBinary; + $header{isNew} = $isNew if $isNew; + $header{sourceRevision} = $sourceRevision if $sourceRevision; + $header{svnConvertedText} = $svnConvertedText; + + return (\%header, $_); +} + +# Parse the next diff header from the given file handle, and advance +# the handle so the last line read is the first line after the header. +# +# This subroutine dies if given leading junk or if it could not detect +# the end of the header block. +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the header to parse. For SVN-formatted diffs, this +# is a line beginning with "Index:". For Git, this is a line +# beginning with "diff --git". +# $line: the line last read from $fileHandle +# +# Returns ($headerHashRef, $lastReadLine): +# $headerHashRef: a hash reference representing a diff header +# copiedFromPath: the path from which the file was copied if the diff +# is a copy. +# executableBitDelta: the value 1 or -1 if the executable bit was added or +# removed, respectively. New and deleted files have +# this value only if the file is executable, in which +# case the value is 1 and -1, respectively. +# indexPath: the path of the target file. +# isBinary: the value 1 if the diff is for a binary file. +# isGit: the value 1 if the diff is Git-formatted. +# isSvn: the value 1 if the diff is SVN-formatted. +# sourceRevision: the revision number of the source, if it exists. This +# is the same as the revision number the file was copied +# from, in the case of a file copy. +# svnConvertedText: the header text with some lines converted to SVN +# format. Git-specific lines are preserved. +# $lastReadLine: the line last read from $fileHandle. +sub parseDiffHeader($$) +{ + my ($fileHandle, $line) = @_; + + my $header; # This is a hash ref. + my $isGit; + my $isSvn; + my $lastReadLine; + + if ($line =~ $svnDiffStartRegEx) { + $isSvn = 1; + ($header, $lastReadLine) = parseSvnDiffHeader($fileHandle, $line); + } elsif ($line =~ $gitDiffStartRegEx) { + $isGit = 1; + ($header, $lastReadLine) = parseGitDiffHeader($fileHandle, $line); + } else { + die("First line of diff does not begin with \"Index:\" or \"diff --git\": \"$line\""); + } + + $header->{isGit} = $isGit if $isGit; + $header->{isSvn} = $isSvn if $isSvn; + + return ($header, $lastReadLine); +} + +# FIXME: The %diffHash "object" should not have an svnConvertedText property. +# Instead, the hash object should store its information in a +# structured way as properties. This should be done in a way so +# that, if necessary, the text of an SVN or Git patch can be +# reconstructed from the information in those hash properties. +# +# A %diffHash is a hash representing a source control diff of a single +# file operation (e.g. a file modification, copy, or delete). 
+# +# These hashes appear, for example, in the parseDiff(), parsePatch(), +# and prepareParsedPatch() subroutines of this package. +# +# The corresponding values are-- +# +# copiedFromPath: the path from which the file was copied if the diff +# is a copy. +# executableBitDelta: the value 1 or -1 if the executable bit was added or +# removed from the target file, respectively. +# indexPath: the path of the target file. For SVN-formatted diffs, +# this is the same as the path in the "Index:" line. +# isBinary: the value 1 if the diff is for a binary file. +# isDeletion: the value 1 if the diff is known from the header to be a deletion. +# isGit: the value 1 if the diff is Git-formatted. +# isNew: the value 1 if the dif is known from the header to be a new file. +# isSvn: the value 1 if the diff is SVN-formatted. +# sourceRevision: the revision number of the source, if it exists. This +# is the same as the revision number the file was copied +# from, in the case of a file copy. +# svnConvertedText: the diff with some lines converted to SVN format. +# Git-specific lines are preserved. + +# Parse one diff from a patch file created by svn-create-patch, and +# advance the file handle so the last line read is the first line +# of the next header block. +# +# This subroutine preserves any leading junk encountered before the header. +# +# Composition of an SVN diff +# +# There are three parts to an SVN diff: the header, the property change, and +# the binary contents, in that order. Either the header or the property change +# may be ommitted, but not both. If there are binary changes, then you always +# have all three. +# +# Args: +# $fileHandle: a file handle advanced to the first line of the next +# header block. Leading junk is okay. +# $line: the line last read from $fileHandle. +# +# Returns ($diffHashRefs, $lastReadLine): +# $diffHashRefs: A reference to an array of references to %diffHash hashes. +# See the %diffHash documentation above. +# $lastReadLine: the line last read from $fileHandle +sub parseDiff($$) +{ + # FIXME: Adjust this method so that it dies if the first line does not + # match the start of a diff. This will require a change to + # parsePatch() so that parsePatch() skips over leading junk. + my ($fileHandle, $line) = @_; + + my $headerStartRegEx = $svnDiffStartRegEx; # SVN-style header for the default + + my $headerHashRef; # Last header found, as returned by parseDiffHeader(). + my $svnPropertiesHashRef; # Last SVN properties diff found, as returned by parseSvnDiffProperties(). + my $svnText; + while (defined($line)) { + if (!$headerHashRef && ($line =~ $gitDiffStartRegEx)) { + # Then assume all diffs in the patch are Git-formatted. This + # block was made to be enterable at most once since we assume + # all diffs in the patch are formatted the same (SVN or Git). + $headerStartRegEx = $gitDiffStartRegEx; + } + + if ($line =~ $svnPropertiesStartRegEx) { + my $propertyPath = $1; + if ($svnPropertiesHashRef || $headerHashRef && ($propertyPath ne $headerHashRef->{indexPath})) { + # This is the start of the second diff in the while loop, which happens to + # be a property diff. If $svnPropertiesHasRef is defined, then this is the + # second consecutive property diff, otherwise it's the start of a property + # diff for a file that only has property changes. + last; + } + ($svnPropertiesHashRef, $line) = parseSvnDiffProperties($fileHandle, $line); + next; + } + if ($line !~ $headerStartRegEx) { + # Then we are in the body of the diff. 
+ $svnText .= $line; + $line = <$fileHandle>; + next; + } # Otherwise, we found a diff header. + + if ($svnPropertiesHashRef || $headerHashRef) { + # Then either we just processed an SVN property change or this + # is the start of the second diff header of this while loop. + last; + } + + ($headerHashRef, $line) = parseDiffHeader($fileHandle, $line); + + $svnText .= $headerHashRef->{svnConvertedText}; + } + + my @diffHashRefs; + + if ($headerHashRef->{shouldDeleteSource}) { + my %deletionHash; + $deletionHash{indexPath} = $headerHashRef->{copiedFromPath}; + $deletionHash{isDeletion} = 1; + push @diffHashRefs, \%deletionHash; + } + if ($headerHashRef->{copiedFromPath}) { + my %copyHash; + $copyHash{copiedFromPath} = $headerHashRef->{copiedFromPath}; + $copyHash{indexPath} = $headerHashRef->{indexPath}; + $copyHash{sourceRevision} = $headerHashRef->{sourceRevision} if $headerHashRef->{sourceRevision}; + if ($headerHashRef->{isSvn}) { + $copyHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta}; + } + push @diffHashRefs, \%copyHash; + } + + # Note, the order of evaluation for the following if conditional has been explicitly chosen so that + # it evaluates to false when there is no headerHashRef (e.g. a property change diff for a file that + # only has property changes). + if ($headerHashRef->{isCopyWithChanges} || (%$headerHashRef && !$headerHashRef->{copiedFromPath})) { + # Then add the usual file modification. + my %diffHash; + # FIXME: We should expand this code to support other properties. In the future, + # parseSvnDiffProperties may return a hash whose keys are the properties. + if ($headerHashRef->{isSvn}) { + # SVN records the change to the executable bit in a separate property change diff + # that follows the contents of the diff, except for binary diffs. For binary + # diffs, the property change diff follows the diff header. + $diffHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta}; + } elsif ($headerHashRef->{isGit}) { + # Git records the change to the executable bit in the header of a diff. + $diffHash{executableBitDelta} = $headerHashRef->{executableBitDelta} if $headerHashRef->{executableBitDelta}; + } + $diffHash{indexPath} = $headerHashRef->{indexPath}; + $diffHash{isBinary} = $headerHashRef->{isBinary} if $headerHashRef->{isBinary}; + $diffHash{isDeletion} = $headerHashRef->{isDeletion} if $headerHashRef->{isDeletion}; + $diffHash{isGit} = $headerHashRef->{isGit} if $headerHashRef->{isGit}; + $diffHash{isNew} = $headerHashRef->{isNew} if $headerHashRef->{isNew}; + $diffHash{isSvn} = $headerHashRef->{isSvn} if $headerHashRef->{isSvn}; + if (!$headerHashRef->{copiedFromPath}) { + # If the file was copied, then we have already incorporated the + # sourceRevision information into the change. + $diffHash{sourceRevision} = $headerHashRef->{sourceRevision} if $headerHashRef->{sourceRevision}; + } + # FIXME: Remove the need for svnConvertedText. See the %diffHash + # code comments above for more information. + # + # Note, we may not always have SVN converted text since we intend + # to deprecate it in the future. For example, a property change + # diff for a file that only has property changes will not return + # any SVN converted text. + $diffHash{svnConvertedText} = $svnText if $svnText; + push @diffHashRefs, \%diffHash; + } + + if (!%$headerHashRef && $svnPropertiesHashRef) { + # A property change diff for a file that only has property changes. 
+ my %propertyChangeHash; + $propertyChangeHash{executableBitDelta} = $svnPropertiesHashRef->{executableBitDelta} if $svnPropertiesHashRef->{executableBitDelta}; + $propertyChangeHash{indexPath} = $svnPropertiesHashRef->{propertyPath}; + $propertyChangeHash{isSvn} = 1; + push @diffHashRefs, \%propertyChangeHash; + } + + return (\@diffHashRefs, $line); +} + +# Parse an SVN property change diff from the given file handle, and advance +# the handle so the last line read is the first line after this diff. +# +# For the case of an SVN binary diff, the binary contents will follow the +# the property changes. +# +# This subroutine dies if the first line does not begin with "Property changes on" +# or if the separator line that follows this line is missing. +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the footer to parse. This line begins with +# "Property changes on". +# $line: the line last read from $fileHandle. +# +# Returns ($propertyHashRef, $lastReadLine): +# $propertyHashRef: a hash reference representing an SVN diff footer. +# propertyPath: the path of the target file. +# executableBitDelta: the value 1 or -1 if the executable bit was added or +# removed from the target file, respectively. +# $lastReadLine: the line last read from $fileHandle. +sub parseSvnDiffProperties($$) +{ + my ($fileHandle, $line) = @_; + + $_ = $line; + + my %footer; + if (/$svnPropertiesStartRegEx/) { + $footer{propertyPath} = $1; + } else { + die("Failed to find start of SVN property change, \"Property changes on \": \"$_\""); + } + + # We advance $fileHandle two lines so that the next line that + # we process is $svnPropertyStartRegEx in a well-formed footer. + # A well-formed footer has the form: + # Property changes on: FileA + # ___________________________________________________________________ + # Added: svn:executable + # + * + $_ = <$fileHandle>; # Not defined if end-of-file reached. + my $separator = "_" x 67; + if (defined($_) && /^$separator[\r\n]+$/) { + $_ = <$fileHandle>; + } else { + die("Failed to find separator line: \"$_\"."); + } + + # FIXME: We should expand this to support other SVN properties + # (e.g. return a hash of property key-values that represents + # all properties). + # + # Notice, we keep processing until we hit end-of-file or some + # line that does not resemble $svnPropertyStartRegEx, such as + # the empty line that precedes the start of the binary contents + # of a patch, or the start of the next diff (e.g. "Index:"). + my $propertyHashRef; + while (defined($_) && /$svnPropertyStartRegEx/) { + ($propertyHashRef, $_) = parseSvnProperty($fileHandle, $_); + if ($propertyHashRef->{name} eq "svn:executable") { + # Notice, for SVN properties, propertyChangeDelta is always non-zero + # because a property can only be added or removed. + $footer{executableBitDelta} = $propertyHashRef->{propertyChangeDelta}; + } + } + + return(\%footer, $_); +} + +# Parse the next SVN property from the given file handle, and advance the handle so the last +# line read is the first line after the property. +# +# This subroutine dies if the first line is not a valid start of an SVN property, +# or the property is missing a value, or the property change type (e.g. "Added") +# does not correspond to the property value type (e.g. "+"). +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the property to parse. This should be a line +# that matches $svnPropertyStartRegEx. 
+# $line: the line last read from $fileHandle. +# +# Returns ($propertyHashRef, $lastReadLine): +# $propertyHashRef: a hash reference representing a SVN property. +# name: the name of the property. +# value: the last property value. For instance, suppose the property is "Modified". +# Then it has both a '-' and '+' property value in that order. Therefore, +# the value of this key is the value of the '+' property by ordering (since +# it is the last value). +# propertyChangeDelta: the value 1 or -1 if the property was added or +# removed, respectively. +# $lastReadLine: the line last read from $fileHandle. +sub parseSvnProperty($$) +{ + my ($fileHandle, $line) = @_; + + $_ = $line; + + my $propertyName; + my $propertyChangeType; + if (/$svnPropertyStartRegEx/) { + $propertyChangeType = $1; + $propertyName = $2; + } else { + die("Failed to find SVN property: \"$_\"."); + } + + $_ = <$fileHandle>; # Not defined if end-of-file reached. + + # The "svn diff" command neither inserts newline characters between property values + # nor between successive properties. + # + # FIXME: We do not support property values that contain tailing newline characters + # as it is difficult to disambiguate these trailing newlines from the empty + # line that precedes the contents of a binary patch. + my $propertyValue; + my $propertyValueType; + while (defined($_) && /$svnPropertyValueStartRegEx/) { + # Note, a '-' property may be followed by a '+' property in the case of a "Modified" + # or "Name" property. We only care about the ending value (i.e. the '+' property) + # in such circumstances. So, we take the property value for the property to be its + # last parsed property value. + # + # FIXME: We may want to consider strictly enforcing a '-', '+' property ordering or + # add error checking to prevent '+', '+', ..., '+' and other invalid combinations. + $propertyValueType = $1; + ($propertyValue, $_) = parseSvnPropertyValue($fileHandle, $_); + } + + if (!$propertyValue) { + die("Failed to find the property value for the SVN property \"$propertyName\": \"$_\"."); + } + + my $propertyChangeDelta; + if ($propertyValueType eq "+" || $propertyValueType eq "Merged") { + $propertyChangeDelta = 1; + } elsif ($propertyValueType eq "-" || $propertyValueType eq "Reverse-merged") { + $propertyChangeDelta = -1; + } else { + die("Not reached."); + } + + # We perform a simple validation that an "Added" or "Deleted" property + # change type corresponds with a "+" and "-" value type, respectively. + my $expectedChangeDelta; + if ($propertyChangeType eq "Added") { + $expectedChangeDelta = 1; + } elsif ($propertyChangeType eq "Deleted") { + $expectedChangeDelta = -1; + } + + if ($expectedChangeDelta && $propertyChangeDelta != $expectedChangeDelta) { + die("The final property value type found \"$propertyValueType\" does not " . + "correspond to the property change type found \"$propertyChangeType\"."); + } + + my %propertyHash; + $propertyHash{name} = $propertyName; + $propertyHash{propertyChangeDelta} = $propertyChangeDelta; + $propertyHash{value} = $propertyValue; + return (\%propertyHash, $_); +} + +# Parse the value of an SVN property from the given file handle, and advance +# the handle so the last line read is the first line after the property value. +# +# This subroutine dies if the first line is an invalid SVN property value line +# (i.e. a line that does not begin with " +" or " -"). +# +# Args: +# $fileHandle: advanced so the last line read from the handle is the first +# line of the property value to parse. 
This should be a line +# beginning with " +" or " -". +# $line: the line last read from $fileHandle. +# +# Returns ($propertyValue, $lastReadLine): +# $propertyValue: the value of the property. +# $lastReadLine: the line last read from $fileHandle. +sub parseSvnPropertyValue($$) +{ + my ($fileHandle, $line) = @_; + + $_ = $line; + + my $propertyValue; + my $eol; + if (/$svnPropertyValueStartRegEx/) { + $propertyValue = $2; # Does not include the end-of-line character(s). + $eol = $POSTMATCH; + } else { + die("Failed to find property value beginning with '+', '-', 'Merged', or 'Reverse-merged': \"$_\"."); + } + + while (<$fileHandle>) { + if (/^[\r\n]+$/ || /$svnPropertyValueStartRegEx/ || /$svnPropertyStartRegEx/) { + # Note, we may encounter an empty line before the contents of a binary patch. + # Also, we check for $svnPropertyValueStartRegEx because a '-' property may be + # followed by a '+' property in the case of a "Modified" or "Name" property. + # We check for $svnPropertyStartRegEx because it indicates the start of the + # next property to parse. + last; + } + + # Temporarily strip off any end-of-line characters. We add the end-of-line characters + # from the previously processed line to the start of this line so that the last line + # of the property value does not end in end-of-line characters. + s/([\n\r]+)$//; + $propertyValue .= "$eol$_"; + $eol = $1; + } + + return ($propertyValue, $_); +} + +# Parse a patch file created by svn-create-patch. +# +# Args: +# $fileHandle: A file handle to the patch file that has not yet been +# read from. +# +# Returns: +# @diffHashRefs: an array of diff hash references. +# See the %diffHash documentation above. +sub parsePatch($) +{ + my ($fileHandle) = @_; + + my $newDiffHashRefs; + my @diffHashRefs; # return value + + my $line = <$fileHandle>; + + while (defined($line)) { # Otherwise, at EOF. + + ($newDiffHashRefs, $line) = parseDiff($fileHandle, $line); + + push @diffHashRefs, @$newDiffHashRefs; + } + + return @diffHashRefs; +} + +# Prepare the results of parsePatch() for use in svn-apply and svn-unapply. +# +# Args: +# $shouldForce: Whether to continue processing if an unexpected +# state occurs. +# @diffHashRefs: An array of references to %diffHashes. +# See the %diffHash documentation above. +# +# Returns $preparedPatchHashRef: +# copyDiffHashRefs: A reference to an array of the $diffHashRefs in +# @diffHashRefs that represent file copies. The original +# ordering is preserved. +# nonCopyDiffHashRefs: A reference to an array of the $diffHashRefs in +# @diffHashRefs that do not represent file copies. +# The original ordering is preserved. +# sourceRevisionHash: A reference to a hash of source path to source +# revision number. +sub prepareParsedPatch($@) +{ + my ($shouldForce, @diffHashRefs) = @_; + + my %copiedFiles; + + # Return values + my @copyDiffHashRefs = (); + my @nonCopyDiffHashRefs = (); + my %sourceRevisionHash = (); + for my $diffHashRef (@diffHashRefs) { + my $copiedFromPath = $diffHashRef->{copiedFromPath}; + my $indexPath = $diffHashRef->{indexPath}; + my $sourceRevision = $diffHashRef->{sourceRevision}; + my $sourcePath; + + if (defined($copiedFromPath)) { + # Then the diff is a copy operation. + $sourcePath = $copiedFromPath; + + # FIXME: Consider printing a warning or exiting if + # exists($copiedFiles{$indexPath}) is true -- i.e. if + # $indexPath appears twice as a copy target. + $copiedFiles{$indexPath} = $sourcePath; + + push @copyDiffHashRefs, $diffHashRef; + } else { + # Then the diff is not a copy operation. 
+ $sourcePath = $indexPath; + + push @nonCopyDiffHashRefs, $diffHashRef; + } + + if (defined($sourceRevision)) { + if (exists($sourceRevisionHash{$sourcePath}) && + ($sourceRevisionHash{$sourcePath} != $sourceRevision)) { + if (!$shouldForce) { + die "Two revisions of the same file required as a source:\n". + " $sourcePath:$sourceRevisionHash{$sourcePath}\n". + " $sourcePath:$sourceRevision"; + } + } + $sourceRevisionHash{$sourcePath} = $sourceRevision; + } + } + + my %preparedPatchHash; + + $preparedPatchHash{copyDiffHashRefs} = \@copyDiffHashRefs; + $preparedPatchHash{nonCopyDiffHashRefs} = \@nonCopyDiffHashRefs; + $preparedPatchHash{sourceRevisionHash} = \%sourceRevisionHash; + + return \%preparedPatchHash; +} + +# Return localtime() for the project's time zone, given an integer time as +# returned by Perl's time() function. +sub localTimeInProjectTimeZone($) +{ + my $epochTime = shift; + + # Change the time zone temporarily for the localtime() call. + my $savedTimeZone = $ENV{'TZ'}; + $ENV{'TZ'} = $changeLogTimeZone; + my @localTime = localtime($epochTime); + if (defined $savedTimeZone) { + $ENV{'TZ'} = $savedTimeZone; + } else { + delete $ENV{'TZ'}; + } + + return @localTime; +} + +# Set the reviewer and date in a ChangeLog patch, and return the new patch. +# +# Args: +# $patch: a ChangeLog patch as a string. +# $reviewer: the name of the reviewer, or undef if the reviewer should not be set. +# $epochTime: an integer time as returned by Perl's time() function. +sub setChangeLogDateAndReviewer($$$) +{ + my ($patch, $reviewer, $epochTime) = @_; + + my @localTime = localTimeInProjectTimeZone($epochTime); + my $newDate = strftime("%Y-%m-%d", @localTime); + + my $firstChangeLogLineRegEx = qr#(\n\+)\d{4}-[^-]{2}-[^-]{2}( )#; + $patch =~ s/$firstChangeLogLineRegEx/$1$newDate$2/; + + if (defined($reviewer)) { + # We include a leading plus ("+") in the regular expression to make + # the regular expression less likely to match text in the leading junk + # for the patch, if the patch has leading junk. + $patch =~ s/(\n\+.*)NOBODY \(OOPS!\)/$1$reviewer/; + } + + return $patch; +} + +# If possible, returns a ChangeLog patch equivalent to the given one, +# but with the newest ChangeLog entry inserted at the top of the +# file -- i.e. no leading context and all lines starting with "+". +# +# If given a patch string not representable as a patch with the above +# properties, it returns the input back unchanged. +# +# WARNING: This subroutine can return an inequivalent patch string if +# both the beginning of the new ChangeLog file matches the beginning +# of the source ChangeLog, and the source beginning was modified. +# Otherwise, it is guaranteed to return an equivalent patch string, +# if it returns. +# +# Applying this subroutine to ChangeLog patches allows svn-apply to +# insert new ChangeLog entries at the top of the ChangeLog file. +# svn-apply uses patch with --fuzz=3 to do this. We need to apply +# this subroutine because the diff(1) command is greedy when matching +# lines. A new ChangeLog entry with the same date and author as the +# previous will match and cause the diff to have lines of starting +# context. +# +# This subroutine has unit tests in VCSUtils_unittest.pl. +# +# Returns $changeLogHashRef: +# $changeLogHashRef: a hash reference representing a change log patch. +# patch: a ChangeLog patch equivalent to the given one, but with the +# newest ChangeLog entry inserted at the top of the file, if possible. 
+# hasOverlappingLines: the value 1 if the change log entry overlaps +# some lines of another change log entry. This can +# happen when deliberately inserting a new ChangeLog +# entry earlier in the file above an entry with +# the same date and author. +sub fixChangeLogPatch($) +{ + my $patch = shift; # $patch will only contain patch fragments for ChangeLog. + + $patch =~ /(\r?\n)/; + my $lineEnding = $1; + my @lines = split(/$lineEnding/, $patch); + + my $i = 0; # We reuse the same index throughout. + + # Skip to beginning of first chunk. + for (; $i < @lines; ++$i) { + if (substr($lines[$i], 0, 1) eq "@") { + last; + } + } + my $chunkStartIndex = ++$i; + my %changeLogHashRef; + + # Optimization: do not process if new lines already begin the chunk. + if (substr($lines[$i], 0, 1) eq "+") { + $changeLogHashRef{patch} = $patch; + return \%changeLogHashRef; + } + + # Skip to first line of newly added ChangeLog entry. + # For example, +2009-06-03 Eric Seidel <eric@webkit.org> + my $dateStartRegEx = '^\+(\d{4}-\d{2}-\d{2})' # leading "+" and date + . '\s+(.+)\s+' # name + . '<([^<>]+)>$'; # e-mail address + + for (; $i < @lines; ++$i) { + my $line = $lines[$i]; + my $firstChar = substr($line, 0, 1); + if ($line =~ /$dateStartRegEx/) { + last; + } elsif ($firstChar eq " " or $firstChar eq "+") { + next; + } + $changeLogHashRef{patch} = $patch; # Do not change if, for example, "-" or "@" found. + return \%changeLogHashRef; + } + if ($i >= @lines) { + $changeLogHashRef{patch} = $patch; # Do not change if date not found. + return \%changeLogHashRef; + } + my $dateStartIndex = $i; + + # Rewrite overlapping lines to lead with " ". + my @overlappingLines = (); # These will include a leading "+". + for (; $i < @lines; ++$i) { + my $line = $lines[$i]; + if (substr($line, 0, 1) ne "+") { + last; + } + push(@overlappingLines, $line); + $lines[$i] = " " . substr($line, 1); + } + + # Remove excess ending context, if necessary. + my $shouldTrimContext = 1; + for (; $i < @lines; ++$i) { + my $firstChar = substr($lines[$i], 0, 1); + if ($firstChar eq " ") { + next; + } elsif ($firstChar eq "@") { + last; + } + $shouldTrimContext = 0; # For example, if "+" or "-" encountered. + last; + } + my $deletedLineCount = 0; + if ($shouldTrimContext) { # Also occurs if end of file reached. + splice(@lines, $i - @overlappingLines, @overlappingLines); + $deletedLineCount = @overlappingLines; + } + + # Work backwards, shifting overlapping lines towards front + # while checking that patch stays equivalent. + for ($i = $dateStartIndex - 1; @overlappingLines && $i >= $chunkStartIndex; --$i) { + my $line = $lines[$i]; + if (substr($line, 0, 1) ne " ") { + next; + } + my $text = substr($line, 1); + my $newLine = pop(@overlappingLines); + if ($text ne substr($newLine, 1)) { + $changeLogHashRef{patch} = $patch; # Unexpected difference. + return \%changeLogHashRef; + } + $lines[$i] = "+$text"; + } + + # Finish moving whatever overlapping lines remain, and update + # the initial chunk range. + my $chunkRangeRegEx = '^\@\@ -(\d+),(\d+) \+\d+,(\d+) \@\@$'; # e.g. @@ -2,6 +2,18 @@ + if ($lines[$chunkStartIndex - 1] !~ /$chunkRangeRegEx/) { + # FIXME: Handle errors differently from ChangeLog files that + # are okay but should not be altered. That way we can find out + # if improvements to the script ever become necessary. + $changeLogHashRef{patch} = $patch; # Error: unexpected patch string format. 
+ return \%changeLogHashRef; + } + my $skippedFirstLineCount = $1 - 1; + my $oldSourceLineCount = $2; + my $oldTargetLineCount = $3; + + if (@overlappingLines != $skippedFirstLineCount) { + # This can happen, for example, when deliberately inserting + # a new ChangeLog entry earlier in the file. + $changeLogHashRef{hasOverlappingLines} = 1; + $changeLogHashRef{patch} = $patch; + return \%changeLogHashRef; + } + # If @overlappingLines > 0, this is where we make use of the + # assumption that the beginning of the source file was not modified. + splice(@lines, $chunkStartIndex, 0, @overlappingLines); + + my $sourceLineCount = $oldSourceLineCount + @overlappingLines - $deletedLineCount; + my $targetLineCount = $oldTargetLineCount + @overlappingLines - $deletedLineCount; + $lines[$chunkStartIndex - 1] = "@@ -1,$sourceLineCount +1,$targetLineCount @@"; + + $changeLogHashRef{patch} = join($lineEnding, @lines) . "\n"; # patch(1) expects an extra trailing newline. + return \%changeLogHashRef; +} + +# This is a supporting method for runPatchCommand. +# +# Arg: the optional $args parameter passed to runPatchCommand (can be undefined). +# +# Returns ($patchCommand, $isForcing). +# +# This subroutine has unit tests in VCSUtils_unittest.pl. +sub generatePatchCommand($) +{ + my ($passedArgsHashRef) = @_; + + my $argsHashRef = { # Defaults + ensureForce => 0, + shouldReverse => 0, + options => [] + }; + + # Merges hash references. It's okay here if passed hash reference is undefined. + @{$argsHashRef}{keys %{$passedArgsHashRef}} = values %{$passedArgsHashRef}; + + my $ensureForce = $argsHashRef->{ensureForce}; + my $shouldReverse = $argsHashRef->{shouldReverse}; + my $options = $argsHashRef->{options}; + + if (! $options) { + $options = []; + } else { + $options = [@{$options}]; # Copy to avoid side effects. + } + + my $isForcing = 0; + if (grep /^--force$/, @{$options}) { + $isForcing = 1; + } elsif ($ensureForce) { + push @{$options}, "--force"; + $isForcing = 1; + } + + if ($shouldReverse) { # No check: --reverse should never be passed explicitly. + push @{$options}, "--reverse"; + } + + @{$options} = sort(@{$options}); # For easier testing. + + my $patchCommand = join(" ", "patch -p0", @{$options}); + + return ($patchCommand, $isForcing); +} + +# Apply the given patch using the patch(1) command. +# +# On success, return the resulting exit status. Otherwise, exit with the +# exit status. If "--force" is passed as an option, however, then never +# exit and always return the exit status. +# +# Args: +# $patch: a patch string. +# $repositoryRootPath: an absolute path to the repository root. +# $pathRelativeToRoot: the path of the file to be patched, relative to the +# repository root. This should normally be the path +# found in the patch's "Index:" line. It is passed +# explicitly rather than reparsed from the patch +# string for optimization purposes. +# This is used only for error reporting. The +# patch command gleans the actual file to patch +# from the patch string. +# $args: a reference to a hash of optional arguments. The possible +# keys are -- +# ensureForce: whether to ensure --force is passed (defaults to 0). +# shouldReverse: whether to pass --reverse (defaults to 0). +# options: a reference to an array of options to pass to the +# patch command. The subroutine passes the -p0 option +# no matter what. This should not include --reverse. +# +# This subroutine has unit tests in VCSUtils_unittest.pl. 
+sub runPatchCommand($$$;$) +{ + my ($patch, $repositoryRootPath, $pathRelativeToRoot, $args) = @_; + + my ($patchCommand, $isForcing) = generatePatchCommand($args); + + # Temporarily change the working directory since the path found + # in the patch's "Index:" line is relative to the repository root + # (i.e. the same as $pathRelativeToRoot). + my $cwd = Cwd::getcwd(); + chdir $repositoryRootPath; + + open PATCH, "| $patchCommand" or die "Could not call \"$patchCommand\" for file \"$pathRelativeToRoot\": $!"; + print PATCH $patch; + close PATCH; + my $exitStatus = exitStatus($?); + + chdir $cwd; + + if ($exitStatus && !$isForcing) { + print "Calling \"$patchCommand\" for file \"$pathRelativeToRoot\" returned " . + "status $exitStatus. Pass --force to ignore patch failures.\n"; + exit $exitStatus; + } + + return $exitStatus; +} + +# Merge ChangeLog patches using a three-file approach. +# +# This is used by resolve-ChangeLogs when it's operated as a merge driver +# and when it's used to merge conflicts after a patch is applied or after +# an svn update. +# +# It's also used for traditional rejected patches. +# +# Args: +# $fileMine: The merged version of the file. Also known in git as the +# other branch's version (%B) or "ours". +# For traditional patch rejects, this is the *.rej file. +# $fileOlder: The base version of the file. Also known in git as the +# ancestor version (%O) or "base". +# For traditional patch rejects, this is the *.orig file. +# $fileNewer: The current version of the file. Also known in git as the +# current version (%A) or "theirs". +# For traditional patch rejects, this is the original-named +# file. +# +# Returns 1 if merge was successful, else 0. +sub mergeChangeLogs($$$) +{ + my ($fileMine, $fileOlder, $fileNewer) = @_; + + my $traditionalReject = $fileMine =~ /\.rej$/ ? 1 : 0; + + local $/ = undef; + + my $patch; + if ($traditionalReject) { + open(DIFF, "<", $fileMine) or die $!; + $patch = <DIFF>; + close(DIFF); + rename($fileMine, "$fileMine.save"); + rename($fileOlder, "$fileOlder.save"); + } else { + open(DIFF, "-|", qw(diff -u -a --binary), $fileOlder, $fileMine) or die $!; + $patch = <DIFF>; + close(DIFF); + } + + unlink("${fileNewer}.orig"); + unlink("${fileNewer}.rej"); + + open(PATCH, "| patch --force --fuzz=3 --binary $fileNewer > " . File::Spec->devnull()) or die $!; + if ($traditionalReject) { + print PATCH $patch; + } else { + my $changeLogHash = fixChangeLogPatch($patch); + print PATCH $changeLogHash->{patch}; + } + close(PATCH); + + my $result = !exitStatus($?); + + # Refuse to merge the patch if it did not apply cleanly + if (-e "${fileNewer}.rej") { + unlink("${fileNewer}.rej"); + if (-f "${fileNewer}.orig") { + unlink($fileNewer); + rename("${fileNewer}.orig", $fileNewer); + } + } else { + unlink("${fileNewer}.orig"); + } + + if ($traditionalReject) { + rename("$fileMine.save", $fileMine); + rename("$fileOlder.save", $fileOlder); + } + + return $result; +} + +sub gitConfig($) +{ + return unless $isGit; + + my ($config) = @_; + + my $result = `git config $config`; + if (($? 
>> 8)) { + $result = `git repo-config $config`; + } + chomp $result; + return $result; +} + +sub changeLogNameError($) +{ + my ($message) = @_; + print STDERR "$message\nEither:\n"; + print STDERR " set CHANGE_LOG_NAME in your environment\n"; + print STDERR " OR pass --name= on the command line\n"; + print STDERR " OR set REAL_NAME in your environment"; + print STDERR " OR git users can set 'git config user.name'\n"; + exit(1); +} + +sub changeLogName() +{ + my $name = $ENV{CHANGE_LOG_NAME} || $ENV{REAL_NAME} || gitConfig("user.name") || (split /\s*,\s*/, (getpwuid $<)[6])[0]; + + changeLogNameError("Failed to determine ChangeLog name.") unless $name; + # getpwuid seems to always succeed on windows, returning the username instead of the full name. This check will catch that case. + changeLogNameError("'$name' does not contain a space! ChangeLogs should contain your full name.") unless ($name =~ /\w \w/); + + return $name; +} + +sub changeLogEmailAddressError($) +{ + my ($message) = @_; + print STDERR "$message\nEither:\n"; + print STDERR " set CHANGE_LOG_EMAIL_ADDRESS in your environment\n"; + print STDERR " OR pass --email= on the command line\n"; + print STDERR " OR set EMAIL_ADDRESS in your environment\n"; + print STDERR " OR git users can set 'git config user.email'\n"; + exit(1); +} + +sub changeLogEmailAddress() +{ + my $emailAddress = $ENV{CHANGE_LOG_EMAIL_ADDRESS} || $ENV{EMAIL_ADDRESS} || gitConfig("user.email"); + + changeLogEmailAddressError("Failed to determine email address for ChangeLog.") unless $emailAddress; + changeLogEmailAddressError("Email address '$emailAddress' does not contain '\@' and is likely invalid.") unless ($emailAddress =~ /\@/); + + return $emailAddress; +} + +# http://tools.ietf.org/html/rfc1924 +sub decodeBase85($) +{ + my ($encoded) = @_; + my %table; + my @characters = ('0'..'9', 'A'..'Z', 'a'..'z', '!', '#', '$', '%', '&', '(', ')', '*', '+', '-', ';', '<', '=', '>', '?', '@', '^', '_', '`', '{', '|', '}', '~'); + for (my $i = 0; $i < 85; $i++) { + $table{$characters[$i]} = $i; + } + + my $decoded = ''; + my @encodedChars = $encoded =~ /./g; + + for (my $encodedIter = 0; defined($encodedChars[$encodedIter]);) { + my $digit = 0; + for (my $i = 0; $i < 5; $i++) { + $digit *= 85; + my $char = $encodedChars[$encodedIter]; + $digit += $table{$char}; + $encodedIter++; + } + + for (my $i = 0; $i < 4; $i++) { + $decoded .= chr(($digit >> (3 - $i) * 8) & 255); + } + } + + return $decoded; +} + +sub decodeGitBinaryChunk($$) +{ + my ($contents, $fullPath) = @_; + + # Load this module lazily in case the user don't have this module + # and won't handle git binary patches. + require Compress::Zlib; + + my $encoded = ""; + my $compressedSize = 0; + while ($contents =~ /^([A-Za-z])(.*)$/gm) { + my $line = $2; + next if $line eq ""; + die "$fullPath: unexpected size of a line: $&" if length($2) % 5 != 0; + my $actualSize = length($2) / 5 * 4; + my $encodedExpectedSize = ord($1); + my $expectedSize = $encodedExpectedSize <= ord("Z") ? $encodedExpectedSize - ord("A") + 1 : $encodedExpectedSize - ord("a") + 27; + + die "$fullPath: unexpected size of a line: $&" if int(($expectedSize + 3) / 4) * 4 != $actualSize; + $compressedSize += $expectedSize; + $encoded .= $line; + } + + my $compressed = decodeBase85($encoded); + $compressed = substr($compressed, 0, $compressedSize); + return Compress::Zlib::uncompress($compressed); +} + +sub decodeGitBinaryPatch($$) +{ + my ($contents, $fullPath) = @_; + + # Git binary patch has two chunks. 
One is for the normal patching + # and another is for the reverse patching. + # + # Each chunk a line which starts from either "literal" or "delta", + # followed by a number which specifies decoded size of the chunk. + # The "delta" type chunks aren't supported by this function yet. + # + # Then, content of the chunk comes. To decode the content, we + # need decode it with base85 first, and then zlib. + my $gitPatchRegExp = '(literal|delta) ([0-9]+)\n([A-Za-z0-9!#$%&()*+-;<=>?@^_`{|}~\\n]*?)\n\n'; + if ($contents !~ m"\nGIT binary patch\n$gitPatchRegExp$gitPatchRegExp\Z") { + die "$fullPath: unknown git binary patch format" + } + + my $binaryChunkType = $1; + my $binaryChunkExpectedSize = $2; + my $encodedChunk = $3; + my $reverseBinaryChunkType = $4; + my $reverseBinaryChunkExpectedSize = $5; + my $encodedReverseChunk = $6; + + my $binaryChunk = decodeGitBinaryChunk($encodedChunk, $fullPath); + my $binaryChunkActualSize = length($binaryChunk); + my $reverseBinaryChunk = decodeGitBinaryChunk($encodedReverseChunk, $fullPath); + my $reverseBinaryChunkActualSize = length($reverseBinaryChunk); + + die "$fullPath: unexpected size of the first chunk (expected $binaryChunkExpectedSize but was $binaryChunkActualSize" if ($binaryChunkExpectedSize != $binaryChunkActualSize); + die "$fullPath: unexpected size of the second chunk (expected $reverseBinaryChunkExpectedSize but was $reverseBinaryChunkActualSize" if ($reverseBinaryChunkExpectedSize != $reverseBinaryChunkActualSize); + + return ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk); +} + +1; diff --git a/Tools/Scripts/add-include b/Tools/Scripts/add-include new file mode 100755 index 0000000..d0525eb --- /dev/null +++ b/Tools/Scripts/add-include @@ -0,0 +1,135 @@ +#!/usr/bin/perl -w + +# Copyright 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Helper script to add includes to source files. + +use strict; + +my $headerPattern = '[\"<][A-Za-z][A-Za-z0-9_/]+(\.h)?[\">]'; # " Make Xcode formatter happy. 
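# For illustration (not part of the original script): $headerPattern is meant to
# match the quoted or angle-bracketed header name of an #include/#import line.
# The header names below are examples, not files the script requires.
for my $example ('"config.h"', '<wtf/Vector.h>', '"RenderObject.h"') {
    print "$example looks like a header reference\n" if $example =~ /^$headerPattern$/;
}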
+ +my $headerToAdd = shift @ARGV or die; +$headerToAdd =~ /^([A-Za-z][A-Za-z0-9]+)\.h$/ or die "Header to add must be a .h file: $headerToAdd.\n"; + +sub includesParagraph; + +FILE: for my $filename (@ARGV) { + unless ($filename =~ /(\w+)\.cpp$/) { print STDERR "Command line args must be .cpp files: $filename.\n"; next FILE; } + + my $base = $1; + + my $sawConfig = 0; + my $sawSelfInclude = 0; + + my $pastIncludes = 0; + my %includes; + + my $beforeIncludes = ""; + my $afterIncludes = ""; + + my $currentCondition = ""; + + my $entireFileCondition = ""; + + unless (open INPUT, "<", $filename) { print STDERR "File does not exist: $filename\n"; next FILE; } + while (my $line = <INPUT>) { + if ($line =~ /^\s*#(include|import)\s*($headerPattern)\s*\n/) { + my $include = $2; + if ($pastIncludes) { print STDERR "Saw more includes after include section in $filename, line $.\n"; next FILE; } + if ($include eq "\"config.h\"") { + $sawConfig = 1; + } else { + unless ($sawConfig) { print STDERR "First include must be config.h in $filename, line $.\n"; next FILE; } + if ($include eq "\"$base.h\"") { + $sawSelfInclude = 1; + } else { + unless ($sawSelfInclude) { print STDERR "Second include must be $base.h in $filename, line $.\n"; next FILE; } + $includes{$currentCondition}{$include} = 1; + } + } + } else { + if ($sawConfig && !$pastIncludes) { + if ($line =~ /^\s*#\s*if\s+(.+?)\s*$/) { + my $condition = $1; + if (!$sawSelfInclude) { + $entireFileCondition = $1; + next; + } + unless ($currentCondition eq "") { print STDERR "Nested #if in include section in $filename, line $.\n"; next FILE; } + $currentCondition = $condition; + next; + } + if ($line =~ /^\s*#\s*endif\s*$/) { + unless ($currentCondition ne "") { print STDERR "Extra #endif in include section in $filename, line $.\n"; next FILE; } + $currentCondition = ""; + next; + } + } + if (!$sawConfig) { + $beforeIncludes .= $line; + } else { + $pastIncludes = 1 if $line !~ /^\s*$/; + if ($pastIncludes) { + unless ($currentCondition eq "") { print STDERR "Unterminated #if in include section in $filename, line $.\n"; next FILE; } + $afterIncludes .= $line; + } + } + } + } + close INPUT or die; + + $includes{""}{"\"$headerToAdd\""} = 1; + + $beforeIncludes =~ s/\n+$//; + $afterIncludes =~ s/^\n+//; + + my $contents = $beforeIncludes; + $contents .= "\n\n#include \"config.h\"\n"; + $contents .= "\n#if $entireFileCondition\n" if $entireFileCondition ne ""; + $contents .= "#include \"$base.h\"\n\n"; + for my $condition (sort keys %includes) { + $contents .= "#if $condition\n" unless $condition eq ""; + $contents .= includesParagraph($includes{$condition}); + $contents .= "#endif\n" unless $condition eq ""; + $contents .= "\n"; + } + $contents .= $afterIncludes; + + unless (open OUTPUT, ">", $filename) { print STDERR "Could not open file for writing: $filename\n"; next FILE; }; + print OUTPUT $contents; + close OUTPUT or die; +} + +sub includesParagraph() +{ + my ($includes) = @_; + + my $paragraph = ""; + + for my $include (sort keys %{$includes}) { + $paragraph .= "#include $include\n"; + } + + return $paragraph; +} diff --git a/Tools/Scripts/bisect-builds b/Tools/Scripts/bisect-builds new file mode 100755 index 0000000..063b61e --- /dev/null +++ b/Tools/Scripts/bisect-builds @@ -0,0 +1,432 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script attempts to find the point at which a regression (or progression) +# of behavior occurred by searching WebKit nightly builds. + +# To override the location where the nightly builds are downloaded or the path +# to the Safari web browser, create a ~/.bisect-buildsrc file with one or more of +# the following lines (use "~/" to specify a path from your home directory): +# +# $branch = "branch-name"; +# $nightlyDownloadDirectory = "~/path/to/nightly/downloads"; +# $safariPath = "/path/to/Safari.app"; + +use strict; + +use File::Basename; +use File::Path; +use File::Spec; +use File::Temp qw(tempfile); +use Getopt::Long; +use Time::HiRes qw(usleep); + +sub createTempFile($); +sub downloadNightly($$$); +sub findMacOSXVersion(); +sub findNearestNightlyIndex(\@$$); +sub findSafariVersion($); +sub loadSettings(); +sub makeNightlyList($$$$); +sub max($$) { return $_[0] > $_[1] ? $_[0] : $_[1]; } +sub mountAndRunNightly($$$$); +sub parseRevisions($$;$); +sub printStatus($$$); +sub promptForTest($); + +loadSettings(); + +my %validBranches = map { $_ => 1 } qw(feature-branch trunk); +my $branch = $Settings::branch; +my $nightlyDownloadDirectory = $Settings::nightlyDownloadDirectory; +my $safariPath = $Settings::safariPath; + +my @nightlies; + +my $isProgression; +my $localOnly; +my @revisions; +my $sanityCheck; +my $showHelp; +my $testURL; + +# Fix up -r switches in @ARGV +@ARGV = map { /^(-r)(.+)$/ ? ($1, $2) : $_ } @ARGV; + +my $result = GetOptions( + "b|branch=s" => \$branch, + "d|download-directory=s" => \$nightlyDownloadDirectory, + "h|help" => \$showHelp, + "l|local!" => \$localOnly, + "p|progression!" => \$isProgression, + "r|revisions=s" => \&parseRevisions, + "safari-path=s" => \$safariPath, + "s|sanity-check!" => \$sanityCheck, +); +$testURL = shift @ARGV; + +$branch = "feature-branch" if $branch eq "feature"; +if (!exists $validBranches{$branch}) { + print STDERR "ERROR: Invalid branch '$branch'\n"; + $showHelp = 1; +} + +if (!$result || $showHelp || scalar(@ARGV) > 0) { + print STDERR "Search WebKit nightly builds for changes in behavior.\n"; + print STDERR "Usage: " . basename($0) . 
" [options] [url]\n"; + print STDERR <<END; + [-b|--branch name] name of the nightly build branch (default: trunk) + [-d|--download-directory dir] nightly build download directory (default: ~/Library/Caches/WebKit-Nightlies) + [-h|--help] show this help message + [-l|--local] only use local (already downloaded) nightlies + [-p|--progression] searching for a progression, not a regression + [-r|--revision M[:N]] specify starting (and optional ending) revisions to search + [--safari-path path] path to Safari application bundle (default: /Applications/Safari.app) + [-s|--sanity-check] verify both starting and ending revisions before bisecting +END + exit 1; +} + +my $nightlyWebSite = "http://nightly.webkit.org"; +my $nightlyBuildsURLBase = $nightlyWebSite . File::Spec->catdir("/builds", $branch, "mac"); +my $nightlyFilesURLBase = $nightlyWebSite . File::Spec->catdir("/files", $branch, "mac"); + +$nightlyDownloadDirectory = glob($nightlyDownloadDirectory) if $nightlyDownloadDirectory =~ /^~/; +$safariPath = glob($safariPath) if $safariPath =~ /^~/; +$safariPath = File::Spec->catdir($safariPath, "Contents/MacOS/Safari") if $safariPath =~ m#\.app/*#; + +$nightlyDownloadDirectory = File::Spec->catdir($nightlyDownloadDirectory, $branch); +if (! -d $nightlyDownloadDirectory) { + mkpath($nightlyDownloadDirectory, 0, 0755) || die "Could not create $nightlyDownloadDirectory: $!"; +} + +@nightlies = makeNightlyList($localOnly, $nightlyDownloadDirectory, findMacOSXVersion(), findSafariVersion($safariPath)); + +my $startIndex = $revisions[0] ? findNearestNightlyIndex(@nightlies, $revisions[0], 'ceil') : 0; +my $endIndex = $revisions[1] ? findNearestNightlyIndex(@nightlies, $revisions[1], 'floor') : $#nightlies; + +my $tempFile = createTempFile($testURL); + +if ($sanityCheck) { + my $didReproduceBug; + + do { + printf "\nChecking starting revision r%s...\n", + $nightlies[$startIndex]->{rev}; + downloadNightly($nightlies[$startIndex]->{file}, $nightlyFilesURLBase, $nightlyDownloadDirectory); + mountAndRunNightly($nightlies[$startIndex]->{file}, $nightlyDownloadDirectory, $safariPath, $tempFile); + $didReproduceBug = promptForTest($nightlies[$startIndex]->{rev}); + $startIndex-- if $didReproduceBug < 0; + } while ($didReproduceBug < 0); + die "ERROR: Bug reproduced in starting revision! Do you need to test an earlier revision or for a progression?" + if $didReproduceBug && !$isProgression; + die "ERROR: Bug not reproduced in starting revision! Do you need to test an earlier revision or for a regression?" + if !$didReproduceBug && $isProgression; + + do { + printf "\nChecking ending revision r%s...\n", + $nightlies[$endIndex]->{rev}; + downloadNightly($nightlies[$endIndex]->{file}, $nightlyFilesURLBase, $nightlyDownloadDirectory); + mountAndRunNightly($nightlies[$endIndex]->{file}, $nightlyDownloadDirectory, $safariPath, $tempFile); + $didReproduceBug = promptForTest($nightlies[$endIndex]->{rev}); + $endIndex++ if $didReproduceBug < 0; + } while ($didReproduceBug < 0); + die "ERROR: Bug NOT reproduced in ending revision! Do you need to test a later revision or for a progression?" + if !$didReproduceBug && !$isProgression; + die "ERROR: Bug reproduced in ending revision! Do you need to test a later revision or for a regression?" 
+ if $didReproduceBug && $isProgression; +} + +printStatus($nightlies[$startIndex]->{rev}, $nightlies[$endIndex]->{rev}, $isProgression); + +my %brokenRevisions = (); +while (abs($endIndex - $startIndex) > 1) { + my $index = $startIndex + int(($endIndex - $startIndex) / 2); + + my $didReproduceBug; + do { + if (exists $nightlies[$index]) { + my $buildsLeft = max(max(0, $endIndex - $index - 1), max(0, $index - $startIndex - 1)); + my $plural = $buildsLeft == 1 ? "" : "s"; + printf "\nChecking revision r%s (%d build%s left to test after this)...\n", $nightlies[$index]->{rev}, $buildsLeft, $plural; + downloadNightly($nightlies[$index]->{file}, $nightlyFilesURLBase, $nightlyDownloadDirectory); + mountAndRunNightly($nightlies[$index]->{file}, $nightlyDownloadDirectory, $safariPath, $tempFile); + $didReproduceBug = promptForTest($nightlies[$index]->{rev}); + } + if ($didReproduceBug < 0) { + $brokenRevisions{$nightlies[$index]->{rev}} = $nightlies[$index]->{file}; + delete $nightlies[$index]; + $endIndex--; + $index = $startIndex + int(($endIndex - $startIndex) / 2); + } + } while ($didReproduceBug < 0); + + if ($didReproduceBug && !$isProgression || !$didReproduceBug && $isProgression) { + $endIndex = $index; + } else { + $startIndex = $index; + } + + print "\nBroken revisions skipped: r" . join(", r", keys %brokenRevisions) . "\n" + if scalar keys %brokenRevisions > 0; + printStatus($nightlies[$startIndex]->{rev}, $nightlies[$endIndex]->{rev}, $isProgression); +} + +unlink $tempFile if $tempFile; + +exit 0; + +sub createTempFile($) +{ + my ($url) = @_; + + return undef if !$url; + + my ($fh, $tempFile) = tempfile( + basename($0) . "-XXXXXXXX", + DIR => File::Spec->tmpdir(), + SUFFIX => ".html", + UNLINK => 0, + ); + print $fh "<meta http-equiv=\"refresh\" content=\"0; $url\">\n"; + close($fh); + + return $tempFile; +} + +sub downloadNightly($$$) +{ + my ($filename, $urlBase, $directory) = @_; + my $path = File::Spec->catfile($directory, $filename); + if (! -f $path) { + print "Downloading $filename to $directory...\n"; + `curl -# -o '$path' '$urlBase/$filename'`; + } +} + +sub findMacOSXVersion() +{ + my $version; + open(SW_VERS, "-|", "/usr/bin/sw_vers") || die; + while (<SW_VERS>) { + $version = $1 if /^ProductVersion:\s+([^\s]+)/; + } + close(SW_VERS); + return $version; +} + +sub findNearestNightlyIndex(\@$$) +{ + my ($nightlies, $revision, $round) = @_; + + my $lowIndex = 0; + my $highIndex = $#{$nightlies}; + + return $highIndex if uc($revision) eq 'HEAD' || $revision >= $nightlies->[$highIndex]->{rev}; + return $lowIndex if $revision <= $nightlies->[$lowIndex]->{rev}; + + while (abs($highIndex - $lowIndex) > 1) { + my $index = $lowIndex + int(($highIndex - $lowIndex) / 2); + if ($revision < $nightlies->[$index]->{rev}) { + $highIndex = $index; + } elsif ($revision > $nightlies->[$index]->{rev}) { + $lowIndex = $index; + } else { + return $index; + } + } + + return ($round eq "floor") ? 
$lowIndex : $highIndex; +} + +sub findSafariVersion($) +{ + my ($path) = @_; + my $versionPlist = File::Spec->catdir(dirname(dirname($path)), "version.plist"); + my $version; + open(PLIST, "< $versionPlist") || die; + while (<PLIST>) { + if (m#^\s*<key>CFBundleShortVersionString</key>#) { + $version = <PLIST>; + $version =~ s#^\s*<string>([0-9.]+)[^<]*</string>\s*[\r\n]*#$1#; + } + } + close(PLIST); + return $version; +} + +sub loadSettings() +{ + package Settings; + + our $branch = "trunk"; + our $nightlyDownloadDirectory = File::Spec->catdir($ENV{HOME}, "Library/Caches/WebKit-Nightlies"); + our $safariPath = "/Applications/Safari.app"; + + my $rcfile = File::Spec->catdir($ENV{HOME}, ".bisect-buildsrc"); + return if !-f $rcfile; + + my $result = do $rcfile; + die "Could not parse $rcfile: $@" if $@; +} + +sub makeNightlyList($$$$) +{ + my ($useLocalFiles, $localDirectory, $macOSXVersion, $safariVersion) = @_; + my @files; + + if ($useLocalFiles) { + opendir(DIR, $localDirectory) || die "$!"; + foreach my $file (readdir(DIR)) { + if ($file =~ /^WebKit-SVN-r([0-9]+)\.dmg$/) { + push(@files, +{ rev => $1, file => $file }); + } + } + closedir(DIR); + } else { + open(NIGHTLIES, "curl -s $nightlyBuildsURLBase/all |") || die; + + while (my $line = <NIGHTLIES>) { + chomp $line; + my ($revision, $timestamp, $url) = split(/,/, $line); + my $nightly = basename($url); + push(@files, +{ rev => $revision, file => $nightly }); + } + close(NIGHTLIES); + } + + if (eval "v$macOSXVersion" ge v10.5) { + if ($safariVersion eq "4 Public Beta") { + @files = grep { $_->{rev} >= 39682 } @files; + } elsif (eval "v$safariVersion" ge v3.2) { + @files = grep { $_->{rev} >= 37348 } @files; + } elsif (eval "v$safariVersion" ge v3.1) { + @files = grep { $_->{rev} >= 29711 } @files; + } elsif (eval "v$safariVersion" ge v3.0) { + @files = grep { $_->{rev} >= 25124 } @files; + } elsif (eval "v$safariVersion" ge v2.0) { + @files = grep { $_->{rev} >= 19594 } @files; + } else { + die "Requires Safari 2.0 or newer"; + } + } elsif (eval "v$macOSXVersion" ge v10.4) { + if ($safariVersion eq "4 Public Beta") { + @files = grep { $_->{rev} >= 39682 } @files; + } elsif (eval "v$safariVersion" ge v3.2) { + @files = grep { $_->{rev} >= 37348 } @files; + } elsif (eval "v$safariVersion" ge v3.1) { + @files = grep { $_->{rev} >= 29711 } @files; + } elsif (eval "v$safariVersion" ge v3.0) { + @files = grep { $_->{rev} >= 19992 } @files; + } elsif (eval "v$safariVersion" ge v2.0) { + @files = grep { $_->{rev} >= 11976 } @files; + } else { + die "Requires Safari 2.0 or newer"; + } + } else { + die "Requires Mac OS X 10.4 (Tiger) or 10.5 (Leopard)"; + } + + my $nightlycmp = sub { return $a->{rev} <=> $b->{rev}; }; + + return sort $nightlycmp @files; +} + +sub mountAndRunNightly($$$$) +{ + my ($filename, $directory, $safari, $tempFile) = @_; + my $mountPath = "/Volumes/WebKit"; + my $webkitApp = File::Spec->catfile($mountPath, "WebKit.app"); + my $diskImage = File::Spec->catfile($directory, $filename); + my $devNull = File::Spec->devnull(); + + my $i = 0; + while (-e $mountPath) { + $i++; + usleep 100 if $i > 1; + `hdiutil detach '$mountPath' 2> $devNull`; + die "Could not unmount $diskImage at $mountPath" if $i > 100; + } + die "Can't mount $diskImage: $mountPath already exists!" if -e $mountPath; + + print "Mounting disk image and running WebKit...\n"; + `hdiutil attach '$diskImage'`; + $i = 0; + while (! 
-e $webkitApp) { + usleep 100; + $i++; + die "Could not mount $diskImage at $mountPath" if $i > 100; + } + + my $frameworkPath; + if (-d "/Volumes/WebKit/WebKit.app/Contents/Frameworks") { + my $osXVersion = join('.', (split(/\./, findMacOSXVersion()))[0..1]); + $frameworkPath = "/Volumes/WebKit/WebKit.app/Contents/Frameworks/$osXVersion"; + } else { + $frameworkPath = "/Volumes/WebKit/WebKit.app/Contents/Resources"; + } + + $tempFile ||= ""; + `DYLD_FRAMEWORK_PATH=$frameworkPath WEBKIT_UNSET_DYLD_FRAMEWORK_PATH=YES $safari $tempFile`; + + `hdiutil detach '$mountPath' 2> $devNull`; +} + +sub parseRevisions($$;$) +{ + my ($optionName, $value, $ignored) = @_; + + if ($value =~ /^r?([0-9]+|HEAD):?$/i) { + push(@revisions, $1); + die "Too many revision arguments specified" if scalar @revisions > 2; + } elsif ($value =~ /^r?([0-9]+):?r?([0-9]+|HEAD)$/i) { + $revisions[0] = $1; + $revisions[1] = $2; + } else { + die "Unknown revision '$value': expected 'M' or 'M:N'"; + } +} + +sub printStatus($$$) +{ + my ($startRevision, $endRevision, $isProgression) = @_; + printf "\n%s: r%s %s: r%s\n", + $isProgression ? "Fails" : "Works", $startRevision, + $isProgression ? "Works" : "Fails", $endRevision; +} + +sub promptForTest($) +{ + my ($revision) = @_; + print "Did the bug reproduce in r$revision (yes/no/broken)? "; + my $answer = <STDIN>; + return 1 if $answer =~ /^(1|y.*)$/i; + return -1 if $answer =~ /^(-1|b.*)$/i; # Broken + return 0; +} + diff --git a/Tools/Scripts/build-api-tests b/Tools/Scripts/build-api-tests new file mode 100755 index 0000000..dd2231c --- /dev/null +++ b/Tools/Scripts/build-api-tests @@ -0,0 +1,70 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
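# A sketch (not part of either script) of how bisect-builds' -r values are
# handled by parseRevisions above; the revision numbers are arbitrary and the
# first argument (the option name) is ignored by the subroutine.
parseRevisions("revisions", "40000");        # single value: @revisions becomes (40000)
parseRevisions("revisions", "rHEAD");        # a second single value sets the end: (40000, "HEAD")
parseRevisions("revisions", "r40000:41000"); # or give both at once: @revisions becomes (40000, 41000)
# A third single value would die with "Too many revision arguments specified".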
+ +use strict; +use File::Basename; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +my $showHelp = 0; +my $clean = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --clean Clean up the build directory +EOF + +GetOptions( + 'help' => \$showHelp, + 'clean' => \$clean, +); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +checkRequiredSystemConfig(); +setConfiguration(); +chdirWebKit(); + +# Build +chdir "Tools/TestWebKitAPI" or die; + +my $result; +if (isAppleMacWebKit()) { + $result = buildXCodeProject("TestWebKitAPI", $clean, XcodeOptions(), @ARGV); +} elsif (isAppleWinWebKit()) { + $result = buildVisualStudioProject("win/TestWebKitAPI.sln", $clean); +} else { + die "TestWebKitAPI is not supported on this platform.\n"; +} + +exit exitStatus($result); diff --git a/Tools/Scripts/build-dumprendertree b/Tools/Scripts/build-dumprendertree new file mode 100755 index 0000000..717f934 --- /dev/null +++ b/Tools/Scripts/build-dumprendertree @@ -0,0 +1,80 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
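# A self-contained sketch (separate from the scripts in this patch) of the
# Getopt::Long pass_through behavior these build wrappers rely on: options the
# wrapper does not recognize stay in @ARGV and are forwarded to the underlying
# build system. The leftover arguments below are invented.
use strict;
use Getopt::Long qw(:config pass_through);

my $clean = 0;
@ARGV = ("--clean", "-sdk", "macosx", "ARCHS=x86_64");
GetOptions('clean' => \$clean);
print "clean=$clean, forwarded to the build system: @ARGV\n";
# Should print: clean=1, forwarded to the build system: -sdk macosx ARCHS=x86_64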
+ +use strict; +use File::Basename; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +my $showHelp = 0; +my $clean = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --clean Clean up the build directory + --gtk Build the GTK+ port + --qt Build the Qt port + --wx Build the wxWindows port + --chromium Build the Chromium port +EOF + +GetOptions( + 'help' => \$showHelp, + 'clean' => \$clean, +); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +checkRequiredSystemConfig(); +setConfiguration(); +chdirWebKit(); + +# Build +chdir "Tools/DumpRenderTree" or die; + +my $result; +if (isAppleMacWebKit()) { + $result = buildXCodeProject("DumpRenderTree", $clean, XcodeOptions(), @ARGV); +} elsif (isAppleWinWebKit()) { + $result = buildVisualStudioProject("DumpRenderTree.sln", $clean); +} elsif (isQt() || isGtk() || isWx() || isChromium()) { + # Qt, Gtk wxWindows, and Chromium build everything in one shot. No need to build anything here. + $result = 0; +} else { + die "Building not defined for this platform!\n"; +} + +exit exitStatus($result); diff --git a/Tools/Scripts/build-jsc b/Tools/Scripts/build-jsc new file mode 100755 index 0000000..0316349 --- /dev/null +++ b/Tools/Scripts/build-jsc @@ -0,0 +1,76 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# Copyright (C) 2007 Eric Seidel <eric@webkit.org> +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +use strict; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +my $coverageSupport = 0; +my $showHelp = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --[no-]coverage Toggle code coverage support (default: $coverageSupport) +EOF + +GetOptions( + 'coverage!' 
=> \$coverageSupport, + 'help' => \$showHelp +); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +checkRequiredSystemConfig(); +setConfiguration(); +chdirWebKit(); +my @options = XcodeOptions(); +my @coverageSupportOptions = ($coverageSupport) ? XcodeCoverageSupportOptions() : (); + +chdir "JavaScriptCore" or die "Can't find JavaScriptCore directory to build from"; +my $result; +if (isAppleMacWebKit()) { + $result = system "sh", "-c", 'xcodebuild -project JavaScriptCore.xcodeproj "$@" | grep -v setenv && exit ${PIPESTATUS[0]}', "xcodebuild", @options, @ARGV, @coverageSupportOptions; +} elsif (isAppleWinWebKit()) { + $result = buildVisualStudioProject("JavaScriptCore.vcproj/JavaScriptCore.sln"); +} elsif (isQt() or isGtk() or isWx()) { + # Qt and Gtk build everything in one-shot. No need to build anything here. + $result = 0; +} else { + die "Building not defined for this platform!\n"; +} +exit exitStatus($result); diff --git a/Tools/Scripts/build-webkit b/Tools/Scripts/build-webkit new file mode 100755 index 0000000..94b2ea2 --- /dev/null +++ b/Tools/Scripts/build-webkit @@ -0,0 +1,589 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 moiji-mobile.com All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Build script wrapper for the WebKit Open Source Project. 
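# An aside on the xcodebuild invocation in build-jsc above (this sketch is not
# part of build-webkit): piping through "grep -v setenv" would normally make the
# pipeline's exit status grep's rather than xcodebuild's, hiding build failures,
# so the inline shell command exits with ${PIPESTATUS[0]} instead. This assumes
# /bin/sh is bash, as on Mac OS X; the failing command below is just a stand-in.
my $result = system "sh", "-c",
    '{ echo compiling; exit 3; } | grep -v setenv && exit ${PIPESTATUS[0]}';
print "build status: ", $result >> 8, "\n";
# Prints 3 (the first command's status), not grep's 0. The ">> 8" mirrors the
# convention used earlier in this patch (e.g. gitConfig checking $? >> 8).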
+ +use strict; +use File::Basename; +use File::Find; +use File::Spec; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use webkitperl::features; +use POSIX; + +sub formatBuildTime($); +sub writeCongrats(); + +my $originalWorkingDirectory = getcwd(); +chdirWebKit(); + +my $showHelp = 0; +my $clean = 0; +my $minimal = 0; +my $v8 = 0; +my $installHeaders; +my $installLibs; +my $prefixPath; +my $makeArgs; +my $noWebKit2 = 0; +my $startTime = time(); + +my ( + $threeDCanvasSupport, + $threeDRenderingSupport, + $accelerated2dCanvasSupport, + $blobSupport, + $channelMessagingSupport, + $clientBasedGeolocationSupport, + $coverageSupport, + $databaseSupport, + $datagridSupport, + $datalistSupport, + $deviceOrientationSupport, + $directoryUploadSupport, + $domStorageSupport, + $eventsourceSupport, + $fileSystemSupport, + $filtersSupport, + $fullscreenAPISupport, + $geolocationSupport, + $iconDatabaseSupport, + $imageResizerSupport, + $indexedDatabaseSupport, + $inputSpeechSupport, + $javaScriptDebuggerSupport, + $linkPrefetchSupport, + $mathmlSupport, + $meterTagSupport, + $notificationsSupport, + $offlineWebApplicationSupport, + $progressTagSupport, + $sharedWorkersSupport, + $svgSupport, + $svgAnimationSupport, + $svgAsImageSupport, + $svgDOMObjCBindingsSupport, + $svgFontsSupport, + $svgForeignObjectSupport, + $svgUseSupport, + $systemMallocSupport, + $tiledBackingStoreSupport, + $videoSupport, + $wcssSupport, + $webAudioSupport, + $webInspectorSupport, + $webSocketsSupport, + $webTimingSupport, + $wmlSupport, + $workersSupport, + $xhtmlmpSupport, + $xpathSupport, + $xsltSupport, +); + +my @features = ( + { option => "3d-canvas", desc => "Toggle 3D canvas support", + define => "ENABLE_3D_CANVAS", default => (isAppleMacWebKit() && !isTiger() && !isLeopard()), value => \$threeDCanvasSupport }, + + { option => "3d-rendering", desc => "Toggle 3D rendering support", + define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() && !isTiger()), value => \$threeDRenderingSupport }, + + { option => "accelerated-2d-canvas", desc => "Toggle accelerated 2D canvas support", + define => "ENABLE_ACCELERATED_2D_CANVAS", default => 0, value => \$accelerated2dCanvasSupport }, + + { option => "blob", desc => "Toggle Blob support", + define => "ENABLE_BLOB", default => (isAppleMacWebKit()), value => \$blobSupport }, + + { option => "channel-messaging", desc => "Toggle MessageChannel and MessagePort support", + define => "ENABLE_CHANNEL_MESSAGING", default => 1, value => \$channelMessagingSupport }, + + { option => "client-based-geolocation", desc => "Toggle client-based Geolocation support", + define => "ENABLE_CLIENT_BASED_GEOLOCATION", default => isAppleWebKit(), value => \$clientBasedGeolocationSupport }, + + { option => "coverage", desc => "Toggle code coverage support", + define => "", default => 0, value => \$coverageSupport }, + + { option => "database", desc => "Toggle Database Support", + define => "ENABLE_DATABASE", default => 1, value => \$databaseSupport }, + + { option => "datagrid", desc => "Toggle Datagrid Support", + define => "ENABLE_DATAGRID", default => 0, value => \$datagridSupport }, + + { option => "datalist", desc => "Toggle HTML5 datalist support", + define => "ENABLE_DATALIST", default => 1, value => \$datalistSupport }, + + { option => "device-orientation", desc => "Toggle DeviceOrientation support", + define => "ENABLE_DEVICE_ORIENTATION", default => 0, value => \$deviceOrientationSupport }, + + { option => "directory-upload", desc => 
"Toogle Directory upload support", + define => "ENABLE_DIRECTORY_UPLOAD", default => 0, value => \$directoryUploadSupport }, + + { option => "dom-storage", desc => "Toggle DOM Storage Support", + define => "ENABLE_DOM_STORAGE", default => 1, value => \$domStorageSupport }, + + { option => "eventsource", desc => "Toggle server-sent events support", + define => "ENABLE_EVENTSOURCE", default => 1, value => \$eventsourceSupport }, + + { option => "file-system", desc => "Toggle FileSystem support", + define => "ENABLE_FILE_SYSTEM", default => 0, value => \$fileSystemSupport }, + + { option => "filters", desc => "Toggle Filters support", + define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt() || isEfl()), value => \$filtersSupport }, + + { option => "fullscreen-api", desc => "Toggle Fullscreen API support", + define => "ENABLE_FULLSCREEN_API", default => (isAppleMacWebKit() || isGtk()), value => \$fullscreenAPISupport }, + + { option => "geolocation", desc => "Toggle Geolocation support", + define => "ENABLE_GEOLOCATION", default => (isAppleWebKit() || isGtk()), value => \$geolocationSupport }, + + { option => "icon-database", desc => "Toggle Icon database support", + define => "ENABLE_ICONDATABASE", default => 1, value => \$iconDatabaseSupport }, + + { option => "image-resizer", desc => "Toggle Image Resizer API support", + define => "ENABLE_IMAGE_RESIZER", default => 0, value => \$imageResizerSupport }, + + { option => "indexed-database", desc => "Toggle Indexed Database API support", + define => "ENABLE_INDEXED_DATABASE", default => 0, value => \$indexedDatabaseSupport }, + + { option => "input-speech", desc => "Speech Input API support", + define => "ENABLE_INPUT_SPEECH", default => 0, value => \$inputSpeechSupport }, + + { option => "inspector", desc => "Toggle Web Inspector support", + define => "ENABLE_INSPECTOR", default => 1, value => \$webInspectorSupport }, + + { option => "javascript-debugger", desc => "Toggle JavaScript Debugger/Profiler support", + define => "ENABLE_JAVASCRIPT_DEBUGGER", default => 1, value => \$javaScriptDebuggerSupport }, + + { option => "link-prefetch", desc => "Toggle pre fetching support", + define => "ENABLE_LINK_PREFETCH", default => 0, value => \$linkPrefetchSupport }, + + { option => "mathml", desc => "Toggle MathML support", + define => "ENABLE_MATHML", default => 1, value => \$mathmlSupport }, + + { option => "meter-tag", desc => "Meter Tag support", + define => "ENABLE_METER_TAG", default => !isGtk() && !isAppleWinWebKit(), value => \$meterTagSupport }, + + { option => "notifications", desc => "Toggle Desktop Notifications Support", + define => "ENABLE_NOTIFICATIONS", default => 0, value => \$notificationsSupport }, + + { option => "offline-web-applications", desc => "Toggle Offline Web Application Support", + define => "ENABLE_OFFLINE_WEB_APPLICATIONS", default => 1, value => \$offlineWebApplicationSupport }, + + { option => "progress-tag", desc => "Progress Tag support", + define => "ENABLE_PROGRESS_TAG", default => 1, value => \$progressTagSupport }, + + { option => "system-malloc", desc => "Toggle system allocator instead of TCmalloc", + define => "USE_SYSTEM_MALLOC", default => 0, value => \$systemMallocSupport }, + + { option => "shared-workers", desc => "Toggle SharedWorkers support", + define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk()), value => \$sharedWorkersSupport }, + + { option => "svg", desc => "Toggle SVG support", + define => "ENABLE_SVG", default => 1, value => \$svgSupport }, + + { option 
=> "svg-animation", desc => "Toggle SVG animation support (implies SVG support)", + define => "ENABLE_SVG_ANIMATION", default => 1, value => \$svgAnimationSupport }, + + { option => "svg-as-image", desc => "Toggle SVG as Image support (implies SVG support)", + define => "ENABLE_SVG_AS_IMAGE", default => 1, value => \$svgAsImageSupport }, + + { option => "svg-dom-objc-bindings", desc => "Toggle SVG DOM Objective-C bindings support (implies SVG support)", + define => "ENABLE_SVG_DOM_OBJC_BINDINGS", default => isAppleMacWebKit(), value => \$svgDOMObjCBindingsSupport }, + + { option => "svg-fonts", desc => "Toggle SVG fonts support (imples SVG support)", + define => "ENABLE_SVG_FONTS", default => 1, value => \$svgFontsSupport }, + + { option => "svg-foreign-object", desc => "Toggle SVG foreign object support (implies SVG support)", + define => "ENABLE_SVG_FOREIGN_OBJECT", default => 1, value => \$svgForeignObjectSupport }, + + { option => "svg-use", desc => "Toggle SVG use element support (implies SVG support)", + define => "ENABLE_SVG_USE", default => 1, value => \$svgUseSupport }, + + { option => "tiled-backing-store", desc => "Toggle Tiled Backing Store support", + define => "ENABLE_TILED_BACKING_STORE", default => isQt(), value => \$tiledBackingStoreSupport }, + + { option => "video", desc => "Toggle Video support", + define => "ENABLE_VIDEO", default => (isAppleWebKit() || isGtk()), value => \$videoSupport }, + + { option => "wcss", desc => "Toggle WCSS support", + define => "ENABLE_WCSS", default => 0, value => \$wcssSupport }, + + { option => "web-audio", desc => "Toggle Web Audio support", + define => "ENABLE_WEB_AUDIO", default => 0, value=> \$webAudioSupport }, + + { option => "web-sockets", desc => "Toggle Web Sockets support", + define => "ENABLE_WEB_SOCKETS", default => 1, value=> \$webSocketsSupport }, + + { option => "web-timing", desc => "Toggle Web Timing support", + define => "ENABLE_WEB_TIMING", default => 0, value=> \$webTimingSupport }, + + { option => "wml", desc => "Toggle WML support", + define => "ENABLE_WML", default => 0, value => \$wmlSupport }, + + { option => "workers", desc => "Toggle Web Workers support", + define => "ENABLE_WORKERS", default => (isAppleWebKit() || isGtk()), value => \$workersSupport }, + + { option => "xhtmlmp", desc => "Toggle XHTML-MP support", + define => "ENABLE_XHTMLMP", default => 0, value => \$xhtmlmpSupport }, + + { option => "xpath", desc => "Toggle XPath support", + define => "ENABLE_XPATH", default => 1, value => \$xpathSupport }, + + { option => "xslt", desc => "Toggle XSLT support", + define => "ENABLE_XSLT", default => 1, value => \$xsltSupport }, +); + +# Update defaults from Qt's project file +if (isQt()) { + my %qtDefaults = qtFeatureDefaults(); + foreach (@features) { + $_->{default} = $qtDefaults{$_->{define}} || 0; + } +} + +# Additional environment parameters +push @ARGV, split(/ /, $ENV{'BUILD_WEBKIT_ARGS'}) if ($ENV{'BUILD_WEBKIT_ARGS'}); + +# Initialize values from defaults +foreach (@ARGV) { + if ($_ eq '--minimal') { + $minimal = 1; + } elsif ($_ eq '--v8') { + $v8 = 1; + } +} + +# Initialize values from defaults +foreach (@features) { + ${$_->{value}} = ($_->{default} && !$minimal) || 0; +} + +$svgSupport = $svgSupport || $svgAnimationSupport || $svgAsImageSupport + || $svgDOMObjCBindingsSupport || $svgFontsSupport + || $svgForeignObjectSupport || $svgUseSupport; + + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + 
--clean Cleanup the build directory + --debug Compile in debug mode + --wincairo Build using Cairo (rather than CoreGraphics) on Windows + --chromium Build the Chromium port on Mac/Win/Linux + --gtk Build the GTK+ port + --qt Build the Qt port + --efl Build the EFL port + --inspector-frontend Copy changes to the inspector front-end files to the build directory + + --install-headers=<path> Set installation path for the headers (Qt only) + --install-libs=<path> Set installation path for the libraries (Qt only) + --v8 Use V8 as JavaScript engine (Qt only) + + --prefix=<path> Set installation prefix to the given path (Gtk/Efl only) + --makeargs=<arguments> Optional Makefile flags + + --minimal No optional features, unless explicitly enabled. + +EOF + +my %options = ( + 'help' => \$showHelp, + 'clean' => \$clean, + 'install-headers=s' => \$installHeaders, + 'install-libs=s' => \$installLibs, + 'prefix=s' => \$prefixPath, + 'makeargs=s' => \$makeArgs, + 'minimal' => \$minimal, + 'v8' => \$v8, + 'no-webkit2' => \$noWebKit2, +); + +# Build usage text and options list from features +foreach (@features) { + my $opt = sprintf("%-35s", " --[no-]$_->{option}"); + $usage .= "$opt $_->{desc} (default: $_->{default})\n"; + $options{"$_->{option}!"} = $_->{value}; +} + +GetOptions(%options); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +checkRequiredSystemConfig(); +setConfiguration(); + +my $productDir = productDir(); + +# Remove 0 byte sized files from productDir after slave lost for Qt buildbots. +File::Find::find(\&unlinkZeroFiles, $productDir) if (isQt() && -e $productDir); + +sub unlinkZeroFiles () +{ + my $file = $File::Find::name; + if (! -s $file) { + unlink $file; + print "0 byte sized file removed from build directory: $file\n"; + } +} + +# Check that all the project directories are there. +my @projects = ("JavaScriptCore", "WebCore", "WebKit"); + +my @otherDirs = ("WebKitLibraries"); +for my $dir (@projects, @otherDirs) { + if (! -d $dir) { + die "Error: No $dir directory found. Please do a fresh checkout.\n"; + } +} + +my @options = (); + +# enable autotool options accordingly +if (isGtk()) { + @options = @ARGV; + foreach (@features) { + push @options, autotoolsFlag(${$_->{value}}, $_->{option}); + } + + push @options, "--prefix=" . $prefixPath if defined($prefixPath); + push @options, "--makeargs=" . $makeArgs if defined($makeArgs); +} elsif (isAppleMacWebKit()) { + push @options, XcodeOptions(); + + sub option($$$) + { + my ($feature, $isEnabled, $defaultValue) = @_; + return "" if $defaultValue == $isEnabled; + return $feature . "=" . ($isEnabled ? $feature : " "); + } + + foreach (@features) { + if ($_->{option} ne "coverage") { + my $option = option($_->{define}, ${$_->{value}}, $_->{default}); + push @options, $option unless $option eq ""; + } + } + + # Apple builds JavaScriptGlue, and only on the Mac. + splice @projects, 1, 0, "JavaScriptGlue"; + + # ANGLE must come before WebCore + splice @projects, 0, 0, "ANGLE"; + + # WebKit2 is only supported in SnowLeopard and later at present. + push @projects, ("WebKit2", "Tools/MiniBrowser") if osXVersion()->{"minor"} >= 6 and !$noWebKit2; + + # Copy library and header from WebKitLibraries to a findable place in the product directory. + my @librariesToCopy = ( + "libWebKitSystemInterfaceTiger.a", + "libWebKitSystemInterfaceLeopard.a", + "libWebKitSystemInterfaceSnowLeopard.a", + "libWebCoreSQLite3.a", + ); + foreach my $libName (@librariesToCopy) { + my $srcLib = "WebKitLibraries/" . $libName; + my $lib = "$productDir/" . 
$libName; + if (!-e $lib || -M $lib > -M $srcLib) { + print "Updating $lib\n"; + system "ditto", $srcLib, $lib; + system "ranlib", $lib; + } + } + + # FIXME: This code should be abstracted to not be copy/paste. + my $srcHeader = "WebKitLibraries/WebKitSystemInterface.h"; + my $header = "$productDir/usr/local/include/WebKitSystemInterface.h"; + if (!-e $header || -M $header > -M $srcHeader) { + print "Updating $header\n"; + system "mkdir", "-p", "$productDir/usr/local/include"; + system "ditto", $srcHeader, $header; + } + + my $srcHeaderDir = "WebKitLibraries/WebCoreSQLite3"; + my $headerDir = "$productDir/WebCoreSQLite3"; + if (!-e $headerDir || -M $headerDir > -M $srcHeaderDir) { + print "Updating $headerDir\n"; + system "ditto", $srcHeaderDir, $headerDir; + } +} elsif (isAppleWinWebKit()) { + # Copy WebKitSupportLibrary to the correct location in WebKitLibraries so it can be found. + # Will fail if WebKitSupportLibrary.zip is not in source root. + (system("perl Tools/Scripts/update-webkit-support-libs") == 0) or die; +} elsif (isQt()) { + @options = @ARGV; + push @options, "--install-headers=" . $installHeaders if defined($installHeaders); + push @options, "--install-libs=" . $installLibs if defined($installLibs); + push @options, "--makeargs=" . $makeArgs if defined($makeArgs); + + foreach (@features) { + push @options, "DEFINES+=$_->{define}=${$_->{value}}" if ${$_->{value}} != $_->{default}; + } + + if ($minimal) { + push @options, "CONFIG+=minimal"; + } + + if ($v8) { + push @options, "CONFIG+=v8"; + } +} + +# Force re-link of existing libraries if different than expected +removeLibraryDependingOnFeature("WebCore", "SVG", $svgSupport); + +if (isInspectorFrontend()) { + exit exitStatus(copyInspectorFrontendFiles()); +} + +if (isWx()) { + downloadWafIfNeeded(); + @options = (); + if (defined($makeArgs)) { + @options = split(/ /, $makeArgs); + } + @projects = (); + my $result = buildWafProject('.', $clean, @options); + exit exitStatus($result) if exitStatus($result); +} + +if (isChromium()) { + @options = @ARGV; + # Chromium doesn't build by project directories. + @projects = (); + my $result = buildChromium($clean, @options); + exit exitStatus($result) if exitStatus($result); +} + +if (isEfl()) { + @options = (); + @projects = (); + foreach (@features) { + my $featureName = $_->{define}; + if ($featureName) { + my $featureEnabled = ${$_->{value}} ? "ON" : "OFF"; + push @options, "-D$featureName=$featureEnabled"; + } + } + push @options, "--makeargs=" . $makeArgs if defined($makeArgs); + push @options, "--prefix=" . $prefixPath if defined($prefixPath); + my $result = buildCMakeEflProject($clean, @options); + exit exitStatus($result) if exitStatus($result); +} + +# Build, and abort if the build fails. +for my $dir (@projects) { + chdir $dir or die; + my $result = 0; + + # For Gtk and Qt the WebKit project builds all others + if ((isGtk() || isQt()) && $dir ne "WebKit") { + chdir ".." 
or die; + next; + } + + if (isGtk()) { + $result = buildGtkProject($dir, $clean, @options); + } elsif (isQt()) { + $result = buildQMakeQtProject($dir, $clean, @options); + } elsif (isAppleMacWebKit()) { + $dir = "MiniBrowser" if $dir eq "Tools/MiniBrowser"; + my @local_options = @options; + push @local_options, XcodeCoverageSupportOptions() if $coverageSupport && $dir ne "ANGLE"; + $result = buildXCodeProject($dir, $clean, @local_options, @ARGV); + } elsif (isAppleWinWebKit()) { + if ($dir eq "WebKit") { + $result = buildVisualStudioProject("win/WebKit.vcproj/WebKit.sln", $clean); + } + } + # Various build* calls above may change the CWD. + chdirWebKit(); + + if (exitStatus($result)) { + my $scriptDir = relativeScriptsDir(); + if (usingVisualStudioExpress()) { + # Visual Studio Express is so lame it can't stdout build failures. + # So we find its logs and dump them to the console ourselves. + system(File::Spec->catfile($scriptDir, "print-vse-failure-logs")); + } + if (isAppleWinWebKit()) { + print "\n\n===== BUILD FAILED ======\n\n"; + print "Please ensure you have run $scriptDir/update-webkit to install dependencies.\n\n"; + my $baseProductDir = baseProductDir(); + print "You can view build errors by checking the BuildLog.htm files located at:\n$baseProductDir/obj/<project>/<config>.\n"; + } + exit exitStatus($result); + } +} + +# Don't report the "WebKit is now built" message after a clean operation. +exit if $clean; + +# Write out congratulations message. +writeCongrats(); + +exit 0; + +sub formatBuildTime($) +{ + my ($buildTime) = @_; + + my $buildHours = int($buildTime / 3600); + my $buildMins = int(($buildTime - $buildHours * 3600) / 60); + my $buildSecs = $buildTime - $buildHours * 3600 - $buildMins * 60; + + if ($buildHours) { + return sprintf("%dh:%02dm:%02ds", $buildHours, $buildMins, $buildSecs); + } + return sprintf("%02dm:%02ds", $buildMins, $buildSecs); +} + +sub writeCongrats() +{ + my $launcherPath = launcherPath(); + my $launcherName = launcherName(); + my $endTime = time(); + my $buildTime = formatBuildTime($endTime - $startTime); + + print "\n"; + print "===========================================================\n"; + print " WebKit is now built ($buildTime). \n"; + if (!isChromium()) { + print " To run $launcherName with this newly-built code, use the\n"; + print " \"$launcherPath\" script.\n"; + } + print "===========================================================\n"; +} diff --git a/Tools/Scripts/build-webkittestrunner b/Tools/Scripts/build-webkittestrunner new file mode 100755 index 0000000..e96dd6a --- /dev/null +++ b/Tools/Scripts/build-webkittestrunner @@ -0,0 +1,73 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use File::Basename; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +my $showHelp = 0; +my $clean = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --clean Clean up the build directory +EOF + +GetOptions( + 'help' => \$showHelp, + 'clean' => \$clean, +); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +checkRequiredSystemConfig(); +setConfiguration(); +chdirWebKit(); + +# Build +chdir "Tools/WebKitTestRunner" or die; + +my $result; +if (isAppleMacWebKit()) { + $result = buildXCodeProject("WebKitTestRunner", $clean, XcodeOptions(), @ARGV); +} elsif (isAppleWinWebKit()) { + $result = buildVisualStudioProject("WebKitTestRunner.sln", $clean); +} elsif (isQt()) { + # Qt builds everything in one shot. No need to build anything here. + $result = 0; +} else { + die "WebKitTestRunner is not supported on this platform.\n"; +} + +exit exitStatus($result); diff --git a/Tools/Scripts/check-Xcode-source-file-types b/Tools/Scripts/check-Xcode-source-file-types new file mode 100755 index 0000000..57a70b9 --- /dev/null +++ b/Tools/Scripts/check-Xcode-source-file-types @@ -0,0 +1,168 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to check that source file extensions match file types in Xcode project.pbxproj files. + +# TODO +# - Add support for file types other than source code files. +# - Can't differentiate between sourcecode.c.h and sourcecode.cpp.h. +# (Hint: Use gcc -x c/objective-c/c++/objective-c++ -E. It will +# take time to check each header using gcc, so make it a switch.) 
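# Illustration (hypothetical file): given a PBXFileReference entry for
# "Example.mm" whose explicitFileType claims "sourcecode.cpp.cpp", the script
# warns about an incorrect file type because the extension map below pairs that
# type with ".cpp", and with --fix it rewrites the attribute to
# "lastKnownFileType = sourcecode.cpp.objcpp;". The extension extraction works
# like this:
use File::Basename;
my $filePath = "Example.mm";                  # hypothetical path from the entry
my $claimedType = "sourcecode.cpp.cpp";       # hypothetical explicitFileType
my (undef, undef, $fileExtension) = map { lc($_) } fileparse($filePath, qr{\.[^.]+$});
print "WARNING: Incorrect file type '$claimedType' for file '$filePath'.\n"
    if $fileExtension ne ".cpp";              # ".cpp" is what the map gives for that type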
+ +use strict; + +use File::Basename; +use File::Spec; +use File::Temp qw(tempfile); +use Getopt::Long; + +# Map of Xcode file types to file extensions. +my %typeExtensionMap = qw( + sourcecode.c.c .c + sourcecode.c.h .h + sourcecode.c.objc .m + sourcecode.cpp.h .h + sourcecode.cpp.cpp .cpp + sourcecode.cpp.objcpp .mm + sourcecode.exports .exp + sourcecode.javascript .js + sourcecode.make .make + sourcecode.mig .defs + sourcecode.yacc .y +); + +# Map of file extensions to Xcode file types. +my %extensionTypeMap = map { $typeExtensionMap{$_} => $_ } keys %typeExtensionMap; +$extensionTypeMap{'.h'} = 'sourcecode.c.h'; # See TODO list. + +my $shouldFixIssues = 0; +my $printWarnings = 1; +my $showHelp; + +my $getOptionsResult = GetOptions( + 'f|fix' => \$shouldFixIssues, + 'h|help' => \$showHelp, + 'w|warnings!' => \$printWarnings, +); + +if (scalar(@ARGV) == 0 && !$showHelp) { + print STDERR "ERROR: No Xcode project files (project.pbxproj) listed on command-line.\n"; + undef $getOptionsResult; +} + +if (!$getOptionsResult || $showHelp) { + print STDERR <<__END__; +Usage: @{[ basename($0) ]} [options] path/to/project.pbxproj [path/to/project.pbxproj ...] + -f|--fix fix mismatched types in Xcode project file + -h|--help show this help message + -w|--[no-]warnings show or suppress warnings (default: show warnings) +__END__ + exit 1; +} + +for my $projectFile (@ARGV) { + my $issuesFound = 0; + my $issuesFixed = 0; + + if (basename($projectFile) =~ /\.xcodeproj$/) { + $projectFile = File::Spec->catfile($projectFile, "project.pbxproj"); + } + + if (basename($projectFile) ne "project.pbxproj") { + print STDERR "WARNING: Not an Xcode project file: $projectFile\n" if $printWarnings; + next; + } + + open(IN, "< $projectFile") || die "Could not open $projectFile: $!"; + + my ($OUT, $tempFileName); + if ($shouldFixIssues) { + ($OUT, $tempFileName) = tempfile( + basename($projectFile) . "-XXXXXXXX", + DIR => dirname($projectFile), + UNLINK => 0, + ); + + # Clean up temp file in case of die() + $SIG{__DIE__} = sub { + close(IN); + close($OUT); + unlink($tempFileName); + }; + } + + # Fast-forward to "Begin PBXFileReference section". + while (my $line = <IN>) { + print $OUT $line if $shouldFixIssues; + last if $line =~ m#^\Q/* Begin PBXFileReference section */\E$#; + } + + while (my $line = <IN>) { + if ($line =~ m#^\Q/* End PBXFileReference section */\E$#) { + print $OUT $line if $shouldFixIssues; + last; + } + + if ($line =~ m#^\s*[A-Z0-9]{24} /\* (.+) \*/\s+=\s+\{.*\s+explicitFileType = (sourcecode[^;]*);.*\s+path = ([^;]+);.*\};$#) { + my $fileName = $1; + my $fileType = $2; + my $filePath = $3; + my (undef, undef, $fileExtension) = map { lc($_) } fileparse(basename($filePath), qr{\.[^.]+$}); + + if (!exists $typeExtensionMap{$fileType}) { + $issuesFound++; + print STDERR "WARNING: Unknown file type '$fileType' for file '$filePath'.\n" if $printWarnings; + } elsif ($typeExtensionMap{$fileType} ne $fileExtension) { + $issuesFound++; + print STDERR "WARNING: Incorrect file type '$fileType' for file '$filePath'.\n" if $printWarnings; + $line =~ s/(\s+)explicitFileType( = )(sourcecode[^;]*);/$1lastKnownFileType$2$extensionTypeMap{$fileExtension};/; + $issuesFixed++ if $shouldFixIssues; + } + } + + print $OUT $line if $shouldFixIssues; + } + + # Output the rest of the file. 
+ print $OUT <IN> if $shouldFixIssues; + + close(IN); + + if ($shouldFixIssues) { + close($OUT); + + unlink($projectFile) || die "Could not delete $projectFile: $!"; + rename($tempFileName, $projectFile) || die "Could not rename $tempFileName to $projectFile: $!"; + } + + if ($printWarnings) { + printf STDERR "%s issues found for $projectFile.\n", ($issuesFound ? $issuesFound : "No"); + print STDERR "$issuesFixed issues fixed for $projectFile.\n" if $issuesFixed && $shouldFixIssues; + print STDERR "NOTE: Open $projectFile in Xcode to let it have its way with the file.\n" if $issuesFixed; + print STDERR "\n"; + } +} + +exit 0; diff --git a/Tools/Scripts/check-dom-results b/Tools/Scripts/check-dom-results new file mode 100755 index 0000000..0b32406 --- /dev/null +++ b/Tools/Scripts/check-dom-results @@ -0,0 +1,141 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to check status of W3C DOM tests that are part of the WebKit tests. 
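For reference, the tally in this script counts disabled tests as part of a suite's total but not as failures. With hypothetical numbers, the arithmetic works out as in this small Perl sketch (the figures are made up; the formulas mirror the script below):

    # Hypothetical suite: 120 *-expected.txt results, 5 *-disabled tests, 3 failures.
    my ($resultCount, $disabledCount, $failureCount) = (120, 5, 3);
    my $count        = $resultCount + $disabledCount;                   # 125
    my $successCount = $count - $failureCount - $disabledCount;         # 117
    my $percentage   = sprintf "%.1f", $successCount * 100.0 / $count;  # 93.6
    print "${successCount} out of ${count} tests succeeded (${percentage}%) ($disabledCount disabled)\n";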
+ +use strict; +use FindBin; +use Cwd; +use lib $FindBin::Bin; +use webkitdirs; + +chdirWebKit(); + +my $verbose = $ARGV[0] && $ARGV[0] eq "-v"; + +my $workingDir = getcwd(); +my $testDirectory = "$workingDir/LayoutTests"; + +my @suites = ( {"name" => "DOM Level 1 Core (html)", "directory" => "dom/html/level1/core"}, + {"name" => "DOM Level 2 Core (html)", "directory" => "dom/html/level2/core"}, + {"name" => "DOM Level 2 Events (html)", "directory" => "dom/html/level2/events"}, + {"name" => "DOM Level 2 HTML (html)", "directory" => "dom/html/level2/html"}, + {"name" => "DOM Level 1 Core (xhtml)", "directory" => "dom/xhtml/level1/core"}, + {"name" => "DOM Level 2 Core (xhtml)", "directory" => "dom/xhtml/level2/core"}, + {"name" => "DOM Level 2 Events (xhtml)", "directory" => "dom/xhtml/level2/events"}, + {"name" => "DOM Level 2 HTML (xhtml)", "directory" => "dom/xhtml/level2/html"}, + {"name" => "DOM Level 3 Core (xhtml)", "directory" => "dom/xhtml/level3/core"}, + {"name" => "DOM Level 3 XPath (svg)", "directory" => "dom/svg/level3/xpath"}); + +my $totalCount = 0; +my $totalSuccesses = 0; +my $totalDisabled = 0; +my $totalFailed = 0; + +foreach my $suite (@suites) { + + my %suite = %$suite; + my $directory = $suite{"directory"}; + my $name = $suite{"name"}; + my @results = `find "${testDirectory}/${directory}" -name "*-expected.txt"`; + my @disabled = `find "${testDirectory}/${directory}" -name "*-disabled"`; + + my @failures = (); + my $count = 0; + + foreach my $result (@results) { + $count++; + my $success = 0; + open RESULT, "<$result"; + while (<RESULT>) { + if (/Success/) { + $success = 1; + last; + } + } + close RESULT; + if (!$success) { + push @failures, $result; + } + } + + my $disabledCount = (scalar @disabled); + my $failureCount = (scalar @failures); + + $count += $disabledCount; + + my $successCount = $count - $failureCount - $disabledCount; + my $percentage = (sprintf "%.1f", ($successCount * 100.0 / $count)); + + if ($percentage == 100) { + print "${name}: all ${count} tests succeeded"; + } else { + print "${name}: ${successCount} out of ${count} tests succeeded (${percentage}%)"; + } + print " ($disabledCount disabled)" if $disabledCount; + print "\n"; + if ($verbose) { + print "\n"; + if (@disabled) { + print " Disabled:\n"; + + foreach my $failure (sort @disabled) { + $failure =~ s|.*/||; + $failure =~ s|-disabled||; + print " ${directory}/${failure}"; + } + } + if (@failures) { + print " Failed:\n"; + + foreach my $failure (sort @failures) { + $directory =~ m|^dom/(\w+)|; + my $extension = $1; + $failure =~ s|.*/||; + $failure =~ s|-expected\.txt|.${extension}|; + print " ${directory}/${failure}"; + } + } + + print "\n"; + } + + $totalCount += $count; + $totalSuccesses += $successCount; + $totalDisabled += $disabledCount; + $totalFailed += $failureCount; +} + + +my $totalPercentage = (sprintf "%.1f", ($totalSuccesses * 100.0 / $totalCount)); +my $totalDisabledPercentage = (sprintf "%.1f", ($totalDisabled * 100.0 / $totalCount)); +my $totalFailedPercentage = (sprintf "%.1f", ($totalFailed * 100.0 / $totalCount)); + +print "Total: ${totalSuccesses} out of ${totalCount} tests succeeded (${totalPercentage}%)\n"; +print " ${totalDisabled} tests disabled (${totalDisabledPercentage}%)\n"; +print " ${totalFailed} tests failed (${totalFailedPercentage}%)\n"; diff --git a/Tools/Scripts/check-for-exit-time-destructors b/Tools/Scripts/check-for-exit-time-destructors new file mode 100755 index 0000000..249bd0b --- /dev/null +++ b/Tools/Scripts/check-for-exit-time-destructors @@ -0,0 
+1,151 @@ +#!/usr/bin/perl + +# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "check-for-exit-time-destructors" script for WebKit Open Source Project + +# Intended to be invoked from an Xcode build step to check if there are +# any exit-time destructors in a target. + +use warnings; +use strict; + +use File::Basename; + +sub touch($); +sub printFunctions($$); + +my $arch = $ENV{'CURRENT_ARCH'}; +my $configuration = $ENV{'CONFIGURATION'}; +my $target = $ENV{'TARGET_NAME'}; +my $variant = $ENV{'CURRENT_VARIANT'}; +my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'}; +my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'}; + +$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH +$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT + +my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}"; + +my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . 
".timestamp"; +my $buildTimestampAge = -M $buildTimestampPath; +my $scriptAge = -M $0; + +my $list = $ENV{"LINK_FILE_LIST_${variant}_${arch}"}; + +if (!open LIST, $list) { + print "ERROR: Could not open $list\n"; + exit 1; +} + +my @files = <LIST>; +chomp @files; +close LIST; + +my $sawError = 0; + +for my $file (sort @files) { + if (defined $buildTimestampAge && $buildTimestampAge < $scriptAge) { + my $fileAge = -M $file; + next if defined $fileAge && $fileAge > $buildTimestampAge; + } + if (!open NM, "(nm '$file' | sed 's/^/STDOUT:/') 2>&1 |") { + print "ERROR: Could not open $file\n"; + $sawError = 1; + next; + } + my $sawAtExit = 0; + while (<NM>) { + if (/^STDOUT:/) { + $sawAtExit = 1 if /___cxa_atexit/; + } else { + print STDERR if $_ ne "nm: no name list\n"; + } + } + close NM; + next unless $sawAtExit; + + my $shortName = $file; + $shortName =~ s/.*\///; + + $sawError = 1 if printFunctions($shortName, $file); +} + +if ($sawError and !$coverageBuild) { + print "ERROR: Use DEFINE_STATIC_LOCAL from <wtf/StdLibExtras.h>\n"; + unlink $executablePath; + exit 1; +} + +touch($buildTimestampPath); +exit 0; + +sub touch($) +{ + my ($path) = @_; + open(TOUCH, ">", $path) or die "$!"; + close(TOUCH); +} + +sub demangle($) +{ + my ($symbol) = @_; + if (!open FILT, "c++filt $symbol |") { + print "ERROR: Could not open c++filt\n"; + return; + } + my $result = <FILT>; + close FILT; + chomp $result; + return $result; +} + +sub printFunctions($$) +{ + my ($shortName, $path) = @_; + if (!open OTOOL, "otool -tV '$path' |") { + print "WARNING: Could not open $path\n"; + return 0; + } + my %functions; + my $currentSymbol = ""; + while (<OTOOL>) { + $currentSymbol = $1 if /^(\w+):$/; + next unless $currentSymbol; + $functions{demangle($currentSymbol)} = 1 if /___cxa_atexit/; + } + close OTOOL; + my $result = 0; + for my $function (sort keys %functions) { + if (!$result) { + print "ERROR: $shortName has exit time destructors in it! ($path)\n"; + $result = 1; + } + print "ERROR: In function $function\n"; + } + return $result; +} diff --git a/Tools/Scripts/check-for-global-initializers b/Tools/Scripts/check-for-global-initializers new file mode 100755 index 0000000..102fa09 --- /dev/null +++ b/Tools/Scripts/check-for-global-initializers @@ -0,0 +1,161 @@ +#!/usr/bin/perl + +# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "check-for-global-initializers" script for WebKit Open Source Project + +# Intended to be invoked from an Xcode build step to check if there are +# any global initializers in a target. + +use warnings; +use strict; + +use File::Basename; + +sub touch($); +sub demangle($); + +my $arch = $ENV{'CURRENT_ARCH'}; +my $configuration = $ENV{'CONFIGURATION'}; +my $target = $ENV{'TARGET_NAME'}; +my $variant = $ENV{'CURRENT_VARIANT'}; +my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'}; +my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'}; + +$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH +$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT + +my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}"; + +my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . ".timestamp"; +my $buildTimestampAge = -M $buildTimestampPath; +my $scriptAge = -M $0; + +my $list = $ENV{"LINK_FILE_LIST_${variant}_${arch}"}; + +if (!open LIST, $list) { + print "ERROR: Could not open $list\n"; + exit 1; +} + +my @files = <LIST>; +chomp @files; +close LIST; + +my $sawError = 0; + +for my $file (sort @files) { + if (defined $buildTimestampAge && $buildTimestampAge < $scriptAge) { + my $fileAge = -M $file; + next if defined $fileAge && $fileAge > $buildTimestampAge; + } + if (!open NM, "(nm '$file' | sed 's/^/STDOUT:/') 2>&1 |") { + print "ERROR: Could not open $file\n"; + $sawError = 1; + next; + } + my $sawGlobal = 0; + my @globals; + while (<NM>) { + if (/^STDOUT:/) { + my $line = $_; + if ($line =~ /__GLOBAL__I(.+)$/) { + $sawGlobal = 1; + push(@globals, demangle($1)); + } + } else { + print STDERR if $_ ne "nm: no name list\n"; + } + } + close NM; + if ($sawGlobal) { + my $shortName = $file; + $shortName =~ s/.*\///; + + # Special cases for files that have initializers in debug builds. + if ($configuration eq "Debug" or $variant eq "debug" or $debugRoot) { + if ($target eq "JavaScriptCore") { + next if $shortName eq "AllInOneFile.o"; + next if $shortName eq "Opcode.o"; + next if $shortName eq "Structure.o"; + next if $shortName eq "nodes.o"; + } + if ($target eq "WebCore") { + next if $shortName eq "BidiRun.o"; + next if $shortName eq "CachedPage.o"; + next if $shortName eq "CachedResource.o"; + next if $shortName eq "FEGaussianBlur.o"; + next if $shortName eq "Frame.o"; + next if $shortName eq "JSCustomSQLTransactionCallback.o"; + next if $shortName eq "JSLazyEventListener.o"; + next if $shortName eq "Node.o"; + next if $shortName eq "Page.o"; + next if $shortName eq "Range.o"; + next if $shortName eq "RenderObject.o"; + next if $shortName eq "SVGElementInstance.o"; + next if $shortName eq "SubresourceLoader.o"; + next if $shortName eq "XMLHttpRequest.o"; + } + if ($target eq "WebKit") { + next if $shortName eq "HostedNetscapePluginStream.o"; + next if $shortName eq "NetscapePluginInstanceProxy.o"; + } + } + + print "ERROR: $shortName has one or more global initializers in it! 
($file), near @globals\n"; + $sawError = 1; + } +} + +if ($sawError and !$coverageBuild) { + unlink $executablePath; + exit 1; +} + +touch($buildTimestampPath); +exit 0; + +sub touch($) +{ + my ($path) = @_; + open(TOUCH, ">", $path) or die "$!"; + close(TOUCH); +} + +sub demangle($) +{ + my ($symbol) = @_; + if (!open FILT, "c++filt $symbol |") { + print "ERROR: Could not open c++filt\n"; + return; + } + my $result = <FILT>; + close FILT; + chomp $result; + return $result; +} + diff --git a/Tools/Scripts/check-for-inappropriate-files-in-framework b/Tools/Scripts/check-for-inappropriate-files-in-framework new file mode 100755 index 0000000..1ab71b2 --- /dev/null +++ b/Tools/Scripts/check-for-inappropriate-files-in-framework @@ -0,0 +1,68 @@ +#!/usr/bin/env ruby + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +base_directory = ENV['TARGET_BUILD_DIR'] or throw "Unable to find TARGET_BUILD_DIR in the environment!" +project_name = ENV['PROJECT_NAME'] or throw "Unable to find PROJECT_NAME in the environment!" +is_shallow_bundle = (ENV['SHALLOW_BUNDLE'] || "NO").upcase == "YES" + +$INAPPROPRIATE_FILES = { "WebCore" => { "Resources" => ["*.css", "*.in", "*.idl", "*.h"] } } + +Dir.chdir base_directory + +$error_printed = false + +def print_error msg + $error_printed = true + STDERR.puts "ERROR: #{msg}" +end + +def print_inappropriate_file_error framework, relative_path + print_error "#{framework}.framework/#{relative_path} should not be present in the framework." +end + +def check_framework framework, is_shallow_bundle + $INAPPROPRIATE_FILES[framework].each do |directory, patterns| + framework_bundle_path = is_shallow_bundle ? "#{framework}.framework" : "#{framework}.framework/Versions/A/#{directory}" + Dir.chdir framework_bundle_path do + patterns.each do |pattern| + Dir.glob(pattern).each do |inappropriate_file| + print_inappropriate_file_error framework, is_shallow_bundle ? inappropriate_file : "#{directory}/#{inappropriate_file}" + File.unlink inappropriate_file + end + end + end + end +end + +check_framework project_name, is_shallow_bundle + +if $error_printed + STDERR.puts + STDERR.puts " Inappropriate files were detected and have been removed from the framework." 
+ STDERR.puts " If this error continues to appear after building again then the build system needs" + STDERR.puts " to be modified so that the inappropriate files are no longer copied in to the framework." + STDERR.puts + exit 1 +end diff --git a/Tools/Scripts/check-for-weak-vtables-and-externals b/Tools/Scripts/check-for-weak-vtables-and-externals new file mode 100755 index 0000000..a3dc364 --- /dev/null +++ b/Tools/Scripts/check-for-weak-vtables-and-externals @@ -0,0 +1,120 @@ +#!/usr/bin/perl + +# Copyright (C) 2006, 2007, 2008, 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "check-for-weak-vtables-and-externals" script for WebKit Open Source Project + +# Intended to be invoked from an Xcode build step to check if there are +# any weak vtables or weak externals in a target. + +use warnings; +use strict; + +use File::Basename; + +sub touch($); + +my $arch = $ENV{'CURRENT_ARCH'}; +my $configuration = $ENV{'CONFIGURATION'}; +my $target = $ENV{'TARGET_NAME'}; +my $variant = $ENV{'CURRENT_VARIANT'}; +my $coverageBuild = $ENV{'WEBKIT_COVERAGE_BUILD'}; +my $debugRoot = $ENV{'WEBKIT_DEBUG_ROOT'}; + +$arch = $ENV{'NATIVE_ARCH'} if !$arch; # for Xcode 2.1, which does not have CURRENT_ARCH +$variant = "normal" if !$variant; # for Xcode 2.1, which does not have CURRENT_VARIANT + +my $executablePath = "$ENV{'TARGET_BUILD_DIR'}/$ENV{'EXECUTABLE_PATH'}"; + +my $buildTimestampPath = $ENV{'TARGET_TEMP_DIR'} . "/" . basename($0) . ".timestamp"; +my $buildTimestampAge = -M $buildTimestampPath; +my $executablePathAge = -M $executablePath; + +my $sawError = 0; + +if (!defined $executablePathAge || !defined $buildTimestampAge || $executablePathAge > $buildTimestampAge) { + if (!open NM, "(nm -m '$executablePath' | c++filt | sed 's/^/STDOUT:/') 2>&1 |") { + print "ERROR: Could not open $executablePath\n"; + $sawError = 1; + next; + } + my @weakVTableClasses = (); + my @weakExternalSymbols = (); + while (<NM>) { + if (/^STDOUT:/) { + # Ignore undefined, RTTI and typeinfo symbols. 
+ next if /\bundefined\b/ or /\b__ZT[IS]/; + + if (/weak external vtable for (.*)$/) { + push @weakVTableClasses, $1; + } elsif (/weak external (.*)$/) { + push @weakExternalSymbols, $1; + } + } else { + print STDERR if $_ ne "nm: no name list\n"; + } + } + close NM; + + my $shortName = $executablePath; + $shortName =~ s/.*\///; + + if (@weakVTableClasses) { + print "ERROR: $shortName has a weak vtable in it ($executablePath)\n"; + print "ERROR: Fix by making sure the first virtual function in each of these classes is not an inline:\n"; + for my $class (sort @weakVTableClasses) { + print "ERROR: class $class\n"; + } + $sawError = 1; + } + + if (@weakExternalSymbols) { + print "ERROR: $shortName has a weak external symbol in it ($executablePath)\n"; + print "ERROR: A weak external symbol is generated when a symbol is defined in multiple compilation units and is also marked as being exported from the library.\n"; + print "ERROR: A common cause of weak external symbols is when an inline function is listed in the linker export file.\n"; + for my $symbol (sort @weakExternalSymbols) { + print "ERROR: symbol $symbol\n"; + } + $sawError = 1; + } +} + +if ($sawError and !$coverageBuild) { + unlink $executablePath; + exit 1; +} + +touch($buildTimestampPath); + +exit 0; + +sub touch($) +{ + my ($path) = @_; + open(TOUCH, ">", $path) or die "$!"; + close(TOUCH); +} diff --git a/Tools/Scripts/check-for-webkit-framework-include-consistency b/Tools/Scripts/check-for-webkit-framework-include-consistency new file mode 100755 index 0000000..339fa7e --- /dev/null +++ b/Tools/Scripts/check-for-webkit-framework-include-consistency @@ -0,0 +1,111 @@ +#!/usr/bin/env ruby + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + + +base_directory = ENV['TARGET_BUILD_DIR'] or throw "Unable to find TARGET_BUILD_DIR in the environment!" +is_shallow_bundle = (ENV['SHALLOW_BUNDLE'] || "NO").upcase == "YES" + +unless base_directory + throw "Unable to find TARGET_BUILD_DIR in the environment!" 
+end + +Dir.chdir base_directory + +$PERMITTED_INCLUDE_TYPES = { :public => [ :public ], :private => [ :public, :private ] } + +$HEADER_NAMES_TO_TYPE = { } +$HEADERS_BY_TYPE = { :public => [], :private => [] } + +$error_printed = false + +def print_error msg + $error_printed = true + STDERR.puts "ERROR: #{msg}" +end + +def build_header_maps is_shallow_bundle + current_version_path = is_shallow_bundle ? "" : "Versions/A/" + all_headers = `find WebKit.framework/#{current_version_path}{,Private}Headers -type f -name '*.h'`.split + + all_headers.each do |header| + if /\/Headers\/(.*)/.match(header) + $HEADER_NAMES_TO_TYPE[$1] = :public + $HEADERS_BY_TYPE[:public] << header + elsif /\/PrivateHeaders\/(.*)/.match(header) + $HEADER_NAMES_TO_TYPE[$1] = :private + $HEADERS_BY_TYPE[:private] << header + else + print_error "Unknown header type: #{header}" + end + end +end + +def resolve_include(header, included_header, permitted_types) + # Ignore includes that aren't in the typical framework style. + return unless /<([^\/]+)\/(.*)>/.match(included_header) + + framework, included_header_name = [$1, $2] + + # Ignore includes that aren't related to other WebKit headers. + return unless framework =~ /^Web/ + + # A header of any type including a WebCore header is a recipe for disaster. + if framework == "WebCore" + # <rdar://problem/7718826> WebKeyGenerator.h should not include a WebCore header + return if header =~ /\/WebKeyGenerator.h$/ and included_header_name == "WebCoreKeyGenerator.h" + + print_error "#{header} included #{included_header}!" + return + end + + header_type = $HEADER_NAMES_TO_TYPE[included_header_name] + + if not header_type + print_error "#{header} included #{included_header} but I could not find a header of that name!" + elsif not permitted_types.member?(header_type) + print_error "#{header} included #{included_header} which is #{header_type}!" + end +end + +def verify_includes(header, permitted_types) + File.open(header) do |file| + file.each_line do |line| + if /#(include|import) (.*)/.match(line) + resolve_include(header, $2, permitted_types) + end + end + end +end + +build_header_maps is_shallow_bundle + +$HEADERS_BY_TYPE.each do |header_type, headers| + permitted_types = $PERMITTED_INCLUDE_TYPES[header_type] + headers.each do |header| + verify_includes header, permitted_types + end +end + +exit 1 if $error_printed diff --git a/Tools/Scripts/check-webkit-style b/Tools/Scripts/check-webkit-style new file mode 100755 index 0000000..076c712 --- /dev/null +++ b/Tools/Scripts/check-webkit-style @@ -0,0 +1,134 @@ +#!/usr/bin/env python +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Does WebKit-lint on C/C++ or text files. + +The goal of this script is to identify places in the code that *may* +be in non-compliance with WebKit style. It does not attempt to fix +up these problems -- the point is to educate. It does also not +attempt to find all problems, or to ensure that everything it does +find is legitimately a problem. + +In particular, we can get very confused by /* and // inside strings! +We do a small hack, which is to ignore //'s with "'s after them on the +same line, but it is far from perfect (in either direction). +""" + +import codecs +import logging +import os +import os.path +import sys + +from webkitpy.style_references import detect_checkout +import webkitpy.style.checker as checker +from webkitpy.style.patchreader import PatchReader +from webkitpy.style.checker import StyleProcessor +from webkitpy.style.filereader import TextFileReader +from webkitpy.style.main import change_directory + +_log = logging.getLogger("check-webkit-style") + + +# FIXME: Move this code to style.main. +def main(): + # Change stderr to write with replacement characters so we don't die + # if we try to print something containing non-ASCII characters. + stderr = codecs.StreamReaderWriter(sys.stderr, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace') + # Setting an "encoding" attribute on the stream is necessary to + # prevent the logging module from raising an error. See + # the checker.configure_logging() function for more information. + stderr.encoding = "UTF-8" + + # FIXME: Change webkitpy.style so that we do not need to overwrite + # the global sys.stderr. This involves updating the code to + # accept a stream parameter where necessary, and not calling + # sys.stderr explicitly anywhere. + sys.stderr = stderr + + args = sys.argv[1:] + + # Checking for the verbose flag before calling check_webkit_style_parser() + # lets us enable verbose logging earlier. 
+ is_verbose = "-v" in args or "--verbose" in args + + checker.configure_logging(stream=stderr, is_verbose=is_verbose) + _log.debug("Verbose logging enabled.") + + parser = checker.check_webkit_style_parser() + (paths, options) = parser.parse(args) + + checkout = detect_checkout() + + if checkout is None: + if not paths: + _log.error("WebKit checkout not found: You must run this script " + "from within a WebKit checkout if you are not passing " + "specific paths to check.") + sys.exit(1) + + checkout_root = None + _log.debug("WebKit checkout not found for current directory.") + else: + checkout_root = checkout.root_path() + _log.debug("WebKit checkout found with root: %s" % checkout_root) + + configuration = checker.check_webkit_style_configuration(options) + + paths = change_directory(checkout_root=checkout_root, paths=paths) + + style_processor = StyleProcessor(configuration) + + file_reader = TextFileReader(style_processor) + + if paths and not options.diff_files: + file_reader.process_paths(paths) + else: + changed_files = paths if options.diff_files else None + patch = checkout.create_patch(options.git_commit, changed_files=changed_files) + patch_checker = PatchReader(file_reader) + patch_checker.check(patch) + + error_count = style_processor.error_count + file_count = file_reader.file_count + delete_only_file_count = file_reader.delete_only_file_count + + _log.info("Total errors found: %d in %d files" + % (error_count, file_count)) + # We fail when style errors are found or there are no checked files. + sys.exit(error_count > 0 or (file_count == 0 and delete_only_file_count == 0)) + + +if __name__ == "__main__": + main() diff --git a/Tools/Scripts/clean-header-guards b/Tools/Scripts/clean-header-guards new file mode 100755 index 0000000..2bad046 --- /dev/null +++ b/Tools/Scripts/clean-header-guards @@ -0,0 +1,53 @@ +#!/usr/bin/ruby + +require 'find' +require 'optparse' + +options = {} +OptionParser.new do |opts| + opts.banner = "Usage: clean-header-guards [options]" + + opts.on("--prefix [PREFIX]", "Append a header prefix to all guards") do |prefix| + options[:prefix] = prefix + end +end.parse! + +IgnoredFilenamePatterns = [ + # ignore headers which are known not to have guard + /WebCorePrefix/, + /ForwardingHeaders/, + %r|bindings/objc|, + /vcproj/, # anything inside a vcproj is in the windows wasteland + + # we don't own any of these headers + %r|icu/unicode|, + %r|platform/graphics/cairo|, + %r|platform/image-decoders|, + + /config.h/ # changing this one sounds scary +].freeze + +IgnoreFileNamesPattern = Regexp.union(*IgnoredFilenamePatterns).freeze + +Find::find(".") do |filename| + next unless filename =~ /\.h$/ + next if filename.match(IgnoreFileNamesPattern) + + File.open(filename, "r+") do |file| + contents = file.read + match_results = contents.match(/#ifndef (\S+)\n#define \1/s) + if match_results + current_guard = match_results[1] + new_guard = File.basename(filename).sub('.', '_') + new_guard = options[:prefix] + '_' + new_guard if options[:prefix] + contents.gsub!(/#{current_guard}\b/, new_guard) + else + puts "Ignoring #{filename}, failed to find existing header guards." 
+ end + tmp_filename = filename + ".tmp" + File.open(tmp_filename, "w+") do |new_file| + new_file.write(contents) + end + File.rename tmp_filename, filename + end +end diff --git a/Tools/Scripts/commit-log-editor b/Tools/Scripts/commit-log-editor new file mode 100755 index 0000000..f40295d --- /dev/null +++ b/Tools/Scripts/commit-log-editor @@ -0,0 +1,325 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to put change log comments in as default check-in comment. 
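One ordering detail in the commit-log-editor script below: when several ChangeLogs contribute to the message, their sections are sorted by a lowercased label, with the top-level ChangeLog forced first (empty sort key) and LayoutTests forced last (a sort key beginning with '~', which sorts after every letter). A small self-contained illustration, using hypothetical directories:

    # Hypothetical labels for a patch touching four ChangeLogs.
    my %changeLogSort;
    for my $label ("top level", "WebCore", "LayoutTests", "Tools") {
        my $sortKey = lc $label;
        $sortKey = "" if $label eq "top level";
        $sortKey = lc "~, LayoutTests last" if $label eq "LayoutTests";
        $changeLogSort{$sortKey} = $label;
    }
    print join(", ", map { $changeLogSort{$_} } sort keys %changeLogSort), "\n";
    # -> top level, Tools, WebCore, LayoutTests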
+ +use strict; +use File::Basename; +use File::Spec; +use FindBin; +use lib $FindBin::Bin; +use Term::ReadKey; +use VCSUtils; +use webkitdirs; + +sub normalizeLineEndings($$); +sub removeLongestCommonPrefixEndingInDoubleNewline(\%); +sub isCommitLogEditor($); + +sub usage +{ + print "Usage: [--help] [--regenerate-log] <log file>\n"; + exit 1; +} + +my $help = checkForArgumentAndRemoveFromARGV("--help"); +if ($help) { + usage(); +} + +my $regenerateLog = checkForArgumentAndRemoveFromARGV("--regenerate-log"); +my $log = $ARGV[0]; +if (!$log) { + usage(); +} + +my $baseDir = baseProductDir(); + +my $editor = $ENV{SVN_LOG_EDITOR}; +$editor = $ENV{CVS_LOG_EDITOR} if !$editor; +$editor = "" if $editor && isCommitLogEditor($editor); + +my $splitEditor = 1; +if (!$editor) { + my $builtEditorApplication = "$baseDir/Release/Commit Log Editor.app/Contents/MacOS/Commit Log Editor"; + if (-x $builtEditorApplication) { + $editor = $builtEditorApplication; + $splitEditor = 0; + } +} +if (!$editor) { + my $builtEditorApplication = "$baseDir/Debug/Commit Log Editor.app/Contents/MacOS/Commit Log Editor"; + if (-x $builtEditorApplication) { + $editor = $builtEditorApplication; + $splitEditor = 0; + } +} +if (!$editor) { + my $builtEditorApplication = "$ENV{HOME}/Applications/Commit Log Editor.app/Contents/MacOS/Commit Log Editor"; + if (-x $builtEditorApplication) { + $editor = $builtEditorApplication; + $splitEditor = 0; + } +} + +$editor = $ENV{EDITOR} if !$editor; +$editor = "/usr/bin/vi" if !$editor; + +my @editor; +if ($splitEditor) { + @editor = split ' ', $editor; +} else { + @editor = ($editor); +} + +my $inChangesToBeCommitted = !isGit(); +my @changeLogs = (); +my $logContents = ""; +my $existingLog = 0; +open LOG, $log or die "Could not open the log file."; +while (<LOG>) { + if (isGit()) { + if (/^# Changes to be committed:$/) { + $inChangesToBeCommitted = 1; + } elsif ($inChangesToBeCommitted && /^# \S/) { + $inChangesToBeCommitted = 0; + } + } + + if (!isGit() || /^#/) { # + $logContents .= $_; + } else { + # $_ contains the current git log message + # (without the log comment info). We don't need it. + } + $existingLog = isGit() && !(/^#/ || /^\s*$/) unless $existingLog; + + push @changeLogs, makeFilePathRelative($1) if $inChangesToBeCommitted && (/^(?:M|A)....(.*ChangeLog)\r?\n?$/ || /^#\t(?:modified|new file): (.*ChangeLog)$/) && !/-ChangeLog$/; +} +close LOG; + +# We want to match the line endings of the existing log file in case they're +# different from perl's line endings. +my $endl = "\n"; +$endl = $1 if $logContents =~ /(\r?\n)/; + +my $keepExistingLog = 1; +if ($regenerateLog && $existingLog && scalar(@changeLogs) > 0) { + print "Existing log message detected, Use 'r' to regenerate log message from ChangeLogs, or any other key to keep the existing message.\n"; + ReadMode('cbreak'); + my $key = ReadKey(0); + ReadMode('normal'); + $keepExistingLog = 0 if ($key eq "r"); +} + +# Don't change anything if there's already a log message (as can happen with git-commit --amend). 
+exec (@editor, @ARGV) if $existingLog && $keepExistingLog; + +my $topLevel = determineVCSRoot(); + +my %changeLogSort; +my %changeLogContents; +for my $changeLog (@changeLogs) { + open CHANGELOG, $changeLog or die "Can't open $changeLog"; + my $contents = ""; + my $blankLines = ""; + my $reviewedByLine = ""; + my $lineCount = 0; + my $date = ""; + my $author = ""; + my $email = ""; + my $hasAuthorInfoToWrite = 0; + while (<CHANGELOG>) { + if (/^\S/) { + last if $contents; + } + if (/\S/) { + my $previousLineWasBlank = 1 unless $blankLines eq ""; + my $line = $_; + my $currentLineBlankLines = $blankLines; + $blankLines = ""; + + # Remove indentation spaces + $line =~ s/^ {8}//; + + # Save the reviewed / rubber stamped by line. + if ($line =~ m/^Reviewed by .*/ || $line =~ m/^Rubber[ \-]?stamped by .*/) { + $reviewedByLine = $line; + next; + } + + # Grab the author and the date line + if ($line =~ m/^([0-9]{4}-[0-9]{2}-[0-9]{2})\s+(.*[^\s])\s+<(.*)>/ && $lineCount == 0) { + $date = $1; + $author = $2; + $email = $3; + $hasAuthorInfoToWrite = 1; + next; + } + + $contents .= $currentLineBlankLines if $contents; + + # Attempt to insert the "patch by" line, after the first blank line. + if ($previousLineWasBlank && $hasAuthorInfoToWrite && $lineCount > 0) { + my $committerEmail = changeLogEmailAddress(); + my $authorAndCommitterAreSamePerson = $email eq $committerEmail; + if (!$authorAndCommitterAreSamePerson) { + $contents .= "Patch by $author <$email> on $date\n"; + $hasAuthorInfoToWrite = 0; + } + } + + # Attempt to insert the "reviewed by" line, after the first blank line. + if ($previousLineWasBlank && $reviewedByLine && $lineCount > 0) { + $contents .= $reviewedByLine . "\n"; + $reviewedByLine = ""; + } + + $lineCount++; + $contents .= $line; + } else { + $blankLines .= $_; + } + } + if ($reviewedByLine) { + $contents .= "\n".$reviewedByLine; + } + close CHANGELOG; + + $changeLog = File::Spec->abs2rel(File::Spec->rel2abs($changeLog), $topLevel); + + my $label = dirname($changeLog); + $label = "top level" unless length $label; + + my $sortKey = lc $label; + if ($label eq "top level") { + $sortKey = ""; + } elsif ($label eq "LayoutTests") { + $sortKey = lc "~, LayoutTests last"; + } + + $changeLogSort{$sortKey} = $label; + $changeLogContents{$label} = $contents; +} + +my $commonPrefix = removeLongestCommonPrefixEndingInDoubleNewline(%changeLogContents); + +my $first = 1; +open NEWLOG, ">$log.edit" or die; +if (isGit() && scalar keys %changeLogSort == 0) { + # populate git commit message with WebKit-format ChangeLog entries unless explicitly disabled + my $branch = gitBranch(); + chomp(my $webkitGenerateCommitMessage = `git config --bool branch.$branch.webkitGenerateCommitMessage`); + if ($webkitGenerateCommitMessage eq "") { + chomp($webkitGenerateCommitMessage = `git config --bool core.webkitGenerateCommitMessage`); + } + if ($webkitGenerateCommitMessage ne "false") { + open CHANGELOG_ENTRIES, "-|", "$FindBin::Bin/prepare-ChangeLog --git-index --no-write" or die "prepare-ChangeLog failed: $!.\n"; + while (<CHANGELOG_ENTRIES>) { + print NEWLOG normalizeLineEndings($_, $endl); + } + close CHANGELOG_ENTRIES; + } +} else { + print NEWLOG normalizeLineEndings($commonPrefix, $endl); + for my $sortKey (sort keys %changeLogSort) { + my $label = $changeLogSort{$sortKey}; + if (keys %changeLogSort > 1) { + print NEWLOG normalizeLineEndings("\n", $endl) if !$first; + $first = 0; + print NEWLOG normalizeLineEndings("$label: ", $endl); + } + print NEWLOG normalizeLineEndings($changeLogContents{$label}, 
$endl); + } +} +print NEWLOG $logContents; +close NEWLOG; + +system (@editor, "$log.edit"); + +open NEWLOG, "$log.edit" or exit; +my $foundComment = 0; +while (<NEWLOG>) { + $foundComment = 1 if (/\S/ && !/^CVS:/); +} +close NEWLOG; + +if ($foundComment) { + open NEWLOG, "$log.edit" or die; + open LOG, ">$log" or die; + while (<NEWLOG>) { + print LOG; + } + close LOG; + close NEWLOG; +} + +unlink "$log.edit"; + +sub normalizeLineEndings($$) +{ + my ($string, $endl) = @_; + $string =~ s/\r?\n/$endl/g; + return $string; +} + +sub removeLongestCommonPrefixEndingInDoubleNewline(\%) +{ + my ($hashOfStrings) = @_; + + my @strings = values %{$hashOfStrings}; + return "" unless @strings > 1; + + my $prefix = shift @strings; + my $prefixLength = length $prefix; + foreach my $string (@strings) { + while ($prefixLength) { + last if substr($string, 0, $prefixLength) eq $prefix; + --$prefixLength; + $prefix = substr($prefix, 0, -1); + } + last unless $prefixLength; + } + + return "" unless $prefixLength; + + my $lastDoubleNewline = rindex($prefix, "\n\n"); + return "" unless $lastDoubleNewline > 0; + + foreach my $key (keys %{$hashOfStrings}) { + $hashOfStrings->{$key} = substr($hashOfStrings->{$key}, $lastDoubleNewline); + } + return substr($prefix, 0, $lastDoubleNewline + 2); +} + +sub isCommitLogEditor($) +{ + my $editor = shift; + return $editor =~ m/commit-log-editor/; +} diff --git a/Tools/Scripts/compare-timing-files b/Tools/Scripts/compare-timing-files new file mode 100755 index 0000000..11b470b --- /dev/null +++ b/Tools/Scripts/compare-timing-files @@ -0,0 +1,88 @@ +#!/usr/bin/perl + +# Copyright (C) 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script takes two files that are lists of timings and compares them. 
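The comparison below reports the gain or loss as a percentage of the first (old) file's figure. With made-up timings, the calculation looks like this (a sketch of the same arithmetic the script performs; the numbers are illustrative):

    # Hypothetical averages: 250 for the old file, 240 for the new one.
    my ($oldAverage, $newAverage) = (250, 240);
    my $gainOrLoss = $newAverage <= $oldAverage ? "GAIN" : "LOSS";
    my $difference = abs($newAverage - $oldAverage);             # 10
    my $differencePercent = $difference / $oldAverage * 100;     # 4
    printf("performance %s of %.2f%% (%.1f / %.1f)\n",
           $gainOrLoss, $differencePercent, $difference, $oldAverage);
    # prints: performance GAIN of 4.00% (10.0 / 250.0)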
+ +use warnings; +use strict; +use Getopt::Long; + +my $usage = "compare-timing-files [-c|--count results] oldFile newFile"; + +my $count = 1; +GetOptions("c|count=i" => \$count); + +my ($file1, $file2) = @ARGV; +die "$usage\n" unless ($file1 && $file2 && @ARGV == 2); + +my ($oldAverage, $oldRange, $oldRangePercent) = parseResults($file1); +my ($newAverage, $newRange, $newRangePercent) = parseResults($file2); + +print "\n===== $file1 =====\n"; +if ($count == 1) { + print("fastest run: $oldAverage\n"); +} else { + print("average of fastest $count runs: $oldAverage\n"); + printf("range of fastest $count runs: %.2f%% (%d)\n", $oldRangePercent, $oldRange); +} + +print "\n===== $file2 =====\n"; +if ($count == 1) { + print("fastest run: $newAverage\n"); +} else { + print("average of fastest $count runs: $newAverage\n"); + printf("range of fastest $count runs: %.2f%% (%d)\n", $newRangePercent, $newRange); +} + +my $gainOrLoss = $newAverage <= $oldAverage ? "GAIN" : "LOSS"; +my $difference = abs($newAverage - $oldAverage); +my $differencePercent = $difference / $oldAverage * 100; +printf("\nperformance %s of %.2f%% (%.1f / %.1f)\n", $gainOrLoss, $differencePercent, $difference, $oldAverage); +print "\n"; + +sub parseResults +{ + my ($file) = @_; + + open(FILE, $file) or die "Couldn't open file: $file"; + my @results = <FILE>; + close(FILE); + + @results = sort(@results); + my $total = 0; + for (my $i = 0; $i < $count; $i++) { + $results[$i] =~ s/\D*//; # cut out non-digits + $total += $results[$i]; + } + my $average = $total / $count; + my $range = $results[$count - 1] - $results[0]; + my $rangePercent = $range / $results[$count - 1] * 100; + + return ($average, $range, $rangePercent); +} + diff --git a/Tools/Scripts/create-exports b/Tools/Scripts/create-exports new file mode 100755 index 0000000..c645d55 --- /dev/null +++ b/Tools/Scripts/create-exports @@ -0,0 +1,5 @@ +#!/usr/bin/perl -w + +while (<>) { + print "$1\n" if /^\s*\"(.+)\", referenced from:$/; +} diff --git a/Tools/Scripts/debug-minibrowser b/Tools/Scripts/debug-minibrowser new file mode 100755 index 0000000..06685b4 --- /dev/null +++ b/Tools/Scripts/debug-minibrowser @@ -0,0 +1,38 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "debug" script for debugging the WebKit2 MiniBrowser. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(debugMiniBrowser()); diff --git a/Tools/Scripts/debug-safari b/Tools/Scripts/debug-safari new file mode 100755 index 0000000..52e97fe --- /dev/null +++ b/Tools/Scripts/debug-safari @@ -0,0 +1,38 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run Safari in the platform's debugger for the WebKit Open Source Project. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(runSafari(1)); diff --git a/Tools/Scripts/debug-test-runner b/Tools/Scripts/debug-test-runner new file mode 100755 index 0000000..5a9b7f9 --- /dev/null +++ b/Tools/Scripts/debug-test-runner @@ -0,0 +1,35 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "debug" script for debugging the WebKitTestRunner. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(debugWebKitTestRunner()); diff --git a/Tools/Scripts/deduplicate-tests b/Tools/Scripts/deduplicate-tests new file mode 100755 index 0000000..f0afe13 --- /dev/null +++ b/Tools/Scripts/deduplicate-tests @@ -0,0 +1,84 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""deduplicate-tests -- print test results duplicated between platforms. + +If platform/mac-leopard is missing an expected test output, we fall back on +platform/mac. This means it's possible to grow redundant test outputs, +where we have the same expected data in both a platform directory and another +platform it falls back on. + +This command dumps out all such files. You can use it like this: + deduplicate-tests --verbose # print out the duplicated files + deduplicate-tests | xargs git rm # delete them +""" + + +import optparse +import webkitpy.common.system.logutils as logutils +import webkitpy.layout_tests.deduplicate_tests as deduplicate_tests + + +def parse_args(): + """Provides a default set of command line args. 
+ + Returns a tuple of options, args from optparse""" + + configuration_options = [ + optparse.make_option("-v", "--verbose", dest="verbose", + action="store_true", default=False, + help="Verbose output."), + optparse.make_option("-g", "--glob", dest="glob_pattern", + default="*-expected*", + help="Specify the glob to filter the files, defaults to *-expected*."), + ] + + option_list = (configuration_options) + option_parser = optparse.OptionParser(option_list=option_list) + + options, _ = option_parser.parse_args() + + return options + + +def run(options): + logutils.configure_logging() + if options.verbose: + format = ("* %(test)s\n" + "\tredundantly on %(platform)s and %(fallback)s\n" + "\tconsider deleting %(path)s") + else: + format = "%(path)s" + + for dupe in deduplicate_tests.deduplicate(options.glob_pattern): + print(format % dupe) + + +def main(): + options = parse_args() + run(options) + + +if __name__ == '__main__': + main() diff --git a/Tools/Scripts/detect-mismatched-virtual-const b/Tools/Scripts/detect-mismatched-virtual-const new file mode 100755 index 0000000..b345cb2 --- /dev/null +++ b/Tools/Scripts/detect-mismatched-virtual-const @@ -0,0 +1,167 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# +# This script attempts to find instances of a problem where the signatures +# of virtual methods fail to match because one is defined 'const', and another +# is not. For example: +# virtual void Base::doStuff() const; +# virtual void Derived::doStuff(); +# +# The lack of 'const' on the derived class gives it a different signature, and +# it will therefore not be called when doStuff() is called on a derived object +# via a base class pointer. 
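+# (For illustration only: the usual fix for such a mismatch is to make the
+# signatures agree by adding the missing 'const' to the override, e.g.
+#     virtual void Derived::doStuff() const;
+# so that it actually overrides the base class method.)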
+# +# Limitations of this script: +# * It only works on things in the WebCore namespace +# * Not all templatized methods may be found correctly +# * It doesn't know anything about inheritance, or if methods are actually virtual +# * It has lots of false positives (should add a whitelist for known-good signatures, +# and specific methods) +# * It's rather slow +# +# Added by Simon Fraser <simon.fraser@apple.com> +# +# Run the script like this: +# WebKitTools/Scripts/detect-mismatched-virtual-const WebKitBuild/Debug/WebCore.framework/WebCore +# +# Output consists of a series of warnings like this: +# +# Both const and non-const versions of bgColor(): +# HTMLDocument::bgColor() +# HTMLBodyElement::bgColor() const +# HTMLTableElement::bgColor() const +# HTMLTableRowElement::bgColor() const +# HTMLTableCellElement::bgColor() const +# + +use strict; +no warnings qw /syntax/; + + +my $file = $ARGV[0]; + +print "Looking for unmatched const methods in $file\n"; + +if (!open NM, "(nm '$file' | c++filt | sed 's/^/STDOUT:/') 2>&1 |") { + die "Could not open $file\n"; +} + +my $nestedParens; + $nestedParens = qr / + [(] + [^()]* + (?: + (??{ $nestedParens }) + [^()]* + )* + [)]/x; + +my $nestedAngleBrackets; + $nestedAngleBrackets = qr / + [<] + [^<>]* + (?: + (??{ $nestedAngleBrackets }) + [^<>]* + )* + [>]/x; + +my $bal; + $bal = qr /([^:]+ + (??{ $nestedAngleBrackets })? + (??{ $nestedParens })) + ([^()]*)$/x; + +my %signature_map = (); + +while (<NM>) { + my $line = $_; + chomp($line); + if ($line =~ m/ [tT] WebCore::(.+)$/) { + my $method = $1; + + if ($method =~ /$bal/) { + my $signature = $1; + my $const = $2 eq " const"; + + my $class = substr($method, 0, length($method) - length($signature) - ($const ? 6 : 0)); + +# print "line: $line\nclass: $class\nmethod: $method\nsignature: $signature\nconst: $const\n\n"; + + my %method_info = ( + 'class' => $class, + 'const' => $const, + 'method' => $method, + ); + + push @{$signature_map{$signature}}, \%method_info; + } else { + print "unmatched line $method\n\n" + } + } +} +close NM; + +my $sig; +for $sig (keys %signature_map) { + #print "\n$sig\n"; + + my @entries = @{$signature_map{$sig}}; +# print "$#entries\n"; + + my $num_const = 0; + my $num_not_const = 0; + my $i; + for $i (0 .. $#entries) { + my $entry = @entries[$i]; + + my $class = $entry->{'class'}; + my $const = $entry->{'const'}; + + if ($const) { + $num_const++; + } else { + $num_not_const++; + } + } + + if ($#entries > 1 && $num_const > 0 && $num_not_const > 0) { + print "Both const and non-const versions of $sig:\n"; + + for $i (0 .. $#entries) { + my $entry = @entries[$i]; + my $method = $entry->{'method'}; + print "\t$method\n"; + } + + } +} + + + diff --git a/Tools/Scripts/do-file-rename b/Tools/Scripts/do-file-rename new file mode 100755 index 0000000..9c4c898 --- /dev/null +++ b/Tools/Scripts/do-file-rename @@ -0,0 +1,117 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to do file renaming. + +use strict; +use File::Find; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; +use VCSUtils; + +setConfiguration(); +chdirWebKit(); + +my %words; + +# find all files we want to process + +my @paths; +find(\&wanted, "JavaScriptCore"); +find(\&wanted, "JavaScriptGlue"); +find(\&wanted, "WebCore"); +find(\&wanted, "WebKit"); +find(\&wanted, "WebKit2"); + +sub wanted +{ + my $file = $_; + + if ($file eq "icu") { + $File::Find::prune = 1; + return; + } + + if ($file =~ /^\../) { + $File::Find::prune = 1; + return; + } + + return if $file =~ /^ChangeLog/; + return if -d $file; + + push @paths, $File::Find::name; +} + +my %renames = ( +); + +my %renamesContemplatedForTheFuture = ( +); + +# rename files + +my %newFile; +for my $file (sort @paths) { + my $f = $file; + $f = "$1$renames{$2}" if $f =~ /^(.*\/)(\w+\.\w+)$/ && $renames{$2}; + $newFile{$file} = $f if $f ne $file; +} + +for my $file (sort @paths) { + if ($newFile{$file}) { + my $newFile = $newFile{$file}; + print "Renaming $file to $newFile\n"; + scmMoveOrRenameFile($file, $newFile); + } +} + +# change all file contents + +for my $file (sort @paths) { + $file = $newFile{$file} if $newFile{$file}; + my $contents; + { + local $/; + open FILE, $file or die; + $contents = <FILE>; + close FILE; + } + my $newContents = $contents; + + for my $from (keys %renames) { + $newContents =~ s/\b\Q$from\E(?!\w)/$renames{$from}/g; # this " unconfuses Xcode syntax highlighting + } + + if ($newContents ne $contents) { + open FILE, ">", $file or die; + print FILE $newContents; + close FILE; + } +} diff --git a/Tools/Scripts/do-webcore-rename b/Tools/Scripts/do-webcore-rename new file mode 100755 index 0000000..6dcb719 --- /dev/null +++ b/Tools/Scripts/do-webcore-rename @@ -0,0 +1,247 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to do a rename in JavaScriptCore, WebCore, and WebKit. + +use strict; + +use File::Find; +use FindBin; +use Getopt::Long qw(:config pass_through); + +use lib $FindBin::Bin; +use webkitdirs; +use VCSUtils; + +setConfiguration(); +chdirWebKit(); + +my $showHelp; +my $verbose; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] + -h|--help Show this help message + -v|--verbose More verbose output +EOF + +my $getOptionsResult = GetOptions( + 'help|h' => \$showHelp, + 'verbose|v' => \$verbose, +); + +if (!$getOptionsResult || $showHelp) { + print STDERR $usage; + exit 1; +} + +my @directoriesToIgnoreList = ( + "icu", +); +my %directoriesToIgnore = map { $_ => 1 } @directoriesToIgnoreList; + +# find all files we want to process + +my @paths; +find(\&wanted, "JavaScriptCore"); +find(\&wanted, "JavaScriptGlue"); +find(\&wanted, "WebCore"); +find(\&wanted, "WebKit"); +find(\&wanted, "WebKit2"); + +sub wanted +{ + my $file = $_; + + # Ignore excluded and hidden files/directories. + if ($directoriesToIgnore{$file} or $file =~ /^\../ or $file =~ /^ChangeLog/) { + print "Ignoring $File::Find::name\n" if $verbose; + $File::Find::prune = 1; + return; + } + + return if -d $file; + + push @paths, $File::Find::name; +} + +# Setting isDOMTypeRename to 1 rather than 0 expands the regexps used +# below to handle custom JavaScript bindings. 
+my $isDOMTypeRename = 0; +my %renames = ( + # Renames go here in the form of: + "DocLoader" => "CachedResourceLoader", +); + +my %renamesContemplatedForTheFuture = ( + "HTMLPlugInImageElement" => "HTMLEmbeddedObjectElement", + + "DOMObject" => "JSDOMObject", + + "runtimeObjectGetter" => "pluginElementGetter", + "runtimeObjectPropertyGetter" => "pluginElementPropertyGetter", + "runtimeObjectCustomGetOwnPropertySlot" => "pluginElementCustomGetOwnPropertySlot", + "runtimeObjectCustomPut" => "pluginElementCustomPut", + "runtimeObjectImplementsCall" => "pluginElementImplementsCall", + "runtimeObjectCallAsFunction" => "pluginElementCallAsFunction", + + "CLONE_CONTENTS" => "Clone", + "DELETE_CONTENTS" => "Delete", + "EXTRACT_CONTENTS" => "Extract", + + "DateInstance" => "JSDate", + "ErrorInstance" => "JSError", + + "KURL" => "URL", + "KURLCFNet" => "URLCF", + "KURLHash" => "URLHash", + "KURLMac" => "URLMac", + "KURL_h" => "URL_h", + + "ThreadSafeSharedBase" => "ThreadSafeRefCountedBase", + "ThreadSafeShared" => "ThreadSafeRefCounted", + "TreeShared" => "TreeRefCounted", + + "StringImpl" => "SharedString", + + "RenderView" => "RenderViewport", + + "ObjcFallbackObjectImp" => "ObjCFallbackObject", + "RuntimeObjectImp" => "ForeignObject", + + "runtime_array" => "BridgedArray", + "runtime_method" => "BridgedFunction", + "runtime_object" => "BridgedObject", + "objc_runtime" => "ObjCBridge", + + "equalIgnoringCase" => "equalFoldingCase", + + "FTPDirectoryTokenizer" => "FTPDirectoryDocumentBuilder", + "HTMLTokenizer" => "HTMLDocumentBuilder", + "ImageTokenizer" => "ImageDocumentBuilder", + "PluginTokenizer" => "PluginDocumentBuilder", + "TextTokenizer" => "TextDocumentBuilder", + "Tokenizer" => "DocumentBuilder", + "Tokenizer_h" => "DocumentBuilder_h", + "XMLTokenizer" => "XMLDocumentBuilder", + "isHTMLTokenizer" => "isHTMLDocumentBuilder", + "m_tokenizer" => "m_builder", + "createTokenizer" => "createBuilder", + "tokenizerProcessedData" => "documentBuilderProcessedData", + + "WTF_UNICODE_H" => "Unicode_h", + "WTF_UNICODE_ICU_H" => "UnicodeICU_h", + "WTF_UNICODE_QT4_H" => "UnicodeQt4_h", + "UnicodeIcu" => "UnicodeICU", + + "m_invertibleCTM" => "m_transformIsInvertible", + + "NativeFunctionWrapper_h" => "JSHostFunction_h", + "NativeFunctionWrapper" => "JSHostFunction", + "nativeFunctionThunk" => "hostFunctionThunk", + "nativeFunction" => "hostFunction", + "NativeFunction" => "HostFunction", +); + +# Sort the keys of the renames hash in order of decreasing length. This +# handles the case where some of the renames are substrings of others; +# i.e., "Foo" => "Bar" and "FooBuffer" => "BarBuffer". +my @sortedRenameKeys = sort { length($b) - length($a) } keys %renames; + +# rename files + +sub renameFile +{ + my $file = shift; + + if ($isDOMTypeRename) { + # Find the longest key in %renames which matches this more permissive regexp. + # (The old regexp would match ".../Foo.cpp" but not ".../JSFooCustom.cpp".) + # This handles renaming of custom JavaScript bindings even when some of the + # renames are substrings of others. The only reason we don't do this all the + # time is to avoid accidental file renamings for short, non-DOM renames. 
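+ # For example, with $isDOMTypeRename set and the rename "DocLoader" => "CachedResourceLoader"
+ # above, a path such as .../js/JSDocLoaderCustom.cpp would become
+ # .../js/JSCachedResourceLoaderCustom.cpp (illustrative path only).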
+ for my $key (@sortedRenameKeys) { + my $newFile = ""; + $newFile = "$1$renames{$2}$3" if $file =~ /^(.*\/\w*)($key)(\w*\.\w+)$/; + if ($newFile ne "") { + return $newFile; + } + } + } else { + $file = "$1$renames{$2}$3" if $file =~ /^(.*\/)(\w+)(\.\w+)$/ && $renames{$2}; + } + return $file; +} + +my %newFile; +for my $file (sort @paths) { + my $f = renameFile($file); + if ($f ne $file) { + $newFile{$file} = $f; + } +} + +for my $file (sort @paths) { + if ($newFile{$file}) { + my $newFile = $newFile{$file}; + print "Renaming $file to $newFile\n"; + scmMoveOrRenameFile($file, $newFile); + } +} + +# change all file contents + +for my $file (sort @paths) { + $file = $newFile{$file} if $newFile{$file}; + my $contents; + { + local $/; + open FILE, $file or die "Failed to open $file"; + $contents = <FILE>; + close FILE; + } + my $newContents = $contents; + + if ($isDOMTypeRename) { + for my $from (@sortedRenameKeys) { + # Handle JavaScript custom bindings. + $newContents =~ s/\b(JS|V8|to|)$from/$1$renames{$from}/g; + } + } else { + for my $from (@sortedRenameKeys) { + $newContents =~ s/\b$from(?!["\w])/$renames{$from}/g; # this " unconfuses Xcode syntax highlighting + } + } + + if ($newContents ne $contents) { + open FILE, ">", $file or die "Failed to open $file"; + print FILE $newContents; + close FILE; + } +} diff --git a/Tools/Scripts/ensure-valid-python b/Tools/Scripts/ensure-valid-python new file mode 100755 index 0000000..aede812 --- /dev/null +++ b/Tools/Scripts/ensure-valid-python @@ -0,0 +1,152 @@ +#!/usr/bin/perl -w +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; + +use File::Basename; +use File::Spec; +use File::Temp qw(tempdir); +use FindBin; +use Getopt::Long; + +use lib $FindBin::Bin; +use webkitdirs; +use VCSUtils; + +my $macPythonURL = "http://www.python.org/ftp/python/2.6.5/python-2.6.5-macosx10.3-2010-03-24.dmg"; +my $macPythonMD5 = "84489bba813fdbb6041b69d4310a86da"; +my $macPythonInstallerName = "Python.mpkg"; + +# We could use a consistent download location, like the source or build directory. 
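+# As written, tempdir() with TMPDIR => 1 creates the download directory under the
+# system temporary directory, and CLEANUP => 1 removes it when the script exits.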
+my $tempDirectory = File::Temp::tempdir("WebKitPythonXXXX", TMPDIR => 1, CLEANUP => 1); +my $downloadDirectory = $tempDirectory; +my $mountPoint = File::Spec->join($tempDirectory, "mount"); + +sub checkPythonVersion() +{ + # Will exit 0 if Python is 2.5 or greater, non-zero otherwise. + `python -c "import sys;sys.exit(sys.version_info[:2] < (2,5))"`; + return exitStatus($?) == 0; +} + +sub downloadFileToPath($$) +{ + my ($remoteURL, $localPath) = @_; + print "Downloading $remoteURL to $localPath\n"; + my $exitCode = system("curl", "-o", $localPath, $remoteURL); + return exitStatus($exitCode) == 0; +} + +sub checkMD5($$) +{ + my ($path, $expectedMD5) = @_; + my $md5Output = `md5 -q "$path"`; + chomp($md5Output); + my $isValid = $md5Output eq $expectedMD5; + print "'$md5Output' does not match expected: '$expectedMD5'\n" unless $isValid; + return $isValid; +} + +sub mountDMG($$) +{ + my ($dmgPath, $mountPoint) = @_; + print "Mounting $dmgPath at $mountPoint\n"; + return system("hdiutil", "attach", "-mountpoint", $mountPoint, "-nobrowse", $dmgPath) == 0; +} + +sub unmountDMG($) +{ + my ($mountPoint) = @_; + print "Unmounting disk image from $mountPoint\n"; + my $exitCode = system("hdiutil", "detach", $mountPoint); + return exitStatus($exitCode) == 0; +} + +sub runInstaller($) +{ + my ($installerPackage) = @_; + print "sudo will now ask for your password to run the Python installer.\n"; + print "The installer will install Python in /Library/Frameworks/Python.framework\n"; + print "and add symlinks from /usr/local/bin.\n"; + return system("sudo", "installer", "-verbose", "-pkg", $installerPackage, "-target", "/") == 0; +} + +sub downloadAndMountMacPythonDMG($$) +{ + my ($pythonURL, $pythonMD5) = @_; + my $localFilename = basename($pythonURL); + my $localPath = File::Spec->join($downloadDirectory, $localFilename); + + downloadFileToPath($pythonURL, $localPath) or die "Failed to download $pythonURL"; + checkMD5($localPath, $pythonMD5) or die "MD5 check failed on $localPath"; + return mountDMG($localPath, $mountPoint); +} + +sub installMacPython() +{ + downloadAndMountMacPythonDMG($macPythonURL, $macPythonMD5) or die "Failed to download and mount disk image."; + print "Mounted python install image at: $mountPoint\n"; + my $installerPackage = File::Spec->join($mountPoint, $macPythonInstallerName); + my $installSuccess = runInstaller($installerPackage); + unmountDMG($mountPoint) or die "Failed to unmount disk image from $mountPoint"; + return $installSuccess; +} + +sub main() +{ + my $checkOnly = 0; + my $showHelp = 0; + my $getOptionsResult = GetOptions( + 'check-only!' => \$checkOnly, + 'help|h' => \$showHelp, + ); + if (!$getOptionsResult || $showHelp) { + print STDERR <<HELP; +Usage: $0 [options] + --check-only Check python version only. + -h|--help Show this help message. +HELP + return 1; + } + # Congrats, your Python is fine. + return 0 if checkPythonVersion(); + + return 1 if $checkOnly; + + if (!isTiger()) { + print "Your Python version is insufficient to run WebKit's Python code. 
Please update.\n"; + print "See http://trac.webkit.org/wiki/PythonGuidelines for more info.\n"; + return 1; + } + + installMacPython() or die "Failed to install Python."; + + checkPythonVersion() or die "Final version check failed, must have failed to update Python"; + print "Successfully updated python.\n"; +} + +exit(main()); diff --git a/Tools/Scripts/execAppWithEnv b/Tools/Scripts/execAppWithEnv new file mode 100755 index 0000000..d185e2f --- /dev/null +++ b/Tools/Scripts/execAppWithEnv @@ -0,0 +1,38 @@ +#!/usr/bin/perl + +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script launches a program with a given enviroment. +# It is a workaround for a perl bug that apps launched from perl threads +# use the environment of the main thread instead of the current thread. + +my ($unsplitEnvVar, @app) = @ARGV; +# The first argument to this script should be perl code (in quotes) that sets the environment. +eval substr($unsplitEnvVar, 1, -1); +exec(@app); diff --git a/Tools/Scripts/extract-localizable-strings b/Tools/Scripts/extract-localizable-strings new file mode 100755 index 0000000..b31550a --- /dev/null +++ b/Tools/Scripts/extract-localizable-strings @@ -0,0 +1,390 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006, 2007, 2009, 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script is like the genstrings tool (minus most of the options) with these differences. +# +# 1) It uses the names UI_STRING and UI_STRING_WITH_KEY for the macros, rather than the macros +# from NSBundle.h, and doesn't support tables (although they would be easy to add). +# 2) It supports UTF-8 in key strings (and hence uses "" strings rather than @"" strings; +# @"" strings only reliably support ASCII since they are decoded based on the system encoding +# at runtime, so give different results on US and Japanese systems for example). +# 3) It looks for strings that are not marked for localization, using both macro names that are +# known to be used for debugging in Intrigue source code and an exceptions file. +# 4) It finds the files to work on rather than taking them as parameters, and also uses a +# hardcoded location for both the output file and the exceptions file. +# It would have been nice to use the project to find the source files, but it's too hard to +# locate source files after parsing a .pbxproj file. + +# The exceptions file has a list of strings in quotes, filenames, and filename/string pairs separated by :. + +use strict; + +sub UnescapeHexSequence($); + +my %isDebugMacro = ( ASSERT_WITH_MESSAGE => 1, LOG_ERROR => 1, ERROR => 1, NSURL_ERROR => 1, FATAL => 1, LOG => 1, LOG_WARNING => 1, UI_STRING_LOCALIZE_LATER => 1, LPCTSTR_UI_STRING_LOCALIZE_LATER => 1, UNLOCALIZED_STRING => 1, UNLOCALIZED_LPCTSTR => 1, dprintf => 1, NSException => 1, NSLog => 1, printf => 1 ); + +@ARGV >= 2 or die "Usage: extract-localizable-strings <exceptions file> <file to update> [ directory... 
]\nDid you mean to run update-webkit-localizable-strings instead?\n"; + +my $exceptionsFile = shift @ARGV; +-f $exceptionsFile or die "Couldn't find exceptions file $exceptionsFile\n"; + +my $fileToUpdate = shift @ARGV; +-f $fileToUpdate or die "Couldn't find file to update $fileToUpdate\n"; + +my @directories = (); +my @directoriesToSkip = (); +if (@ARGV < 1) { + push(@directories, "."); +} else { + for my $dir (@ARGV) { + if ($dir =~ /^-(.*)$/) { + push @directoriesToSkip, $1; + } else { + push @directories, $dir; + } + } +} + +my $sawError = 0; + +my $localizedCount = 0; +my $keyCollisionCount = 0; +my $notLocalizedCount = 0; +my $NSLocalizeCount = 0; + +my %exception; +my %usedException; + +if (open EXCEPTIONS, $exceptionsFile) { + while (<EXCEPTIONS>) { + chomp; + if (/^"([^\\"]|\\.)*"$/ or /^[-_\/\w.]+.(h|m|mm|c|cpp)$/ or /^[-_\/\w.]+.(h|m|mm|c|cpp):"([^\\"]|\\.)*"$/) { + if ($exception{$_}) { + print "$exceptionsFile:$.:exception for $_ appears twice\n"; + print "$exceptionsFile:$exception{$_}:first appearance\n"; + } else { + $exception{$_} = $.; + } + } else { + print "$exceptionsFile:$.:syntax error\n"; + } + } + close EXCEPTIONS; +} + +my $quotedDirectoriesString = '"' . join('" "', @directories) . '"'; +for my $dir (@directoriesToSkip) { + $quotedDirectoriesString .= ' -path "' . $dir . '" -prune -o'; +} + +my @files = ( split "\n", `find $quotedDirectoriesString \\( -name "*.h" -o -name "*.m" -o -name "*.mm" -o -name "*.c" -o -name "*.cpp" \\)` ); + +for my $file (sort @files) { + next if $file =~ /\/\w+LocalizableStrings\.h$/; + + $file =~ s-^./--; + + open SOURCE, $file or die "can't open $file\n"; + + my $inComment = 0; + + my $expected = ""; + my $macroLine; + my $macro; + my $UIString; + my $key; + my $comment; + + my $string; + my $stringLine; + my $nestingLevel; + + my $previousToken = ""; + + while (<SOURCE>) { + chomp; + + # Handle continued multi-line comment. + if ($inComment) { + next unless s-.*\*/--; + $inComment = 0; + } + + # Handle all the tokens in the line. + while (s-^\s*([#\w]+|/\*|//|[^#\w/'"()\[\],]+|.)--) { + my $token = $1; + + if ($token eq "\"") { + if ($expected and $expected ne "a quoted string") { + print "$file:$.:ERROR:found a quoted string but expected $expected\n"; + $sawError = 1; + $expected = ""; + } + if (s-^(([^\\$token]|\\.)*?)$token--) { + if (!defined $string) { + $stringLine = $.; + $string = $1; + } else { + $string .= $1; + } + } else { + print "$file:$.:ERROR:mismatched quotes\n"; + $sawError = 1; + $_ = ""; + } + next; + } + + if (defined $string) { +handleString: + if ($expected) { + if (!defined $UIString) { + # FIXME: Validate UTF-8 here? + $UIString = $string; + $expected = ","; + } elsif (($macro =~ /UI_STRING_KEY$/) and !defined $key) { + # FIXME: Validate UTF-8 here? + $key = $string; + $expected = ","; + } elsif (!defined $comment) { + # FIXME: Validate UTF-8 here? + $comment = $string; + $expected = ")"; + } + } else { + if (defined $nestingLevel) { + # In a debug macro, no need to localize. + } elsif ($previousToken eq "#include" or $previousToken eq "#import") { + # File name, no need to localize. + } elsif ($previousToken eq "extern" and $string eq "C") { + # extern "C", no need to localize. + } elsif ($string eq "") { + # Empty string can sometimes be localized, but we need not complain if not. 
+ } elsif ($exception{$file}) { + $usedException{$file} = 1; + } elsif ($exception{"\"$string\""}) { + $usedException{"\"$string\""} = 1; + } elsif ($exception{"$file:\"$string\""}) { + $usedException{"$file:\"$string\""} = 1; + } else { + print "$file:$stringLine:\"$string\" is not marked for localization\n"; + $notLocalizedCount++; + } + } + $string = undef; + last if !defined $token; + } + + $previousToken = $token; + + if ($token =~ /^NSLocalized/ && $token !~ /NSLocalizedDescriptionKey/ && $token !~ /NSLocalizedStringFromTableInBundle/) { + print "$file:$.:ERROR:found a use of an NSLocalized macro; not supported\n"; + $nestingLevel = 0 if !defined $nestingLevel; + $sawError = 1; + $NSLocalizeCount++; + } elsif ($token eq "/*") { + if (!s-^.*?\*/--) { + $_ = ""; # If the comment doesn't end, discard the result of the line and set flag + $inComment = 1; + } + } elsif ($token eq "//") { + $_ = ""; # Discard the rest of the line + } elsif ($token eq "'") { + if (!s-([^\\]|\\.)'--) { #' <-- that single quote makes the Project Builder editor less confused + print "$file:$.:ERROR:mismatched single quote\n"; + $sawError = 1; + $_ = ""; + } + } else { + if ($expected and $expected ne $token) { + print "$file:$.:ERROR:found $token but expected $expected\n"; + $sawError = 1; + $expected = ""; + } + if ($token =~ /UI_STRING(_KEY)?$/) { + $expected = "("; + $macro = $token; + $UIString = undef; + $key = undef; + $comment = undef; + $macroLine = $.; + } elsif ($token eq "(" or $token eq "[") { + ++$nestingLevel if defined $nestingLevel; + $expected = "a quoted string" if $expected; + } elsif ($token eq ",") { + $expected = "a quoted string" if $expected; + } elsif ($token eq ")" or $token eq "]") { + $nestingLevel = undef if defined $nestingLevel && !--$nestingLevel; + if ($expected) { + $key = $UIString if !defined $key; + HandleUIString($UIString, $key, $comment, $file, $macroLine); + $macro = ""; + $expected = ""; + $localizedCount++; + } + } elsif ($isDebugMacro{$token}) { + $nestingLevel = 0 if !defined $nestingLevel; + } + } + } + + } + + goto handleString if defined $string; + + if ($expected) { + print "$file:ERROR:reached end of file but expected $expected\n"; + $sawError = 1; + } + + close SOURCE; +} + +# Unescapes C language hexadecimal escape sequences. 
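+# For example, "path\x2fto" unescapes to "path/to"; a sequence that decodes to 256
+# or more, such as "\x100", is reported as out of range and rejected.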
+sub UnescapeHexSequence($) +{ + my ($originalStr) = @_; + + my $escapedStr = $originalStr; + my $unescapedStr = ""; + + for (;;) { + if ($escapedStr =~ s-^\\x([[:xdigit:]]+)--) { + if (256 <= hex($1)) { + print "Hexadecimal escape sequence out of range: \\x$1\n"; + return undef; + } + $unescapedStr .= pack("H*", $1); + } elsif ($escapedStr =~ s-^(.)--) { + $unescapedStr .= $1; + } else { + return $unescapedStr; + } + } +} + +my %stringByKey; +my %commentByKey; +my %fileByKey; +my %lineByKey; + +sub HandleUIString +{ + my ($string, $key, $comment, $file, $line) = @_; + + my $bad = 0; + $string = UnescapeHexSequence($string); + if (!defined($string)) { + print "$file:$line:ERROR:string has an illegal hexadecimal escape sequence\n"; + $bad = 1; + } + $key = UnescapeHexSequence($key); + if (!defined($key)) { + print "$file:$line:ERROR:key has an illegal hexadecimal escape sequence\n"; + $bad = 1; + } + $comment = UnescapeHexSequence($comment); + if (!defined($comment)) { + print "$file:$line:ERROR:comment has an illegal hexadecimal escape sequence\n"; + $bad = 1; + } + if (grep { $_ == 0xFFFD } unpack "U*", $string) { + print "$file:$line:ERROR:string for translation has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n"; + $bad = 1; + } + if ($string ne $key && grep { $_ == 0xFFFD } unpack "U*", $key) { + print "$file:$line:ERROR:key has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n"; + $bad = 1; + } + if (grep { $_ == 0xFFFD } unpack "U*", $comment) { + print "$file:$line:ERROR:comment for translation has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n"; + $bad = 1; + } + if ($bad) { + $sawError = 1; + return; + } + + if ($stringByKey{$key} && $stringByKey{$key} ne $string) { + print "$file:$line:encountered the same key, \"$key\", twice, with different strings\n"; + print "$fileByKey{$key}:$lineByKey{$key}:previous occurrence\n"; + $keyCollisionCount++; + return; + } + if ($commentByKey{$key} && $commentByKey{$key} ne $comment) { + print "$file:$line:encountered the same key, \"$key\", twice, with different comments\n"; + print "$fileByKey{$key}:$lineByKey{$key}:previous occurrence\n"; + $keyCollisionCount++; + return; + } + + $fileByKey{$key} = $file; + $lineByKey{$key} = $line; + $stringByKey{$key} = $string; + $commentByKey{$key} = $comment; +} + +print "\n" if $sawError || $notLocalizedCount || $NSLocalizeCount; + +my @unusedExceptions = sort grep { !$usedException{$_} } keys %exception; +if (@unusedExceptions) { + for my $unused (@unusedExceptions) { + print "$exceptionsFile:$exception{$unused}:exception $unused not used\n"; + } + print "\n"; +} + +print "$localizedCount localizable strings\n" if $localizedCount; +print "$keyCollisionCount key collisions\n" if $keyCollisionCount; +print "$notLocalizedCount strings not marked for localization\n" if $notLocalizedCount; +print "$NSLocalizeCount uses of NSLocalize\n" if $NSLocalizeCount; +print scalar(@unusedExceptions), " unused exceptions\n" if @unusedExceptions; + +if ($sawError) { + print "\nErrors encountered. Exiting without writing to $fileToUpdate.\n"; + exit 1; +} + +my $localizedStrings = ""; + +for my $key (sort keys %commentByKey) { + $localizedStrings .= "/* $commentByKey{$key} */\n\"$key\" = \"$stringByKey{$key}\";\n\n"; +} + +# Write out the strings file in UTF-16 with a BOM. 
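+# (0xFEFF below is the byte-order mark; pack "n*" writes each code point as a
+# big-endian 16-bit unit, which assumes the strings stay within the Basic
+# Multilingual Plane.)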
+utf8::decode($localizedStrings) if $^V ge v5.8; +my $output = pack "n*", (0xFEFF, unpack "U*", $localizedStrings); + +if (-e "$fileToUpdate") { + open STRINGS, ">", "$fileToUpdate" or die; + print STRINGS $output; + close STRINGS; +} else { + print "$fileToUpdate does not exist\n"; + exit 1; +} diff --git a/Tools/Scripts/find-extra-includes b/Tools/Scripts/find-extra-includes new file mode 100755 index 0000000..4a847ed --- /dev/null +++ b/Tools/Scripts/find-extra-includes @@ -0,0 +1,102 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "find-extra-includes" script for WebKit Open Source Project + +use strict; +use File::Find; + +find(\&wanted, @ARGV ? @ARGV : "."); + +my %paths; +my %includes; + +sub wanted +{ + my $file = $_; + + if ($file eq "icu") { + $File::Find::prune = 1; + return; + } + + if ($file !~ /^\./ && $file =~ /\.(h|cpp|c|mm|m)$/) { + $paths{$file} = $File::Find::name; + open FILE, $file or die; + while (<FILE>) { + if (m-^\s*#\s*(include|import)\s+["<]((\S+/)*)(\S+)[">]-) { + my $include = ($2 eq "sys/" ? $2 : "") . 
$4; + $includes{$file}{$include}++; + } + } + close FILE; + } +} + +my %totalIncludes; + +sub fillOut +{ + my ($file) = @_; + + return if defined $totalIncludes{$file}; + + for my $include (keys %{ $includes{$file} }) { + $totalIncludes{$file}{$include} = 1; + fillOut($include); + for my $i (keys %{ $totalIncludes{$include} }) { + $totalIncludes{$file}{$i} = 1; + } + } +} + +sub check +{ + my ($file) = @_; + + for my $include (keys %{ $includes{$file} }) { + fillOut($include); + } + for my $i1 (sort keys %{ $includes{$file} }) { + for my $i2 (keys %{ $includes{$file} }) { + next if $i1 eq $i2; + if ($totalIncludes{$i2}{$i1}) { + my $b1 = $i1; + my $b2 = $file; + $b1 =~ s/\..+$//; + $b2 =~ s/\..+$//; + print "$paths{$file} does not need to include $i1, because $i2 does\n" if $b1 ne $b2; + last; + } + } + } +} + +for my $file (sort keys %includes) { + check($file); +} diff --git a/Tools/Scripts/find-included-framework-headers b/Tools/Scripts/find-included-framework-headers new file mode 100755 index 0000000..759a60b --- /dev/null +++ b/Tools/Scripts/find-included-framework-headers @@ -0,0 +1,30 @@ +#!/bin/sh +# Copyright (C) 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# A script to find headers included from the given frameworks by files in the +# current directory (and subdirectories). + +for framework in $*; do + echo -e "\n$framework\n==================" + find . \( -name '*.cpp' -o -name '*.h' -o -name '*.m' -o -name '*.mm' \) -exec grep "<$framework/" {} ';' | sed -e 's|.*/\(.*\.h\).*|\1|' | sort -u +done diff --git a/Tools/Scripts/gdb-safari b/Tools/Scripts/gdb-safari new file mode 100755 index 0000000..9776212 --- /dev/null +++ b/Tools/Scripts/gdb-safari @@ -0,0 +1,53 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run under gdb" script for WebKit Open Source Project. + +use strict; +use File::Temp qw/:mktemp/; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); +my $productDir = productDir(); +my $safariPath = safariPath(); + +# Check to see that gdb is in the usual place. +my $gdbPath = "/usr/bin/gdb"; +die "Can't find gdb executable. Is gdb installed?\n" unless -x $gdbPath; + +# Check to see that all the frameworks are built. +checkFrameworks(); + +$ENV{DYLD_FRAMEWORK_PATH} = $productDir; +$ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = 'YES'; + +print "Starting Safari under gdb with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n"; +my @architectureFlags = ("-arch", architecture()) if !isTiger(); +exec $gdbPath, @architectureFlags, $safariPath or die; diff --git a/Tools/Scripts/generate-coverage-data b/Tools/Scripts/generate-coverage-data new file mode 100755 index 0000000..c97d086 --- /dev/null +++ b/Tools/Scripts/generate-coverage-data @@ -0,0 +1,71 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved. +# Copyright (C) 2007 Holger Hans Peter Freyther. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simple script to build, run and visualize coverage information + +use strict; +use File::Basename; +use File::Spec; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +# Generate a name for our results +my $svnVersion = determineCurrentSVNRevision(); +my @timeData = localtime(time); +my $resultName = $svnVersion . "-" . join('_', @timeData); +my @otherOptions = (); + +# Move to the source directory +# Delete old gcov files +# Compile WebKit and run the tests +# Generate the coverage graph... +# Upload + +$ENV{'WEBKIT_COVERAGE_BUILD'} = 1; +chdirWebKit(); + +# Clean-up old files +print "Cleaning up\n"; +system("if [ -d WebKitBuild ]; then find WebKitBuild -name '*.gcda' -delete; fi;") == 0 or die; + + +print "Building and testing\n"; +system("Tools/Scripts/build-webkit", "--coverage", @ARGV) == 0 or die; +system "Tools/Scripts/run-webkit-tests", "--no-launch-safari"; +system "Tools/Scripts/run-javascriptcore-tests", "--coverage", @ARGV; + +# Collect the data and generate a report +print "Collecting coverage data\n"; +system("Tools/CodeCoverage/run-generate-coverage-data", $resultName, "WebKitBuild/Coverage") == 0 or die; +system("Tools/CodeCoverage/regenerate-coverage-display", "WebKitBuild/Coverage", "WebKitBuild/Coverage/html") == 0 or die; + +print "Done\n"; diff --git a/Tools/Scripts/generate-qt-inspector-resource b/Tools/Scripts/generate-qt-inspector-resource new file mode 100755 index 0000000..a65da13 --- /dev/null +++ b/Tools/Scripts/generate-qt-inspector-resource @@ -0,0 +1,53 @@ +#!/usr/bin/perl +# +# Copyright (C) 2008 Holger Hans Peter Freyther +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Regenerate WebCore/inspector/front-end/WebKit.qrc from the content of WebCore/inspector/front-end/ + +sub addFiles(@) +{ + my @files = @_; + + foreach $file (@files) { + $file =~ s,WebCore/inspector/front-end/,,; + print WEBKIT_QRC " <file>".$file . "</file>\n"; + } +} + +# Setup +open(WEBKIT_QRC, ">WebCore/inspector/front-end/WebKit.qrc") or die; +print WEBKIT_QRC '<!DOCTYPE RCC><RCC version="1.0">'."\n"; +print WEBKIT_QRC '<qresource prefix="/webkit/inspector">'."\n"; + + +# Directory with html and js files and the images +addFiles(<WebCore/inspector/front-end/*.{*html,js,css,svg}>); +addFiles(<WebCore/inspector/front-end/Images/*>); + +print WEBKIT_QRC "</qresource>\n"; +print WEBKIT_QRC "</RCC>\n"; +close(WEBKIT_QRC); diff --git a/Tools/Scripts/make-script-test-wrappers b/Tools/Scripts/make-script-test-wrappers new file mode 100755 index 0000000..aed1834 --- /dev/null +++ b/Tools/Scripts/make-script-test-wrappers @@ -0,0 +1,141 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to generate HTML wrappers for JavaScript tests from templates + +use strict; + +use FindBin; +use lib $FindBin::Bin; + +use File::Basename; +use File::Find; +use Getopt::Long; +use webkitdirs; + +sub directoryFilter; +sub findTemplateFiles(@); + +my $showHelp; + +my $result = GetOptions( + "help" => \$showHelp, +); + +if (!$result || $showHelp) { + print STDERR basename($0) . 
" [-h|--help] [path ...]\n"; + exit 1; +} + +setConfiguration(); +my $productDir = productDir(); + +chdirWebKit(); + +my @templates = findTemplateFiles(@ARGV); + +for my $tfile (@templates) { + + my $tpath = $tfile; + my $templateDirectory; + my $templateRelativePath; + if ($tpath =~ s:/(script-tests)/TEMPLATE.html$::) { + $templateDirectory = $1; + $templateRelativePath = $1 . "/TEMPLATE.html"; + } else { + print "Inappropriate position of a template: ${tpath}\n"; + next; + } + + print "${tpath}\n"; + + chdirWebKit(); + chdir($tpath); + + my @files; + my $fileFilter = sub { + push @files, $File::Find::name if substr($_, -3) eq ".js"; + }; + find({ preprocess => \&directoryFilter, wanted => $fileFilter }, $templateDirectory); + + open TEMPLATE, "<${templateRelativePath}"; + my $template = do { local $/; <TEMPLATE> }; + close TEMPLATE; + + my $templateNegative = $template; + if (-e "${templateDirectory}/TEMPLATE-n.html") { + open TEMPLATE, "<${templateDirectory}/TEMPLATE-n.html"; + $templateNegative = do { local $/; <TEMPLATE> }; + close TEMPLATE; + } + + for my $file (@files) { + my $html = $file; + $html =~ s:${templateDirectory}/(.*)\.js:$1.html:; + next if -f "$html-disabled"; + + system("cat ${file} | tr '\\0' ' ' | grep -q 'successfullyParsed ='"); + if ($? != 0) { + `echo "" >> "${file}"`; + `echo "var successfullyParsed = true;" >> "${file}"`; + } + + print " ${html}\n"; + open HTML, ">$html"; + my $output = ($file =~ /-n\.js/) ? $templateNegative : $template; + $output =~ s:YOUR_JS_FILE_HERE:$file:; + print HTML $output; + + close HTML; + } +} + +exit 0; + +sub directoryFilter +{ + return () if basename($File::Find::dir) eq ".svn"; + return @_; +} + +sub findTemplateFiles(@) { + my @args = @_; + my @templateFiles; + + push @args, "LayoutTests" if scalar(@args) == 0; + + my @paths = map { -f $_ ? dirname($_) : $_ } @args; + + my $fileFilter = sub { + push @templateFiles, $File::Find::name if $_ eq "TEMPLATE.html"; + }; + + find({ preprocess => \&directoryFilter, wanted => $fileFilter }, @paths); + + return @templateFiles; +} diff --git a/Tools/Scripts/new-run-webkit-httpd b/Tools/Scripts/new-run-webkit-httpd new file mode 100755 index 0000000..f6ec164 --- /dev/null +++ b/Tools/Scripts/new-run-webkit-httpd @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A utility script for starting and stopping the HTTP server with the + same configuration as used in the layout tests.""" + +# +# FIXME: currently this code only works with the Chromium ports and LigHTTPd. +# It should be made to work on all ports. +# +# This script is also used by Chromium's ui_tests to run http layout tests +# in a browser. +# +import optparse +import os +import sys +import tempfile + +scripts_directory = os.path.dirname(os.path.abspath(sys.argv[0])) +webkitpy_directory = os.path.join(scripts_directory, "webkitpy") +sys.path.append(os.path.join(webkitpy_directory, "layout_tests")) + +import port +from port import http_server + +def run(options): + if not options.server: + print ('Usage: %s --server {start|stop} [--root=root_dir]' + ' [--port=port_number]' % sys.argv[0]) + else: + if (options.root is None) and (options.port is not None): + # specifying root but not port means we want httpd on default + # set of ports that LayoutTest use, but pointing to a different + # source of tests. Specifying port but no root does not seem + # meaningful. + raise 'Specifying port requires also a root.' + port_obj = port.get(None, options) + httpd = http_server.Lighttpd(port_obj, + tempfile.gettempdir(), + port=options.port, + root=options.root, + run_background=options.run_background) + if options.server == 'start': + httpd.start() + else: + httpd.stop(force=True) + + +def main(): + option_parser = optparse.OptionParser() + option_parser.add_option('-k', '--server', + help='Server action (start|stop)') + option_parser.add_option('-p', '--port', + help='Port to listen on (overrides layout test ports)') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot (overrides layout test roots)') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", help='Register Cygwin paths (on Win try bots)') + option_parser.add_option('--run_background', action="store_true", + dest="run_background", + help='Run on background (for running as UI test)') + options, args = option_parser.parse_args() + + # FIXME: Make this work with other ports as well. + options.chromium = True + + run(options) + + +if '__main__' == __name__: + main() diff --git a/Tools/Scripts/new-run-webkit-tests b/Tools/Scripts/new-run-webkit-tests new file mode 100755 index 0000000..9fcacaa --- /dev/null +++ b/Tools/Scripts/new-run-webkit-tests @@ -0,0 +1,41 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Wrapper around webkitpy/layout_tests/run_webkit_tests.py""" +import signal +import sys + +import webkitpy.layout_tests.run_webkit_tests as run_webkit_tests + +if __name__ == '__main__': + try: + sys.exit(run_webkit_tests.main()) + except KeyboardInterrupt: + # this mirrors what the shell normally does + sys.exit(signal.SIGINT + 128) diff --git a/Tools/Scripts/new-run-webkit-websocketserver b/Tools/Scripts/new-run-webkit-websocketserver new file mode 100644 index 0000000..3350582 --- /dev/null +++ b/Tools/Scripts/new-run-webkit-websocketserver @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""A utility script for starting and stopping the web socket server with the + same configuration as used in the layout tests.""" + +import logging +import optparse +import tempfile + +import webkitpy.layout_tests.port.factory as factory +import webkitpy.layout_tests.port.websocket_server as websocket_server + + +def main(): + option_parser = optparse.OptionParser() + option_parser.add_option('--server', type='choice', + choices=['start', 'stop'], default='start', + help='Server action (start|stop).') + option_parser.add_option('-p', '--port', dest='port', + default=None, help='Port to listen on.') + option_parser.add_option('-r', '--root', + help='Absolute path to DocumentRoot ' + '(overrides layout test roots).') + option_parser.add_option('-t', '--tls', dest='use_tls', + action='store_true', + default=False, help='use TLS (wss://).') + option_parser.add_option('-k', '--private_key', dest='private_key', + default='', help='TLS private key file.') + option_parser.add_option('-c', '--certificate', dest='certificate', + default='', help='TLS certificate file.') + option_parser.add_option('--chromium', action='store_true', + dest='chromium', + default=False, + help='Use the Chromium port.') + option_parser.add_option('--register_cygwin', action="store_true", + dest="register_cygwin", + help='Register Cygwin paths (on Win try bots).') + option_parser.add_option('--pidfile', help='path to pid file.') + option_parser.add_option('--output-dir', dest='output_dir', + default=None, help='output directory.') + option_parser.add_option('-v', '--verbose', action='store_true', + default=False, + help='Include debug-level logging.') + options, args = option_parser.parse_args() + + if not options.port: + if options.use_tls: + # FIXME: We shouldn't grab at this private variable. + options.port = websocket_server._DEFAULT_WSS_PORT + else: + # FIXME: We shouldn't grab at this private variable. + options.port = websocket_server._DEFAULT_WS_PORT + + if not options.output_dir: + options.output_dir = tempfile.gettempdir() + + kwds = {'port': options.port, 'use_tls': options.use_tls} + if options.root: + kwds['root'] = options.root + if options.private_key: + kwds['private_key'] = options.private_key + if options.certificate: + kwds['certificate'] = options.certificate + if options.pidfile: + kwds['pidfile'] = options.pidfile + + port_obj = factory.get(options=options) + pywebsocket = websocket_server.PyWebSocket(port_obj, options.output_dir, **kwds) + + log_level = logging.WARN + if options.verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level) + + if 'start' == options.server: + pywebsocket.start() + else: + pywebsocket.stop(force=True) + +if '__main__' == __name__: + main() diff --git a/Tools/Scripts/num-cpus b/Tools/Scripts/num-cpus new file mode 100755 index 0000000..8a8c97f --- /dev/null +++ b/Tools/Scripts/num-cpus @@ -0,0 +1,6 @@ +#!/usr/bin/perl -w +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; +print numberOfCPUs() . "\n"; diff --git a/Tools/Scripts/old-run-webkit-tests b/Tools/Scripts/old-run-webkit-tests new file mode 100755 index 0000000..ab41e9b --- /dev/null +++ b/Tools/Scripts/old-run-webkit-tests @@ -0,0 +1,2481 @@ +#!/usr/bin/perl + +# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# Copyright (C) 2007 Matt Lilek (pewtermoose@gmail.com) +# Copyright (C) 2007 Eric Seidel <eric@webkit.org> +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# Copyright (C) 2009 Andras Becsi (becsi.andras@stud.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+#    its contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Script to run the WebKit Open Source Project layout tests.
+
+# Run all the tests passed in on the command line.
+# If no tests are passed, find all the .html, .shtml, .xml, .xhtml, .xhtmlmp, .pl, .php (and svg) files in the test directory.
+
+# Run each test.
+# Compare against the existing file xxx-expected.txt.
+# If there is a mismatch, generate xxx-actual.txt and xxx-diffs.txt.
+ +# At the end, report: +# the number of tests that got the expected results +# the number of tests that ran, but did not get the expected results +# the number of tests that failed to run +# the number of tests that were run but had no expected results to compare against + +use strict; +use warnings; + +use Config; +use Cwd; +use Data::Dumper; +use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK); +use File::Basename; +use File::Copy; +use File::Find; +use File::Path; +use File::Spec; +use File::Spec::Functions; +use File::Temp; +use FindBin; +use Getopt::Long; +use IPC::Open2; +use IPC::Open3; +use Time::HiRes qw(time usleep); + +use List::Util 'shuffle'; + +use lib $FindBin::Bin; +use webkitperl::features; +use webkitperl::httpd; +use webkitdirs; +use VCSUtils; +use POSIX; + +sub buildPlatformResultHierarchy(); +sub buildPlatformTestHierarchy(@); +sub checkPythonVersion(); +sub closeCygpaths(); +sub closeDumpTool(); +sub closeWebSocketServer(); +sub configureAndOpenHTTPDIfNeeded(); +sub countAndPrintLeaks($$$); +sub countFinishedTest($$$$); +sub deleteExpectedAndActualResults($); +sub dumpToolDidCrash(); +sub epiloguesAndPrologues($$); +sub expectedDirectoryForTest($;$;$); +sub fileNameWithNumber($$); +sub htmlForResultsSection(\@$&); +sub isTextOnlyTest($); +sub launchWithEnv(\@\%); +sub resolveAndMakeTestResultsDirectory(); +sub numericcmp($$); +sub openDiffTool(); +sub buildDumpTool($); +sub openDumpTool(); +sub parseLeaksandPrintUniqueLeaks(); +sub openWebSocketServerIfNeeded(); +sub pathcmp($$); +sub printFailureMessageForTest($$); +sub processIgnoreTests($$); +sub readFromDumpToolWithTimer(**); +sub readSkippedFiles($); +sub recordActualResultsAndDiff($$); +sub sampleDumpTool(); +sub setFileHandleNonBlocking(*$); +sub setUpWindowsCrashLogSaving(); +sub slowestcmp($$); +sub splitpath($); +sub stopRunningTestsEarlyIfNeeded(); +sub stripExtension($); +sub stripMetrics($$); +sub testCrashedOrTimedOut($$$$$); +sub toCygwinPath($); +sub toURL($); +sub toWindowsPath($); +sub validateSkippedArg($$;$); +sub writeToFile($$); + +# Argument handling +my $addPlatformExceptions = 0; +my $complexText = 0; +my $exitAfterNFailures = 0; +my $exitAfterNCrashesOrTimeouts = 0; +my $generateNewResults = isAppleMacWebKit() ? 1 : 0; +my $guardMalloc = ''; +# FIXME: Dynamic HTTP-port configuration in this file is wrong. The various +# apache config files in LayoutTests/http/config govern the port numbers. +# Dynamic configuration as-written will also cause random failures in +# an IPv6 environment. See https://bugs.webkit.org/show_bug.cgi?id=37104. +my $httpdPort = 8000; +my $httpdSSLPort = 8443; +my $ignoreMetrics = 0; +my $webSocketPort = 8880; +# wss is disabled until all platforms support pyOpenSSL. +# my $webSocketSecurePort = 9323; +my $ignoreTests = ''; +my $iterations = 1; +my $launchSafari = 1; +my $mergeDepth; +my $pixelTests = ''; +my $platform; +my $quiet = ''; +my $randomizeTests = 0; +my $repeatEach = 1; +my $report10Slowest = 0; +my $resetResults = 0; +my $reverseTests = 0; +my $root; +my $runSample = 1; +my $shouldCheckLeaks = 0; +my $showHelp = 0; +my $stripEditingCallbacks; +my $testHTTP = 1; +my $testWebSocket = 1; +my $testMedia = 1; +my $tmpDir = "/tmp"; +my $testResultsDirectory = File::Spec->catdir($tmpDir, "layout-test-results"); +my $testsPerDumpTool = 1000; +my $threaded = 0; +# DumpRenderTree has an internal timeout of 30 seconds, so this must be > 30. 
+my $timeoutSeconds = 35; +my $tolerance = 0; +my $treatSkipped = "default"; +my $useRemoteLinksToTests = 0; +my $useValgrind = 0; +my $verbose = 0; +my $shouldWaitForHTTPD = 0; +my $useWebKitTestRunner = 0; + +my @leaksFilenames; + +if (isWindows() || isMsys()) { + print "This script has to be run under Cygwin to function correctly.\n"; + exit 1; +} + +# Default to --no-http for wx for now. +$testHTTP = 0 if (isWx()); + +my $perlInterpreter = "perl"; + +my $expectedTag = "expected"; +my $actualTag = "actual"; +my $prettyDiffTag = "pretty-diff"; +my $diffsTag = "diffs"; +my $errorTag = "stderr"; + +# These are defined here instead of closer to where they are used so that they +# will always be accessible from the END block that uses them, even if the user +# presses Ctrl-C before Perl has finished evaluating this whole file. +my $windowsPostMortemDebuggerKey = "/HKLM/SOFTWARE/Microsoft/Windows NT/CurrentVersion/AeDebug"; +my %previousWindowsPostMortemDebuggerValues; + +my $realPlatform; + +my @macPlatforms = ("mac-tiger", "mac-leopard", "mac-snowleopard", "mac"); +my @winPlatforms = ("win-xp", "win-vista", "win-7", "win"); + +if (isAppleMacWebKit()) { + if (isTiger()) { + $platform = "mac-tiger"; + $tolerance = 1.0; + } elsif (isLeopard()) { + $platform = "mac-leopard"; + $tolerance = 0.1; + } elsif (isSnowLeopard()) { + $platform = "mac-snowleopard"; + $tolerance = 0.1; + } else { + $platform = "mac"; + } +} elsif (isQt()) { + if (isDarwin()) { + $platform = "qt-mac"; + } elsif (isLinux()) { + $platform = "qt-linux"; + } elsif (isWindows() || isCygwin()) { + $platform = "qt-win"; + } else { + $platform = "qt"; + } +} elsif (isGtk()) { + $platform = "gtk"; +} elsif (isWx()) { + $platform = "wx"; +} elsif (isCygwin() || isWindows()) { + if (isWindowsXP()) { + $platform = "win-xp"; + } elsif (isWindowsVista()) { + $platform = "win-vista"; + } elsif (isWindows7()) { + $platform = "win-7"; + } else { + $platform = "win"; + } +} + +if (isQt() || isAppleWinWebKit()) { + my $testfontPath = $ENV{"WEBKIT_TESTFONTS"}; + if (!$testfontPath || !-d "$testfontPath") { + print "The WEBKIT_TESTFONTS environment variable is not defined or not set properly\n"; + print "You must set it before running the tests.\n"; + print "Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts\n"; + exit 1; + } +} + +if (!defined($platform)) { + print "WARNING: Your platform is not recognized. Any platform-specific results will be generated in platform/undefined.\n"; + $platform = "undefined"; +} + +if (!checkPythonVersion()) { + print "WARNING: Your platform does not have Python 2.5+, which is required to run websocket server, so disabling websocket/tests.\n"; + $testWebSocket = 0; +} + +my $programName = basename($0); +my $launchSafariDefault = $launchSafari ? "launch" : "do not launch"; +my $httpDefault = $testHTTP ? "run" : "do not run"; +my $sampleDefault = $runSample ? "run" : "do not run"; + +my $usage = <<EOF; +Usage: $programName [options] [testdir|testpath ...] 
+  --add-platform-exceptions        Put new results for non-platform-specific failing tests into the platform-specific results directory
+  --complex-text                   Use the complex text code path for all text (Mac OS X and Windows only)
+  -c|--configuration config        Set DumpRenderTree build configuration
+  -g|--guard-malloc                Enable malloc guard
+  --exit-after-n-failures N        Exit after the first N failures (includes crashes) instead of running all tests
+  --exit-after-n-crashes-or-timeouts N
+                                   Exit after the first N crashes or timeouts instead of running all tests
+  -h|--help                        Show this help message
+  --[no-]http                      Run (or do not run) http tests (default: $httpDefault)
+  --[no-]wait-for-httpd            Wait for httpd if some other test session is using it already (same as WEBKIT_WAIT_FOR_HTTPD=1). (default: $shouldWaitForHTTPD)
+  -i|--ignore-tests                Comma-separated list of directories or tests to ignore
+  --iterations n                   Number of times to run the set of tests (e.g. ABCABCABC)
+  --[no-]launch-safari             Launch (or do not launch) Safari to display test results (default: $launchSafariDefault)
+  -l|--leaks                       Enable leaks checking
+  --[no-]new-test-results          Generate results for new tests
+  --nthly n                        Restart DumpRenderTree every n tests (default: $testsPerDumpTool)
+  -p|--pixel-tests                 Enable pixel tests
+  --tolerance t                    Ignore image differences less than this percentage (default: $tolerance)
+  --platform                       Override the detected platform to use for tests and results (default: $platform)
+  --port                           Web server port to use with http tests
+  -q|--quiet                       Less verbose output
+  --reset-results                  Reset ALL results (including pixel tests if --pixel-tests is set)
+  -o|--results-directory           Output results directory (default: $testResultsDirectory)
+  --random                         Run the tests in a random order
+  --repeat-each n                  Number of times to run each test (e.g. AAABBBCCC)
+  --reverse                        Run the tests in reverse alphabetical order
+  --root                           Path to root tools build
+  --[no-]sample-on-timeout         Run sample on timeout (default: $sampleDefault) (Mac OS X only)
+  -1|--singly                      Isolate each test case run (implies --nthly 1 --verbose)
+  --skipped=[default|ignore|only]  Specifies how to treat the Skipped file
+                                   default: Tests/directories listed in the Skipped file are not tested
+                                   ignore: The Skipped file is ignored
+                                   only: Only those tests/directories listed in the Skipped file will be run
+  --slowest                        Report the 10 slowest tests
+  --ignore-metrics                 Ignore metrics in tests
+  --[no-]strip-editing-callbacks   Remove editing callbacks from expected results
+  -t|--threaded                    Run a concurrent JavaScript thread with each test
+  --timeout t                      Sets the number of seconds before a test times out (default: $timeoutSeconds)
+  --valgrind                       Run DumpRenderTree inside valgrind (Qt/Linux only)
+  -v|--verbose                     More verbose output (overrides --quiet)
+  -m|--merge-leak-depth arg        Merges leak callStacks and prints the number of unique leaks beneath a callstack depth of arg. Defaults to 5.
+  --use-remote-links-to-tests      Link to test files within the SVN repository in the results.
+  -2|--webkit-test-runner          Use WebKitTestRunner rather than DumpRenderTree.
+EOF
+
+setConfiguration();
+
+my $getOptionsResult = GetOptions(
+    'add-platform-exceptions' => \$addPlatformExceptions,
+    'complex-text' => \$complexText,
+    'exit-after-n-failures=i' => \$exitAfterNFailures,
+    'exit-after-n-crashes-or-timeouts=i' => \$exitAfterNCrashesOrTimeouts,
+    'guard-malloc|g' => \$guardMalloc,
+    'help|h' => \$showHelp,
+    'http!' => \$testHTTP,
+    'wait-for-httpd!' => \$shouldWaitForHTTPD,
+    'ignore-metrics!' => \$ignoreMetrics,
+    'ignore-tests|i=s' => \$ignoreTests,
+    'iterations=i' => \$iterations,
+    'launch-safari!' => \$launchSafari,
+    'leaks|l' => \$shouldCheckLeaks,
+    'merge-leak-depth|m:5' => \$mergeDepth,
+    'new-test-results!' => \$generateNewResults,
+    'nthly=i' => \$testsPerDumpTool,
+    'pixel-tests|p' => \$pixelTests,
+    'platform=s' => \$platform,
+    'port=i' => \$httpdPort,
+    'quiet|q' => \$quiet,
+    'random' => \$randomizeTests,
+    'repeat-each=i' => \$repeatEach,
+    'reset-results' => \$resetResults,
+    'results-directory|o=s' => \$testResultsDirectory,
+    'reverse' => \$reverseTests,
+    'root=s' => \$root,
+    'sample-on-timeout!' => \$runSample,
+    'singly|1' => sub { $testsPerDumpTool = 1; },
+    'skipped=s' => \&validateSkippedArg,
+    'slowest' => \$report10Slowest,
+    'strip-editing-callbacks!' => \$stripEditingCallbacks,
+    'threaded|t' => \$threaded,
+    'timeout=i' => \$timeoutSeconds,
+    'tolerance=f' => \$tolerance,
+    'use-remote-links-to-tests' => \$useRemoteLinksToTests,
+    'valgrind' => \$useValgrind,
+    'verbose|v' => \$verbose,
+    'webkit-test-runner|2' => \$useWebKitTestRunner,
+);
+
+if (!$getOptionsResult || $showHelp) {
+    print STDERR $usage;
+    exit 1;
+}
+
+if ($useWebKitTestRunner) {
+    if (isAppleMacWebKit()) {
+        $realPlatform = $platform;
+        $platform = "mac-wk2";
+    } elsif (isAppleWinWebKit()) {
+        $stripEditingCallbacks = 0 unless defined $stripEditingCallbacks;
+        $realPlatform = $platform;
+        $platform = "win-wk2";
+    } elsif (isQt()) {
+        $realPlatform = $platform;
+        $platform = "qt-wk2";
+    }
+}
+
+$timeoutSeconds *= 10 if $guardMalloc;
+
+$stripEditingCallbacks = isCygwin() unless defined $stripEditingCallbacks;
+
+my $ignoreSkipped = $treatSkipped eq "ignore";
+my $skippedOnly = $treatSkipped eq "only";
+
+my $configuration = configuration();
+
+# We need an environment variable to be able to enable the feature per-slave
+$shouldWaitForHTTPD = $ENV{"WEBKIT_WAIT_FOR_HTTPD"} unless ($shouldWaitForHTTPD);
+$verbose = 1 if $testsPerDumpTool == 1;
+
+if ($shouldCheckLeaks && $testsPerDumpTool > 1000) {
+    print STDERR "\nWARNING: Running more than 1000 tests at a time with MallocStackLogging enabled may cause a crash.\n\n";
+}
+
+# Stack logging does not play well with QuickTime on Tiger (rdar://problem/5537157)
+$testMedia = 0 if $shouldCheckLeaks && isTiger();
+
+# Generating remote links causes a lot of unnecessary spew on GTK build bot
+$useRemoteLinksToTests = 0 if isGtk();
+
+setUpWindowsCrashLogSaving() if isCygwin();
+
+setConfigurationProductDir(Cwd::abs_path($root)) if (defined($root));
+my $productDir = productDir();
+$productDir .= "/bin" if isQt();
+$productDir .= "/Programs" if isGtk();
+
+chdirWebKit();
+
+if (!defined($root)) {
+    # FIXME: We build both DumpRenderTree and WebKitTestRunner for
+    # WebKitTestRunner runs because DumpRenderTree still includes
+    # the DumpRenderTreeSupport module and the TestNetscapePlugin.
+    # These two projects should be factored out into their own
+    # projects.
+    buildDumpTool("DumpRenderTree");
+    buildDumpTool("WebKitTestRunner") if $useWebKitTestRunner;
+}
+
+my $dumpToolName = $useWebKitTestRunner ?
"WebKitTestRunner" : "DumpRenderTree"; + +if (isAppleWinWebKit()) { + $dumpToolName .= "_debug" if configurationForVisualStudio() eq "Debug_All"; + $dumpToolName .= $Config{_exe}; +} +my $dumpTool = File::Spec->catfile($productDir, $dumpToolName); +die "can't find executable $dumpToolName (looked in $productDir)\n" unless -x $dumpTool; + +my $imageDiffTool = "$productDir/ImageDiff"; +$imageDiffTool .= "_debug" if isCygwin() && configurationForVisualStudio() eq "Debug_All"; +die "can't find executable $imageDiffTool (looked in $productDir)\n" if $pixelTests && !-x $imageDiffTool; + +checkFrameworks() unless isCygwin(); + +if (isAppleMacWebKit()) { + push @INC, $productDir; + require DumpRenderTreeSupport; +} + +my $layoutTestsName = "LayoutTests"; +my $testDirectory = File::Spec->rel2abs($layoutTestsName); +my $expectedDirectory = $testDirectory; +my $platformBaseDirectory = catdir($testDirectory, "platform"); +my $platformTestDirectory = catdir($platformBaseDirectory, $platform); +my @platformResultHierarchy = buildPlatformResultHierarchy(); +my @platformTestHierarchy = buildPlatformTestHierarchy(@platformResultHierarchy); + +$expectedDirectory = $ENV{"WebKitExpectedTestResultsDirectory"} if $ENV{"WebKitExpectedTestResultsDirectory"}; + +$testResultsDirectory = File::Spec->rel2abs($testResultsDirectory); +my $testResults = File::Spec->catfile($testResultsDirectory, "results.html"); + +if (isAppleMacWebKit()) { + print STDERR "Compiling Java tests\n"; + my $javaTestsDirectory = catdir($testDirectory, "java"); + + if (system("/usr/bin/make", "-C", "$javaTestsDirectory")) { + exit 1; + } +} + + +print "Running tests from $testDirectory\n"; +if ($pixelTests) { + print "Enabling pixel tests with a tolerance of $tolerance%\n"; + if (isDarwin()) { + print "WARNING: Temporarily changing the main display color profile:\n"; + print "\tThe colors on your screen will change for the duration of the testing.\n"; + print "\tThis allows the pixel tests to have consistent color values across all machines.\n"; + + if (isPerianInstalled()) { + print "WARNING: Perian's QuickTime component is installed and this may affect pixel test results!\n"; + print "\tYou should avoid generating new pixel results in this environment.\n"; + print "\tSee https://bugs.webkit.org/show_bug.cgi?id=22615 for details.\n"; + } + } +} + +system "ln", "-s", $testDirectory, "/tmp/LayoutTests" unless -x "/tmp/LayoutTests"; + +my %ignoredFiles = ( "results.html" => 1 ); +my %ignoredDirectories = map { $_ => 1 } qw(platform); +my %ignoredLocalDirectories = map { $_ => 1 } qw(.svn _svn resources script-tests); +my %supportedFileExtensions = map { $_ => 1 } qw(html shtml xml xhtml xhtmlmp pl php); + +if (!checkWebCoreFeatureSupport("MathML", 0)) { + $ignoredDirectories{'mathml'} = 1; +} + +# FIXME: We should fix webkitperl/features.pm:hasFeature() to do the correct feature detection for Cygwin. 
+if (checkWebCoreFeatureSupport("SVG", 0)) { + $supportedFileExtensions{'svg'} = 1; +} elsif (isCygwin()) { + $supportedFileExtensions{'svg'} = 1; +} else { + $ignoredLocalDirectories{'svg'} = 1; +} + +if (!$testHTTP) { + $ignoredDirectories{'http'} = 1; + $ignoredDirectories{'websocket'} = 1; +} +if (!$testWebSocket) { + $ignoredDirectories{'websocket'} = 1; +} + +if (!$testMedia) { + $ignoredDirectories{'media'} = 1; + $ignoredDirectories{'http/tests/media'} = 1; +} + +my $supportedFeaturesResult = ""; + +if (isCygwin()) { + # Collect supported features list + setPathForRunningWebKitApp(\%ENV); + my $supportedFeaturesCommand = "\"$dumpTool\" --print-supported-features 2>&1"; + $supportedFeaturesResult = `$supportedFeaturesCommand 2>&1`; +} + +my $hasAcceleratedCompositing = 0; +my $has3DRendering = 0; + +if (isCygwin()) { + $hasAcceleratedCompositing = $supportedFeaturesResult =~ /AcceleratedCompositing/; + $has3DRendering = $supportedFeaturesResult =~ /3DRendering/; +} else { + $hasAcceleratedCompositing = checkWebCoreFeatureSupport("Accelerated Compositing", 0); + $has3DRendering = checkWebCoreFeatureSupport("3D Rendering", 0); +} + +if (!$hasAcceleratedCompositing) { + $ignoredDirectories{'compositing'} = 1; + + # This test has slightly different floating-point rounding when accelerated + # compositing is enabled. + $ignoredFiles{'svg/custom/use-on-symbol-inside-pattern.svg'} = 1; + + if (isAppleWebKit()) { + # In Apple's ports, the default controls for <video> contain a "full + # screen" button only if accelerated compositing is enabled. + $ignoredFiles{'media/controls-after-reload.html'} = 1; + $ignoredFiles{'media/controls-drag-timebar.html'} = 1; + $ignoredFiles{'media/controls-strict.html'} = 1; + $ignoredFiles{'media/controls-styling.html'} = 1; + $ignoredFiles{'media/video-controls-rendering.html'} = 1; + $ignoredFiles{'media/video-display-toggle.html'} = 1; + $ignoredFiles{'media/video-no-audio.html'} = 1; + } + + # Here we're using !$hasAcceleratedCompositing as a proxy for "is a headless XP machine" (like + # our test slaves). Headless XP machines can neither support accelerated compositing nor pass + # this test, so skipping the test here is expedient, if a little sloppy. See + # <http://webkit.org/b/48333>. + $ignoredFiles{'platform/win/plugins/npn-invalidate-rect-invalidates-window.html'} = 1 if isAppleWinWebKit(); +} + +if (!$has3DRendering) { + $ignoredDirectories{'animations/3d'} = 1; + $ignoredDirectories{'transforms/3d'} = 1; + + # These tests use the -webkit-transform-3d media query. 
+ $ignoredFiles{'fast/media/mq-transform-02.html'} = 1; + $ignoredFiles{'fast/media/mq-transform-03.html'} = 1; +} + +if (!checkWebCoreFeatureSupport("3D Canvas", 0)) { + $ignoredDirectories{'fast/canvas/webgl'} = 1; + $ignoredDirectories{'compositing/webgl'} = 1; + $ignoredDirectories{'http/tests/canvas/webgl'} = 1; +} + +if (checkWebCoreFeatureSupport("WML", 0)) { + $supportedFileExtensions{'wml'} = 1; +} else { + $ignoredDirectories{'http/tests/wml'} = 1; + $ignoredDirectories{'fast/wml'} = 1; + $ignoredDirectories{'wml'} = 1; +} + +if (!checkWebCoreFeatureSupport("WCSS", 0)) { + $ignoredDirectories{'fast/wcss'} = 1; +} + +if (!checkWebCoreFeatureSupport("XHTMLMP", 0)) { + $ignoredDirectories{'fast/xhtmlmp'} = 1; +} + +processIgnoreTests($ignoreTests, "ignore-tests") if $ignoreTests; +if (!$ignoreSkipped) { + if (!$skippedOnly || @ARGV == 0) { + readSkippedFiles(""); + } else { + # Since readSkippedFiles() appends to @ARGV, we must use a foreach + # loop so that we only iterate over the original argument list. + foreach my $argnum (0 .. $#ARGV) { + readSkippedFiles(shift @ARGV); + } + } +} + +my @tests = findTestsToRun(); + +die "no tests to run\n" if !@tests; + +my %counts; +my %tests; +my %imagesPresent; +my %imageDifferences; +my %durations; +my $count = 0; +my $leaksOutputFileNumber = 1; +my $totalLeaks = 0; + +my @toolArgs = (); +push @toolArgs, "--pixel-tests" if $pixelTests; +push @toolArgs, "--threaded" if $threaded; +push @toolArgs, "--complex-text" if $complexText; +push @toolArgs, "-"; + +my @diffToolArgs = (); +push @diffToolArgs, "--tolerance", $tolerance; + +$| = 1; + +my $dumpToolPID; +my $isDumpToolOpen = 0; +my $dumpToolCrashed = 0; +my $imageDiffToolPID; +my $isDiffToolOpen = 0; + +my $atLineStart = 1; +my $lastDirectory = ""; + +my $isHttpdOpen = 0; +my $isWebSocketServerOpen = 0; +my $webSocketServerPidFile = 0; +my $failedToStartWebSocketServer = 0; +# wss is disabled until all platforms support pyOpenSSL. +# my $webSocketSecureServerPID = 0; + +sub catch_pipe { $dumpToolCrashed = 1; } +$SIG{"PIPE"} = "catch_pipe"; + +print "Testing ", scalar @tests, " test cases"; +print " $iterations times" if ($iterations > 1); +print ", repeating each test $repeatEach times" if ($repeatEach > 1); +print ".\n"; + +my $overallStartTime = time; + +my %expectedResultPaths; + +my @originalTests = @tests; +# Add individual test repetitions +if ($repeatEach > 1) { + @tests = (); + foreach my $test (@originalTests) { + for (my $i = 0; $i < $repeatEach; $i++) { + push(@tests, $test); + } + } +} +# Add test set repetitions +for (my $i = 1; $i < $iterations; $i++) { + push(@tests, @originalTests); +} + +for my $test (@tests) { + my $newDumpTool = not $isDumpToolOpen; + openDumpTool(); + + my $base = stripExtension($test); + my $expectedExtension = ".txt"; + + my $dir = $base; + $dir =~ s|/[^/]+$||; + + if ($newDumpTool || $dir ne $lastDirectory) { + foreach my $logue (epiloguesAndPrologues($newDumpTool ? "" : $lastDirectory, $dir)) { + if (isCygwin()) { + $logue = toWindowsPath($logue); + } else { + $logue = canonpath($logue); + } + if ($verbose) { + print "running epilogue or prologue $logue\n"; + } + print OUT "$logue\n"; + # Throw away output from DumpRenderTree. 
+ # Once for the test output and once for pixel results (empty) + while (<IN>) { + last if /#EOF/; + } + while (<IN>) { + last if /#EOF/; + } + } + } + + if ($verbose) { + print "running $test -> "; + $atLineStart = 0; + } elsif (!$quiet) { + if ($dir ne $lastDirectory) { + print "\n" unless $atLineStart; + print "$dir "; + } + print "."; + $atLineStart = 0; + } + + $lastDirectory = $dir; + + my $result; + + my $startTime = time if $report10Slowest; + + # Try to read expected hash file for pixel tests + my $suffixExpectedHash = ""; + if ($pixelTests && !$resetResults) { + my $expectedPixelDir = expectedDirectoryForTest($base, 0, "png"); + if (open EXPECTEDHASH, File::Spec->catfile($expectedPixelDir, "$base-$expectedTag.checksum")) { + my $expectedHash = <EXPECTEDHASH>; + chomp($expectedHash); + close EXPECTEDHASH; + + # Format expected hash into a suffix string that is appended to the path / URL passed to DRT + $suffixExpectedHash = "'$expectedHash"; + } + } + + if ($test =~ /^http\//) { + configureAndOpenHTTPDIfNeeded(); + if ($test =~ /^http\/tests\/websocket\//) { + if ($test =~ /^websocket\/tests\/local\//) { + my $testPath = "$testDirectory/$test"; + if (isCygwin()) { + $testPath = toWindowsPath($testPath); + } else { + $testPath = canonpath($testPath); + } + print OUT "$testPath\n"; + } else { + if (openWebSocketServerIfNeeded()) { + my $path = canonpath($test); + if ($test =~ /^http\/tests\/websocket\/tests\/ssl\//) { + # wss is disabled until all platforms support pyOpenSSL. + print STDERR "Error: wss is disabled until all platforms support pyOpenSSL."; + } else { + $path =~ s/^http\/tests\///; + print OUT "http://127.0.0.1:$httpdPort/$path\n"; + } + } else { + # We failed to launch the WebSocket server. Display a useful error message rather than attempting + # to run tests that expect the server to be available. + my $errorMessagePath = "$testDirectory/http/tests/websocket/resources/server-failed-to-start.html"; + $errorMessagePath = isCygwin() ? toWindowsPath($errorMessagePath) : canonpath($errorMessagePath); + print OUT "$errorMessagePath\n"; + } + } + } elsif ($test !~ /^http\/tests\/local\// && $test !~ /^http\/tests\/ssl\// && $test !~ /^http\/tests\/wml\// && $test !~ /^http\/tests\/media\//) { + my $path = canonpath($test); + $path =~ s/^http\/tests\///; + print OUT "http://127.0.0.1:$httpdPort/$path$suffixExpectedHash\n"; + } elsif ($test =~ /^http\/tests\/ssl\//) { + my $path = canonpath($test); + $path =~ s/^http\/tests\///; + print OUT "https://127.0.0.1:$httpdSSLPort/$path$suffixExpectedHash\n"; + } else { + my $testPath = "$testDirectory/$test"; + if (isCygwin()) { + $testPath = toWindowsPath($testPath); + } else { + $testPath = canonpath($testPath); + } + print OUT "$testPath$suffixExpectedHash\n"; + } + } else { + my $testPath = "$testDirectory/$test"; + if (isCygwin()) { + $testPath = toWindowsPath($testPath); + } else { + $testPath = canonpath($testPath); + } + print OUT "$testPath$suffixExpectedHash\n" if defined $testPath; + } + + # DumpRenderTree is expected to dump two "blocks" to stdout for each test. + # Each block is terminated by a #EOF on a line by itself. + # The first block is the output of the test (in text, RenderTree or other formats). + # The second block is for optional pixel data in PNG format, and may be empty if + # pixel tests are not being run, or the test does not dump pixels (e.g. text tests). 
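As a rough illustration of the #EOF framing described above (this sketch is not part of the script and the helper name is hypothetical), reading a single framed block boils down to the loop below; the readFromDumpToolWithTimer() call that follows layers timeout handling and crash detection on top of the same idea.

    # Collect everything up to, but not including, the next "#EOF" marker line.
    sub readOneFramedBlock
    {
        my ($fh) = @_;
        my $block = "";
        while (my $line = <$fh>) {
            last if $line =~ /#EOF/;
            $block .= $line;
        }
        return $block;
    }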
+ my $readResults = readFromDumpToolWithTimer(IN, ERROR); + + my $actual = $readResults->{output}; + my $error = $readResults->{error}; + + $expectedExtension = $readResults->{extension}; + my $expectedFileName = "$base-$expectedTag.$expectedExtension"; + + my $isText = isTextOnlyTest($actual); + + my $expectedDir = expectedDirectoryForTest($base, $isText, $expectedExtension); + $expectedResultPaths{$base} = File::Spec->catfile($expectedDir, $expectedFileName); + + unless ($readResults->{status} eq "success") { + my $crashed = $readResults->{status} eq "crashed"; + testCrashedOrTimedOut($test, $base, $crashed, $actual, $error); + countFinishedTest($test, $base, $crashed ? "crash" : "timedout", 0); + last if stopRunningTestsEarlyIfNeeded(); + next; + } + + $durations{$test} = time - $startTime if $report10Slowest; + + my $expected; + + if (!$resetResults && open EXPECTED, "<", $expectedResultPaths{$base}) { + $expected = ""; + while (<EXPECTED>) { + next if $stripEditingCallbacks && $_ =~ /^EDITING DELEGATE:/; + $expected .= $_; + } + close EXPECTED; + } + + if ($ignoreMetrics && !$isText && defined $expected) { + ($actual, $expected) = stripMetrics($actual, $expected); + } + + if ($shouldCheckLeaks && $testsPerDumpTool == 1) { + print " $test -> "; + } + + my $actualPNG = ""; + my $diffPNG = ""; + my $diffPercentage = 0; + my $diffResult = "passed"; + + my $actualHash = ""; + my $expectedHash = ""; + my $actualPNGSize = 0; + + while (<IN>) { + last if /#EOF/; + if (/ActualHash: ([a-f0-9]{32})/) { + $actualHash = $1; + } elsif (/ExpectedHash: ([a-f0-9]{32})/) { + $expectedHash = $1; + } elsif (/Content-Length: (\d+)\s*/) { + $actualPNGSize = $1; + read(IN, $actualPNG, $actualPNGSize); + } + } + + if ($verbose && $pixelTests && !$resetResults && $actualPNGSize) { + if ($actualHash eq "" && $expectedHash eq "") { + printFailureMessageForTest($test, "WARNING: actual & expected pixel hashes are missing!"); + } elsif ($actualHash eq "") { + printFailureMessageForTest($test, "WARNING: actual pixel hash is missing!"); + } elsif ($expectedHash eq "") { + printFailureMessageForTest($test, "WARNING: expected pixel hash is missing!"); + } + } + + if ($actualPNGSize > 0) { + my $expectedPixelDir = expectedDirectoryForTest($base, 0, "png"); + my $expectedPNGPath = File::Spec->catfile($expectedPixelDir, "$base-$expectedTag.png"); + + if (!$resetResults && ($expectedHash ne $actualHash || ($actualHash eq "" && $expectedHash eq ""))) { + if (-f $expectedPNGPath) { + my $expectedPNGSize = -s $expectedPNGPath; + my $expectedPNG = ""; + open EXPECTEDPNG, $expectedPNGPath; + read(EXPECTEDPNG, $expectedPNG, $expectedPNGSize); + + openDiffTool(); + print DIFFOUT "Content-Length: $actualPNGSize\n"; + print DIFFOUT $actualPNG; + + print DIFFOUT "Content-Length: $expectedPNGSize\n"; + print DIFFOUT $expectedPNG; + + while (<DIFFIN>) { + last if /^error/ || /^diff:/; + if (/Content-Length: (\d+)\s*/) { + read(DIFFIN, $diffPNG, $1); + } + } + + if (/^diff: (.+)% (passed|failed)/) { + $diffPercentage = $1 + 0; + $imageDifferences{$base} = $diffPercentage; + $diffResult = $2; + } + + if (!$diffPercentage) { + printFailureMessageForTest($test, "pixel hash failed (but pixel test still passes)"); + } + } elsif ($verbose) { + printFailureMessageForTest($test, "WARNING: expected image is missing!"); + } + } + + if ($resetResults || !-f $expectedPNGPath) { + mkpath catfile($expectedPixelDir, dirname($base)) if $testDirectory ne $expectedPixelDir; + writeToFile($expectedPNGPath, $actualPNG); + } + + my $expectedChecksumPath = 
File::Spec->catfile($expectedPixelDir, "$base-$expectedTag.checksum"); + if ($actualHash ne "" && ($resetResults || !-f $expectedChecksumPath)) { + writeToFile($expectedChecksumPath, $actualHash); + } + } + + if (dumpToolDidCrash()) { + $result = "crash"; + testCrashedOrTimedOut($test, $base, 1, $actual, $error); + } elsif (!defined $expected) { + if ($verbose) { + print "new " . ($resetResults ? "result" : "test"); + } + $result = "new"; + + if ($generateNewResults || $resetResults) { + mkpath catfile($expectedDir, dirname($base)) if $testDirectory ne $expectedDir; + writeToFile("$expectedDir/$expectedFileName", $actual); + } + deleteExpectedAndActualResults($base); + recordActualResultsAndDiff($base, $actual); + if (!$resetResults) { + # Always print the file name for new tests, as they will probably need some manual inspection. + # in verbose mode we already printed the test case, so no need to do it again. + unless ($verbose) { + print "\n" unless $atLineStart; + print "$test -> "; + } + my $resultsDir = catdir($expectedDir, dirname($base)); + if (!$verbose) { + print "new"; + } + if ($generateNewResults) { + print " (results generated in $resultsDir)"; + } + print "\n" unless $atLineStart; + $atLineStart = 1; + } + } elsif ($actual eq $expected && $diffResult eq "passed") { + if ($verbose) { + print "succeeded\n"; + $atLineStart = 1; + } + $result = "match"; + deleteExpectedAndActualResults($base); + } else { + $result = "mismatch"; + + my $pixelTestFailed = $pixelTests && $diffPNG && $diffPNG ne ""; + my $testFailed = $actual ne $expected; + + my $message = !$testFailed ? "pixel test failed" : "failed"; + + if (($testFailed || $pixelTestFailed) && $addPlatformExceptions) { + my $testBase = catfile($testDirectory, $base); + my $expectedBase = catfile($expectedDir, $base); + my $testIsMaximallyPlatformSpecific = $testBase =~ m|^\Q$platformTestDirectory\E/|; + my $expectedResultIsMaximallyPlatformSpecific = $expectedBase =~ m|^\Q$platformTestDirectory\E/|; + if (!$testIsMaximallyPlatformSpecific && !$expectedResultIsMaximallyPlatformSpecific) { + mkpath catfile($platformTestDirectory, dirname($base)); + if ($testFailed) { + my $expectedFile = catfile($platformTestDirectory, "$expectedFileName"); + writeToFile("$expectedFile", $actual); + } + if ($pixelTestFailed) { + my $expectedFile = catfile($platformTestDirectory, "$base-$expectedTag.checksum"); + writeToFile("$expectedFile", $actualHash); + + $expectedFile = catfile($platformTestDirectory, "$base-$expectedTag.png"); + writeToFile("$expectedFile", $actualPNG); + } + $message .= " (results generated in $platformTestDirectory)"; + } + } + + printFailureMessageForTest($test, $message); + + my $dir = "$testResultsDirectory/$base"; + $dir =~ s|/([^/]+)$|| or die "Failed to find test name from base\n"; + my $testName = $1; + mkpath $dir; + + deleteExpectedAndActualResults($base); + recordActualResultsAndDiff($base, $actual); + + if ($pixelTestFailed) { + $imagesPresent{$base} = 1; + + writeToFile("$testResultsDirectory/$base-$actualTag.png", $actualPNG); + writeToFile("$testResultsDirectory/$base-$diffsTag.png", $diffPNG); + + my $expectedPixelDir = expectedDirectoryForTest($base, 0, "png"); + copy("$expectedPixelDir/$base-$expectedTag.png", "$testResultsDirectory/$base-$expectedTag.png"); + + open DIFFHTML, ">$testResultsDirectory/$base-$diffsTag.html" or die; + print DIFFHTML "<html>\n"; + print DIFFHTML "<head>\n"; + print DIFFHTML "<title>$base Image Compare</title>\n"; + print DIFFHTML "<script language=\"Javascript\" 
type=\"text/javascript\">\n"; + print DIFFHTML "var currentImage = 0;\n"; + print DIFFHTML "var imageNames = new Array(\"Actual\", \"Expected\");\n"; + print DIFFHTML "var imagePaths = new Array(\"$testName-$actualTag.png\", \"$testName-$expectedTag.png\");\n"; + if (-f "$testDirectory/$base-w3c.png") { + copy("$testDirectory/$base-w3c.png", "$testResultsDirectory/$base-w3c.png"); + print DIFFHTML "imageNames.push(\"W3C\");\n"; + print DIFFHTML "imagePaths.push(\"$testName-w3c.png\");\n"; + } + print DIFFHTML "function animateImage() {\n"; + print DIFFHTML " var image = document.getElementById(\"animatedImage\");\n"; + print DIFFHTML " var imageText = document.getElementById(\"imageText\");\n"; + print DIFFHTML " image.src = imagePaths[currentImage];\n"; + print DIFFHTML " imageText.innerHTML = imageNames[currentImage] + \" Image\";\n"; + print DIFFHTML " currentImage = (currentImage + 1) % imageNames.length;\n"; + print DIFFHTML " setTimeout('animateImage()',2000);\n"; + print DIFFHTML "}\n"; + print DIFFHTML "</script>\n"; + print DIFFHTML "</head>\n"; + print DIFFHTML "<body onLoad=\"animateImage();\">\n"; + print DIFFHTML "<table>\n"; + if ($diffPercentage) { + print DIFFHTML "<tr>\n"; + print DIFFHTML "<td>Difference between images: <a href=\"$testName-$diffsTag.png\">$diffPercentage%</a></td>\n"; + print DIFFHTML "</tr>\n"; + } + print DIFFHTML "<tr>\n"; + print DIFFHTML "<td><a href=\"" . toURL("$testDirectory/$test") . "\">test file</a></td>\n"; + print DIFFHTML "</tr>\n"; + print DIFFHTML "<tr>\n"; + print DIFFHTML "<td id=\"imageText\" style=\"text-weight: bold;\">Actual Image</td>\n"; + print DIFFHTML "</tr>\n"; + print DIFFHTML "<tr>\n"; + print DIFFHTML "<td><img src=\"$testName-$actualTag.png\" id=\"animatedImage\"></td>\n"; + print DIFFHTML "</tr>\n"; + print DIFFHTML "</table>\n"; + print DIFFHTML "</body>\n"; + print DIFFHTML "</html>\n"; + } + } + + if ($error) { + my $dir = dirname(File::Spec->catdir($testResultsDirectory, $base)); + mkpath $dir; + + writeToFile(File::Spec->catfile($testResultsDirectory, "$base-$errorTag.txt"), $error); + + $counts{error}++; + push @{$tests{error}}, $test; + } + + countFinishedTest($test, $base, $result, $isText); + last if stopRunningTestsEarlyIfNeeded(); +} + +my $totalTestingTime = time - $overallStartTime; +my $waitTime = getWaitTime(); +if ($waitTime > 0.1) { + my $normalizedTestingTime = $totalTestingTime - $waitTime; + printf "\n%0.2fs HTTPD waiting time\n", $waitTime . ""; + printf "%0.2fs normalized testing time", $normalizedTestingTime . ""; +} +printf "\n%0.2fs total testing time\n", $totalTestingTime . ""; + +!$isDumpToolOpen || die "Failed to close $dumpToolName.\n"; + +$isHttpdOpen = !closeHTTPD(); +closeWebSocketServer(); + +# Because multiple instances of this script are running concurrently we cannot +# safely delete this symlink. +# system "rm /tmp/LayoutTests"; + +# FIXME: Do we really want to check the image-comparison tool for leaks every time? 
+if ($isDiffToolOpen && $shouldCheckLeaks) { + $totalLeaks += countAndPrintLeaks("ImageDiff", $imageDiffToolPID, "$testResultsDirectory/ImageDiff-leaks.txt"); +} + +if ($totalLeaks) { + if ($mergeDepth) { + parseLeaksandPrintUniqueLeaks(); + } else { + print "\nWARNING: $totalLeaks total leaks found!\n"; + print "See above for individual leaks results.\n" if ($leaksOutputFileNumber > 2); + } +} + +close IN; +close OUT; +close ERROR; + +if ($report10Slowest) { + print "\n\nThe 10 slowest tests:\n\n"; + my $count = 0; + for my $test (sort slowestcmp keys %durations) { + printf "%0.2f secs: %s\n", $durations{$test}, $test; + last if ++$count == 10; + } +} + +print "\n"; + +if ($skippedOnly && $counts{"match"}) { + print "The following tests are in the Skipped file (" . File::Spec->abs2rel("$platformTestDirectory/Skipped", $testDirectory) . "), but succeeded:\n"; + foreach my $test (@{$tests{"match"}}) { + print " $test\n"; + } +} + +if ($resetResults || ($counts{match} && $counts{match} == $count)) { + print "all $count test cases succeeded\n"; + unlink $testResults; + exit; +} + +printResults(); + +mkpath $testResultsDirectory; + +open HTML, ">", $testResults or die "Failed to open $testResults. $!"; +print HTML "<html>\n"; +print HTML "<head>\n"; +print HTML "<title>Layout Test Results</title>\n"; +print HTML "</head>\n"; +print HTML "<body>\n"; + +if ($ignoreMetrics) { + print HTML "<h4>Tested with metrics ignored.</h4>"; +} + +print HTML htmlForResultsSection(@{$tests{mismatch}}, "Tests where results did not match expected results", \&linksForMismatchTest); +print HTML htmlForResultsSection(@{$tests{timedout}}, "Tests that timed out", \&linksForErrorTest); +print HTML htmlForResultsSection(@{$tests{crash}}, "Tests that caused the DumpRenderTree tool to crash", \&linksForErrorTest); +print HTML htmlForResultsSection(@{$tests{error}}, "Tests that had stderr output", \&linksForErrorTest); +print HTML htmlForResultsSection(@{$tests{new}}, "Tests that had no expected results (probably new)", \&linksForNewTest); + +print HTML "</body>\n"; +print HTML "</html>\n"; +close HTML; + +my @configurationArgs = argumentsForConfiguration(); + +if (isGtk()) { + system "Tools/Scripts/run-launcher", @configurationArgs, "file://".$testResults if $launchSafari; +} elsif (isQt()) { + unshift @configurationArgs, qw(-graphicssystem raster -style windows); + if (isCygwin()) { + $testResults = "/" . toWindowsPath($testResults); + $testResults =~ s/\\/\//g; + } + system "Tools/Scripts/run-launcher", @configurationArgs, "file://".$testResults if $launchSafari; +} elsif (isCygwin()) { + system "cygstart", $testResults if $launchSafari; +} elsif (isWindows()) { + system "start", $testResults if $launchSafari; +} else { + system "Tools/Scripts/run-safari", @configurationArgs, "-NSOpen", $testResults if $launchSafari; +} + +closeCygpaths() if isCygwin(); + +exit 1; + +sub countAndPrintLeaks($$$) +{ + my ($dumpToolName, $dumpToolPID, $leaksFilePath) = @_; + + print "\n" unless $atLineStart; + $atLineStart = 1; + + # We are excluding the following reported leaks so they don't get in our way when looking for WebKit leaks: + # This allows us ignore known leaks and only be alerted when new leaks occur. Some leaks are in the old + # versions of the system frameworks that are being used by the leaks bots. Even though a leak has been + # fixed, it will be listed here until the bot has been updated with the newer frameworks. 
+ + my @typesToExclude = ( + ); + + my @callStacksToExclude = ( + "Flash_EnforceLocalSecurity" # leaks in Flash plug-in code, rdar://problem/4449747 + ); + + if (isTiger()) { + # Leak list for the version of Tiger used on the build bot. + push @callStacksToExclude, ( + "CFRunLoopRunSpecific \\| malloc_zone_malloc", "CFRunLoopRunSpecific \\| CFAllocatorAllocate ", # leak in CFRunLoopRunSpecific, rdar://problem/4670839 + "CGImageSourceGetPropertiesAtIndex", # leak in ImageIO, rdar://problem/4628809 + "FOGetCoveredUnicodeChars", # leak in ATS, rdar://problem/3943604 + "GetLineDirectionPreference", "InitUnicodeUtilities", # leaks tool falsely reporting leak in CFNotificationCenterAddObserver, rdar://problem/4964790 + "ICCFPrefWrapper::GetPrefDictionary", # leaks in Internet Config. code, rdar://problem/4449794 + "NSHTTPURLProtocol setResponseHeader:", # leak in multipart/mixed-replace handling in Foundation, no Radar, but fixed in Leopard + "NSURLCache cachedResponseForRequest", # leak in CFURL cache, rdar://problem/4768430 + "PCFragPrepareClosureFromFile", # leak in Code Fragment Manager, rdar://problem/3426998 + "WebCore::Selection::toRange", # bug in 'leaks', rdar://problem/4967949 + "WebCore::SubresourceLoader::create", # bug in 'leaks', rdar://problem/4985806 + "_CFPreferencesDomainDeepCopyDictionary", # leak in CFPreferences, rdar://problem/4220786 + "_objc_msgForward", # leak in NSSpellChecker, rdar://problem/4965278 + "gldGetString", # leak in OpenGL, rdar://problem/5013699 + "_setDefaultUserInfoFromURL", # leak in NSHTTPAuthenticator, rdar://problem/5546453 + "SSLHandshake", # leak in SSL, rdar://problem/5546440 + "SecCertificateCreateFromData", # leak in SSL code, rdar://problem/4464397 + ); + push @typesToExclude, ( + "THRD", # bug in 'leaks', rdar://problem/3387783 + "DRHT", # ditto (endian little hate i) + ); + } + + if (isLeopard()) { + # Leak list for the version of Leopard used on the build bot. + push @callStacksToExclude, ( + "CFHTTPMessageAppendBytes", # leak in CFNetwork, rdar://problem/5435912 + "sendDidReceiveDataCallback", # leak in CFNetwork, rdar://problem/5441619 + "_CFHTTPReadStreamReadMark", # leak in CFNetwork, rdar://problem/5441468 + "httpProtocolStart", # leak in CFNetwork, rdar://problem/5468837 + "_CFURLConnectionSendCallbacks", # leak in CFNetwork, rdar://problem/5441600 + "DispatchQTMsg", # leak in QuickTime, PPC only, rdar://problem/5667132 + "QTMovieContentView createVisualContext", # leak in QuickTime, PPC only, rdar://problem/5667132 + "_CopyArchitecturesForJVMVersion", # leak in Java, rdar://problem/5910823 + ); + } + + if (isSnowLeopard()) { + push @callStacksToExclude, ( + "readMakerNoteProps", # <rdar://problem/7156432> leak in ImageIO + "QTKitMovieControllerView completeUISetup", # <rdar://problem/7155156> leak in QTKit + "getVMInitArgs", # <rdar://problem/7714444> leak in Java + "Java_java_lang_System_initProperties", # <rdar://problem/7714465> leak in Java + "glrCompExecuteKernel", # <rdar://problem/7815391> leak in graphics driver while using OpenGL + ); + } + + if (isDarwin() && !isTiger() && !isLeopard() && !isSnowLeopard()) { + push @callStacksToExclude, ( + "CGGradientCreateWithColorComponents", # leak in CoreGraphics, <rdar://problem/7888492> + ); + } + + my $leaksTool = sourceDir() . "/Tools/Scripts/run-leaks"; + my $excludeString = "--exclude-callstack '" . (join "' --exclude-callstack '", @callStacksToExclude) . "'"; + $excludeString .= " --exclude-type '" . (join "' --exclude-type '", @typesToExclude) . 
"'" if @typesToExclude; + + print " ? checking for leaks in $dumpToolName\n"; + my $leaksOutput = `$leaksTool $excludeString $dumpToolPID`; + my ($count, $bytes) = $leaksOutput =~ /Process $dumpToolPID: (\d+) leaks? for (\d+) total/; + my ($excluded) = $leaksOutput =~ /(\d+) leaks? excluded/; + + my $adjustedCount = $count; + $adjustedCount -= $excluded if $excluded; + + if (!$adjustedCount) { + print " - no leaks found\n"; + unlink $leaksFilePath; + return 0; + } else { + my $dir = $leaksFilePath; + $dir =~ s|/[^/]+$|| or die; + mkpath $dir; + + if ($excluded) { + print " + $adjustedCount leaks ($bytes bytes including $excluded excluded leaks) were found, details in $leaksFilePath\n"; + } else { + print " + $count leaks ($bytes bytes) were found, details in $leaksFilePath\n"; + } + + writeToFile($leaksFilePath, $leaksOutput); + + push @leaksFilenames, $leaksFilePath; + } + + return $adjustedCount; +} + +sub writeToFile($$) +{ + my ($filePath, $contents) = @_; + open NEWFILE, ">", "$filePath" or die "Could not create $filePath. $!\n"; + print NEWFILE $contents; + close NEWFILE; +} + +# Break up a path into the directory (with slash) and base name. +sub splitpath($) +{ + my ($path) = @_; + + my $pathSeparator = "/"; + my $dirname = dirname($path) . $pathSeparator; + $dirname = "" if $dirname eq "." . $pathSeparator; + + return ($dirname, basename($path)); +} + +# Sort first by directory, then by file, so all paths in one directory are grouped +# rather than being interspersed with items from subdirectories. +# Use numericcmp to sort directory and filenames to make order logical. +sub pathcmp($$) +{ + my ($patha, $pathb) = @_; + + my ($dira, $namea) = splitpath($patha); + my ($dirb, $nameb) = splitpath($pathb); + + return numericcmp($dira, $dirb) if $dira ne $dirb; + return numericcmp($namea, $nameb); +} + +# Sort numeric parts of strings as numbers, other parts as strings. +# Makes 1.33 come after 1.3, which is cool. +sub numericcmp($$) +{ + my ($aa, $bb) = @_; + + my @a = split /(\d+)/, $aa; + my @b = split /(\d+)/, $bb; + + # Compare one chunk at a time. + # Each chunk is either all numeric digits, or all not numeric digits. + while (@a && @b) { + my $a = shift @a; + my $b = shift @b; + + # Use numeric comparison if chunks are non-equal numbers. + return $a <=> $b if $a =~ /^\d/ && $b =~ /^\d/ && $a != $b; + + # Use string comparison if chunks are any other kind of non-equal string. + return $a cmp $b if $a ne $b; + } + + # One of the two is now empty; compare lengths for result in this case. + return @a <=> @b; +} + +# Sort slowest tests first. +sub slowestcmp($$) +{ + my ($testa, $testb) = @_; + + my $dura = $durations{$testa}; + my $durb = $durations{$testb}; + return $durb <=> $dura if $dura != $durb; + return pathcmp($testa, $testb); +} + +sub launchWithEnv(\@\%) +{ + my ($args, $env) = @_; + + # Dump the current environment as perl code and then put it in quotes so it is one parameter. 
+ my $environmentDumper = Data::Dumper->new([\%{$env}], [qw(*ENV)]); + $environmentDumper->Indent(0); + $environmentDumper->Purity(1); + my $allEnvVars = $environmentDumper->Dump(); + unshift @{$args}, "\"$allEnvVars\""; + + my $execScript = File::Spec->catfile(sourceDir(), qw(Tools Scripts execAppWithEnv)); + unshift @{$args}, $perlInterpreter, $execScript; + return @{$args}; +} + +sub resolveAndMakeTestResultsDirectory() +{ + my $absTestResultsDirectory = File::Spec->rel2abs(glob $testResultsDirectory); + mkpath $absTestResultsDirectory; + return $absTestResultsDirectory; +} + +sub openDiffTool() +{ + return if $isDiffToolOpen; + return if !$pixelTests; + + my %CLEAN_ENV; + $CLEAN_ENV{MallocStackLogging} = 1 if $shouldCheckLeaks; + $imageDiffToolPID = open2(\*DIFFIN, \*DIFFOUT, $imageDiffTool, launchWithEnv(@diffToolArgs, %CLEAN_ENV)) or die "unable to open $imageDiffTool\n"; + $isDiffToolOpen = 1; +} + +sub buildDumpTool($) +{ + my ($dumpToolName) = @_; + + my $dumpToolBuildScript = "build-" . lc($dumpToolName); + print STDERR "Running $dumpToolBuildScript\n"; + + local *DEVNULL; + my ($childIn, $childOut, $childErr); + if ($quiet) { + open(DEVNULL, ">", File::Spec->devnull()) or die "Failed to open /dev/null"; + $childOut = ">&DEVNULL"; + $childErr = ">&DEVNULL"; + } else { + # When not quiet, let the child use our stdout/stderr. + $childOut = ">&STDOUT"; + $childErr = ">&STDERR"; + } + + my @args = argumentsForConfiguration(); + my $buildProcess = open3($childIn, $childOut, $childErr, $perlInterpreter, File::Spec->catfile(qw(Tools Scripts), $dumpToolBuildScript), @args) or die "Failed to run build-dumprendertree"; + close($childIn); + waitpid $buildProcess, 0; + my $buildResult = $?; + close($childOut); + close($childErr); + + close DEVNULL if ($quiet); + + if ($buildResult) { + print STDERR "Compiling $dumpToolName failed!\n"; + exit exitStatus($buildResult); + } +} + +sub openDumpTool() +{ + return if $isDumpToolOpen; + + if ($verbose && $testsPerDumpTool != 1) { + print "| Opening DumpTool |\n"; + } + + my %CLEAN_ENV; + + # Generic environment variables + if (defined $ENV{'WEBKIT_TESTFONTS'}) { + $CLEAN_ENV{WEBKIT_TESTFONTS} = $ENV{'WEBKIT_TESTFONTS'}; + } + + # unique temporary directory for each DumpRendertree - needed for running more DumpRenderTree in parallel + $CLEAN_ENV{DUMPRENDERTREE_TEMP} = File::Temp::tempdir('DumpRenderTree-XXXXXX', TMPDIR => 1, CLEANUP => 1); + $CLEAN_ENV{XML_CATALOG_FILES} = ""; # work around missing /etc/catalog <rdar://problem/4292995> + + # Platform spesifics + if (isLinux()) { + if (defined $ENV{'DISPLAY'}) { + $CLEAN_ENV{DISPLAY} = $ENV{'DISPLAY'}; + } else { + $CLEAN_ENV{DISPLAY} = ":1"; + } + if (defined $ENV{'XAUTHORITY'}) { + $CLEAN_ENV{XAUTHORITY} = $ENV{'XAUTHORITY'}; + } + + $CLEAN_ENV{HOME} = $ENV{'HOME'}; + + if (defined $ENV{'LD_LIBRARY_PATH'}) { + $CLEAN_ENV{LD_LIBRARY_PATH} = $ENV{'LD_LIBRARY_PATH'}; + } + if (defined $ENV{'DBUS_SESSION_BUS_ADDRESS'}) { + $CLEAN_ENV{DBUS_SESSION_BUS_ADDRESS} = $ENV{'DBUS_SESSION_BUS_ADDRESS'}; + } + } elsif (isDarwin()) { + if (defined $ENV{'DYLD_LIBRARY_PATH'}) { + $CLEAN_ENV{DYLD_LIBRARY_PATH} = $ENV{'DYLD_LIBRARY_PATH'}; + } + + $CLEAN_ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $CLEAN_ENV{DYLD_INSERT_LIBRARIES} = "/usr/lib/libgmalloc.dylib" if $guardMalloc; + } elsif (isCygwin()) { + $CLEAN_ENV{HOMEDRIVE} = $ENV{'HOMEDRIVE'}; + $CLEAN_ENV{HOMEPATH} = $ENV{'HOMEPATH'}; + $CLEAN_ENV{_NT_SYMBOL_PATH} = $ENV{_NT_SYMBOL_PATH}; + + setPathForRunningWebKitApp(\%CLEAN_ENV); + } + + # Port specifics + if 
(isGtk()) { + $CLEAN_ENV{GTK_MODULES} = "gail"; + $CLEAN_ENV{WEBKIT_INSPECTOR_PATH} = "$productDir/resources/inspector"; + } + + if (isQt()) { + $CLEAN_ENV{QTWEBKIT_PLUGIN_PATH} = productDir() . "/lib/plugins"; + $CLEAN_ENV{QT_DRT_WEBVIEW_MODE} = $ENV{"QT_DRT_WEBVIEW_MODE"}; + } + + my @args = ($dumpTool, @toolArgs); + if (isAppleMacWebKit() and !isTiger()) { + unshift @args, "arch", "-" . architecture(); + } + + if ($useValgrind) { + unshift @args, "valgrind", "--suppressions=$platformBaseDirectory/qt/SuppressedValgrindErrors"; + } + + if ($useWebKitTestRunner) { + # Make WebKitTestRunner use a similar timeout. We don't use the exact same timeout to avoid + # race conditions. + push @args, "--timeout", $timeoutSeconds - 5; + } + + $CLEAN_ENV{MallocStackLogging} = 1 if $shouldCheckLeaks; + + $dumpToolPID = open3(\*OUT, \*IN, \*ERROR, launchWithEnv(@args, %CLEAN_ENV)) or die "Failed to start tool: $dumpTool\n"; + $isDumpToolOpen = 1; + $dumpToolCrashed = 0; +} + +sub closeDumpTool() +{ + return if !$isDumpToolOpen; + + if ($verbose && $testsPerDumpTool != 1) { + print "| Closing DumpTool |\n"; + } + + close IN; + close OUT; + waitpid $dumpToolPID, 0; + + # check for WebCore counter leaks. + if ($shouldCheckLeaks) { + while (<ERROR>) { + print; + } + } + close ERROR; + $isDumpToolOpen = 0; +} + +sub dumpToolDidCrash() +{ + return 1 if $dumpToolCrashed; + return 0 unless $isDumpToolOpen; + my $pid = waitpid(-1, WNOHANG); + return 1 if ($pid == $dumpToolPID); + + # On Mac OS X, crashing may be significantly delayed by crash reporter. + return 0 unless isAppleMacWebKit(); + + return DumpRenderTreeSupport::processIsCrashing($dumpToolPID); +} + +sub configureAndOpenHTTPDIfNeeded() +{ + return if $isHttpdOpen; + my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory(); + my $listen = "127.0.0.1:$httpdPort"; + my @args = ( + "-c", "CustomLog \"$absTestResultsDirectory/access_log.txt\" common", + "-c", "ErrorLog \"$absTestResultsDirectory/error_log.txt\"", + "-C", "Listen $listen" + ); + + my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory); + @args = (@defaultArgs, @args); + + waitForHTTPDLock() if $shouldWaitForHTTPD; + $isHttpdOpen = openHTTPD(@args); +} + +sub checkPythonVersion() +{ + # we have not chdir to sourceDir yet. + system $perlInterpreter, File::Spec->catfile(sourceDir(), qw(Tools Scripts ensure-valid-python)), "--check-only"; + return exitStatus($?) == 0; +} + +sub openWebSocketServerIfNeeded() +{ + return 1 if $isWebSocketServerOpen; + return 0 if $failedToStartWebSocketServer; + + my $webSocketHandlerDir = "$testDirectory"; + my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory(); + $webSocketServerPidFile = "$absTestResultsDirectory/websocket.pid"; + + my @args = ( + "Tools/Scripts/new-run-webkit-websocketserver", + "--server", "start", + "--port", "$webSocketPort", + "--root", "$webSocketHandlerDir", + "--output-dir", "$absTestResultsDirectory", + "--pidfile", "$webSocketServerPidFile" + ); + system "/usr/bin/python", @args; + + $isWebSocketServerOpen = 1; + return 1; +} + +sub closeWebSocketServer() +{ + return if !$isWebSocketServerOpen; + + my @args = ( + "Tools/Scripts/new-run-webkit-websocketserver", + "--server", "stop", + "--pidfile", "$webSocketServerPidFile" + ); + system "/usr/bin/python", @args; + unlink "$webSocketServerPidFile"; + + # wss is disabled until all platforms support pyOpenSSL. 
+ $isWebSocketServerOpen = 0; +} + +sub fileNameWithNumber($$) +{ + my ($base, $number) = @_; + return "$base$number" if ($number > 1); + return $base; +} + +sub processIgnoreTests($$) +{ + my @ignoreList = split(/\s*,\s*/, shift); + my $listName = shift; + + my $disabledSuffix = "-disabled"; + + my $addIgnoredDirectories = sub { + return () if exists $ignoredLocalDirectories{basename($File::Find::dir)}; + $ignoredDirectories{File::Spec->abs2rel($File::Find::dir, $testDirectory)} = 1; + return @_; + }; + foreach my $item (@ignoreList) { + my $path = catfile($testDirectory, $item); + if (-d $path) { + $ignoredDirectories{$item} = 1; + find({ preprocess => $addIgnoredDirectories, wanted => sub {} }, $path); + } + elsif (-f $path) { + $ignoredFiles{$item} = 1; + } elsif (-f $path . $disabledSuffix) { + # The test is disabled, so do nothing. + } else { + print "$listName list contained '$item', but no file of that name could be found\n"; + } + } +} + +sub stripExtension($) +{ + my ($test) = @_; + + $test =~ s/\.[a-zA-Z]+$//; + return $test; +} + +sub isTextOnlyTest($) +{ + my ($actual) = @_; + my $isText; + if ($actual =~ /^layer at/ms) { + $isText = 0; + } else { + $isText = 1; + } + return $isText; +} + +sub expectedDirectoryForTest($;$;$) +{ + my ($base, $isText, $expectedExtension) = @_; + + my @directories = @platformResultHierarchy; + push @directories, map { catdir($platformBaseDirectory, $_) } qw(mac-snowleopard mac) if isAppleWinWebKit(); + push @directories, $expectedDirectory; + + # If we already have expected results, just return their location. + foreach my $directory (@directories) { + return $directory if -f File::Spec->catfile($directory, "$base-$expectedTag.$expectedExtension"); + } + + # For cross-platform tests, text-only results should go in the cross-platform directory, + # while render tree dumps should go in the least-specific platform directory. + return $isText ? $expectedDirectory : $platformResultHierarchy[$#platformResultHierarchy]; +} + +sub countFinishedTest($$$$) +{ + my ($test, $base, $result, $isText) = @_; + + if (($count + 1) % $testsPerDumpTool == 0 || $count == $#tests) { + if ($shouldCheckLeaks) { + my $fileName; + if ($testsPerDumpTool == 1) { + $fileName = File::Spec->catfile($testResultsDirectory, "$base-leaks.txt"); + } else { + $fileName = File::Spec->catfile($testResultsDirectory, fileNameWithNumber($dumpToolName, $leaksOutputFileNumber) . "-leaks.txt"); + } + my $leakCount = countAndPrintLeaks($dumpToolName, $dumpToolPID, $fileName); + $totalLeaks += $leakCount; + $leaksOutputFileNumber++ if ($leakCount); + } + + closeDumpTool(); + } + + $count++; + $counts{$result}++; + push @{$tests{$result}}, $test; +} + +sub testCrashedOrTimedOut($$$$$) +{ + my ($test, $base, $didCrash, $actual, $error) = @_; + + printFailureMessageForTest($test, $didCrash ? "crashed" : "timed out"); + + sampleDumpTool() unless $didCrash; + + my $dir = dirname(File::Spec->catdir($testResultsDirectory, $base)); + mkpath $dir; + + deleteExpectedAndActualResults($base); + + if (defined($error) && length($error)) { + writeToFile(File::Spec->catfile($testResultsDirectory, "$base-$errorTag.txt"), $error); + } + + recordActualResultsAndDiff($base, $actual); + + kill 9, $dumpToolPID unless $didCrash; + + closeDumpTool(); + + return unless isCygwin() && !$didCrash && $base =~ /^http/; + # On Cygwin, http tests timing out can be a symptom of a non-responsive httpd. + # If we timed out running an http test, try restarting httpd. 
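+    # Descriptive note (an inference; closeHTTPD() is defined elsewhere in this script): if the
+    # shutdown succeeds, $isHttpdOpen is cleared and configureAndOpenHTTPDIfNeeded() relaunches
+    # httpd; if it fails, the flag stays set so a second instance is not started.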
+ $isHttpdOpen = !closeHTTPD(); + configureAndOpenHTTPDIfNeeded(); +} + +sub printFailureMessageForTest($$) +{ + my ($test, $description) = @_; + + unless ($verbose) { + print "\n" unless $atLineStart; + print "$test -> "; + } + print "$description\n"; + $atLineStart = 1; +} + +my %cygpaths = (); + +sub openCygpathIfNeeded($) +{ + my ($options) = @_; + + return unless isCygwin(); + return $cygpaths{$options} if $cygpaths{$options} && $cygpaths{$options}->{"open"}; + + local (*CYGPATHIN, *CYGPATHOUT); + my $pid = open2(\*CYGPATHIN, \*CYGPATHOUT, "cygpath -f - $options"); + my $cygpath = { + "pid" => $pid, + "in" => *CYGPATHIN, + "out" => *CYGPATHOUT, + "open" => 1 + }; + + $cygpaths{$options} = $cygpath; + + return $cygpath; +} + +sub closeCygpaths() +{ + return unless isCygwin(); + + foreach my $cygpath (values(%cygpaths)) { + close $cygpath->{"in"}; + close $cygpath->{"out"}; + waitpid($cygpath->{"pid"}, 0); + $cygpath->{"open"} = 0; + + } +} + +sub convertPathUsingCygpath($$) +{ + my ($path, $options) = @_; + + # cygpath -f (at least in Cygwin 1.7) converts spaces into newlines. We remove spaces here and + # add them back in after conversion to work around this. + my $spaceSubstitute = "__NOTASPACE__"; + $path =~ s/ /\Q$spaceSubstitute\E/g; + + my $cygpath = openCygpathIfNeeded($options); + local *inFH = $cygpath->{"in"}; + local *outFH = $cygpath->{"out"}; + print outFH $path . "\n"; + my $convertedPath = <inFH>; + chomp($convertedPath) if defined $convertedPath; + + $convertedPath =~ s/\Q$spaceSubstitute\E/ /g; + return $convertedPath; +} + +sub toCygwinPath($) +{ + my ($path) = @_; + return unless isCygwin(); + + return convertPathUsingCygpath($path, "-u"); +} + +sub toWindowsPath($) +{ + my ($path) = @_; + return unless isCygwin(); + + return convertPathUsingCygpath($path, "-w"); +} + +sub toURL($) +{ + my ($path) = @_; + + if ($useRemoteLinksToTests) { + my $relativePath = File::Spec->abs2rel($path, $testDirectory); + + # If the file is below the test directory then convert it into a link to the file in SVN + if ($relativePath !~ /^\.\.\//) { + my $revision = svnRevisionForDirectory($testDirectory); + my $svnPath = pathRelativeToSVNRepositoryRootForPath($path); + return "http://trac.webkit.org/export/$revision/$svnPath"; + } + } + + return $path unless isCygwin(); + + return "file:///" . convertPathUsingCygpath($path, "-m"); +} + +sub validateSkippedArg($$;$) +{ + my ($option, $value, $value2) = @_; + my %validSkippedValues = map { $_ => 1 } qw(default ignore only); + $value = lc($value); + die "Invalid argument '" . $value . "' for option $option" unless $validSkippedValues{$value}; + $treatSkipped = $value; +} + +sub htmlForResultsSection(\@$&) +{ + my ($tests, $description, $linkGetter) = @_; + + my @html = (); + return join("\n", @html) unless @{$tests}; + + push @html, "<p>$description:</p>"; + push @html, "<table>"; + foreach my $test (@{$tests}) { + push @html, "<tr>"; + push @html, "<td><a href=\"" . toURL("$testDirectory/$test") . 
"\">$test</a></td>"; + foreach my $link (@{&{$linkGetter}($test)}) { + push @html, "<td><a href=\"$link->{href}\">$link->{text}</a></td>"; + } + push @html, "</tr>"; + } + push @html, "</table>"; + + return join("\n", @html); +} + +sub linksForExpectedAndActualResults($) +{ + my ($base) = @_; + + my @links = (); + + return \@links unless -s "$testResultsDirectory/$base-$diffsTag.txt"; + + my $expectedResultPath = $expectedResultPaths{$base}; + my ($expectedResultFileName, $expectedResultsDirectory, $expectedResultExtension) = fileparse($expectedResultPath, qr{\.[^.]+$}); + + push @links, { href => "$base-$expectedTag$expectedResultExtension", text => "expected" }; + push @links, { href => "$base-$actualTag$expectedResultExtension", text => "actual" }; + push @links, { href => "$base-$diffsTag.txt", text => "diff" }; + push @links, { href => "$base-$prettyDiffTag.html", text => "pretty diff" }; + + return \@links; +} + +sub linksForMismatchTest +{ + my ($test) = @_; + + my @links = (); + + my $base = stripExtension($test); + + push @links, @{linksForExpectedAndActualResults($base)}; + return \@links unless $pixelTests && $imagesPresent{$base}; + + push @links, { href => "$base-$expectedTag.png", text => "expected image" }; + push @links, { href => "$base-$diffsTag.html", text => "image diffs" }; + push @links, { href => "$base-$diffsTag.png", text => "$imageDifferences{$base}%" }; + + return \@links; +} + +sub linksForErrorTest +{ + my ($test) = @_; + + my @links = (); + + my $base = stripExtension($test); + + push @links, @{linksForExpectedAndActualResults($base)}; + push @links, { href => "$base-$errorTag.txt", text => "stderr" }; + + return \@links; +} + +sub linksForNewTest +{ + my ($test) = @_; + + my @links = (); + + my $base = stripExtension($test); + + my $expectedResultPath = $expectedResultPaths{$base}; + my ($expectedResultFileName, $expectedResultsDirectory, $expectedResultExtension) = fileparse($expectedResultPath, qr{\.[^.]+$}); + + push @links, { href => "$base-$actualTag$expectedResultExtension", text => "result" }; + if ($pixelTests && $imagesPresent{$base}) { + push @links, { href => "$base-$expectedTag.png", text => "image" }; + } + + return \@links; +} + +sub deleteExpectedAndActualResults($) +{ + my ($base) = @_; + + unlink "$testResultsDirectory/$base-$actualTag.txt"; + unlink "$testResultsDirectory/$base-$diffsTag.txt"; + unlink "$testResultsDirectory/$base-$errorTag.txt"; +} + +sub recordActualResultsAndDiff($$) +{ + my ($base, $actualResults) = @_; + + return unless defined($actualResults) && length($actualResults); + + my $expectedResultPath = $expectedResultPaths{$base}; + my ($expectedResultFileNameMinusExtension, $expectedResultDirectoryPath, $expectedResultExtension) = fileparse($expectedResultPath, qr{\.[^.]+$}); + my $actualResultsPath = File::Spec->catfile($testResultsDirectory, "$base-$actualTag$expectedResultExtension"); + my $copiedExpectedResultsPath = File::Spec->catfile($testResultsDirectory, "$base-$expectedTag$expectedResultExtension"); + + mkpath(dirname($actualResultsPath)); + writeToFile("$actualResultsPath", $actualResults); + + if (-f $expectedResultPath) { + copy("$expectedResultPath", "$copiedExpectedResultsPath"); + } else { + open EMPTY, ">$copiedExpectedResultsPath"; + close EMPTY; + } + + my $diffOuputBasePath = File::Spec->catfile($testResultsDirectory, $base); + my $diffOutputPath = "$diffOuputBasePath-$diffsTag.txt"; + system "diff -u \"$copiedExpectedResultsPath\" \"$actualResultsPath\" > \"$diffOutputPath\""; + + my 
$prettyDiffOutputPath = "$diffOuputBasePath-$prettyDiffTag.html"; + my $prettyPatchPath = "Websites/bugs.webkit.org/PrettyPatch/"; + my $prettifyPath = "$prettyPatchPath/prettify.rb"; + system "ruby -I \"$prettyPatchPath\" \"$prettifyPath\" \"$diffOutputPath\" > \"$prettyDiffOutputPath\""; +} + +sub buildPlatformResultHierarchy() +{ + mkpath($platformTestDirectory) if ($platform eq "undefined" && !-d "$platformTestDirectory"); + + my @platforms; + + my $isMac = $platform =~ /^mac/; + my $isWin = $platform =~ /^win/; + if ($isMac || $isWin) { + my $effectivePlatform = $platform; + if ($platform eq "mac-wk2" || $platform eq "win-wk2") { + push @platforms, $platform; + $effectivePlatform = $realPlatform; + } + + my @platformList = $isMac ? @macPlatforms : @winPlatforms; + my $i; + for ($i = 0; $i < @platformList; $i++) { + last if $platformList[$i] eq $effectivePlatform; + } + for (; $i < @platformList; $i++) { + push @platforms, $platformList[$i]; + } + } elsif ($platform =~ /^qt-/) { + push @platforms, $platform; + push @platforms, "qt"; + } else { + @platforms = $platform; + } + + my @hierarchy; + for (my $i = 0; $i < @platforms; $i++) { + my $scoped = catdir($platformBaseDirectory, $platforms[$i]); + push(@hierarchy, $scoped) if (-d $scoped); + } + + return @hierarchy; +} + +sub buildPlatformTestHierarchy(@) +{ + my (@platformHierarchy) = @_; + return @platformHierarchy if (@platformHierarchy < 2); + if ($platformHierarchy[0] =~ /mac-wk2/) { + return ($platformHierarchy[0], $platformHierarchy[1], $platformHierarchy[$#platformHierarchy]); + } + return ($platformHierarchy[0], $platformHierarchy[$#platformHierarchy]); +} + +sub epiloguesAndPrologues($$) +{ + my ($lastDirectory, $directory) = @_; + my @lastComponents = split('/', $lastDirectory); + my @components = split('/', $directory); + + while (@lastComponents) { + if (!defined($components[0]) || $lastComponents[0] ne $components[0]) { + last; + } + shift @components; + shift @lastComponents; + } + + my @result; + my $leaving = $lastDirectory; + foreach (@lastComponents) { + my $epilogue = $leaving . "/resources/run-webkit-tests-epilogue.html"; + foreach (@platformResultHierarchy) { + push @result, catdir($_, $epilogue) if (stat(catdir($_, $epilogue))); + } + push @result, catdir($testDirectory, $epilogue) if (stat(catdir($testDirectory, $epilogue))); + $leaving =~ s|(^\|/)[^/]+$||; + } + + my $entering = $leaving; + foreach (@components) { + $entering .= '/' . $_; + my $prologue = $entering . "/resources/run-webkit-tests-prologue.html"; + push @result, catdir($testDirectory, $prologue) if (stat(catdir($testDirectory, $prologue))); + foreach (reverse @platformResultHierarchy) { + push @result, catdir($_, $prologue) if (stat(catdir($_, $prologue))); + } + } + return @result; +} + +sub parseLeaksandPrintUniqueLeaks() +{ + return unless @leaksFilenames; + + my $mergedFilenames = join " ", @leaksFilenames; + my $parseMallocHistoryTool = sourceDir() . 
"/Tools/Scripts/parse-malloc-history"; + + open MERGED_LEAKS, "cat $mergedFilenames | $parseMallocHistoryTool --merge-depth $mergeDepth - |" ; + my @leakLines = <MERGED_LEAKS>; + close MERGED_LEAKS; + + my $uniqueLeakCount = 0; + my $totalBytes; + foreach my $line (@leakLines) { + ++$uniqueLeakCount if ($line =~ /^(\d*)\scalls/); + $totalBytes = $1 if $line =~ /^total\:\s(.*)\s\(/; + } + + print "\nWARNING: $totalLeaks total leaks found for a total of $totalBytes!\n"; + print "WARNING: $uniqueLeakCount unique leaks found!\n"; + print "See above for individual leaks results.\n" if ($leaksOutputFileNumber > 2); + +} + +sub extensionForMimeType($) +{ + my ($mimeType) = @_; + + if ($mimeType eq "application/x-webarchive") { + return "webarchive"; + } elsif ($mimeType eq "application/pdf") { + return "pdf"; + } + return "txt"; +} + +# Read up to the first #EOF (the content block of the test), or until detecting crashes or timeouts. +sub readFromDumpToolWithTimer(**) +{ + my ($fhIn, $fhError) = @_; + + setFileHandleNonBlocking($fhIn, 1); + setFileHandleNonBlocking($fhError, 1); + + my $maximumSecondsWithoutOutput = $timeoutSeconds; + my $microsecondsToWaitBeforeReadingAgain = 1000; + + my $timeOfLastSuccessfulRead = time; + + my @output = (); + my @error = (); + my $status = "success"; + my $mimeType = "text/plain"; + # We don't have a very good way to know when the "headers" stop + # and the content starts, so we use this as a hack: + my $haveSeenContentType = 0; + my $haveSeenEofIn = 0; + my $haveSeenEofError = 0; + + while (1) { + if (time - $timeOfLastSuccessfulRead > $maximumSecondsWithoutOutput) { + $status = dumpToolDidCrash() ? "crashed" : "timedOut"; + last; + } + + # Once we've seen the EOF, we must not read anymore. + my $lineIn = readline($fhIn) unless $haveSeenEofIn; + my $lineError = readline($fhError) unless $haveSeenEofError; + if (!defined($lineIn) && !defined($lineError)) { + last if ($haveSeenEofIn && $haveSeenEofError); + + if ($! 
!= EAGAIN) { + $status = "crashed"; + last; + } + + # No data ready + usleep($microsecondsToWaitBeforeReadingAgain); + next; + } + + $timeOfLastSuccessfulRead = time; + + if (defined($lineIn)) { + if (!$haveSeenContentType && $lineIn =~ /^Content-Type: (\S+)$/) { + $mimeType = $1; + $haveSeenContentType = 1; + } elsif ($lineIn =~ /#EOF/) { + $haveSeenEofIn = 1; + } else { + push @output, $lineIn; + } + } + if (defined($lineError)) { + if ($lineError =~ /#CRASHED/) { + $status = "crashed"; + last; + } + if ($lineError =~ /#EOF/) { + $haveSeenEofError = 1; + } else { + push @error, $lineError; + } + } + } + + setFileHandleNonBlocking($fhIn, 0); + setFileHandleNonBlocking($fhError, 0); + return { + output => join("", @output), + error => join("", @error), + status => $status, + mimeType => $mimeType, + extension => extensionForMimeType($mimeType) + }; +} + +sub setFileHandleNonBlocking(*$) +{ + my ($fh, $nonBlocking) = @_; + + my $flags = fcntl($fh, F_GETFL, 0) or die "Couldn't get filehandle flags"; + + if ($nonBlocking) { + $flags |= O_NONBLOCK; + } else { + $flags &= ~O_NONBLOCK; + } + + fcntl($fh, F_SETFL, $flags) or die "Couldn't set filehandle flags"; + + return 1; +} + +sub sampleDumpTool() +{ + return unless isAppleMacWebKit(); + return unless $runSample; + + my $outputDirectory = "$ENV{HOME}/Library/Logs/DumpRenderTree"; + -d $outputDirectory or mkdir $outputDirectory; + + my $outputFile = "$outputDirectory/HangReport.txt"; + system "/usr/bin/sample", $dumpToolPID, qw(10 10 -file), $outputFile; +} + +sub stripMetrics($$) +{ + my ($actual, $expected) = @_; + + foreach my $result ($actual, $expected) { + $result =~ s/at \(-?[0-9]+,-?[0-9]+\) *//g; + $result =~ s/size -?[0-9]+x-?[0-9]+ *//g; + $result =~ s/text run width -?[0-9]+: //g; + $result =~ s/text run width -?[0-9]+ [a-zA-Z ]+: //g; + $result =~ s/RenderButton {BUTTON} .*/RenderButton {BUTTON}/g; + $result =~ s/RenderImage {INPUT} .*/RenderImage {INPUT}/g; + $result =~ s/RenderBlock {INPUT} .*/RenderBlock {INPUT}/g; + $result =~ s/RenderTextControl {INPUT} .*/RenderTextControl {INPUT}/g; + $result =~ s/\([0-9]+px/px/g; + $result =~ s/ *" *\n +" */ /g; + $result =~ s/" +$/"/g; + + $result =~ s/- /-/g; + $result =~ s/\n( *)"\s+/\n$1"/g; + $result =~ s/\s+"\n/"\n/g; + $result =~ s/scrollWidth [0-9]+/scrollWidth/g; + $result =~ s/scrollHeight [0-9]+/scrollHeight/g; + } + + return ($actual, $expected); +} + +sub fileShouldBeIgnored +{ + my ($filePath) = @_; + foreach my $ignoredDir (keys %ignoredDirectories) { + if ($filePath =~ m/^$ignoredDir/) { + return 1; + } + } + return 0; +} + +sub readSkippedFiles($) +{ + my ($constraintPath) = @_; + + my @skippedFileDirectories = @platformTestHierarchy; + + # Because nearly all of the skipped tests for WebKit 2 on Mac are due to + # cross-platform issues, Windows will use both the Mac and Windows skipped + # lists to avoid maintaining separate lists. + push(@skippedFileDirectories, catdir($platformBaseDirectory, "mac-wk2")) if $platform eq "win-wk2"; + + foreach my $level (@skippedFileDirectories) { + if (open SKIPPED, "<", "$level/Skipped") { + if ($verbose) { + my ($dir, $name) = splitpath($level); + print "Skipped tests in $name:\n"; + } + + while (<SKIPPED>) { + my $skipped = $_; + chomp $skipped; + $skipped =~ s/^[ \n\r]+//; + $skipped =~ s/[ \n\r]+$//; + if ($skipped && $skipped !~ /^#/) { + if ($skippedOnly) { + if (!fileShouldBeIgnored($skipped)) { + if (!$constraintPath) { + # Always add $skipped since no constraint path was specified on the command line. 
+ push(@ARGV, $skipped); + } elsif ($skipped =~ /^($constraintPath)/) { + # Add $skipped only if it matches the current path constraint, e.g., + # "--skipped=only dir1" with "dir1/file1.html" on the skipped list. + push(@ARGV, $skipped); + } elsif ($constraintPath =~ /^($skipped)/) { + # Add current path constraint if it is more specific than the skip list entry, + # e.g., "--skipped=only dir1/dir2/dir3" with "dir1" on the skipped list. + push(@ARGV, $constraintPath); + } + } elsif ($verbose) { + print " $skipped\n"; + } + } else { + if ($verbose) { + print " $skipped\n"; + } + processIgnoreTests($skipped, "Skipped"); + } + } + } + close SKIPPED; + } + } +} + +my @testsFound; + +sub directoryFilter +{ + return () if exists $ignoredLocalDirectories{basename($File::Find::dir)}; + return () if exists $ignoredDirectories{File::Spec->abs2rel($File::Find::dir, $testDirectory)}; + return @_; +} + +sub fileFilter +{ + my $filename = $_; + if ($filename =~ /\.([^.]+)$/) { + if (exists $supportedFileExtensions{$1}) { + my $path = File::Spec->abs2rel(catfile($File::Find::dir, $filename), $testDirectory); + push @testsFound, $path if !exists $ignoredFiles{$path}; + } + } +} + +sub findTestsToRun +{ + my @testsToRun = (); + + for my $test (@ARGV) { + $test =~ s/^(\Q$layoutTestsName\E|\Q$testDirectory\E)\///; + my $fullPath = catfile($testDirectory, $test); + if (file_name_is_absolute($test)) { + print "can't run test $test outside $testDirectory\n"; + } elsif (-f $fullPath) { + my ($filename, $pathname, $fileExtension) = fileparse($test, qr{\.[^.]+$}); + if (!exists $supportedFileExtensions{substr($fileExtension, 1)}) { + print "test $test does not have a supported extension\n"; + } elsif ($testHTTP || $pathname !~ /^http\//) { + push @testsToRun, $test; + } + } elsif (-d $fullPath) { + @testsFound = (); + find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $fullPath); + for my $level (@platformTestHierarchy) { + my $platformPath = catfile($level, $test); + find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $platformPath) if (-d $platformPath); + } + push @testsToRun, sort pathcmp @testsFound; + @testsFound = (); + } else { + print "test $test not found\n"; + } + } + + if (!scalar @ARGV) { + @testsFound = (); + find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $testDirectory); + for my $level (@platformTestHierarchy) { + find({ preprocess => \&directoryFilter, wanted => \&fileFilter }, $level); + } + push @testsToRun, sort pathcmp @testsFound; + @testsFound = (); + + # We need to minimize the time when Apache and WebSocketServer is locked by tests + # so run them last if no explicit order was specified in the argument list. 
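+        # Illustrative note (test names are hypothetical): with this partitioning, tests like
+        # fast/dom/title.html run first, http/tests/misc/example.html after them, and
+        # websocket/tests/echo.html last, keeping the HTTP and WebSocket servers busy only
+        # for the tail of the run.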
+ my @httpTests; + my @websocketTests; + my @otherTests; + foreach my $test (@testsToRun) { + if ($test =~ /^http\//) { + push(@httpTests, $test); + } elsif ($test =~ /^websocket\//) { + push(@websocketTests, $test); + } else { + push(@otherTests, $test); + } + } + @testsToRun = (@otherTests, @httpTests, @websocketTests); + } + + # Reverse the tests + @testsToRun = reverse @testsToRun if $reverseTests; + + # Shuffle the array + @testsToRun = shuffle(@testsToRun) if $randomizeTests; + + return @testsToRun; +} + +sub printResults +{ + my %text = ( + match => "succeeded", + mismatch => "had incorrect layout", + new => "were new", + timedout => "timed out", + crash => "crashed", + error => "had stderr output" + ); + + for my $type ("match", "mismatch", "new", "timedout", "crash", "error") { + my $typeCount = $counts{$type}; + next unless $typeCount; + my $typeText = $text{$type}; + my $message; + if ($typeCount == 1) { + $typeText =~ s/were/was/; + $message = sprintf "1 test case (%d%%) %s\n", 1 * 100 / $count, $typeText; + } else { + $message = sprintf "%d test cases (%d%%) %s\n", $typeCount, $typeCount * 100 / $count, $typeText; + } + $message =~ s-\(0%\)-(<1%)-; + print $message; + } +} + +sub stopRunningTestsEarlyIfNeeded() +{ + # --reset-results does not check pass vs. fail, so exitAfterNFailures makes no sense with --reset-results. + return 0 if $resetResults; + + my $passCount = $counts{match} || 0; # $counts{match} will be undefined if we've not yet passed a test (e.g. the first test fails). + my $newCount = $counts{new} || 0; + my $failureCount = $count - $passCount - $newCount; # "Failure" here includes timeouts, crashes, etc. + if ($exitAfterNFailures && $failureCount >= $exitAfterNFailures) { + print "\nExiting early after $failureCount failures. $count tests run."; + closeDumpTool(); + return 1; + } + + my $crashCount = $counts{crash} || 0; + my $timeoutCount = $counts{timedout} || 0; + if ($exitAfterNCrashesOrTimeouts && $crashCount + $timeoutCount >= $exitAfterNCrashesOrTimeouts) { + print "\nExiting early after $crashCount crashes and $timeoutCount timeouts. $count tests run."; + closeDumpTool(); + return 1; + } + + return 0; +} + +sub setUpWindowsCrashLogSaving() +{ + return unless isCygwin(); + + unless (defined $ENV{_NT_SYMBOL_PATH}) { + print "The _NT_SYMBOL_PATH environment variable is not set. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n"; + return; + } + + my $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Debugging Tools for Windows (x86)", "ntsd.exe"); + unless (-f $ntsdPath) { + $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{SYSTEMROOT}), "system32", "ntsd.exe"); + unless (-f $ntsdPath) { + print STDERR "Can't find ntsd.exe. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n"; + return; + } + } + + my %values = ( + Debugger => '"' . toWindowsPath($ntsdPath) . '" -p %ld -e %ld -g -lines -c ".logopen /t \"' . toWindowsPath($testResultsDirectory) . '\CrashLog.txt\";!analyze -vv;~*kpn;q"', + Auto => 1 + ); + + foreach my $value (keys %values) { + chomp($previousWindowsPostMortemDebuggerValues{$value} = `regtool get "$windowsPostMortemDebuggerKey/$value"`); + my $result = system "regtool", "set", "-s", "$windowsPostMortemDebuggerKey/$value", $values{$value}; + next unless $result; + + print "Failed to set \"$windowsPostMortemDebuggerKey/$value\". 
Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n"; + return; + } + + print "Crash logs will be saved to $testResultsDirectory.\n"; +} + +END { + return unless isCygwin(); + + foreach my $value (keys %previousWindowsPostMortemDebuggerValues) { + my $result = system "regtool", "set", "-s", "$windowsPostMortemDebuggerKey/$value", $previousWindowsPostMortemDebuggerValues{$value}; + !$result or print "Failed to restore \"$windowsPostMortemDebuggerKey/$value\" to its previous value \"$previousWindowsPostMortemDebuggerValues{$value}\"\n."; + } +} diff --git a/Tools/Scripts/parallelcl b/Tools/Scripts/parallelcl new file mode 100755 index 0000000..8a46365 --- /dev/null +++ b/Tools/Scripts/parallelcl @@ -0,0 +1,224 @@ +#!/usr/bin/perl + +use strict; +use warnings; + +use File::Basename; +use File::Spec; +use File::Temp; +use POSIX; + +sub makeJob(\@$); +sub forkAndCompileFiles(\@$); +sub Exec($); +sub waitForChild(\@); +sub cleanup(\@); + +my $debug = 0; + +chomp(my $clexe = `cygpath -u '$ENV{'VS80COMNTOOLS'}/../../VC/bin/cl.exe'`); + +if ($debug) { + print STDERR "Received " . @ARGV . " arguments:\n"; + foreach my $arg (@ARGV) { + print STDERR "$arg\n"; + } +} + +my $commandFile; +foreach my $arg (@ARGV) { + if ($arg =~ /^[\/-](E|EP|P)$/) { + print STDERR "The invoking process wants preprocessed source, so let's hand off this whole command to the real cl.exe\n" if $debug; + Exec("\"$clexe\" \"" . join('" "', @ARGV) . "\""); + } elsif ($arg =~ /^@(.*)$/) { + chomp($commandFile = `cygpath -u '$1'`); + } +} + +die "No command file specified!" unless $commandFile; +die "Couldn't find $commandFile!" unless -f $commandFile; + +my @sources; + +open(COMMAND, '<:raw:encoding(UTF16-LE):crlf:utf8', $commandFile) or die "Couldn't open $commandFile!"; + +# The first line of the command file contains all the options to cl.exe plus the first (possibly quoted) filename +my $firstLine = <COMMAND>; +$firstLine =~ s/\r?\n$//; + +# To find the start of the first filename, look for either the last space on the line. +# If the filename is quoted, the last character on the line will be a quote, so look for the quote before that. +my $firstFileIndex; +print STDERR "Last character of first line = '" . substr($firstLine, -1, 1) . "'\n" if $debug; +if (substr($firstLine, -1, 1) eq '"') { + print STDERR "First file is quoted\n" if $debug; + $firstFileIndex = rindex($firstLine, '"', length($firstLine) - 2); +} else { + print STDERR "First file is NOT quoted\n" if $debug; + $firstFileIndex = rindex($firstLine, ' ') + 1; +} + +my $options = substr($firstLine, 0, $firstFileIndex) . join(' ', @ARGV[1 .. 
$#ARGV]); +my $possibleFirstFile = substr($firstLine, $firstFileIndex); +if ($possibleFirstFile =~ /\.(cpp|c)/) { + push(@sources, $possibleFirstFile); +} else { + $options .= " $possibleFirstFile"; +} + +print STDERR "######## Found options $options ##########\n" if $debug; +print STDERR "####### Found first source file $sources[0] ########\n" if @sources && $debug; + +# The rest of the lines of the command file just contain source files, one per line +while (my $source = <COMMAND>) { + chomp($source); + $source =~ s/^\s+//; + $source =~ s/\s+$//; + push(@sources, $source) if length($source); +} +close(COMMAND); + +my $numSources = @sources; +exit unless $numSources > 0; + +my $numJobs; +if ($options =~ s/-j\s*([0-9]+)//) { + $numJobs = $1; +} else { + chomp($numJobs = `num-cpus`); +} + +print STDERR "\n\n####### COMPILING $numSources FILES USING AT MOST $numJobs PARALLEL INSTANCES OF cl.exe ###########\n\n";# if $debug; + +# Magic determination of job size +# The hope is that by splitting the source files up into 2*$numJobs pieces, we +# won't suffer too much if one job finishes much more quickly than another. +# However, we don't want to split it up too much due to cl.exe overhead, so set +# the minimum job size to 5. +my $jobSize = POSIX::ceil($numSources / (2 * $numJobs)); +$jobSize = $jobSize < 5 ? 5 : $jobSize; + +print STDERR "######## jobSize = $jobSize ##########\n" if $debug; + +# Sort the source files randomly so that we don't end up with big clumps of large files (aka SVG) +sub fisher_yates_shuffle(\@) +{ + my ($array) = @_; + for (my $i = @{$array}; --$i; ) { + my $j = int(rand($i+1)); + next if $i == $j; + @{$array}[$i,$j] = @{$array}[$j,$i]; + } +} + +fisher_yates_shuffle(@sources); # permutes @array in place + +my @children; +my @tmpFiles; +my $status = 0; +while (@sources) { + while (@sources && @children < $numJobs) { + my $pid; + my $tmpFile; + my $job = makeJob(@sources, $jobSize); + ($pid, $tmpFile) = forkAndCompileFiles(@{$job}, $options); + + print STDERR "####### Spawned child with PID $pid and tmpFile $tmpFile ##########\n" if $debug; + push(@children, $pid); + push(@tmpFiles, $tmpFile); + } + + $status |= waitForChild(@children); +} + +while (@children) { + $status |= waitForChild(@children); +} +cleanup(@tmpFiles); + +exit WEXITSTATUS($status); + + +sub makeJob(\@$) +{ + my ($files, $jobSize) = @_; + + my @job; + if (@{$files} > ($jobSize * 1.5)) { + @job = splice(@{$files}, -$jobSize); + } else { + # Compile all the remaining files in this job to avoid having a small job later + @job = splice(@{$files}); + } + + return \@job; +} + +sub forkAndCompileFiles(\@$) +{ + print STDERR "######## forkAndCompileFiles()\n" if $debug; + my ($files, $options) = @_; + + if ($debug) { + foreach my $file (@{$files}) { + print STDERR "######## $file\n"; + } + } + + my (undef, $tmpFile) = File::Temp::tempfile('clcommandXXXXX', DIR => File::Spec->tmpdir, OPEN => 0); + + my $pid = fork(); + die "Fork failed" unless defined($pid); + + unless ($pid) { + # Child process + open(TMP, '>:raw:encoding(UTF16-LE):crlf:utf8', $tmpFile) or die "Couldn't open $tmpFile"; + print TMP "$options\n"; + foreach my $file (@{$files}) { + print TMP "$file\n"; + } + close(TMP); + + chomp(my $winTmpFile = `cygpath -m $tmpFile`); + Exec "\"$clexe\" \@\"$winTmpFile\""; + } else { + return ($pid, $tmpFile); + } +} + +sub Exec($) +{ + my ($command) = @_; + + print STDERR "Exec($command)\n" if $debug; + + exec($command); +} + +sub waitForChild(\@) +{ + my ($children) = @_; + + return unless @{$children}; + + 
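+    # Descriptive note: wait() blocks until any child cl.exe instance exits; the loop below
+    # removes that PID from the caller's list so the main loop can spawn a replacement job,
+    # and the raw exit status ($?) is returned so a failed compile propagates to parallelcl's
+    # own exit code via WEXITSTATUS.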
my $deceased = wait(); + my $status = $?; + print STDERR "######## Child with PID $deceased finished ###########\n" if $debug; + for (my $i = 0; $i < @{$children}; $i++) { + if ($children->[$i] == $deceased) { + splice(@{$children}, $i, 1); + last; + } + } + + return $status; +} + +sub cleanup(\@) +{ + my ($tmpFiles) = @_; + + foreach my $file (@{$tmpFiles}) { + unlink $file; + } +} diff --git a/Tools/Scripts/parse-malloc-history b/Tools/Scripts/parse-malloc-history new file mode 100755 index 0000000..177de1c --- /dev/null +++ b/Tools/Scripts/parse-malloc-history @@ -0,0 +1,174 @@ +#!/usr/bin/perl + +# Copyright (C) 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Parses the callstacks in a file with malloc_history formatted content, sorting +# based on total number of bytes allocated, and filtering based on command-line +# parameters. + +use Getopt::Long; +use File::Basename; + +use strict; +use warnings; + +sub commify($); + +sub main() +{ + my $usage = + "Usage: " . basename($0) . " [options] malloc_history.txt\n" . + " --grep-regexp Include only call stacks that match this regular expression.\n" . + " --byte-minimum Include only call stacks with allocation sizes >= this value.\n" . + " --merge-regexp Merge all call stacks that match this regular expression.\n" . 
+ " --merge-depth Merge all call stacks that match at this stack depth and above.\n"; + + my $grepRegexp = ""; + my $byteMinimum = ""; + my @mergeRegexps = (); + my $mergeDepth = ""; + my $getOptionsResult = GetOptions( + "grep-regexp:s" => \$grepRegexp, + "byte-minimum:i" => \$byteMinimum, + "merge-regexp:s" => \@mergeRegexps, + "merge-depth:i" => \$mergeDepth + ); + my $fileName = $ARGV[0]; + die $usage if (!$getOptionsResult || !$fileName); + + open FILE, "<$fileName" or die "bad file: $fileName"; + my @file = <FILE>; + close FILE; + + my %callstacks = (); + my $byteCountTotal = 0; + + for (my $i = 0; $i < @file; $i++) { + my $line = $file[$i]; + my ($callCount, $byteCount); + + # First try malloc_history format + # 6 calls for 664 bytes thread_ffffffff |0x0 | start + ($callCount, $byteCount) = ($line =~ /(\d+) calls for (\d+) bytes/); + + # Then try leaks format + # Leak: 0x0ac3ca40 size=48 + # 0x00020001 0x00000001 0x00000000 0x00000000 ................ + # Call stack: [thread ffffffff]: | 0x0 | start + if (!$callCount || !$byteCount) { + $callCount = 1; + ($byteCount) = ($line =~ /Leak: [x[:xdigit:]]* size=(\d+)/); + + if ($byteCount) { + while (!($line =~ "Call stack: ")) { + $i++; + $line = $file[$i]; + } + } + } + + # Then try LeakFinder format + # --------------- Key: 213813, 84 bytes --------- + # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderarena.cpp(78): WebCore::RenderArena::allocate + # c:\cygwin\home\buildbot\webkit\opensource\webcore\rendering\renderobject.cpp(82): WebCore::RenderObject::operator new + if (!$callCount || !$byteCount) { + $callCount = 1; + ($byteCount) = ($line =~ /Key: (?:\d+), (\d+) bytes/); + if ($byteCount) { + $line = $file[++$i]; + my @tempStack; + while ($file[$i+1] !~ /^(?:-|\d)/) { + if ($line =~ /\): (.*)$/) { + my $call = $1; + $call =~ s/\r$//; + unshift(@tempStack, $call); + } + $line = $file[++$i]; + } + $line = join(" | ", @tempStack); + } + } + + # Then give up + next if (!$callCount || !$byteCount); + + $byteCountTotal += $byteCount; + + next if ($grepRegexp && !($line =~ $grepRegexp)); + + my $callstackBegin = 0; + if ($mergeDepth) { + # count stack frames backwards from end of callstack + $callstackBegin = length($line); + for (my $pipeCount = 0; $pipeCount < $mergeDepth; $pipeCount++) { + my $rindexResult = rindex($line, "|", $callstackBegin - 1); + last if $rindexResult == -1; + $callstackBegin = $rindexResult; + } + } else { + # start at beginning of callstack + $callstackBegin = index($line, "|"); + } + + my $callstack = substr($line, $callstackBegin + 2); # + 2 skips "| " + for my $regexp (@mergeRegexps) { + if ($callstack =~ $regexp) { + $callstack = $regexp . "\n"; + last; + } + } + + if (!$callstacks{$callstack}) { + $callstacks{$callstack} = {"callCount" => 0, "byteCount" => 0}; + } + + $callstacks{$callstack}{"callCount"} += $callCount; + $callstacks{$callstack}{"byteCount"} += $byteCount; + } + + my $byteCountTotalReported = 0; + for my $callstack (sort { $callstacks{$b}{"byteCount"} <=> $callstacks{$a}{"byteCount"} } keys %callstacks) { + my $callCount = $callstacks{$callstack}{"callCount"}; + my $byteCount = $callstacks{$callstack}{"byteCount"}; + last if ($byteMinimum && $byteCount < $byteMinimum); + + $byteCountTotalReported += $byteCount; + print commify($callCount) . " calls for " . commify($byteCount) . " bytes: $callstack\n"; + } + + print "total: " . commify($byteCountTotalReported) . " bytes (" . commify($byteCountTotal - $byteCountTotalReported) . 
" bytes excluded).\n"; +} + +exit(main()); + +# Copied from perldoc -- please excuse the style +sub commify($) +{ + local $_ = shift; + 1 while s/^([-+]?\d+)(\d{3})/$1,$2/; + return $_; +} diff --git a/Tools/Scripts/pdevenv b/Tools/Scripts/pdevenv new file mode 100755 index 0000000..4643728 --- /dev/null +++ b/Tools/Scripts/pdevenv @@ -0,0 +1,45 @@ +#!/usr/bin/perl -w + +use strict; +use warnings; + +use File::Temp qw/tempfile/; +use FindBin; + +use lib $FindBin::Bin; +use webkitdirs; + +my ($fh, $path) = tempfile(UNLINK => 0, SUFFIX => '.cmd') or die; + +chomp(my $vcBin = `cygpath -w "$FindBin::Bin/../vcbin"`); +chomp(my $scriptsPath = `cygpath -w "$FindBin::Bin"`); + +my $vsToolsVar; +if ($ENV{'VS80COMNTOOLS'}) { + $vsToolsVar = "VS80COMNTOOLS"; +} elsif ($ENV{'VS90COMNTOOLS'}) { + $vsToolsVar = "VS90COMNTOOLS"; +} else { + print "*************************************************************\n"; + print "Cannot find Visual Studio tools dir.\n"; + print "Please ensure that \$VS80COMNTOOLS or \$VS90COMNTOOLS\n"; + print "is set to a valid location.\n"; + print "*************************************************************\n"; + die; +} + +print $fh "\@echo off\n\n"; +print $fh "call \"\%" . $vsToolsVar . "\%\\vsvars32.bat\"\n\n"; +print $fh "set PATH=$vcBin;$scriptsPath;\%PATH\%\n\n"; + +print $fh "IF EXIST \"\%VSINSTALLDIR\%\\Common7\\IDE\\devenv.com\" (devenv.com /useenv " . join(" ", @ARGV) . ") ELSE "; +print $fh "VCExpress.exe /useenv " . join(" ", @ARGV) . "\n"; + + +close $fh; + +chmod 0755, $path; + +chomp($path = `cygpath -w -s '$path'`); + +exec("cmd /c \"call $path\""); diff --git a/Tools/Scripts/prepare-ChangeLog b/Tools/Scripts/prepare-ChangeLog new file mode 100755 index 0000000..2fc03d2 --- /dev/null +++ b/Tools/Scripts/prepare-ChangeLog @@ -0,0 +1,1723 @@ +#!/usr/bin/perl -w +# -*- Mode: perl; indent-tabs-mode: nil; c-basic-offset: 2 -*- + +# +# Copyright (C) 2000, 2001 Eazel, Inc. +# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 Apple Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile, Inc. +# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au> +# +# prepare-ChangeLog is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public +# License as published by the Free Software Foundation; either +# version 2 of the License, or (at your option) any later version. +# +# prepare-ChangeLog is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public +# License along with this program; if not, write to the Free +# Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +# + + +# Perl script to create a ChangeLog entry with names of files +# and functions from a diff. +# +# Darin Adler <darin@bentspoon.com>, started 20 April 2000 +# Java support added by Maciej Stachowiak <mjs@eazel.com> +# Objective-C, C++ and Objective-C++ support added by Maciej Stachowiak <mjs@apple.com> +# Git support added by Adam Roben <aroben@apple.com> +# --git-index flag added by Joe Mason <joe.mason@torchmobile.com> + + +# +# TODO: +# List functions that have been removed too. +# Decide what a good logical order is for the changed files +# other than a normal text "sort" (top level first?) +# (group directories?) (.h before .c?) +# Handle yacc source files too (other languages?). 
+# Help merge when there are ChangeLog conflicts or if there's +# already a partly written ChangeLog entry. +# Add command line option to put the ChangeLog into a separate file. +# Add SVN version numbers for commit (can't do that until +# the changes are checked in, though). +# Work around diff stupidity where deleting a function that starts +# with a comment makes diff think that the following function +# has been changed (if the following function starts with a comment +# with the same first line, such as /**) +# Work around diff stupidity where deleting an entire function and +# the blank lines before it makes diff think you've changed the +# previous function. + +use strict; +use warnings; + +use File::Basename; +use File::Spec; +use FindBin; +use Getopt::Long; +use lib $FindBin::Bin; +use POSIX qw(strftime); +use VCSUtils; + +sub changeLogDate($); +sub changeLogEmailAddressFromArgs($); +sub changeLogNameFromArgs($); +sub firstDirectoryOrCwd(); +sub diffFromToString(); +sub diffCommand(@); +sub statusCommand(@); +sub createPatchCommand($); +sub diffHeaderFormat(); +sub findOriginalFileFromSvn($); +sub determinePropertyChanges($$$); +sub pluralizeAndList($$@); +sub generateFileList(\@\@\%); +sub isUnmodifiedStatus($); +sub isModifiedStatus($); +sub isAddedStatus($); +sub isConflictStatus($); +sub statusDescription($$$$); +sub propertyChangeDescription($); +sub extractLineRange($); +sub testListForChangeLog(@); +sub get_function_line_ranges($$); +sub get_function_line_ranges_for_c($$); +sub get_function_line_ranges_for_java($$); +sub get_function_line_ranges_for_javascript($$); +sub get_selector_line_ranges_for_css($$); +sub method_decl_to_selector($); +sub processPaths(\@); +sub reviewerAndDescriptionForGitCommit($); +sub normalizeLineEndings($$); +sub decodeEntities($); + +# Project time zone for Cupertino, CA, US +my $changeLogTimeZone = "PST8PDT"; + +my $bugNumber; +my $name; +my $emailAddress; +my $mergeBase = 0; +my $gitCommit = 0; +my $gitIndex = ""; +my $gitReviewer = ""; +my $openChangeLogs = 0; +my $writeChangeLogs = 1; +my $showHelp = 0; +my $spewDiff = $ENV{"PREPARE_CHANGELOG_DIFF"}; +my $updateChangeLogs = 1; +my $parseOptionsResult = + GetOptions("diff|d!" => \$spewDiff, + "bug|b:i" => \$bugNumber, + "name:s" => \$name, + "email:s" => \$emailAddress, + "merge-base:s" => \$mergeBase, + "git-commit:s" => \$gitCommit, + "git-index" => \$gitIndex, + "git-reviewer:s" => \$gitReviewer, + "help|h!" => \$showHelp, + "open|o!" => \$openChangeLogs, + "write!" => \$writeChangeLogs, + "update!" => \$updateChangeLogs); +if (!$parseOptionsResult || $showHelp) { + print STDERR basename($0) . 
" [-b|--bug=<bugid>] [-d|--diff] [-h|--help] [-o|--open] [--git-commit=<committish>] [--git-reviewer=<name>] [svndir1 [svndir2 ...]]\n"; + print STDERR " -b|--bug Fill in the ChangeLog bug information from the given bug.\n"; + print STDERR " -d|--diff Spew diff to stdout when running\n"; + print STDERR " --merge-base Populate the ChangeLogs with the diff to this branch\n"; + print STDERR " --git-commit Populate the ChangeLogs from the specified git commit\n"; + print STDERR " --git-index Populate the ChangeLogs from the git index only\n"; + print STDERR " --git-reviewer When populating the ChangeLogs from a git commit claim that the spcified name reviewed the change.\n"; + print STDERR " This option is useful when the git commit lacks a Signed-Off-By: line\n"; + print STDERR " -h|--help Show this help message\n"; + print STDERR " -o|--open Open ChangeLogs in an editor when done\n"; + print STDERR " --[no-]update Update ChangeLogs from svn before adding entry (default: update)\n"; + print STDERR " --[no-]write Write ChangeLogs to disk (otherwise send new entries to stdout) (default: write)\n"; + exit 1; +} + +die "--git-commit and --git-index are incompatible." if ($gitIndex && $gitCommit); + +my %paths = processPaths(@ARGV); + +my $isGit = isGitDirectory(firstDirectoryOrCwd()); +my $isSVN = isSVNDirectory(firstDirectoryOrCwd()); + +$isSVN || $isGit || die "Couldn't determine your version control system."; + +my $SVN = "svn"; +my $GIT = "git"; + +# Find the list of modified files +my @changed_files; +my $changed_files_string; +my %changed_line_ranges; +my %function_lists; +my @conflict_files; + + +my %supportedTestExtensions = map { $_ => 1 } qw(html shtml svg xml xhtml pl php); +my @addedRegressionTests = (); +my $didChangeRegressionTests = 0; + +generateFileList(@changed_files, @conflict_files, %function_lists); + +if (!@changed_files && !@conflict_files && !keys %function_lists) { + print STDERR " No changes found.\n"; + exit 1; +} + +if (@conflict_files) { + print STDERR " The following files have conflicts. Run prepare-ChangeLog again after fixing the conflicts:\n"; + print STDERR join("\n", @conflict_files), "\n"; + exit 1; +} + +if (@changed_files) { + $changed_files_string = "'" . join ("' '", @changed_files) . "'"; + + # For each file, build a list of modified lines. + # Use line numbers from the "after" side of each diff. + print STDERR " Reviewing diff to determine which lines changed.\n"; + my $file; + open DIFF, "-|", diffCommand(@changed_files) or die "The diff failed: $!.\n"; + while (<DIFF>) { + $file = makeFilePathRelative($1) if $_ =~ diffHeaderFormat(); + if (defined $file) { + my ($start, $end) = extractLineRange($_); + if ($start >= 0 && $end >= 0) { + push @{$changed_line_ranges{$file}}, [ $start, $end ]; + } elsif (/DO_NOT_COMMIT/) { + print STDERR "WARNING: file $file contains the string DO_NOT_COMMIT, line $.\n"; + } + } + } + close DIFF; +} + +# For each source file, convert line range to function list. +if (%changed_line_ranges) { + print STDERR " Extracting affected function names from source files.\n"; + foreach my $file (keys %changed_line_ranges) { + # Only look for function names in certain source files. + next unless $file =~ /\.(c|cpp|m|mm|h|java|js)/; + + # Find all the functions in the file. + open SOURCE, $file or next; + my @function_ranges = get_function_line_ranges(\*SOURCE, $file); + close SOURCE; + + # Find all the modified functions. 
+ my @functions; + my %saw_function; + my @change_ranges = (@{$changed_line_ranges{$file}}, []); + my @change_range = (0, 0); + FUNCTION: foreach my $function_range_ref (@function_ranges) { + my @function_range = @$function_range_ref; + + # Advance to successive change ranges. + for (;; @change_range = @{shift @change_ranges}) { + last FUNCTION unless @change_range; + + # If past this function, move on to the next one. + next FUNCTION if $change_range[0] > $function_range[1]; + + # If an overlap with this function range, record the function name. + if ($change_range[1] >= $function_range[0] + and $change_range[0] <= $function_range[1]) { + if (!$saw_function{$function_range[2]}) { + $saw_function{$function_range[2]} = 1; + push @functions, $function_range[2]; + } + next FUNCTION; + } + } + } + + # Format the list of functions now. + + if (@functions) { + $function_lists{$file} = "" if !defined $function_lists{$file}; + $function_lists{$file} .= "\n (" . join("):\n (", @functions) . "):"; + } + } +} + +# Get some parameters for the ChangeLog we are about to write. +my $date = changeLogDate($changeLogTimeZone); +$name = changeLogNameFromArgs($name); +$emailAddress = changeLogEmailAddressFromArgs($emailAddress); + +print STDERR " Change author: $name <$emailAddress>.\n"; + +my $bugDescription; +my $bugURL; +if ($bugNumber) { + $bugURL = "https://bugs.webkit.org/show_bug.cgi?id=$bugNumber"; + my $bugXMLURL = "$bugURL&ctype=xml"; + # Perl has no built in XML processing, so we'll fetch and parse with curl and grep + # Pass --insecure because some cygwin installs have no certs we don't + # care about validating that bugs.webkit.org is who it says it is here. + my $descriptionLine = `curl --insecure --silent "$bugXMLURL" | grep short_desc`; + if ($descriptionLine !~ /<short_desc>(.*)<\/short_desc>/) { + # Maybe the reason the above did not work is because the curl that is installed doesn't + # support ssl at all. + if (`curl --version | grep ^Protocols` !~ /\bhttps\b/) { + print STDERR " Could not get description for bug $bugNumber.\n"; + print STDERR " It looks like your version of curl does not support ssl.\n"; + print STDERR " If you are using macports, this can be fixed with sudo port install curl +ssl.\n"; + } else { + print STDERR " Bug $bugNumber has no bug description. Maybe you set wrong bug ID?\n"; + print STDERR " The bug URL: $bugXMLURL\n"; + } + exit 1; + } + $bugDescription = decodeEntities($1); + print STDERR " Description from bug $bugNumber:\n \"$bugDescription\".\n"; +} + +# Remove trailing parenthesized notes from user name (bit of hack). +$name =~ s/\(.*?\)\s*$//g; + +# Find the change logs. 
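+# Illustrative note (hypothetical path): for a file like WebCore/dom/Document.cpp the loop below
+# strips one trailing path component per iteration, checking WebCore/dom/ChangeLog, then
+# WebCore/ChangeLog, then the top-level ChangeLog, and groups the file under the first prefix
+# that actually contains a ChangeLog.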
+my %has_log; +my %files; +foreach my $file (sort keys %function_lists) { + my $prefix = $file; + my $has_log = 0; + while ($prefix) { + $prefix =~ s-/[^/]+/?$-/- or $prefix = ""; + $has_log = $has_log{$prefix}; + if (!defined $has_log) { + $has_log = -f "${prefix}ChangeLog"; + $has_log{$prefix} = $has_log; + } + last if $has_log; + } + if (!$has_log) { + print STDERR "No ChangeLog found for $file.\n"; + } else { + push @{$files{$prefix}}, $file; + } +} + +# Build the list of ChangeLog prefixes in the correct project order +my @prefixes; +my %prefixesSort; +foreach my $prefix (keys %files) { + my $prefixDir = substr($prefix, 0, length($prefix) - 1); # strip trailing / + my $sortKey = lc $prefix; + $sortKey = "top level" unless length $sortKey; + + if ($prefixDir eq "top level") { + $sortKey = ""; + } elsif ($prefixDir eq "Tools") { + $sortKey = "-, just after top level"; + } elsif ($prefixDir eq "WebBrowser") { + $sortKey = lc "WebKit, WebBrowser after"; + } elsif ($prefixDir eq "WebCore") { + $sortKey = lc "WebFoundation, WebCore after"; + } elsif ($prefixDir eq "LayoutTests") { + $sortKey = lc "~, LayoutTests last"; + } + + $prefixesSort{$sortKey} = $prefix; +} +foreach my $prefixSort (sort keys %prefixesSort) { + push @prefixes, $prefixesSort{$prefixSort}; +} + +# Get the latest ChangeLog files from svn. +my @logs = (); +foreach my $prefix (@prefixes) { + push @logs, File::Spec->catfile($prefix || ".", "ChangeLog"); +} + +if (@logs && $updateChangeLogs && $isSVN) { + print STDERR " Running 'svn update' to update ChangeLog files.\n"; + open ERRORS, "-|", $SVN, "update", @logs + or die "The svn update of ChangeLog files failed: $!.\n"; + my @conflictedChangeLogs; + while (my $line = <ERRORS>) { + print STDERR " ", $line; + push @conflictedChangeLogs, $1 if $line =~ m/^C\s+(.+?)[\r\n]*$/; + } + close ERRORS; + + if (@conflictedChangeLogs) { + print STDERR " Attempting to merge conflicted ChangeLogs.\n"; + my $resolveChangeLogsPath = File::Spec->catfile(dirname($0), "resolve-ChangeLogs"); + open RESOLVE, "-|", $resolveChangeLogsPath, "--no-warnings", @conflictedChangeLogs + or die "Could not open resolve-ChangeLogs script: $!.\n"; + print STDERR " $_" while <RESOLVE>; + close RESOLVE; + } +} + +# Generate new ChangeLog entries and (optionally) write out new ChangeLog files. +foreach my $prefix (@prefixes) { + my $endl = "\n"; + my @old_change_log; + + if ($writeChangeLogs) { + my $changeLogPath = File::Spec->catfile($prefix || ".", "ChangeLog"); + print STDERR " Editing the ${changeLogPath} file.\n"; + open OLD_CHANGE_LOG, ${changeLogPath} or die "Could not open ${changeLogPath} file: $!.\n"; + # It's less efficient to read the whole thing into memory than it would be + # to read it while we prepend to it later, but I like doing this part first. + @old_change_log = <OLD_CHANGE_LOG>; + close OLD_CHANGE_LOG; + # We want to match the ChangeLog's line endings in case it doesn't match + # the native line endings for this version of perl. + if ($old_change_log[0] =~ /(\r?\n)$/g) { + $endl = "$1"; + } + open CHANGE_LOG, "> ${changeLogPath}" or die "Could not write ${changeLogPath}\n."; + } else { + open CHANGE_LOG, ">-" or die "Could not write to STDOUT\n."; + print substr($prefix, 0, length($prefix) - 1) . ":\n\n" unless (scalar @prefixes) == 1; + } + + print CHANGE_LOG normalizeLineEndings("$date $name <$emailAddress>\n\n", $endl); + + my ($reviewer, $description) = reviewerAndDescriptionForGitCommit($gitCommit) if $gitCommit; + $reviewer = "NOBODY (OO" . 
"PS!)" if !$reviewer; + + print CHANGE_LOG normalizeLineEndings(" Reviewed by $reviewer.\n\n", $endl); + print CHANGE_LOG normalizeLineEndings($description . "\n", $endl) if $description; + + $bugDescription = "Need a short description and bug URL (OOPS!)" unless $bugDescription; + print CHANGE_LOG normalizeLineEndings(" $bugDescription\n", $endl) if $bugDescription; + print CHANGE_LOG normalizeLineEndings(" $bugURL\n", $endl) if $bugURL; + print CHANGE_LOG normalizeLineEndings("\n", $endl); + + if ($prefix =~ m/WebCore/ || `pwd` =~ m/WebCore/) { + if ($didChangeRegressionTests) { + print CHANGE_LOG normalizeLineEndings(testListForChangeLog(sort @addedRegressionTests), $endl); + } else { + print CHANGE_LOG normalizeLineEndings(" No new tests. (OOPS!)\n\n", $endl); + } + } + + foreach my $file (sort @{$files{$prefix}}) { + my $file_stem = substr $file, length $prefix; + print CHANGE_LOG normalizeLineEndings(" * $file_stem:$function_lists{$file}\n", $endl); + } + + if ($writeChangeLogs) { + print CHANGE_LOG normalizeLineEndings("\n", $endl), @old_change_log; + } else { + print CHANGE_LOG "\n"; + } + + close CHANGE_LOG; +} + +if ($writeChangeLogs) { + print STDERR "-- Please remember to include a detailed description in your ChangeLog entry. --\n-- See <http://webkit.org/coding/contributing.html> for more info --\n"; +} + +# Write out another diff. +if ($spewDiff && @changed_files) { + print STDERR " Running diff to help you write the ChangeLog entries.\n"; + local $/ = undef; # local slurp mode + open DIFF, "-|", createPatchCommand($changed_files_string) or die "The diff failed: $!.\n"; + print <DIFF>; + close DIFF; +} + +# Open ChangeLogs. +if ($openChangeLogs && @logs) { + print STDERR " Opening the edited ChangeLog files.\n"; + my $editor = $ENV{CHANGE_LOG_EDITOR}; + if ($editor) { + system ((split ' ', $editor), @logs); + } else { + $editor = $ENV{CHANGE_LOG_EDIT_APPLICATION}; + if ($editor) { + system "open", "-a", $editor, @logs; + } else { + system "open", "-e", @logs; + } + } +} + +# Done. +exit; + + +sub changeLogDate($) +{ + my ($timeZone) = @_; + my $savedTimeZone = $ENV{'TZ'}; + # Set TZ temporarily so that localtime() is in that time zone + $ENV{'TZ'} = $timeZone; + my $date = strftime("%Y-%m-%d", localtime()); + if (defined $savedTimeZone) { + $ENV{'TZ'} = $savedTimeZone; + } else { + delete $ENV{'TZ'}; + } + return $date; +} + +sub changeLogNameFromArgs($) +{ + my ($nameFromArgs) = @_; + # Silently allow --git-commit to win, we could warn if $nameFromArgs is defined. + return `$GIT log --max-count=1 --pretty=\"format:%an\" \"$gitCommit\"` if $gitCommit; + + return $nameFromArgs || changeLogName(); +} + +sub changeLogEmailAddressFromArgs($) +{ + my ($emailAddressFromArgs) = @_; + # Silently allow --git-commit to win, we could warn if $emailAddressFromArgs is defined. 
+ return `$GIT log --max-count=1 --pretty=\"format:%ae\" \"$gitCommit\"` if $gitCommit; + + return $emailAddressFromArgs || changeLogEmailAddress(); +} + +sub get_function_line_ranges($$) +{ + my ($file_handle, $file_name) = @_; + + if ($file_name =~ /\.(c|cpp|m|mm|h)$/) { + return get_function_line_ranges_for_c ($file_handle, $file_name); + } elsif ($file_name =~ /\.java$/) { + return get_function_line_ranges_for_java ($file_handle, $file_name); + } elsif ($file_name =~ /\.js$/) { + return get_function_line_ranges_for_javascript ($file_handle, $file_name); + } elsif ($file_name =~ /\.css$/) { + return get_selector_line_ranges_for_css ($file_handle, $file_name); + } + return (); +} + + +sub method_decl_to_selector($) +{ + (my $method_decl) = @_; + + $_ = $method_decl; + + if ((my $comment_stripped) = m-([^/]*)(//|/*).*-) { + $_ = $comment_stripped; + } + + s/,\s*...//; + + if (/:/) { + my @components = split /:/; + pop @components if (scalar @components > 1); + $_ = (join ':', map {s/.*[^[:word:]]//; scalar $_;} @components) . ':'; + } else { + s/\s*$//; + s/.*[^[:word:]]//; + } + + return $_; +} + + + +# Read a file and get all the line ranges of the things that look like C functions. +# A function name is the last word before an open parenthesis before the outer +# level open brace. A function starts at the first character after the last close +# brace or semicolon before the function name and ends at the close brace. +# Comment handling is simple-minded but will work for all but pathological cases. +# +# Result is a list of triples: [ start_line, end_line, function_name ]. + +sub get_function_line_ranges_for_c($$) +{ + my ($file_handle, $file_name) = @_; + + my @ranges; + + my $in_comment = 0; + my $in_macro = 0; + my $in_method_declaration = 0; + my $in_parentheses = 0; + my $in_braces = 0; + my $brace_start = 0; + my $brace_end = 0; + my $skip_til_brace_or_semicolon = 0; + + my $word = ""; + my $interface_name = ""; + + my $potential_method_char = ""; + my $potential_method_spec = ""; + + my $potential_start = 0; + my $potential_name = ""; + + my $start = 0; + my $name = ""; + + my $next_word_could_be_namespace = 0; + my $potential_namespace = ""; + my @namespaces; + + while (<$file_handle>) { + # Handle continued multi-line comment. + if ($in_comment) { + next unless s-.*\*/--; + $in_comment = 0; + } + + # Handle continued macro. + if ($in_macro) { + $in_macro = 0 unless /\\$/; + next; + } + + # Handle start of macro (or any preprocessor directive). + if (/^\s*\#/) { + $in_macro = 1 if /^([^\\]|\\.)*\\$/; + next; + } + + # Handle comments and quoted text. + while (m-(/\*|//|\'|\")-) { # \' and \" keep emacs perl mode happy + my $match = $1; + if ($match eq "/*") { + if (!s-/\*.*?\*/--) { + s-/\*.*--; + $in_comment = 1; + } + } elsif ($match eq "//") { + s-//.*--; + } else { # ' or " + if (!s-$match([^\\]|\\.)*?$match--) { + warn "mismatched quotes at line $. 
in $file_name\n"; + s-$match.*--; + } + } + } + + + # continued method declaration + if ($in_method_declaration) { + my $original = $_; + my $method_cont = $_; + + chomp $method_cont; + $method_cont =~ s/[;\{].*//; + $potential_method_spec = "${potential_method_spec} ${method_cont}"; + + $_ = $original; + if (/;/) { + $potential_start = 0; + $potential_method_spec = ""; + $potential_method_char = ""; + $in_method_declaration = 0; + s/^[^;\{]*//; + } elsif (/{/) { + my $selector = method_decl_to_selector ($potential_method_spec); + $potential_name = "${potential_method_char}\[${interface_name} ${selector}\]"; + + $potential_method_spec = ""; + $potential_method_char = ""; + $in_method_declaration = 0; + + $_ = $original; + s/^[^;{]*//; + } elsif (/\@end/) { + $in_method_declaration = 0; + $interface_name = ""; + $_ = $original; + } else { + next; + } + } + + + # start of method declaration + if ((my $method_char, my $method_spec) = m&^([-+])([^0-9;][^;]*);?$&) { + my $original = $_; + + if ($interface_name) { + chomp $method_spec; + $method_spec =~ s/\{.*//; + + $potential_method_char = $method_char; + $potential_method_spec = $method_spec; + $potential_start = $.; + $in_method_declaration = 1; + } else { + warn "declaring a method but don't have interface on line $. in $file_name\n"; + } + $_ = $original; + if (/\{/) { + my $selector = method_decl_to_selector ($potential_method_spec); + $potential_name = "${potential_method_char}\[${interface_name} ${selector}\]"; + + $potential_method_spec = ""; + $potential_method_char = ""; + $in_method_declaration = 0; + $_ = $original; + s/^[^{]*//; + } elsif (/\@end/) { + $in_method_declaration = 0; + $interface_name = ""; + $_ = $original; + } else { + next; + } + } + + + # Find function, interface and method names. + while (m&((?:[[:word:]]+::)*operator(?:[ \t]*\(\)|[^()]*)|[[:word:]:~]+|[(){}:;])|\@(?:implementation|interface|protocol)\s+(\w+)[^{]*&g) { + # interface name + if ($2) { + $interface_name = $2; + next; + } + + # Open parenthesis. + if ($1 eq "(") { + $potential_name = $word unless $in_parentheses || $skip_til_brace_or_semicolon; + $in_parentheses++; + next; + } + + # Close parenthesis. + if ($1 eq ")") { + $in_parentheses--; + next; + } + + # C++ constructor initializers + if ($1 eq ":") { + $skip_til_brace_or_semicolon = 1 unless ($in_parentheses || $in_braces); + } + + # Open brace. + if ($1 eq "{") { + $skip_til_brace_or_semicolon = 0; + + if ($potential_namespace) { + push @namespaces, $potential_namespace; + $potential_namespace = ""; + next; + } + + # Promote potential name to real function name at the + # start of the outer level set of braces (function body?). + if (!$in_braces and $potential_start) { + $start = $potential_start; + $name = $potential_name; + if (@namespaces && $name && (length($name) < 2 || substr($name,1,1) ne "[")) { + $name = join ('::', @namespaces, $name); + } + } + + $in_method_declaration = 0; + + $brace_start = $. if (!$in_braces); + $in_braces++; + next; + } + + # Close brace. + if ($1 eq "}") { + if (!$in_braces && @namespaces) { + pop @namespaces; + next; + } + + $in_braces--; + $brace_end = $. if (!$in_braces); + + # End of an outer level set of braces. + # This could be a function body. + if (!$in_braces and $name) { + push @ranges, [ $start, $., $name ]; + $name = ""; + } + + $potential_start = 0; + $potential_name = ""; + next; + } + + # Semicolon. 
+ if ($1 eq ";") { + $skip_til_brace_or_semicolon = 0; + $potential_start = 0; + $potential_name = ""; + $in_method_declaration = 0; + next; + } + + # Ignore "const" method qualifier. + if ($1 eq "const") { + next; + } + + if ($1 eq "namespace" || $1 eq "class" || $1 eq "struct") { + $next_word_could_be_namespace = 1; + next; + } + + # Word. + $word = $1; + if (!$skip_til_brace_or_semicolon) { + if ($next_word_could_be_namespace) { + $potential_namespace = $word; + $next_word_could_be_namespace = 0; + } elsif ($potential_namespace) { + $potential_namespace = ""; + } + + if (!$in_parentheses) { + $potential_start = 0; + $potential_name = ""; + } + if (!$potential_start) { + $potential_start = $.; + $potential_name = ""; + } + } + } + } + + warn "missing close braces in $file_name (probable start at $brace_start)\n" if ($in_braces > 0); + warn "too many close braces in $file_name (probable start at $brace_end)\n" if ($in_braces < 0); + + warn "mismatched parentheses in $file_name\n" if $in_parentheses; + + return @ranges; +} + + + +# Read a file and get all the line ranges of the things that look like Java +# classes, interfaces and methods. +# +# A class or interface name is the word that immediately follows +# `class' or `interface' when followed by an open curly brace and not +# a semicolon. It can appear at the top level, or inside another class +# or interface block, but not inside a function block +# +# A class or interface starts at the first character after the first close +# brace or after the function name and ends at the close brace. +# +# A function name is the last word before an open parenthesis before +# an open brace rather than a semicolon. It can appear at top level or +# inside a class or interface block, but not inside a function block. +# +# A function starts at the first character after the first close +# brace or after the function name and ends at the close brace. +# +# Comment handling is simple-minded but will work for all but pathological cases. +# +# Result is a list of triples: [ start_line, end_line, function_name ]. + +sub get_function_line_ranges_for_java($$) +{ + my ($file_handle, $file_name) = @_; + + my @current_scopes; + + my @ranges; + + my $in_comment = 0; + my $in_macro = 0; + my $in_parentheses = 0; + my $in_braces = 0; + my $in_non_block_braces = 0; + my $class_or_interface_just_seen = 0; + + my $word = ""; + + my $potential_start = 0; + my $potential_name = ""; + my $potential_name_is_class_or_interface = 0; + + my $start = 0; + my $name = ""; + my $current_name_is_class_or_interface = 0; + + while (<$file_handle>) { + # Handle continued multi-line comment. + if ($in_comment) { + next unless s-.*\*/--; + $in_comment = 0; + } + + # Handle continued macro. + if ($in_macro) { + $in_macro = 0 unless /\\$/; + next; + } + + # Handle start of macro (or any preprocessor directive). + if (/^\s*\#/) { + $in_macro = 1 if /^([^\\]|\\.)*\\$/; + next; + } + + # Handle comments and quoted text. + while (m-(/\*|//|\'|\")-) { # \' and \" keep emacs perl mode happy + my $match = $1; + if ($match eq "/*") { + if (!s-/\*.*?\*/--) { + s-/\*.*--; + $in_comment = 1; + } + } elsif ($match eq "//") { + s-//.*--; + } else { # ' or " + if (!s-$match([^\\]|\\.)*?$match--) { + warn "mismatched quotes at line $. in $file_name\n"; + s-$match.*--; + } + } + } + + # Find function names. + while (m-(\w+|[(){};])-g) { + # Open parenthesis. 
+ if ($1 eq "(") { + if (!$in_parentheses) { + $potential_name = $word; + $potential_name_is_class_or_interface = 0; + } + $in_parentheses++; + next; + } + + # Close parenthesis. + if ($1 eq ")") { + $in_parentheses--; + next; + } + + # Open brace. + if ($1 eq "{") { + # Promote potential name to real function name at the + # start of the outer level set of braces (function/class/interface body?). + if (!$in_non_block_braces + and (!$in_braces or $current_name_is_class_or_interface) + and $potential_start) { + if ($name) { + push @ranges, [ $start, ($. - 1), + join ('.', @current_scopes) ]; + } + + + $current_name_is_class_or_interface = $potential_name_is_class_or_interface; + + $start = $potential_start; + $name = $potential_name; + + push (@current_scopes, $name); + } else { + $in_non_block_braces++; + } + + $potential_name = ""; + $potential_start = 0; + + $in_braces++; + next; + } + + # Close brace. + if ($1 eq "}") { + $in_braces--; + + # End of an outer level set of braces. + # This could be a function body. + if (!$in_non_block_braces) { + if ($name) { + push @ranges, [ $start, $., + join ('.', @current_scopes) ]; + + pop (@current_scopes); + + if (@current_scopes) { + $current_name_is_class_or_interface = 1; + + $start = $. + 1; + $name = $current_scopes[$#current_scopes-1]; + } else { + $current_name_is_class_or_interface = 0; + $start = 0; + $name = ""; + } + } + } else { + $in_non_block_braces-- if $in_non_block_braces; + } + + $potential_start = 0; + $potential_name = ""; + next; + } + + # Semicolon. + if ($1 eq ";") { + $potential_start = 0; + $potential_name = ""; + next; + } + + if ($1 eq "class" or $1 eq "interface") { + $class_or_interface_just_seen = 1; + next; + } + + # Word. + $word = $1; + if (!$in_parentheses) { + if ($class_or_interface_just_seen) { + $potential_name = $word; + $potential_start = $.; + $class_or_interface_just_seen = 0; + $potential_name_is_class_or_interface = 1; + next; + } + } + if (!$potential_start) { + $potential_start = $.; + $potential_name = ""; + } + $class_or_interface_just_seen = 0; + } + } + + warn "mismatched braces in $file_name\n" if $in_braces; + warn "mismatched parentheses in $file_name\n" if $in_parentheses; + + return @ranges; +} + + + +# Read a file and get all the line ranges of the things that look like +# JavaScript functions. +# +# A function name is the word that immediately follows `function' when +# followed by an open curly brace. It can appear at the top level, or +# inside other functions. +# +# An anonymous function name is the identifier chain immediately before +# an assignment with the equals operator or object notation that has a +# value starting with `function' followed by an open curly brace. +# +# A getter or setter name is the word that immediately follows `get' or +# `set' when followed by an open curly brace . +# +# Comment handling is simple-minded but will work for all but pathological cases. +# +# Result is a list of triples: [ start_line, end_line, function_name ]. + +sub get_function_line_ranges_for_javascript($$) +{ + my ($fileHandle, $fileName) = @_; + + my @currentScopes; + my @currentIdentifiers; + my @currentFunctionNames; + my @currentFunctionDepths; + my @currentFunctionStartLines; + + my @ranges; + + my $inComment = 0; + my $inQuotedText = ""; + my $parenthesesDepth = 0; + my $bracesDepth = 0; + + my $functionJustSeen = 0; + my $getterJustSeen = 0; + my $setterJustSeen = 0; + my $assignmentJustSeen = 0; + + my $word = ""; + + while (<$fileHandle>) { + # Handle continued multi-line comment. 
+ if ($inComment) { + next unless s-.*\*/--; + $inComment = 0; + } + + # Handle continued quoted text. + if ($inQuotedText ne "") { + next if /\\$/; + s-([^\\]|\\.)*?$inQuotedText--; + $inQuotedText = ""; + } + + # Handle comments and quoted text. + while (m-(/\*|//|\'|\")-) { # \' and \" keep emacs perl mode happy + my $match = $1; + if ($match eq '/*') { + if (!s-/\*.*?\*/--) { + s-/\*.*--; + $inComment = 1; + } + } elsif ($match eq '//') { + s-//.*--; + } else { # ' or " + if (!s-$match([^\\]|\\.)*?$match--) { + $inQuotedText = $match if /\\$/; + warn "mismatched quotes at line $. in $fileName\n" if $inQuotedText eq ""; + s-$match.*--; + } + } + } + + # Find function names. + while (m-(\w+|[(){}=:;])-g) { + # Open parenthesis. + if ($1 eq '(') { + $parenthesesDepth++; + next; + } + + # Close parenthesis. + if ($1 eq ')') { + $parenthesesDepth--; + next; + } + + # Open brace. + if ($1 eq '{') { + push(@currentScopes, join(".", @currentIdentifiers)); + @currentIdentifiers = (); + + $bracesDepth++; + next; + } + + # Close brace. + if ($1 eq '}') { + $bracesDepth--; + + if (@currentFunctionDepths and $bracesDepth == $currentFunctionDepths[$#currentFunctionDepths]) { + pop(@currentFunctionDepths); + + my $currentFunction = pop(@currentFunctionNames); + my $start = pop(@currentFunctionStartLines); + + push(@ranges, [$start, $., $currentFunction]); + } + + pop(@currentScopes); + @currentIdentifiers = (); + + next; + } + + # Semicolon. + if ($1 eq ';') { + @currentIdentifiers = (); + next; + } + + # Function. + if ($1 eq 'function') { + $functionJustSeen = 1; + + if ($assignmentJustSeen) { + my $currentFunction = join('.', (@currentScopes, @currentIdentifiers)); + $currentFunction =~ s/\.{2,}/\./g; # Removes consecutive periods. + + push(@currentFunctionNames, $currentFunction); + push(@currentFunctionDepths, $bracesDepth); + push(@currentFunctionStartLines, $.); + } + + next; + } + + # Getter prefix. + if ($1 eq 'get') { + $getterJustSeen = 1; + next; + } + + # Setter prefix. + if ($1 eq 'set') { + $setterJustSeen = 1; + next; + } + + # Assignment operator. + if ($1 eq '=' or $1 eq ':') { + $assignmentJustSeen = 1; + next; + } + + next if $parenthesesDepth; + + # Word. + $word = $1; + $word = "get $word" if $getterJustSeen; + $word = "set $word" if $setterJustSeen; + + if (($functionJustSeen and !$assignmentJustSeen) or $getterJustSeen or $setterJustSeen) { + push(@currentIdentifiers, $word); + + my $currentFunction = join('.', (@currentScopes, @currentIdentifiers)); + $currentFunction =~ s/\.{2,}/\./g; # Removes consecutive periods. + + push(@currentFunctionNames, $currentFunction); + push(@currentFunctionDepths, $bracesDepth); + push(@currentFunctionStartLines, $.); + } elsif ($word ne 'if' and $word ne 'for' and $word ne 'do' and $word ne 'while' and $word ne 'which' and $word ne 'var') { + push(@currentIdentifiers, $word); + } + + $functionJustSeen = 0; + $getterJustSeen = 0; + $setterJustSeen = 0; + $assignmentJustSeen = 0; + } + } + + warn "mismatched braces in $fileName\n" if $bracesDepth; + warn "mismatched parentheses in $fileName\n" if $parenthesesDepth; + + return @ranges; +} + +# Read a file and get all the line ranges of the things that look like CSS selectors. A selector is +# anything before an opening brace on a line. A selector starts at the line containing the opening +# brace and ends at the closing brace. +# FIXME: Comments are parsed just like uncommented text. +# +# Result is a list of triples: [ start_line, end_line, selector ]. 
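+#
+# Illustrative example (editorial note, not from the original script): given a
+# four-line style sheet
+#
+#     div.note,
+#     .warning {
+#         color: red;
+#     }
+#
+# the line containing the opening brace supplies the selector text, so the
+# reported triple is [ 2, 4, ".warning" ]; a selector split across several
+# lines is therefore reported only by the text on the "{" line.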
+ +sub get_selector_line_ranges_for_css($$) +{ + my ($fileHandle, $fileName) = @_; + + my @ranges; + + my $currentSelector = ""; + my $start = 0; + + while (<$fileHandle>) { + if (/^[ \t]*(.*[^ \t])[ \t]*{/) { + $currentSelector = $1; + $start = $.; + } + if (index($_, "}") >= 0) { + unless ($start) { + warn "mismatched braces in $fileName\n"; + next; + } + push(@ranges, [$start, $., $currentSelector]); + $currentSelector = ""; + $start = 0; + next; + } + } + + return @ranges; +} + +sub processPaths(\@) +{ + my ($paths) = @_; + return ("." => 1) if (!@{$paths}); + + my %result = (); + + for my $file (@{$paths}) { + die "can't handle absolute paths like \"$file\"\n" if File::Spec->file_name_is_absolute($file); + die "can't handle empty string path\n" if $file eq ""; + die "can't handle path with single quote in the name like \"$file\"\n" if $file =~ /'/; # ' (keep Xcode syntax highlighting happy) + + my $untouchedFile = $file; + + $file = canonicalizePath($file); + + die "can't handle paths with .. like \"$untouchedFile\"\n" if $file =~ m|/\.\./|; + + $result{$file} = 1; + } + + return ("." => 1) if ($result{"."}); + + # Remove any paths that also have a parent listed. + for my $path (keys %result) { + for (my $parent = dirname($path); $parent ne '.'; $parent = dirname($parent)) { + if ($result{$parent}) { + delete $result{$path}; + last; + } + } + } + + return %result; +} + +sub diffFromToString() +{ + return "" if $isSVN; + return $gitCommit if $gitCommit =~ m/.+\.\..+/; + return "\"$gitCommit^\" \"$gitCommit\"" if $gitCommit; + return "--cached" if $gitIndex; + return $mergeBase if $mergeBase; + return "HEAD" if $isGit; +} + +sub diffCommand(@) +{ + my @paths = @_; + + my $pathsString = "'" . join("' '", @paths) . "'"; + + my $command; + if ($isSVN) { + $command = "$SVN diff --diff-cmd diff -x -N $pathsString"; + } elsif ($isGit) { + $command = "$GIT diff --no-ext-diff -U0 " . diffFromToString(); + $command .= " -- $pathsString" unless $gitCommit or $mergeBase; + } + + return $command; +} + +sub statusCommand(@) +{ + my @files = @_; + + my $filesString = "'" . join ("' '", @files) . "'"; + my $command; + if ($isSVN) { + $command = "$SVN stat $filesString"; + } elsif ($isGit) { + $command = "$GIT diff -r --name-status -M -C " . diffFromToString(); + $command .= " -- $filesString" unless $gitCommit; + } + + return "$command 2>&1"; +} + +sub createPatchCommand($) +{ + my ($changedFilesString) = @_; + + my $command; + if ($isSVN) { + $command = "'$FindBin::Bin/svn-create-patch' $changedFilesString"; + } elsif ($isGit) { + $command = "$GIT diff -M -C " . diffFromToString(); + $command .= " -- $changedFilesString" unless $gitCommit; + } + + return $command; +} + +sub diffHeaderFormat() +{ + return qr/^Index: (\S+)[\r\n]*$/ if $isSVN; + return qr/^diff --git a\/.+ b\/(.+)$/ if $isGit; +} + +sub findOriginalFileFromSvn($) +{ + my ($file) = @_; + my $baseUrl; + open INFO, "$SVN info . 
|" or die; + while (<INFO>) { + if (/^URL: (.+?)[\r\n]*$/) { + $baseUrl = $1; + } + } + close INFO; + my $sourceFile; + open INFO, "$SVN info '$file' |" or die; + while (<INFO>) { + if (/^Copied From URL: (.+?)[\r\n]*$/) { + $sourceFile = File::Spec->abs2rel($1, $baseUrl); + } + } + close INFO; + return $sourceFile; +} + +sub determinePropertyChanges($$$) +{ + my ($file, $isAdd, $original) = @_; + + my %changes; + if ($isAdd) { + my %addedProperties; + my %removedProperties; + open PROPLIST, "$SVN proplist '$file' |" or die; + while (<PROPLIST>) { + $addedProperties{$1} = 1 if /^ (.+?)[\r\n]*$/ && $1 ne 'svn:mergeinfo'; + } + close PROPLIST; + if ($original) { + open PROPLIST, "$SVN proplist '$original' |" or die; + while (<PROPLIST>) { + next unless /^ (.+?)[\r\n]*$/; + my $property = $1; + if (exists $addedProperties{$property}) { + delete $addedProperties{$1}; + } else { + $removedProperties{$1} = 1; + } + } + } + $changes{"A"} = [sort keys %addedProperties] if %addedProperties; + $changes{"D"} = [sort keys %removedProperties] if %removedProperties; + } else { + open DIFF, "$SVN diff '$file' |" or die; + while (<DIFF>) { + if (/^Property changes on:/) { + while (<DIFF>) { + my $operation; + my $property; + if (/^Added: (\S*)/) { + $operation = "A"; + $property = $1; + } elsif (/^Modified: (\S*)/) { + $operation = "M"; + $property = $1; + } elsif (/^Deleted: (\S*)/) { + $operation = "D"; + $property = $1; + } elsif (/^Name: (\S*)/) { + # Older versions of svn just say "Name" instead of the type + # of property change. + $operation = "C"; + $property = $1; + } + if ($operation) { + $changes{$operation} = [] unless exists $changes{$operation}; + push @{$changes{$operation}}, $property; + } + } + } + } + close DIFF; + } + return \%changes; +} + +sub pluralizeAndList($$@) +{ + my ($singular, $plural, @items) = @_; + + return if @items == 0; + return "$singular $items[0]" if @items == 1; + return "$plural " . join(", ", @items[0 .. $#items - 1]) . " and " . 
$items[-1]; +} + +sub generateFileList(\@\@\%) +{ + my ($changedFiles, $conflictFiles, $functionLists) = @_; + print STDERR " Running status to find changed, added, or removed files.\n"; + open STAT, "-|", statusCommand(keys %paths) or die "The status failed: $!.\n"; + while (<STAT>) { + my $status; + my $propertyStatus; + my $propertyChanges; + my $original; + my $file; + + if ($isSVN) { + my $matches; + if (isSVNVersion16OrNewer()) { + $matches = /^([ ACDMR])([ CM]).{5} (.+?)[\r\n]*$/; + $status = $1; + $propertyStatus = $2; + $file = $3; + } else { + $matches = /^([ ACDMR])([ CM]).{4} (.+?)[\r\n]*$/; + $status = $1; + $propertyStatus = $2; + $file = $3; + } + if ($matches) { + $file = normalizePath($file); + $original = findOriginalFileFromSvn($file) if substr($_, 3, 1) eq "+"; + my $isAdd = isAddedStatus($status); + $propertyChanges = determinePropertyChanges($file, $isAdd, $original) if isModifiedStatus($propertyStatus) || $isAdd; + } else { + print; # error output from svn stat + } + } elsif ($isGit) { + if (/^([ADM])\t(.+)$/) { + $status = $1; + $propertyStatus = " "; # git doesn't have properties + $file = normalizePath($2); + } elsif (/^([CR])[0-9]{1,3}\t([^\t]+)\t([^\t\n]+)$/) { # for example: R90% newfile oldfile + $status = $1; + $propertyStatus = " "; + $original = normalizePath($2); + $file = normalizePath($3); + } else { + print; # error output from git diff + } + } + + next if !$status || isUnmodifiedStatus($status) && isUnmodifiedStatus($propertyStatus); + + $file = makeFilePathRelative($file); + + if (isModifiedStatus($status) || isAddedStatus($status) || isModifiedStatus($propertyStatus)) { + my @components = File::Spec->splitdir($file); + if ($components[0] eq "LayoutTests") { + $didChangeRegressionTests = 1; + push @addedRegressionTests, $file + if isAddedStatus($status) + && $file =~ /\.([a-zA-Z]+)$/ + && $supportedTestExtensions{lc($1)} + && !scalar(grep(/^resources$/i, @components)) + && !scalar(grep(/^script-tests$/i, @components)); + } + push @{$changedFiles}, $file if $components[$#components] ne "ChangeLog"; + } elsif (isConflictStatus($status) || isConflictStatus($propertyStatus)) { + push @{$conflictFiles}, $file; + } + if (basename($file) ne "ChangeLog") { + my $description = statusDescription($status, $propertyStatus, $original, $propertyChanges); + $functionLists->{$file} = $description if defined $description; + } + } + close STAT; +} + +sub isUnmodifiedStatus($) +{ + my ($status) = @_; + + my %statusCodes = ( + " " => 1, + ); + + return $statusCodes{$status}; +} + +sub isModifiedStatus($) +{ + my ($status) = @_; + + my %statusCodes = ( + "M" => 1, + ); + + return $statusCodes{$status}; +} + +sub isAddedStatus($) +{ + my ($status) = @_; + + my %statusCodes = ( + "A" => 1, + "C" => $isGit, + "R" => 1, + ); + + return $statusCodes{$status}; +} + +sub isConflictStatus($) +{ + my ($status) = @_; + + my %svn = ( + "C" => 1, + ); + + my %git = ( + "U" => 1, + ); + + return 0 if ($gitCommit || $gitIndex); # an existing commit or staged change cannot have conflicts + return $svn{$status} if $isSVN; + return $git{$status} if $isGit; +} + +sub statusDescription($$$$) +{ + my ($status, $propertyStatus, $original, $propertyChanges) = @_; + + my $propertyDescription = defined $propertyChanges ? propertyChangeDescription($propertyChanges) : ""; + + my %svn = ( + "A" => defined $original ? " Copied from \%s." : " Added.", + "D" => " Removed.", + "M" => "", + "R" => defined $original ? " Replaced with \%s." 
: " Replaced.", + " " => "", + ); + + my %git = %svn; + $git{"A"} = " Added."; + $git{"C"} = " Copied from \%s."; + $git{"R"} = " Renamed from \%s."; + + my $description; + $description = sprintf($svn{$status}, $original) if $isSVN && exists $svn{$status}; + $description = sprintf($git{$status}, $original) if $isGit && exists $git{$status}; + return unless defined $description; + + $description .= $propertyDescription unless isAddedStatus($status); + return $description; +} + +sub propertyChangeDescription($) +{ + my ($propertyChanges) = @_; + + my %operations = ( + "A" => "Added", + "M" => "Modified", + "D" => "Removed", + "C" => "Changed", + ); + + my $description = ""; + while (my ($operation, $properties) = each %$propertyChanges) { + my $word = $operations{$operation}; + my $list = pluralizeAndList("property", "properties", @$properties); + $description .= " $word $list."; + } + return $description; +} + +sub extractLineRange($) +{ + my ($string) = @_; + + my ($start, $end) = (-1, -1); + + if ($isSVN && $string =~ /^\d+(,\d+)?[acd](\d+)(,(\d+))?/) { + $start = $2; + $end = $4 || $2; + } elsif ($isGit && $string =~ /^@@ -\d+(,\d+)? \+(\d+)(,(\d+))? @@/) { + $start = $2; + $end = defined($4) ? $4 + $2 - 1 : $2; + } + + return ($start, $end); +} + +sub firstDirectoryOrCwd() +{ + my $dir = "."; + my @dirs = keys(%paths); + + $dir = -d $dirs[0] ? $dirs[0] : dirname($dirs[0]) if @dirs; + + return $dir; +} + +sub testListForChangeLog(@) +{ + my (@tests) = @_; + + return "" unless @tests; + + my $leadString = " Test" . (@tests == 1 ? "" : "s") . ": "; + my $list = $leadString; + foreach my $i (0..$#tests) { + $list .= " " x length($leadString) if $i; + my $test = $tests[$i]; + $test =~ s/^LayoutTests\///; + $list .= "$test\n"; + } + $list .= "\n"; + + return $list; +} + +sub reviewerAndDescriptionForGitCommit($) +{ + my ($commit) = @_; + + my $description = ''; + my $reviewer; + + my @args = qw(rev-list --pretty); + push @args, '-1' if $commit !~ m/.+\.\..+/; + my $gitLog; + { + local $/ = undef; + open(GIT, "-|", $GIT, @args, $commit) || die; + $gitLog = <GIT>; + close(GIT); + } + + my @commitLogs = split(/^[Cc]ommit [a-f0-9]{40}/m, $gitLog); + shift @commitLogs; # Remove initial blank commit log + my $commitLogCount = 0; + foreach my $commitLog (@commitLogs) { + $description .= "\n" if $commitLogCount; + $commitLogCount++; + my $inHeader = 1; + my $commitLogIndent; + my @lines = split(/\n/, $commitLog); + shift @lines; # Remove initial blank line + foreach my $line (@lines) { + if ($inHeader) { + if (!$line) { + $inHeader = 0; + } + next; + } elsif ($line =~ /[Ss]igned-[Oo]ff-[Bb]y: (.+)/) { + if (!$reviewer) { + $reviewer = $1; + } else { + $reviewer .= ", " . $1; + } + } elsif ($line =~ /^\s*$/) { + $description = $description . "\n"; + } else { + if (!defined($commitLogIndent)) { + # Let the first line with non-white space determine + # the global indent. + $line =~ /^(\s*)\S/; + $commitLogIndent = length($1); + } + # Strip at most the indent to preserve relative indents. + $line =~ s/^\s{0,$commitLogIndent}//; + $description = $description . (" " x 8) . $line . 
"\n"; + } + } + } + if (!$reviewer) { + $reviewer = $gitReviewer; + } + + return ($reviewer, $description); +} + +sub normalizeLineEndings($$) +{ + my ($string, $endl) = @_; + $string =~ s/\r?\n/$endl/g; + return $string; +} + +sub decodeEntities($) +{ + my ($text) = @_; + $text =~ s/\</</g; + $text =~ s/\>/>/g; + $text =~ s/\"/\"/g; + $text =~ s/\'/\'/g; + $text =~ s/\&/\&/g; + return $text; +} diff --git a/Tools/Scripts/print-msvc-project-dependencies b/Tools/Scripts/print-msvc-project-dependencies new file mode 100755 index 0000000..dbc8402 --- /dev/null +++ b/Tools/Scripts/print-msvc-project-dependencies @@ -0,0 +1,143 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2008 Apple Inc. All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +use strict; +use File::Basename; + +sub printDependencyTree($); + +my $basename = basename($0); +@ARGV or die "Usage: $basename sln1 [sln2 sln3...]"; + +foreach my $sln (@ARGV) { + printDependencyTree($sln); +} + +exit; + +sub printDependencyTree($) +{ + my ($sln) = @_; + + unless (-f $sln) { + warn "Warning: Can't find $sln; skipping\n"; + return; + } + + unless (open SLN, "<", $sln) { + warn "Warning: Can't open $sln; skipping\n"; + return; + } + + my %projectsByUUID = (); + my $currentProject; + + my $state = "initial"; + foreach my $line (<SLN>) { + if ($state eq "initial") { + if ($line =~ /^Project\([^\)]+\) = "([^"]+)", "[^"]+", "([^"]+)"\r?$/) { + my $name = $1; + my $uuid = $2; + if (exists $projectsByUUID{$uuid}) { + warn "Warning: Project $name appears more than once in $sln; using first definition\n"; + next; + } + $currentProject = { + name => $name, + uuid => $uuid, + dependencies => {}, + }; + $projectsByUUID{$uuid} = $currentProject; + + $state = "inProject"; + } + + next; + } + + if ($state eq "inProject") { + defined($currentProject) or die; + + if ($line =~ /^\s*ProjectSection\(ProjectDependencies\) = postProject\r?$/) { + $state = "inDependencies"; + } elsif ($line =~ /^EndProject\r?$/) { + $currentProject = undef; + $state = "initial"; + } + + next; + } + + if ($state eq "inDependencies") { + defined($currentProject) or die; + + if ($line =~ /^\s*({[^}]+}) = ({[^}]+})\r?$/) { + my $uuid1 = $1; + my $uuid2 = $2; + if (exists $currentProject->{dependencies}->{$uuid1}) { + warn "Warning: UUID $uuid1 listed more than once as dependency of project ", $currentProject->{name}, "\n"; + next; + } + + $uuid1 eq $uuid2 or warn "Warning: UUIDs in depedency section of project ", $currentProject->{name}, " don't match: $uuid1 $uuid2; using first UUID\n"; + + $currentProject->{dependencies}->{$uuid1} = 1; + } elsif ($line =~ /^\s*EndProjectSection\r?$/) { + $state = "inProject"; + } + + next; + } + } + + close SLN or warn "Warning: Can't close $sln\n"; + + my %projectsNotDependedUpon = %projectsByUUID; + CANDIDATE: foreach my $candidateUUID (keys %projectsByUUID) { + foreach my $projectUUID (keys %projectsByUUID) { + next if $candidateUUID eq $projectUUID; + foreach my $dependencyUUID (keys %{$projectsByUUID{$projectUUID}->{dependencies}}) { + if ($candidateUUID eq $dependencyUUID) { + delete $projectsNotDependedUpon{$candidateUUID}; + next CANDIDATE; + } + } + } + } + + foreach my $project (values %projectsNotDependedUpon) { + printProjectAndDependencies($project, 0, \%projectsByUUID); + } +} + +sub printProjectAndDependencies +{ + my ($project, $indentLevel, $projectsByUUID) = @_; + + print " " x $indentLevel, $project->{name}, "\n"; + foreach my $dependencyUUID (keys %{$project->{dependencies}}) { + printProjectAndDependencies($projectsByUUID->{$dependencyUUID}, $indentLevel + 1, $projectsByUUID); + } +} diff --git a/Tools/Scripts/print-vse-failure-logs b/Tools/Scripts/print-vse-failure-logs new file mode 100755 index 0000000..7580465 --- /dev/null +++ b/Tools/Scripts/print-vse-failure-logs @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# This is a very simple script designed to crawl the build directory +# for visual studio express build logs and print them to stdout. + +from __future__ import with_statement + +import codecs +import os +import re + +from webkitpy.common.checkout import scm +from webkitpy.common.system.executive import Executive +from webkitpy.thirdparty import BeautifulSoup + + +class PrintVisualStudioExpressLogs(object): + def __init__(self): + self._executive = Executive() + + def _find_buildlogs(self, build_directory): + build_log_paths = [] + for dirpath, dirnames, filenames in os.walk(build_directory): + for file_name in filenames: + if file_name == "BuildLog.htm": + file_path = os.path.join(dirpath, file_name) + build_log_paths.append(file_path) + return build_log_paths + + def _build_order(self): + """Returns a list of project names in the order in which they are built.""" + script_path = os.path.join(self._scripts_directory(), "print-msvc-project-dependencies") + sln_path = os.path.join(scm.find_checkout_root(), "WebKit", "win", "WebKit.vcproj", "WebKit.sln") + lines = self._executive.run_command([script_path, sln_path]).splitlines() + order = [line.strip() for line in lines if line.find("Folder") == -1] + order.reverse() + return order + + def _sort_buildlogs(self, log_paths): + build_order = self._build_order() + def sort_key(log_path): + project_name = os.path.basename(os.path.dirname(os.path.dirname(log_path))) + try: + index = build_order.index(project_name) + except ValueError: + # If the project isn't in the list, sort it after all items that + # are in the list. + index = len(build_order) + # Sort first by build order, then by project name + return (index, project_name) + return sorted(log_paths, key=sort_key) + + def _obj_directory(self): + build_directory_script_path = os.path.join(self._scripts_directory(), "webkit-build-directory") + # FIXME: ports/webkit.py should provide the build directory in a nice API. + # NOTE: The windows VSE build does not seem to use different directories + # for Debug and Release. 
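+ # Editorial note: the "obj" directory returned below is searched recursively
+ # for BuildLog.htm files; _sort_buildlogs above derives each project name
+ # from the grandparent directory of its BuildLog.htm.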
+ build_directory = self._executive.run_command([build_directory_script_path, "--top-level"]).rstrip() + return os.path.join(build_directory, "obj") + + def _scripts_directory(self): + return os.path.dirname(__file__) + + def _relevant_text(self, log): + soup = BeautifulSoup.BeautifulSoup(log) + # The Output Window table is where the useful output starts in the build log. + output_window_table = soup.find(text=re.compile("Output Window")).findParent("table") + result = [] + for table in [output_window_table] + output_window_table.findNextSiblings("table"): + result.extend([text.replace("&nbsp;", "") for text in table.findAll(text=True)]) + result.append("\n") + return "".join(result) + + def main(self): + build_log_paths = self._sort_buildlogs(self._find_buildlogs(self._obj_directory())) + + print "Found %s Visual Studio Express Build Logs:\n%s" % (len(build_log_paths), "\n".join(build_log_paths)) + + for build_log_path in build_log_paths: + print "%s:\n" % build_log_path + with codecs.open(build_log_path, "r", "utf-16") as build_log: + print self._relevant_text(build_log) + + +if __name__ == '__main__': + PrintVisualStudioExpressLogs().main() diff --git a/Tools/Scripts/rebaseline-chromium-webkit-tests b/Tools/Scripts/rebaseline-chromium-webkit-tests new file mode 100755 index 0000000..8d14b86 --- /dev/null +++ b/Tools/Scripts/rebaseline-chromium-webkit-tests @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
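+
+# Editorial note: this thin wrapper only extends sys.path so that
+# webkitpy/layout_tests and the bundled third-party copy of simplejson can be
+# imported, then hands control to rebaseline_chromium_webkit_tests.main().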
+ +"""Wrapper around webkitpy/layout_tests/rebaseline.py""" +import os +import sys + +scripts_directory = os.path.dirname(os.path.abspath(sys.argv[0])) +webkitpy_directory = os.path.join(scripts_directory, "webkitpy") +sys.path.append(os.path.join(webkitpy_directory, "layout_tests")) + +# For simplejson +sys.path.append(os.path.join(webkitpy_directory, "thirdparty")) + +import rebaseline_chromium_webkit_tests + +if __name__ == '__main__': + rebaseline_chromium_webkit_tests.main() diff --git a/Tools/Scripts/report-include-statistics b/Tools/Scripts/report-include-statistics new file mode 100755 index 0000000..17152ab --- /dev/null +++ b/Tools/Scripts/report-include-statistics @@ -0,0 +1,114 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "report-include-statistics" script for WebKit Open Source Project + +use strict; +use File::Find; + +find(\&wanted, @ARGV ? @ARGV : "."); + +my %paths; +my %sources; +my %includes; + +sub wanted +{ + my $file = $_; + + if ($file eq "icu") { + $File::Find::prune = 1; + return; + } + + if ($file !~ /^\./ && $file =~ /\.(h|cpp|c|mm|m)$/) { + $paths{$file} = $File::Find::name; + $sources{$file} = $File::Find::name if $file !~ /\.h/; + open FILE, $file or die; + while (<FILE>) { + if (m-^\s*#\s*(include|import)\s+["<]((\S+/)*)(\S+)[">]-) { + my $include = ($2 eq "sys/" ? $2 : "") . 
$4; + $includes{$file}{$include}++; + } + } + close FILE; + } +} + +my %totalIncludes; + +sub fillOut +{ + my ($file) = @_; + + return if defined $totalIncludes{$file}; + + for my $include (keys %{ $includes{$file} }) { + $totalIncludes{$file}{$include} = 1; + fillOut($include); + for my $i (keys %{ $totalIncludes{$include} }) { + $totalIncludes{$file}{$i} = 1; + } + } +} + +my %inclusionCounts; +for my $file (keys %includes) { + $inclusionCounts{$file} = 0; + fillOut($file); +} + +for my $file (keys %sources) { + for my $include (keys %{ $totalIncludes{$file} }) { + $inclusionCounts{$include}++; + } +} + +for my $file (sort mostincludedcmp keys %includes) { + next if !$paths{$file}; + my $count = $inclusionCounts{$file}; + my $numIncludes = keys %{ $includes{$file} }; + my $numTotalIncludes = keys %{ $totalIncludes{$file} }; + print "$file is included $count times, includes $numIncludes files directly, $numTotalIncludes files total.\n" +} + +# Sort most-included files first. +sub mostincludedcmp($$) +{ + my ($filea, $fileb) = @_; + + my $counta = $inclusionCounts{$filea} || 0; + my $countb = $inclusionCounts{$fileb} || 0; + return $countb <=> $counta if $counta != $countb; + + my $ta = keys %{ $totalIncludes{$filea} }; + my $tb = keys %{ $totalIncludes{$fileb} }; + return $ta <=> $tb if $ta != $tb; + + return $filea cmp $fileb; +} diff --git a/Tools/Scripts/resolve-ChangeLogs b/Tools/Scripts/resolve-ChangeLogs new file mode 100755 index 0000000..6635711 --- /dev/null +++ b/Tools/Scripts/resolve-ChangeLogs @@ -0,0 +1,488 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007, 2008, 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
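+
+# Illustrative setup sketch (the driver name "changelog" and this exact git
+# configuration are assumptions, not taken from this patch): to have git use
+# this script when merging ChangeLog files, one possible configuration is
+#
+#     git config merge.changelog.name "ChangeLog merge driver"
+#     git config merge.changelog.driver "resolve-ChangeLogs --merge-driver %O %A %B"
+#     echo "ChangeLog merge=changelog" >> .gitattributes
+#
+# %O, %A and %B are git's placeholders for the ancestor, current and other
+# versions; when mergeChangeLogs() cannot combine them, the --merge-driver code
+# path below falls back to plain "git merge-file".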
+ +# Merge and resolve ChangeLog conflicts for svn and git repositories + +use strict; + +use FindBin; +use lib $FindBin::Bin; + +use File::Basename; +use File::Copy; +use File::Path; +use File::Spec; +use Getopt::Long; +use POSIX; +use VCSUtils; + +sub canonicalRelativePath($); +sub conflictFiles($); +sub findChangeLog($); +sub findUnmergedChangeLogs(); +sub fixMergedChangeLogs($;@); +sub fixOneMergedChangeLog($); +sub hasGitUnmergedFiles(); +sub isInGitFilterBranch(); +sub parseFixMerged($$;$); +sub removeChangeLogArguments($); +sub resolveChangeLog($); +sub resolveConflict($); +sub showStatus($;$); +sub usageAndExit(); + +my $isGit = isGit(); +my $isSVN = isSVN(); + +my $SVN = "svn"; +my $GIT = "git"; + +my $fixMerged; +my $gitRebaseContinue = 0; +my $mergeDriver = 0; +my $printWarnings = 1; +my $showHelp; + +my $getOptionsResult = GetOptions( + 'c|continue!' => \$gitRebaseContinue, + 'f|fix-merged:s' => \&parseFixMerged, + 'm|merge-driver!' => \$mergeDriver, + 'h|help' => \$showHelp, + 'w|warnings!' => \$printWarnings, +); + +my $relativePath = isInGitFilterBranch() ? '.' : chdirReturningRelativePath(determineVCSRoot()); + +my @changeLogFiles = removeChangeLogArguments($relativePath); + +if (!defined $fixMerged && !$mergeDriver && scalar(@changeLogFiles) == 0) { + @changeLogFiles = findUnmergedChangeLogs(); +} + +if (!$mergeDriver && scalar(@ARGV) > 0) { + print STDERR "ERROR: Files listed on command-line that are not ChangeLogs.\n"; + undef $getOptionsResult; +} elsif (!defined $fixMerged && !$mergeDriver && scalar(@changeLogFiles) == 0) { + print STDERR "ERROR: No ChangeLog files listed on command-line or found unmerged.\n"; + undef $getOptionsResult; +} elsif ($gitRebaseContinue && !$isGit) { + print STDERR "ERROR: --continue may only be used with a git repository\n"; + undef $getOptionsResult; +} elsif (defined $fixMerged && !$isGit) { + print STDERR "ERROR: --fix-merged may only be used with a git repository\n"; + undef $getOptionsResult; +} elsif ($mergeDriver && !$isGit) { + print STDERR "ERROR: --merge-driver may only be used with a git repository\n"; + undef $getOptionsResult; +} elsif ($mergeDriver && scalar(@ARGV) < 3) { + print STDERR "ERROR: --merge-driver expects %O %A %B as arguments\n"; + undef $getOptionsResult; +} + +sub usageAndExit() +{ + print STDERR <<__END__; +Usage: @{[ basename($0) ]} [options] [path/to/ChangeLog] [path/to/another/ChangeLog ...] + -c|--[no-]continue run "git rebase --continue" after fixing ChangeLog + entries (default: --no-continue) + -f|--fix-merged [revision-range] fix git-merged ChangeLog entries; if a revision-range + is specified, run git filter-branch on the range + -m|--merge-driver %O %A %B act as a git merge-driver on files %O %A %B + -h|--help show this help message + -w|--[no-]warnings show or suppress warnings (default: show warnings) +__END__ + exit 1; +} + +if (!$getOptionsResult || $showHelp) { + usageAndExit(); +} + +if (defined $fixMerged && length($fixMerged) > 0) { + my $commitRange = $fixMerged; + $commitRange = $commitRange . 
"..HEAD" if index($commitRange, "..") < 0; + fixMergedChangeLogs($commitRange, @changeLogFiles); +} elsif ($mergeDriver) { + my ($base, $theirs, $ours) = @ARGV; + if (mergeChangeLogs($ours, $base, $theirs)) { + unlink($ours); + copy($theirs, $ours) or die $!; + } else { + exec qw(git merge-file -L THEIRS -L BASE -L OURS), $theirs, $base, $ours; + } +} elsif (@changeLogFiles) { + for my $file (@changeLogFiles) { + if (defined $fixMerged) { + fixOneMergedChangeLog($file); + } else { + resolveChangeLog($file); + } + } +} else { + print STDERR "ERROR: Unknown combination of switches and arguments.\n"; + usageAndExit(); +} + +if ($gitRebaseContinue) { + if (hasGitUnmergedFiles()) { + print "Unmerged files; skipping '$GIT rebase --continue'.\n"; + } else { + print "Running '$GIT rebase --continue'...\n"; + print `$GIT rebase --continue`; + } +} + +exit 0; + +sub canonicalRelativePath($) +{ + my ($originalPath) = @_; + my $absolutePath = Cwd::abs_path($originalPath); + return File::Spec->abs2rel($absolutePath, Cwd::getcwd()); +} + +sub conflictFiles($) +{ + my ($file) = @_; + my $fileMine; + my $fileOlder; + my $fileNewer; + + if (-e $file && -e "$file.orig" && -e "$file.rej") { + return ("$file.rej", "$file.orig", $file); + } + + if ($isSVN) { + open STAT, "-|", $SVN, "status", $file or die $!; + my $status = <STAT>; + close STAT; + if (!$status || $status !~ m/^C\s+/) { + print STDERR "WARNING: ${file} is not in a conflicted state.\n" if $printWarnings; + return (); + } + + $fileMine = "${file}.mine" if -e "${file}.mine"; + + my $currentRevision; + open INFO, "-|", $SVN, "info", $file or die $!; + while (my $line = <INFO>) { + if ($line =~ m/^Revision: ([0-9]+)/) { + $currentRevision = $1; + { local $/ = undef; <INFO>; } # Consume rest of input. + } + } + close INFO; + $fileNewer = "${file}.r${currentRevision}" if -e "${file}.r${currentRevision}"; + + my @matchingFiles = grep { $_ ne $fileNewer } glob("${file}.r[0-9][0-9]*"); + if (scalar(@matchingFiles) > 1) { + print STDERR "WARNING: Too many conflict files exist for ${file}!\n" if $printWarnings; + } else { + $fileOlder = shift @matchingFiles; + } + } elsif ($isGit) { + my $gitPrefix = `$GIT rev-parse --show-prefix`; + chomp $gitPrefix; + open GIT, "-|", $GIT, "ls-files", "--unmerged", $file or die $!; + while (my $line = <GIT>) { + my ($mode, $hash, $stage, $fileName) = split(' ', $line); + my $outputFile; + if ($stage == 1) { + $fileOlder = "${file}.BASE.$$"; + $outputFile = $fileOlder; + } elsif ($stage == 2) { + $fileNewer = "${file}.LOCAL.$$"; + $outputFile = $fileNewer; + } elsif ($stage == 3) { + $fileMine = "${file}.REMOTE.$$"; + $outputFile = $fileMine; + } else { + die "Unknown file stage: $stage"; + } + system("$GIT cat-file blob :${stage}:${gitPrefix}${file} > $outputFile"); + die $! 
if WEXITSTATUS($?); + } + close GIT or die $!; + } else { + die "Unknown version control system"; + } + + if (!$fileMine && !$fileOlder && !$fileNewer) { + print STDERR "WARNING: ${file} does not need merging.\n" if $printWarnings; + } elsif (!$fileMine || !$fileOlder || !$fileNewer) { + print STDERR "WARNING: ${file} is missing some conflict files.\n" if $printWarnings; + } + + return ($fileMine, $fileOlder, $fileNewer); +} + +sub findChangeLog($) +{ + return $_[0] if basename($_[0]) eq "ChangeLog"; + + my $file = File::Spec->catfile($_[0], "ChangeLog"); + return $file if -d $_[0] and -e $file; + + return undef; +} + +sub findUnmergedChangeLogs() +{ + my $statCommand = ""; + + if ($isSVN) { + $statCommand = "$SVN stat | grep '^C'"; + } elsif ($isGit) { + $statCommand = "$GIT diff -r --name-status --diff-filter=U -C -C -M"; + } else { + return (); + } + + my @results = (); + open STAT, "-|", $statCommand or die "The status failed: $!.\n"; + while (<STAT>) { + if ($isSVN) { + my $matches; + my $file; + if (isSVNVersion16OrNewer()) { + $matches = /^([C]).{6} (.+?)[\r\n]*$/; + $file = $2; + } else { + $matches = /^([C]).{5} (.+?)[\r\n]*$/; + $file = $2; + } + if ($matches) { + $file = findChangeLog(normalizePath($file)); + push @results, $file if $file; + } else { + print; # error output from svn stat + } + } elsif ($isGit) { + if (/^([U])\t(.+)$/) { + my $file = findChangeLog(normalizePath($2)); + push @results, $file if $file; + } else { + print; # error output from git diff + } + } + } + close STAT; + + return @results; +} + +sub fixMergedChangeLogs($;@) +{ + my $revisionRange = shift; + my @changedFiles = @_; + + if (scalar(@changedFiles) < 1) { + # Read in list of files changed in $revisionRange + open GIT, "-|", $GIT, "diff", "--name-only", $revisionRange or die $!; + push @changedFiles, <GIT>; + close GIT or die $!; + die "No changed files in $revisionRange" if scalar(@changedFiles) < 1; + chomp @changedFiles; + } + + my @changeLogs = grep { defined $_ } map { findChangeLog($_) } @changedFiles; + die "No changed ChangeLog files in $revisionRange" if scalar(@changeLogs) < 1; + + system("$GIT filter-branch --tree-filter 'PREVIOUS_COMMIT=\`$GIT rev-parse \$GIT_COMMIT^\` && MAPPED_PREVIOUS_COMMIT=\`map \$PREVIOUS_COMMIT\` \"$0\" -f \"" . join('" "', @changeLogs) . "\"' $revisionRange"); + + # On success, remove the backup refs directory + if (WEXITSTATUS($?) == 0) { + rmtree(qw(.git/refs/original)); + } +} + +sub fixOneMergedChangeLog($) +{ + my $file = shift; + my $patch; + + # Read in patch for incorrectly merged ChangeLog entry + { + local $/ = undef; + open GIT, "-|", $GIT, "diff", ($ENV{GIT_COMMIT} || "HEAD") . "^", $file or die $!; + $patch = <GIT>; + close GIT or die $!; + } + + # Always checkout the previous commit's copy of the ChangeLog + system($GIT, "checkout", $ENV{MAPPED_PREVIOUS_COMMIT} || "HEAD^", $file); + die $! if WEXITSTATUS($?); + + # The patch must have 0 or more lines of context, then 1 or more lines + # of additions, and then 1 or more lines of context. If not, we skip it. + if ($patch =~ /\n@@ -(\d+),(\d+) \+(\d+),(\d+) @@\n( .*\n)*((\+.*\n)+)( .*\n)+$/m) { + # Copy the header from the original patch. + my $newPatch = substr($patch, 0, index($patch, "@@ -${1},${2} +${3},${4} @@")); + + # Generate a new set of line numbers and patch lengths. Our new + # patch will start with the lines for the fixed ChangeLog entry, + # then have 3 lines of context from the top of the current file to + # make the patch apply cleanly. + $newPatch .= "@@ -1,3 +1," . ($4 - $2 + 3) . 
" @@\n"; + + # We assume that top few lines of the ChangeLog entry are actually + # at the bottom of the list of added lines (due to the way the patch + # algorithm works), so we simply search through the lines until we + # find the date line, then move the rest of the lines to the top. + my @patchLines = map { $_ . "\n" } split(/\n/, $6); + foreach my $i (0 .. $#patchLines) { + if ($patchLines[$i] =~ /^\+\d{4}-\d{2}-\d{2} /) { + unshift(@patchLines, splice(@patchLines, $i, scalar(@patchLines) - $i)); + last; + } + } + + $newPatch .= join("", @patchLines); + + # Add 3 lines of context to the end + open FILE, "<", $file or die $!; + for (my $i = 0; $i < 3; $i++) { + $newPatch .= " " . <FILE>; + } + close FILE; + + # Apply the new patch + open(PATCH, "| patch -p1 $file > " . File::Spec->devnull()) or die $!; + print PATCH $newPatch; + close(PATCH) or die $!; + + # Run "git add" on the fixed ChangeLog file + system($GIT, "add", $file); + die $! if WEXITSTATUS($?); + + showStatus($file, 1); + } elsif ($patch) { + # Restore the current copy of the ChangeLog file since we can't repatch it + system($GIT, "checkout", $ENV{GIT_COMMIT} || "HEAD", $file); + die $! if WEXITSTATUS($?); + print STDERR "WARNING: Last change to ${file} could not be fixed and re-merged.\n" if $printWarnings; + } +} + +sub hasGitUnmergedFiles() +{ + my $output = `$GIT ls-files --unmerged`; + return $output ne ""; +} + +sub isInGitFilterBranch() +{ + return exists $ENV{MAPPED_PREVIOUS_COMMIT} && $ENV{MAPPED_PREVIOUS_COMMIT}; +} + +sub parseFixMerged($$;$) +{ + my ($switchName, $key, $value) = @_; + if (defined $key) { + if (defined findChangeLog($key)) { + unshift(@ARGV, $key); + $fixMerged = ""; + } else { + $fixMerged = $key; + } + } else { + $fixMerged = ""; + } +} + +sub removeChangeLogArguments($) +{ + my ($baseDir) = @_; + my @results = (); + + for (my $i = 0; $i < scalar(@ARGV); ) { + my $file = findChangeLog(canonicalRelativePath(File::Spec->catfile($baseDir, $ARGV[$i]))); + if (defined $file) { + splice(@ARGV, $i, 1); + push @results, $file; + } else { + $i++; + } + } + + return @results; +} + +sub resolveChangeLog($) +{ + my ($file) = @_; + + my ($fileMine, $fileOlder, $fileNewer) = conflictFiles($file); + + return unless $fileMine && $fileOlder && $fileNewer; + + if (mergeChangeLogs($fileMine, $fileOlder, $fileNewer)) { + if ($file ne $fileNewer) { + unlink($file); + rename($fileNewer, $file) or die $!; + } + unlink($fileMine, $fileOlder); + resolveConflict($file); + showStatus($file, 1); + } else { + showStatus($file); + print STDERR "WARNING: ${file} could not be merged using fuzz level 3.\n" if $printWarnings; + unlink($fileMine, $fileOlder, $fileNewer) if $isGit; + } +} + +sub resolveConflict($) +{ + my ($file) = @_; + + if ($isSVN) { + system($SVN, "resolved", $file); + die $! if WEXITSTATUS($?); + } elsif ($isGit) { + system($GIT, "add", $file); + die $! if WEXITSTATUS($?); + } else { + die "Unknown version control system"; + } +} + +sub showStatus($;$) +{ + my ($file, $isConflictResolved) = @_; + + if ($isSVN) { + system($SVN, "status", $file); + } elsif ($isGit) { + my @args = qw(--name-status); + unshift @args, qw(--cached) if $isConflictResolved; + system($GIT, "diff", @args, $file); + } else { + die "Unknown version control system"; + } +} + diff --git a/Tools/Scripts/roll-over-ChangeLogs b/Tools/Scripts/roll-over-ChangeLogs new file mode 100755 index 0000000..7e6d32f --- /dev/null +++ b/Tools/Scripts/roll-over-ChangeLogs @@ -0,0 +1,47 @@ +#!/usr/bin/env ruby + +# Copyright (C) 2009 Apple Inc. 
All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +require 'date' + +CHANGELOG_SIZE_THRESHOLD = 750 * 1024 + +scripts_directory = File.dirname(__FILE__) +base_directory = File.expand_path(ARGV[0] || `perl -I#{scripts_directory} -Mwebkitdirs -e 'print sourceDir();'`) + +date_suffix = Date.today.strftime("-%Y-%m-%d") + +Dir.chdir base_directory +`find . -type f -name 'ChangeLog'`.split.each do |path| + next unless File.stat(path).size > CHANGELOG_SIZE_THRESHOLD + + old_path = "#{path}#{date_suffix}" + puts "Moving #{path} to #{old_path}..." + system "git", "mv", path, old_path + File.open path, "w" do |file| + file.write "== Rolled over to ChangeLog#{date_suffix} ==\n" + end + system "git", "add", path +end diff --git a/Tools/Scripts/run-api-tests b/Tools/Scripts/run-api-tests new file mode 100755 index 0000000..29430a8 --- /dev/null +++ b/Tools/Scripts/run-api-tests @@ -0,0 +1,246 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. 
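An aside on the roll-over-ChangeLogs script above: it only touches ChangeLogs larger than 750 KB, replacing them with a one-line stub that points at the dated copy. A minimal Perl sketch of that size check (hypothetical path, shown purely for illustration; the script itself is Ruby and also performs the git mv/add steps):

#!/usr/bin/perl -w
use strict;

my $threshold = 750 * 1024;        # same 750 KB limit as CHANGELOG_SIZE_THRESHOLD above
my $path = "WebCore/ChangeLog";    # hypothetical path
my $size = -s $path;               # undef if the file does not exist
if (defined $size && $size > $threshold) {
    print "$path ($size bytes) would be rolled over.\n";
}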
+ +# Features to add: +# - Command line option to run a single test. +# - Command line option to run all tests in a suite. + +use strict; +use warnings; + +use File::Basename; +use FindBin; +use Getopt::Long qw(:config pass_through); +use IPC::Open3; +use lib $FindBin::Bin; +use webkitdirs; +use Term::ANSIColor qw(:constants); + +sub dumpAllTests(); +sub runAllTests(); +sub runAllTestsInSuite($); +sub runTest($$); +sub populateTests(); +sub buildTestTool(); + +my $showHelp = 0; +my $quiet = 0; +my $dump = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] + --help Show this help message + -q|--quite Less verbose output + -d|--dump-tests Dump the names of testcases without running them +EOF + +GetOptions( + 'help' => \$showHelp, + 'quiet|q' => \$quiet, + 'dump|d' => \$dump, +); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +setConfiguration(); +buildTestTool(); +setPathForRunningWebKitApp(\%ENV); +my %testsToRun = populateTests(); + +if ($dump) { + dumpAllTests(); + exit 0; +} + +runAllTests(); + +sub dumpAllTests() +{ + print "Dumping test cases\n"; + print "------------------\n"; + for my $suite (keys %testsToRun) { + print $suite . ":\n"; + print map { " " . $_ . "\n" } @{ $testsToRun{$suite} }; + } + print "------------------\n"; +} + +sub runAllTests() +{ + my $anyFailures = 0; + for my $suite (keys %testsToRun) { + my $failed = runAllTestsInSuite($suite); + if ($failed) { + $anyFailures = 1; + } + } + return $anyFailures; +} + +sub runAllTestsInSuite($) +{ + my ($suite) = @_; + print "Suite: $suite\n"; + + my $anyFailures = 0; + for my $test (@{$testsToRun{$suite}}) { + my $failed = runTest($suite, $test); + if ($failed) { + $anyFailures = 1; + } + } + + return $anyFailures; +} + +sub runTest($$) +{ + my ($suite, $testName) = @_; + my $test = $suite . "/" . $testName; + + print " Test: $testName -> "; + + my $result = 0; + if (isAppleMacWebKit()) { + my $productDir = productDir(); + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + my $apiTesterPath = "$productDir/TestWebKitAPI"; + if (architecture()) { + $result = system "arch", "-" . architecture(), $apiTesterPath, $test, @ARGV; + } else { + $result = system $apiTesterPath, $test, @ARGV; + } + } elsif (isAppleWinWebKit()) { + my $apiTesterNameSuffix; + if (configurationForVisualStudio() ne "Debug_All") { + $apiTesterNameSuffix = ""; + } else { + $apiTesterNameSuffix = "_debug"; + } + my $apiTesterPath = File::Spec->catfile(productDir(), "TestWebKitAPI$apiTesterNameSuffix.exe"); + $result = system $apiTesterPath, $test, @ARGV; + } else { + die "run-api-tests is not supported on this platform.\n" + } + + if ($result == 0) { + print BOLD GREEN, "Passed", RESET, "\n"; + } else { + print BOLD RED, "Failed", RESET, "\n"; + } +} + + +sub populateTests() +{ + my @tests; + + if (isAppleMacWebKit()) { + my $productDir = productDir(); + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + my $apiTesterPath = "$productDir/TestWebKitAPI"; + + my ($pid, $childIn, $childOut); + if (architecture()) { + $pid = open3($childIn, $childOut, ">&STDERR", "arch", "-" . 
architecture(), $apiTesterPath, "--dump-tests") or die "Failed to build list of tests!"; + } else { + $pid = open3($childIn, $childOut, ">&STDERR", $apiTesterPath, "--dump-tests") or die "Failed to build list of tests!"; + } + close($childIn); + @tests = <$childOut>; + close($childOut); + + waitpid($pid, 0); + my $result = $?; + + if ($result) { + print STDERR "Failed to build list of tests!\n"; + exit exitStatus($result); + } + } elsif (isAppleWinWebKit()) { + my $apiTesterNameSuffix; + if (configurationForVisualStudio() ne "Debug_All") { + $apiTesterNameSuffix = ""; + } else { + $apiTesterNameSuffix = "_debug"; + } + my $apiTesterPath = File::Spec->catfile(productDir(), "TestWebKitAPI$apiTesterNameSuffix.exe"); + open(TESTS, "-|", $apiTesterPath, "--dump-tests") or die $!; + @tests = <TESTS>; + close(TESTS) or die $!; + } else { + die "run-api-tests is not supported on this platform.\n" + } + + my %keyedTests = (); + for my $test (@tests) { + $test =~ s/[\r\n]*$//; + my ($suite, $testName) = split(/\//, $test); + push @{$keyedTests{$suite}}, $testName; + } + + return %keyedTests; +} + +sub buildTestTool() +{ + chdirWebKit(); + + my $buildTestTool = "build-api-tests"; + print STDERR "Running $buildTestTool\n"; + + local *DEVNULL; + my ($childIn, $childOut, $childErr); + if ($quiet) { + open(DEVNULL, ">", File::Spec->devnull()) or die "Failed to open /dev/null"; + $childOut = ">&DEVNULL"; + $childErr = ">&DEVNULL"; + } else { + # When not quiet, let the child use our stdout/stderr. + $childOut = ">&STDOUT"; + $childErr = ">&STDERR"; + } + + my @args = argumentsForConfiguration(); + my $buildProcess = open3($childIn, $childOut, $childErr, "Tools/Scripts/$buildTestTool", @args) or die "Failed to run " . $buildTestTool; + close($childIn); + waitpid $buildProcess, 0; + my $buildResult = $?; + close($childOut); + close($childErr); + + close DEVNULL if ($quiet); + + if ($buildResult) { + print STDERR "Compiling TestWebKitAPI failed!\n"; + exit exitStatus($buildResult); + } +} diff --git a/Tools/Scripts/run-bindings-tests b/Tools/Scripts/run-bindings-tests new file mode 100755 index 0000000..4a093d1 --- /dev/null +++ b/Tools/Scripts/run-bindings-tests @@ -0,0 +1,137 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
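populateTests() above keys each test name reported by --dump-tests under its suite. A self-contained sketch of that grouping, using made-up "Suite/TestName" lines rather than real TestWebKitAPI output:

#!/usr/bin/perl -w
use strict;

# Made-up lines in the "Suite/TestName" format that --dump-tests emits.
my @tests = ("WTF/VectorBasic\n", "WebKit2/WKString\n", "WTF/StringBuilder\n");

my %keyedTests;
for my $test (@tests) {
    $test =~ s/[\r\n]*$//;                    # strip trailing CR/LF
    my ($suite, $testName) = split(/\//, $test);
    push @{$keyedTests{$suite}}, $testName;   # group test names under their suite
}

for my $suite (sort keys %keyedTests) {
    print "$suite: ", join(", ", @{$keyedTests{$suite}}), "\n";
}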
+# + +# This script generates h and cpp file for TestObj.idl using the V8 code +# generator. Please execute the script whenever changes are made to +# CodeGeneratorV8.pm, and submit the changes in V8TestObj.h/cpp in the same +# patch. This makes it easier to track and review changes in generated code. +# To execute, invoke: 'python run_tests.py' + +import os +import os.path +import subprocess +import sys +import tempfile +from webkitpy.common.checkout import scm + + +def generate_from_idl(generator, idl_file, output_directory): + cmd = ['perl', '-w', + '-IWebCore/bindings/scripts', + 'WebCore/bindings/scripts/generate-bindings.pl', + # idl include directories (path relative to generate-bindings.pl) + '--include', '.', + '--defines', 'TESTING_%s' % generator, + '--generator', generator, + '--outputDir', output_directory, + idl_file] + return subprocess.call(cmd) == 0 + + +def detect_changes(work_directory, reference_directory): + changes_found = False + for output_file in os.listdir(work_directory): + print 'Detecting changes in %s...' % output_file + cmd = ['diff', + '-u', + os.path.join(reference_directory, output_file), + os.path.join(work_directory, output_file)] + if subprocess.call(cmd) != 0: + print 'Detected changes in %s (see above)' % output_file + changes_found = True + else: + print 'No changes found.' + + return changes_found + + +def run_tests(generator, input_directory, reference_directory, reset_results): + work_directory = reference_directory + + passed = True + for input_file in os.listdir(input_directory): + (name, extension) = os.path.splitext(input_file) + if extension != '.idl': + continue + print 'Testing the %s generator on %s' % (generator, input_file) + # Generate output into the work directory (either the given one or a + # temp one if not reset_results is performed) + if not reset_results: + work_directory = tempfile.mkdtemp() + if not generate_from_idl(generator, os.path.join(input_directory, + input_file), + work_directory): + passed = False + if reset_results: + print "Overwrote reference files" + continue + # Detect changes + if detect_changes(work_directory, reference_directory): + passed = False + + if not passed: + print '%s generator failed.' % generator + return passed + + +def main(argv): + """Runs WebCore bindings code generators on test IDL files and compares + the results with reference files. + + Options: + --reset-results: Overwrites the reference files with the generated results. + + """ + reset_results = "--reset-results" in argv + + current_scm = scm.detect_scm_system(os.curdir) + os.chdir(current_scm.checkout_root) + + all_tests_passed = True + + generators = [ + 'JS', + 'V8', + 'ObjC', + 'GObject', + 'CPP' + ] + + for generator in generators: + input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test') + reference_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test', generator) + if not run_tests(generator, input_directory, reference_directory, reset_results): + all_tests_passed = False + + if all_tests_passed: + print 'All tests passed!' + return 0 + else: + print '(To update the reference files, execute "run-bindings-tests --reset-results")' + return -1 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/Tools/Scripts/run-chromium-webkit-unit-tests b/Tools/Scripts/run-chromium-webkit-unit-tests new file mode 100755 index 0000000..62646af --- /dev/null +++ b/Tools/Scripts/run-chromium-webkit-unit-tests @@ -0,0 +1,51 @@ +#!/usr/bin/perl -w +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use File::Spec; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +# Allow running this script from any directory. +my $sourceRootDir = File::Spec->catfile($FindBin::Bin, "../.."); +chdir($sourceRootDir); + +setConfiguration(); + +my $pathToBinary; +if (isDarwin()) { + $pathToBinary = "WebKit/chromium/xcodebuild/" . configuration() . "/webkit_unit_tests"; +} elsif (isCygwin() || isWindows()) { + $pathToBinary = "WebKit/chromium/" . configuration() . "/webkit_unit_tests.exe"; +} elsif (isLinux()) { + $pathToBinary = "out/" . configuration() . "/webkit_unit_tests"; +} + +exit system ($pathToBinary, @ARGV); diff --git a/Tools/Scripts/run-gtk-tests b/Tools/Scripts/run-gtk-tests new file mode 100644 index 0000000..9a57319 --- /dev/null +++ b/Tools/Scripts/run-gtk-tests @@ -0,0 +1,35 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009 Gustavo Noronha Silva <gns@gnome.org> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Library General Public +# License as published by the Free Software Foundation; either +# version 2 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Library General Public License for more details. +# +# You should have received a copy of the GNU Library General Public License +# along with this library; see the file COPYING.LIB. If not, write to +# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +# Boston, MA 02110-1301, USA. + +use strict; +use warnings; + +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +# This initializes the correct configuration (Release/Debug) +setConfiguration(); + +my $productDir = productDir(); +my @unitTests = glob $productDir . 
"/Programs/unittests/*"; +if ($#unitTests < 1) { + die "ERROR: tests not found in $productDir.\n"; +} +system "gtester -k @unitTests" diff --git a/Tools/Scripts/run-iexploder-tests b/Tools/Scripts/run-iexploder-tests new file mode 100755 index 0000000..5d7ae55 --- /dev/null +++ b/Tools/Scripts/run-iexploder-tests @@ -0,0 +1,143 @@ +#!/usr/bin/perl + +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# A script to semi-automatically run iExploder tests. 
+ +use strict; +use warnings; + +use Cwd; +use File::Spec; +use FindBin; +use Getopt::Long; +use IPC::Open2; + +use lib $FindBin::Bin; +use webkitperl::httpd; +use webkitdirs; + +sub configureAndOpenHTTPDIfNeeded(); +sub runSafariWithIExploder(); + +# Argument handling +my $guardMalloc = ''; +my $httpdPort = 8000; +my $downloadTest; +my $iExploderTestDirectory = "/tmp/iExploderTest"; + +GetOptions( + 'guard-malloc|g' => \$guardMalloc, + 'get=s' => \$downloadTest, + 'port=i' => \$httpdPort +); + + +setConfiguration(); +my $productDir = productDir(); + +chdirWebKit(); + +checkFrameworks(); + +my $isHttpdOpen = 0; +configureAndOpenHTTPDIfNeeded(); + +if ($downloadTest) { + system "/usr/bin/curl -o ~/Desktop/iexploder$downloadTest.html \"http://127.0.0.1:$httpdPort/iexploder.cgi?lookup=1&test=$downloadTest\""; + print "Saved the test as iexploder$downloadTest.html on the desktop\n"; +} else { + runSafariWithIExploder(); + print "Last generated tests:\n"; + system "grep 'iexploder.cgi' $iExploderTestDirectory/access_log.txt | tail -n -5 | awk -F'[ =&\\?]' '{if (\$8 == \"lookup\") print \$11; else print \$9}'"; +} + +rmtree $iExploderTestDirectory; +$isHttpdOpen = !closeHTTPD(); + +sub runSafariWithIExploder() +{ + my $redirectTo; + if (@ARGV) { + $redirectTo = "http://127.0.0.1:$httpdPort/iexploder.cgi?lookup=1&test=$ARGV[0]"; + } else { + $redirectTo = "http://127.0.0.1:$httpdPort/index.html"; + } + + open REDIRECT_HTML, ">", "$iExploderTestDirectory/redirect.html" or die; + print REDIRECT_HTML "<html>\n"; + print REDIRECT_HTML " <head>\n"; + print REDIRECT_HTML " <meta http-equiv=\"refresh\" content=\"1;URL=$redirectTo\" />\n"; + print REDIRECT_HTML " <script type=\"text/javascript\">\n"; + print REDIRECT_HTML " document.location = \"$redirectTo\";\n"; + print REDIRECT_HTML " </script>\n"; + print REDIRECT_HTML " </head>\n"; + print REDIRECT_HTML " <body>\n"; + print REDIRECT_HTML " </body>\n"; + print REDIRECT_HTML "</html>\n"; + close REDIRECT_HTML; + + if (!isAppleWebKit()) { + system "Tools/Scripts/run-launcher", "$iExploderTestDirectory/redirect.html"; + } else { + local %ENV; + $ENV{DYLD_INSERT_LIBRARIES} = "/usr/lib/libgmalloc.dylib" if $guardMalloc; + system "Tools/Scripts/run-safari", "-NSOpen", "$iExploderTestDirectory/redirect.html"; + } +} + +sub configureAndOpenHTTPDIfNeeded() +{ + return if $isHttpdOpen; + mkdir $iExploderTestDirectory; + my $httpdPath = getHTTPDPath(); + my $webkitDirectory = getcwd(); + my $testDirectory = $webkitDirectory . "/LayoutTests"; + my $iExploderDirectory = $webkitDirectory . 
"/Tools/iExploder"; + + my $httpdConfig = getHTTPDConfigPathForTestDirectory($testDirectory); + + my $documentRoot = "$iExploderDirectory/htdocs"; + my $typesConfig = "$testDirectory/http/conf/mime.types"; + my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; + my $listen = "127.0.0.1:$httpdPort"; + + + my @args = ( + "-f", "$httpdConfig", + "-C", "DocumentRoot \"$documentRoot\"", + "-C", "Listen $listen", + "-c", "TypesConfig \"$typesConfig\"", + "-c", "CustomLog \"$iExploderTestDirectory/access_log.txt\" common", + "-c", "ErrorLog \"$iExploderTestDirectory/error_log.txt\"", + "-c", "SSLCertificateFile \"$sslCertificate\"", + # Apache wouldn't run CGIs with permissions==700 otherwise + "-c", "User \"#$<\"" + ); + + $isHttpdOpen = openHTTPD(@args); +} diff --git a/Tools/Scripts/run-javascriptcore-tests b/Tools/Scripts/run-javascriptcore-tests new file mode 100755 index 0000000..9fcbb2e --- /dev/null +++ b/Tools/Scripts/run-javascriptcore-tests @@ -0,0 +1,191 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# Copyright (C) 2007 Eric Seidel <eric@webkit.org> +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run the WebKit Open Source Project JavaScriptCore tests (adapted from Mozilla). + +use strict; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +# determine configuration +setConfiguration(); +my $configuration = configuration(); + +my @testsToSkip = ( + # Various ecma/Date tests sometimes fail on Windows (but not Mac) https://bugs.webkit.org/show_bug.cgi?id=25160 + "ecma/Date/15.9.2.1.js", + "ecma/Date/15.9.2.2-1.js", + "ecma/Date/15.9.2.2-2.js", + "ecma/Date/15.9.2.2-3.js", + "ecma/Date/15.9.2.2-4.js", + "ecma/Date/15.9.2.2-5.js", + "ecma/Date/15.9.2.2-6.js", + # ecma_3/Date/15.9.5.7.js fails on Mac (but not Windows) https://bugs.webkit.org/show_bug.cgi?id=25161 + "ecma_3/Date/15.9.5.7.js", +); + +my $jsDriverArgs = "-L " . 
join(" ", @testsToSkip); +my $root; # intentionally left undefined +my $skipBuild = 0; +my $showHelp = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --jsDriver-args= A string of arguments to pass to jsDriver.pl + --root= Path to pre-built root containing jsc +EOF + +GetOptions( + 'j|jsDriver-args=s' => \$jsDriverArgs, + 'root=s' => \$root, + 'skip-build' => \$skipBuild, + 'help' => \$showHelp +); + +# Assume any arguments left over from GetOptions are assumed to be build arguments +my @buildArgs = @ARGV; + +# Arguments passed to --jsDriver-args (if any) are passed to jsDriver.pl +my @jsArgs = split(" ", $jsDriverArgs); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +setConfigurationProductDir(Cwd::abs_path($root)) if (defined($root)); + +if (!defined($root) && !$skipBuild) { + chdirWebKit(); + + push(@buildArgs, argumentsForConfiguration()); + + print "Running: build-jsc " . join(" ", @buildArgs) . "\n"; + my $buildResult = system "perl", "Tools/Scripts/build-jsc", @buildArgs; + if ($buildResult) { + print STDERR "Compiling jsc failed!\n"; + exit exitStatus($buildResult); + } +} + + +my $productDir = jscProductDir(); +$ENV{DYLD_FRAMEWORK_PATH} = $productDir; +setPathForRunningWebKitApp(\%ENV) if isCygwin(); + +sub testapiPath($) +{ + my ($productDir) = @_; + my $jscName = "testapi"; + $jscName .= "_debug" if configurationForVisualStudio() eq "Debug_All"; + return "$productDir/$jscName"; +} + +#run api tests +if (isAppleMacWebKit() || isAppleWinWebKit()) { + chdirWebKit(); + chdir($productDir) or die; + my $path = testapiPath($productDir); + # Use an "indirect object" so that system() won't get confused if the path + # contains spaces (see perldoc -f exec). + my $testapiResult = system { $path } $path; + exit exitStatus($testapiResult) if $testapiResult; +} + +# Find JavaScriptCore directory +chdirWebKit(); +chdir("JavaScriptCore"); +chdir "tests/mozilla" or die; +printf "Running: jsDriver.pl -e squirrelfish -s %s -f actual.html %s\n", jscPath($productDir), join(" ", @jsArgs); +my $result = system "perl", "jsDriver.pl", "-e", "squirrelfish", "-s", jscPath($productDir), "-f", "actual.html", @jsArgs; +exit exitStatus($result) if $result; + +my %failures; + +open EXPECTED, "expected.html" or die; +while (<EXPECTED>) { + last if /failures reported\.$/; +} +while (<EXPECTED>) { + chomp; + $failures{$_} = 1; +} +close EXPECTED; + +my %newFailures; + +open ACTUAL, "actual.html" or die; +while (<ACTUAL>) { + last if /failures reported\.$/; +} +while (<ACTUAL>) { + chomp; + if ($failures{$_}) { + delete $failures{$_}; + } else { + $newFailures{$_} = 1; + } +} +close ACTUAL; + +my $numNewFailures = keys %newFailures; +if ($numNewFailures) { + print "\n** Danger, Will Robinson! Danger! 
The following failures have been introduced:\n"; + foreach my $failure (sort keys %newFailures) { + print "\t$failure\n"; + } +} + +my $numOldFailures = keys %failures; +if ($numOldFailures) { + print "\nYou fixed the following test"; + print "s" if $numOldFailures != 1; + print ":\n"; + foreach my $failure (sort keys %failures) { + print "\t$failure\n"; + } +} + +print "\n"; + +print "$numNewFailures regression"; +print "s" if $numNewFailures != 1; +print " found.\n"; + +print "$numOldFailures test"; +print "s" if $numOldFailures != 1; +print " fixed.\n"; + +print "OK.\n" if $numNewFailures == 0; +exit(1) if $numNewFailures; diff --git a/Tools/Scripts/run-jsc b/Tools/Scripts/run-jsc new file mode 100755 index 0000000..e5341c1 --- /dev/null +++ b/Tools/Scripts/run-jsc @@ -0,0 +1,59 @@ +#!/usr/bin/perl + +# Copyright (C) 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This script runs a list of scripts through jsc a specified number of times. + +use strict; +use warnings; +use File::Spec; +use FindBin; +use lib $FindBin::Bin; +use Getopt::Long; +use webkitdirs; + +my $usage = "Usage: run-jsc [--count run_count] [--verbose] shell_file [file2...]"; + +my $count = 1; +my $verbose = 0; +GetOptions("count|c=i" => \$count, + "verbose|v" => \$verbose); +die "$usage\n" if (@ARGV < 1); + +my $jsc = jscProductDir() . "/jsc @ARGV"; +$jsc .= " 2> " . File::Spec->devnull() unless $verbose; + +my $dyld = jscProductDir(); + +$ENV{"DYLD_FRAMEWORK_PATH"} = $dyld; +print STDERR "Running $count time(s): DYLD_FRAMEWORK_PATH=$dyld $jsc\n"; +while ($count--) { + if (system("$jsc") != 0) { + last; + } +} + diff --git a/Tools/Scripts/run-launcher b/Tools/Scripts/run-launcher new file mode 100755 index 0000000..414d4af --- /dev/null +++ b/Tools/Scripts/run-launcher @@ -0,0 +1,84 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007 Apple Computer, Inc. All rights reserved. +# Copyright (C) 2007 Staikos Computing Services, Inc. 
<info@staikos.net> +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for WebKit Open Source Project. + +use strict; +use File::Spec::Functions qw/catdir/; +use File::Temp qw/tempfile/; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); +my $productDir = productDir(); +my $launcherPath = productDir(); +my @args = @ARGV; + +# Check to see that all the frameworks are built. +checkFrameworks(); + +# Set paths according to the build system used +if (isQt()) { + my $libDir = catdir(productDir(), 'lib'); + $launcherPath = catdir($launcherPath, "bin", "QtTestBrowser"); + + $ENV{QTWEBKIT_PLUGIN_PATH} = catdir($libDir, 'plugins'); + + print "Starting webkit launcher, running against the built WebKit in $libDir...\n"; + if (isDarwin()) { + $ENV{DYLD_LIBRARY_PATH} = $ENV{DYLD_LIBRARY_PATH} ? "$libDir:$ENV{DYLD_LIBRARY_PATH}" : $libDir; + $ENV{DYLD_FRAMEWORK_PATH} = $ENV{DYLD_FRAMEWORK_PATH} ? "$libDir:$ENV{DYLD_FRAMEWORK_PATH}" : $libDir; + } else { + $ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} ? "$libDir:$ENV{LD_LIBRARY_PATH}" : $libDir; + } +} else { + + if (isGtk()) { + $launcherPath = catdir($launcherPath, "Programs", "GtkLauncher"); + } + + if (isEfl()) { + $launcherPath = catdir($launcherPath, "Programs", "EWebLauncher"); + } + + if (isWx()) { + if (isDarwin()) { + $launcherPath = catdir($launcherPath, 'wxBrowser.app', 'Contents', 'MacOS', 'wxBrowser'); + } else { + $ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} ? "$productDir:$ENV{LD_LIBRARY_PATH}" : $productDir; + $launcherPath = catdir($launcherPath, 'wxBrowser'); + } + } + + print "Starting webkit launcher.\n"; +} + +exec $launcherPath, @args or die; + diff --git a/Tools/Scripts/run-leaks b/Tools/Scripts/run-leaks new file mode 100755 index 0000000..9dc58de --- /dev/null +++ b/Tools/Scripts/run-leaks @@ -0,0 +1,221 @@ +#!/usr/bin/perl + +# Copyright (C) 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run the Mac OS X leaks tool with more expressive '-exclude' lists. + +use strict; +use warnings; + +use File::Basename; +use Getopt::Long; + +sub runLeaks($); +sub parseLeaksOutput(\@); +sub removeMatchingRecords(\@$\@); +sub reportError($); + +sub main() +{ + # Read options. + my $usage = + "Usage: " . basename($0) . " [options] pid | executable name\n" . + " --exclude-callstack regexp Exclude leaks whose call stacks match the regular expression 'regexp'.\n" . + " --exclude-type regexp Exclude leaks whose data types match the regular expression 'regexp'.\n" . + " --help Show this help message.\n"; + + my @callStacksToExclude = (); + my @typesToExclude = (); + my $help = 0; + + my $getOptionsResult = GetOptions( + 'exclude-callstack:s' => \@callStacksToExclude, + 'exclude-type:s' => \@typesToExclude, + 'help' => \$help + ); + my $pidOrExecutableName = $ARGV[0]; + + if (!$getOptionsResult || $help) { + print STDERR $usage; + return 1; + } + + if (!$pidOrExecutableName) { + reportError("Missing argument: pid | executable."); + print STDERR $usage; + return 1; + } + + # Run leaks tool. + my $leaksOutput = runLeaks($pidOrExecutableName); + if (!$leaksOutput) { + return 1; + } + + my $leakList = parseLeaksOutput(@$leaksOutput); + if (!$leakList) { + return 1; + } + + # Filter output. + my $leakCount = @$leakList; + removeMatchingRecords(@$leakList, "callStack", @callStacksToExclude); + removeMatchingRecords(@$leakList, "type", @typesToExclude); + my $excludeCount = $leakCount - @$leakList; + + # Dump results. + print $leaksOutput->[0]; + print $leaksOutput->[1]; + foreach my $leak (@$leakList) { + print $leak->{"leaksOutput"}; + } + + if ($excludeCount) { + print "$excludeCount leaks excluded (not printed)\n"; + } + + return 0; +} + +exit(main()); + +# Returns the output of the leaks tool in list form. 
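Before the helpers: the exclusion options handled in main() above boil down to dropping every parsed record whose chosen field matches one of the user-supplied patterns. A standalone sketch of that filtering, with made-up records rather than real leaks output:

#!/usr/bin/perl -w
use strict;

# Made-up leak records; in the real script they come from parseLeaksOutput().
my @records = (
    { type => "NSString",  callStack => "Call stack: foo() | malloc" },
    { type => "HashTable", callStack => "Call stack: WTF::fastMalloc | bar()" },
);
my @excludePatterns = (qr/fastMalloc/);

for (my $i = 0; $i < @records;) {
    my $record = $records[$i];
    if (grep { $record->{callStack} =~ $_ } @excludePatterns) {
        splice(@records, $i, 1);    # drop the matching record; do not advance $i
    } else {
        $i++;
    }
}

print scalar(@records), " record(s) kept\n";    # prints "1 record(s) kept"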
+sub runLeaks($) +{ + my ($pidOrExecutableName) = @_; + + my @leaksOutput = `leaks $pidOrExecutableName`; + if (!@leaksOutput) { + reportError("Error running leaks tool."); + return; + } + + return \@leaksOutput; +} + +# Returns a list of hash references with the keys { address, size, type, callStack, leaksOutput } +sub parseLeaksOutput(\@) +{ + my ($leaksOutput) = @_; + + # Format: + # Process 00000: 1234 nodes malloced for 1234 KB + # Process 00000: XX leaks for XXX total leaked bytes. + # Leak: 0x00000000 size=1234 [instance of 'blah'] + # 0x00000000 0x00000000 0x00000000 0x00000000 a..d.e.e + # ... + # Call stack: leak_caller() | leak() | malloc + # + # We treat every line except for Process 00000: and Leak: as optional + + # Newer versions of the leaks output have a header section at the top, with the first line describing the version of the output format. + # If we detect the new format is being used then we eat all of the header section so the output matches the format of older versions. + # FIXME: In the future we may wish to propagate this section through to our output. + if ($leaksOutput->[0] =~ /^leaks Report Version:/) { + while ($leaksOutput->[0] !~ /^Process /) { + shift @$leaksOutput; + } + } + + my ($leakCount) = ($leaksOutput->[1] =~ /[[:blank:]]+([0-9]+)[[:blank:]]+leaks?/); + if (!defined($leakCount)) { + reportError("Could not parse leak count reported by leaks tool."); + return; + } + + my @leakList = (); + for my $line (@$leaksOutput) { + next if $line =~ /^Process/; + next if $line =~ /^node buffer added/; + + if ($line =~ /^Leak: /) { + my ($address) = ($line =~ /Leak: ([[:xdigit:]x]+)/); + if (!defined($address)) { + reportError("Could not parse Leak address."); + return; + } + + my ($size) = ($line =~ /size=([[:digit:]]+)/); + if (!defined($size)) { + reportError("Could not parse Leak size."); + return; + } + + my ($type) = ($line =~ /'([^']+)'/); #' + if (!defined($type)) { + $type = ""; # The leaks tool sometimes omits the type. + } + + my %leak = ( + "address" => $address, + "size" => $size, + "type" => $type, + "callStack" => "", # The leaks tool sometimes omits the call stack. + "leaksOutput" => $line + ); + push(@leakList, \%leak); + } else { + $leakList[$#leakList]->{"leaksOutput"} .= $line; + if ($line =~ /Call stack:/) { + $leakList[$#leakList]->{"callStack"} = $line; + } + } + } + + if (@leakList != $leakCount) { + my $parsedLeakCount = @leakList; + reportError("Parsed leak count($parsedLeakCount) does not match leak count reported by leaks tool($leakCount)."); + return; + } + + return \@leakList; +} + +sub removeMatchingRecords(\@$\@) +{ + my ($recordList, $key, $regexpList) = @_; + + RECORD: for (my $i = 0; $i < @$recordList;) { + my $record = $recordList->[$i]; + + foreach my $regexp (@$regexpList) { + if ($record->{$key} =~ $regexp) { + splice(@$recordList, $i, 1); + next RECORD; + } + } + + $i++; + } +} + +sub reportError($) +{ + my ($errorMessage) = @_; + + print STDERR basename($0) . ": $errorMessage\n"; +} diff --git a/Tools/Scripts/run-mangleme-tests b/Tools/Scripts/run-mangleme-tests new file mode 100755 index 0000000..10196ef --- /dev/null +++ b/Tools/Scripts/run-mangleme-tests @@ -0,0 +1,176 @@ +#!/usr/bin/perl + +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# A script to semi-automatically run mangleme tests. + +use strict; +use warnings; + +use Cwd; +use File::Spec; +use FindBin; +use Getopt::Long; +use IPC::Open2; + +use lib $FindBin::Bin; +use webkitdirs; + +sub openHTTPDIfNeeded(); +sub closeHTTPD(); +sub runSafariWithMangleme(); + +# Argument handling +my $guardMalloc = ''; +my $httpdPort = 8000; +my $downloadTest; + +GetOptions( + 'guard-malloc|g' => \$guardMalloc, + 'get=s' => \$downloadTest, + 'port=i' => \$httpdPort +); + + +setConfiguration(); +my $productDir = productDir(); + +chdirWebKit(); + +checkFrameworks(); + +mkdir "WebKitBuild/mangleme"; +(system "/usr/bin/make", "-C", "Tools/mangleme") == 0 or die; + +my $httpdOpen = 0; +openHTTPDIfNeeded(); + +if ($downloadTest) { + system "/usr/bin/curl -o ~/Desktop/mangleme$downloadTest.html http://127.0.0.1:$httpdPort/remangle.cgi?$downloadTest"; + print "Saved the test as mangleme$downloadTest.html on the desktop\n"; +} else { + runSafariWithMangleme(); + print "Last generated tests:\n"; + system "grep 'Mangle attempt' /tmp/WebKit/error_log.txt | tail -n -5 | awk ' {print \$4}'"; +} + +closeHTTPD(); + + +sub runSafariWithMangleme() +{ + my $redirectTo; + if (@ARGV) { + $redirectTo = "http://127.0.0.1:$httpdPort/remangle.cgi?$ARGV[0]"; + } else { + $redirectTo = "http://127.0.0.1:$httpdPort/mangle.cgi"; + } + + open REDIRECT_HTML, ">", "/tmp/WebKit/redirect.html" or die; + print REDIRECT_HTML "<html>\n"; + print REDIRECT_HTML " <head>\n"; + print REDIRECT_HTML " <meta http-equiv=\"refresh\" content=\"1;URL=$redirectTo\" />\n"; + print REDIRECT_HTML " <script type=\"text/javascript\">\n"; + print REDIRECT_HTML " document.location = \"$redirectTo\";\n"; + print REDIRECT_HTML " </script>\n"; + print REDIRECT_HTML " </head>\n"; + print REDIRECT_HTML " <body>\n"; + print REDIRECT_HTML " </body>\n"; + print REDIRECT_HTML "</html>\n"; + close REDIRECT_HTML; + + local %ENV; + $ENV{DYLD_INSERT_LIBRARIES} = "/usr/lib/libgmalloc.dylib" if $guardMalloc; + system "Tools/Scripts/run-safari", "-NSOpen", "/tmp/WebKit/redirect.html"; +} + +sub openHTTPDIfNeeded() +{ + return if $httpdOpen; + + mkdir "/tmp/WebKit"; + + if (-f "/tmp/WebKit/httpd.pid") { + my $oldPid = `cat /tmp/WebKit/httpd.pid`; + chomp 
$oldPid; + if (0 != kill 0, $oldPid) { + print "\nhttpd is already running: pid $oldPid, killing...\n"; + kill 15, $oldPid; + + my $retryCount = 20; + while ((0 != kill 0, $oldPid) && $retryCount) { + sleep 1; + --$retryCount; + } + + die "Timed out waiting for httpd to quit" unless $retryCount; + } + } + + my $testDirectory = getcwd() . "/LayoutTests"; + my $manglemeDirectory = getcwd() . "/WebKitBuild/mangleme"; + my $httpdPath = "/usr/sbin/httpd"; + my $httpdConfig = "$testDirectory/http/conf/httpd.conf"; + $httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; + my $documentRoot = "$manglemeDirectory"; + my $typesConfig = "$testDirectory/http/conf/mime.types"; + my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; + my $listen = "127.0.0.1:$httpdPort"; + + open2(\*HTTPDIN, \*HTTPDOUT, $httpdPath, + "-f", "$httpdConfig", + "-C", "DocumentRoot \"$documentRoot\"", + "-C", "Listen $listen", + "-c", "TypesConfig \"$typesConfig\"", + "-c", "CustomLog \"/tmp/WebKit/access_log.txt\" common", + "-c", "ErrorLog \"/tmp/WebKit/error_log.txt\"", + "-c", "SSLCertificateFile \"$sslCertificate\"", + # Apache wouldn't run CGIs with permissions==700 otherwise + "-c", "User \"#$<\""); + + my $retryCount = 20; + while (system("/usr/bin/curl -q --silent --stderr - --output " . File::Spec->devnull() . " $listen") && $retryCount) { + sleep 1; + --$retryCount; + } + + die "Timed out waiting for httpd to start" unless $retryCount; + + $httpdOpen = 1; +} + +sub closeHTTPD() +{ + return if !$httpdOpen; + + close HTTPDIN; + close HTTPDOUT; + + kill 15, `cat /tmp/WebKit/httpd.pid` if -f "/tmp/WebKit/httpd.pid"; + + $httpdOpen = 0; +} diff --git a/Tools/Scripts/run-minibrowser b/Tools/Scripts/run-minibrowser new file mode 100755 index 0000000..c2fd412 --- /dev/null +++ b/Tools/Scripts/run-minibrowser @@ -0,0 +1,38 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for launching the WebKit2 MiniBrowser. 
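A note on openHTTPDIfNeeded() in run-mangleme-tests above: readiness is detected simply by retrying curl until it exits successfully or the retry budget runs out. A minimal standalone version of that polling pattern (hypothetical address; nothing is actually started here):

#!/usr/bin/perl -w
use strict;
use File::Spec;

my $listen = "127.0.0.1:8000";    # hypothetical address to poll
my $retryCount = 20;

# system() returns 0 (false) once something answers on $listen.
while (system("/usr/bin/curl -q --silent --stderr - --output " . File::Spec->devnull() . " $listen") && $retryCount) {
    sleep 1;
    --$retryCount;
}

die "Timed out waiting for the server to start\n" unless $retryCount;
print "Server is answering on $listen\n";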
+ +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(runMiniBrowser()); diff --git a/Tools/Scripts/run-pageloadtest b/Tools/Scripts/run-pageloadtest new file mode 100755 index 0000000..ad6daa1 --- /dev/null +++ b/Tools/Scripts/run-pageloadtest @@ -0,0 +1,92 @@ +#!/usr/bin/perl + +# Copyright (C) 2006 Eric Seidel (eric@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run the WebKit Open Source Project page load tests (PLTs). + +# Run all the tests passed in on the command line. + +use strict; +use warnings; + +use File::Basename; +use File::Spec; +use FindBin; +use Getopt::Long; + +use lib $FindBin::Bin; +use webkitdirs; + +# Argument handling +my $testName = 'svg'; +my $showHelp = 0; + +my $usage = + "Usage: " . basename($0) . "[options] testName\n" . + " --help Show this help message\n"; + +my $getOptionsResult = GetOptions('help' => \$showHelp); + +if (!$getOptionsResult || $showHelp) { + print STDERR $usage; + exit 1; +} + +$testName = shift @ARGV if (@ARGV); + +my $safariExecutablePath = safariPath(); +my $safariResourcePath = File::Spec->catdir(dirname(dirname($safariExecutablePath)), "Resources"); + +# Check to see that all the frameworks are built. +checkFrameworks(); + +chdirWebKit(); + +if ($testName eq 'svg') { + my $suiteFile = "PageLoadTests/$testName/$testName.pltsuite"; + my $webkitPath = sourceDir(); + `cat "$suiteFile" | perl -pe 's|WEBKIT_PATH|$webkitPath|' > $safariResourcePath/$testName.pltsuite` +} + +die "Please copy ${testName}.pltsuite to ${safariResourcePath}/${testName}.pltsuite" + if (! -f "${safariResourcePath}/${testName}.pltsuite"); + +setConfiguration(); + +my $productDir = productDir(); + +# Set up DYLD_FRAMEWORK_PATH to point to the product directory. 
+print "Starting Safari with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n"; +$ENV{DYLD_FRAMEWORK_PATH} = $productDir; +$ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + +my @testCommands = ('activate'); +# Autovicki would clear history, we skip that here as this is likely an active user account +@testCommands = (@testCommands, ("run $testName", 'emptyCache', 'wait 30')); +@testCommands = (@testCommands, (("run $testName", 'wait 10') x 3)); +my $testCommandsString = join('; ', @testCommands); +exec $safariExecutablePath, '--test-commands', $testCommandsString or die; diff --git a/Tools/Scripts/run-qtwebkit-tests b/Tools/Scripts/run-qtwebkit-tests new file mode 100644 index 0000000..373de0a --- /dev/null +++ b/Tools/Scripts/run-qtwebkit-tests @@ -0,0 +1,358 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +#Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies) + +#This library is free software; you can redistribute it and/or +#modify it under the terms of the GNU Library General Public +#License as published by the Free Software Foundation; either +#version 2 of the License, or (at your option) any later version. + +#This library is distributed in the hope that it will be useful, +#but WITHOUT ANY WARRANTY; without even the implied warranty of +#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +#Library General Public License for more details. + +#You should have received a copy of the GNU Library General Public License +#along with this library; see the file COPYING.LIB. If not, write to +#the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, +#Boston, MA 02110-1301, USA. + +from __future__ import with_statement + +import sys +import os +import os.path +import re +import logging +from subprocess import Popen, PIPE, STDOUT +from optparse import OptionParser + + +class Log(object): + def __init__(self, name): + self._log = logging.getLogger(name) + self.debug = self._log.debug + self.warn = self._log.warn + self.error = self._log.error + self.exception = self._log.exception + self.info = self._log.info + + +class Options(Log): + """ Option manager. It parses and checks script's parameters, sets an internal variable. """ + + def __init__(self, args): + Log.__init__(self, "Options") + log = self._log + opt = OptionParser("%prog [options] PathToSearch.\nTry -h or --help.") + opt.add_option("-j", "--parallel-level", action="store", type="int", + dest="parallel_level", default=None, + help="Number of parallel processes executing the Qt's tests. Default: cpu count.") + opt.add_option("-v", "--verbose", action="store", type="int", + dest="verbose", default=2, + help="Verbose level (0 - quiet, 1 - errors only, 2 - infos and warnings, 3 - debug information). Default: %default.") + opt.add_option("", "--tests-options", action="store", type="string", + dest="tests_options", default="", + help="Parameters passed to Qt's tests (for example '-eventdelay 123').") + opt.add_option("-o", "--output-file", action="store", type="string", + dest="output_file", default="/tmp/qtwebkittests.html", + help="File where results will be stored. The file will be overwritten. Default: %default.") + opt.add_option("-b", "--browser", action="store", dest="browser", + default="xdg-open", + help="Browser in which results will be opened. 
Default %default.") + opt.add_option("", "--do-not-open-results", action="store_false", + dest="open_results", default=True, + help="The results shouldn't pop-up in a browser automatically") + opt.add_option("-d", "--developer-mode", action="store_true", + dest="developer", default=False, + help="Special mode for debugging. In general it simulates human behavior, running all autotests. In the mode everything is executed synchronously, no html output will be generated, no changes or transformation will be applied to stderr or stdout. In this mode options; parallel-level, output-file, browser and do-not-open-results will be ignored.") + + self._o, self._a = opt.parse_args(args) + verbose = self._o.verbose + if verbose == 0: + logging.basicConfig(level=logging.CRITICAL,) + elif verbose == 1: + logging.basicConfig(level=logging.ERROR,) + elif verbose == 2: + logging.basicConfig(level=logging.INFO,) + elif verbose == 3: + logging.basicConfig(level=logging.DEBUG,) + else: + logging.basicConfig(level=logging.INFO,) + log.warn("Bad verbose level, switching to default.") + try: + if not os.path.exists(self._a[0]): + raise Exception("Given path doesn't exist.") + if len(self._a) > 1: + raise IndexError("Only one directory could be provided.") + self._o.path = self._a[0] + except IndexError: + log.error("Bad usage. Please try -h or --help.") + sys.exit(1) + except Exception: + log.error("Path '%s' doesn't exist", self._a[0]) + sys.exit(2) + if self._o.developer: + if not self._o.parallel_level is None: + log.warn("Developer mode sets parallel-level option to one.") + self._o.parallel_level = 1 + self._o.open_results = False + + def __getattr__(self, attr): + """ Maps all options properties into this object (remove one level of indirection). """ + return getattr(self._o, attr) + + +def run_test(args): + """ Runs one given test. + args should contain a tuple with 3 elements; + TestSuiteResult containing full file name of an autotest executable. + str with options that should be passed to the autotest executable + bool if true then the stdout will be buffered and separated from the stderr, if it is false + then the stdout and the stderr will be merged together and left unbuffered (the TestSuiteResult output will be None). + """ + log = logging.getLogger("Exec") + test_suite, options, buffered = args + try: + log.info("Running... %s", test_suite.test_file_name()) + if buffered: + tst = Popen(test_suite.test_file_name() + options, stdout=PIPE, stderr=None, shell=True) + else: + tst = Popen(test_suite.test_file_name() + options, stdout=None, stderr=STDOUT, shell=True) + except OSError, e: + log.exception("Can't open an autotest file: '%s'. Skipping the test...", e.filename) + else: + test_suite.set_output(tst.communicate()[0]) # takes stdout only, in developer mode it would be None. + log.info("Finished %s", test_suite.test_file_name()) + return test_suite + + +class TestSuiteResult(object): + """ Keeps information about a test. """ + + def __init__(self): + self._output = None + self._test_file_name = None + + def set_output(self, xml): + if xml: + self._output = xml.strip() + + def output(self): + return self._output + + def set_test_file_name(self, file_name): + self._test_file_name = file_name + + def test_file_name(self): + return self._test_file_name + + +class Main(Log): + """ The main script. All real work is done in run() method. 
""" + + def __init__(self, options): + Log.__init__(self, "Main") + self._options = options + if options.parallel_level > 1 or options.parallel_level is None: + try: + from multiprocessing import Pool + except ImportError: + self.warn("Import Error: the multiprocessing module couldn't be loaded (may be lack of python-multiprocessing package?). The Qt autotests will be executed one by one.") + options.parallel_level = 1 + if options.parallel_level == 1: + + class Pool(object): + """ A hack, created to avoid problems with multiprocessing module, this class is single thread replacement for the multiprocessing.Pool class. """ + def __init__(self, processes): + pass + + def imap_unordered(self, func, files): + return map(func, files) + + def map(self, func, files): + return map(func, files) + + self._Pool = Pool + + def run(self): + """ Find && execute && publish results of all test. "All in one" function. """ + self.debug("Searching executables...") + tests_executables = self.find_tests_paths(self._options.path) + self.debug("Found: %s", len(tests_executables)) + self.debug("Executing tests...") + results = self.run_tests(tests_executables) + if not self._options.developer: + self.debug("Transforming...") + transformed_results = self.transform(results) + self.debug("Publishing...") + self.announce_results(transformed_results) + + def find_tests_paths(self, path): + """ Finds all tests executables inside the given path. """ + executables = [] + for root, dirs, files in os.walk(path): + # Check only for a file that name starts from 'tst_' and that we can execute. + filtered_path = filter(lambda w: w.startswith('tst_') and os.access(os.path.join(root, w), os.X_OK), files) + filtered_path = map(lambda w: os.path.join(root, w), filtered_path) + for file_name in filtered_path: + r = TestSuiteResult() + r.set_test_file_name(file_name) + executables.append(r) + return executables + + def run_tests(self, files): + """ Executes given files by using a pool of workers. """ + workers = self._Pool(processes=self._options.parallel_level) + # to each file add options. + self.debug("Using %s the workers pool, number of workers %i", repr(workers), self._options.parallel_level) + package = map(lambda w: [w, self._options.tests_options, not self._options.developer], files) + self.debug("Generated packages for workers: %s", repr(package)) + results = workers.map(run_test, package) # Collects results. + return results + + def transform(self, results): + """ Transforms list of the results to specialized versions. """ + stdout = self.convert_to_stdout(results) + html = self.convert_to_html(results) + return {"stdout": stdout, "html": html} + + def announce_results(self, results): + """ Shows the results. """ + self.announce_results_stdout(results['stdout']) + self.announce_results_html(results['html']) + + def announce_results_stdout(self, results): + """ Show the results by printing to the stdout.""" + print(results) + + def announce_results_html(self, results): + """ Shows the result by creating a html file and calling a web browser to render it. """ + with file(self._options.output_file, 'w') as f: + f.write(results) + if self._options.open_results: + Popen(self._options.browser + " " + self._options.output_file, stdout=None, stderr=None, shell=True) + + def convert_to_stdout(self, results): + """ Converts results, that they could be nicely presented in the stdout. """ + # Join all results into one piece. 
+        txt = "\n\n".join(map(lambda w: w.output(), results))
+        # Find total count of failed, skipped and passed tests.
+        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
+        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
+        totals = map(str, totals)
+        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped"
+        # Add a summary.
+        txt += '\n\n\n' + '*' * 70
+        txt += "\n**" + ("TOTALS: " + totals).center(66) + '**'
+        txt += '\n' + '*' * 70 + '\n'
+        return txt
+
+    def convert_to_html(self, results):
+        """ Converts results, that they could showed as a html page. """
+        # Join results into one piece.
+        txt = "\n\n".join(map(lambda w: w.output(), results))
+        txt = txt.replace('&', '&amp;').replace('<', "&lt;").replace('>', "&gt;")
+        # Add a color and a style.
+        txt = re.sub(r"([* ]+(Finished)[ a-z_A-Z0-9]+[*]+)",
+            lambda w: r"",
+            txt)
+        txt = re.sub(r"([*]+[ a-z_A-Z0-9]+[*]+)",
+            lambda w: "<case class='good'><br><br><b>" + w.group(0) + r"</b></case>",
+            txt)
+        txt = re.sub(r"(Config: Using QTest library)((.)+)",
+            lambda w: "\n<case class='good'><br><i>" + w.group(0) + r"</i> ",
+            txt)
+        txt = re.sub(r"\n(PASS)((.)+)",
+            lambda w: "</case>\n<case class='good'><br><status class='pass'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(FAIL!)((.)+)",
+            lambda w: "</case>\n<case class='bad'><br><status class='fail'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(XPASS)((.)+)",
+            lambda w: "</case>\n<case class='bad'><br><status class='xpass'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(XFAIL)((.)+)",
+            lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(SKIP)((.)+)",
+            lambda w: "</case>\n<case class='good'><br><status class='xfail'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(QWARN)((.)+)",
+            lambda w: "</case>\n<case class='bad'><br><status class='warn'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(RESULT)((.)+)",
+            lambda w: "</case>\n<case class='good'><br><status class='benchmark'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(QFATAL)((.)+)",
+            lambda w: "</case>\n<case class='bad'><br><status class='crash'>" + w.group(1) + r"</status>" + w.group(2),
+            txt)
+        txt = re.sub(r"\n(Totals:)([0-9', a-z]*)",
+            lambda w: "</case>\n<case class='good'><br><b>" + w.group(1) + r"</b>" + w.group(2) + "</case>",
+            txt)
+        # Find total count of failed, skipped and passed tests.
+        totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
+        totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
+        totals = map(str, totals)
+        totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped."
+        # Create a header of the html source.
+        txt = """
+        <html>
+        <head>
+            <script>
+            function init() {
+                // Try to find the right styleSheet (this document could be embedded in an other html doc)
+                for (i = document.styleSheets.length - 1; i >= 0; --i) {
+                    if (document.styleSheets[i].cssRules[0].selectorText == "case.good") {
+                        resultStyleSheet = i;
+                        return;
+                    }
+                }
+                // The styleSheet hasn't been found, but it should be the last one.
+ resultStyleSheet = document.styleSheets.length - 1; + } + + function hide() { + document.styleSheets[resultStyleSheet].cssRules[0].style.display='none'; + } + + function show() { + document.styleSheets[resultStyleSheet].cssRules[0].style.display=''; + } + + </script> + <style type="text/css"> + case.good {color:black} + case.bad {color:black} + status.pass {color:green} + status.crash {color:red} + status.fail {color:red} + status.xpass {color:663300} + status.xfail {color:004500} + status.benchmark {color:000088} + status.warn {color:orange} + status.crash {color:red; text-decoration:blink; background-color:black} + </style> + </head> + <body onload="init()"> + <center> + <h1>Qt's autotests results</h1>%(totals)s<br> + <hr> + <form> + <input type="button" value="Show failures only" onclick="hide()"/> + + <input type="button" value="Show all" onclick="show()"/> + </form> + </center> + <hr> + %(results)s + </body> + </html>""" % {"totals": totals, "results": txt} + return txt + + +if __name__ == '__main__': + options = Options(sys.argv[1:]) + main = Main(options) + main.run() diff --git a/Tools/Scripts/run-safari b/Tools/Scripts/run-safari new file mode 100755 index 0000000..d850a4a --- /dev/null +++ b/Tools/Scripts/run-safari @@ -0,0 +1,41 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for WebKit Open Source Project. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +# Check to see that all the frameworks are built. +checkFrameworks(); + +exit exitStatus(runSafari()); diff --git a/Tools/Scripts/run-sunspider b/Tools/Scripts/run-sunspider new file mode 100755 index 0000000..15894b0 --- /dev/null +++ b/Tools/Scripts/run-sunspider @@ -0,0 +1,129 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007 Apple Inc. All rights reserved. 
+# Copyright (C) 2007 Eric Seidel <eric@webkit.org> +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +# determine configuration, but default to "Release" instead of last-used configuration +setConfiguration("Release"); +setConfiguration(); +my $configuration = configuration(); + +my $root; +my $testRuns = 10; # This number may be different from what sunspider defaults to (that's OK) +my $runShark = 0; +my $runShark20 = 0; +my $runSharkCache = 0; +my $suite = ""; +my $ubench = 0; +my $v8 = 0; +my $parseonly = 0; +my $setBaseline = 0; +my $showHelp = 0; +my $testsPattern; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] [options to pass to build system] + --help Show this help message + --set-baseline Set baseline for future comparisons + --root Path to root tools build + --runs Number of times to run tests (default: $testRuns) + --tests Only run tests matching provided pattern + --shark Sample with the Mac OS X "Shark" performance testing tool (implies --runs=1) + --shark20 Like --shark, but with a 20 microsecond sampling interval + --shark-cache Like --shark, but performs a L2 cache-miss sample instead of time sample + --suite Select a specific benchmark suite. The default is sunspider-0.9.1 + --ubench Use microbenchmark suite instead of regular tests. Same as --suite=ubench + --v8-suite Use the V8 benchmark suite. Same as --suite=v8-v4 + --parse-only Use the parse-only benchmark suite. Same as --suite=parse-only +EOF + +GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurationProductDir(Cwd::abs_path($root)); }, + 'runs=i' => \$testRuns, + 'set-baseline' => \$setBaseline, + 'shark' => \$runShark, + 'shark20' => \$runShark20, + 'shark-cache' => \$runSharkCache, + 'suite=s' => \$suite, + 'ubench' => \$ubench, + 'v8' => \$v8, + 'parse-only' => \$parseonly, + 'tests=s' => \$testsPattern, + 'help' => \$showHelp); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +sub buildJSC +{ + if (!defined($root)){ + push(@ARGV, "--" . 
$configuration); + + chdirWebKit(); + my $buildResult = system currentPerlPath(), "Tools/Scripts/build-jsc", @ARGV; + if ($buildResult) { + print STDERR "Compiling jsc failed!\n"; + exit exitStatus($buildResult); + } + } +} + +sub setupEnvironmentForExecution($) +{ + my ($productDir) = @_; + print "Starting sunspider with DYLD_FRAMEWORK_PATH set to point to built JavaScriptCore in $productDir.\n"; + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + # FIXME: Other platforms may wish to augment this method to use LD_LIBRARY_PATH, etc. +} + +buildJSC(); + +chdirWebKit(); +chdir("SunSpider"); + +my $productDir = jscProductDir(); + +setupEnvironmentForExecution($productDir); +my @args = ("--shell", jscPath($productDir), "--runs", $testRuns); +# This code could be removed if we chose to pass extra args to sunspider instead of Xcode +push @args, "--set-baseline" if $setBaseline; +push @args, "--shark" if $runShark; +push @args, "--shark20" if $runShark20; +push @args, "--shark-cache" if $runSharkCache; +push @args, "--suite=${suite}" if $suite; +push @args, "--ubench" if $ubench; +push @args, "--v8" if $v8; +push @args, "--parse-only" if $parseonly; +push @args, "--tests", $testsPattern if $testsPattern; + +exec currentPerlPath(), "./sunspider", @args; diff --git a/Tools/Scripts/run-test-runner b/Tools/Scripts/run-test-runner new file mode 100755 index 0000000..98fa3b6 --- /dev/null +++ b/Tools/Scripts/run-test-runner @@ -0,0 +1,35 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS +# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +# THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for launching the WebKit2 WebKitTestRunner. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(runWebKitTestRunner()); diff --git a/Tools/Scripts/run-test-webkit-api b/Tools/Scripts/run-test-webkit-api new file mode 100755 index 0000000..dfd85d5 --- /dev/null +++ b/Tools/Scripts/run-test-webkit-api @@ -0,0 +1,38 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for launching the WebKit2 estWebKitAPI. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); + +exit exitStatus(runTestWebKitAPI()); diff --git a/Tools/Scripts/run-webkit-app b/Tools/Scripts/run-webkit-app new file mode 100755 index 0000000..452c44c --- /dev/null +++ b/Tools/Scripts/run-webkit-app @@ -0,0 +1,50 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Simplified "run" script for WebKit Open Source Project. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +setConfiguration(); +my $productDir = productDir(); + +die "Did not specify an application to open (e.g. 
run-webkit-app AppName).\n" unless length($ARGV[0]) > 0; + +# Check to see that all the frameworks are built. +checkFrameworks(); + +# Set up DYLD_FRAMEWORK_PATH to point to the product directory. +print "Start $ARGV[0] with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n"; +$ENV{DYLD_FRAMEWORK_PATH} = $productDir; +$ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + +unshift(@ARGV, "-a"); +exec "open", @ARGV; diff --git a/Tools/Scripts/run-webkit-httpd b/Tools/Scripts/run-webkit-httpd new file mode 100755 index 0000000..9ea2551 --- /dev/null +++ b/Tools/Scripts/run-webkit-httpd @@ -0,0 +1,96 @@ +#!/usr/bin/perl + +# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run Apache with the same configuration as used in http layout tests. + +use strict; +use warnings; + +use Cwd; +use File::Path; +use File::Basename; +use Getopt::Long; +use FindBin; + +use lib $FindBin::Bin; +use webkitperl::httpd; +use webkitdirs; + +# FIXME: Dynamic HTTP-port configuration in this file is wrong. The various +# apache config files in LayoutTests/http/config govern the port numbers. +# Dynamic configuration as-written will also cause random failures in +# an IPv6 environment. See https://bugs.webkit.org/show_bug.cgi?id=37104. +# Argument handling +my $httpdPort = 8000; +my $allInterfaces = 0; +my $showHelp; + +my $result = GetOptions( + 'all-interfaces|a' => \$allInterfaces, + 'help|h' => \$showHelp, + 'port=i' => \$httpdPort, +); + +if (!$result || @ARGV || $showHelp) { + print "Usage: " . basename($0) . 
" [options]\n"; + print " -a|--all-interfaces Bind to all interfaces\n"; + print " -h|--help Show this help message\n"; + print " -p|--port NNNN Bind to port NNNN\n"; + exit 1; +} + +setConfiguration(); +my $productDir = productDir(); +chdirWebKit(); +my $testDirectory = File::Spec->catfile(getcwd(), "LayoutTests"); +my $listen = "127.0.0.1:$httpdPort"; +$listen = "$httpdPort" if ($allInterfaces); + +if ($allInterfaces) { + print "Starting httpd on port $httpdPort (all interfaces)...\n"; +} else { + print "Starting httpd on <http://$listen/>...\n"; +} +setShouldWaitForUserInterrupt(); +print "Press Ctrl+C to stop it.\n\n"; + +my @args = ( + "-C", "Listen $listen", + "-c", "CustomLog |/usr/bin/tee common", + "-c", "ErrorLog |/usr/bin/tee", + # Run in single-process mode, do not detach from the controlling terminal. + "-X", + # Disable Keep-Alive support. Makes testing in multiple browsers easier (no need to wait + # for another browser's connection to expire). + "-c", "KeepAlive 0" +); + +my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory); +@args = (@defaultArgs, @args); +openHTTPD(@args); diff --git a/Tools/Scripts/run-webkit-nightly.cmd b/Tools/Scripts/run-webkit-nightly.cmd new file mode 100755 index 0000000..924178a --- /dev/null +++ b/Tools/Scripts/run-webkit-nightly.cmd @@ -0,0 +1,10 @@ +@echo off +set script=%TMP%\run-webkit-nightly2.cmd +set vsvars=%VS80COMNTOOLS%\vsvars32.bat +if exist "%vsvars%" ( + copy "%vsvars%" "%script%" +) else ( + del "%script%" +) +FindSafari.exe %1 /printSafariLauncher >> "%script%" +call "%script%" diff --git a/Tools/Scripts/run-webkit-tests b/Tools/Scripts/run-webkit-tests new file mode 100755 index 0000000..6b530e1 --- /dev/null +++ b/Tools/Scripts/run-webkit-tests @@ -0,0 +1,84 @@ +#!/usr/bin/perl +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This file is a temporary hack. 
+# It will be removed as soon as all platforms are are ready to move to +# new-run-webkit-tests and we can then update the buildbots to explicitly +# call old-run-webkit-tests for any platforms which will never support +# a Python run-webkit-tests. + +# This is intentionally written in Perl to guarantee support on +# the same set of platforms as old-run-webkit-tests currently supports. +# The buildbot master.cfg also currently passes run-webkit-tests to +# perl directly instead of executing it in a shell. + +use strict; +use warnings; + +use File::Spec; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +sub runningOnBuildBot() +{ + # This is a hack to detect if we're running on the buildbot so we can + # pass --verbose to new-run-webkit-tests. This will be removed when we + # update the buildbot config to call new-run-webkit-tests explicitly. + my %isBuildBotUser = ("apple" => 1, "buildbot" => 1); + return $isBuildBotUser{$ENV{"USER"}}; +} + +sub useNewRunWebKitTests() +{ + # Change this check to control which platforms use + # new-run-webkit-tests by default. + # Example: return runningOnBuildBot() && isLeopard(); + # would enable new-run-webkit-tests on only the leopard buildbots. + return 0; +} + +my $harnessName = "old-run-webkit-tests"; + +if (useNewRunWebKitTests()) { + $harnessName = "new-run-webkit-tests"; + if (runningOnBuildBot()) { + push(@ARGV, "--verbose"); + # old-run-webkit-tests treats --results-directory as $CWD relative. + # new-run-webkit-tests treats --results-directory as build directory relative. + # Override the passed in --results-directory by appending a new one + # (later arguments override earlier ones in Python's optparse). + push(@ARGV, "--results-directory"); + # The buildbot always uses $SRCDIR/layout-test-results, hardcode it: + push(@ARGV, sourceDir() . "/layout-test-results"); + } +} + +my $harnessPath = File::Spec->catfile(relativeScriptsDir(), $harnessName); +exec $harnessPath ($harnessPath, @ARGV) or die "Failed to execute $harnessPath"; diff --git a/Tools/Scripts/run-webkit-websocketserver b/Tools/Scripts/run-webkit-websocketserver new file mode 100755 index 0000000..d030951 --- /dev/null +++ b/Tools/Scripts/run-webkit-websocketserver @@ -0,0 +1,88 @@ +#!/usr/bin/perl +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to run Web Socket server. + +use strict; +use warnings; + +use File::Spec; +use FindBin; +use IPC::Open2; + +use lib $FindBin::Bin; +use webkitdirs; + +sub closeWebSocketServer(); +sub openWebSocketServer(); + +my $webSocketPort = 8880; + +my $srcDir = sourceDir(); +my $layoutTestsName = "$srcDir/LayoutTests"; +my $testDirectory = File::Spec->rel2abs($layoutTestsName); +my $webSocketServerPidFile = "$testDirectory/websocket.pid"; + + +print "Starting Web Socket server...\n"; +openWebSocketServer(); +print "Started.\n"; +print "Hit [ENTER] to stop it."; +<STDIN>; +print "Stopping Web Socket server...\n"; +closeWebSocketServer(); +print "Stopped.\n"; +exit 0; + +sub openWebSocketServer() +{ + my $webSocketHandlerDir = "$testDirectory"; + + my @args = ( + "$srcDir/Tools/Scripts/new-run-webkit-websocketserver", + "--server", "start", + "--port", "$webSocketPort", + "--root", "$webSocketHandlerDir", + "--pidfile", "$webSocketServerPidFile" + ); + system "/usr/bin/python", @args; +} + +sub closeWebSocketServer() +{ + my @args = ( + "$srcDir/Tools/Scripts/new-run-webkit-websocketserver", + "--server", "stop", + "--pidfile", "$webSocketServerPidFile" + ); + system "/usr/bin/python", @args; + unlink "$webSocketServerPidFile"; +} + + diff --git a/Tools/Scripts/set-webkit-configuration b/Tools/Scripts/set-webkit-configuration new file mode 100755 index 0000000..4992256 --- /dev/null +++ b/Tools/Scripts/set-webkit-configuration @@ -0,0 +1,79 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] + --32-bit Set the default architecture to 32-bit + --64-bit Set the default architecture to 64-bit + --debug Set the default configuration to debug + --release Set the default configuration to release +EOF + +my $configuration = passedConfiguration(); +my $architecture = passedArchitecture(); + +if (!$architecture) { + # Handle --64-bit explicitly here, as we don't want our other scripts to accept it + for my $i (0 .. $#ARGV) { + my $opt = $ARGV[$i]; + if ($opt =~ /^--64-bit$/i) { + splice(@ARGV, $i, 1); + $architecture = 'x86_64'; + } + } +} + +if (!$configuration && !$architecture) { + print STDERR $usage; + exit 1; +} + +my $baseProductDir = baseProductDir(); +system "mkdir", "-p", "$baseProductDir"; + +if ($configuration) { + open CONFIGURATION, ">", "$baseProductDir/Configuration" or die; + print CONFIGURATION $configuration; + close CONFIGURATION; +} + +if ($architecture) { + if ($architecture ne "x86_64") { + open ARCHITECTURE, ">", "$baseProductDir/Architecture" or die; + print ARCHITECTURE $architecture; + close ARCHITECTURE; + } else { + unlink "$baseProductDir/Architecture"; + } +} diff --git a/Tools/Scripts/sort-Xcode-project-file b/Tools/Scripts/sort-Xcode-project-file new file mode 100755 index 0000000..705b41d --- /dev/null +++ b/Tools/Scripts/sort-Xcode-project-file @@ -0,0 +1,172 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007, 2008, 2009, 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Script to sort "children" and "files" sections in Xcode project.pbxproj files + +use strict; + +use File::Basename; +use File::Spec; +use File::Temp qw(tempfile); +use Getopt::Long; + +sub sortChildrenByFileName($$); +sub sortFilesByFileName($$); + +# Files (or products) without extensions +my %isFile = map { $_ => 1 } qw( + create_hash_table + jsc + minidom + testapi + testjsglue +); + +my $printWarnings = 1; +my $showHelp; + +my $getOptionsResult = GetOptions( + 'h|help' => \$showHelp, + 'w|warnings!' => \$printWarnings, +); + +if (scalar(@ARGV) == 0 && !$showHelp) { + print STDERR "ERROR: No Xcode project files (project.pbxproj) listed on command-line.\n"; + undef $getOptionsResult; +} + +if (!$getOptionsResult || $showHelp) { + print STDERR <<__END__; +Usage: @{[ basename($0) ]} [options] path/to/project.pbxproj [path/to/project.pbxproj ...] + -h|--help show this help message + -w|--[no-]warnings show or suppress warnings (default: show warnings) +__END__ + exit 1; +} + +for my $projectFile (@ARGV) { + if (basename($projectFile) =~ /\.xcodeproj$/) { + $projectFile = File::Spec->catfile($projectFile, "project.pbxproj"); + } + + if (basename($projectFile) ne "project.pbxproj") { + print STDERR "WARNING: Not an Xcode project file: $projectFile\n" if $printWarnings; + next; + } + + # Grab the mainGroup for the project file + my $mainGroup = ""; + open(IN, "< $projectFile") || die "Could not open $projectFile: $!"; + while (my $line = <IN>) { + $mainGroup = $2 if $line =~ m#^(\s*)mainGroup = ([0-9A-F]{24} /\* .+ \*/);$#; + } + close(IN); + + my ($OUT, $tempFileName) = tempfile( + basename($projectFile) . "-XXXXXXXX", + DIR => dirname($projectFile), + UNLINK => 0, + ); + + # Clean up temp file in case of die() + $SIG{__DIE__} = sub { + close(IN); + close($OUT); + unlink($tempFileName); + }; + + my @lastTwo = (); + open(IN, "< $projectFile") || die "Could not open $projectFile: $!"; + while (my $line = <IN>) { + if ($line =~ /^(\s*)files = \(\s*$/) { + print $OUT $line; + my $endMarker = $1 . ");"; + my @files; + while (my $fileLine = <IN>) { + if ($fileLine =~ /^\Q$endMarker\E\s*$/) { + $endMarker = $fileLine; + last; + } + push @files, $fileLine; + } + print $OUT sort sortFilesByFileName @files; + print $OUT $endMarker; + } elsif ($line =~ /^(\s*)children = \(\s*$/) { + print $OUT $line; + my $endMarker = $1 . 
");"; + my @children; + while (my $childLine = <IN>) { + if ($childLine =~ /^\Q$endMarker\E\s*$/) { + $endMarker = $childLine; + last; + } + push @children, $childLine; + } + if ($lastTwo[0] =~ m#^\s+\Q$mainGroup\E = \{$#) { + # Don't sort mainGroup + print $OUT @children; + } else { + print $OUT sort sortChildrenByFileName @children; + } + print $OUT $endMarker; + } else { + print $OUT $line; + } + + push @lastTwo, $line; + shift @lastTwo if scalar(@lastTwo) > 2; + } + close(IN); + close($OUT); + + unlink($projectFile) || die "Could not delete $projectFile: $!"; + rename($tempFileName, $projectFile) || die "Could not rename $tempFileName to $projectFile: $!"; +} + +exit 0; + +sub sortChildrenByFileName($$) +{ + my ($a, $b) = @_; + my $aFileName = $1 if $a =~ /^\s*[A-Z0-9]{24} \/\* (.+) \*\/,$/; + my $bFileName = $1 if $b =~ /^\s*[A-Z0-9]{24} \/\* (.+) \*\/,$/; + my $aSuffix = $1 if $aFileName =~ m/\.([^.]+)$/; + my $bSuffix = $1 if $bFileName =~ m/\.([^.]+)$/; + if ((!$aSuffix && !$isFile{$aFileName} && $bSuffix) || ($aSuffix && !$bSuffix && !$isFile{$bFileName})) { + return !$aSuffix ? -1 : 1; + } + return lc($aFileName) cmp lc($bFileName); +} + +sub sortFilesByFileName($$) +{ + my ($a, $b) = @_; + my $aFileName = $1 if $a =~ /^\s*[A-Z0-9]{24} \/\* (.+) in /; + my $bFileName = $1 if $b =~ /^\s*[A-Z0-9]{24} \/\* (.+) in /; + return lc($aFileName) cmp lc($bFileName); +} diff --git a/Tools/Scripts/split-file-by-class b/Tools/Scripts/split-file-by-class new file mode 100755 index 0000000..b6aeb68 --- /dev/null +++ b/Tools/Scripts/split-file-by-class @@ -0,0 +1,159 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Used for splitting a single file into multiple class files +# Usage: split-class <header file> + +use strict; +use File::Copy; +use FindBin; +use lib $FindBin::Bin; +use SpacingHeuristics; + + +for my $filename (@ARGV) { + + $filename =~ m/^(\w+)\.h$/ or die "Command line args must be .h files.\n"; + my $basename = $1; + + open(OLDFILE, "<", $filename) or die "File does not exist: $filename\n"; + print "Splitting class $filename.{h,cpp}:\n"; + + my $currentClassName = ""; + my $classIndent = ""; + my $fileContent = ""; + my %classDefs = (); + while (my $line = <OLDFILE>) { + if ($currentClassName) { + $classDefs{$currentClassName} .= $line; + if ($line =~ /^$classIndent};\s*$/) { + $currentClassName = ""; + } + } else { + if ($line =~ /^(\s*)class\s+(\w+)\s+[^;]*$/) { + $classIndent = $1; + $currentClassName = $2; + $classDefs{$currentClassName} .= $line; + $fileContent .= "###CLASS###$currentClassName\n"; + } else { + $fileContent .= $line; + } + } + } + close(OLDFILE); + + if (scalar(keys(%classDefs)) == 1) { # degenerate case + my ($classname) = keys(%classDefs); + if (!($classname eq $basename)) { + print "Skipping $filename, already correctly named.\n"; + } else { + print "$filename only includes one class, renaming to $classname.h\n"; + system("svn rm --force $classname.h") if (-r "$classname.h"); + system "svn mv $basename.h $classname.h"; + } + } else { + while (my ($classname, $classDef) = each(%classDefs)) { + if (($classname eq $basename)) { + print "Skipping $filename, already correctly named.\n"; + } else { + print "Using SVN to copy $basename.{h,cpp} to $classname.{h,cpp}\n"; + + system("svn rm --force $classname.h") if (-r "$classname.h"); + system "svn cp $basename.h $classname.h"; + + system("svn rm --force $classname.cpp") if (-r "$classname.cpp"); + system "svn cp $basename.cpp $classname.cpp"; + } + + print "Fixing $classname.h as much as possible.\n"; + open(NEWHEADER, ">", "$classname.h") or die "File does not exist: $filename\n"; + my @lines = split("\n", $fileContent); + foreach my $line (@lines) { + if ($line =~ /^###CLASS###(\w+)/) { + if ($1 eq $classname) { + print NEWHEADER $classDef . "\n"; + } + } else { + print NEWHEADER $line . "\n"; + } + } + close(NEWHEADER); + + print "Fixing $classname.cpp as much as possible.\n"; + copy("$classname.cpp", "$classname.cpp.original"); + open(OLDCPP, "<", "$classname.cpp.original") or die "Failed to copy file for reading: $filename\n"; + open(NEWCPP, ">", "$classname.cpp") or die "File does not exist: $filename\n"; + my $insideMemberFunction = 0; + my $shouldPrintMemberFunction = 0; + resetSpacingHeuristics(); + while (my $line = <OLDCPP>) { + if ($insideMemberFunction) { + if ($shouldPrintMemberFunction) { + print NEWCPP $line; + #setPreviousAllowedLine($line); + } else { + ignoringLine($line); + } + if ($line =~ /^}\s*$/) { + $insideMemberFunction = 0; + } + } elsif ($line =~ /$filename/) { + print NEWCPP "#include \"config.h\"\n"; + print NEWCPP "#include \"$classname.h\"\n"; + } elsif ($line =~ /#include/ || $line =~ /#import/) { + next; # skip includes, they're generally wrong or unecessary anyway. 
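+                    # (The replacement #include "config.h" / #include "$classname.h" pair is
+                    # emitted by the $filename branch above instead.)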
+ } else { + $line =~ s/DOM:://; + $line =~ s/khtml:://; + $line =~ s/namespace DOM/namespace WebCore/; + $line =~ s/namespace khtml/namespace WebCore/; + + if ($line =~ /^(.*?\s+)?(\*|&)?(\w+)::(~)?\w+\s*\(/) { + $insideMemberFunction = 1; + $shouldPrintMemberFunction = ($classname eq $3); + if ($shouldPrintMemberFunction) { + printPendingEmptyLines(*NEWCPP, $line); + print NEWCPP $line; + } + } else { + next if isOnlyWhiteSpace($line); + next if ($line =~ m/------------/); + printPendingEmptyLines(*NEWCPP, $line); + applySpacingHeuristicsAndPrint(*NEWCPP, $line); + } + } + } + close(NEWCPP); + close(OLDCPP); + unlink("$classname.cpp.original"); + } + } + + print "Opening new files...\n"; + system("open " . join(".* ", keys(%classDefs)) . ".*"); +}
\ No newline at end of file diff --git a/Tools/Scripts/sunspider-compare-results b/Tools/Scripts/sunspider-compare-results new file mode 100755 index 0000000..acb2c04 --- /dev/null +++ b/Tools/Scripts/sunspider-compare-results @@ -0,0 +1,133 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2007 Apple Inc. All rights reserved. +# Copyright (C) 2007 Eric Seidel <eric@webkit.org> +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use File::Spec; +use FindBin; +use Getopt::Long qw(:config pass_through); +use lib $FindBin::Bin; +use webkitdirs; +use POSIX; + +# determine configuration, but default to "Release" instead of last-used configuration to match run-sunspider +setConfiguration("Release"); +setConfiguration(); +my $configuration = configuration(); + +my $root; +my $showHelp = 0; +my $suite = ""; +my $ubench = 0; +my $v8 = 0; +my $parseonly = 0; + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] FILE FILE + --help Show this help message + --root Path to root tools build + --suite Select a specific benchmark suite. The default is sunspider-0.9.1 + --ubench Use microbenchmark suite instead of regular tests. Same as --suite=ubench + --v8-suite Use the V8 benchmark suite. Same as --suite=v8-v4 + --parse-only Use the parse-only benchmark suite. Same as --suite=parse-only +EOF + +GetOptions('root=s' => sub { my ($argName, $value) = @_; setConfigurationProductDir(Cwd::abs_path($value)); $root = $value; }, + 'suite=s' => \$suite, + 'ubench' => \$ubench, + 'v8' => \$v8, + 'parse-only' => \$parseonly, + 'help' => \$showHelp); + +if ($showHelp) { + print STDERR $usage; + exit 1; +} + +@ARGV = map { File::Spec->rel2abs($_) } @ARGV; + +sub buildJSC +{ + if (!defined($root)){ + chdirWebKit(); + my $buildResult = system currentPerlPath(), "Tools/Scripts/build-jsc", "--" . $configuration; + if ($buildResult) { + print STDERR "Compiling jsc failed!\n"; + exit WEXITSTATUS($buildResult); + } + } +} + +sub setupEnvironmentForExecution($) +{ + my ($productDir) = @_; + print "Starting sunspider-compare-results with DYLD_FRAMEWORK_PATH set to point to built JavaScriptCore in $productDir.\n"; + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + # FIXME: Other platforms may wish to augment this method to use LD_LIBRARY_PATH, etc. 
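+    # (On an ELF-based port that would presumably look something like
+    #     $ENV{LD_LIBRARY_PATH} = "$productDir:" . ($ENV{LD_LIBRARY_PATH} || "");
+    # but only the DYLD_FRAMEWORK_PATH case is wired up here.)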
+} + +sub pathToBuiltJSC($) +{ + my ($productDir) = @_; + my $jscName = "jsc"; + $jscName .= "_debug" if configurationForVisualStudio() eq "Debug_All"; + return "$productDir/$jscName"; +} + +sub pathToSystemJSC() +{ + my $path = "/System/Library/Frameworks/JavaScriptCore.framework/Resources/jsc"; + if (-f $path) { + return $path; + } + return undef; +} + +sub pathToJSC() +{ + my $path = pathToSystemJSC(); + return $path if defined $path; + + buildJSC(); + + my $productDir = jscProductDir(); + + setupEnvironmentForExecution($productDir); + return pathToBuiltJSC($productDir); +} + +my $jscPath = pathToJSC(); +chdirWebKit(); +chdir("SunSpider"); + +my @args = ("--shell", $jscPath); +# This code could be removed if we chose to pass extra args to sunspider instead of Xcode +push @args, "--suite=${suite}" if $suite; +push @args, "--ubench" if $ubench; +push @args, "--v8" if $v8; +push @args, "--parse-only" if $parseonly; + +exec currentPerlPath(), "./sunspider-compare-results", @args, @ARGV; diff --git a/Tools/Scripts/svn-apply b/Tools/Scripts/svn-apply new file mode 100755 index 0000000..cab7fb4 --- /dev/null +++ b/Tools/Scripts/svn-apply @@ -0,0 +1,454 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. +# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au> +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "patch" script for WebKit Open Source Project, used to apply patches. + +# Differences from invoking "patch -p0": +# +# Handles added files (does a svn add with logic to handle local changes). +# Handles added directories (does a svn add). +# Handles removed files (does a svn rm with logic to handle local changes). +# Handles removed directories--those with no more files or directories left in them +# (does a svn rm). +# Has mode where it will roll back to svn version numbers in the patch file so svn +# can do a 3-way merge. 
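+#   (concretely, the --merge path below first runs "svn update -r <revision> <file>" for
+#   every file whose source revision is recorded in the patch, and only then applies the hunks)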
+# Paths from Index: lines are used rather than the paths on the patch lines, which +# makes patches generated by "cvs diff" work (increasingly unimportant since we +# use Subversion now). +# ChangeLog patches use --fuzz=3 to prevent rejects. +# Handles binary files (requires patches made by svn-create-patch). +# Handles copied and moved files (requires patches made by svn-create-patch). +# Handles git-diff patches (without binary changes) created at the top-level directory +# +# Missing features: +# +# Handle property changes. +# Handle copied and moved directories (would require patches made by svn-create-patch). +# When doing a removal, check that old file matches what's being removed. +# Notice a patch that's being applied at the "wrong level" and make it work anyway. +# Do a dry run on the whole patch and don't do anything if part of the patch is +# going to fail (probably too strict unless we exclude ChangeLog). +# Handle git-diff patches with binary delta + +use strict; +use warnings; + +use Digest::MD5; +use File::Basename; +use File::Spec; +use Getopt::Long; +use MIME::Base64; +use POSIX qw(strftime); + +use FindBin; +use lib $FindBin::Bin; +use VCSUtils; + +sub addDirectoriesIfNeeded($); +sub applyPatch($$;$); +sub checksum($); +sub handleBinaryChange($$); +sub handleGitBinaryChange($$); +sub isDirectoryEmptyForRemoval($); +sub patch($); +sub removeDirectoriesIfNeeded(); + +# These should be replaced by an scm class/module: +sub scmKnowsOfFile($); +sub scmCopy($$); +sub scmAdd($); +sub scmRemove($); + +my $merge = 0; +my $showHelp = 0; +my $reviewer; +my $force = 0; + +my $optionParseSuccess = GetOptions( + "merge!" => \$merge, + "help!" => \$showHelp, + "reviewer=s" => \$reviewer, + "force!" => \$force +); + +if (!$optionParseSuccess || $showHelp) { + print STDERR basename($0) . " [-h|--help] [--force] [-m|--merge] [-r|--reviewer name] patch1 [patch2 ...]\n"; + exit 1; +} + +my %removeDirectoryIgnoreList = ( + '.' => 1, + '..' => 1, + '.git' => 1, + '.svn' => 1, + '_svn' => 1, +); + +my $epochTime = time(); # This is used to set the date in ChangeLog files. +my $globalExitStatus = 0; + +my $repositoryRootPath = determineVCSRoot(); + +my %checkedDirectories; + +# Need to use a typeglob to pass the file handle as a parameter, +# otherwise get a bareword error. +my @diffHashRefs = parsePatch(*ARGV); + +print "Parsed " . @diffHashRefs . " diffs from patch file(s).\n"; + +my $preparedPatchHash = prepareParsedPatch($force, @diffHashRefs); + +my @copyDiffHashRefs = @{$preparedPatchHash->{copyDiffHashRefs}}; +my @nonCopyDiffHashRefs = @{$preparedPatchHash->{nonCopyDiffHashRefs}}; +my %sourceRevisions = %{$preparedPatchHash->{sourceRevisionHash}}; + +if ($merge) { + die "--merge is currently only supported for SVN" unless isSVN(); + # How do we handle Git patches applied to an SVN checkout here? + for my $file (sort keys %sourceRevisions) { + my $version = $sourceRevisions{$file}; + print "Getting version $version of $file\n"; + system("svn", "update", "-r", $version, $file) == 0 or die "Failed to run svn update -r $version $file."; + } +} + +# Handle copied and moved files first since moved files may have their +# source deleted before the move. 
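+# (Each copy diff hash carries at least an indexPath, the destination, and a
+# copiedFromPath, the source; those two keys are all the scmCopy() call below needs.)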
+for my $copyDiffHashRef (@copyDiffHashRefs) { + my $indexPath = $copyDiffHashRef->{indexPath}; + my $copiedFromPath = $copyDiffHashRef->{copiedFromPath}; + + addDirectoriesIfNeeded(dirname($indexPath)); + scmCopy($copiedFromPath, $indexPath); +} + +for my $diffHashRef (@nonCopyDiffHashRefs) { + patch($diffHashRef); +} + +removeDirectoriesIfNeeded(); + +exit $globalExitStatus; + +sub addDirectoriesIfNeeded($) +{ + my ($path) = @_; + my @dirs = File::Spec->splitdir($path); + my $dir = "."; + while (scalar @dirs) { + $dir = File::Spec->catdir($dir, shift @dirs); + next if exists $checkedDirectories{$dir}; + if (! -e $dir) { + mkdir $dir or die "Failed to create required directory '$dir' for path '$path'\n"; + scmAdd($dir); + $checkedDirectories{$dir} = 1; + } + elsif (-d $dir) { + # SVN prints "svn: warning: 'directory' is already under version control" + # if you try and add a directory which is already in the repository. + # Git will ignore the add, but re-adding large directories can be sloooow. + # So we check first to see if the directory is under version control first. + if (!scmKnowsOfFile($dir)) { + scmAdd($dir); + } + $checkedDirectories{$dir} = 1; + } + else { + die "'$dir' exists, but is not a directory"; + } + } +} + +# Args: +# $patch: a patch string. +# $pathRelativeToRoot: the path of the file to be patched, relative to the +# repository root. This should normally be the path +# found in the patch's "Index:" line. +# $options: a reference to an array of options to pass to the patch command. +sub applyPatch($$;$) +{ + my ($patch, $pathRelativeToRoot, $options) = @_; + + my $optionalArgs = {options => $options, ensureForce => $force}; + + my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs); + + if ($exitStatus) { + $globalExitStatus = $exitStatus; + } +} + +sub checksum($) +{ + my $file = shift; + open(FILE, $file) or die "Can't open '$file': $!"; + binmode(FILE); + my $checksum = Digest::MD5->new->addfile(*FILE)->hexdigest(); + close(FILE); + return $checksum; +} + +sub handleBinaryChange($$) +{ + my ($fullPath, $contents) = @_; + # [A-Za-z0-9+/] is the class of allowed base64 characters. + # One or more lines, at most 76 characters in length. + # The last line is allowed to have up to two '=' characters at the end (to signify padding). + if ($contents =~ m#((\n[A-Za-z0-9+/]{76})*\n[A-Za-z0-9+/]{2,74}?[A-Za-z0-9+/=]{2}\n)#) { + # Addition or Modification + open FILE, ">", $fullPath or die "Failed to open $fullPath."; + print FILE decode_base64($1); + close FILE; + if (!scmKnowsOfFile($fullPath)) { + # Addition + scmAdd($fullPath); + } + } else { + # Deletion + scmRemove($fullPath); + } +} + +sub handleGitBinaryChange($$) +{ + my ($fullPath, $diffHashRef) = @_; + + my $contents = $diffHashRef->{svnConvertedText}; + + my ($binaryChunkType, $binaryChunk, $reverseBinaryChunkType, $reverseBinaryChunk) = decodeGitBinaryPatch($contents, $fullPath); + # FIXME: support "delta" type. 
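+    # ("literal" hunks in a git binary patch carry the complete contents of the new
+    # blob, while "delta" hunks encode a binary diff against the old blob; only the
+    # literal form is accepted below.)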
+ die "only literal type is supported now" if ($binaryChunkType ne "literal" || $reverseBinaryChunkType ne "literal"); + + my $isFileAddition = $diffHashRef->{isNew}; + my $isFileDeletion = $diffHashRef->{isDeletion}; + + my $originalContents = ""; + if (open FILE, $fullPath) { + die "$fullPath already exists" if $isFileAddition; + + $originalContents = join("", <FILE>); + close FILE; + } + die "Original content of $fullPath mismatches" if $originalContents ne $reverseBinaryChunk; + + if ($isFileDeletion) { + scmRemove($fullPath); + } else { + # Addition or Modification + open FILE, ">", $fullPath or die "Failed to open $fullPath."; + print FILE $binaryChunk; + close FILE; + if ($isFileAddition) { + scmAdd($fullPath); + } + } +} + +sub isDirectoryEmptyForRemoval($) +{ + my ($dir) = @_; + return 1 unless -d $dir; + my $directoryIsEmpty = 1; + opendir DIR, $dir or die "Could not open '$dir' to list files: $?"; + for (my $item = readdir DIR; $item && $directoryIsEmpty; $item = readdir DIR) { + next if exists $removeDirectoryIgnoreList{$item}; + if (-d File::Spec->catdir($dir, $item)) { + $directoryIsEmpty = 0; + } else { + next if (scmWillDeleteFile(File::Spec->catdir($dir, $item))); + $directoryIsEmpty = 0; + } + } + closedir DIR; + return $directoryIsEmpty; +} + +# Args: +# $diffHashRef: a diff hash reference of the type returned by parsePatch(). +sub patch($) +{ + my ($diffHashRef) = @_; + + # Make sure $patch is initialized to some value. A deletion can have no + # svnConvertedText property in the case of a deletion resulting from a + # Git rename. + my $patch = $diffHashRef->{svnConvertedText} || ""; + + my $fullPath = $diffHashRef->{indexPath}; + my $isBinary = $diffHashRef->{isBinary}; + my $isGit = $diffHashRef->{isGit}; + + my $deletion = 0; + my $addition = 0; + + $addition = 1 if ($diffHashRef->{isNew} || $patch =~ /\n@@ -0,0 .* @@/); + $deletion = 1 if ($diffHashRef->{isDeletion} || $patch =~ /\n@@ .* \+0,0 @@/); + + if (!$addition && !$deletion && !$isBinary) { + # Standard patch, patch tool can handle this. + if (basename($fullPath) eq "ChangeLog") { + my $changeLogDotOrigExisted = -f "${fullPath}.orig"; + my $changeLogHash = fixChangeLogPatch($patch); + my $newPatch = setChangeLogDateAndReviewer($changeLogHash->{patch}, $reviewer, $epochTime); + applyPatch($newPatch, $fullPath, ["--fuzz=3"]); + unlink("${fullPath}.orig") if (! $changeLogDotOrigExisted); + } else { + applyPatch($patch, $fullPath) if $patch; + } + } else { + # Either a deletion, an addition or a binary change. + + addDirectoriesIfNeeded(dirname($fullPath)); + + if ($isBinary) { + if ($isGit) { + handleGitBinaryChange($fullPath, $diffHashRef); + } else { + handleBinaryChange($fullPath, $patch) if $patch; + } + } elsif ($deletion) { + applyPatch($patch, $fullPath, ["--force"]) if $patch; + scmRemove($fullPath); + } else { + # Addition + rename($fullPath, "$fullPath.orig") if -e $fullPath; + applyPatch($patch, $fullPath) if $patch; + unlink("$fullPath.orig") if -e "$fullPath.orig" && checksum($fullPath) eq checksum("$fullPath.orig"); + scmAdd($fullPath); + # What is this for? 
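(A guess, not something the patch states: the ".orig" file survives to this point only when it differs from the freshly added file, so running "svn stat" on it prints a line like the one below, flagging the preserved, unversioned copy for the user.)

    ?       WebCore/page/Foo.cpp.orig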
+            system("svn", "stat", "$fullPath.orig") if isSVN() && -e "$fullPath.orig";
+        }
+    }
+
+    scmToggleExecutableBit($fullPath, $diffHashRef->{executableBitDelta}) if defined($diffHashRef->{executableBitDelta});
+}
+
+sub removeDirectoriesIfNeeded()
+{
+    foreach my $dir (reverse sort keys %checkedDirectories) {
+        if (isDirectoryEmptyForRemoval($dir)) {
+            scmRemove($dir);
+        }
+    }
+}
+
+# This could be made into a more general "status" call, except svn and git
+# have different ideas about "moving" files which might get confusing.
+sub scmWillDeleteFile($)
+{
+    my ($path) = @_;
+    if (isSVN()) {
+        my $svnOutput = svnStatus($path);
+        return 1 if $svnOutput && substr($svnOutput, 0, 1) eq "D";
+    } elsif (isGit()) {
+        my $gitOutput = `git diff-index --name-status HEAD -- $path`;
+        return 1 if $gitOutput && substr($gitOutput, 0, 1) eq "D";
+    }
+    return 0;
+}
+
+# Return whether the file at the given path is known to Git.
+#
+# This method outputs a message like the following to STDERR when
+# returning false:
+#
+# "error: pathspec 'test.png' did not match any file(s) known to git.
+#  Did you forget to 'git add'?"
+sub gitKnowsOfFile($)
+{
+    my $path = shift;
+
+    `git ls-files --error-unmatch -- $path`;
+    my $exitStatus = exitStatus($?);
+    return $exitStatus == 0;
+}
+
+sub scmKnowsOfFile($)
+{
+    my ($path) = @_;
+    if (isSVN()) {
+        my $svnOutput = svnStatus($path);
+        # This can match more than intended, since "?" might not be the first field in the status output.
+        if ($svnOutput && $svnOutput =~ m#\?\s+$path\n#) {
+            return 0;
+        }
+        # This does not handle errors well.
+        return 1;
+    } elsif (isGit()) {
+        my @result = callSilently(\&gitKnowsOfFile, $path);
+        return $result[0];
+    }
+}
+
+sub scmCopy($$)
+{
+    my ($source, $destination) = @_;
+    if (isSVN()) {
+        system("svn", "copy", $source, $destination) == 0 or die "Failed to svn copy $source $destination.";
+    } elsif (isGit()) {
+        system("cp", $source, $destination) == 0 or die "Failed to copy $source $destination.";
+        system("git", "add", $destination) == 0 or die "Failed to git add $destination.";
+    }
+}
+
+sub scmAdd($)
+{
+    my ($path) = @_;
+    if (isSVN()) {
+        system("svn", "add", $path) == 0 or die "Failed to svn add $path.";
+    } elsif (isGit()) {
+        system("git", "add", $path) == 0 or die "Failed to git add $path.";
+    }
+}
+
+sub scmRemove($)
+{
+    my ($path) = @_;
+    if (isSVN()) {
+        # SVN is very verbose when removing directories. Squelch all output except the last line.
+        my $svnOutput;
+        open SVN, "svn rm --force '$path' |" or die "svn rm --force '$path' failed!";
+        # Only print the last line. Subversion outputs all changed statuses below $dir.
+        while (<SVN>) {
+            $svnOutput = $_;
+        }
+        close SVN;
+        print $svnOutput if $svnOutput;
+    } elsif (isGit()) {
+        # Git removes a directory if it becomes empty when the last file it contains is
+        # removed by `git rm`. In svn-apply this can happen when a directory is being
+        # removed in a patch, and all of the files inside of the directory are removed
+        # before attempting to remove the directory itself. In this case, Git will have
+        # already deleted the directory and `git rm` would exit with an error claiming
+        # there was no file. The --ignore-unmatch switch gracefully handles this case.
+ system("git", "rm", "--force", "--ignore-unmatch", $path) == 0 or die "Failed to git rm --force --ignore-unmatch $path."; + } +} diff --git a/Tools/Scripts/svn-create-patch b/Tools/Scripts/svn-create-patch new file mode 100755 index 0000000..863998d --- /dev/null +++ b/Tools/Scripts/svn-create-patch @@ -0,0 +1,431 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Extended "svn diff" script for WebKit Open Source Project, used to make patches. + +# Differences from standard "svn diff": +# +# Uses the real diff, not svn's built-in diff. +# Always passes "-p" to diff so it will try to include function names. +# Handles binary files (encoded as a base64 chunk of text). +# Sorts the diffs alphabetically by text files, then binary files. +# Handles copied and moved files. +# +# Missing features: +# +# Handle copied and moved directories. + +use strict; +use warnings; + +use Config; +use File::Basename; +use File::Spec; +use File::stat; +use FindBin; +use Getopt::Long; +use lib $FindBin::Bin; +use MIME::Base64; +use POSIX qw(:errno_h); +use Time::gmtime; +use VCSUtils; + +sub binarycmp($$); +sub diffOptionsForFile($); +sub findBaseUrl($); +sub findMimeType($;$); +sub findModificationType($); +sub findSourceFileAndRevision($); +sub generateDiff($$); +sub generateFileList($\%); +sub hunkHeaderLineRegExForFile($); +sub isBinaryMimeType($); +sub manufacturePatchForAdditionWithHistory($); +sub numericcmp($$); +sub outputBinaryContent($); +sub patchpathcmp($$); +sub pathcmp($$); +sub processPaths(\@); +sub splitpath($); +sub testfilecmp($$); + +$ENV{'LC_ALL'} = 'C'; + +my $showHelp; +my $ignoreChangelogs = 0; +my $devNull = File::Spec->devnull(); + +my $result = GetOptions( + "help" => \$showHelp, + "ignore-changelogs" => \$ignoreChangelogs +); +if (!$result || $showHelp) { + print STDERR basename($0) . " [-h|--help] [--ignore-changelogs] [svndir1 [svndir2 ...]]\n"; + exit 1; +} + +# Sort the diffs for easier reviewing. 
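(The assembled patch goes to standard output, so a typical round trip, assuming the commands are run from the top of a WebKit checkout, might look like this; the output file name is arbitrary.)

    Tools/Scripts/svn-create-patch WebCore > fix.patch
    Tools/Scripts/svn-apply fix.patch        # and svn-unapply fix.patch to roll it back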
+my %paths = processPaths(@ARGV);
+
+# Generate a list of files requiring diffs.
+my %diffFiles;
+for my $path (keys %paths) {
+    generateFileList($path, %diffFiles);
+}
+
+my $svnRoot = determineSVNRoot();
+my $prefix = chdirReturningRelativePath($svnRoot);
+
+my $patchSize = 0;
+
+# Generate the diffs, in an order chosen for easy reviewing.
+for my $path (sort patchpathcmp values %diffFiles) {
+    $patchSize += generateDiff($path, $prefix);
+}
+
+if ($patchSize > 20480) {
+    print STDERR "WARNING: Patch's size is " . int($patchSize/1024) . " kbytes.\n";
+    print STDERR "Patches 20k or smaller are more likely to be reviewed. Larger patches may sit unreviewed for a long time.\n";
+}
+
+exit 0;
+
+# Overall sort, considering multiple criteria.
+sub patchpathcmp($$)
+{
+    my ($a, $b) = @_;
+
+    # All binary files come after all non-binary files.
+    my $result = binarycmp($a, $b);
+    return $result if $result;
+
+    # All test files come after all non-test files.
+    $result = testfilecmp($a, $b);
+    return $result if $result;
+
+    # Final sort is a "smart" sort by directory and file name.
+    return pathcmp($a, $b);
+}
+
+# Sort so text files appear before binary files.
+sub binarycmp($$)
+{
+    my ($fileDataA, $fileDataB) = @_;
+    return $fileDataA->{isBinary} <=> $fileDataB->{isBinary};
+}
+
+sub diffOptionsForFile($)
+{
+    my ($file) = @_;
+
+    my $options = "uaNp";
+
+    if (my $hunkHeaderLineRegEx = hunkHeaderLineRegExForFile($file)) {
+        $options .= "F'$hunkHeaderLineRegEx'";
+    }
+
+    return $options;
+}
+
+sub findBaseUrl($)
+{
+    my ($infoPath) = @_;
+    my $baseUrl;
+    open INFO, "svn info '$infoPath' |" or die;
+    while (<INFO>) {
+        if (/^URL: (.+?)[\r\n]*$/) {
+            $baseUrl = $1;
+        }
+    }
+    close INFO;
+    return $baseUrl;
+}
+
+sub findMimeType($;$)
+{
+    my ($file, $revision) = @_;
+    my $args = $revision ? "--revision $revision" : "";
+    open PROPGET, "svn propget svn:mime-type $args '$file' |" or die;
+    my $mimeType = <PROPGET>;
+    close PROPGET;
+    # svn may output a different EOL sequence than $/, so avoid chomp.
+    if ($mimeType) {
+        $mimeType =~ s/[\r\n]+$//g;
+    }
+    return $mimeType;
+}
+
+sub findModificationType($)
+{
+    my ($stat) = @_;
+    my $fileStat = substr($stat, 0, 1);
+    my $propertyStat = substr($stat, 1, 1);
+    if ($fileStat eq "A" || $fileStat eq "R") {
+        my $additionWithHistory = substr($stat, 3, 1);
+        return $additionWithHistory eq "+" ?
"additionWithHistory" : "addition"; + } + return "modification" if ($fileStat eq "M" || $propertyStat eq "M"); + return "deletion" if ($fileStat eq "D"); + return undef; +} + +sub findSourceFileAndRevision($) +{ + my ($file) = @_; + my $baseUrl = findBaseUrl("."); + my $sourceFile; + my $sourceRevision; + open INFO, "svn info '$file' |" or die; + while (<INFO>) { + if (/^Copied From URL: (.+?)[\r\n]*$/) { + $sourceFile = File::Spec->abs2rel($1, $baseUrl); + } elsif (/^Copied From Rev: ([0-9]+)/) { + $sourceRevision = $1; + } + } + close INFO; + return ($sourceFile, $sourceRevision); +} + +sub generateDiff($$) +{ + my ($fileData, $prefix) = @_; + my $file = File::Spec->catdir($prefix, $fileData->{path}); + + if ($ignoreChangelogs && basename($file) eq "ChangeLog") { + return 0; + } + + my $patch = ""; + if ($fileData->{modificationType} eq "additionWithHistory") { + manufacturePatchForAdditionWithHistory($fileData); + } + + my $diffOptions = diffOptionsForFile($file); + open DIFF, "svn diff --diff-cmd diff -x -$diffOptions '$file' |" or die; + while (<DIFF>) { + $patch .= $_; + } + close DIFF; + if (basename($file) eq "ChangeLog") { + my $changeLogHash = fixChangeLogPatch($patch); + $patch = $changeLogHash->{patch}; + } + print $patch; + if ($fileData->{isBinary}) { + print "\n" if ($patch && $patch =~ m/\n\S+$/m); + outputBinaryContent($file); + } + return length($patch); +} + +sub generateFileList($\%) +{ + my ($statPath, $diffFiles) = @_; + my %testDirectories = map { $_ => 1 } qw(LayoutTests); + open STAT, "svn stat '$statPath' |" or die; + while (my $line = <STAT>) { + # svn may output a different EOL sequence than $/, so avoid chomp. + $line =~ s/[\r\n]+$//g; + my $stat; + my $path; + if (isSVNVersion16OrNewer()) { + $stat = substr($line, 0, 8); + $path = substr($line, 8); + } else { + $stat = substr($line, 0, 7); + $path = substr($line, 7); + } + next if -d $path; + my $modificationType = findModificationType($stat); + if ($modificationType) { + $diffFiles->{$path}->{path} = $path; + $diffFiles->{$path}->{modificationType} = $modificationType; + $diffFiles->{$path}->{isBinary} = isBinaryMimeType($path); + $diffFiles->{$path}->{isTestFile} = exists $testDirectories{(File::Spec->splitdir($path))[0]} ? 
1 : 0; + if ($modificationType eq "additionWithHistory") { + my ($sourceFile, $sourceRevision) = findSourceFileAndRevision($path); + $diffFiles->{$path}->{sourceFile} = $sourceFile; + $diffFiles->{$path}->{sourceRevision} = $sourceRevision; + } + } else { + print STDERR $line, "\n"; + } + } + close STAT; +} + +sub hunkHeaderLineRegExForFile($) +{ + my ($file) = @_; + + my $startOfObjCInterfaceRegEx = "@(implementation\\|interface\\|protocol)"; + return "^[-+]\\|$startOfObjCInterfaceRegEx" if $file =~ /\.mm?$/; + return "^$startOfObjCInterfaceRegEx" if $file =~ /^(.*\/)?(mac|objc)\// && $file =~ /\.h$/; +} + +sub isBinaryMimeType($) +{ + my ($file) = @_; + my $mimeType = findMimeType($file); + return 0 if (!$mimeType || substr($mimeType, 0, 5) eq "text/"); + return 1; +} + +sub manufacturePatchForAdditionWithHistory($) +{ + my ($fileData) = @_; + my $file = $fileData->{path}; + print "Index: ${file}\n"; + print "=" x 67, "\n"; + my $sourceFile = $fileData->{sourceFile}; + my $sourceRevision = $fileData->{sourceRevision}; + print "--- ${file}\t(revision ${sourceRevision})\t(from ${sourceFile}:${sourceRevision})\n"; + print "+++ ${file}\t(working copy)\n"; + if ($fileData->{isBinary}) { + print "\nCannot display: file marked as a binary type.\n"; + my $mimeType = findMimeType($file, $sourceRevision); + print "svn:mime-type = ${mimeType}\n\n"; + } else { + print `svn cat ${sourceFile} | diff -u $devNull - | tail -n +3`; + } +} + +# Sort numeric parts of strings as numbers, other parts as strings. +# Makes 1.33 come after 1.3, which is cool. +sub numericcmp($$) +{ + my ($aa, $bb) = @_; + + my @a = split /(\d+)/, $aa; + my @b = split /(\d+)/, $bb; + + # Compare one chunk at a time. + # Each chunk is either all numeric digits, or all not numeric digits. + while (@a && @b) { + my $a = shift @a; + my $b = shift @b; + + # Use numeric comparison if chunks are non-equal numbers. + return $a <=> $b if $a =~ /^\d/ && $b =~ /^\d/ && $a != $b; + + # Use string comparison if chunks are any other kind of non-equal string. + return $a cmp $b if $a ne $b; + } + + # One of the two is now empty; compare lengths for result in this case. + return @a <=> @b; +} + +sub outputBinaryContent($) +{ + my ($path) = @_; + # Deletion + return if (! -e $path); + # Addition or Modification + my $buffer; + open BINARY, $path or die; + while (read(BINARY, $buffer, 60*57)) { + print encode_base64($buffer); + } + close BINARY; + print "\n"; +} + +# Sort first by directory, then by file, so all paths in one directory are grouped +# rather than being interspersed with items from subdirectories. +# Use numericcmp to sort directory and filenames to make order logical. +# Also include a special case for ChangeLog, which comes first in any directory. +sub pathcmp($$) +{ + my ($fileDataA, $fileDataB) = @_; + + my ($dira, $namea) = splitpath($fileDataA->{path}); + my ($dirb, $nameb) = splitpath($fileDataB->{path}); + + return numericcmp($dira, $dirb) if $dira ne $dirb; + return -1 if $namea eq "ChangeLog" && $nameb ne "ChangeLog"; + return +1 if $namea ne "ChangeLog" && $nameb eq "ChangeLog"; + return numericcmp($namea, $nameb); +} + +sub processPaths(\@) +{ + my ($paths) = @_; + return ("." 
=> 1) if (!@{$paths}); + + my %result = (); + + for my $file (@{$paths}) { + die "can't handle absolute paths like \"$file\"\n" if File::Spec->file_name_is_absolute($file); + die "can't handle empty string path\n" if $file eq ""; + die "can't handle path with single quote in the name like \"$file\"\n" if $file =~ /'/; # ' (keep Xcode syntax highlighting happy) + + my $untouchedFile = $file; + + $file = canonicalizePath($file); + + die "can't handle paths with .. like \"$untouchedFile\"\n" if $file =~ m|/\.\./|; + + $result{$file} = 1; + } + + return ("." => 1) if ($result{"."}); + + # Remove any paths that also have a parent listed. + for my $path (keys %result) { + for (my $parent = dirname($path); $parent ne '.'; $parent = dirname($parent)) { + if ($result{$parent}) { + delete $result{$path}; + last; + } + } + } + + return %result; +} + +# Break up a path into the directory (with slash) and base name. +sub splitpath($) +{ + my ($path) = @_; + + my $pathSeparator = "/"; + my $dirname = dirname($path) . $pathSeparator; + $dirname = "" if $dirname eq "." . $pathSeparator; + + return ($dirname, basename($path)); +} + +# Sort so source code files appear before test files. +sub testfilecmp($$) +{ + my ($fileDataA, $fileDataB) = @_; + return $fileDataA->{isTestFile} <=> $fileDataB->{isTestFile}; +} + diff --git a/Tools/Scripts/svn-unapply b/Tools/Scripts/svn-unapply new file mode 100755 index 0000000..1dca11c --- /dev/null +++ b/Tools/Scripts/svn-unapply @@ -0,0 +1,280 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007 Apple Inc. All rights reserved. +# Copyright (C) 2009 Cameron McCormack <cam@mcc.id.au> +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# "unpatch" script for WebKit Open Source Project, used to remove patches. + +# Differences from invoking "patch -p0 -R": +# +# Handles added files (does a svn revert with additional logic to handle local changes). +# Handles added directories (does a svn revert and a rmdir). +# Handles removed files (does a svn revert with additional logic to handle local changes). 
+# Handles removed directories (does a svn revert). +# Paths from Index: lines are used rather than the paths on the patch lines, which +# makes patches generated by "cvs diff" work (increasingly unimportant since we +# use Subversion now). +# ChangeLog patches use --fuzz=3 to prevent rejects, and the entry date is reset in +# the patch before it is applied (svn-apply sets it when applying a patch). +# Handles binary files (requires patches made by svn-create-patch). +# Handles copied and moved files (requires patches made by svn-create-patch). +# Handles git-diff patches (without binary changes) created at the top-level directory +# +# Missing features: +# +# Handle property changes. +# Handle copied and moved directories (would require patches made by svn-create-patch). +# Use version numbers in the patch file and do a 3-way merge. +# When reversing an addition, check that the file matches what's being removed. +# Notice a patch that's being unapplied at the "wrong level" and make it work anyway. +# Do a dry run on the whole patch and don't do anything if part of the patch is +# going to fail (probably too strict unless we exclude ChangeLog). +# Handle git-diff patches with binary changes + +use strict; +use warnings; + +use Cwd; +use Digest::MD5; +use Fcntl qw(:DEFAULT :seek); +use File::Basename; +use File::Spec; +use File::Temp qw(tempfile); +use Getopt::Long; + +use FindBin; +use lib $FindBin::Bin; +use VCSUtils; + +sub checksum($); +sub patch($); +sub revertDirectories(); +sub unapplyPatch($$;$); +sub unsetChangeLogDate($$); + +my $force = 0; +my $showHelp = 0; + +my $optionParseSuccess = GetOptions( + "force!" => \$force, + "help!" => \$showHelp +); + +if (!$optionParseSuccess || $showHelp) { + print STDERR basename($0) . " [-h|--help] [--force] patch1 [patch2 ...]\n"; + exit 1; +} + +my $globalExitStatus = 0; + +my $repositoryRootPath = determineVCSRoot(); + +my @copiedFiles; +my %directoriesToCheck; + +# Need to use a typeglob to pass the file handle as a parameter, +# otherwise get a bareword error. +my @diffHashRefs = parsePatch(*ARGV); + +print "Parsed " . @diffHashRefs . " diffs from patch file(s).\n"; + +my $preparedPatchHash = prepareParsedPatch($force, @diffHashRefs); + +my @copyDiffHashRefs = @{$preparedPatchHash->{copyDiffHashRefs}}; +my @nonCopyDiffHashRefs = @{$preparedPatchHash->{nonCopyDiffHashRefs}}; + +for my $diffHashRef (@nonCopyDiffHashRefs) { + patch($diffHashRef); +} + +# Handle copied and moved files last since they may have had post-copy changes that have now been unapplied +for my $diffHashRef (@copyDiffHashRefs) { + patch($diffHashRef); +} + +if (isSVN()) { + revertDirectories(); +} + +exit $globalExitStatus; + +sub checksum($) +{ + my $file = shift; + open(FILE, $file) or die "Can't open '$file': $!"; + binmode(FILE); + my $checksum = Digest::MD5->new->addfile(*FILE)->hexdigest(); + close(FILE); + return $checksum; +} + +# Args: +# $diffHashRef: a diff hash reference of the type returned by parsePatch(). +sub patch($) +{ + my ($diffHashRef) = @_; + + # Make sure $patch is initialized to some value. There is no + # svnConvertedText when reversing an svn copy/move. 
+    my $patch = $diffHashRef->{svnConvertedText} || "";
+
+    my $fullPath = $diffHashRef->{indexPath};
+    my $isSvnBinary = $diffHashRef->{isBinary} && $diffHashRef->{isSvn};
+
+    $directoriesToCheck{dirname($fullPath)} = 1;
+
+    my $deletion = 0;
+    my $addition = 0;
+
+    $addition = 1 if ($diffHashRef->{isNew} || $diffHashRef->{copiedFromPath} || $patch =~ /\n@@ -0,0 .* @@/);
+    $deletion = 1 if ($diffHashRef->{isDeletion} || $patch =~ /\n@@ .* \+0,0 @@/);
+
+    if (!$addition && !$deletion && !$isSvnBinary) {
+        # Standard patch, patch tool can handle this.
+        if (basename($fullPath) eq "ChangeLog") {
+            my $changeLogDotOrigExisted = -f "${fullPath}.orig";
+            my $changeLogHash = fixChangeLogPatch($patch);
+            unapplyPatch(unsetChangeLogDate($fullPath, $changeLogHash->{patch}), $fullPath, ["--fuzz=3"]);
+            unlink("${fullPath}.orig") if (! $changeLogDotOrigExisted);
+        } else {
+            unapplyPatch($patch, $fullPath);
+        }
+    } else {
+        # Either a deletion, an addition or a binary change.
+
+        # FIXME: Add support for Git binary files.
+        if ($isSvnBinary) {
+            # Reverse binary change
+            unlink($fullPath) if (-e $fullPath);
+            system "svn", "revert", $fullPath;
+        } elsif ($deletion) {
+            # Reverse deletion
+            rename($fullPath, "$fullPath.orig") if -e $fullPath;
+
+            unapplyPatch($patch, $fullPath);
+
+            # If we don't ask for the filehandle here, we always get a warning.
+            my ($fh, $tempPath) = tempfile(basename($fullPath) . "-XXXXXXXX",
+                                           DIR => dirname($fullPath), UNLINK => 1);
+            close($fh);
+
+            # Keep the version from the patch in case it's different from svn.
+            rename($fullPath, $tempPath);
+            system "svn", "revert", $fullPath;
+            rename($tempPath, $fullPath);
+
+            # This works around a bug in the svn client.
+            # [Issue 1960] file modifications get lost due to FAT 2s time resolution
+            # http://subversion.tigris.org/issues/show_bug.cgi?id=1960
+            system "touch", $fullPath;
+
+            # Remove $fullPath.orig if it is the same as $fullPath
+            unlink("$fullPath.orig") if -e "$fullPath.orig" && checksum($fullPath) eq checksum("$fullPath.orig");
+
+            # Show status if the file is modified
+            system "svn", "stat", $fullPath;
+        } else {
+            # Reverse addition
+            #
+            # FIXME: This should use the same logic as svn-apply's deletion
+            #        code. In particular, svn-apply's scmRemove() subroutine
+            #        should be used here.
+            unapplyPatch($patch, $fullPath, ["--force"]) if $patch;
+            unlink($fullPath) if -z $fullPath;
+            system "svn", "revert", $fullPath;
+        }
+    }
+
+    scmToggleExecutableBit($fullPath, -1 * $diffHashRef->{executableBitDelta}) if defined($diffHashRef->{executableBitDelta});
+}
+
+sub revertDirectories()
+{
+    chdir $repositoryRootPath;
+    my %checkedDirectories;
+    foreach my $path (reverse sort keys %directoriesToCheck) {
+        my @dirs = File::Spec->splitdir($path);
+        while (scalar @dirs) {
+            my $dir = File::Spec->catdir(@dirs);
+            pop(@dirs);
+            next if (exists $checkedDirectories{$dir});
+            if (-d $dir) {
+                my $svnOutput = svnStatus($dir);
+                if ($svnOutput && $svnOutput =~ m#A\s+$dir\n#) {
+                    system "svn", "revert", $dir;
+                    rmdir $dir;
+                }
+                elsif ($svnOutput && $svnOutput =~ m#D\s+$dir\n#) {
+                    system "svn", "revert", $dir;
+                }
+                else {
+                    # Modification
+                    print $svnOutput if $svnOutput;
+                }
+                $checkedDirectories{$dir} = 1;
+            }
+            else {
+                die "'$dir' is not a directory";
+            }
+        }
+    }
+}
+
+# Args:
+#   $patch: a patch string.
+#   $pathRelativeToRoot: the path of the file to be patched, relative to the
+#                        repository root. This should normally be the path
+#                        found in the patch's "Index:" line.
+# $options: a reference to an array of options to pass to the patch command. +# Do not include --reverse in this array. +sub unapplyPatch($$;$) +{ + my ($patch, $pathRelativeToRoot, $options) = @_; + + my $optionalArgs = {options => $options, ensureForce => $force, shouldReverse => 1}; + + my $exitStatus = runPatchCommand($patch, $repositoryRootPath, $pathRelativeToRoot, $optionalArgs); + + if ($exitStatus) { + $globalExitStatus = $exitStatus; + } +} + +sub unsetChangeLogDate($$) +{ + my $fullPath = shift; + my $patch = shift; + my $newDate; + sysopen(CHANGELOG, $fullPath, O_RDONLY) or die "Failed to open $fullPath: $!"; + sysseek(CHANGELOG, 0, SEEK_SET); + my $byteCount = sysread(CHANGELOG, $newDate, 10); + die "Failed reading $fullPath: $!" if !$byteCount || $byteCount != 10; + close(CHANGELOG); + $patch =~ s/(\n\+)\d{4}-[^-]{2}-[^-]{2}( )/$1$newDate$2/; + return $patch; +} diff --git a/Tools/Scripts/test-webkit-scripts b/Tools/Scripts/test-webkit-scripts new file mode 100755 index 0000000..781e8ce --- /dev/null +++ b/Tools/Scripts/test-webkit-scripts @@ -0,0 +1,85 @@ +#!/usr/bin/python +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Run unit tests of WebKit's Perl and Python scripts.""" + +# The docstring above is passed as the "description" to the OptionParser +# used in this script's __main__ block. +# +# For the command options supported by this script, see the code below +# that instantiates the OptionParser class, or else pass --help +# while running this script (since argument help is auto-generated). 
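(Concretely, a typical invocation from the top of a WebKit checkout would be one of the following; --all additionally runs the tests that are suppressed by default, as defined in main() below.)

    Tools/Scripts/test-webkit-scripts
    Tools/Scripts/test-webkit-scripts --all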
+ +import os +import subprocess +import sys +from optparse import OptionParser + +class ScriptsTester(object): + + """Supports running unit tests of WebKit scripts.""" + + def __init__(self, scripts_directory): + self.scripts_directory = scripts_directory + + def script_path(self, script_file_name): + """Return an absolute path to the given script.""" + return os.path.join(self.scripts_directory, script_file_name) + + def run_test_script(self, script_title, script_path, args=None): + """Run the given test script.""" + print('Testing %s:' % script_title) + call_args = [script_path] + if args: + call_args.extend(args) + subprocess.call(call_args) + print(70 * "*") # dividing line + + def main(self): + parser = OptionParser(description=__doc__) + parser.add_option('-a', '--all', dest='all', action='store_true', + default=False, help='run all available tests, ' + 'including those suppressed by default') + (options, args) = parser.parse_args() + + self.run_test_script('Perl scripts', self.script_path('test-webkitperl')) + self.run_test_script('Python scripts', self.script_path('test-webkitpy'), + ['--all'] if options.all else None) + + # FIXME: Display a cumulative indication of success or failure. + # In addition, call sys.exit() with 0 or 1 depending on that + # cumulative success or failure. + print('Note: Perl and Python results appear separately above.') + + +if __name__ == '__main__': + # The scripts directory is the directory containing this file. + tester = ScriptsTester(os.path.dirname(__file__)) + tester.main() diff --git a/Tools/Scripts/test-webkitperl b/Tools/Scripts/test-webkitperl new file mode 100755 index 0000000..6faa47c --- /dev/null +++ b/Tools/Scripts/test-webkitperl @@ -0,0 +1,59 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Runs unit tests of WebKit Perl code. + +use strict; +use warnings; + +use File::Spec; +use FindBin; +use Test::Harness; +use lib $FindBin::Bin; # so this script can be run from any directory. 
+use VCSUtils; + +# Change the working directory so that we can pass shorter, relative +# paths to runtests(), rather than longer, absolute paths. +# +# We change to the source root so the paths can be relative to the +# source root. These paths display on the screen, and their meaning +# will be clearer to the user if relative to the root, rather than to +# the Scripts directory, say. +# +# Source root is two levels up from the Scripts directory. +my $sourceRootDir = File::Spec->catfile($FindBin::Bin, "../.."); +chdir($sourceRootDir); + +# Relative to root +my $pattern = "Tools/Scripts/webkitperl/*_unittest/*.pl"; + +my @files = <${pattern}>; # lists files alphabetically + +runtests(@files); diff --git a/Tools/Scripts/test-webkitpy b/Tools/Scripts/test-webkitpy new file mode 100755 index 0000000..7efacb0 --- /dev/null +++ b/Tools/Scripts/test-webkitpy @@ -0,0 +1,266 @@ +#!/usr/bin/env python +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os +import sys + +# Do not import anything from webkitpy prior to cleaning webkitpy of +# orphaned *.pyc files. This ensures that no orphaned *.pyc files are +# accidentally imported during the course of this script. +# +# Also, do not import or execute any Python code incompatible with +# Python 2.4 until after execution of the init() method below. + + +_log = logging.getLogger("test-webkitpy") + + +# Verbose logging is useful for debugging test-webkitpy code that runs +# before the actual unit tests -- things like autoinstall downloading and +# unit-test auto-detection logic. This is different from verbose logging +# of the unit tests themselves (i.e. the unittest module's --verbose flag). +def configure_logging(is_verbose_logging): + """Configure the root logger. + + Configure the root logger not to log any messages from webkitpy -- + except for messages from the autoinstall module. Also set the + logging level as described below. 
+ + Args: + is_verbose_logging: A boolean value of whether logging should be + verbose. If this parameter is true, the logging + level for the handler on the root logger is set to + logging.DEBUG. Otherwise, it is set to logging.INFO. + + """ + # Don't use the Python ternary operator here so that this method will + # work with Python 2.4. + if is_verbose_logging: + logging_level = logging.DEBUG + else: + logging_level = logging.INFO + + handler = logging.StreamHandler(sys.stderr) + # We constrain the level on the handler rather than on the root + # logger itself. This is probably better because the handler is + # configured and known only to this module, whereas the root logger + # is an object shared (and potentially modified) by many modules. + # Modifying the handler, then, is less intrusive and less likely to + # interfere with modifications made by other modules (e.g. in unit + # tests). + handler.setLevel(logging_level) + formatter = logging.Formatter("%(name)s: %(levelname)-8s %(message)s") + handler.setFormatter(formatter) + + logger = logging.getLogger() + logger.addHandler(handler) + logger.setLevel(logging.NOTSET) + + # Filter out most webkitpy messages. + # + # Messages can be selectively re-enabled for this script by updating + # this method accordingly. + def filter(record): + """Filter out autoinstall and non-third-party webkitpy messages.""" + # FIXME: Figure out a way not to use strings here, for example by + # using syntax like webkitpy.test.__name__. We want to be + # sure not to import any non-Python 2.4 code, though, until + # after the version-checking code has executed. + if (record.name.startswith("webkitpy.common.system.autoinstall") or + record.name.startswith("webkitpy.test")): + return True + if record.name.startswith("webkitpy"): + return False + return True + + testing_filter = logging.Filter() + testing_filter.filter = filter + + # Display a message so developers are not mystified as to why + # logging does not work in the unit tests. + _log.info("Suppressing most webkitpy logging while running unit tests.") + handler.addFilter(testing_filter) + + +def _clean_pyc_files(dir_to_clean, paths_not_to_log): + """Delete from a directory all .pyc files that have no .py file. + + Args: + dir_to_clean: The path to the directory to clean. + paths_not_to_log: A list of paths to .pyc files whose deletions should + not be logged. This list should normally include + only test .pyc files. + + """ + _log.debug("Cleaning orphaned *.pyc files from: %s" % dir_to_clean) + + # Normalize paths not to log. + paths_not_to_log = [os.path.abspath(path) for path in paths_not_to_log] + + for dir_path, dir_names, file_names in os.walk(dir_to_clean): + for file_name in file_names: + if file_name.endswith(".pyc") and file_name[:-1] not in file_names: + file_path = os.path.join(dir_path, file_name) + if os.path.abspath(file_path) not in paths_not_to_log: + _log.info("Deleting orphan *.pyc file: %s" % file_path) + os.remove(file_path) + + +# As a substitute for a unit test, this method tests _clean_pyc_files() +# in addition to calling it. We chose not to use the unittest module +# because _clean_pyc_files() is called only once and is not used elsewhere. +def _clean_packages_with_test(external_package_paths): + webkitpy_dir = os.path.join(os.path.dirname(__file__), "webkitpy") + package_paths = [webkitpy_dir] + external_package_paths + + # The test .pyc file is-- + # webkitpy/python24/TEMP_test-webkitpy_test_pyc_file.pyc. 
+ test_path = os.path.join(webkitpy_dir, "python24", + "TEMP_test-webkitpy_test_pyc_file.pyc") + + test_file = open(test_path, "w") + try: + test_file.write("Test .pyc file generated by test-webkitpy.") + finally: + test_file.close() + + # Confirm that the test file exists so that when we check that it does + # not exist, the result is meaningful. + if not os.path.exists(test_path): + raise Exception("Test .pyc file not created: %s" % test_path) + + for path in package_paths: + _clean_pyc_files(path, [test_path]) + + if os.path.exists(test_path): + raise Exception("Test .pyc file not deleted: %s" % test_path) + + +def init(command_args, external_package_paths): + """Execute code prior to importing from webkitpy.unittests. + + Args: + command_args: The list of command-line arguments -- usually + sys.argv[1:]. + + """ + verbose_logging_flag = "--verbose-logging" + is_verbose_logging = verbose_logging_flag in command_args + if is_verbose_logging: + # Remove the flag so it doesn't cause unittest.main() to error out. + # + # FIXME: Get documentation for the --verbose-logging flag to show + # up in the usage instructions, which are currently generated + # by unittest.main(). It's possible that this will require + # re-implementing the option parser for unittest.main() + # since there may not be an easy way to modify its existing + # option parser. + sys.argv.remove(verbose_logging_flag) + + configure_logging(is_verbose_logging) + _log.debug("Verbose WebKit logging enabled.") + + # We clean orphaned *.pyc files from the packages prior to importing from + # them to make sure that no import statements falsely succeed. + # This helps to check that import statements have been updated correctly + # after any file moves. Otherwise, incorrect import statements can + # be masked. + # + # For example, if webkitpy/python24/versioning.py were moved to a + # different location without changing any import statements, and if + # the corresponding .pyc file were left behind without deleting it, + # then "import webkitpy.python24.versioning" would continue to succeed + # even though it would fail for someone checking out a fresh copy + # of the source tree. This is because of a Python feature: + # + # "It is possible to have a file called spam.pyc (or spam.pyo when -O + # is used) without a file spam.py for the same module. This can be used + # to distribute a library of Python code in a form that is moderately + # hard to reverse engineer." + # + # ( http://docs.python.org/tutorial/modules.html#compiled-python-files ) + # + # Deleting the orphaned .pyc file prior to importing, however, would + # cause an ImportError to occur on import as desired. + _clean_packages_with_test(external_package_paths) + + import webkitpy.python24.versioning as versioning + + versioning.check_version(log=_log) + + (comparison, current_version, minimum_version) = \ + versioning.compare_version() + + if comparison > 0: + # Then the current version is later than the minimum version. + message = ("You are testing webkitpy with a Python version (%s) " + "higher than the minimum version (%s) it was meant " + "to support." 
% (current_version, minimum_version)) + _log.warn(message) + + +def _path_from_webkit_root(*components): + webkit_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + return os.path.join(webkit_root, *components) + + +def _test_import(module_path): + try: + sys.path.append(os.path.dirname(module_path)) + module_name = os.path.basename(module_path) + __import__(module_name) + return True + except Exception, e: + message = "Skipping tests in %s due to failure (%s)." % (module_path, e) + if module_name.endswith("QueueStatusServer"): + message += " This module is optional. The failure is likely due to a missing Google AppEngine install. (http://code.google.com/appengine/downloads.html)" + _log.warn(message) + return False + +if __name__ == "__main__": + # FIXME: We should probably test each package separately to avoid naming conflicts. + external_package_paths = [ + _path_from_webkit_root('WebKit2', 'Scripts', 'webkit2'), + _path_from_webkit_root('Tools', 'QueueStatusServer'), + ] + init(sys.argv[1:], external_package_paths) + + # We import the unit test code after init() to ensure that any + # Python version warnings are displayed in case an error occurs + # while interpreting webkitpy.unittests. This also allows + # logging to be configured prior to importing -- for example to + # enable the display of autoinstall logging.log messages while + # running the unit tests. + from webkitpy.test.main import Tester + + external_package_paths = filter(_test_import, external_package_paths) + + Tester().run_tests(sys.argv, external_package_paths) diff --git a/Tools/Scripts/update-iexploder-cssproperties b/Tools/Scripts/update-iexploder-cssproperties new file mode 100755 index 0000000..1a9c0d6 --- /dev/null +++ b/Tools/Scripts/update-iexploder-cssproperties @@ -0,0 +1,129 @@ +#!/usr/bin/perl + +# Copyright (C) 2007 Apple Inc. All rights reserved. +# Copyright (C) 2010 Holger Hans Peter Freyther +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# This script updates Tools/iExploder/htdocs/*.in based on +# WebCore/css/CSSPropertyNames.in, WebCore/html/HTMLTagNames.in +# and WebCore/html/HTMLAttributeNames.in + +use warnings; +use strict; + +use FindBin; +use lib $FindBin::Bin; +use VCSUtils; +use webkitdirs; + +use File::Spec; + +sub generateEntityListFromFile($); +sub readiExploderFile($); +sub update($$); +sub writeiExploderFile($@); + +update("cssproperties.in", "css/CSSPropertyNames.in"); +update("htmlattrs.in", "html/HTMLAttributeNames.in"); +update("htmltags.in", "html/HTMLTagNames.in"); +print "Successfully updated!\n"; + +exit 0; + +sub generateEntityListFromFile($) +{ + my ($filename) = @_; + + my $revision = svnRevisionForDirectory(dirname($filename)); + my $path = File::Spec->abs2rel($filename, sourceDir()); + my $result = "# From WebKit svn r" . $revision . " (" . $path . ")\n"; + + my @entities = (); + my $in_namespace = 0; + + open(IN, $filename) || die "$!"; + while (my $l = <IN>) { + chomp $l; + if ($l =~ m/^namespace=\"/) { + $in_namespace = 1; + } elsif ($in_namespace && $l =~ m/^$/) { + $in_namespace = 0; + } + + next if $in_namespace; + next if $l =~ m/^\s*#/ || $l =~ m/^\s*$/; + + # For HTML Tags that can have additional information + if ($l =~ m/ /) { + my @split = split / /, $l; + $l = $split[0] + } + + push(@entities, $l); + } + close(IN); + + $result .= join("\n", sort { $a cmp $b } @entities) . "\n\n"; + + return $result; +} + +sub readiExploderFile($) +{ + my ($filename) = @_; + + my @sections = (); + local $/ = "\n\n"; + + open(IN, $filename) || die "$!"; + @sections = <IN>; + close(IN); + + return @sections; +} + +sub update($$) +{ + my ($iexploderPath, $webcorePath) = @_; + + $iexploderPath = File::Spec->catfile(sourceDir(), "Tools", "iExploder", "htdocs", split("/", $iexploderPath)); + $webcorePath = File::Spec->catfile(sourceDir(), "WebCore", split("/", $webcorePath)); + + my @sections = readiExploderFile($iexploderPath); + $sections[0] = generateEntityListFromFile($webcorePath); + writeiExploderFile($iexploderPath, @sections); +} + + +sub writeiExploderFile($@) +{ + my ($filename, @sections) = @_; + + open(OUT, "> $filename") || die "$!"; + print OUT join("", @sections); + close(OUT); +} + diff --git a/Tools/Scripts/update-javascriptcore-test-results b/Tools/Scripts/update-javascriptcore-test-results new file mode 100755 index 0000000..dd8b9b6 --- /dev/null +++ b/Tools/Scripts/update-javascriptcore-test-results @@ -0,0 +1,73 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2007 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use FindBin; +use Getopt::Long; +use lib $FindBin::Bin; +use webkitdirs; + +chdirWebKit(); +chdir "JavaScriptCore/tests/mozilla" or die; + +my $force = 0; +GetOptions('force' => \$force); + +open EXPECTED, "expected.html" or die; +while (<EXPECTED>) { + last if /failures reported\.$/; +} +my %expected; +while (<EXPECTED>) { + chomp; + $expected{$_} = 1; +} +close EXPECTED; + +open ACTUAL, "actual.html" or die; +my $actual; +while (<ACTUAL>) { + $actual .= $_; + last if /failures reported\.$/; +} +my $failed = 0; +while (<ACTUAL>) { + $actual .= $_; + chomp; + if (!$expected{$_}) { + $failed = 1; + print "failure not expected: $_\n"; + } +} +close ACTUAL; + +die "won't update, failures introduced\n" if $failed && !$force; + +open EXPECTED, ">expected.html"; +print EXPECTED $actual; +close EXPECTED; diff --git a/Tools/Scripts/update-sources-list.py b/Tools/Scripts/update-sources-list.py new file mode 100755 index 0000000..433d04a --- /dev/null +++ b/Tools/Scripts/update-sources-list.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python + +# Copyright (C) 2007 Kevin Ollivier All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Make sure any port-independent files added to the Bakefile are +# added to GTK, QT, etc. so that file updates can happen in one place. + +import os, sys +from xml.dom import minidom + +scriptDir = os.path.abspath(sys.path[0]) +wkroot = os.path.abspath(os.path.join(scriptDir, "../..")) + +def getWebCoreFilesDict(): + """ + This method parses the WebCoreSources.bkl file, which has a list of all sources not specific + to any port, and returns the result as a dictionary with items of the form + (groupName, groupFiles). 
+ """ + sources = {} + sources_prefix = "WEBCORE_" + filepath = os.path.join(wkroot, "WebCore/WebCoreSources.bkl") + assert(os.path.exists(filepath)) + + doc = minidom.parse(filepath) + for sourceGroup in doc.getElementsByTagName("set"): + groupName = "" + if sourceGroup.attributes.has_key("var"): + groupName = sourceGroup.attributes["var"].value + groupName = groupName.replace(sources_prefix, "") + + sourcesList = [] + for node in sourceGroup.childNodes: + if node.nodeType == node.TEXT_NODE: + sourcesText = node.nodeValue.strip() + sourcesList = sourcesText.split("\n") + + assert(groupName != "") + assert(sourcesList != []) + + sources[groupName] = sourcesList + + return sources + +def generateWebCoreSourcesGTKAndQT(sources): + """ + Convert the dictionary obtained from getWebCoreFilesDict() into a Unix makefile syntax, + which IIUC is suitable for both GTK and QT build systems. To take advantage of this, + QT and GTK would have to include the file "WebCore/sources.inc" into their makefiles. + """ + makefileString = "" + + for key in sources.keys(): + makefileString += key + "+=" + for source in sources[key]: + makefileString += " \\\n\t\t" + source.strip() + + makefileString += "\n\n" + + makefileString += "BASE_SOURCES +=" + for key in sources.keys(): + makefileString += " \\\n\t\t" + key + + outfile = os.path.join(wkroot, "WebCore/sources.inc") + sourcefile = open(outfile, "w") + sourcefile.write(makefileString) + sourcefile.close() + +sources = getWebCoreFilesDict() +generateWebCoreSourcesGTKAndQT(sources) + +# Coming soon - MSVC and hopefully XCode support! diff --git a/Tools/Scripts/update-webgl-conformance-tests b/Tools/Scripts/update-webgl-conformance-tests new file mode 100755 index 0000000..dfe20a1 --- /dev/null +++ b/Tools/Scripts/update-webgl-conformance-tests @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Wrapper around webkitpy/layout_tests/update-webgl-conformance-tests.py""" + +import webkitpy.layout_tests.update_webgl_conformance_tests +import sys + +if __name__ == '__main__': + sys.exit(webkitpy.layout_tests.update_webgl_conformance_tests.main()) diff --git a/Tools/Scripts/update-webkit b/Tools/Scripts/update-webkit new file mode 100755 index 0000000..6d3e0ee --- /dev/null +++ b/Tools/Scripts/update-webkit @@ -0,0 +1,133 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved. +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Update script for WebKit Open Source Project. + +use strict; +use FindBin; +use lib $FindBin::Bin; +use File::Basename; +use File::Path; +use File::Spec; +use Getopt::Long; +use VCSUtils; +use webkitdirs; + +sub runSvnUpdate(); +sub runGitUpdate(); + +# Handle options +my $quiet = ''; +my $showHelp; + +determineIsChromium(); + +chdirWebKit(); + +my $isGit = isGit(); +my $isSVN = isSVN(); + +my $getOptionsResult = GetOptions( + 'h|help' => \$showHelp, + 'q|quiet' => \$quiet, +); + +if (!$getOptionsResult || $showHelp) { + print STDERR <<__END__; +Usage: @{[ basename($0) ]} [options] + --chromium also update dependencies of the chromium port + -h|--help show the help message + -q|--quiet pass -q to svn update for quiet updates +__END__ + exit 1; +} + +my $startTime = time(); + +my @svnOptions = (); +push @svnOptions, '-q' if $quiet; + +# Don't prompt when using svn-1.6 or newer. +push @svnOptions, qw(--accept postpone) if isSVNVersion16OrNewer(); + +print "Updating OpenSource\n" unless $quiet; +runSvnUpdate() if $isSVN; +runGitUpdate() if $isGit; + +if (-d "../Internal") { + chdir("../Internal"); + print "Updating Internal\n" unless $quiet; + runSvnUpdate() if $isSVN; + runGitUpdate() if $isGit; +} elsif (isChromium()) { + # Workaround for https://bugs.webkit.org/show_bug.cgi?id=38926 + # We should remove the following "if" block when we find a right fix. 
+ if ((isCygwin() || isWindows()) && (stat("WebKit/chromium/features.gypi"))[9] >= $startTime) { + print "features.gypi has been updated. Cleaning the build directories.\n"; + rmtree(["WebKit/chromium/Debug", "WebKit/chromium/Release"]); + } + + system("perl", "Tools/Scripts/update-webkit-chromium") == 0 or die $!; +} elsif (isAppleWinWebKit()) { + system("perl", "Tools/Scripts/update-webkit-auxiliary-libs") == 0 or die; +} + +setupAppleWinEnv() if isAppleWinWebKit(); + +exit 0; + +sub runSvnUpdate() +{ + open UPDATE, "-|", "svn", "update", @svnOptions or die; + my @conflictedChangeLogs; + while (my $line = <UPDATE>) { + print $line; + $line =~ m/^C\s+(.+?)[\r\n]*$/; + if ($1) { + my $filename = normalizePath($1); + push @conflictedChangeLogs, $filename if basename($filename) eq "ChangeLog"; + } + } + close UPDATE or die; + + if (@conflictedChangeLogs) { + print "Attempting to merge conflicted ChangeLogs.\n"; + my $resolveChangeLogsPath = File::Spec->catfile(dirname($0), "resolve-ChangeLogs"); + (system($resolveChangeLogsPath, "--no-warnings", @conflictedChangeLogs) == 0) + or die "Could not open resolve-ChangeLogs script: $!.\n"; + } +} + +sub runGitUpdate() +{ + # Doing a git fetch first allows setups with svn-remote.svn.fetch = trunk:refs/remotes/origin/master + # to perform the rebase much much faster. + system("git", "fetch") == 0 or die; + system("git", "svn", "rebase") == 0 or die; +} diff --git a/Tools/Scripts/update-webkit-auxiliary-libs b/Tools/Scripts/update-webkit-auxiliary-libs new file mode 100755 index 0000000..19e4ad3 --- /dev/null +++ b/Tools/Scripts/update-webkit-auxiliary-libs @@ -0,0 +1,134 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007 Apple Computer, Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
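For reference, runSvnUpdate in update-webkit above recognizes conflicted files by matching the
lines that svn update prints with a leading "C", e.g. (illustrative path, spacing approximate):

    C    WebCore/ChangeLog

and hands any whose basename is ChangeLog to resolve-ChangeLogs --no-warnings.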
+ +# Updates a development environment to the new WebKitAuxiliaryLibrary + +use strict; +use warnings; + +use File::Find; +use File::Spec; +use File::Temp (); +use FindBin; +use HTTP::Date qw(str2time); +use POSIX; +use lib $FindBin::Bin; +use webkitdirs; + +sub lastModifiedToUnixTime($); + +# Time in seconds that the new zip file must be newer than the old for us to +# consider them to be different. If the difference in modification time is less +# than this threshold, we assume that the files are the same. We need this +# because the zip file is served from a set of mirrors with slightly different +# Last-Modified times. +my $newnessThreshold = 30; + +my $sourceDir = sourceDir(); +my $file = "WebKitAuxiliaryLibrary"; +my $zipFile = "$file.zip"; +my $auxiliaryLibsURL = "http://developer.apple.com/opensource/internet/$zipFile"; +my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win"; +my $tmpDir = File::Spec->rel2abs(File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1)); + +print "Checking Last-Modified date of $zipFile...\n"; + +my $result = system "curl -s -I $auxiliaryLibsURL | grep Last-Modified > \"$tmpDir/$file.headers\""; + +if (WEXITSTATUS($result)) { + print STDERR "Couldn't check Last-Modified date of new $zipFile.\n"; + print STDERR "Please ensure that $auxiliaryLibsURL is reachable.\n"; + + if (! -f "$webkitLibrariesDir/$file.headers") { + print STDERR "Unable to check Last-Modified date and no version of $file to fall back to.\n"; + exit 1; + } + + print STDERR "Falling back to existing version of $file.\n"; + exit 0; +} + +if (open NEW, "$tmpDir/$file.headers") { + my $new = lastModifiedToUnixTime(<NEW>); + close NEW; + + if (defined $new && open OLD, "$webkitLibrariesDir/$file.headers") { + my $old = lastModifiedToUnixTime(<OLD>); + close OLD; + if (defined $old && abs($new - $old) < $newnessThreshold) { + print "Current $file is up to date\n"; + exit 0; + } + } +} + +print "Downloading $zipFile...\n\n"; +$result = system "curl -o \"$tmpDir/$zipFile\" $auxiliaryLibsURL"; +die "Couldn't download $zipFile!" if $result; + +$result = system "unzip", "-q", "-d", $tmpDir, "$tmpDir/$zipFile"; +die "Couldn't unzip $zipFile." if $result; + +print "\nInstalling $file...\n"; + +sub wanted +{ + my $relativeName = File::Spec->abs2rel($File::Find::name, "$tmpDir/$file/win"); + my $destination = "$webkitLibrariesDir/$relativeName"; + + if (-d $_) { + mkdir $destination; + return; + } + + system "cp", $_, $destination; +} + +File::Find::find(\&wanted, "$tmpDir/$file"); + +$result = system "mv", "$tmpDir/$file.headers", $webkitLibrariesDir; +print STDERR "Couldn't move $file.headers to $webkitLibrariesDir" . ".\n" if $result; + +print "The $file has been sucessfully installed in\n $webkitLibrariesDir\n"; +exit; + +sub toUnixPath +{ + my $path = shift; + return unless $path; + chomp($path = `cygpath -u '$path'`); + return $path; +} + +sub lastModifiedToUnixTime($) +{ + my ($str) = @_; + + $str =~ /^Last-Modified: (.*)$/ or return; + return str2time($1); +} diff --git a/Tools/Scripts/update-webkit-chromium b/Tools/Scripts/update-webkit-chromium new file mode 100755 index 0000000..1db1826 --- /dev/null +++ b/Tools/Scripts/update-webkit-chromium @@ -0,0 +1,68 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. 
Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Update script for the WebKit Chromium Port.
+
+use File::Path;
+use FindBin;
+use Getopt::Long;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+chdir("WebKit/chromium") or die $!;
+
+# Find gclient or install it.
+my $gclientPath;
+if (commandExists('gclient')) {
+    $gclientPath = 'gclient';
+} elsif (-e 'depot_tools/gclient') {
+    $gclientPath = 'depot_tools/gclient';
+} else {
+    print "Installing chromium's depot_tools...\n";
+    system("svn co http://src.chromium.org/svn/trunk/tools/depot_tools") == 0 or die $!;
+    $gclientPath = 'depot_tools/gclient';
+}
+
+if (! -e ".gclient") {
+    # If .gclient configuration file doesn't exist, create it.
+    print "Configuring gclient...\n";
+    system($gclientPath,
+           "config",
+           "--spec=solutions=[{'name':'./','url':None}]") == 0 or die $!;
+}
+
+my $force = 0;
+GetOptions(
+    'force' => \$force,
+);
+
+# Execute gclient sync.
+print "Updating chromium port dependencies using gclient...\n";
+my @gclientArgs = ($gclientPath, "sync");
+push @gclientArgs, "--force" if $force;
+system(@gclientArgs) == 0 or die $!;
diff --git a/Tools/Scripts/update-webkit-localizable-strings b/Tools/Scripts/update-webkit-localizable-strings
new file mode 100755
index 0000000..7030337
--- /dev/null
+++ b/Tools/Scripts/update-webkit-localizable-strings
@@ -0,0 +1,46 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use warnings; + +use File::Basename; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +my @directoriesToScan = ("WebKit/mac", "WebKit/win", "-WebCore/icu", "-WebKit/mac/icu"); +my $fileToUpdate = "WebKit/English.lproj/Localizable.strings"; +my $exceptionsFile = "WebKit/StringsNotToBeLocalized.txt"; + +@ARGV == 0 or die "Usage: " . basename($0) . "\n"; + +chdirWebKit(); + +system "sort -u $exceptionsFile -o $exceptionsFile"; +exec "./Tools/Scripts/extract-localizable-strings", $exceptionsFile, $fileToUpdate, @directoriesToScan; diff --git a/Tools/Scripts/update-webkit-support-libs b/Tools/Scripts/update-webkit-support-libs new file mode 100755 index 0000000..f0c897e --- /dev/null +++ b/Tools/Scripts/update-webkit-support-libs @@ -0,0 +1,148 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007 Apple Computer, Inc. All rights reserved. +# Copyright (C) Research In Motion Limited 2010. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Updates a development environment to the new WebKitSupportLibrary + +use strict; +use warnings; + +use File::Find; +use File::Temp (); +use File::Spec; +use FindBin; +use lib $FindBin::Bin; +use webkitdirs; + +use constant NOTAVERSION => "-1"; + +my $sourceDir = sourceDir(); +my $file = "WebKitSupportLibrary"; +my $zipFile = "$file.zip"; +my $zipDirectory = toUnixPath($ENV{'WEBKITSUPPORTLIBRARIESZIPDIR'}) || $sourceDir; +my $pathToZip = File::Spec->catfile($zipDirectory, $zipFile); +my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win"; +my $versionFile = $file . "Version"; +my $pathToVersionFile = File::Spec->catfile($webkitLibrariesDir, $versionFile); +my $tmpDir = File::Spec->rel2abs(File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1)); +my $versionFileURL = "http://developer.apple.com/opensource/internet/$versionFile"; + +my $extractedVersion = extractedVersion(); + +# Check whether the extracted library is up-to-date. If it is, we don't have anything to do. +my $expectedVersion = downloadExpectedVersionNumber(); +if ($extractedVersion ne NOTAVERSION && $extractedVersion eq $expectedVersion) { + print "$file is up-to-date.\n"; + exit; +} + +# Check whether the downloaded library is up-to-date. If it isn't, the user needs to download it. +my $zipFileVersion = zipFileVersion(); +dieAndInstructToDownload("$zipFile could not be found in $zipDirectory.") if $zipFileVersion eq NOTAVERSION; +dieAndInstructToDownload("$zipFile is out-of-date.") if $expectedVersion ne NOTAVERSION && $zipFileVersion ne $expectedVersion; +if ($zipFileVersion eq $extractedVersion) { + print "Falling back to existing version of $file.\n"; + exit; +} + +my $result = system "unzip", "-q", "-d", $tmpDir, $pathToZip; +die "Couldn't unzip $zipFile." if $result; + +print "\nInstalling $file...\n"; + +sub wanted +{ + my $relativeName = File::Spec->abs2rel($File::Find::name, "$tmpDir/$file/win"); + my $destination = "$webkitLibrariesDir/$relativeName"; + + if (-d $_) { + mkdir $destination; + return; + } + + system "cp", $_, $destination; +} + +File::Find::find(\&wanted, "$tmpDir/$file"); + +print "The $file has been sucessfully installed in\n $webkitLibrariesDir\n"; +exit; + +sub toUnixPath +{ + my $path = shift; + return unless $path; + chomp($path = `cygpath -u '$path'`); + return $path; +} + +sub extractedVersion +{ + if (open VERSION, "<", $pathToVersionFile) { + chomp(my $extractedVersion = <VERSION>); + close VERSION; + return $extractedVersion; + } + return NOTAVERSION; +} + +sub downloadExpectedVersionNumber +{ + chomp(my $expectedVersion = `curl -s $versionFileURL`); + return WEXITSTATUS($?) ? NOTAVERSION : $expectedVersion; +} + +sub zipFileVersion +{ + return NOTAVERSION unless -f $pathToZip; + chomp(my $zipFileVersion = `unzip -p "$pathToZip" $file/win/$versionFile`); + return $zipFileVersion; +} + +sub dieAndInstructToDownload +{ + my $message = shift; + + die <<EOF; + +=============================================================================== +$message +Please download $zipFile from: + + http://developer.apple.com/opensource/internet/webkit_sptlib_agree.html + +and place it in: + + $sourceDir + +Then run build-webkit again. 
+=============================================================================== + +EOF + +} diff --git a/Tools/Scripts/validate-committer-lists b/Tools/Scripts/validate-committer-lists new file mode 100755 index 0000000..2519e01 --- /dev/null +++ b/Tools/Scripts/validate-committer-lists @@ -0,0 +1,260 @@ +#!/usr/bin/env python + +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Checks Python's known list of committers against lists.webkit.org and SVN history. + + +import os +import subprocess +import re +import urllib2 +from datetime import date, datetime, timedelta +from webkitpy.common.config.committers import CommitterList +from webkitpy.common.system.deprecated_logging import log, error +from webkitpy.common.checkout.scm import Git + +# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy +# so this import should always succeed. +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup + +def print_list_if_non_empty(title, list_to_print): + if not list_to_print: + return + print # Newline before the list + print title + for item in list_to_print: + print item + +class CommitterListFromMailingList: + committers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-committers" + reviewers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-reviewers" + + def _fetch_emails_from_page(self, url): + page = urllib2.urlopen(url) + soup = BeautifulSoup(page) + + emails = [] + # Grab the cells in the first column (which happens to be the bug ids). + for email_item in soup('li'): + email_link = email_item.find("a") + email = email_link.string.replace(" at ", "@") # The email is obfuscated using " at " instead of "@". 
emails.append(email)
+        return emails
+
+    @staticmethod
+    def _commiters_not_found_in_email_list(committers, emails):
+        missing_from_mailing_list = []
+        for committer in committers:
+            for email in committer.emails:
+                if email in emails:
+                    break
+            else:
+                missing_from_mailing_list.append(committer)
+        return missing_from_mailing_list
+
+    @staticmethod
+    def _emails_not_found_in_committer_list(committers, emails):
+        email_to_committer_map = {}
+        for committer in committers:
+            for email in committer.emails:
+                email_to_committer_map[email] = committer
+
+        return filter(lambda email: not email_to_committer_map.get(email), emails)
+
+    def check_for_emails_missing_from_list(self, committer_list):
+        committer_emails = self._fetch_emails_from_page(self.committers_list_url)
+        list_name = "webkit-committers@lists.webkit.org"
+
+        missing_from_mailing_list = self._commiters_not_found_in_email_list(committer_list.committers(), committer_emails)
+        print_list_if_non_empty("Committers missing from %s:" % list_name, missing_from_mailing_list)
+
+        users_missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), committer_emails)
+        print_list_if_non_empty("Subscribers to %s missing from committers.py:" % list_name, users_missing_from_committers)
+
+
+        reviewer_emails = self._fetch_emails_from_page(self.reviewers_list_url)
+        list_name = "webkit-reviewers@lists.webkit.org"
+
+        missing_from_mailing_list = self._commiters_not_found_in_email_list(committer_list.reviewers(), reviewer_emails)
+        print_list_if_non_empty("Reviewers missing from %s:" % list_name, missing_from_mailing_list)
+
+        missing_from_reviewers = self._emails_not_found_in_committer_list(committer_list.reviewers(), reviewer_emails)
+        print_list_if_non_empty("Subscribers to %s missing from reviewers in committers.py:" % list_name, missing_from_reviewers)
+
+        missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), reviewer_emails)
+        print_list_if_non_empty("Subscribers to %s completely missing from committers.py:" % list_name, missing_from_committers)
+
+
+class CommitterListFromGit:
+    login_to_email_address = {
+        'aliceli1' : 'alice.liu@apple.com',
+        'bdash' : 'mrowe@apple.com',
+        'bdibello' : 'bdibello@apple.com', # Bruce DiBello, only 4 commits: r10023, r9548, r9538, r9535
+        'cblu' : 'cblu@apple.com',
+        'cpeterse' : 'cpetersen@apple.com',
+        'eseidel' : 'eric@webkit.org',
+        'gdennis' : 'gdennis@webkit.org',
+        'goldsmit' : 'goldsmit@apple.com', # Debbie Goldsmith, only one commit r8839
+        'gramps' : 'gramps@apple.com',
+        'honeycutt' : 'jhoneycutt@apple.com',
+        'jdevalk' : 'joost@webkit.org',
+        'jens' : 'jens@apple.com',
+        'justing' : 'justin.garcia@apple.com',
+        'kali' : 'kali@apple.com', # Christy Warren, did BIDI work, 5 commits: r8815, r8802, r8801, r8791, r8773, r8603
+        'kjk' : 'kkowalczyk@gmail.com',
+        'kmccullo' : 'kmccullough@apple.com',
+        'kocienda' : 'kocienda@apple.com',
+        'lamadio' : 'lamadio@apple.com', # Lou Amadio, only 2 commits: r17949 and r17783
+        'lars' : 'lars@kde.org',
+        'lweintraub' : 'lweintraub@apple.com',
+        'lypanov' : 'lypanov@kde.org',
+        'mhay' : 'mhay@apple.com', # Mike Hay, 3 commits: r3813, r2552, r2548
+        'ouch' : 'ouch@apple.com', # John Louch
+        'pyeh' : 'patti@apple.com', # Patti Yeh, did VoiceOver work in WebKit
+        'rjw' : 'rjw@apple.com',
+        'seangies' : 'seangies@apple.com', # Sean Gies?, only 5 commits: r16600, r16592, r16511, r16489, r16484
+        'sheridan' : 'sheridan@apple.com', # Shelly Sheridan
+        'thatcher' : 'timothy@apple.com',
+        'tomernic' :
'timo@apple.com', + 'trey' : 'trey@usa.net', + 'tristan' : 'tristan@apple.com', + 'vicki' : 'vicki@apple.com', + 'voas' : 'voas@apple.com', # Ed Voas, did some Carbon work in WebKit + 'zack' : 'zack@kde.org', + 'zimmermann' : 'zimmermann@webkit.org', + } + + def __init__(self): + self._last_commit_time_by_author_cache = {} + + def _fetch_authors_and_last_commit_time_from_git_log(self): + last_commit_dates = {} + git_log_args = ['git', 'log', '--reverse', '--pretty=format:%ae %at'] + process = subprocess.Popen(git_log_args, stdout=subprocess.PIPE) + + # eric@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc 1257090899 + line_regexp = re.compile("^(?P<author>.+)@\S+ (?P<timestamp>\d+)$") + while True: + output_line = process.stdout.readline() + if output_line == '' and process.poll() != None: + return last_commit_dates + + match_result = line_regexp.match(output_line) + if not match_result: + error("Failed to match line: %s" % output_line) + last_commit_dates[match_result.group('author')] = float(match_result.group('timestamp')) + + def _fill_in_emails_for_old_logins(self): + authors_missing_email = filter(lambda author: author.find('@') == -1, self._last_commit_time_by_author_cache) + authors_with_email = filter(lambda author: author.find('@') != -1, self._last_commit_time_by_author_cache) + prefixes_of_authors_with_email = map(lambda author: author.split('@')[0], authors_with_email) + + for author in authors_missing_email: + # First check to see if we have a manual mapping from login to email. + author_email = self.login_to_email_address.get(author) + + # Most old logins like 'darin' are now just 'darin@apple.com', so check for a prefix match if a manual mapping was not found. + if not author_email and author in prefixes_of_authors_with_email: + author_email_index = prefixes_of_authors_with_email.index(author) + author_email = authors_with_email[author_email_index] + + if not author_email: + # No known email mapping, likely not an active committer. We could log here. + continue + + # log("%s -> %s" % (author, author_email)) # For sanity checking. + no_email_commit_time = self._last_commit_time_by_author_cache.get(author) + email_commit_time = self._last_commit_time_by_author_cache.get(author_email) + # We compare the timestamps for extra sanity even though we could assume commits before email address were used for login are always going to be older. + if not email_commit_time or email_commit_time < no_email_commit_time: + self._last_commit_time_by_author_cache[author_email] = no_email_commit_time + del self._last_commit_time_by_author_cache[author] + + def _last_commit_by_author(self): + if not self._last_commit_time_by_author_cache: + self._last_commit_time_by_author_cache = self._fetch_authors_and_last_commit_time_from_git_log() + self._fill_in_emails_for_old_logins() + del self._last_commit_time_by_author_cache['(no author)'] # The initial svn import isn't very useful. 
+ return self._last_commit_time_by_author_cache + + @staticmethod + def _print_three_column_row(widths, values): + print "%s%s%s" % (values[0].ljust(widths[0]), values[1].ljust(widths[1]), values[2]) + + def print_possibly_expired_committers(self, committer_list): + authors_and_last_commits = self._last_commit_by_author().items() + authors_and_last_commits.sort(lambda a,b: cmp(a[1], b[1]), reverse=True) + committer_cuttof = date.today() - timedelta(days=365) + column_widths = [13, 25] + print + print "Committers who have not committed within one year:" + self._print_three_column_row(column_widths, ("Last Commit", "Committer Email", "Committer Record")) + for (author, last_commit) in authors_and_last_commits: + last_commit_date = date.fromtimestamp(last_commit) + if committer_cuttof > last_commit_date: + committer_record = committer_list.committer_by_email(author) + self._print_three_column_row(column_widths, (str(last_commit_date), author, committer_record)) + + def print_committers_missing_from_committer_list(self, committer_list): + missing_from_committers_py = [] + last_commit_time_by_author = self._last_commit_by_author() + for author in last_commit_time_by_author: + if not committer_list.committer_by_email(author): + missing_from_committers_py.append(author) + + never_committed = [] + for committer in committer_list.committers(): + for email in committer.emails: + if last_commit_time_by_author.get(email): + break + else: + never_committed.append(committer) + + print_list_if_non_empty("Historical committers missing from committer.py:", missing_from_committers_py) + print_list_if_non_empty("Committers in committer.py who have never committed:", never_committed) + + +def main(): + committer_list = CommitterList() + CommitterListFromMailingList().check_for_emails_missing_from_list(committer_list) + + if not Git.in_working_directory("."): + print """\n\nWARNING: validate-committer-lists requires a git checkout. +The following checks are disabled: + - List of committers ordered by last commit + - List of historical committers missing from committers.py +""" + return 1 + svn_committer_list = CommitterListFromGit() + svn_committer_list.print_possibly_expired_committers(committer_list) + svn_committer_list.print_committers_missing_from_committer_list(committer_list) + +if __name__ == "__main__": + main() diff --git a/Tools/Scripts/webkit-build-directory b/Tools/Scripts/webkit-build-directory new file mode 100755 index 0000000..bf7d66d --- /dev/null +++ b/Tools/Scripts/webkit-build-directory @@ -0,0 +1,69 @@ +#!/usr/bin/perl + +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# A script to expose WebKit's build directory detection logic to non-perl scripts. + +use FindBin; +use Getopt::Long; + +use lib $FindBin::Bin; +use webkitdirs; + +my $showConfigurationDirectory = 0; +my $showHelp = 0; +my $showTopLevelDirectory = 0; + + +my $programName = basename($0); +my $usage = <<EOF; +Usage: $programName [options] + --configuration Show the build directory for a specific configuration (e.g. Debug, Release. Defaults to the active configuration set by set-webkit-configuration) + -h|--help Show this help message + --top-level Show the top-level build directory + +Either --configuration or --top-level is required. +EOF + +setConfiguration(); # Figure out from the command line if we're --debug or --release or the default. + +my $getOptionsResult = GetOptions( + 'configuration' => \$showConfigurationDirectory, + 'top-level' => \$showTopLevelDirectory, + 'help|h' => \$showHelp, +); + +if (!$getOptionsResult || $showHelp || (!$showConfigurationDirectory && !$showTopLevelDirectory)) { + print STDERR $usage; + exit 1; +} + +if ($showTopLevelDirectory) { + print baseProductDir() . "\n"; +} else { + print productDir() . "\n"; +} diff --git a/Tools/Scripts/webkit-patch b/Tools/Scripts/webkit-patch new file mode 100755 index 0000000..007f919 --- /dev/null +++ b/Tools/Scripts/webkit-patch @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# Copyright (c) 2010 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# A tool for automating dealing with bugzilla, posting patches, committing patches, etc. + +import logging +import os +import sys + +from webkitpy.common.system.logutils import configure_logging +import webkitpy.python24.versioning as versioning + +_log = logging.getLogger("webkit-patch") + +def main(): + # This is a hack to let us enable DEBUG logging as early as possible. + # Note this can't be ternary as versioning.check_version() + # hasn't run yet and this python might be older than 2.5. + if set(["-v", "--verbose"]).intersection(set(sys.argv)): + logging_level = logging.DEBUG + else: + logging_level = logging.INFO + configure_logging(logging_level=logging_level) + + versioning.check_version() + + if sys.platform == "win32": + _log.fatal("webkit-patch is only supported under Cygwin Python, " + "not Win32 Python") + sys.exit(1) + + # Import webkit-patch code only after version-checking so that + # script doesn't error out before having a chance to report the + # version warning. + from webkitpy.tool.main import WebKitPatch + + WebKitPatch(__file__).main() + + +if __name__ == "__main__": + + main() diff --git a/Tools/Scripts/webkit-tools-completion.sh b/Tools/Scripts/webkit-tools-completion.sh new file mode 100644 index 0000000..cdedfe3 --- /dev/null +++ b/Tools/Scripts/webkit-tools-completion.sh @@ -0,0 +1,95 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Command line completion for common commands used in WebKit development. 
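+# (These helpers use bash's programmable completion: complete registers the handlers below,
+# and compgen fills COMPREPLY with the candidate words.)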
+# +# Set-up: +# Add a line like this to your .bashrc: +# source /path/to/WebKitCode/Tools/Scripts/webkit-tools-completion.sh + +__webkit-patch_generate_reply() +{ + COMPREPLY=( $(compgen -W "$1" -- "${COMP_WORDS[COMP_CWORD]}") ) +} + +_webkit-patch_complete() +{ + local command current_command="${COMP_WORDS[1]}" + case "$current_command" in + -h|--help) + command="help"; + ;; + *) + command="$current_command" + ;; + esac + + if [ $COMP_CWORD -eq 1 ]; then + __webkit-patch_generate_reply "--help apply-from-bug bugs-to-commit commit-message land land-from-bug obsolete-attachments patches-to-commit post upload tree-status rollout reviewed-patches" + return + fi + + case "$command" in + apply-from-bug) + __webkit-patch_generate_reply "--force-clean --local-commit --no-clean --no-update" + return + ;; + commit-message) + return + ;; + land) + __webkit-patch_generate_reply "--no-build --no-close --no-test --reviewer= -r" + return + ;; + land-from-bug) + __webkit-patch_generate_reply "--force-clean --no-build --no-clean --no-test" + return + ;; + obsolete-attachments) + return + ;; + post) + __webkit-patch_generate_reply "--description --no-obsolete --no-review --request-commit -m --open-bug" + return + ;; + upload) + __webkit-patch_generate_reply "--description --no-obsolete --no-review --request-commit --cc -m --open-bug" + return + ;; + post-commits) + __webkit-patch_generate_reply "--bug-id= --no-comment --no-obsolete --no-review -b" + return + ;; + esac +} + +complete -F _webkit-patch_complete webkit-patch +complete -W "--continue --fix-merged --help --no-continue --no-warnings --warnings -c -f -h -w" resolve-ChangeLogs +complete -W "--bug --diff --git-commit --git-index --git-reviewer --help --no-update --no-write --open --update --write -d -h -o" prepare-ChangeLog +complete -W "--clean --debug --help -h" build-webkit +complete -o default -W "--add-platform-exceptions --complex-text --configuration --guard-malloc --help --http --ignore-tests --launch-safari --leaks --merge-leak-depth --new-test-results --no-http --no-launch-safari --no-new-test-results --no-sample-on-timeout --no-strip-editing-callbacks --pixel-tests --platform --port --quiet --random --reset-results --results-directory --reverse --root --sample-on-timeout --singly --skipped --slowest --strict --strip-editing-callbacks --threaded --timeout --tolerance --use-remote-links-to-tests --valgrind --verbose -1 -c -g -h -i -l -m -o -p -q -t -v" run-webkit-tests diff --git a/Tools/Scripts/webkitdirs.pm b/Tools/Scripts/webkitdirs.pm new file mode 100644 index 0000000..ac40ec6 --- /dev/null +++ b/Tools/Scripts/webkitdirs.pm @@ -0,0 +1,1872 @@ +# Copyright (C) 2005, 2006, 2007, 2010 Apple Inc. All rights reserved. +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Module to share code to get to WebKit directories. + +use strict; +use warnings; +use Config; +use FindBin; +use File::Basename; +use File::Path; +use File::Spec; +use POSIX; +use VCSUtils; + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw(&chdirWebKit &baseProductDir &productDir &XcodeOptions &XcodeOptionString &XcodeOptionStringNoConfig &passedConfiguration &setConfiguration &safariPath &checkFrameworks ¤tSVNRevision); + %EXPORT_TAGS = ( ); + @EXPORT_OK = (); +} + +our @EXPORT_OK; + +my $architecture; +my $numberOfCPUs; +my $baseProductDir; +my @baseProductDirOption; +my $configuration; +my $configurationForVisualStudio; +my $configurationProductDir; +my $sourceDir; +my $currentSVNRevision; +my $osXVersion; +my $isQt; +my $isSymbian; +my %qtFeatureDefaults; +my $isGtk; +my $isWx; +my $isEfl; +my @wxArgs; +my $isChromium; +my $isInspectorFrontend; + +# Variables for Win32 support +my $vcBuildPath; +my $windowsSourceDir; +my $winVersion; +my $willUseVCExpressWhenBuilding = 0; + +# Defined in VCSUtils. +sub exitStatus($); + +sub determineSourceDir +{ + return if $sourceDir; + $sourceDir = $FindBin::Bin; + $sourceDir =~ s|/+$||; # Remove trailing '/' as we would die later + + # walks up path checking each directory to see if it is the main WebKit project dir, + # defined by containing JavaScriptCore, WebCore, and WebKit + until ((-d "$sourceDir/JavaScriptCore" && -d "$sourceDir/WebCore" && -d "$sourceDir/WebKit") || (-d "$sourceDir/Internal" && -d "$sourceDir/OpenSource")) + { + if ($sourceDir !~ s|/[^/]+$||) { + die "Could not find top level webkit directory above source directory using FindBin.\n"; + } + } + + $sourceDir = "$sourceDir/OpenSource" if -d "$sourceDir/OpenSource"; +} + +sub currentPerlPath() +{ + my $thisPerl = $^X; + if ($^O ne 'VMS') { + $thisPerl .= $Config{_exe} unless $thisPerl =~ m/$Config{_exe}$/i; + } + return $thisPerl; +} + +# used for scripts which are stored in a non-standard location +sub setSourceDir($) +{ + ($sourceDir) = @_; +} + +sub determineBaseProductDir +{ + return if defined $baseProductDir; + determineSourceDir(); + + $baseProductDir = $ENV{"WEBKITOUTPUTDIR"}; + + if (!defined($baseProductDir) and isAppleMacWebKit()) { + # Silently remove ~/Library/Preferences/xcodebuild.plist which can + # cause build failure. The presence of + # ~/Library/Preferences/xcodebuild.plist can prevent xcodebuild from + # respecting global settings such as a custom build products directory + # (<rdar://problem/5585899>). + my $personalPlistFile = $ENV{HOME} . 
"/Library/Preferences/xcodebuild.plist"; + if (-e $personalPlistFile) { + unlink($personalPlistFile) || die "Could not delete $personalPlistFile: $!"; + } + + open PRODUCT, "defaults read com.apple.Xcode PBXApplicationwideBuildSettings 2> " . File::Spec->devnull() . " |" or die; + $baseProductDir = join '', <PRODUCT>; + close PRODUCT; + + $baseProductDir = $1 if $baseProductDir =~ /SYMROOT\s*=\s*\"(.*?)\";/s; + undef $baseProductDir unless $baseProductDir =~ /^\//; + + if (!defined($baseProductDir)) { + open PRODUCT, "defaults read com.apple.Xcode PBXProductDirectory 2> " . File::Spec->devnull() . " |" or die; + $baseProductDir = <PRODUCT>; + close PRODUCT; + if ($baseProductDir) { + chomp $baseProductDir; + undef $baseProductDir unless $baseProductDir =~ /^\//; + } + } + } elsif (isSymbian()) { + # Shadow builds are not supported on Symbian + $baseProductDir = $sourceDir; + } + + if (!defined($baseProductDir)) { # Port-spesific checks failed, use default + $baseProductDir = "$sourceDir/WebKitBuild"; + } + + if (isGit() && isGitBranchBuild()) { + my $branch = gitBranch(); + $baseProductDir = "$baseProductDir/$branch"; + } + + if (isAppleMacWebKit()) { + $baseProductDir =~ s|^\Q$(SRCROOT)/..\E$|$sourceDir|; + $baseProductDir =~ s|^\Q$(SRCROOT)/../|$sourceDir/|; + $baseProductDir =~ s|^~/|$ENV{HOME}/|; + die "Can't handle Xcode product directory with a ~ in it.\n" if $baseProductDir =~ /~/; + die "Can't handle Xcode product directory with a variable in it.\n" if $baseProductDir =~ /\$/; + @baseProductDirOption = ("SYMROOT=$baseProductDir", "OBJROOT=$baseProductDir"); + } + + if (isCygwin()) { + my $dosBuildPath = `cygpath --windows \"$baseProductDir\"`; + chomp $dosBuildPath; + $ENV{"WEBKITOUTPUTDIR"} = $dosBuildPath; + my $unixBuildPath = `cygpath --unix \"$baseProductDir\"`; + chomp $unixBuildPath; + $baseProductDir = $unixBuildPath; + } +} + +sub setBaseProductDir($) +{ + ($baseProductDir) = @_; +} + +sub determineConfiguration +{ + return if defined $configuration; + determineBaseProductDir(); + if (open CONFIGURATION, "$baseProductDir/Configuration") { + $configuration = <CONFIGURATION>; + close CONFIGURATION; + } + if ($configuration) { + chomp $configuration; + # compatibility for people who have old Configuration files + $configuration = "Release" if $configuration eq "Deployment"; + $configuration = "Debug" if $configuration eq "Development"; + } else { + $configuration = "Release"; + } +} + +sub determineArchitecture +{ + return if defined $architecture; + # make sure $architecture is defined for non-apple-mac builds + $architecture = ""; + return unless isAppleMacWebKit(); + + determineBaseProductDir(); + if (open ARCHITECTURE, "$baseProductDir/Architecture") { + $architecture = <ARCHITECTURE>; + close ARCHITECTURE; + } + if ($architecture) { + chomp $architecture; + } else { + if (isTiger() or isLeopard()) { + $architecture = `arch`; + } else { + my $supports64Bit = `sysctl -n hw.optional.x86_64`; + chomp $supports64Bit; + $architecture = $supports64Bit ? 'x86_64' : `arch`; + } + chomp $architecture; + } +} + +sub determineNumberOfCPUs +{ + return if defined $numberOfCPUs; + if (isLinux()) { + # First try the nproc utility, if it exists. If we get no + # results fall back to just interpretting /proc directly. 
+ chomp($numberOfCPUs = `nproc 2> /dev/null`); + if ($numberOfCPUs eq "") { + $numberOfCPUs = (grep /processor/, `cat /proc/cpuinfo`); + } + } elsif (isWindows() || isCygwin()) { + if (defined($ENV{NUMBER_OF_PROCESSORS})) { + $numberOfCPUs = $ENV{NUMBER_OF_PROCESSORS}; + } else { + # Assumes cygwin + $numberOfCPUs = `ls /proc/registry/HKEY_LOCAL_MACHINE/HARDWARE/DESCRIPTION/System/CentralProcessor | wc -w`; + } + } elsif (isDarwin()) { + $numberOfCPUs = `sysctl -n hw.ncpu`; + } +} + +sub jscPath($) +{ + my ($productDir) = @_; + my $jscName = "jsc"; + $jscName .= "_debug" if configurationForVisualStudio() eq "Debug_All"; + $jscName .= ".exe" if (isWindows() || isCygwin()); + return "$productDir/$jscName" if -e "$productDir/$jscName"; + return "$productDir/JavaScriptCore.framework/Resources/$jscName"; +} + +sub argumentsForConfiguration() +{ + determineConfiguration(); + determineArchitecture(); + + my @args = (); + push(@args, '--debug') if $configuration eq "Debug"; + push(@args, '--release') if $configuration eq "Release"; + push(@args, '--32-bit') if $architecture ne "x86_64"; + push(@args, '--qt') if isQt(); + push(@args, '--symbian') if isSymbian(); + push(@args, '--gtk') if isGtk(); + push(@args, '--efl') if isEfl(); + push(@args, '--wx') if isWx(); + push(@args, '--chromium') if isChromium(); + push(@args, '--inspector-frontend') if isInspectorFrontend(); + return @args; +} + +sub determineConfigurationForVisualStudio +{ + return if defined $configurationForVisualStudio; + determineConfiguration(); + # FIXME: We should detect when Debug_All or Release_LTCG has been chosen. + $configurationForVisualStudio = $configuration; +} + +sub determineConfigurationProductDir +{ + return if defined $configurationProductDir; + determineBaseProductDir(); + determineConfiguration(); + if (isAppleWinWebKit() && !isWx()) { + $configurationProductDir = File::Spec->catdir($baseProductDir, "bin"); + } else { + # [Gtk][Efl] We don't have Release/Debug configurations in straight + # autotool builds (non build-webkit). In this case and if + # WEBKITOUTPUTDIR exist, use that as our configuration dir. This will + # allows us to run run-webkit-tests without using build-webkit. 
+ if ($ENV{"WEBKITOUTPUTDIR"} && (isGtk() || isEfl())) { + $configurationProductDir = "$baseProductDir"; + } else { + $configurationProductDir = "$baseProductDir/$configuration"; + } + } +} + +sub setConfigurationProductDir($) +{ + ($configurationProductDir) = @_; +} + +sub determineCurrentSVNRevision +{ + return if defined $currentSVNRevision; + determineSourceDir(); + $currentSVNRevision = svnRevisionForDirectory($sourceDir); + return $currentSVNRevision; +} + + +sub chdirWebKit +{ + determineSourceDir(); + chdir $sourceDir or die; +} + +sub baseProductDir +{ + determineBaseProductDir(); + return $baseProductDir; +} + +sub sourceDir +{ + determineSourceDir(); + return $sourceDir; +} + +sub productDir +{ + determineConfigurationProductDir(); + return $configurationProductDir; +} + +sub jscProductDir +{ + my $productDir = productDir(); + $productDir .= "/JavaScriptCore" if isQt(); + $productDir .= "/$configuration" if (isQt() && isWindows()); + $productDir .= "/Programs" if (isGtk() || isEfl()); + + return $productDir; +} + +sub configuration() +{ + determineConfiguration(); + return $configuration; +} + +sub configurationForVisualStudio() +{ + determineConfigurationForVisualStudio(); + return $configurationForVisualStudio; +} + +sub currentSVNRevision +{ + determineCurrentSVNRevision(); + return $currentSVNRevision; +} + +sub XcodeOptions +{ + determineBaseProductDir(); + determineConfiguration(); + determineArchitecture(); + return (@baseProductDirOption, "-configuration", $configuration, "ARCHS=$architecture"); +} + +sub XcodeOptionString +{ + return join " ", XcodeOptions(); +} + +sub XcodeOptionStringNoConfig +{ + return join " ", @baseProductDirOption; +} + +sub XcodeCoverageSupportOptions() +{ + my @coverageSupportOptions = (); + push @coverageSupportOptions, "GCC_GENERATE_TEST_COVERAGE_FILES=YES"; + push @coverageSupportOptions, "GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES"; + push @coverageSupportOptions, "EXTRA_LINK= \$(EXTRA_LINK) -ftest-coverage -fprofile-arcs"; + push @coverageSupportOptions, "OTHER_CFLAGS= \$(OTHER_CFLAGS) -DCOVERAGE -MD"; + push @coverageSupportOptions, "OTHER_LDFLAGS=\$(OTHER_LDFLAGS) -ftest-coverage -fprofile-arcs -lgcov"; + return @coverageSupportOptions; +} + +my $passedConfiguration; +my $searchedForPassedConfiguration; +sub determinePassedConfiguration +{ + return if $searchedForPassedConfiguration; + $searchedForPassedConfiguration = 1; + + my $isWinCairo = checkForArgumentAndRemoveFromARGV("--wincairo"); + + for my $i (0 .. 
$#ARGV) { + my $opt = $ARGV[$i]; + if ($opt =~ /^--debug$/i || $opt =~ /^--devel/i) { + splice(@ARGV, $i, 1); + $passedConfiguration = "Debug"; + $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); + return; + } + if ($opt =~ /^--release$/i || $opt =~ /^--deploy/i) { + splice(@ARGV, $i, 1); + $passedConfiguration = "Release"; + $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); + return; + } + if ($opt =~ /^--profil(e|ing)$/i) { + splice(@ARGV, $i, 1); + $passedConfiguration = "Profiling"; + $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); + return; + } + } + $passedConfiguration = undef; +} + +sub passedConfiguration +{ + determinePassedConfiguration(); + return $passedConfiguration; +} + +sub setConfiguration +{ + setArchitecture(); + + if (my $config = shift @_) { + $configuration = $config; + return; + } + + determinePassedConfiguration(); + $configuration = $passedConfiguration if $passedConfiguration; +} + + +my $passedArchitecture; +my $searchedForPassedArchitecture; +sub determinePassedArchitecture +{ + return if $searchedForPassedArchitecture; + $searchedForPassedArchitecture = 1; + + for my $i (0 .. $#ARGV) { + my $opt = $ARGV[$i]; + if ($opt =~ /^--32-bit$/i) { + splice(@ARGV, $i, 1); + if (isAppleMacWebKit()) { + $passedArchitecture = `arch`; + chomp $passedArchitecture; + } + return; + } + } + $passedArchitecture = undef; +} + +sub passedArchitecture +{ + determinePassedArchitecture(); + return $passedArchitecture; +} + +sub architecture() +{ + determineArchitecture(); + return $architecture; +} + +sub numberOfCPUs() +{ + determineNumberOfCPUs(); + return $numberOfCPUs; +} + +sub setArchitecture +{ + if (my $arch = shift @_) { + $architecture = $arch; + return; + } + + determinePassedArchitecture(); + $architecture = $passedArchitecture if $passedArchitecture; +} + + +sub safariPathFromSafariBundle +{ + my ($safariBundle) = @_; + + return "$safariBundle/Contents/MacOS/Safari" if isAppleMacWebKit(); + return $safariBundle if isAppleWinWebKit(); +} + +sub installedSafariPath +{ + my $safariBundle; + + if (isAppleMacWebKit()) { + $safariBundle = "/Applications/Safari.app"; + } elsif (isAppleWinWebKit()) { + $safariBundle = `"$configurationProductDir/FindSafari.exe"`; + $safariBundle =~ s/[\r\n]+$//; + $safariBundle = `cygpath -u '$safariBundle'` if isCygwin(); + $safariBundle =~ s/[\r\n]+$//; + $safariBundle .= "Safari.exe"; + } + + return safariPathFromSafariBundle($safariBundle); +} + +# Locate Safari. +sub safariPath +{ + # Use WEBKIT_SAFARI environment variable if present. + my $safariBundle = $ENV{WEBKIT_SAFARI}; + if (!$safariBundle) { + determineConfigurationProductDir(); + # Use Safari.app in product directory if present (good for Safari development team). 
+ if (isAppleMacWebKit() && -d "$configurationProductDir/Safari.app") { + $safariBundle = "$configurationProductDir/Safari.app"; + } elsif (isAppleWinWebKit()) { + my $path = "$configurationProductDir/Safari.exe"; + my $debugPath = "$configurationProductDir/Safari_debug.exe"; + + if (configurationForVisualStudio() eq "Debug_All" && -x $debugPath) { + $safariBundle = $debugPath; + } elsif (-x $path) { + $safariBundle = $path; + } + } + if (!$safariBundle) { + return installedSafariPath(); + } + } + my $safariPath = safariPathFromSafariBundle($safariBundle); + die "Can't find executable at $safariPath.\n" if isAppleMacWebKit() && !-x $safariPath; + return $safariPath; +} + +sub builtDylibPathForName +{ + my $libraryName = shift; + determineConfigurationProductDir(); + if (isChromium()) { + return "$configurationProductDir/$libraryName"; + } + if (isQt()) { + $libraryName = "QtWebKit"; + if (isDarwin() and -d "$configurationProductDir/lib/$libraryName.framework") { + return "$configurationProductDir/lib/$libraryName.framework/$libraryName"; + } elsif (isDarwin() and -d "$configurationProductDir/lib") { + return "$configurationProductDir/lib/lib$libraryName.dylib"; + } elsif (isWindows()) { + if (configuration() eq "Debug") { + # On Windows, there is a "d" suffix to the library name. See <http://trac.webkit.org/changeset/53924/>. + $libraryName .= "d"; + } + + my $mkspec = `qmake -query QMAKE_MKSPECS`; + $mkspec =~ s/[\n|\r]$//g; + my $qtMajorVersion = retrieveQMakespecVar("$mkspec/qconfig.pri", "QT_MAJOR_VERSION"); + if (not $qtMajorVersion) { + $qtMajorVersion = ""; + } + return "$configurationProductDir/lib/$libraryName$qtMajorVersion.dll"; + } else { + return "$configurationProductDir/lib/lib$libraryName.so"; + } + } + if (isWx()) { + return "$configurationProductDir/libwxwebkit.dylib"; + } + if (isGtk()) { + my $libraryDir = "$configurationProductDir/$libraryName/../.libs/"; + if (-e $libraryDir . "libwebkitgtk-3.0.so") { + return $libraryDir . "libwebkitgtk-3.0.so"; + } + return $libraryDir . "libwebkitgtk-1.0.so"; + } + if (isEfl()) { + return "$configurationProductDir/$libraryName/../.libs/libewebkit.so"; + } + if (isAppleMacWebKit()) { + return "$configurationProductDir/$libraryName.framework/Versions/A/$libraryName"; + } + if (isAppleWinWebKit()) { + if ($libraryName eq "JavaScriptCore") { + return "$baseProductDir/lib/$libraryName.lib"; + } else { + return "$baseProductDir/$libraryName.intermediate/$configuration/$libraryName.intermediate/$libraryName.lib"; + } + } + + die "Unsupported platform, can't determine built library locations.\nTry `build-webkit --help` for more information.\n"; +} + +# Check to see that all the frameworks are built. +sub checkFrameworks # FIXME: This is a poor name since only the Mac calls built WebCore a Framework. +{ + return if isCygwin() || isWindows(); + my @frameworks = ("JavaScriptCore", "WebCore"); + push(@frameworks, "WebKit") if isAppleMacWebKit(); # FIXME: This seems wrong, all ports should have a WebKit these days. 
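# The loop below only verifies that each library reported by
# builtDylibPathForName() exists on disk; it does not check that the build is
# up to date (and Cygwin/Windows returned early above).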
+ for my $framework (@frameworks) { + my $path = builtDylibPathForName($framework); + die "Can't find built framework at \"$path\".\n" unless -e $path; + } +} + +sub isInspectorFrontend() +{ + determineIsInspectorFrontend(); + return $isInspectorFrontend; +} + +sub determineIsInspectorFrontend() +{ + return if defined($isInspectorFrontend); + $isInspectorFrontend = checkForArgumentAndRemoveFromARGV("--inspector-frontend"); +} + +sub isQt() +{ + determineIsQt(); + return $isQt; +} + +sub isSymbian() +{ + determineIsSymbian(); + return $isSymbian; +} + +sub qtFeatureDefaults() +{ + determineQtFeatureDefaults(); + return %qtFeatureDefaults; +} + +sub commandExists($) +{ + my $command = shift; + my $devnull = File::Spec->devnull(); + return `$command --version 2> $devnull`; +} + +sub determineQtFeatureDefaults() +{ + return if %qtFeatureDefaults; + die "ERROR: qmake missing but required to build WebKit.\n" if not commandExists("qmake"); + my $originalCwd = getcwd(); + chdir File::Spec->catfile(sourceDir(), "WebCore"); + my $defaults = `qmake CONFIG+=compute_defaults 2>&1`; + chdir $originalCwd; + + while ($defaults =~ m/(\S+?)=(\S+?)/gi) { + $qtFeatureDefaults{$1}=$2; + } +} + +sub checkForArgumentAndRemoveFromARGV +{ + my $argToCheck = shift; + return checkForArgumentAndRemoveFromArrayRef($argToCheck, \@ARGV); +} + +sub checkForArgumentAndRemoveFromArrayRef +{ + my ($argToCheck, $arrayRef) = @_; + my @indicesToRemove; + foreach my $index (0 .. $#$arrayRef) { + my $opt = $$arrayRef[$index]; + if ($opt =~ /^$argToCheck$/i ) { + push(@indicesToRemove, $index); + } + } + foreach my $index (@indicesToRemove) { + splice(@$arrayRef, $index, 1); + } + return $#indicesToRemove > -1; +} + + +sub determineIsQt() +{ + return if defined($isQt); + + # Allow override in case QTDIR is not set. + if (checkForArgumentAndRemoveFromARGV("--qt")) { + $isQt = 1; + return; + } + + # The presence of QTDIR only means Qt if --gtk or --wx or --efl are not on the command-line + if (isGtk() || isWx() || isEfl()) { + $isQt = 0; + return; + } + + $isQt = defined($ENV{'QTDIR'}); +} + +sub determineIsSymbian() +{ + return if defined($isSymbian); + + if (checkForArgumentAndRemoveFromARGV("--symbian")) { + $isSymbian = 1; + return; + } +} + +sub determineIsEfl() +{ + return if defined($isEfl); + $isEfl = checkForArgumentAndRemoveFromARGV("--efl"); +} + +sub isEfl() +{ + determineIsEfl(); + return $isEfl; +} + +sub isGtk() +{ + determineIsGtk(); + return $isGtk; +} + +sub determineIsGtk() +{ + return if defined($isGtk); + $isGtk = checkForArgumentAndRemoveFromARGV("--gtk"); +} + +sub isWx() +{ + determineIsWx(); + return $isWx; +} + +sub determineIsWx() +{ + return if defined($isWx); + $isWx = checkForArgumentAndRemoveFromARGV("--wx"); +} + +sub getWxArgs() +{ + if (!@wxArgs) { + @wxArgs = (""); + my $rawWxArgs = ""; + foreach my $opt (@ARGV) { + if ($opt =~ /^--wx-args/i ) { + @ARGV = grep(!/^--wx-args/i, @ARGV); + $rawWxArgs = $opt; + $rawWxArgs =~ s/--wx-args=//i; + } + } + @wxArgs = split(/,/, $rawWxArgs); + } + return @wxArgs; +} + +# Determine if this is debian, ubuntu, linspire, or something similar. 
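# Port selection in this module is flag-driven: the first call to isGtk(),
# isWx(), isEfl(), isChromium() or isQt() consumes its "--gtk", "--wx",
# "--efl", "--chromium" or "--qt" switch from @ARGV (via
# checkForArgumentAndRemoveFromARGV) and caches the answer. Qt is also
# inferred from $ENV{QTDIR} when none of --gtk, --wx or --efl was given, and
# isAppleWebKit() simply means "none of the above".
#
# Illustrative sketch only (not taken from this patch); a caller might branch
# on these predicates roughly as follows:
#
#     use webkitdirs;
#     if (isQt()) {
#         buildQMakeProject(0, @ARGV);          # 0 = do not clean
#     } elsif (isGtk()) {
#         buildGtkProject("WebKit", 0, @ARGV);
#     } elsif (isEfl()) {
#         buildCMakeEflProject(0, @ARGV);
#     }
#
# The helpers directly below (isDebianBased / isFedoraBased) are unrelated to
# port selection; they only test for /etc/debian_version or /etc/fedora-release.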
+sub isDebianBased() +{ + return -e "/etc/debian_version"; +} + +sub isFedoraBased() +{ + return -e "/etc/fedora-release"; +} + +sub isChromium() +{ + determineIsChromium(); + return $isChromium; +} + +sub determineIsChromium() +{ + return if defined($isChromium); + $isChromium = checkForArgumentAndRemoveFromARGV("--chromium"); +} + +sub isCygwin() +{ + return ($^O eq "cygwin") || 0; +} + +sub determineWinVersion() +{ + return if $winVersion; + + if (!isCygwin()) { + $winVersion = -1; + return; + } + + my $versionString = `uname -s`; + $versionString =~ /(\d\.\d)/; + $winVersion = $1; +} + +sub winVersion() +{ + determineWinVersion(); + return $winVersion; +} + +sub isWindows7() +{ + return winVersion() eq "6.1"; +} + +sub isWindowsVista() +{ + return winVersion() eq "6.0"; +} + +sub isWindowsXP() +{ + return winVersion() eq "5.1"; +} + +sub isDarwin() +{ + return ($^O eq "darwin") || 0; +} + +sub isWindows() +{ + return ($^O eq "MSWin32") || 0; +} + +sub isMsys() +{ + return ($^O eq "msys") || 0; +} + +sub isLinux() +{ + return ($^O eq "linux") || 0; +} + +sub isAppleWebKit() +{ + return !(isQt() or isGtk() or isWx() or isChromium() or isEfl()); +} + +sub isAppleMacWebKit() +{ + return isAppleWebKit() && isDarwin(); +} + +sub isAppleWinWebKit() +{ + return isAppleWebKit() && (isCygwin() || isWindows()); +} + +sub isPerianInstalled() +{ + if (!isAppleWebKit()) { + return 0; + } + + if (-d "/Library/QuickTime/Perian.component") { + return 1; + } + + if (-d "$ENV{HOME}/Library/QuickTime/Perian.component") { + return 1; + } + + return 0; +} + +sub determineOSXVersion() +{ + return if $osXVersion; + + if (!isDarwin()) { + $osXVersion = -1; + return; + } + + my $version = `sw_vers -productVersion`; + my @splitVersion = split(/\./, $version); + @splitVersion >= 2 or die "Invalid version $version"; + $osXVersion = { + "major" => $splitVersion[0], + "minor" => $splitVersion[1], + "subminor" => (defined($splitVersion[2]) ? $splitVersion[2] : 0), + }; +} + +sub osXVersion() +{ + determineOSXVersion(); + return $osXVersion; +} + +sub isTiger() +{ + return isDarwin() && osXVersion()->{"minor"} == 4; +} + +sub isLeopard() +{ + return isDarwin() && osXVersion()->{"minor"} == 5; +} + +sub isSnowLeopard() +{ + return isDarwin() && osXVersion()->{"minor"} == 6; +} + +sub isWindowsNT() +{ + return $ENV{'OS'} eq 'Windows_NT'; +} + +sub relativeScriptsDir() +{ + my $scriptDir = File::Spec->catpath("", File::Spec->abs2rel(dirname($0), getcwd()), ""); + if ($scriptDir eq "") { + $scriptDir = "."; + } + return $scriptDir; +} + +sub launcherPath() +{ + my $relativeScriptsPath = relativeScriptsDir(); + if (isGtk() || isQt() || isWx() || isEfl()) { + return "$relativeScriptsPath/run-launcher"; + } elsif (isAppleWebKit()) { + return "$relativeScriptsPath/run-safari"; + } +} + +sub launcherName() +{ + if (isGtk()) { + return "GtkLauncher"; + } elsif (isQt()) { + return "QtTestBrowser"; + } elsif (isWx()) { + return "wxBrowser"; + } elsif (isAppleWebKit()) { + return "Safari"; + } elsif (isEfl()) { + return "EWebLauncher"; + } +} + +sub checkRequiredSystemConfig +{ + if (isDarwin()) { + chomp(my $productVersion = `sw_vers -productVersion`); + if ($productVersion lt "10.4") { + print "*************************************************************\n"; + print "Mac OS X Version 10.4.0 or later is required to build WebKit.\n"; + print "You have " . $productVersion . 
", thus the build will most likely fail.\n"; + print "*************************************************************\n"; + } + my $xcodeVersion = `xcodebuild -version`; + if ($xcodeVersion !~ /DevToolsCore-(\d+)/ || $1 < 747) { + print "*************************************************************\n"; + print "Xcode Version 2.3 or later is required to build WebKit.\n"; + print "You have an earlier version of Xcode, thus the build will\n"; + print "most likely fail. The latest Xcode is available from the web:\n"; + print "http://developer.apple.com/tools/xcode\n"; + print "*************************************************************\n"; + } + } elsif (isGtk() or isQt() or isWx() or isEfl()) { + my @cmds = qw(flex bison gperf); + my @missing = (); + foreach my $cmd (@cmds) { + push @missing, $cmd if not commandExists($cmd); + } + + if (@missing) { + my $list = join ", ", @missing; + die "ERROR: $list missing but required to build WebKit.\n"; + } + } + # Win32 and other platforms may want to check for minimum config +} + +sub determineWindowsSourceDir() +{ + return if $windowsSourceDir; + $windowsSourceDir = sourceDir(); + chomp($windowsSourceDir = `cygpath -w '$windowsSourceDir'`) if isCygwin(); +} + +sub windowsSourceDir() +{ + determineWindowsSourceDir(); + return $windowsSourceDir; +} + +sub windowsLibrariesDir() +{ + return windowsSourceDir() . "\\WebKitLibraries\\win"; +} + +sub windowsOutputDir() +{ + return windowsSourceDir() . "\\WebKitBuild"; +} + +sub setupAppleWinEnv() +{ + return unless isAppleWinWebKit(); + + if (isWindowsNT()) { + my $restartNeeded = 0; + my %variablesToSet = (); + + # Setting the environment variable 'CYGWIN' to 'tty' makes cygwin enable extra support (i.e., termios) + # for UNIX-like ttys in the Windows console + $variablesToSet{CYGWIN} = "tty" unless $ENV{CYGWIN}; + + # Those environment variables must be set to be able to build inside Visual Studio. + $variablesToSet{WEBKITLIBRARIESDIR} = windowsLibrariesDir() unless $ENV{WEBKITLIBRARIESDIR}; + $variablesToSet{WEBKITOUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKITOUTPUTDIR}; + + foreach my $variable (keys %variablesToSet) { + print "Setting the Environment Variable '" . $variable . "' to '" . $variablesToSet{$variable} . "'\n\n"; + system qw(regtool -s set), '\\HKEY_CURRENT_USER\\Environment\\' . 
$variable, $variablesToSet{$variable}; + $restartNeeded ||= $variable eq "WEBKITLIBRARIESDIR" || $variable eq "WEBKITOUTPUTDIR"; + } + + if ($restartNeeded) { + print "Please restart your computer before attempting to build inside Visual Studio.\n\n"; + } + } else { + if (!$ENV{'WEBKITLIBRARIESDIR'}) { + print "Warning: You must set the 'WebKitLibrariesDir' environment variable\n"; + print " to be able build WebKit from within Visual Studio.\n"; + print " Make sure that 'WebKitLibrariesDir' points to the\n"; + print " 'WebKitLibraries/win' directory, not the 'WebKitLibraries/' directory.\n\n"; + } + if (!$ENV{'WEBKITOUTPUTDIR'}) { + print "Warning: You must set the 'WebKitOutputDir' environment variable\n"; + print " to be able build WebKit from within Visual Studio.\n\n"; + } + } +} + +sub setupCygwinEnv() +{ + return if !isCygwin() && !isWindows(); + return if $vcBuildPath; + + my $vsInstallDir; + my $programFilesPath = $ENV{'PROGRAMFILES(X86)'} || $ENV{'PROGRAMFILES'} || "C:\\Program Files"; + if ($ENV{'VSINSTALLDIR'}) { + $vsInstallDir = $ENV{'VSINSTALLDIR'}; + } else { + $vsInstallDir = File::Spec->catdir($programFilesPath, "Microsoft Visual Studio 8"); + } + chomp($vsInstallDir = `cygpath "$vsInstallDir"`) if isCygwin(); + $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE devenv.com)); + if (-e $vcBuildPath) { + # Visual Studio is installed; we can use pdevenv to build. + # FIXME: Make pdevenv work with non-Cygwin Perl. + $vcBuildPath = File::Spec->catfile(sourceDir(), qw(Tools Scripts pdevenv)) if isCygwin(); + } else { + # Visual Studio not found, try VC++ Express + $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE VCExpress.exe)); + if (! -e $vcBuildPath) { + print "*************************************************************\n"; + print "Cannot find '$vcBuildPath'\n"; + print "Please execute the file 'vcvars32.bat' from\n"; + print "'$programFilesPath\\Microsoft Visual Studio 8\\VC\\bin\\'\n"; + print "to setup the necessary environment variables.\n"; + print "*************************************************************\n"; + die; + } + $willUseVCExpressWhenBuilding = 1; + } + + my $qtSDKPath = File::Spec->catdir($programFilesPath, "QuickTime SDK"); + if (0 && ! -e $qtSDKPath) { + print "*************************************************************\n"; + print "Cannot find '$qtSDKPath'\n"; + print "Please download the QuickTime SDK for Windows from\n"; + print "http://developer.apple.com/quicktime/download/\n"; + print "*************************************************************\n"; + die; + } + + unless ($ENV{WEBKITLIBRARIESDIR}) { + $ENV{'WEBKITLIBRARIESDIR'} = File::Spec->catdir($sourceDir, "WebKitLibraries", "win"); + chomp($ENV{WEBKITLIBRARIESDIR} = `cygpath -wa $ENV{WEBKITLIBRARIESDIR}`) if isCygwin(); + } + + print "Building results into: ", baseProductDir(), "\n"; + print "WEBKITOUTPUTDIR is set to: ", $ENV{"WEBKITOUTPUTDIR"}, "\n"; + print "WEBKITLIBRARIESDIR is set to: ", $ENV{"WEBKITLIBRARIESDIR"}, "\n"; +} + +sub dieIfWindowsPlatformSDKNotInstalled +{ + my $registry32Path = "/proc/registry/"; + my $registry64Path = "/proc/registry64/"; + my $windowsPlatformSDKRegistryEntry = "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/MicrosoftSDK/InstalledSDKs/D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1"; + + # FIXME: It would be better to detect whether we are using 32- or 64-bit Windows + # and only check the appropriate entry. But for now we just blindly check both. + return if (-e $registry32Path . $windowsPlatformSDKRegistryEntry) || (-e $registry64Path . 
$windowsPlatformSDKRegistryEntry); + + print "*************************************************************\n"; + print "Cannot find registry entry '$windowsPlatformSDKRegistryEntry'.\n"; + print "Please download and install the Microsoft Windows Server 2003 R2\n"; + print "Platform SDK from <http://www.microsoft.com/downloads/details.aspx?\n"; + print "familyid=0baf2b35-c656-4969-ace8-e4c0c0716adb&displaylang=en>.\n\n"; + print "Then follow step 2 in the Windows section of the \"Installing Developer\n"; + print "Tools\" instructions at <http://www.webkit.org/building/tools.html>.\n"; + print "*************************************************************\n"; + die; +} + +sub copyInspectorFrontendFiles +{ + my $productDir = productDir(); + my $sourceInspectorPath = sourceDir() . "/WebCore/inspector/front-end/"; + my $inspectorResourcesDirPath = $ENV{"WEBKITINSPECTORRESOURCESDIR"}; + + if (!defined($inspectorResourcesDirPath)) { + $inspectorResourcesDirPath = ""; + } + + if (isAppleMacWebKit()) { + $inspectorResourcesDirPath = $productDir . "/WebCore.framework/Resources/inspector"; + } elsif (isAppleWinWebKit()) { + $inspectorResourcesDirPath = $productDir . "/WebKit.resources/inspector"; + } elsif (isQt() || isGtk()) { + my $prefix = $ENV{"WebKitInstallationPrefix"}; + $inspectorResourcesDirPath = (defined($prefix) ? $prefix : "/usr/share") . "/webkit-1.0/webinspector"; + } elsif (isEfl()) { + my $prefix = $ENV{"WebKitInstallationPrefix"}; + $inspectorResourcesDirPath = (defined($prefix) ? $prefix : "/usr/share") . "/ewebkit/webinspector"; + } + + if (! -d $inspectorResourcesDirPath) { + print "*************************************************************\n"; + print "Cannot find '$inspectorResourcesDirPath'.\n" if (defined($inspectorResourcesDirPath)); + print "Make sure that you have built WebKit first.\n" if (! -d $productDir || defined($inspectorResourcesDirPath)); + print "Optionally, set the environment variable 'WebKitInspectorResourcesDir'\n"; + print "to point to the directory that contains the WebKit Inspector front-end\n"; + print "files for the built WebCore framework.\n"; + print "*************************************************************\n"; + die; + } + return system "rsync", "-aut", "--exclude=/.DS_Store", "--exclude=*.re2js", "--exclude=.svn/", !isQt() ? 
"--exclude=/WebKit.qrc" : "", $sourceInspectorPath, $inspectorResourcesDirPath; +} + +sub buildXCodeProject($$@) +{ + my ($project, $clean, @extraOptions) = @_; + + if ($clean) { + push(@extraOptions, "-alltargets"); + push(@extraOptions, "clean"); + } + + return system "xcodebuild", "-project", "$project.xcodeproj", @extraOptions; +} + +sub usingVisualStudioExpress() +{ + setupCygwinEnv(); + return $willUseVCExpressWhenBuilding; +} + +sub buildVisualStudioProject +{ + my ($project, $clean) = @_; + setupCygwinEnv(); + + my $config = configurationForVisualStudio(); + + dieIfWindowsPlatformSDKNotInstalled() if $willUseVCExpressWhenBuilding; + + chomp($project = `cygpath -w "$project"`) if isCygwin(); + + my $action = "/build"; + if ($clean) { + $action = "/clean"; + } + + my @command = ($vcBuildPath, $project, $action, $config); + + print join(" ", @command), "\n"; + return system @command; +} + +sub downloadWafIfNeeded +{ + # get / update waf if needed + my $waf = "$sourceDir/Tools/wx/waf"; + my $wafURL = 'http://wxwebkit.wxcommunity.com/downloads/deps/waf'; + if (!-f $waf) { + my $result = system "curl -o $waf $wafURL"; + chmod 0755, $waf; + } +} + +sub buildWafProject +{ + my ($project, $shouldClean, @options) = @_; + + # set the PYTHONPATH for waf + my $pythonPath = $ENV{'PYTHONPATH'}; + if (!defined($pythonPath)) { + $pythonPath = ''; + } + my $sourceDir = sourceDir(); + my $newPythonPath = "$sourceDir/Tools/wx/build:$pythonPath"; + if (isCygwin()) { + $newPythonPath = `cygpath --mixed --path $newPythonPath`; + } + $ENV{'PYTHONPATH'} = $newPythonPath; + + print "Building $project\n"; + + my $wafCommand = "$sourceDir/Tools/wx/waf"; + if ($ENV{'WXWEBKIT_WAF'}) { + $wafCommand = $ENV{'WXWEBKIT_WAF'}; + } + if (isCygwin()) { + $wafCommand = `cygpath --windows "$wafCommand"`; + chomp($wafCommand); + } + if ($shouldClean) { + return system $wafCommand, "clean", "distclean"; + } + + return system $wafCommand, 'configure', 'build', 'install', @options; +} + +sub retrieveQMakespecVar +{ + my $mkspec = $_[0]; + my $varname = $_[1]; + + my $varvalue = undef; + #print "retrieveMakespecVar " . $mkspec . ", " . $varname . "\n"; + + local *SPEC; + open SPEC, "<$mkspec" or return $varvalue; + while (<SPEC>) { + if ($_ =~ /\s*include\((.+)\)/) { + # open the included mkspec + my $oldcwd = getcwd(); + (my $volume, my $directories, my $file) = File::Spec->splitpath($mkspec); + my $newcwd = "$volume$directories"; + chdir $newcwd if $newcwd; + $varvalue = retrieveQMakespecVar($1, $varname); + chdir $oldcwd; + } elsif ($_ =~ /$varname\s*=\s*([^\s]+)/) { + $varvalue = $1; + last; + } + } + close SPEC; + return $varvalue; +} + +sub qtMakeCommand($) +{ + my ($qmakebin) = @_; + chomp(my $mkspec = `$qmakebin -query QMAKE_MKSPECS`); + $mkspec .= "/default"; + my $compiler = retrieveQMakespecVar("$mkspec/qmake.conf", "QMAKE_CC"); + + #print "default spec: " . $mkspec . "\n"; + #print "compiler found: " . $compiler . "\n"; + + if ($compiler && $compiler eq "cl") { + return "nmake"; + } + + return "make"; +} + +sub autotoolsFlag($$) +{ + my ($flag, $feature) = @_; + my $prefix = $flag ? "--enable" : "--disable"; + + return $prefix . '-' . $feature; +} + +sub autogenArgumentsHaveChanged($@) +{ + my ($filename, @currentArguments) = @_; + + if (! 
-e $filename) { + return 1; + } + + open(AUTOTOOLS_ARGUMENTS, $filename); + chomp(my $previousArguments = <AUTOTOOLS_ARGUMENTS>); + close(AUTOTOOLS_ARGUMENTS); + + return $previousArguments ne join(" ", @currentArguments); +} + +sub buildAutotoolsProject($@) +{ + my ($clean, @buildParams) = @_; + + my $make = 'make'; + my $dir = productDir(); + my $config = passedConfiguration() || configuration(); + my $prefix; + + my @buildArgs = (); + my $makeArgs = $ENV{"WebKitMakeArguments"} || ""; + for my $i (0 .. $#buildParams) { + my $opt = $buildParams[$i]; + if ($opt =~ /^--makeargs=(.*)/i ) { + $makeArgs = $makeArgs . " " . $1; + } elsif ($opt =~ /^--prefix=(.*)/i ) { + $prefix = $1; + } else { + push @buildArgs, $opt; + } + } + + # Automatically determine the number of CPUs for make only + # if make arguments haven't already been specified. + if ($makeArgs eq "") { + $makeArgs = "-j" . numberOfCPUs(); + } + + $prefix = $ENV{"WebKitInstallationPrefix"} if !defined($prefix); + push @buildArgs, "--prefix=" . $prefix if defined($prefix); + + # check if configuration is Debug + if ($config =~ m/debug/i) { + push @buildArgs, "--enable-debug"; + } else { + push @buildArgs, "--disable-debug"; + } + + # Use rm to clean the build directory since distclean may miss files + if ($clean && -d $dir) { + system "rm", "-rf", "$dir"; + } + + if (! -d $dir) { + File::Path::mkpath($dir) or die "Failed to create build directory " . $dir + } + chdir $dir or die "Failed to cd into " . $dir . "\n"; + + if ($clean) { + return 0; + } + + # If GNUmakefile exists, don't run autogen.sh. The makefile should be + # smart enough to track autotools dependencies and re-run autogen.sh + # when build files change. + my $autogenArgumentsFile = "previous-autogen-arguments.txt"; + my $result; + if (!(-e "GNUmakefile") or autogenArgumentsHaveChanged($autogenArgumentsFile, @buildArgs)) { + + # Write autogen.sh arguments to a file so that we can detect + # when they change and automatically re-run it. + open(AUTOTOOLS_ARGUMENTS, ">$autogenArgumentsFile"); + print AUTOTOOLS_ARGUMENTS join(" ", @buildArgs); + close(AUTOTOOLS_ARGUMENTS); + + print "Calling configure in " . $dir . "\n\n"; + print "Installation prefix directory: $prefix\n" if(defined($prefix)); + + # Make the path relative since it will appear in all -I compiler flags. + # Long argument lists cause bizarre slowdowns in libtool. + my $relSourceDir = File::Spec->abs2rel($sourceDir) || "."; + $result = system "$relSourceDir/autogen.sh", @buildArgs; + if ($result ne 0) { + die "Failed to setup build environment using 'autotools'!\n"; + } + } + + $result = system "$make $makeArgs"; + if ($result ne 0) { + die "\nFailed to build WebKit using '$make'!\n"; + } + + chdir ".." or die; + return $result; +} + +sub buildCMakeProject($@) +{ + my ($port, $clean, @buildParams) = @_; + my $dir = File::Spec->canonpath(baseProductDir()); + my $config = configuration(); + my $result; + my $makeArgs = ""; + my @buildArgs; + + $makeArgs .= " -j" . numberOfCPUs() if ($makeArgs !~ m/-j\s*\d+/); + + if ($clean) { + print "Cleaning the build directory '$dir'\n"; + $dir = File::Spec->catfile($dir, $config); + File::Path::remove_tree($dir, {keep_root => 1}); + $result = 0; + } else { + my $cmakebin = "cmake"; + my $make = "make"; + + push @buildArgs, "-DPORT=$port"; + + for my $i (0 .. 
$#buildParams) { + my $opt = $buildParams[$i]; + if ($opt =~ /^--makeargs=(.*)/i ) { + $makeArgs = $1; + } elsif ($opt =~ /^--prefix=(.*)/i ) { + push @buildArgs, "-DCMAKE_INSTALL_PREFIX=$1"; + } else { + push @buildArgs, $opt; + } + } + + if ($config =~ m/debug/i) { + push @buildArgs, "-DCMAKE_BUILD_TYPE=Debug"; + } elsif ($config =~ m/release/i) { + push @buildArgs, "-DCMAKE_BUILD_TYPE=Release"; + } + + push @buildArgs, sourceDir(); + + $dir = File::Spec->catfile($dir, $config); + File::Path::mkpath($dir); + chdir $dir or die "Failed to cd into " . $dir . "\n"; + + print "Calling '$cmakebin @buildArgs' in " . $dir . "\n\n"; + my $result = system "$cmakebin @buildArgs"; + if ($result ne 0) { + die "Failed while running $cmakebin to generate makefiles!\n"; + } + + print "Calling '$make $makeArgs' in " . $dir . "\n\n"; + $result = system "$make $makeArgs"; + if ($result ne 0) { + die "Failed to build $port port\n"; + } + + chdir ".." or die; + } + + return $result; +} + +sub buildCMakeEflProject($@) +{ + my ($clean, @buildArgs) = @_; + return buildCMakeProject("Efl", $clean, @buildArgs); +} + +sub buildQMakeProject($@) +{ + my ($clean, @buildParams) = @_; + + my @buildArgs = ("-r"); + + my $qmakebin = "qmake"; # Allow override of the qmake binary from $PATH + my $makeargs = ""; + my $installHeaders; + my $installLibs; + for my $i (0 .. $#buildParams) { + my $opt = $buildParams[$i]; + if ($opt =~ /^--qmake=(.*)/i ) { + $qmakebin = $1; + } elsif ($opt =~ /^--qmakearg=(.*)/i ) { + push @buildArgs, $1; + } elsif ($opt =~ /^--makeargs=(.*)/i ) { + $makeargs = $1; + } elsif ($opt =~ /^--install-headers=(.*)/i ) { + $installHeaders = $1; + } elsif ($opt =~ /^--install-libs=(.*)/i ) { + $installLibs = $1; + } else { + push @buildArgs, $opt; + } + } + + my $make = qtMakeCommand($qmakebin); + my $config = configuration(); + push @buildArgs, "INSTALL_HEADERS=" . $installHeaders if defined($installHeaders); + push @buildArgs, "INSTALL_LIBS=" . $installLibs if defined($installLibs); + my $dir = File::Spec->canonpath(baseProductDir()); + $dir = File::Spec->catfile($dir, $config) unless isSymbian(); + File::Path::mkpath($dir); + chdir $dir or die "Failed to cd into " . $dir . "\n"; + + print "Generating derived sources\n\n"; + + push @buildArgs, "OUTPUT_DIR=" . baseProductDir() . "/$config"; + + my @dsQmakeArgs = @buildArgs; + push @dsQmakeArgs, "-r"; + push @dsQmakeArgs, sourceDir() . "/DerivedSources.pro"; + push @dsQmakeArgs, "-o Makefile.DerivedSources"; + print "Calling '$qmakebin @dsQmakeArgs' in " . $dir . "\n\n"; + my $result = system "$qmakebin @dsQmakeArgs"; + if ($result ne 0) { + die "Failed while running $qmakebin to generate derived sources!\n"; + } + + my $dsMakefile = "Makefile.DerivedSources"; + + # Iterate over different source directories manually to workaround a problem with qmake+extraTargets+s60 + my @subdirs = ("JavaScriptCore", "WebCore", "WebKit/qt/Api"); + if (grep { $_ eq "CONFIG+=webkit2"} @buildArgs) { + push @subdirs, "WebKit2"; + push @subdirs, "Tools/WebKitTestRunner"; + push @subdirs, "Tools/MiniBrowser"; + } + + for my $subdir (@subdirs) { + print "Calling '$make $makeargs -f $dsMakefile generated_files' in " . $dir . 
"/$subdir\n\n"; + if ($make eq "nmake") { + my $subdirWindows = $subdir; + $subdirWindows =~ s:/:\\:g; + $result = system "pushd $subdirWindows && $make $makeargs -f $dsMakefile generated_files && popd"; + } else { + $result = system "$make $makeargs -C $subdir -f $dsMakefile generated_files"; + } + if ($result ne 0) { + die "Failed to generate ${subdir}'s derived sources!\n"; + } + } + + if ($config =~ m/debug/i) { + push @buildArgs, "CONFIG-=release"; + push @buildArgs, "CONFIG+=debug"; + } else { + my $passedConfig = passedConfiguration() || ""; + if (!isDarwin() || $passedConfig =~ m/release/i) { + push @buildArgs, "CONFIG+=release"; + push @buildArgs, "CONFIG-=debug"; + } else { + push @buildArgs, "CONFIG+=debug"; + push @buildArgs, "CONFIG+=debug_and_release"; + } + } + + push @buildArgs, sourceDir() . "/WebKit.pro"; + + print "Calling '$qmakebin @buildArgs' in " . $dir . "\n\n"; + print "Installation headers directory: $installHeaders\n" if(defined($installHeaders)); + print "Installation libraries directory: $installLibs\n" if(defined($installLibs)); + + $result = system "$qmakebin @buildArgs"; + if ($result ne 0) { + die "Failed to setup build environment using $qmakebin!\n"; + } + + # Manually create makefiles for the examples so we don't build by default + my $examplesDir = $dir . "/WebKit/qt/examples"; + File::Path::mkpath($examplesDir); + $buildArgs[-1] = sourceDir() . "/WebKit/qt/examples/examples.pro"; + chdir $examplesDir or die; + print "Calling '$qmakebin @buildArgs' in " . $examplesDir . "\n\n"; + $result = system "$qmakebin @buildArgs"; + die "Failed to create makefiles for the examples!\n" if $result ne 0; + chdir $dir or die; + + if ($clean) { + print "Calling '$make $makeargs distclean' in " . $dir . "\n\n"; + $result = system "$make $makeargs distclean"; + } elsif (isSymbian()) { + print "\n\nWebKit is now configured for building, but you have to make\n"; + print "a choice about the target yourself. To start the build run:\n\n"; + print " make release-armv5|debug-winscw|etc.\n\n"; + } else { + print "Calling '$make $makeargs' in " . $dir . "\n\n"; + $result = system "$make $makeargs"; + } + + chdir ".." or die; + return $result; +} + +sub buildQMakeQtProject($$@) +{ + my ($project, $clean, @buildArgs) = @_; + + return buildQMakeProject($clean, @buildArgs); +} + +sub buildGtkProject($$@) +{ + my ($project, $clean, @buildArgs) = @_; + + if ($project ne "WebKit") { + die "The Gtk port builds JavaScriptCore, WebCore and WebKit in one shot! Only call it for 'WebKit'.\n"; + } + + return buildAutotoolsProject($clean, @buildArgs); +} + +sub buildChromiumMakefile($$) +{ + my ($target, $clean) = @_; + if ($clean) { + return system qw(rm -rf out); + } + my $config = configuration(); + my $numCpus = numberOfCPUs(); + my @command = ("make", "-fMakefile.chromium", "-j$numCpus", "BUILDTYPE=$config", $target); + print join(" ", @command) . "\n"; + return system @command; +} + +sub buildChromiumVisualStudioProject($$) +{ + my ($projectPath, $clean) = @_; + + my $config = configuration(); + my $action = "/build"; + $action = "/clean" if $clean; + + # Find Visual Studio installation. + my $vsInstallDir; + my $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files"; + if ($ENV{'VSINSTALLDIR'}) { + $vsInstallDir = $ENV{'VSINSTALLDIR'}; + } else { + $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8"; + } + $vsInstallDir = `cygpath "$vsInstallDir"` if isCygwin(); + chomp $vsInstallDir; + $vcBuildPath = "$vsInstallDir/Common7/IDE/devenv.com"; + if (! 
-e $vcBuildPath) {
+ # Visual Studio not found, try VC++ Express
+ $vcBuildPath = "$vsInstallDir/Common7/IDE/VCExpress.exe";
+ if (! -e $vcBuildPath) {
+ print "*************************************************************\n";
+ print "Cannot find '$vcBuildPath'\n";
+ print "Please execute the file 'vcvars32.bat' from\n";
+ print "'$programFilesPath\\Microsoft Visual Studio 8\\VC\\bin\\'\n";
+ print "to setup the necessary environment variables.\n";
+ print "*************************************************************\n";
+ die;
+ }
+ }
+
+ # Create command line and execute it.
+ my @command = ($vcBuildPath, $projectPath, $action, $config);
+ print "Building results into: ", baseProductDir(), "\n";
+ print join(" ", @command), "\n";
+ return system @command;
+}
+
+sub buildChromium($@)
+{
+ my ($clean, @options) = @_;
+
+ # We might need to update DEPS or re-run GYP if things have changed.
+ if (checkForArgumentAndRemoveFromArrayRef("--update-chromium", \@options)) {
+ system("perl", "Tools/Scripts/update-webkit-chromium") == 0 or die $!;
+ }
+
+ my $result = 1;
+ if (isDarwin()) {
+ # Mac build - builds the root xcode project.
+ $result = buildXCodeProject("WebKit/chromium/WebKit", $clean, "-configuration", configuration(), @options);
+ } elsif (isCygwin() || isWindows()) {
+ # Windows build - builds the root visual studio solution.
+ $result = buildChromiumVisualStudioProject("WebKit/chromium/WebKit.sln", $clean);
+ } elsif (isLinux()) {
+ # Linux build - build using make.
+ $result = buildChromiumMakefile("all", $clean);
+ } else {
+ print STDERR "This platform is not supported by chromium.\n";
+ }
+ return $result;
+}
+
+sub appleApplicationSupportPath
+{
+ open INSTALL_DIR, "</proc/registry/HKEY_LOCAL_MACHINE/SOFTWARE/Apple\ Inc./Apple\ Application\ Support/InstallDir";
+ my $path = <INSTALL_DIR>;
+ $path =~ s/[\r\n\x00].*//;
+ close INSTALL_DIR;
+
+ my $unixPath = `cygpath -u '$path'`;
+ chomp $unixPath;
+ return $unixPath;
+}
+
+sub setPathForRunningWebKitApp
+{
+ my ($env) = @_;
+
+ if (isAppleWinWebKit()) {
+ $env->{PATH} = join(':', productDir(), dirname(installedSafariPath()), appleApplicationSupportPath(), $env->{PATH} || "");
+ } elsif (isQt()) {
+ my $qtLibs = `qmake -query QT_INSTALL_LIBS`;
+ $qtLibs =~ s/[\n|\r]$//g;
+ $env->{PATH} = join(';', $qtLibs, productDir() . "/lib", $env->{PATH} || "");
+ }
+}
+
+sub runSafari
+{
+ my ($debugger) = @_;
+
+ if (isAppleMacWebKit()) {
+ return system "$FindBin::Bin/gdb-safari", argumentsForConfiguration() if $debugger;
+
+ my $productDir = productDir();
+ print "Starting Safari with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n";
+ $ENV{DYLD_FRAMEWORK_PATH} = $productDir;
+ $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES";
+ if (!isTiger() && architecture()) {
+ return system "arch", "-" . 
architecture(), safariPath(), @ARGV; + } else { + return system safariPath(), @ARGV; + } + } + + if (isAppleWinWebKit()) { + my $result; + my $productDir = productDir(); + if ($debugger) { + setupCygwinEnv(); + chomp($ENV{WEBKITNIGHTLY} = `cygpath -wa "$productDir"`); + my $safariPath = safariPath(); + chomp($safariPath = `cygpath -wa "$safariPath"`); + $result = system $vcBuildPath, "/debugexe", "\"$safariPath\"", @ARGV; + } else { + $result = system File::Spec->catfile(productDir(), "WebKit.exe"), @ARGV; + } + return $result if $result; + } + + return 1; +} + +sub runMiniBrowser +{ + if (isAppleMacWebKit()) { + my $productDir = productDir(); + print "Starting MiniBrowser with DYLD_FRAMEWORK_PATH set to point to $productDir.\n"; + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + my $miniBrowserPath = "$productDir/MiniBrowser.app/Contents/MacOS/MiniBrowser"; + if (!isTiger() && architecture()) { + return system "arch", "-" . architecture(), $miniBrowserPath, @ARGV; + } else { + return system $miniBrowserPath, @ARGV; + } + } + + return 1; +} + +sub debugMiniBrowser +{ + if (isAppleMacWebKit()) { + my $gdbPath = "/usr/bin/gdb"; + die "Can't find gdb executable. Is gdb installed?\n" unless -x $gdbPath; + + my $productDir = productDir(); + + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = 'YES'; + + my $miniBrowserPath = "$productDir/MiniBrowser.app/Contents/MacOS/MiniBrowser"; + + print "Starting MiniBrowser under gdb with DYLD_FRAMEWORK_PATH set to point to built WebKit2 in $productDir.\n"; + my @architectureFlags = ("-arch", architecture()) if !isTiger(); + exec $gdbPath, @architectureFlags, $miniBrowserPath or die; + return; + } + + return 1; +} + +sub runWebKitTestRunner +{ + if (isAppleMacWebKit()) { + my $productDir = productDir(); + print "Starting WebKitTestRunner with DYLD_FRAMEWORK_PATH set to point to $productDir.\n"; + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + my $webKitTestRunnerPath = "$productDir/WebKitTestRunner"; + if (!isTiger() && architecture()) { + return system "arch", "-" . architecture(), $webKitTestRunnerPath, @ARGV; + } else { + return system $webKitTestRunnerPath, @ARGV; + } + } + + return 1; +} + +sub debugWebKitTestRunner +{ + if (isAppleMacWebKit()) { + my $gdbPath = "/usr/bin/gdb"; + die "Can't find gdb executable. Is gdb installed?\n" unless -x $gdbPath; + + my $productDir = productDir(); + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = 'YES'; + + my $webKitTestRunnerPath = "$productDir/WebKitTestRunner"; + + print "Starting WebKitTestRunner under gdb with DYLD_FRAMEWORK_PATH set to point to $productDir.\n"; + my @architectureFlags = ("-arch", architecture()) if !isTiger(); + exec $gdbPath, @architectureFlags, $webKitTestRunnerPath or die; + return; + } + + return 1; +} + +sub runTestWebKitAPI +{ + if (isAppleMacWebKit()) { + my $productDir = productDir(); + print "Starting TestWebKitAPI with DYLD_FRAMEWORK_PATH set to point to $productDir.\n"; + $ENV{DYLD_FRAMEWORK_PATH} = $productDir; + $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = "YES"; + my $testWebKitAPIPath = "$productDir/TestWebKitAPI"; + if (!isTiger() && architecture()) { + return system "arch", "-" . 
architecture(), $testWebKitAPIPath, @ARGV; + } else { + return system $testWebKitAPIPath, @ARGV; + } + } + + return 1; +} + +1; diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl new file mode 100644 index 0000000..a7282c7 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/fixChangeLogPatch.pl @@ -0,0 +1,533 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) Research In Motion 2010. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::fixChangeLogPatch(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +# The source ChangeLog for these tests is the following: +# +# 2009-12-22 Alice <alice@email.address> +# +# Reviewed by Ray. +# +# Changed some code on 2009-12-22. +# +# * File: +# * File2: +# +# 2009-12-21 Alice <alice@email.address> +# +# Reviewed by Ray. +# +# Changed some code on 2009-12-21. +# +# * File: +# * File2: + +my @testCaseHashRefs = ( +{ # New test + diffName => "fixChangeLogPatch: [no change] In-place change.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,5 +1,5 @@ + 2010-12-22 Bob <bob@email.address> + +- Reviewed by Sue. ++ Reviewed by Ray. + + Changed some code on 2010-12-22. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,5 +1,5 @@ + 2010-12-22 Bob <bob@email.address> + +- Reviewed by Sue. ++ Reviewed by Ray. + + Changed some code on 2010-12-22. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] Remove first entry.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,11 +1,3 @@ +-2010-12-22 Bob <bob@email.address> +- +- Reviewed by Ray. +- +- Changed some code on 2010-12-22. +- +- * File: +- + 2010-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,11 +1,3 @@ +-2010-12-22 Bob <bob@email.address> +- +- Reviewed by Ray. +- +- Changed some code on 2010-12-22. 
+- +- * File: +- + 2010-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] Remove entry in the middle.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@@ -7,10 +7,6 @@ + + * File: + +-2010-12-22 Bob <bob@email.address> +- +- Changed some code on 2010-12-22. +- + 2010-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@@ -7,10 +7,6 @@ + + * File: + +-2010-12-22 Bob <bob@email.address> +- +- Changed some code on 2010-12-22. +- + 2010-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] Far apart changes (i.e. more than one chunk).", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -7,7 +7,7 @@ + + * File: + +-2010-12-22 Bob <bob@email.address> ++2010-12-22 Bobby <bob@email.address> + + Changed some code on 2010-12-22. + +@@ -21,7 +21,7 @@ + + * File2: + +-2010-12-21 Bob <bob@email.address> ++2010-12-21 Bobby <bob@email.address> + + Changed some code on 2010-12-21. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -7,7 +7,7 @@ + + * File: + +-2010-12-22 Bob <bob@email.address> ++2010-12-22 Bobby <bob@email.address> + + Changed some code on 2010-12-22. + +@@ -21,7 +21,7 @@ + + * File2: + +-2010-12-21 Bob <bob@email.address> ++2010-12-21 Bobby <bob@email.address> + + Changed some code on 2010-12-21. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] First line is new line.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,3 +1,11 @@ ++2009-12-22 Bob <bob@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,3 +1,11 @@ ++2009-12-22 Bob <bob@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] No date string.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -6,6 +6,7 @@ + + * File: + * File2: ++ * File3: + + 2009-12-21 Alice <alice@email.address> + +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -6,6 +6,7 @@ + + * File: + * File2: ++ * File3: + + 2009-12-21 Alice <alice@email.address> + +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] New entry inserted in middle.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -11,6 +11,14 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-21. ++ ++ * File: ++ ++2009-12-21 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-21. + + * File: +END + expectedReturn => { + hasOverlappingLines => 1, + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -11,6 +11,14 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-21. ++ ++ * File: ++ ++2009-12-21 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-21. + + * File: +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: [no change] New entry inserted earlier in the file, but after an entry with the same author and date.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -70,6 +70,14 @@ + + 2009-12-22 Alice <alice@email.address> + ++ Reviewed by Sue. 
++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + expectedReturn => { + hasOverlappingLines => 1, + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -70,6 +70,14 @@ + + 2009-12-22 Alice <alice@email.address> + ++ Reviewed by Sue. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: Leading context includes first line.", + inputText => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,5 +1,13 @@ + 2009-12-22 Alice <alice@email.address> + ++ Reviewed by Sue. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + expectedReturn => { + patch => <<'END', +--- ChangeLog ++++ ChangeLog +@@ -1,3 +1,11 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Sue. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: Leading context does not include first line.", + inputText => <<'END', +@@ -2,6 +2,14 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-22. + + * File: +END + expectedReturn => { + patch => <<'END', +@@ -1,3 +1,11 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: Non-consecutive line additions.", + +# This can occur, for example, if the new ChangeLog entry includes +# trailing white space in the first blank line but not the second. +# A diff command can then match the second blank line of the new +# ChangeLog entry with the first blank line of the old. +# The svn diff command with the default --diff-cmd has done this. + inputText => <<'END', +@@ -1,5 +1,11 @@ + 2009-12-22 Alice <alice@email.address> ++ <pretend-whitespace> ++ Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++2009-12-22 Alice <alice@email.address> ++ + Reviewed by Ray. + + Changed some code on 2009-12-22. +END + expectedReturn => { + patch => <<'END', +@@ -1,3 +1,9 @@ ++2009-12-22 Alice <alice@email.address> ++ <pretend-whitespace> ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. +END + } +}, +{ # New test + diffName => "fixChangeLogPatch: Additional edits after new entry.", + inputText => <<'END', +@@ -2,10 +2,17 @@ + + Reviewed by Ray. + ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ + Changed some code on 2009-12-22. + + * File: +- * File2: + + 2009-12-21 Alice <alice@email.address> + +END + expectedReturn => { + patch => <<'END', +@@ -1,11 +1,18 @@ ++2009-12-22 Alice <alice@email.address> ++ ++ Reviewed by Ray. ++ ++ Changed some more code on 2009-12-22. ++ ++ * File: ++ + 2009-12-22 Alice <alice@email.address> + + Reviewed by Ray. + + Changed some code on 2009-12-22. 
+ + * File: +- * File2: + + 2009-12-21 Alice <alice@email.address> + +END + } +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => $testCasesCount); # Total number of assertions. + +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "fixChangeLogPatch(): $testCase->{diffName}: comparing"; + + my $got = VCSUtils::fixChangeLogPatch($testCase->{inputText}); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply($got, $expectedReturn, "$testNameStart return value."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl new file mode 100644 index 0000000..483a0a8 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/generatePatchCommand.pl @@ -0,0 +1,87 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::generatePatchCommand(). 
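# As the assertions below show, generatePatchCommand() takes an optional hash
# reference and returns a ($patchCommand, $isForcing) pair: the base command is
# "patch -p0", "--force" is appended when it appears in {options} or when
# {ensureForce} is set, and {shouldReverse} adds "--reverse".
#
# Illustrative sketch only (not taken from this patch, and the pipe-open form
# is an assumption): a caller could apply a unified diff read from STDIN
# roughly like this:
#
#     use VCSUtils;
#     my $diffText = join("", <STDIN>);
#     my ($cmd, $isForcing) = VCSUtils::generatePatchCommand({ options => ["--fuzz=3"] });
#     open(my $patchProcess, "|-", $cmd) or die "Failed to start '$cmd'";
#     print $patchProcess $diffText;
#     close($patchProcess);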
+ +use Test::Simple tests => 10; +use VCSUtils; + +# New test +$title = "generatePatchCommand: Undefined optional arguments."; + +my $argsHashRef; +my ($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0", $title); +ok($isForcing == 0, $title); + +# New test +$title = "generatePatchCommand: Undefined options."; + +my $options; +$argsHashRef = {options => $options}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0", $title); +ok($isForcing == 0, $title); + +# New test +$title = "generatePatchCommand: --force and no \"ensure force\"."; + +$argsHashRef = {options => ["--force"]}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force", $title); +ok($isForcing == 1, $title); + +# New test +$title = "generatePatchCommand: no --force and \"ensure force\"."; + +$argsHashRef = {ensureForce => 1}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force", $title); +ok($isForcing == 1, $title); + +# New test +$title = "generatePatchCommand: \"should reverse\"."; + +$argsHashRef = {shouldReverse => 1}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --reverse", $title); + +# New test +$title = "generatePatchCommand: --fuzz=3, --force."; + +$argsHashRef = {options => ["--fuzz=3", "--force"]}; +($patchCommand, $isForcing) = VCSUtils::generatePatchCommand($argsHashRef); + +ok($patchCommand eq "patch -p0 --force --fuzz=3", $title); diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl new file mode 100644 index 0000000..a226e43 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/mergeChangeLogs.pl @@ -0,0 +1,336 @@ +#!/usr/bin/perl +# +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::mergeChangeLogs(). + +use strict; + +use Test::Simple tests => 16; +use File::Temp qw(tempfile); +use VCSUtils; + +# Read contents of a file and return it. 
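# Context for the cases below: mergeChangeLogs($fileMine, $fileOlder,
# $fileNewer) is exercised in two modes -- a "traditional rejected patch" mode,
# where $fileMine is a ChangeLog.rej file sitting next to a matching .orig
# copy, and a plain three-way merge of complete ChangeLog files. In both modes
# the tests expect a return value of 1 when the new entry can be folded into
# $fileNewer and 0 when it cannot, with every input left untouched on failure.
# The helper just below slurps a whole file so the assertions can compare full
# file contents.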
+sub readFile($) +{ + my ($fileName) = @_; + + local $/; + open(FH, "<", $fileName); + my $content = <FH>; + close(FH); + + return $content; +} + +# Write a temporary file and return the filename. +sub writeTempFile($$$) +{ + my ($name, $extension, $content) = @_; + + my ($FH, $fileName) = tempfile( + $name . "-XXXXXXXX", + DIR => ($ENV{'TMPDIR'} || "/tmp"), + UNLINK => 0, + ); + print $FH $content; + close $FH; + + if ($extension) { + my $newFileName = $fileName . $extension; + rename($fileName, $newFileName); + $fileName = $newFileName; + } + + return $fileName; +} + +# -------------------------------------------------------------------------------- + +{ + # New test + my $title = "mergeChangeLogs: traditional rejected patch success"; + + my $fileNewerContent = <<'EOF'; +2010-01-29 Mark Rowe <mrowe@apple.com> + + Fix the Mac build. + + Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional". + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileNewer = writeTempFile("file", "", $fileNewerContent); + + my $fileMineContent = <<'EOF'; +*************** +*** 1,3 **** + 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. +--- 1,9 ---- ++ 2010-01-29 Oliver Hunt <oliver@apple.com> ++ ++ Reviewed by Darin Adler. ++ ++ JSC is failing to propagate anonymous slot count on some transitions ++ + 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. +EOF + my $fileMine = writeTempFile("file", ".rej", $fileMineContent); + rename($fileMine, $fileNewer . ".rej"); + $fileMine = $fileNewer . ".rej"; + + my $fileOlderContent = $fileNewerContent; + my $fileOlder = writeTempFile("file", ".orig", $fileOlderContent); + rename($fileOlder, $fileNewer . ".orig"); + $fileOlder = $fileNewer . ".orig"; + + my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer); + + # mergeChangeLogs() should return 1 since the patch succeeded. + ok($exitStatus == 1, "$title: should return 1 for success"); + + ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged"); + ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged"); + + my $expectedContent = <<'EOF'; +2010-01-29 Oliver Hunt <oliver@apple.com> + + Reviewed by Darin Adler. + + JSC is failing to propagate anonymous slot count on some transitions + +EOF + $expectedContent .= $fileNewerContent; + ok(readFile($fileNewer) eq $expectedContent, "$title: \$fileNewer should be updated to include patch"); + + unlink($fileMine, $fileOlder, $fileNewer); +} + +# -------------------------------------------------------------------------------- + +{ + # New test + my $title = "mergeChangeLogs: traditional rejected patch failure"; + + my $fileNewerContent = <<'EOF'; +2010-01-29 Mark Rowe <mrowe@apple.com> + + Fix the Mac build. + + Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional". + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileNewer = writeTempFile("file", "", $fileNewerContent); + + my $fileMineContent = <<'EOF'; +*************** +*** 1,9 **** +- 2010-01-29 Oliver Hunt <oliver@apple.com> +- +- Reviewed by Darin Adler. +- +- JSC is failing to propagate anonymous slot count on some transitions +- + 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. 
+--- 1,3 ---- + 2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. +EOF + my $fileMine = writeTempFile("file", ".rej", $fileMineContent); + rename($fileMine, $fileNewer . ".rej"); + $fileMine = $fileNewer . ".rej"; + + my $fileOlderContent = $fileNewerContent; + my $fileOlder = writeTempFile("file", ".orig", $fileOlderContent); + rename($fileOlder, $fileNewer . ".orig"); + $fileOlder = $fileNewer . ".orig"; + + my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer); + + # mergeChangeLogs() should return 0 since the patch failed. + ok($exitStatus == 0, "$title: should return 0 for failure"); + + ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged"); + ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged"); + ok(readFile($fileNewer) eq $fileNewerContent, "$title: \$fileNewer should be unchanged"); + + unlink($fileMine, $fileOlder, $fileNewer); +} + +# -------------------------------------------------------------------------------- + +{ + # New test + my $title = "mergeChangeLogs: patch succeeds"; + + my $fileMineContent = <<'EOF'; +2010-01-29 Oliver Hunt <oliver@apple.com> + + Reviewed by Darin Adler. + + JSC is failing to propagate anonymous slot count on some transitions + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileMine = writeTempFile("fileMine", "", $fileMineContent); + + my $fileOlderContent = <<'EOF'; +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileOlder = writeTempFile("fileOlder", "", $fileOlderContent); + + my $fileNewerContent = <<'EOF'; +2010-01-29 Mark Rowe <mrowe@apple.com> + + Fix the Mac build. + + Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional". + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileNewer = writeTempFile("fileNewer", "", $fileNewerContent); + + my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer); + + # mergeChangeLogs() should return 1 since the patch succeeded. + ok($exitStatus == 1, "$title: should return 1 for success"); + + ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged"); + ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged"); + + my $expectedContent = <<'EOF'; +2010-01-29 Oliver Hunt <oliver@apple.com> + + Reviewed by Darin Adler. + + JSC is failing to propagate anonymous slot count on some transitions + +EOF + $expectedContent .= $fileNewerContent; + + ok(readFile($fileNewer) eq $expectedContent, "$title: \$fileNewer should be patched"); + + unlink($fileMine, $fileOlder, $fileNewer); +} + +# -------------------------------------------------------------------------------- + +{ + # New test + my $title = "mergeChangeLogs: patch fails"; + + my $fileMineContent = <<'EOF'; +2010-01-29 Mark Rowe <mrowe@apple.com> + + Fix the Mac build. + + Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional". + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileMine = writeTempFile("fileMine", "", $fileMineContent); + + my $fileOlderContent = <<'EOF'; +2010-01-29 Mark Rowe <mrowe@apple.com> + + Fix the Mac build. + + Disable ENABLE_INDEXED_DATABASE since it is "completely non-functional". 
+ +2010-01-29 Oliver Hunt <oliver@apple.com> + + Reviewed by Darin Adler. + + JSC is failing to propagate anonymous slot count on some transitions + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileOlder = writeTempFile("fileOlder", "", $fileOlderContent); + + my $fileNewerContent = <<'EOF'; +2010-01-29 Oliver Hunt <oliver@apple.com> + + Reviewed by Darin Adler. + + JSC is failing to propagate anonymous slot count on some transitions + +2010-01-29 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Maciej Stachowiak. + + Fix the ARM build. +EOF + my $fileNewer = writeTempFile("fileNewer", "", $fileNewerContent); + + my $exitStatus = mergeChangeLogs($fileMine, $fileOlder, $fileNewer); + + # mergeChangeLogs() should return a non-zero exit status since the patch failed. + ok($exitStatus == 0, "$title: return non-zero exit status for failure"); + + ok(readFile($fileMine) eq $fileMineContent, "$title: \$fileMine should be unchanged"); + ok(readFile($fileOlder) eq $fileOlderContent, "$title: \$fileOlder should be unchanged"); + + # $fileNewer should still exist unchanged because the patch failed + ok(readFile($fileNewer) eq $fileNewerContent, "$title: \$fileNewer should be unchanged"); + + unlink($fileMine, $fileOlder, $fileNewer); +} + +# -------------------------------------------------------------------------------- + diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl new file mode 100644 index 0000000..9fe077f --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl @@ -0,0 +1,1208 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseDiff(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +# The array of test cases. 
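+#
+# Each test case hash supplies:
+#   diffName         - a short label used in the test names,
+#   inputText        - the diff text fed to parseDiff(),
+#   expectedReturn   - a reference to the list parseDiff() should return,
+#                      i.e. [\@diffHashRefs, $lastReadLine],
+#   expectedNextLine - the next line the test expects to read from the file
+#                      handle after parseDiff() returns.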
+my @testCaseHashRefs = ( +{ + # New test + diffName => "SVN: simple", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "53052", +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: binary file (isBinary true)", + inputText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: binary file (isBinary true) using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. 
+svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: leading junk", + inputText => <<'END', + +LEADING JUNK + +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text + +LEADING JUNK + +Index: Makefile +=================================================================== +--- Makefile (revision 53052) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools + + all: +END + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "53052", +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: copied file", + inputText => <<'END', +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) ++++ Makefile_new (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + expectedReturn => [ +[{ + copiedFromPath => "Makefile", + indexPath => "Makefile_new", + sourceRevision => "53131", +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: two diffs", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +END + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "53131", +}], +"Index: Makefile_new\n"], + expectedNextLine => "===================================================================\n", +}, +{ + # New test + diffName => "SVN: SVN diff followed by Git diff", # Should not recognize Git start + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 53131) ++++ Makefile (working copy) +@@ -1,1 +0,0 @@ +-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "53131", +}], +undef], + expectedNextLine => undef, +}, +#### +# Property Changes: 
Simple +## +{ + # New test + diffName => "SVN: file change diff with property change diff", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile +___________________________________________________________________ +Name: svn:executable + + * +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +END + executableBitDelta => 1, + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: file change diff, followed by property change diff on different file", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile.shared +___________________________________________________________________ +Name: svn:executable + + * +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +END + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +"Property changes on: Makefile.shared\n"], + expectedNextLine => "___________________________________________________________________\n", +}, +{ + # New test + diffName => "SVN: property diff, followed by file change diff", + inputText => <<'END', +Property changes on: Makefile +___________________________________________________________________ +Deleted: svn:executable + - * + +Index: Makefile.shared +=================================================================== +--- Makefile.shared (revision 60021) ++++ Makefile.shared (working copy) +@@ -1,3 +1,4 @@ ++ +SCRIPTS_PATH ?= ../WebKitTools/Scripts +XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS) +END + expectedReturn => [ +[{ + executableBitDelta => -1, + indexPath => "Makefile", + isSvn => 1, +}], +"Index: Makefile.shared\n"], + expectedNextLine => "===================================================================\n", +}, +{ + # New test + diffName => "SVN: property diff, followed by file change diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Property changes on: Makefile +___________________________________________________________________ +Deleted: svn:executable + - * + +Index: Makefile.shared +=================================================================== +--- Makefile.shared (revision 60021) ++++ Makefile.shared (working copy) +@@ -1,3 +1,4 @@ ++ +SCRIPTS_PATH ?= ../WebKitTools/Scripts +XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS) +END +), + expectedReturn => [ +[{ + executableBitDelta => -1, + indexPath => 
"Makefile", + isSvn => 1, +}], +"Index: Makefile.shared\r\n"], + expectedNextLine => "===================================================================\r\n", +}, +{ + # New test + diffName => "SVN: copied file with property change", + inputText => <<'END', +Index: NMakefile +=================================================================== +--- NMakefile (revision 60021) (from Makefile:60021) ++++ NMakefile (working copy) +@@ -0,0 +1,1 @@ ++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + +Property changes on: NMakefile +___________________________________________________________________ +Added: svn:executable + + * +END + expectedReturn => [ +[{ + copiedFromPath => "Makefile", + executableBitDelta => 1, + indexPath => "NMakefile", + sourceRevision => "60021", +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: two consecutive property diffs", + inputText => <<'END', +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * + + +Property changes on: Makefile.shared +___________________________________________________________________ +Added: svn:executable + + * +END + expectedReturn => [ +[{ + executableBitDelta => 1, + indexPath => "Makefile", + isSvn => 1, +}], +"Property changes on: Makefile.shared\n"], + expectedNextLine => "___________________________________________________________________\n", +}, +{ + # New test + diffName => "SVN: two consecutive property diffs using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * + + +Property changes on: Makefile.shared +___________________________________________________________________ +Added: svn:executable + + * +END +), + expectedReturn => [ +[{ + executableBitDelta => 1, + indexPath => "Makefile", + isSvn => 1, +}], +"Property changes on: Makefile.shared\r\n"], + expectedNextLine => "___________________________________________________________________\r\n", +}, +#### +# Property Changes: Binary files +## +{ + # New test + diffName => "SVN: binary file with executable bit change", + inputText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream +Name: svn:executable + + * + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + executableBitDelta => 1, + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: binary file with executable bit change usng Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. 
+svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream +Name: svn:executable + + * + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + executableBitDelta => 1, + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "SVN: binary file followed by property change on different file", + inputText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +END + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +"Property changes on: Makefile\n"], + expectedNextLine => "___________________________________________________________________\n", +}, +{ + # New test + diffName => "SVN: binary file followed by property change on different file using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +END +), + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +"Property changes on: Makefile\r\n"], + expectedNextLine => "___________________________________________________________________\r\n", +}, +{ + # New test + diffName => "SVN: binary file followed by file change on different file", + inputText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. 
+svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +END + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +"Index: Makefile\n"], + expectedNextLine => "===================================================================\n", +}, +{ + # New test + diffName => "SVN: binary file followed by file change on different file using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. 
+svn:mime-type = application/octet-stream + + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== + +END +), + indexPath => "test_file.swf", + isBinary => 1, + isSvn => 1, +}], +"Index: Makefile\r\n"], + expectedNextLine => "===================================================================\r\n", +}, +#### +# Property Changes: File change with property change +## +{ + # New test + diffName => "SVN: file change diff with property change, followed by property change diff", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * + + +Property changes on: Makefile.shared +___________________________________________________________________ +Deleted: svn:executable + - * +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + + + +END + executableBitDelta => 1, + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +"Property changes on: Makefile.shared\n"], + expectedNextLine => "___________________________________________________________________\n", +}, +{ + # New test + diffName => "SVN: file change diff with property change, followed by property change diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile +___________________________________________________________________ +Added: svn:executable + + * + + +Property changes on: Makefile.shared +___________________________________________________________________ +Deleted: svn:executable + - * +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + + + +END +), + executableBitDelta => 1, + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +"Property changes on: Makefile.shared\r\n"], + expectedNextLine => "___________________________________________________________________\r\n", +}, +{ + # New test + diffName => "SVN: file change diff with property change, followed by file change diff", + inputText => <<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile +___________________________________________________________________ +Name: svn:executable + - * + +Index: Makefile.shared 
+=================================================================== +--- Makefile.shared (revision 60021) ++++ Makefile.shared (working copy) +@@ -1,3 +1,4 @@ ++ +SCRIPTS_PATH ?= ../WebKitTools/Scripts +XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS) +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + + +END + executableBitDelta => -1, + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +"Index: Makefile.shared\n"], + expectedNextLine => "===================================================================\n", +}, +{ + # New test + diffName => "SVN: file change diff with property change, followed by file change diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + +Property changes on: Makefile +___________________________________________________________________ +Name: svn:executable + - * + +Index: Makefile.shared +=================================================================== +--- Makefile.shared (revision 60021) ++++ Makefile.shared (working copy) +@@ -1,3 +1,4 @@ ++ +SCRIPTS_PATH ?= ../WebKitTools/Scripts +XCODE_OPTIONS = `perl -I$(SCRIPTS_PATH) -Mwebkitdirs -e 'print XcodeOptionString()'` $(ARGS) +END +), + expectedReturn => [ +[{ + svnConvertedText => toWindowsLineEndings(<<'END', # Same as input text +Index: Makefile +=================================================================== +--- Makefile (revision 60021) ++++ Makefile (working copy) +@@ -1,3 +1,4 @@ ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKit2 WebKitTools + + all: + + +END +), + executableBitDelta => -1, + indexPath => "Makefile", + isSvn => 1, + sourceRevision => "60021", +}], +"Index: Makefile.shared\r\n"], + expectedNextLine => "===================================================================\r\n", +}, +#### +# Git test cases +## +{ + # New test + diffName => "Git: simple", + inputText => <<'END', +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', +Index: Makefile +index f5d5e74..3b6aa92 100644 +--- Makefile ++++ Makefile +@@ -1,1 1,1 @@ public: +END + indexPath => "Makefile", + isGit => 1, +}], +undef], + expectedNextLine => undef, +}, +{ # New test + diffName => "Git: new file", + inputText => <<'END', +diff --git a/foo.h b/foo.h +new file mode 100644 +index 0000000..3c9f114 +--- /dev/null ++++ b/foo.h +@@ -0,0 +1,34 @@ ++<html> +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', +Index: foo.h +new file mode 100644 +index 0000000..3c9f114 +--- foo.h ++++ foo.h +@@ -0,0 +1,34 @@ ++<html> +END + indexPath => "foo.h", + isGit => 1, + isNew => 1, +}], +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ # New test + diffName => "Git: file deletion", + inputText => <<'END', +diff --git a/foo b/foo +deleted file mode 100644 +index 
1e50d1d..0000000 +--- a/foo ++++ /dev/null +@@ -1,1 +0,0 @@ +-line1 +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', +Index: foo +deleted file mode 100644 +index 1e50d1d..0000000 +--- foo ++++ foo +@@ -1,1 +0,0 @@ +-line1 +END + indexPath => "foo", + isDeletion => 1, + isGit => 1, +}], +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ + # New test + diffName => "Git: Git diff followed by SVN diff", # Should not recognize SVN start + inputText => <<'END', +diff --git a/Makefile b/Makefile +index f5d5e74..3b6aa92 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,1 1,1 @@ public: +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + expectedReturn => [ +[{ + svnConvertedText => <<'END', +Index: Makefile +index f5d5e74..3b6aa92 100644 +--- Makefile ++++ Makefile +@@ -1,1 1,1 @@ public: +Index: Makefile_new +=================================================================== +--- Makefile_new (revision 53131) (from Makefile:53131) +END + indexPath => "Makefile", + isGit => 1, +}], +undef], + expectedNextLine => undef, +}, +#### +# Git test cases: file moves (multiple return values) +## +{ + diffName => "Git: rename (with similarity index 100%)", + inputText => <<'END', +diff --git a/foo b/foo_new +similarity index 100% +rename from foo +rename to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +[{ + indexPath => "foo", + isDeletion => 1, +}, +{ + copiedFromPath => "foo", + indexPath => "foo_new", +}], +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ + diffName => "rename (with similarity index < 100%)", + inputText => <<'END', +diff --git a/foo b/foo_new +similarity index 99% +rename from foo +rename to foo_new +index 1e50d1d..1459d21 100644 +--- a/foo ++++ b/foo_new +@@ -15,3 +15,4 @@ release r deployment dep deploy: + line1 + line2 + line3 ++line4 +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +[{ + indexPath => "foo", + isDeletion => 1, +}, +{ + copiedFromPath => "foo", + indexPath => "foo_new", +}, +{ + indexPath => "foo_new", + isGit => 1, + svnConvertedText => <<'END', +Index: foo_new +similarity index 99% +rename from foo +rename to foo_new +index 1e50d1d..1459d21 100644 +--- foo_new ++++ foo_new +@@ -15,3 +15,4 @@ release r deployment dep deploy: + line1 + line2 + line3 ++line4 +END +}], +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ + diffName => "rename (with executable bit change)", + inputText => <<'END', +diff --git a/foo b/foo_new +old mode 100644 +new mode 100755 +similarity index 100% +rename from foo +rename to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +[{ + indexPath => "foo", + isDeletion => 1, +}, +{ + copiedFromPath => "foo", + indexPath => "foo_new", +}, +{ + executableBitDelta => 1, + indexPath => "foo_new", + isGit => 1, + svnConvertedText => <<'END', +Index: foo_new +old mode 100644 +new mode 100755 +similarity index 100% +rename from foo +rename to foo_new +END +}], +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. 
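+
+# A minimal usage sketch, for orientation only (the variable names are
+# illustrative): parseDiff() takes an open file handle plus the first line
+# already read from it, and returns the parsed diff hash references together
+# with the last line it read (the start of the next diff, if any).
+#
+#   open(my $fh, "<", \$diffText);
+#   my $firstLine = <$fh>;
+#   my ($diffHashRefs, $lastReadLine) = VCSUtils::parseDiff($fh, $firstLine);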
+ +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseDiff(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseDiff($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl new file mode 100644 index 0000000..8c20f65 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl @@ -0,0 +1,121 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseDiffHeader(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +# The unit tests for parseGitDiffHeader() and parseSvnDiffHeader() +# already thoroughly test parsing each format. +# +# For parseDiffHeader(), it should suffice to verify that -- (1) for each +# format, the method can return non-trivial values back for each key +# supported by that format (e.g. "sourceRevision" for SVN), (2) the method +# correctly sets default values when specific key-values are not set +# (e.g. undef for "sourceRevision" for Git), and (3) key-values unique to +# this method are set correctly (e.g. "scmFormat"). +my @testCaseHashRefs = ( +#### +# SVN test cases +## +{ # New test + diffName => "SVN: non-trivial copiedFromPath and sourceRevision values", + inputText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +@@ -0,0 +1,7 @@ ++# Python file... 
+END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + isSvn => 1, + sourceRevision => 53048, +}, +"@@ -0,0 +1,7 @@\n"], + expectedNextLine => "+# Python file...\n", +}, +#### +# Git test cases +## +{ # New test case + diffName => "Git: Non-zero executable bit", + inputText => <<'END', +diff --git a/foo.exe b/foo.exe +old mode 100644 +new mode 100755 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.exe +old mode 100644 +new mode 100755 +END + executableBitDelta => 1, + indexPath => "foo.exe", + isGit => 1, +}, +undef], + expectedNextLine => undef, +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. + +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseDiffHeader(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseDiffHeader($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl new file mode 100644 index 0000000..bc0d4d4 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl @@ -0,0 +1,494 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseGitDiffHeader(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +# The array of test cases. 
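+#
+# Each test case hash supplies a diffName label, the inputText fed to
+# parseGitDiffHeader(), the expectedReturn value (a reference to
+# [\%diffHashRef, $lastReadLine]), and expectedNextLine, the next line the
+# test expects to read from the file handle afterwards.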
+my @testCaseHashRefs = ( +{ # New test + diffName => "Modified file", + inputText => <<'END', +diff --git a/foo.h b/foo.h +index f5d5e74..3b6aa92 100644 +--- a/foo.h ++++ b/foo.h +@@ -1 +1 @@ +-file contents ++new file contents +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.h +index f5d5e74..3b6aa92 100644 +--- foo.h ++++ foo.h +END + indexPath => "foo.h", +}, +"@@ -1 +1 @@\n"], + expectedNextLine => "-file contents\n", +}, +{ # New test + diffName => "new file", + inputText => <<'END', +diff --git a/foo.h b/foo.h +new file mode 100644 +index 0000000..3c9f114 +--- /dev/null ++++ b/foo.h +@@ -0,0 +1,34 @@ ++<html> +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.h +new file mode 100644 +index 0000000..3c9f114 +--- foo.h ++++ foo.h +END + indexPath => "foo.h", + isNew => 1, +}, +"@@ -0,0 +1,34 @@\n"], + expectedNextLine => "+<html>\n", +}, +{ # New test + diffName => "file deletion", + inputText => <<'END', +diff --git a/foo b/foo +deleted file mode 100644 +index 1e50d1d..0000000 +--- a/foo ++++ /dev/null +@@ -1,1 +0,0 @@ +-line1 +diff --git a/configure.ac b/configure.ac +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo +deleted file mode 100644 +index 1e50d1d..0000000 +--- foo ++++ foo +END + indexPath => "foo", + isDeletion => 1, +}, +"@@ -1,1 +0,0 @@\n"], + expectedNextLine => "-line1\n", +}, +{ # New test + diffName => "using --no-prefix", + inputText => <<'END', +diff --git foo.h foo.h +index c925780..9e65c43 100644 +--- foo.h ++++ foo.h +@@ -1,3 +1,17 @@ ++contents +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.h +index c925780..9e65c43 100644 +--- foo.h ++++ foo.h +END + indexPath => "foo.h", +}, +"@@ -1,3 +1,17 @@\n"], + expectedNextLine => "+contents\n", +}, +#### +# Copy operations +## +{ # New test + diffName => "copy (with similarity index 100%)", + inputText => <<'END', +diff --git a/foo b/foo_new +similarity index 100% +copy from foo +copy to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo_new +similarity index 100% +copy from foo +copy to foo_new +END + copiedFromPath => "foo", + indexPath => "foo_new", +}, +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ # New test + diffName => "copy (with similarity index < 100%)", + inputText => <<'END', +diff --git a/foo b/foo_new +similarity index 99% +copy from foo +copy to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo_new +similarity index 99% +copy from foo +copy to foo_new +END + copiedFromPath => "foo", + indexPath => "foo_new", + isCopyWithChanges => 1, +}, +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ # New test + diffName => "rename (with similarity index 100%)", + inputText => <<'END', +diff --git a/foo b/foo_new +similarity index 100% +rename from foo +rename to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo_new +similarity index 100% +rename from foo +rename to foo_new +END + copiedFromPath => "foo", + indexPath => "foo_new", + shouldDeleteSource => 1, +}, +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +{ # New test + diffName => "rename (with similarity index < 100%)", + inputText => <<'END', +diff 
--git a/foo b/foo_new +similarity index 99% +rename from foo +rename to foo_new +index 1e50d1d..1459d21 100644 +--- a/foo ++++ b/foo_new +@@ -15,3 +15,4 @@ release r deployment dep deploy: + line1 + line2 + line3 ++line4 +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo_new +similarity index 99% +rename from foo +rename to foo_new +index 1e50d1d..1459d21 100644 +--- foo_new ++++ foo_new +END + copiedFromPath => "foo", + indexPath => "foo_new", + isCopyWithChanges => 1, + shouldDeleteSource => 1, +}, +"@@ -15,3 +15,4 @@ release r deployment dep deploy:\n"], + expectedNextLine => " line1\n", +}, +{ # New test + diffName => "rename (with executable bit change)", + inputText => <<'END', +diff --git a/foo b/foo_new +old mode 100644 +new mode 100755 +similarity index 100% +rename from foo +rename to foo_new +diff --git a/bar b/bar +index d45dd40..3494526 100644 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo_new +old mode 100644 +new mode 100755 +similarity index 100% +rename from foo +rename to foo_new +END + copiedFromPath => "foo", + executableBitDelta => 1, + indexPath => "foo_new", + isCopyWithChanges => 1, + shouldDeleteSource => 1, +}, +"diff --git a/bar b/bar\n"], + expectedNextLine => "index d45dd40..3494526 100644\n", +}, +#### +# Binary file test cases +## +{ + # New test case + diffName => "New binary file", + inputText => <<'END', +diff --git a/foo.gif b/foo.gif +new file mode 100644 +index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690 +GIT binary patch +literal 7 +OcmYex&reDa;sO8*F9L)B + +literal 0 +HcmV?d00001 + +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.gif +new file mode 100644 +index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d9060151690 +GIT binary patch +END + indexPath => "foo.gif", + isBinary => 1, + isNew => 1, +}, +"literal 7\n"], + expectedNextLine => "OcmYex&reDa;sO8*F9L)B\n", +}, +{ + # New test case + diffName => "Deleted binary file", + inputText => <<'END', +diff --git a/foo.gif b/foo.gif +deleted file mode 100644 +index 323fae0..0000000 +GIT binary patch +literal 0 +HcmV?d00001 + +literal 7 +OcmYex&reDa;sO8*F9L)B + +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.gif +deleted file mode 100644 +index 323fae0..0000000 +GIT binary patch +END + indexPath => "foo.gif", + isBinary => 1, + isDeletion => 1, +}, +"literal 0\n"], + expectedNextLine => "HcmV?d00001\n", +}, +#### +# Executable bit test cases +## +{ + # New test case + diffName => "Modified executable file", + inputText => <<'END', +diff --git a/foo b/foo +index d03e242..435ad3a 100755 +--- a/foo ++++ b/foo +@@ -1 +1 @@ +-file contents ++new file contents + +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo +index d03e242..435ad3a 100755 +--- foo ++++ foo +END + indexPath => "foo", +}, +"@@ -1 +1 @@\n"], + expectedNextLine => "-file contents\n", +}, +{ + # New test case + diffName => "Making file executable (last diff)", + inputText => <<'END', +diff --git a/foo.exe b/foo.exe +old mode 100644 +new mode 100755 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.exe +old mode 100644 +new mode 100755 +END + executableBitDelta => 1, + indexPath => "foo.exe", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test case + diffName => "Making file executable (not last diff)", + inputText => <<'END', +diff --git a/foo.exe b/foo.exe +old mode 100644 +new 
mode 100755 +diff --git a/another_file.txt b/another_file.txt +index d03e242..435ad3a 100755 +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo.exe +old mode 100644 +new mode 100755 +END + executableBitDelta => 1, + indexPath => "foo.exe", +}, +"diff --git a/another_file.txt b/another_file.txt\n"], + expectedNextLine => "index d03e242..435ad3a 100755\n", +}, +{ + # New test case + diffName => "New executable file", + inputText => <<'END', +diff --git a/foo b/foo +new file mode 100755 +index 0000000..d03e242 +--- /dev/null ++++ b/foo +@@ -0,0 +1 @@ ++file contents + +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo +new file mode 100755 +index 0000000..d03e242 +--- foo ++++ foo +END + executableBitDelta => 1, + indexPath => "foo", + isNew => 1, +}, +"@@ -0,0 +1 @@\n"], + expectedNextLine => "+file contents\n", +}, +{ + # New test case + diffName => "Deleted executable file", + inputText => <<'END', +diff --git a/foo b/foo +deleted file mode 100755 +index d03e242..0000000 +--- a/foo ++++ /dev/null +@@ -1 +0,0 @@ +-file contents + +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: foo +deleted file mode 100755 +index d03e242..0000000 +--- foo ++++ foo +END + executableBitDelta => -1, + indexPath => "foo", + isDeletion => 1, +}, +"@@ -1 +0,0 @@\n"], + expectedNextLine => "-file contents\n", +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. + +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseGitDiffHeader(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseGitDiffHeader($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl new file mode 100644 index 0000000..8aae3d4 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parsePatch.pl @@ -0,0 +1,94 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Unit tests of parsePatch().
+
+use strict;
+use warnings;
+
+use Test::More;
+use VCSUtils;
+
+my @diffHashRefKeys = ( # The hash reference keys to check per diff.
+ "copiedFromPath",
+ "indexPath",
+ "sourceRevision",
+ "svnConvertedText",
+);
+
+# New test
+my $testNameStart = "parsePatch(): [SVN: Rename] ";
+my $patch = <<'END';
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+Index: Makefile_new
+===================================================================
+--- Makefile_new (revision 53131) (from Makefile:53131)
++++ Makefile_new (working copy)
+@@ -0,0 +1,1 @@
++MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+
+my @expectedDiffHashRefs = (
+{
+ svnConvertedText => <<'END',
+Index: Makefile
+===================================================================
+--- Makefile (revision 53131)
++++ Makefile (working copy)
+@@ -1,1 +0,0 @@
+-MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools
+END
+ copiedFromPath => undef,
+ indexPath => "Makefile",
+ sourceRevision => "53131",
+},
+{
+ copiedFromPath => "Makefile",
+ indexPath => "Makefile_new",
+ sourceRevision => "53131",
+},
+);
+
+plan(tests => @expectedDiffHashRefs * @diffHashRefKeys);
+
+my $fileHandle;
+open($fileHandle, "<", \$patch);
+
+my @gotDiffHashRefs = parsePatch($fileHandle);
+
+my $i = 0;
+foreach my $expectedDiffHashRef (@expectedDiffHashRefs) {
+
+ my $gotDiffHashRef = $gotDiffHashRefs[$i++];
+
+ foreach my $diffHashRefKey (@diffHashRefKeys) {
+ my $testName = "${testNameStart}[diff $i] key=\"$diffHashRefKey\"";
+ is($gotDiffHashRef->{$diffHashRefKey}, $expectedDiffHashRef->{$diffHashRefKey}, $testName);
+ }
+} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl new file mode 100644 index 0000000..4f05431 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffFooter.pl @@ -0,0 +1,397 @@ +#!/usr/bin/perl -w +# +# Copyright (C) Research in Motion Limited 2010. All Rights Reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission.
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseSvnDiffProperties(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @testCaseHashRefs = ( +#### +# Simple test cases +## +{ + # New test + diffName => "simple: add svn:executable", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: add svn:mergeinfo", + inputText => <<'END', +Property changes on: Makefile +___________________________________________________________________ +Added: svn:mergeinfo + Merged /trunk/Makefile:r33020 +END + expectedReturn => [ +{ + propertyPath => "Makefile", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:mergeinfo", + inputText => <<'END', +Property changes on: Makefile +___________________________________________________________________ +Deleted: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 +END + expectedReturn => [ +{ + propertyPath => "Makefile", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: modified svn:mergeinfo", + inputText => <<'END', +Property changes on: Makefile +___________________________________________________________________ +Modified: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 + Merged /trunk/Makefile:r41697 +END + expectedReturn => [ +{ + propertyPath => "Makefile", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:executable", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Deleted: svn:executable + - * +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => -1, +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:executable using SVN 1.4 syntax", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Name: svn:executable + - * +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => -1, +}, +undef], + expectedNextLine => undef, +}, +#### +# Property value followed by empty line and start of next diff +## +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next diff", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * + +Index: Makefile.shared +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + 
expectedNextLine => "Index: Makefile.shared\n", +}, +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next property diff", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * + +Property changes on: Makefile.shared +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + expectedNextLine => "Property changes on: Makefile.shared\n", +}, +#### +# Property value followed by empty line and start of the binary contents +## +{ + # New test + diffName => "add svn:executable, followed by empty line and start of binary contents", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +{ + # New test + diffName => "custom property followed by svn:executable, empty line and start of binary contents", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: documentation + + This is an example sentence. +Added: svn:executable + + * + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +#### +# Successive properties +## +{ + # New test + diffName => "svn:executable followed by custom property", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * +Added: documentation + + This is an example sentence. +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "custom property followed by svn:executable", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: documentation + + This is an example sentence. +Added: svn:executable + + * +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +undef], + expectedNextLine => undef, +}, +#### +# Successive properties followed by empty line and start of next diff +## +{ + # New test + diffName => "custom property followed by svn:executable, empty line and start of next property diff", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: documentation + + This is an example sentence. +Added: svn:executable + + * + +Property changes on: Makefile.shared +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + expectedNextLine => "Property changes on: Makefile.shared\n", +}, +{ + # New test + diffName => "custom property followed by svn:executable, empty line and start of next index diff", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: documentation + + This is an example sentence. 
+Added: svn:executable + + * + +Index: Makefile.shared +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => 1, +}, +"\n"], + expectedNextLine => "Index: Makefile.shared\n", +}, +#### +# Custom properties +## +# FIXME: We do not support anything other than the svn:executable property. +# We should add support for handling other properties. +{ + # New test + diffName => "simple: custom property", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Name: documentation + + This is an example sentence. +END + expectedReturn => [ +{ + propertyPath => "FileA", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "custom property followed by custom property", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: copyright + + Copyright (C) Research in Motion Limited 2010. All Rights Reserved. +Added: documentation + + This is an example sentence. +END + expectedReturn => [ +{ + propertyPath => "FileA", +}, +undef], + expectedNextLine => undef, +}, +#### +# Malformed property diffs +## +# We shouldn't encounter such diffs in practice. +{ + # New test + diffName => "svn:executable followed by custom property and svn:executable", + inputText => <<'END', +Property changes on: FileA +___________________________________________________________________ +Added: svn:executable + + * +Added: documentation + + This is an example sentence. +Deleted: svn:executable + - * +END + expectedReturn => [ +{ + propertyPath => "FileA", + executableBitDelta => -1, +}, +undef], + expectedNextLine => undef, +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. + +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseSvnDiffProperties(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseSvnDiffProperties($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl new file mode 100644 index 0000000..ed8550d --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnDiffHeader.pl @@ -0,0 +1,220 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseSvnDiffHeader(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +# The array of test cases. +my @testCaseHashRefs = ( +{ + # New test + diffName => "simple diff", + inputText => <<'END', +Index: WebKitTools/Scripts/VCSUtils.pm +=================================================================== +--- WebKitTools/Scripts/VCSUtils.pm (revision 53004) ++++ WebKitTools/Scripts/VCSUtils.pm (working copy) +@@ -32,6 +32,7 @@ use strict; + use warnings; +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: WebKitTools/Scripts/VCSUtils.pm +=================================================================== +--- WebKitTools/Scripts/VCSUtils.pm (revision 53004) ++++ WebKitTools/Scripts/VCSUtils.pm (working copy) +END + indexPath => "WebKitTools/Scripts/VCSUtils.pm", + sourceRevision => "53004", +}, +"@@ -32,6 +32,7 @@ use strict;\n"], + expectedNextLine => " use warnings;\n", +}, +{ + # New test + diffName => "new file", + inputText => <<'END', +Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl +=================================================================== +--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) ++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) +@@ -0,0 +1,262 @@ ++#!/usr/bin/perl -w +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl +=================================================================== +--- WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) ++++ WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl (revision 0) +END + indexPath => "WebKitTools/Scripts/webkitperl/VCSUtils_unittest/parseDiffHeader.pl", + isNew => 1, +}, +"@@ -0,0 +1,262 @@\n"], + expectedNextLine => "+#!/usr/bin/perl -w\n", +}, +{ + # New test + diffName => "copied file", + inputText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +@@ -0,0 +1,7 @@ ++# Python file... 
+END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + sourceRevision => 53048, +}, +"@@ -0,0 +1,7 @@\n"], + expectedNextLine => "+# Python file...\n", +}, +{ + # New test + diffName => "contains \\r\\n lines", + inputText => <<END, # No single quotes to allow interpolation of "\r" +Index: index_path.py\r +===================================================================\r +--- index_path.py (revision 53048)\r ++++ index_path.py (working copy)\r +@@ -0,0 +1,7 @@\r ++# Python file...\r +END + expectedReturn => [ +{ + svnConvertedText => <<END, # No single quotes to allow interpolation of "\r" +Index: index_path.py\r +===================================================================\r +--- index_path.py (revision 53048)\r ++++ index_path.py (working copy)\r +END + indexPath => "index_path.py", + sourceRevision => 53048, +}, +"@@ -0,0 +1,7 @@\r\n"], + expectedNextLine => "+# Python file...\r\n", +}, +{ + # New test + diffName => "contains path corrections", + inputText => <<'END', +Index: index_path.py +=================================================================== +--- bad_path (revision 53048) (from copied_from_path.py:53048) ++++ bad_path (working copy) +@@ -0,0 +1,7 @@ ++# Python file... +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: index_path.py +=================================================================== +--- index_path.py (revision 53048) (from copied_from_path.py:53048) ++++ index_path.py (working copy) +END + copiedFromPath => "copied_from_path.py", + indexPath => "index_path.py", + sourceRevision => 53048, +}, +"@@ -0,0 +1,7 @@\n"], + expectedNextLine => "+# Python file...\n", +}, +#### +# Binary test cases +## +{ + # New test + diffName => "binary file", + inputText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + svnConvertedText => <<'END', +Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. +END + indexPath => "test_file.swf", + isBinary => 1, +}, +"svn:mime-type = application/octet-stream\n"], + expectedNextLine => "\n", +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. 
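Each of these parser tests uses the same driver pattern, visible in the loop that follows: the heredoc input is opened as an in-memory filehandle, the first line is read off the handle, and both are handed to the parser, which returns a hash reference plus the last line it read; the next read from the handle is then checked against expectedNextLine. A minimal sketch of that pattern, reusing the "simple diff" case above and assuming VCSUtils.pm is importable just as the test itself assumes:

use VCSUtils;

my $patchText = $testCaseHashRefs[0]{inputText};   # the "simple diff" case defined above
open(my $fh, "<", \$patchText) or die;             # in-memory filehandle over the heredoc text
my $firstLine = <$fh>;                             # the parser is handed the line already read off the handle...
my ($header, $lastReadLine) = VCSUtils::parseSvnDiffHeader($fh, $firstLine);
my $nextLine = <$fh>;                              # ...and leaves the handle at the first line it did not consume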
+ +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseSvnDiffHeader(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseSvnDiffHeader($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl new file mode 100644 index 0000000..6914051 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnProperty.pl @@ -0,0 +1,743 @@ +#!/usr/bin/perl -w +# +# Copyright (C) Research in Motion Limited 2010. All Rights Reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseSvnProperty(). 
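Several of the cases below run the same input through toWindowsLineEndings() to cover CRLF handling. That helper lives in VCSUtils.pm and is not part of this patch hunk; judging only from the expected values in these tests, it is assumed to behave roughly like the sketch below (an assumption for orientation, not the actual implementation):

sub toWindowsLineEndingsSketch
{
    my ($text) = @_;
    $text =~ s/\n/\r\n/g;   # assumed behavior: each LF becomes CRLF, which is why expected values carry \r\n
    return $text;
}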
+ +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @testCaseHashRefs = ( +#### +# Simple test cases +## +{ + # New test + diffName => "simple: add svn:executable", + inputText => <<'END', +Added: svn:executable + + * +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:executable", + inputText => <<'END', +Deleted: svn:executable + - * +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => -1, + value => "*", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: add svn:mergeinfo", + inputText => <<'END', +Added: svn:mergeinfo + Merged /trunk/Makefile:r33020 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => 1, + value => "/trunk/Makefile:r33020", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:mergeinfo", + inputText => <<'END', +Deleted: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => -1, + value => "/trunk/Makefile:r33020", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: modified svn:mergeinfo", + inputText => <<'END', +Modified: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 + Merged /trunk/Makefile:r41697 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => 1, + value => "/trunk/Makefile:r41697", +}, +undef], + expectedNextLine => undef, +}, +#### +# Using SVN 1.4 syntax +## +{ + # New test + diffName => "simple: modified svn:mergeinfo using SVN 1.4 syntax", + inputText => <<'END', +Name: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 + Merged /trunk/Makefile:r41697 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => 1, + value => "/trunk/Makefile:r41697", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: delete svn:executable using SVN 1.4 syntax", + inputText => <<'END', +Name: svn:executable + - * +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => -1, + value => "*", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "simple: add svn:executable using SVN 1.4 syntax", + inputText => <<'END', +Name: svn:executable + + * +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +undef], + expectedNextLine => undef, +}, +#### +# Property value followed by empty line and start of next diff +## +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next diff", + inputText => <<'END', +Added: svn:executable + + * + +Index: Makefile.shared +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\n"], + expectedNextLine => "Index: Makefile.shared\n", +}, +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Added: svn:executable + + * + +Index: Makefile.shared +END +), + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\r\n"], + expectedNextLine => "Index: Makefile.shared\r\n", +}, +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next property diff", + inputText => <<'END', 
+Added: svn:executable + + * + +Property changes on: Makefile.shared +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\n"], + expectedNextLine => "Property changes on: Makefile.shared\n", +}, +{ + # New test + diffName => "add svn:executable, followed by empty line and start of next property diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Added: svn:executable + + * + +Property changes on: Makefile.shared +END +), + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\r\n"], + expectedNextLine => "Property changes on: Makefile.shared\r\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of next diff", + inputText => <<'END', +Name: documentation + + A +long sentence that spans +multiple lines. + +Index: Makefile.shared +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\nlong sentence that spans\nmultiple lines.", +}, +"\n"], + expectedNextLine => "Index: Makefile.shared\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of next diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Name: documentation + + A +long sentence that spans +multiple lines. + +Index: Makefile.shared +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\r\nlong sentence that spans\r\nmultiple lines.", +}, +"\r\n"], + expectedNextLine => "Index: Makefile.shared\r\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of next property diff", + inputText => <<'END', +Name: documentation + + A +long sentence that spans +multiple lines. + +Property changes on: Makefile.shared +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\nlong sentence that spans\nmultiple lines.", +}, +"\n"], + expectedNextLine => "Property changes on: Makefile.shared\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of next property diff using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Name: documentation + + A +long sentence that spans +multiple lines. 
+ +Property changes on: Makefile.shared +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\r\nlong sentence that spans\r\nmultiple lines.", +}, +"\r\n"], + expectedNextLine => "Property changes on: Makefile.shared\r\n", +}, +#### +# Property value followed by empty line and start of binary patch +## +{ + # New test + diffName => "add svn:executable, followed by empty line and start of binary patch", + inputText => <<'END', +Added: svn:executable + + * + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +{ + # New test + diffName => "add svn:executable, followed by empty line and start of binary patch using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Added: svn:executable + + * + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"\r\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of binary patch", + inputText => <<'END', +Name: documentation + + A +long sentence that spans +multiple lines. + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\nlong sentence that spans\nmultiple lines.", +}, +"\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by empty line and start of binary patch using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Name: documentation + + A +long sentence that spans +multiple lines. + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\r\nlong sentence that spans\r\nmultiple lines.", +}, +"\r\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n", +}, +{ + # New test + diffName => "multi-line '-' change, followed by multi-line '+' change, empty line, and start of binary patch", + inputText => <<'END', +Modified: documentation + - A +long sentence that spans +multiple lines. + + Another +long sentence that spans +multiple lines. + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "Another\nlong sentence that spans\nmultiple lines.", +}, +"\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +{ + # New test + diffName => "multi-line '-' change, followed by multi-line '+' change, empty line, and start of binary patch using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Modified: documentation + - A +long sentence that spans +multiple lines. + + Another +long sentence that spans +multiple lines. 
+ +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "Another\r\nlong sentence that spans\r\nmultiple lines.", +}, +"\r\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n", +}, +#### +# Successive properties +## +{ + # New test + diffName => "single-line '+' change followed by custom property with single-line '+' change", + inputText => <<'END', +Added: svn:executable + + * +Added: documentation + + A sentence. +END + expectedReturn => [ +{ + name => "svn:executable", + propertyChangeDelta => 1, + value => "*", +}, +"Added: documentation\n"], + expectedNextLine => " + A sentence.\n", +}, +{ + # New test + diffName => "multi-line '+' change, followed by svn:executable", + inputText => <<'END', +Name: documentation + + A +long sentence that spans +multiple lines. +Name: svn:executable + + * +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A\nlong sentence that spans\nmultiple lines.", +}, +"Name: svn:executable\n"], + expectedNextLine => " + *\n", +}, +{ + # New test + diffName => "multi-line '-' change, followed by multi-line '+' change and add svn:executable", + inputText => <<'END', +Modified: documentation + - A +long sentence that spans +multiple lines. + + Another +long sentence that spans +multiple lines. +Added: svn:executable + + * +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "Another\nlong sentence that spans\nmultiple lines.", +}, +"Added: svn:executable\n"], + expectedNextLine => " + *\n", +}, +{ + # New test + diffName => "'Merged' change followed by 'Merged' change", + inputText => <<'END', +Added: svn:mergeinfo + Merged /trunk/Makefile:r33020 + Merged /trunk/Makefile.shared:r58350 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => 1, + value => "/trunk/Makefile.shared:r58350", +}, +undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Reverse-merged' change followed by 'Reverse-merged' change", + inputText => <<'END', +Deleted: svn:mergeinfo + Reverse-merged /trunk/Makefile:r33020 + Reverse-merged /trunk/Makefile.shared:r58350 +END + expectedReturn => [ +{ + name => "svn:mergeinfo", + propertyChangeDelta => -1, + value => "/trunk/Makefile.shared:r58350", +}, +undef], + expectedNextLine => undef, +}, +#### +# Property values with trailing new lines. +## +# FIXME: We do not support property values with trailing new lines, since it is difficult to +# disambiguate them from the empty line that preceeds the contents of a binary patch as +# in the test case (above): "multi-line '+' change, followed by empty line and start of binary patch". +{ + # New test + diffName => "single-line '+' with trailing new line", + inputText => <<'END', +Added: documentation + + A sentence. + +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A sentence.", +}, +"\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '+' with trailing new line using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Added: documentation + + A sentence. 
+ +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A sentence.", +}, +"\r\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '+' with trailing new line, followed by empty line and start of binary patch", + inputText => <<'END', +Added: documentation + + A sentence. + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A sentence.", +}, +"\n"], + expectedNextLine => "\n", +}, +{ + # New test + diffName => "single-line '+' with trailing new line, followed by empty line and start of binary patch using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Added: documentation + + A sentence. + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => 1, + value => "A sentence.", +}, +"\r\n"], + expectedNextLine => "\r\n", +}, +{ + # New test + diffName => "single-line '-' change with trailing new line, and single-line '+' change", + inputText => <<'END', +Modified: documentation + - A long sentence. + + + A sentence. +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => -1, # Since we only interpret the '-' property. + value => "A long sentence.", +}, +"\n"], + expectedNextLine => " + A sentence.\n", +}, +{ + # New test + diffName => "single-line '-' change with trailing new line, and single-line '+' change using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Modified: documentation + - A long sentence. + + + A sentence. +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => -1, # Since we only interpret the '-' property. + value => "A long sentence.", +}, +"\r\n"], + expectedNextLine => " + A sentence.\r\n", +}, +{ + # New test + diffName => "multi-line '-' change with trailing new line, and multi-line '+' change", + inputText => <<'END', +Modified: documentation + - A +long sentence that spans +multiple lines. + + + Another +long sentence that spans +multiple lines. +END + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => -1, # Since we only interpret the '-' property. + value => "A\nlong sentence that spans\nmultiple lines.", +}, +"\n"], + expectedNextLine => " + Another\n", +}, +{ + # New test + diffName => "multi-line '-' change with trailing new line, and multi-line '+' change using Windows line endings", + inputText => toWindowsLineEndings(<<'END', +Modified: documentation + - A +long sentence that spans +multiple lines. + + + Another +long sentence that spans +multiple lines. +END +), + expectedReturn => [ +{ + name => "documentation", + propertyChangeDelta => -1, # Since we only interpret the '-' property. + value => "A\r\nlong sentence that spans\r\nmultiple lines.", +}, +"\r\n"], + expectedNextLine => " + Another\r\n", +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. 
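Taken together, the cases above pin down the calling convention: parseSvnProperty() is given the filehandle plus the already-read "Added:"/"Deleted:"/"Modified:"/"Name:" line, and returns a hash reference holding name, propertyChangeDelta (+1 or -1), and value, followed by the last line it read. A hedged sketch of a direct call, mirroring the "simple: add svn:executable" case and assuming VCSUtils.pm is importable:

use VCSUtils;

my $block = <<'SVN';
Added: svn:executable
   + *
SVN
open(my $fh, "<", \$block) or die;
my $firstLine = <$fh>;                       # "Added: svn:executable\n"
my ($property, $lastReadLine) = VCSUtils::parseSvnProperty($fh, $firstLine);
# Per that case, $property should come back as
# { name => "svn:executable", propertyChangeDelta => 1, value => "*" }, with $lastReadLine undef.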
+ +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseSvnProperty(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseSvnProperty($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl new file mode 100644 index 0000000..2de8ae3 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseSvnPropertyValue.pl @@ -0,0 +1,233 @@ +#!/usr/bin/perl -w +# +# Copyright (C) Research in Motion Limited 2010. All Rights Reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of parseSvnPropertyValue(). 
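parseSvnPropertyValue() handles just the value half of a property change: the " + ", " - ", " Merged ", and " Reverse-merged " lines, which may span several physical lines. The cases below check the accumulated value and the last line read; a hedged sketch of a direct call, mirroring the "'Merged' change" case and assuming VCSUtils.pm is importable:

use VCSUtils;

my $valueText = <<'SVN';
   Merged /trunk/Makefile:r33020
SVN
open(my $fh, "<", \$valueText) or die;
my $firstLine = <$fh>;
my ($value, $lastReadLine) = VCSUtils::parseSvnPropertyValue($fh, $firstLine);
# Per the "'Merged' change" case below, $value should be "/trunk/Makefile:r33020"
# and $lastReadLine undef, since the input ends there.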
+ +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @testCaseHashRefs = ( +{ + # New test + diffName => "singe-line '+' change", + inputText => <<'END', + + * +END + expectedReturn => ["*", undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '-' change", + inputText => <<'END', + - * +END + expectedReturn => ["*", undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Merged' change", + inputText => <<'END', + Merged /trunk/Makefile:r33020 +END + expectedReturn => ["/trunk/Makefile:r33020", undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Reverse-merged' change", + inputText => <<'END', + Reverse-merged /trunk/Makefile:r33020 +END + expectedReturn => ["/trunk/Makefile:r33020", undef], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '-' change followed by empty line with Unix line endings", + inputText => <<'END', + - * + +END + expectedReturn => ["*", "\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '-' change followed by empty line with Windows line endings", + inputText => toWindowsLineEndings(<<'END', + - * + +END +), + expectedReturn => ["*", "\r\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "single-line '-' change followed by the next property", + inputText => <<'END', + - * +Deleted: svn:executable +END + expectedReturn => ["*", "Deleted: svn:executable\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "multi-line '+' change and start of binary patch", + inputText => <<'END', + + A +long sentence that spans +multiple lines. + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END + expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", "\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\n", +}, +{ + # New test + diffName => "multi-line '+' change and start of binary patch with Windows line endings", + inputText => toWindowsLineEndings(<<'END', + + A +long sentence that spans +multiple lines. + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +END +), + expectedReturn => ["A\r\nlong sentence that spans\r\nmultiple lines.", "\r\n"], + expectedNextLine => "Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==\r\n", +}, +{ + # New test + diffName => "multi-line '-' change followed by '+' single-line change", + inputText => <<'END', + - A +long sentence that spans +multiple lines. + + A single-line. +END + expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", " + A single-line.\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "multi-line '-' change followed by the next property", + inputText => <<'END', + - A +long sentence that spans +multiple lines. +Added: svn:executable +END + expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", "Added: svn:executable\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "multi-line '-' change followed by '+' multi-line change", + inputText => <<'END', + - A +long sentence that spans +multiple lines. + + Another +long sentence that spans +multiple lines. 
+END + expectedReturn => ["A\nlong sentence that spans\nmultiple lines.", " + Another\n"], + expectedNextLine => "long sentence that spans\n", +}, +{ + # New test + diffName => "'Reverse-merged' change followed by 'Merge' change", + inputText => <<'END', + Reverse-merged /trunk/Makefile:r33020 + Merged /trunk/Makefile:r41697 +END + expectedReturn => ["/trunk/Makefile:r33020", " Merged /trunk/Makefile:r41697\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Merged' change followed by 'Merge' change", + inputText => <<'END', + Merged /trunk/Makefile:r33020 + Merged /trunk/Makefile.shared:r58350 +END + expectedReturn => ["/trunk/Makefile:r33020", " Merged /trunk/Makefile.shared:r58350\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Reverse-merged' change followed by 'Reverse-merged' change", + inputText => <<'END', + Reverse-merged /trunk/Makefile:r33020 + Reverse-merged /trunk/Makefile.shared:r58350 +END + expectedReturn => ["/trunk/Makefile:r33020", " Reverse-merged /trunk/Makefile.shared:r58350\n"], + expectedNextLine => undef, +}, +{ + # New test + diffName => "'Reverse-merged' change followed by 'Reverse-merged' change followed by 'Merged' change", + inputText => <<'END', + Reverse-merged /trunk/Makefile:r33020 + Reverse-merged /trunk/Makefile.shared:r58350 + Merged /trunk/ChangeLog:r64190 +END + expectedReturn => ["/trunk/Makefile:r33020", " Reverse-merged /trunk/Makefile.shared:r58350\n"], + expectedNextLine => " Merged /trunk/ChangeLog:r64190\n", +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. + +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "parseSvnPropertyValue(): $testCase->{diffName}: comparing"; + + my $fileHandle; + open($fileHandle, "<", \$testCase->{inputText}); + my $line = <$fileHandle>; + + my @got = VCSUtils::parseSvnPropertyValue($fileHandle, $line); + my $expectedReturn = $testCase->{expectedReturn}; + + is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + + my $gotNextLine = <$fileHandle>; + is($gotNextLine, $testCase->{expectedNextLine}, "$testNameStart next read line."); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl new file mode 100644 index 0000000..a7ae807 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/prepareParsedPatch.pl @@ -0,0 +1,136 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of prepareParsedPatch(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my $diffHashRef1 = { # not a copy, no source revision + copiedFromPath => undef, + indexPath => "indexPath1", + sourceRevision => undef, + svnConvertedText => "diff1", +}; +my $diffHashRef2 = { # not a copy, has source revision + copiedFromPath => undef, + indexPath => "indexPath2", + sourceRevision => 20, + svnConvertedText => "diff2", +}; +my $diffHashRef3 = { # a copy (copies always have source revision) + copiedFromPath => "sourcePath3", + indexPath => "indexPath2", # Deliberately choosing same as $diffHashRef2 + sourceRevision => 3, + svnConvertedText => "diff3", +}; + +my @testCases = ( +{ + # New test + testName => "zero diffs: empty array", + diffHashRefsInput => [], + expected => { + copyDiffHashRefs => [], + nonCopyDiffHashRefs => [], + sourceRevisionHash => {}, + }, +}, +{ + # New test + testName => "one diff: non-copy, no revision", + diffHashRefsInput => [$diffHashRef1], + expected => { + copyDiffHashRefs => [], + nonCopyDiffHashRefs => [$diffHashRef1], + sourceRevisionHash => {}, + }, +}, +{ + # New test + testName => "one diff: non-copy, has revision", + diffHashRefsInput => [$diffHashRef2], + expected => { + copyDiffHashRefs => [], + nonCopyDiffHashRefs => [$diffHashRef2], + sourceRevisionHash => { + "indexPath2" => 20, + } + }, +}, +{ + # New test + testName => "one diff: copy (has revision)", + diffHashRefsInput => [$diffHashRef3], + expected => { + copyDiffHashRefs => [$diffHashRef3], + nonCopyDiffHashRefs => [], + sourceRevisionHash => { + "sourcePath3" => 3, + } + }, +}, +{ + # New test + testName => "two diffs: two non-copies", + diffHashRefsInput => [$diffHashRef1, $diffHashRef2], + expected => { + copyDiffHashRefs => [], + nonCopyDiffHashRefs => [$diffHashRef1, $diffHashRef2], + sourceRevisionHash => { + "indexPath2" => 20, + } + }, +}, +{ + # New test + testName => "two diffs: non-copy and copy", + diffHashRefsInput => [$diffHashRef2, $diffHashRef3], + expected => { + copyDiffHashRefs => [$diffHashRef3], + nonCopyDiffHashRefs => [$diffHashRef2], + sourceRevisionHash => { + "sourcePath3" => 3, + "indexPath2" => 20, + } + }, +}, +); + +my $testCasesCount = @testCases; +plan(tests => $testCasesCount); + +foreach my $testCase (@testCases) { + my $testName = $testCase->{testName}; + my @diffHashRefs = @{$testCase->{diffHashRefsInput}}; + my $expected = $testCase->{expected}; + + my $got = prepareParsedPatch(0, @diffHashRefs); + + is_deeply($got, $expected, $testName); +} + diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl new file mode 100644 index 0000000..8bd8e90 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl @@ -0,0 +1,56 @@ +#!/usr/bin/perl +# +# Copyright (C) Research In Motion Limited 2010. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Research In Motion Limited nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::removeEOL(). + +use Test::Simple tests => 5; +use VCSUtils; + +my $title; + +# New test +$title = "removeEOL: Undefined argument."; +ok(removeEOL(undef) eq ""); + +# New test +$title = "removeEOL: Line with Windows line ending."; +ok(removeEOL("This line ends with a Windows line ending.\r\n") eq "This line ends with a Windows line ending."); + +# New test +$title = "removeEOL: Line with Unix line ending."; +ok(removeEOL("This line ends with a Unix line ending.\n") eq "This line ends with a Unix line ending."); + +# New test +$title = "removeEOL: Line with Mac line ending."; +ok(removeEOL("This line ends with a Mac line ending.\r") eq "This line ends with a Mac line ending."); + +# New test +$title = "removeEOL: Line with a mix of line endings."; +ok(removeEOL("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "This line contains a mix of line endings."); diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl new file mode 100644 index 0000000..5acc517 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/runPatchCommand.pl @@ -0,0 +1,92 @@ +#!/usr/bin/perl +# +# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of VCSUtils::runPatchCommand(). + +use Test::Simple tests => 4; +use VCSUtils; + +# New test +$title = "runPatchCommand: Unsuccessful patch, forcing."; + +# Since $patch has no "Index:" path, passing this to runPatchCommand +# should not affect any files. +my $patch = <<'END'; +Garbage patch contents +END + +# We call via callSilently() to avoid output like the following to STDERR: +# patch: **** Only garbage was found in the patch input. +$argsHashRef = {ensureForce => 1}; +$exitStatus = callSilently(\&runPatchCommand, $patch, ".", "file_to_patch.txt", $argsHashRef); + +ok($exitStatus != 0, $title); + +# New test +$title = "runPatchCommand: New file, --dry-run."; + +# This file should not exist after the tests, but we take care with the +# file name and contents just in case. +my $fileToPatch = "temp_OK_TO_ERASE__README_FOR_MORE.txt"; +$patch = <<END; +Index: $fileToPatch +=================================================================== +--- $fileToPatch (revision 0) ++++ $fileToPatch (revision 0) +@@ -0,0 +1,5 @@ ++This is a test file for WebKitTools/Scripts/VCSUtils_unittest.pl. ++This file should not have gotten created on your system. ++If it did, some unit tests don't seem to be working quite right: ++It would be great if you could file a bug report. Thanks! ++--------------------------------------------------------------------- +END + +# --dry-run prevents creating any files. +# --silent suppresses the success message to STDOUT. +$argsHashRef = {options => ["--dry-run", "--silent"]}; +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); + +ok($exitStatus == 0, $title); + +# New test +$title = "runPatchCommand: New file: \"$fileToPatch\"."; + +$argsHashRef = {options => ["--silent"]}; +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); + +ok($exitStatus == 0, $title); + +# New test +$title = "runPatchCommand: Reverse new file (clean up previous)."; + +$argsHashRef = {shouldReverse => 1, + options => ["--silent", "--remove-empty-files"]}; # To clean up. +$exitStatus = runPatchCommand($patch, ".", $fileToPatch, $argsHashRef); +ok($exitStatus == 0, $title); diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl new file mode 100644 index 0000000..076d88c --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/setChangeLogDateAndReviewer.pl @@ -0,0 +1,128 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Unit tests of setChangeLogDateAndReviewer(). + +use strict; +use warnings; + +use Test::More; +use VCSUtils; + +my @testCaseHashRefs = ( +{ + testName => "reviewer defined and \"NOBODY (OOPS!)\" in leading junk", + reviewer => "John Doe", + epochTime => 1273414321, + patch => <<'END', +Subject: [PATCH] + +Reviewed by NOBODY (OOPS!). + +diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog +--- a/WebCore/ChangeLog ++++ b/WebCore/ChangeLog +@@ -1,3 +1,15 @@ ++2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> ++ ++ Reviewed by NOBODY (OOPS!). ++ + 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> + + Reviewed by Jane Doe. +END + expectedReturn => <<'END', +Subject: [PATCH] + +Reviewed by NOBODY (OOPS!). + +diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog +--- a/WebCore/ChangeLog ++++ b/WebCore/ChangeLog +@@ -1,3 +1,15 @@ ++2010-05-09 Chris Jerdonek <cjerdonek@webkit.org> ++ ++ Reviewed by John Doe. ++ + 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> + + Reviewed by Jane Doe. +END +}, +{ + testName => "reviewer not defined and \"NOBODY (OOPS!)\" in leading junk", + reviewer => undef, + epochTime => 1273414321, + patch => <<'END', +Subject: [PATCH] + +Reviewed by NOBODY (OOPS!). + +diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog +--- a/WebCore/ChangeLog ++++ b/WebCore/ChangeLog +@@ -1,3 +1,15 @@ ++2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> ++ ++ Reviewed by NOBODY (OOPS!). ++ + 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> + + Reviewed by Jane Doe. +END + expectedReturn => <<'END', +Subject: [PATCH] + +Reviewed by NOBODY (OOPS!). + +diff --git a/WebCore/ChangeLog b/WebCore/ChangeLog +--- a/WebCore/ChangeLog ++++ b/WebCore/ChangeLog +@@ -1,3 +1,15 @@ ++2010-05-09 Chris Jerdonek <cjerdonek@webkit.org> ++ ++ Reviewed by NOBODY (OOPS!). ++ + 2010-05-08 Chris Jerdonek <cjerdonek@webkit.org> + + Reviewed by Jane Doe. +END +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 1 * $testCasesCount); # Total number of assertions. 
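The two cases above pin down the interface: setChangeLogDateAndReviewer() takes the patch text, an optional reviewer name, and an epoch time, and rewrites only the newly added ("+"-prefixed) ChangeLog entry, updating its date and, when a reviewer is given, replacing "NOBODY (OOPS!)"; context lines and the leading junk are left alone. A hedged sketch of a direct call using the first case, assuming VCSUtils.pm is importable:

use VCSUtils;

my $patch = $testCaseHashRefs[0]{patch};   # the "reviewer defined ..." case above
my $rewritten = VCSUtils::setChangeLogDateAndReviewer($patch, "John Doe", 1273414321);
# Per that case's expected return, the added entry's date becomes 2010-05-09 and its
# reviewer line becomes "Reviewed by John Doe."; the rest of the patch is unchanged.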
+ +foreach my $testCase (@testCaseHashRefs) { + my $testNameStart = "setChangeLogDateAndReviewer(): $testCase->{testName}: comparing"; + + my $patch = $testCase->{patch}; + my $reviewer = $testCase->{reviewer}; + my $epochTime = $testCase->{epochTime}; + + my $got = VCSUtils::setChangeLogDateAndReviewer($patch, $reviewer, $epochTime); + my $expectedReturn = $testCase->{expectedReturn}; + + is($got, $expectedReturn, "$testNameStart return value."); +} diff --git a/Tools/Scripts/webkitperl/features.pm b/Tools/Scripts/webkitperl/features.pm new file mode 100644 index 0000000..7ca924b --- /dev/null +++ b/Tools/Scripts/webkitperl/features.pm @@ -0,0 +1,105 @@ +# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Module to share code to detect the existance of features in built binaries. + +use strict; +use warnings; + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw(&checkWebCoreFeatureSupport + &removeLibraryDependingOnFeature); + %EXPORT_TAGS = ( ); + @EXPORT_OK = (); +} + +sub libraryContainsSymbol($$) +{ + my ($path, $symbol) = @_; + + if (isCygwin() or isWindows()) { + # FIXME: Implement this for Windows. + return 0; + } + + my $foundSymbol = 0; + if (-e $path) { + open NM, "-|", "nm", $path or die; + while (<NM>) { + $foundSymbol = 1 if /$symbol/; # FIXME: This should probably check for word boundaries before/after the symbol name. + } + close NM; + } + return $foundSymbol; +} + +sub hasFeature($$) +{ + my ($featureName, $path) = @_; + my %symbolForFeature = ( + "MathML" => "MathMLElement", + "SVG" => "SVGDefsElement", # We used to look for SVGElement but isSVGElement exists (and would match) in --no-svg builds. 
+ "Accelerated Compositing" => "GraphicsLayer", + "3D Rendering" => "WebCoreHas3DRendering", + "3D Canvas" => "WebGLShader", + "WML" => "WMLElement", + "WCSS" => "parseWCSSInputProperty", + "XHTMLMP" => "isXHTMLMPDocument", + ); + my $symbolName = $symbolForFeature{$featureName}; + die "Unknown feature: $featureName" unless $symbolName; + return libraryContainsSymbol($path, $symbolName); +} + +sub checkWebCoreFeatureSupport($$) +{ + my ($feature, $required) = @_; + my $libraryName = "WebCore"; + my $path = builtDylibPathForName($libraryName); + my $hasFeature = hasFeature($feature, $path); + if ($required && !$hasFeature) { + die "$libraryName at \"$path\" does not include $hasFeature support. See build-webkit --help\n"; + } + return $hasFeature; +} + +sub removeLibraryDependingOnFeature($$$) +{ + my ($libraryName, $featureName, $shouldHaveFeature) = @_; + my $path = builtDylibPathForName($libraryName); + return unless -x $path; + + my $hasFeature = hasFeature($featureName, $path); + system "rm -f $path" if ($shouldHaveFeature xor $hasFeature); +} + +1; diff --git a/Tools/Scripts/webkitperl/httpd.pm b/Tools/Scripts/webkitperl/httpd.pm new file mode 100644 index 0000000..b415db6 --- /dev/null +++ b/Tools/Scripts/webkitperl/httpd.pm @@ -0,0 +1,321 @@ +# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved +# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com) +# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of +# its contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Module to share code to start and stop the Apache daemon. 
+ +use strict; +use warnings; + +use File::Copy; +use File::Path; +use File::Spec; +use File::Spec::Functions; +use Fcntl ':flock'; +use IPC::Open2; + +use webkitdirs; + +BEGIN { + use Exporter (); + our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS); + $VERSION = 1.00; + @ISA = qw(Exporter); + @EXPORT = qw(&getHTTPDPath + &getHTTPDConfigPathForTestDirectory + &getDefaultConfigForTestDirectory + &openHTTPD + &closeHTTPD + &setShouldWaitForUserInterrupt + &waitForHTTPDLock + &getWaitTime); + %EXPORT_TAGS = ( ); + @EXPORT_OK = (); +} + +my $tmpDir = "/tmp"; +my $httpdLockPrefix = "WebKitHttpd.lock."; +my $myLockFile; +my $exclusiveLockFile = File::Spec->catfile($tmpDir, "WebKit.lock"); +my $httpdPath; +my $httpdPidDir = File::Spec->catfile($tmpDir, "WebKit"); +my $httpdPidFile = File::Spec->catfile($httpdPidDir, "httpd.pid"); +my $httpdPid; +my $waitForUserInterrupt = 0; +my $waitBeginTime; +my $waitEndTime; + +$SIG{'INT'} = 'handleInterrupt'; +$SIG{'TERM'} = 'handleInterrupt'; + +sub getHTTPDPath +{ + if (isDebianBased()) { + $httpdPath = "/usr/sbin/apache2"; + } else { + $httpdPath = "/usr/sbin/httpd"; + } + return $httpdPath; +} + +sub getDefaultConfigForTestDirectory +{ + my ($testDirectory) = @_; + die "No test directory has been specified." unless ($testDirectory); + + my $httpdConfig = getHTTPDConfigPathForTestDirectory($testDirectory); + my $documentRoot = "$testDirectory/http/tests"; + my $jsTestResourcesDirectory = $testDirectory . "/fast/js/resources"; + my $typesConfig = "$testDirectory/http/conf/mime.types"; + my $httpdLockFile = File::Spec->catfile($httpdPidDir, "httpd.lock"); + my $httpdScoreBoardFile = File::Spec->catfile($httpdPidDir, "httpd.scoreboard"); + + my @httpdArgs = ( + "-f", "$httpdConfig", + "-C", "DocumentRoot \"$documentRoot\"", + # Setup a link to where the js test templates are stored, use -c so that mod_alias will already be loaded. + "-c", "Alias /js-test-resources \"$jsTestResourcesDirectory\"", + "-c", "TypesConfig \"$typesConfig\"", + # Apache wouldn't run CGIs with permissions==700 otherwise + "-c", "User \"#$<\"", + "-c", "LockFile \"$httpdLockFile\"", + "-c", "PidFile \"$httpdPidFile\"", + "-c", "ScoreBoardFile \"$httpdScoreBoardFile\"", + ); + + # FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed + # The version of Apache we use with Cygwin does not support SSL + my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem"; + push(@httpdArgs, "-c", "SSLCertificateFile \"$sslCertificate\"") unless isCygwin(); + + return @httpdArgs; + +} + +sub getHTTPDConfigPathForTestDirectory +{ + my ($testDirectory) = @_; + die "No test directory has been specified." 
unless ($testDirectory); + my $httpdConfig; + getHTTPDPath(); + if (isCygwin()) { + my $windowsConfDirectory = "$testDirectory/http/conf/"; + unless (-x "/usr/lib/apache/libphp4.dll") { + copy("$windowsConfDirectory/libphp4.dll", "/usr/lib/apache/libphp4.dll"); + chmod(0755, "/usr/lib/apache/libphp4.dll"); + } + $httpdConfig = "$windowsConfDirectory/cygwin-httpd.conf"; + } elsif (isDebianBased()) { + $httpdConfig = "$testDirectory/http/conf/apache2-debian-httpd.conf"; + } elsif (isFedoraBased()) { + $httpdConfig = "$testDirectory/http/conf/fedora-httpd.conf"; + } else { + $httpdConfig = "$testDirectory/http/conf/httpd.conf"; + $httpdConfig = "$testDirectory/http/conf/apache2-httpd.conf" if `$httpdPath -v` =~ m|Apache/2|; + } + return $httpdConfig; +} + +sub openHTTPD(@) +{ + my (@args) = @_; + die "No HTTPD configuration has been specified" unless (@args); + mkdir($httpdPidDir, 0755); + die "No write permissions to $httpdPidDir" unless (-w $httpdPidDir); + + if (-f $httpdPidFile) { + open (PIDFILE, $httpdPidFile); + my $oldPid = <PIDFILE>; + chomp $oldPid; + close PIDFILE; + if (0 != kill 0, $oldPid) { + print "\nhttpd is already running: pid $oldPid, killing...\n"; + if (!killHTTPD($oldPid)) { + cleanUp(); + die "Timed out waiting for httpd to quit"; + } + } + unlink $httpdPidFile; + } + + $httpdPath = "/usr/sbin/httpd" unless ($httpdPath); + + open2(">&1", \*HTTPDIN, $httpdPath, @args); + + my $retryCount = 20; + while (!-f $httpdPidFile && $retryCount) { + sleep 1; + --$retryCount; + } + + if (!$retryCount) { + cleanUp(); + die "Timed out waiting for httpd to start"; + } + + $httpdPid = <PIDFILE> if open(PIDFILE, $httpdPidFile); + chomp $httpdPid if $httpdPid; + close PIDFILE; + + waitpid($httpdPid, 0) if ($waitForUserInterrupt && $httpdPid); + + return 1; +} + +sub closeHTTPD +{ + close HTTPDIN; + my $succeeded = killHTTPD($httpdPid); + cleanUp(); + unless ($succeeded) { + print STDERR "Timed out waiting for httpd to terminate!\n" unless $succeeded; + return 0; + } + return 1; +} + +sub killHTTPD +{ + my ($pid) = @_; + + return 1 unless $pid; + + kill 15, $pid; + + my $retryCount = 20; + while (kill(0, $pid) && $retryCount) { + sleep 1; + --$retryCount; + } + return $retryCount != 0; +} + +sub setShouldWaitForUserInterrupt +{ + $waitForUserInterrupt = 1; +} + +sub handleInterrupt +{ + # On Cygwin, when we receive a signal Apache is still running, so we need + # to kill it. On other platforms (at least Mac OS X), Apache will have + # already been killed, and trying to kill it again will cause us to hang. + # All we need to do in this case is clean up our own files. + if (isCygwin()) { + closeHTTPD(); + } else { + cleanUp(); + } + + print "\n"; + exit(1); +} + +sub cleanUp +{ + rmdir $httpdPidDir; + unlink $exclusiveLockFile; + unlink $myLockFile if $myLockFile; +} + +sub extractLockNumber +{ + my ($lockFile) = @_; + return -1 unless $lockFile; + return substr($lockFile, length($httpdLockPrefix)); +} + +sub getLockFiles +{ + opendir(TMPDIR, $tmpDir) or die "Could not open " . $tmpDir . 
"."; + my @lockFiles = grep {m/^$httpdLockPrefix\d+$/} readdir(TMPDIR); + @lockFiles = sort { extractLockNumber($a) <=> extractLockNumber($b) } @lockFiles; + closedir(TMPDIR); + return @lockFiles; +} + +sub getNextAvailableLockNumber +{ + my @lockFiles = getLockFiles(); + return 0 unless @lockFiles; + return extractLockNumber($lockFiles[-1]) + 1; +} + +sub getLockNumberForCurrentRunning +{ + my @lockFiles = getLockFiles(); + return 0 unless @lockFiles; + return extractLockNumber($lockFiles[0]); +} + +sub waitForHTTPDLock +{ + $waitBeginTime = time; + scheduleHttpTesting(); + # If we are the only one waiting for Apache just run the tests without any further checking + if (scalar getLockFiles() > 1) { + my $currentLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getLockNumberForCurrentRunning()); + my $currentLockPid = <SCHEDULER_LOCK> if (-f $currentLockFile && open(SCHEDULER_LOCK, "<$currentLockFile")); + # Wait until we are allowed to run the http tests + while ($currentLockPid && $currentLockPid != $$) { + $currentLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getLockNumberForCurrentRunning()); + if ($currentLockFile eq $myLockFile) { + $currentLockPid = <SCHEDULER_LOCK> if open(SCHEDULER_LOCK, "<$currentLockFile"); + if ($currentLockPid != $$) { + print STDERR "\nPID mismatch.\n"; + last; + } + } else { + sleep 1; + } + } + } + $waitEndTime = time; +} + +sub scheduleHttpTesting +{ + # We need an exclusive lock file to avoid deadlocks and starvation and ensure that the scheduler lock numbers are sequential. + # The scheduler locks are used to schedule the running test sessions in first come first served order. + while (!(open(SEQUENTIAL_GUARD_LOCK, ">$exclusiveLockFile") && flock(SEQUENTIAL_GUARD_LOCK, LOCK_EX|LOCK_NB))) {} + $myLockFile = File::Spec->catfile($tmpDir, "$httpdLockPrefix" . getNextAvailableLockNumber()); + open(SCHEDULER_LOCK, ">$myLockFile"); + print SCHEDULER_LOCK "$$"; + print SEQUENTIAL_GUARD_LOCK "$$"; + close(SCHEDULER_LOCK); + close(SEQUENTIAL_GUARD_LOCK); + unlink $exclusiveLockFile; +} + +sub getWaitTime +{ + my $waitTime = 0; + if ($waitBeginTime && $waitEndTime) { + $waitTime = $waitEndTime - $waitBeginTime; + } + return $waitTime; +} diff --git a/Tools/Scripts/webkitpy/__init__.py b/Tools/Scripts/webkitpy/__init__.py new file mode 100644 index 0000000..b376bf2 --- /dev/null +++ b/Tools/Scripts/webkitpy/__init__.py @@ -0,0 +1,13 @@ +# Required for Python to search this directory for module files + +# Keep this file free of any code or import statements that could +# cause either an error to occur or a log message to be logged. +# This ensures that calling code can import initialization code from +# webkitpy before any errors or log messages due to code in this file. +# Initialization code can include things like version-checking code and +# logging configuration code. +# +# We do not execute any version-checking code or logging configuration +# code in this file so that callers can opt-in as they want. This also +# allows different callers to choose different initialization code, +# as necessary. 
diff --git a/Tools/Scripts/webkitpy/common/__init__.py b/Tools/Scripts/webkitpy/common/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/common/array_stream.py b/Tools/Scripts/webkitpy/common/array_stream.py new file mode 100644 index 0000000..e425d02 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/array_stream.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Package that private an array-based implementation of a stream.""" + + +class ArrayStream(object): + """Simple class that implmements a stream interface on top of an array. + + This is used primarily by unit test classes to mock output streams. It + performs a similar function to StringIO, but (a) it is write-only, and + (b) it can be used to retrieve each individual write(); StringIO + concatenates all of the writes together. + """ + + def __init__(self): + self._contents = [] + + def write(self, msg): + """Implement stream.write() by appending to the stream's contents.""" + self._contents.append(msg) + + def get(self): + """Return the contents of a stream (as an array).""" + return self._contents + + def reset(self): + """Empty the stream.""" + self._contents = [] + + def empty(self): + """Return whether the stream is empty.""" + return (len(self._contents) == 0) + + def flush(self): + """Flush the stream (a no-op implemented for compatibility).""" + pass + + def __repr__(self): + return '<ArrayStream: ' + str(self._contents) + '>' diff --git a/Tools/Scripts/webkitpy/common/array_stream_unittest.py b/Tools/Scripts/webkitpy/common/array_stream_unittest.py new file mode 100644 index 0000000..1a9b34a --- /dev/null +++ b/Tools/Scripts/webkitpy/common/array_stream_unittest.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for array_stream.py.""" + +import pdb +import unittest + +from webkitpy.common.array_stream import ArrayStream + + +class ArrayStreamTest(unittest.TestCase): + def assertEmpty(self, a_stream): + self.assertTrue(a_stream.empty()) + + def assertNotEmpty(self, a_stream): + self.assertFalse(a_stream.empty()) + + def assertContentsMatch(self, a_stream, contents): + self.assertEquals(a_stream.get(), contents) + + def test_basics(self): + a = ArrayStream() + self.assertEmpty(a) + self.assertContentsMatch(a, []) + + a.flush() + self.assertEmpty(a) + self.assertContentsMatch(a, []) + + a.write("foo") + a.write("bar") + self.assertNotEmpty(a) + self.assertContentsMatch(a, ["foo", "bar"]) + + a.flush() + self.assertNotEmpty(a) + self.assertContentsMatch(a, ["foo", "bar"]) + + a.reset() + self.assertEmpty(a) + self.assertContentsMatch(a, []) + + self.assertEquals(str(a), "<ArrayStream: []>") + + a.write("foo") + self.assertNotEmpty(a) + self.assertContentsMatch(a, ["foo"]) + self.assertEquals(str(a), "<ArrayStream: ['foo']>") + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/checkout/__init__.py b/Tools/Scripts/webkitpy/common/checkout/__init__.py new file mode 100644 index 0000000..597dcbd --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/__init__.py @@ -0,0 +1,3 @@ +# Required for Python to search this directory for module files + +from api import Checkout diff --git a/Tools/Scripts/webkitpy/common/checkout/api.py b/Tools/Scripts/webkitpy/common/checkout/api.py new file mode 100644 index 0000000..6357982 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/api.py @@ -0,0 +1,164 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import StringIO + +from webkitpy.common.config import urls +from webkitpy.common.checkout.changelog import ChangeLog +from webkitpy.common.checkout.commitinfo import CommitInfo +from webkitpy.common.checkout.scm import CommitMessage +from webkitpy.common.memoized import memoized +from webkitpy.common.net.bugzilla import parse_bug_id +from webkitpy.common.system.executive import Executive, run_command, ScriptError +from webkitpy.common.system.deprecated_logging import log + + +# This class represents the WebKit-specific parts of the checkout (like +# ChangeLogs). +# FIXME: Move a bunch of ChangeLog-specific processing from SCM to this object. +class Checkout(object): + def __init__(self, scm): + self._scm = scm + + def _is_path_to_changelog(self, path): + return os.path.basename(path) == "ChangeLog" + + def _latest_entry_for_changelog_at_revision(self, changelog_path, revision): + changelog_contents = self._scm.contents_at_revision(changelog_path, revision) + # contents_at_revision returns a byte array (str()), but we know + # that ChangeLog files are utf-8. parse_latest_entry_from_file + # expects a file-like object which vends unicode(), so we decode here. + changelog_file = StringIO.StringIO(changelog_contents.decode("utf-8")) + return ChangeLog.parse_latest_entry_from_file(changelog_file) + + def changelog_entries_for_revision(self, revision): + changed_files = self._scm.changed_files_for_revision(revision) + return [self._latest_entry_for_changelog_at_revision(path, revision) for path in changed_files if self._is_path_to_changelog(path)] + + @memoized + def commit_info_for_revision(self, revision): + committer_email = self._scm.committer_email_for_revision(revision) + changelog_entries = self.changelog_entries_for_revision(revision) + # Assume for now that the first entry has everything we need: + # FIXME: This will throw an exception if there were no ChangeLogs. 
+ if not len(changelog_entries): + return None + changelog_entry = changelog_entries[0] + changelog_data = { + "bug_id": parse_bug_id(changelog_entry.contents()), + "author_name": changelog_entry.author_name(), + "author_email": changelog_entry.author_email(), + "author": changelog_entry.author(), + "reviewer_text": changelog_entry.reviewer_text(), + "reviewer": changelog_entry.reviewer(), + } + # We could pass the changelog_entry instead of a dictionary here, but that makes + # mocking slightly more involved, and would make aggregating data from multiple + # entries more difficult to wire in if we need to do that in the future. + return CommitInfo(revision, committer_email, changelog_data) + + def bug_id_for_revision(self, revision): + return self.commit_info_for_revision(revision).bug_id() + + def _modified_files_matching_predicate(self, git_commit, predicate, changed_files=None): + # SCM returns paths relative to scm.checkout_root + # Callers (especially those using the ChangeLog class) may + # expect absolute paths, so this method returns absolute paths. + if not changed_files: + changed_files = self._scm.changed_files(git_commit) + absolute_paths = [os.path.join(self._scm.checkout_root, path) for path in changed_files] + return [path for path in absolute_paths if predicate(path)] + + def modified_changelogs(self, git_commit, changed_files=None): + return self._modified_files_matching_predicate(git_commit, self._is_path_to_changelog, changed_files=changed_files) + + def modified_non_changelogs(self, git_commit, changed_files=None): + return self._modified_files_matching_predicate(git_commit, lambda path: not self._is_path_to_changelog(path), changed_files=changed_files) + + def commit_message_for_this_commit(self, git_commit, changed_files=None): + changelog_paths = self.modified_changelogs(git_commit, changed_files) + if not len(changelog_paths): + raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n" + "All changes require a ChangeLog. See:\n %s" % urls.contribution_guidelines) + + changelog_messages = [] + for changelog_path in changelog_paths: + log("Parsing ChangeLog: %s" % changelog_path) + changelog_entry = ChangeLog(changelog_path).latest_entry() + if not changelog_entry: + raise ScriptError(message="Failed to parse ChangeLog: %s" % os.path.abspath(changelog_path)) + changelog_messages.append(changelog_entry.contents()) + + # FIXME: We should sort and label the ChangeLog messages like commit-log-editor does. + return CommitMessage("".join(changelog_messages).splitlines()) + + def recent_commit_infos_for_files(self, paths): + revisions = set(sum(map(self._scm.revisions_changing_file, paths), [])) + return set(map(self.commit_info_for_revision, revisions)) + + def suggested_reviewers(self, git_commit, changed_files=None): + changed_files = self.modified_non_changelogs(git_commit, changed_files) + commit_infos = self.recent_commit_infos_for_files(changed_files) + reviewers = [commit_info.reviewer() for commit_info in commit_infos if commit_info.reviewer()] + reviewers.extend([commit_info.author() for commit_info in commit_infos if commit_info.author() and commit_info.author().can_review]) + return sorted(set(reviewers)) + + def bug_id_for_this_commit(self, git_commit, changed_files=None): + try: + return parse_bug_id(self.commit_message_for_this_commit(git_commit, changed_files).message()) + except ScriptError, e: + pass # We might not have ChangeLogs. 
+ + def apply_patch(self, patch, force=False): + # It's possible that the patch was not made from the root directory. + # We should detect and handle that case. + # FIXME: Move _scm.script_path here once we get rid of all the dependencies. + args = [self._scm.script_path('svn-apply')] + if patch.reviewer(): + args += ['--reviewer', patch.reviewer().full_name] + if force: + args.append('--force') + run_command(args, input=patch.contents()) + + def apply_reverse_diff(self, revision): + self._scm.apply_reverse_diff(revision) + + # We revert the ChangeLogs because removing lines from a ChangeLog + # doesn't make sense. ChangeLogs are append only. + changelog_paths = self.modified_changelogs(git_commit=None) + if len(changelog_paths): + self._scm.revert_files(changelog_paths) + + conflicts = self._scm.conflicted_files() + if len(conflicts): + raise ScriptError(message="Failed to apply reverse diff for revision %s because of the following conflicts:\n%s" % (revision, "\n".join(conflicts))) + + def apply_reverse_diffs(self, revision_list): + for revision in sorted(revision_list, reverse=True): + self.apply_reverse_diff(revision) diff --git a/Tools/Scripts/webkitpy/common/checkout/api_unittest.py b/Tools/Scripts/webkitpy/common/checkout/api_unittest.py new file mode 100644 index 0000000..1f97abd --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/api_unittest.py @@ -0,0 +1,196 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
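The tests below exercise the Checkout methods defined in api.py above. As a rough illustration of how those methods combine outside of a test, a caller sitting inside a real checkout might do something like the following; treat it as a sketch only, since it assumes the working directory is inside a WebKit checkout and that at least one ChangeLog has been modified.

import os

from webkitpy.common.checkout.api import Checkout
from webkitpy.common.checkout.scm import detect_scm_system

# Build a Checkout around whatever SCM the current directory lives in.
checkout = Checkout(detect_scm_system(os.getcwd()))

# Assemble a commit message from the modified ChangeLogs; this raises a
# ScriptError if no ChangeLog has been edited.
message = checkout.commit_message_for_this_commit(git_commit=None)
print message.message()

# Ask recent history for people who could review the non-ChangeLog files touched.
for reviewer in checkout.suggested_reviewers(git_commit=None):
    print reviewer.full_name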
+ +from __future__ import with_statement + +import codecs +import os +import shutil +import tempfile +import unittest + +from webkitpy.common.checkout.api import Checkout +from webkitpy.common.checkout.changelog import ChangeLogEntry +from webkitpy.common.checkout.scm import detect_scm_system, CommitMessage +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock + + +# FIXME: Copied from scm_unittest.py +def write_into_file_at_path(file_path, contents, encoding="utf-8"): + with codecs.open(file_path, "w", encoding) as file: + file.write(contents) + + +_changelog1entry1 = u"""2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org> + + Unreviewed build fix to un-break webkit-patch land. + + Move commit_message_for_this_commit from scm to checkout + https://bugs.webkit.org/show_bug.cgi?id=36629 + + * Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage +""" +_changelog1entry2 = u"""2010-03-25 Adam Barth <abarth@webkit.org> + + Reviewed by Eric Seidel. + + Move commit_message_for_this_commit from scm to checkout + https://bugs.webkit.org/show_bug.cgi?id=36629 + + * Scripts/webkitpy/common/checkout/api.py: +""" +_changelog1 = u"\n".join([_changelog1entry1, _changelog1entry2]) +_changelog2 = u"""2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org> + + Unreviewed build fix to un-break webkit-patch land. + + Second part of this complicated change. + + * Path/To/Complicated/File: Added. + +2010-03-25 Adam Barth <abarth@webkit.org> + + Reviewed by Eric Seidel. + + Filler change. +""" + +class CommitMessageForThisCommitTest(unittest.TestCase): + expected_commit_message = u"""2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org> + + Unreviewed build fix to un-break webkit-patch land. + + Move commit_message_for_this_commit from scm to checkout + https://bugs.webkit.org/show_bug.cgi?id=36629 + + * Scripts/webkitpy/common/checkout/api.py: import scm.CommitMessage +2010-03-25 Tor Arne Vestb\u00f8 <vestbo@webkit.org> + + Unreviewed build fix to un-break webkit-patch land. + + Second part of this complicated change. + + * Path/To/Complicated/File: Added. +""" + + def setUp(self): + self.temp_dir = tempfile.mkdtemp(suffix="changelogs") + self.old_cwd = os.getcwd() + os.chdir(self.temp_dir) + write_into_file_at_path("ChangeLog1", _changelog1) + write_into_file_at_path("ChangeLog2", _changelog2) + + def tearDown(self): + shutil.rmtree(self.temp_dir, ignore_errors=True) + os.chdir(self.old_cwd) + + # FIXME: This should not need to touch the file system, however + # ChangeLog is difficult to mock at current. + def test_commit_message_for_this_commit(self): + checkout = Checkout(None) + checkout.modified_changelogs = lambda git_commit, changed_files=None: ["ChangeLog1", "ChangeLog2"] + output = OutputCapture() + expected_stderr = "Parsing ChangeLog: ChangeLog1\nParsing ChangeLog: ChangeLog2\n" + commit_message = output.assert_outputs(self, checkout.commit_message_for_this_commit, + kwargs={"git_commit": None}, expected_stderr=expected_stderr) + self.assertEqual(commit_message.message(), self.expected_commit_message) + + +class CheckoutTest(unittest.TestCase): + def test_latest_entry_for_changelog_at_revision(self): + scm = Mock() + def mock_contents_at_revision(changelog_path, revision): + self.assertEqual(changelog_path, "foo") + self.assertEqual(revision, "bar") + # contents_at_revision is expected to return a byte array (str) + # so we encode our unicode ChangeLog down to a utf-8 stream. 
+ return _changelog1.encode("utf-8") + scm.contents_at_revision = mock_contents_at_revision + checkout = Checkout(scm) + entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar") + self.assertEqual(entry.contents(), _changelog1entry1) + + def test_commit_info_for_revision(self): + scm = Mock() + scm.committer_email_for_revision = lambda revision: "committer@example.com" + checkout = Checkout(scm) + checkout.changelog_entries_for_revision = lambda revision: [ChangeLogEntry(_changelog1entry1)] + commitinfo = checkout.commit_info_for_revision(4) + self.assertEqual(commitinfo.bug_id(), 36629) + self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8") + self.assertEqual(commitinfo.author_email(), "vestbo@webkit.org") + self.assertEqual(commitinfo.reviewer_text(), None) + self.assertEqual(commitinfo.reviewer(), None) + self.assertEqual(commitinfo.committer_email(), "committer@example.com") + self.assertEqual(commitinfo.committer(), None) + + checkout.changelog_entries_for_revision = lambda revision: [] + self.assertEqual(checkout.commit_info_for_revision(1), None) + + def test_bug_id_for_revision(self): + scm = Mock() + scm.committer_email_for_revision = lambda revision: "committer@example.com" + checkout = Checkout(scm) + checkout.changelog_entries_for_revision = lambda revision: [ChangeLogEntry(_changelog1entry1)] + self.assertEqual(checkout.bug_id_for_revision(4), 36629) + + def test_bug_id_for_this_commit(self): + scm = Mock() + checkout = Checkout(scm) + checkout.commit_message_for_this_commit = lambda git_commit, changed_files=None: CommitMessage(ChangeLogEntry(_changelog1entry1).contents().splitlines()) + self.assertEqual(checkout.bug_id_for_this_commit(git_commit=None), 36629) + + def test_modified_changelogs(self): + scm = Mock() + scm.checkout_root = "/foo/bar" + scm.changed_files = lambda git_commit: ["file1", "ChangeLog", "relative/path/ChangeLog"] + checkout = Checkout(scm) + expected_changlogs = ["/foo/bar/ChangeLog", "/foo/bar/relative/path/ChangeLog"] + self.assertEqual(checkout.modified_changelogs(git_commit=None), expected_changlogs) + + def test_suggested_reviewers(self): + def mock_changelog_entries_for_revision(revision): + if revision % 2 == 0: + return [ChangeLogEntry(_changelog1entry1)] + return [ChangeLogEntry(_changelog1entry2)] + + def mock_revisions_changing_file(path, limit=5): + if path.endswith("ChangeLog"): + return [3] + return [4, 8] + + scm = Mock() + scm.checkout_root = "/foo/bar" + scm.changed_files = lambda git_commit: ["file1", "file2", "relative/path/ChangeLog"] + scm.revisions_changing_file = mock_revisions_changing_file + checkout = Checkout(scm) + checkout.changelog_entries_for_revision = mock_changelog_entries_for_revision + reviewers = checkout.suggested_reviewers(git_commit=None) + reviewer_names = [reviewer.full_name for reviewer in reviewers] + self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8']) diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog.py b/Tools/Scripts/webkitpy/common/checkout/changelog.py new file mode 100644 index 0000000..07f905d --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/changelog.py @@ -0,0 +1,191 @@ +# Copyright (C) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for parsing and modifying ChangeLog files + +import codecs +import fileinput # inplace file editing for set_reviewer_in_changelog +import os.path +import re +import textwrap + +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.config.committers import CommitterList +from webkitpy.common.config import urls +from webkitpy.common.net.bugzilla import parse_bug_id +from webkitpy.tool.grammar import join_with_separators + + +class ChangeLogEntry(object): + # e.g. 2009-06-03 Eric Seidel <eric@webkit.org> + date_line_regexp = r'^(?P<date>\d{4}-\d{2}-\d{2})\s+(?P<name>.+?)\s+<(?P<email>[^<>]+)>$' + + def __init__(self, contents, committer_list=CommitterList()): + self._contents = contents + self._committer_list = committer_list + self._parse_entry() + + def _parse_entry(self): + match = re.match(self.date_line_regexp, self._contents, re.MULTILINE) + if not match: + log("WARNING: Creating invalid ChangeLogEntry:\n%s" % self._contents) + + # FIXME: group("name") does not seem to be Unicode? Probably due to self._contents not being unicode. + self._author_name = match.group("name") if match else None + self._author_email = match.group("email") if match else None + + match = re.search("^\s+Reviewed by (?P<reviewer>.*?)[\.,]?\s*$", self._contents, re.MULTILINE) # Discard everything after the first period + self._reviewer_text = match.group("reviewer") if match else None + + self._reviewer = self._committer_list.committer_by_name(self._reviewer_text) + self._author = self._committer_list.committer_by_email(self._author_email) or self._committer_list.committer_by_name(self._author_name) + + def author_name(self): + return self._author_name + + def author_email(self): + return self._author_email + + def author(self): + return self._author # Might be None + + # FIXME: Eventually we would like to map reviwer names to reviewer objects. + # See https://bugs.webkit.org/show_bug.cgi?id=26533 + def reviewer_text(self): + return self._reviewer_text + + def reviewer(self): + return self._reviewer # Might be None + + def contents(self): + return self._contents + + def bug_id(self): + return parse_bug_id(self._contents) + + +# FIXME: Various methods on ChangeLog should move into ChangeLogEntry instead. 
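ChangeLogEntry does its work in _parse_entry(): the leading date line supplies the author name and e-mail, and a "Reviewed by" line, when present, supplies the reviewer text that is then looked up in the committer list. A small illustrative use of the class; the entry text below is fabricated and only mimics the expected format.

from webkitpy.common.checkout.changelog import ChangeLogEntry

# A fabricated, minimal entry in the usual ChangeLog shape.
entry_text = u"""2011-01-01  Jane Doe  <jane@example.com>

        Reviewed by John Roe.

        Example change description.

        * Some/File.cpp:
"""

entry = ChangeLogEntry(entry_text)
print entry.author_name()    # "Jane Doe"
print entry.author_email()   # "jane@example.com"
print entry.reviewer_text()  # "John Roe"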
+class ChangeLog(object): + + def __init__(self, path): + self.path = path + + _changelog_indent = " " * 8 + + @staticmethod + def parse_latest_entry_from_file(changelog_file): + """changelog_file must be a file-like object which returns + unicode strings. Use codecs.open or StringIO(unicode()) + to pass file objects to this class.""" + date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp) + entry_lines = [] + # The first line should be a date line. + first_line = changelog_file.readline() + assert(isinstance(first_line, unicode)) + if not date_line_regexp.match(first_line): + return None + entry_lines.append(first_line) + + for line in changelog_file: + # If we've hit the next entry, return. + if date_line_regexp.match(line): + # Remove the extra newline at the end + return ChangeLogEntry(''.join(entry_lines[:-1])) + entry_lines.append(line) + return None # We never found a date line! + + def latest_entry(self): + # ChangeLog files are always UTF-8, we read them in as such to support Reviewers with unicode in their names. + changelog_file = codecs.open(self.path, "r", "utf-8") + try: + return self.parse_latest_entry_from_file(changelog_file) + finally: + changelog_file.close() + + # _wrap_line and _wrap_lines exist to work around + # http://bugs.python.org/issue1859 + + def _wrap_line(self, line): + return textwrap.fill(line, + width=70, + initial_indent=self._changelog_indent, + # Don't break urls which may be longer than width. + break_long_words=False, + subsequent_indent=self._changelog_indent) + + # Workaround as suggested by guido in + # http://bugs.python.org/issue1859#msg60040 + + def _wrap_lines(self, message): + lines = [self._wrap_line(line) for line in message.splitlines()] + return "\n".join(lines) + + # This probably does not belong in changelogs.py + def _message_for_revert(self, revision_list, reason, bug_url): + message = "Unreviewed, rolling out %s.\n" % join_with_separators(['r' + str(revision) for revision in revision_list]) + for revision in revision_list: + message += "%s\n" % urls.view_revision_url(revision) + if bug_url: + message += "%s\n" % bug_url + # Add an extra new line after the rollout links, before any reason. + message += "\n" + if reason: + message += "%s\n\n" % reason + return self._wrap_lines(message) + + def update_for_revert(self, revision_list, reason, bug_url=None): + reviewed_by_regexp = re.compile( + "%sReviewed by NOBODY \(OOPS!\)\." % self._changelog_indent) + removing_boilerplate = False + # inplace=1 creates a backup file and re-directs stdout to the file + for line in fileinput.FileInput(self.path, inplace=1): + if reviewed_by_regexp.search(line): + message_lines = self._message_for_revert(revision_list, + reason, + bug_url) + print reviewed_by_regexp.sub(message_lines, line), + # Remove all the ChangeLog boilerplate between the Reviewed by + # line and the first changed file. 
+ removing_boilerplate = True + elif removing_boilerplate: + if line.find('*') >= 0: # each changed file is preceded by a * + removing_boilerplate = False + + if not removing_boilerplate: + print line, + + def set_reviewer(self, reviewer): + # inplace=1 creates a backup file and re-directs stdout to the file + for line in fileinput.FileInput(self.path, inplace=1): + # Trailing comma suppresses printing newline + print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")), + + def set_short_description_and_bug_url(self, short_description, bug_url): + message = "%s\n %s" % (short_description, bug_url) + for line in fileinput.FileInput(self.path, inplace=1): + print line.replace("Need a short description and bug URL (OOPS!)", message.encode("utf-8")), diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py new file mode 100644 index 0000000..20c6cfa --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py @@ -0,0 +1,230 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import codecs +import os +import tempfile +import unittest + +from StringIO import StringIO + +from webkitpy.common.checkout.changelog import * + + +class ChangeLogTest(unittest.TestCase): + + _example_entry = u'''2009-08-17 Peter Kasting <pkasting@google.com> + + Reviewed by Tor Arne Vestb\xf8. + + https://bugs.webkit.org/show_bug.cgi?id=27323 + Only add Cygwin to the path when it isn't already there. This avoids + causing problems for people who purposefully have non-Cygwin versions of + executables like svn in front of the Cygwin ones in their paths. + + * DumpRenderTree/win/DumpRenderTree.vcproj: + * DumpRenderTree/win/ImageDiff.vcproj: + * DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj: +''' + + # More example text than we need. Eventually we need to support parsing this all and write tests for the parsing. 
+ _example_changelog = u"""2009-08-17 Tor Arne Vestb\xf8 <vestbo@webkit.org> + + <http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN() + + Reviewed by David Levin. + + * Scripts/modules/cpp_style.py: + (_ERROR_CATEGORIES): Added 'runtime/max_min_macros'. + (check_max_min_macros): Added. Returns level 4 error when MAX() + and MIN() macros are used in header files and C++ source files. + (check_style): Added call to check_max_min_macros(). + * Scripts/modules/cpp_style_unittest.py: Added unit tests. + (test_max_macro): Added. + (test_min_macro): Added. + +2009-08-16 David Kilzer <ddkilzer@apple.com> + + Backed out r47343 which was mistakenly committed + + * Scripts/bugzilla-tool: + * Scripts/modules/scm.py: + +2009-06-18 Darin Adler <darin@apple.com> + + Rubber stamped by Mark Rowe. + + * DumpRenderTree/mac/DumpRenderTreeWindow.mm: + (-[DumpRenderTreeWindow close]): Resolved crashes seen during regression + tests. The close method can be called on a window that's already closed + so we can't assert here. + +== Rolled over to ChangeLog-2009-06-16 == +""" + + def test_latest_entry_parse(self): + changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog) + changelog_file = StringIO(changelog_contents) + latest_entry = ChangeLog.parse_latest_entry_from_file(changelog_file) + self.assertEquals(latest_entry.contents(), self._example_entry) + self.assertEquals(latest_entry.author_name(), "Peter Kasting") + self.assertEquals(latest_entry.author_email(), "pkasting@google.com") + self.assertEquals(latest_entry.reviewer_text(), u"Tor Arne Vestb\xf8") + self.assertTrue(latest_entry.reviewer()) # Make sure that our UTF8-based lookup of Tor works. + + @staticmethod + def _write_tmp_file_with_contents(byte_array): + assert(isinstance(byte_array, str)) + (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6 + with os.fdopen(file_descriptor, "w") as file: + file.write(byte_array) + return file_path + + @staticmethod + def _read_file_contents(file_path, encoding): + with codecs.open(file_path, "r", encoding) as file: + return file.read() + + _new_entry_boilerplate = '''2009-08-19 Eric Seidel <eric@webkit.org> + + Reviewed by NOBODY (OOPS!). + + Need a short description and bug URL (OOPS!) 
+ + * Scripts/bugzilla-tool: +''' + + def test_set_reviewer(self): + changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog) + changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8")) + reviewer_name = 'Test Reviewer' + ChangeLog(changelog_path).set_reviewer(reviewer_name) + actual_contents = self._read_file_contents(changelog_path, "utf-8") + expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name) + os.remove(changelog_path) + self.assertEquals(actual_contents, expected_contents) + + def test_set_short_description_and_bug_url(self): + changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog) + changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8")) + short_description = "A short description" + bug_url = "http://example.com/b/2344" + ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url) + actual_contents = self._read_file_contents(changelog_path, "utf-8") + expected_message = "%s\n %s" % (short_description, bug_url) + expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message) + os.remove(changelog_path) + self.assertEquals(actual_contents, expected_contents) + + _revert_message = """ Unreviewed, rolling out r12345. + http://trac.webkit.org/changeset/12345 + http://example.com/123 + + This is a very long reason which should be long enough so that + _message_for_revert will need to wrap it. We'll also include + a + https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 + link so that we can make sure we wrap that right too. +""" + + def test_message_for_revert(self): + changelog = ChangeLog("/fake/path") + long_reason = "This is a very long reason which should be long enough so that _message_for_revert will need to wrap it. We'll also include a https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 link so that we can make sure we wrap that right too." + message = changelog._message_for_revert([12345], long_reason, "http://example.com/123") + self.assertEquals(message, self._revert_message) + + _revert_entry_with_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> + + Unreviewed, rolling out r12345. + http://trac.webkit.org/changeset/12345 + http://example.com/123 + + Reason + + * Scripts/bugzilla-tool: +''' + + _revert_entry_without_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> + + Unreviewed, rolling out r12345. + http://trac.webkit.org/changeset/12345 + + Reason + + * Scripts/bugzilla-tool: +''' + + _multiple_revert_entry_with_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> + + Unreviewed, rolling out r12345, r12346, and r12347. + http://trac.webkit.org/changeset/12345 + http://trac.webkit.org/changeset/12346 + http://trac.webkit.org/changeset/12347 + http://example.com/123 + + Reason + + * Scripts/bugzilla-tool: +''' + + _multiple_revert_entry_without_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org> + + Unreviewed, rolling out r12345, r12346, and r12347. 
+ http://trac.webkit.org/changeset/12345 + http://trac.webkit.org/changeset/12346 + http://trac.webkit.org/changeset/12347 + + Reason + + * Scripts/bugzilla-tool: +''' + + def _assert_update_for_revert_output(self, args, expected_entry): + changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog) + changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8")) + changelog = ChangeLog(changelog_path) + changelog.update_for_revert(*args) + actual_entry = changelog.latest_entry() + os.remove(changelog_path) + self.assertEquals(actual_entry.contents(), expected_entry) + self.assertEquals(actual_entry.reviewer_text(), None) + # These checks could be removed to allow this to work on other entries: + self.assertEquals(actual_entry.author_name(), "Eric Seidel") + self.assertEquals(actual_entry.author_email(), "eric@webkit.org") + + def test_update_for_revert(self): + self._assert_update_for_revert_output([[12345], "Reason"], self._revert_entry_without_bug_url) + self._assert_update_for_revert_output([[12345], "Reason", "http://example.com/123"], self._revert_entry_with_bug_url) + self._assert_update_for_revert_output([[12345, 12346, 12347], "Reason"], self._multiple_revert_entry_without_bug_url) + self._assert_update_for_revert_output([[12345, 12346, 12347], "Reason", "http://example.com/123"], self._multiple_revert_entry_with_bug_url) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py new file mode 100644 index 0000000..f121f36 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py @@ -0,0 +1,93 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# WebKit's python module for holding information on a commit + +from webkitpy.common.config import urls +from webkitpy.common.config.committers import CommitterList + + +class CommitInfo(object): + def __init__(self, revision, committer_email, changelog_data, committer_list=CommitterList()): + self._revision = revision + self._committer_email = committer_email + self._bug_id = changelog_data["bug_id"] + self._author_name = changelog_data["author_name"] + self._author_email = changelog_data["author_email"] + self._author = changelog_data["author"] + self._reviewer_text = changelog_data["reviewer_text"] + self._reviewer = changelog_data["reviewer"] + + # Derived values: + self._committer = committer_list.committer_by_email(committer_email) + + def revision(self): + return self._revision + + def committer(self): + return self._committer # None if committer isn't in committers.py + + def committer_email(self): + return self._committer_email + + def bug_id(self): + return self._bug_id # May be None + + def author(self): + return self._author # May be None + + def author_name(self): + return self._author_name + + def author_email(self): + return self._author_email + + def reviewer(self): + return self._reviewer # May be None + + def reviewer_text(self): + return self._reviewer_text # May be None + + def responsible_parties(self): + responsible_parties = [ + self.committer(), + self.author(), + self.reviewer(), + ] + return set([party for party in responsible_parties if party]) # Filter out None + + # FIXME: It is slightly lame that this "view" method is on this "model" class (in MVC terms) + def blame_string(self, bugs): + string = "r%s:\n" % self.revision() + string += " %s\n" % urls.view_revision_url(self.revision()) + string += " Bug: %s (%s)\n" % (self.bug_id(), bugs.bug_url_for_bug_id(self.bug_id())) + author_line = "\"%s\" <%s>" % (self.author_name(), self.author_email()) + string += " Author: %s\n" % (self.author() or author_line) + string += " Reviewer: %s\n" % (self.reviewer() or self.reviewer_text()) + string += " Committer: %s" % self.committer() + return string diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py new file mode 100644 index 0000000..f58e6f1 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py @@ -0,0 +1,61 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.checkout.commitinfo import CommitInfo +from webkitpy.common.config.committers import CommitterList, Committer, Reviewer + +class CommitInfoTest(unittest.TestCase): + + def test_commit_info_creation(self): + author = Committer("Author", "author@example.com") + committer = Committer("Committer", "committer@example.com") + reviewer = Reviewer("Reviewer", "reviewer@example.com") + committer_list = CommitterList(committers=[author, committer], reviewers=[reviewer]) + + changelog_data = { + "bug_id": 1234, + "author_name": "Committer", + "author_email": "author@example.com", + "author": author, + "reviewer_text": "Reviewer", + "reviewer": reviewer, + } + commit = CommitInfo(123, "committer@example.com", changelog_data, committer_list) + + self.assertEqual(commit.revision(), 123) + self.assertEqual(commit.bug_id(), 1234) + self.assertEqual(commit.author_name(), "Committer") + self.assertEqual(commit.author_email(), "author@example.com") + self.assertEqual(commit.author(), author) + self.assertEqual(commit.reviewer_text(), "Reviewer") + self.assertEqual(commit.reviewer(), reviewer) + self.assertEqual(commit.committer(), committer) + self.assertEqual(commit.committer_email(), "committer@example.com") + self.assertEqual(commit.responsible_parties(), set([author, committer, reviewer])) diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py new file mode 100644 index 0000000..a6ea756 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py @@ -0,0 +1,181 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""WebKit's Python module for interacting with patches.""" + +import logging +import re + +_log = logging.getLogger("webkitpy.common.checkout.diff_parser") + + +# FIXME: This is broken. We should compile our regexps up-front +# instead of using a custom cache. +_regexp_compile_cache = {} + + +# FIXME: This function should be removed. +def match(pattern, string): + """Matches the string with the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = re.compile(pattern) + return _regexp_compile_cache[pattern].match(string) + + +# FIXME: This belongs on DiffParser (e.g. as to_svn_diff()). +def git_diff_to_svn_diff(line): + """Converts a git formatted diff line to a svn formatted line. + + Args: + line: A string representing a line of the diff. + """ + # FIXME: This list should be a class member on DiffParser. + # These regexp patterns should be compiled once instead of every time. + conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"), + ("^new file.*", lambda matched: "\n"), + ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"), + ("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"), + ("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n")) + + for pattern, conversion in conversion_patterns: + matched = match(pattern, line) + if matched: + return conversion(matched) + return line + + +# FIXME: This method belongs on DiffParser +def get_diff_converter(first_diff_line): + """Gets a converter function of diff lines. + + Args: + first_diff_line: The first filename line of a diff file. + If this line is git formatted, we'll return a + converter from git to SVN. + """ + if match(r"^diff --git \w/", first_diff_line): + return git_diff_to_svn_diff + return lambda input: input + + +_INITIAL_STATE = 1 +_DECLARED_FILE_PATH = 2 +_PROCESSING_CHUNK = 3 + + +class DiffFile(object): + """Contains the information for one file in a patch. + + The field "lines" is a list which contains tuples in this format: + (deleted_line_number, new_line_number, line_string) + If deleted_line_number is zero, it means this line is newly added. + If new_line_number is zero, it means this line is deleted. + """ + # FIXME: Tuples generally grow into classes. We should consider + # adding a DiffLine object. + + def added_or_modified_line_numbers(self): + # This logic was moved from patchreader.py, but may not be + # the right API for this object long-term. 
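Given the (deleted_line_number, new_line_number, line) tuple convention documented on DiffFile, a deleted_line_number of 0 marks a newly added line, so the return below keeps only the new-file line numbers of added and modified lines. A minimal illustration using the DiffFile helpers defined further down (the data is made up):

    f = DiffFile("WebCore/foo.cpp")
    f.add_new_line(10, "new code")            # stored as (0, 10, "new code")
    f.add_unchanged_line(11, 11, "context")   # stored as (11, 11, "context")
    f.added_or_modified_line_numbers()        # -> [10]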
+ return [line[1] for line in self.lines if not line[0]] + + def __init__(self, filename): + self.filename = filename + self.lines = [] + + def add_new_line(self, line_number, line): + self.lines.append((0, line_number, line)) + + def add_deleted_line(self, line_number, line): + self.lines.append((line_number, 0, line)) + + def add_unchanged_line(self, deleted_line_number, new_line_number, line): + self.lines.append((deleted_line_number, new_line_number, line)) + + +class DiffParser(object): + """A parser for a patch file. + + The field "files" is a dict whose key is the filename and value is + a DiffFile object. + """ + + # FIXME: This function is way too long and needs to be broken up. + def __init__(self, diff_input): + """Parses a diff. + + Args: + diff_input: An iterable object. + """ + state = _INITIAL_STATE + + self.files = {} + current_file = None + old_diff_line = None + new_diff_line = None + for line in diff_input: + line = line.rstrip("\n") + if state == _INITIAL_STATE: + transform_line = get_diff_converter(line) + line = transform_line(line) + + file_declaration = match(r"^Index: (?P<FilePath>.+)", line) + if file_declaration: + filename = file_declaration.group('FilePath') + current_file = DiffFile(filename) + self.files[filename] = current_file + state = _DECLARED_FILE_PATH + continue + + lines_changed = match(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@", line) + if lines_changed: + if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK: + _log.error('Unexpected line change without file path ' + 'declaration: %r' % line) + old_diff_line = int(lines_changed.group('OldStartLine')) + new_diff_line = int(lines_changed.group('NewStartLine')) + state = _PROCESSING_CHUNK + continue + + if state == _PROCESSING_CHUNK: + if line.startswith('+'): + current_file.add_new_line(new_diff_line, line[1:]) + new_diff_line += 1 + elif line.startswith('-'): + current_file.add_deleted_line(old_diff_line, line[1:]) + old_diff_line += 1 + elif line.startswith(' '): + current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:]) + old_diff_line += 1 + new_diff_line += 1 + elif line == '\\ No newline at end of file': + # Nothing to do. We may still have some added lines. + pass + else: + _log.error('Unexpected diff format when parsing a ' + 'chunk: %r' % line) diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py new file mode 100644 index 0000000..7eb0eab --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py @@ -0,0 +1,146 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
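As a quick illustration of the header conversion implemented in diff_parser.py above, a rough sketch (the path is made up):

    from webkitpy.common.checkout.diff_parser import git_diff_to_svn_diff

    git_diff_to_svn_diff("diff --git a/WebCore/foo.cpp b/WebCore/foo.cpp")
    # -> "Index: WebCore/foo.cpp\n"
    git_diff_to_svn_diff("some unrecognized line")
    # -> returned unchanged when no conversion pattern matches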
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest +import diff_parser +import re + + +class DiffParserTest(unittest.TestCase): + + _PATCH = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h +index f5d5e74..3b6aa92 100644 +--- a/WebCore/rendering/style/StyleFlexibleBoxData.h ++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h +@@ -47,7 +47,6 @@ public: + + unsigned align : 3; // EBoxAlignment + unsigned pack: 3; // EBoxAlignment +- unsigned orient: 1; // EBoxOrient + unsigned lines : 1; // EBoxLines + + private: +diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp +index ce21720..324929e 100644 +--- a/WebCore/rendering/style/StyleRareInheritedData.cpp ++++ b/WebCore/rendering/style/StyleRareInheritedData.cpp +@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData() + , textSizeAdjust(RenderStyle::initialTextSizeAdjust()) + , resize(RenderStyle::initialResize()) + , userSelect(RenderStyle::initialUserSelect()) ++ , boxOrient(RenderStyle::initialBoxOrient()) + { + } + +@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o) + , textSizeAdjust(o.textSizeAdjust) + , resize(o.resize) + , userSelect(o.userSelect) ++ , boxOrient(o.boxOrient) + { + } + +@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const + && khtmlLineBreak == o.khtmlLineBreak + && textSizeAdjust == o.textSizeAdjust + && resize == o.resize +- && userSelect == o.userSelect; ++ && userSelect == o.userSelect ++ && boxOrient == o.boxOrient; + } + + bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const +diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum +new file mode 100644 +index 0000000..6db26bd +--- /dev/null ++++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum +@@ -0,0 +1 @@ ++61a373ee739673a9dcd7bac62b9f182e +\ No newline at end of file +''' + + def test_diff_parser(self, parser = None): + if not parser: + parser = diff_parser.DiffParser(self._PATCH.splitlines()) + self.assertEquals(3, len(parser.files)) + + self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files) + diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h'] + self.assertEquals(7, len(diff.lines)) + # The first two unchaged lines. 
+ self.assertEquals((47, 47), diff.lines[0][0:2]) + self.assertEquals('', diff.lines[0][2]) + self.assertEquals((48, 48), diff.lines[1][0:2]) + self.assertEquals(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2]) + # The deleted line + self.assertEquals((50, 0), diff.lines[3][0:2]) + self.assertEquals(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2]) + + # The first file looks OK. Let's check the next, more complicated file. + self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files) + diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp'] + # There are 3 chunks. + self.assertEquals(7 + 7 + 9, len(diff.lines)) + # Around an added line. + self.assertEquals((60, 61), diff.lines[9][0:2]) + self.assertEquals((0, 62), diff.lines[10][0:2]) + self.assertEquals((61, 63), diff.lines[11][0:2]) + # Look through the last chunk, which contains both add's and delete's. + self.assertEquals((81, 83), diff.lines[14][0:2]) + self.assertEquals((82, 84), diff.lines[15][0:2]) + self.assertEquals((83, 85), diff.lines[16][0:2]) + self.assertEquals((84, 0), diff.lines[17][0:2]) + self.assertEquals((0, 86), diff.lines[18][0:2]) + self.assertEquals((0, 87), diff.lines[19][0:2]) + self.assertEquals((85, 88), diff.lines[20][0:2]) + self.assertEquals((86, 89), diff.lines[21][0:2]) + self.assertEquals((87, 90), diff.lines[22][0:2]) + + # Check if a newly added file is correctly handled. + diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum'] + self.assertEquals(1, len(diff.lines)) + self.assertEquals((0, 1), diff.lines[0][0:2]) + + def test_git_mnemonicprefix(self): + p = re.compile(r' ([a|b])/') + + prefixes = [ + { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree) + { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree) + { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex) + { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity) + { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2)) + ] + + for prefix in prefixes: + patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], self._PATCH) + self.test_diff_parser(diff_parser.DiffParser(patch.splitlines())) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/checkout/scm.py b/Tools/Scripts/webkitpy/common/checkout/scm.py new file mode 100644 index 0000000..c54fb42 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/scm.py @@ -0,0 +1,941 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Python module for interacting with an SCM system (like SVN or Git) + +import os +import re +import sys +import shutil + +from webkitpy.common.system.executive import Executive, run_command, ScriptError +from webkitpy.common.system.deprecated_logging import error, log +from webkitpy.common.memoized import memoized + + +def find_checkout_root(): + """Returns the current checkout root (as determined by default_scm(). + + Returns the absolute path to the top of the WebKit checkout, or None + if it cannot be determined. + + """ + scm_system = default_scm() + if scm_system: + return scm_system.checkout_root + return None + + +def default_scm(): + """Return the default SCM object as determined by the CWD and running code. + + Returns the default SCM object for the current working directory; if the + CWD is not in a checkout, then we attempt to figure out if the SCM module + itself is part of a checkout, and return that one. If neither is part of + a checkout, None is returned. 
+ + """ + cwd = os.getcwd() + scm_system = detect_scm_system(cwd) + if not scm_system: + script_directory = os.path.dirname(os.path.abspath(__file__)) + scm_system = detect_scm_system(script_directory) + if scm_system: + log("The current directory (%s) is not a WebKit checkout, using %s" % (cwd, scm_system.checkout_root)) + else: + error("FATAL: Failed to determine the SCM system for either %s or %s" % (cwd, script_directory)) + return scm_system + + +def detect_scm_system(path): + absolute_path = os.path.abspath(path) + + if SVN.in_working_directory(absolute_path): + return SVN(cwd=absolute_path) + + if Git.in_working_directory(absolute_path): + return Git(cwd=absolute_path) + + return None + + +def first_non_empty_line_after_index(lines, index=0): + first_non_empty_line = index + for line in lines[index:]: + if re.match("^\s*$", line): + first_non_empty_line += 1 + else: + break + return first_non_empty_line + + +class CommitMessage: + def __init__(self, message): + self.message_lines = message[first_non_empty_line_after_index(message, 0):] + + def body(self, lstrip=False): + lines = self.message_lines[first_non_empty_line_after_index(self.message_lines, 1):] + if lstrip: + lines = [line.lstrip() for line in lines] + return "\n".join(lines) + "\n" + + def description(self, lstrip=False, strip_url=False): + line = self.message_lines[0] + if lstrip: + line = line.lstrip() + if strip_url: + line = re.sub("^(\s*)<.+> ", "\1", line) + return line + + def message(self): + return "\n".join(self.message_lines) + "\n" + + +class CheckoutNeedsUpdate(ScriptError): + def __init__(self, script_args, exit_code, output, cwd): + ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd) + + +def commit_error_handler(error): + if re.search("resource out of date", error.output): + raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd) + Executive.default_error_handler(error) + + +class AuthenticationError(Exception): + def __init__(self, server_host): + self.server_host = server_host + + +class AmbiguousCommitError(Exception): + def __init__(self, num_local_commits, working_directory_is_clean): + self.num_local_commits = num_local_commits + self.working_directory_is_clean = working_directory_is_clean + + +# SCM methods are expected to return paths relative to self.checkout_root. +class SCM: + def __init__(self, cwd): + self.cwd = cwd + self.checkout_root = self.find_checkout_root(self.cwd) + self.dryrun = False + + # A wrapper used by subclasses to create processes. + def run(self, args, cwd=None, input=None, error_handler=None, return_exit_code=False, return_stderr=True, decode_output=True): + # FIXME: We should set cwd appropriately. + # FIXME: We should use Executive. + return run_command(args, + cwd=cwd, + input=input, + error_handler=error_handler, + return_exit_code=return_exit_code, + return_stderr=return_stderr, + decode_output=decode_output) + + # SCM always returns repository relative path, but sometimes we need + # absolute paths to pass to rm, etc. + def absolute_path(self, repository_relative_path): + return os.path.join(self.checkout_root, repository_relative_path) + + # FIXME: This belongs in Checkout, not SCM. + def scripts_directory(self): + return os.path.join(self.checkout_root, "Tools", "Scripts") + + # FIXME: This belongs in Checkout, not SCM. 
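A minimal sketch of how the module-level helpers above are typically driven (the path is illustrative):

    from webkitpy.common.checkout.scm import detect_scm_system, default_scm

    scm = detect_scm_system("/Volumes/Data/WebKit")   # -> an SVN or Git object, or None
    if scm:
        print scm.display_name(), scm.checkout_root
    scm = default_scm()   # falls back to the checkout that contains webkitpy itself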
+ def script_path(self, script_name): + return os.path.join(self.scripts_directory(), script_name) + + def ensure_clean_working_directory(self, force_clean): + if not force_clean and not self.working_directory_is_clean(): + # FIXME: Shouldn't this use cwd=self.checkout_root? + print self.run(self.status_command(), error_handler=Executive.ignore_error) + raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.") + + log("Cleaning working directory") + self.clean_working_directory() + + def ensure_no_local_commits(self, force): + if not self.supports_local_commits(): + return + commits = self.local_commits() + if not len(commits): + return + if not force: + error("Working directory has local commits, pass --force-clean to continue.") + self.discard_local_commits() + + def run_status_and_extract_filenames(self, status_command, status_regexp): + filenames = [] + # We run with cwd=self.checkout_root so that returned-paths are root-relative. + for line in self.run(status_command, cwd=self.checkout_root).splitlines(): + match = re.search(status_regexp, line) + if not match: + continue + # status = match.group('status') + filename = match.group('filename') + filenames.append(filename) + return filenames + + def strip_r_from_svn_revision(self, svn_revision): + match = re.match("^r(?P<svn_revision>\d+)", unicode(svn_revision)) + if (match): + return match.group('svn_revision') + return svn_revision + + def svn_revision_from_commit_text(self, commit_text): + match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE) + return match.group('svn_revision') + + @staticmethod + def _subclass_must_implement(): + raise NotImplementedError("subclasses must implement") + + @staticmethod + def in_working_directory(path): + SCM._subclass_must_implement() + + @staticmethod + def find_checkout_root(path): + SCM._subclass_must_implement() + + @staticmethod + def commit_success_regexp(): + SCM._subclass_must_implement() + + def working_directory_is_clean(self): + self._subclass_must_implement() + + def clean_working_directory(self): + self._subclass_must_implement() + + def status_command(self): + self._subclass_must_implement() + + def add(self, path, return_exit_code=False): + self._subclass_must_implement() + + def delete(self, path): + self._subclass_must_implement() + + def changed_files(self, git_commit=None): + self._subclass_must_implement() + + def changed_files_for_revision(self, revision): + self._subclass_must_implement() + + def revisions_changing_file(self, path, limit=5): + self._subclass_must_implement() + + def added_files(self): + self._subclass_must_implement() + + def conflicted_files(self): + self._subclass_must_implement() + + def display_name(self): + self._subclass_must_implement() + + def create_patch(self, git_commit=None, changed_files=[]): + self._subclass_must_implement() + + def committer_email_for_revision(self, revision): + self._subclass_must_implement() + + def contents_at_revision(self, path, revision): + self._subclass_must_implement() + + def diff_for_revision(self, revision): + self._subclass_must_implement() + + def diff_for_file(self, path, log=None): + self._subclass_must_implement() + + def show_head(self, path): + self._subclass_must_implement() + + def apply_reverse_diff(self, revision): + self._subclass_must_implement() + + def revert_files(self, file_paths): + self._subclass_must_implement() + + def commit_with_message(self, message, username=None, git_commit=None, force_squash=False): + 
self._subclass_must_implement() + + def svn_commit_log(self, svn_revision): + self._subclass_must_implement() + + def last_svn_commit_log(self): + self._subclass_must_implement() + + # Subclasses must indicate if they support local commits, + # but the SCM baseclass will only call local_commits methods when this is true. + @staticmethod + def supports_local_commits(): + SCM._subclass_must_implement() + + def remote_merge_base(): + SCM._subclass_must_implement() + + def commit_locally_with_message(self, message): + error("Your source control manager does not support local commits.") + + def discard_local_commits(self): + pass + + def local_commits(self): + return [] + + +class SVN(SCM): + # FIXME: We should move these values to a WebKit-specific config. file. + svn_server_host = "svn.webkit.org" + svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge" + + def __init__(self, cwd): + SCM.__init__(self, cwd) + self._bogus_dir = None + + @staticmethod + def in_working_directory(path): + return os.path.isdir(os.path.join(path, '.svn')) + + @classmethod + def find_uuid(cls, path): + if not cls.in_working_directory(path): + return None + return cls.value_from_svn_info(path, 'Repository UUID') + + @classmethod + def value_from_svn_info(cls, path, field_name): + svn_info_args = ['svn', 'info', path] + info_output = run_command(svn_info_args).rstrip() + match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE) + if not match: + raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name) + return match.group('value') + + @staticmethod + def find_checkout_root(path): + uuid = SVN.find_uuid(path) + # If |path| is not in a working directory, we're supposed to return |path|. + if not uuid: + return path + # Search up the directory hierarchy until we find a different UUID. + last_path = None + while True: + if uuid != SVN.find_uuid(path): + return last_path + last_path = path + (path, last_component) = os.path.split(path) + if last_path == path: + return None + + @staticmethod + def commit_success_regexp(): + return "^Committed revision (?P<svn_revision>\d+)\.$" + + def has_authorization_for_realm(self, realm=svn_server_realm, home_directory=os.getenv("HOME")): + # Assumes find and grep are installed. + if not os.path.isdir(os.path.join(home_directory, ".subversion")): + return False + find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"]; + find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip() + return find_output and os.path.isfile(os.path.join(home_directory, find_output)) + + @memoized + def svn_version(self): + return self.run(['svn', '--version', '--quiet']) + + def working_directory_is_clean(self): + return self.run(["svn", "diff"], cwd=self.checkout_root, decode_output=False) == "" + + def clean_working_directory(self): + # Make sure there are no locks lying around from a previously aborted svn invocation. + # This is slightly dangerous, as it's possible the user is running another svn process + # on this checkout at the same time. However, it's much more likely that we're running + # under windows and svn just sucks (or the user interrupted svn and it failed to clean up). + self.run(["svn", "cleanup"], cwd=self.checkout_root) + + # svn revert -R is not as awesome as git reset --hard. + # It will leave added files around, causing later svn update + # calls to fail on the bots. 
We make this mirror git reset --hard + # by deleting any added files as well. + added_files = reversed(sorted(self.added_files())) + # added_files() returns directories for SVN, we walk the files in reverse path + # length order so that we remove files before we try to remove the directories. + self.run(["svn", "revert", "-R", "."], cwd=self.checkout_root) + for path in added_files: + # This is robust against cwd != self.checkout_root + absolute_path = self.absolute_path(path) + # Completely lame that there is no easy way to remove both types with one call. + if os.path.isdir(path): + os.rmdir(absolute_path) + else: + os.remove(absolute_path) + + def status_command(self): + return ['svn', 'status'] + + def _status_regexp(self, expected_types): + field_count = 6 if self.svn_version() > "1.6" else 5 + return "^(?P<status>[%s]).{%s} (?P<filename>.+)$" % (expected_types, field_count) + + def _add_parent_directories(self, path): + """Does 'svn add' to the path and its parents.""" + if self.in_working_directory(path): + return + dirname = os.path.dirname(path) + # We have dirname directry - ensure it added. + if dirname != path: + self._add_parent_directories(dirname) + self.add(path) + + def add(self, path, return_exit_code=False): + self._add_parent_directories(os.path.dirname(os.path.abspath(path))) + return self.run(["svn", "add", path], return_exit_code=return_exit_code) + + def delete(self, path): + parent, base = os.path.split(os.path.abspath(path)) + return self.run(["svn", "delete", "--force", base], cwd=parent) + + def changed_files(self, git_commit=None): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("ACDMR")) + + def changed_files_for_revision(self, revision): + # As far as I can tell svn diff --summarize output looks just like svn status output. + # No file contents printed, thus utf-8 auto-decoding in self.run is fine. + status_command = ["svn", "diff", "--summarize", "-c", revision] + return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR")) + + def revisions_changing_file(self, path, limit=5): + revisions = [] + # svn log will exit(1) (and thus self.run will raise) if the path does not exist. + log_command = ['svn', 'log', '--quiet', '--limit=%s' % limit, path] + for line in self.run(log_command, cwd=self.checkout_root).splitlines(): + match = re.search('^r(?P<revision>\d+) ', line) + if not match: + continue + revisions.append(int(match.group('revision'))) + return revisions + + def conflicted_files(self): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("C")) + + def added_files(self): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A")) + + def deleted_files(self): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D")) + + @staticmethod + def supports_local_commits(): + return False + + def display_name(self): + return "svn" + + # FIXME: This method should be on Checkout. + def create_patch(self, git_commit=None, changed_files=[]): + """Returns a byte array (str()) representing the patch file. 
+ Patch files are effectively binary since they may contain + files of multiple different encodings.""" + return self.run([self.script_path("svn-create-patch")] + changed_files, + cwd=self.checkout_root, return_stderr=False, + decode_output=False) + + def committer_email_for_revision(self, revision): + return self.run(["svn", "propget", "svn:author", "--revprop", "-r", revision]).rstrip() + + def contents_at_revision(self, path, revision): + """Returns a byte array (str()) containing the contents + of path @ revision in the repository.""" + remote_path = "%s/%s" % (self._repository_url(), path) + return self.run(["svn", "cat", "-r", revision, remote_path], decode_output=False) + + def diff_for_revision(self, revision): + # FIXME: This should probably use cwd=self.checkout_root + return self.run(['svn', 'diff', '-c', revision]) + + def _bogus_dir_name(self): + if sys.platform.startswith("win"): + parent_dir = tempfile.gettempdir() + else: + parent_dir = sys.path[0] # tempdir is not secure. + return os.path.join(parent_dir, "temp_svn_config") + + def _setup_bogus_dir(self, log): + self._bogus_dir = self._bogus_dir_name() + if not os.path.exists(self._bogus_dir): + os.mkdir(self._bogus_dir) + self._delete_bogus_dir = True + else: + self._delete_bogus_dir = False + if log: + log.debug(' Html: temp config dir: "%s".', self._bogus_dir) + + def _teardown_bogus_dir(self, log): + if self._delete_bogus_dir: + shutil.rmtree(self._bogus_dir, True) + if log: + log.debug(' Html: removed temp config dir: "%s".', self._bogus_dir) + self._bogus_dir = None + + def diff_for_file(self, path, log=None): + self._setup_bogus_dir(log) + try: + args = ['svn', 'diff'] + if self._bogus_dir: + args += ['--config-dir', self._bogus_dir] + args.append(path) + return self.run(args) + finally: + self._teardown_bogus_dir(log) + + def show_head(self, path): + return self.run(['svn', 'cat', '-r', 'BASE', path], decode_output=False) + + def _repository_url(self): + return self.value_from_svn_info(self.checkout_root, 'URL') + + def apply_reverse_diff(self, revision): + # '-c -revision' applies the inverse diff of 'revision' + svn_merge_args = ['svn', 'merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()] + log("WARNING: svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.") + log("Running '%s'" % " ".join(svn_merge_args)) + # FIXME: Should this use cwd=self.checkout_root? + self.run(svn_merge_args) + + def revert_files(self, file_paths): + # FIXME: This should probably use cwd=self.checkout_root. + self.run(['svn', 'revert'] + file_paths) + + def commit_with_message(self, message, username=None, git_commit=None, force_squash=False): + # git-commit and force are not used by SVN. + if self.dryrun: + # Return a string which looks like a commit so that things which parse this output will succeed. + return "Dry run, no commit.\nCommitted revision 0." + + svn_commit_args = ["svn", "commit"] + + if not username and not self.has_authorization_for_realm(): + raise AuthenticationError(self.svn_server_host) + if username: + svn_commit_args.extend(["--username", username]) + + svn_commit_args.extend(["-m", message]) + # FIXME: Should this use cwd=self.checkout_root? 
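The string returned below is what callers later hand to SCM.svn_revision_from_commit_text(), which applies the commit_success_regexp() defined above; roughly (scm is an SVN instance, the revision number is made up):

    output = scm.commit_with_message("Fix the widget.")
    scm.svn_revision_from_commit_text(output)   # matches "Committed revision 74534." -> "74534"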
+ return self.run(svn_commit_args, error_handler=commit_error_handler) + + def svn_commit_log(self, svn_revision): + svn_revision = self.strip_r_from_svn_revision(svn_revision) + return self.run(['svn', 'log', '--non-interactive', '--revision', svn_revision]) + + def last_svn_commit_log(self): + # BASE is the checkout revision, HEAD is the remote repository revision + # http://svnbook.red-bean.com/en/1.0/ch03s03.html + return self.svn_commit_log('BASE') + + def propset(self, pname, pvalue, path): + dir, base = os.path.split(path) + return self.run(['svn', 'pset', pname, pvalue, base], cwd=dir) + + def propget(self, pname, path): + dir, base = os.path.split(path) + return self.run(['svn', 'pget', pname, base], cwd=dir).encode('utf-8').rstrip("\n") + + +# All git-specific logic should go here. +class Git(SCM): + def __init__(self, cwd): + SCM.__init__(self, cwd) + self._check_git_architecture() + + def _machine_is_64bit(self): + import platform + # This only is tested on Mac. + if not platform.mac_ver()[0]: + return False + + # platform.architecture()[0] can be '64bit' even if the machine is 32bit: + # http://mail.python.org/pipermail/pythonmac-sig/2009-September/021648.html + # Use the sysctl command to find out what the processor actually supports. + return self.run(['sysctl', '-n', 'hw.cpu64bit_capable']).rstrip() == '1' + + def _executable_is_64bit(self, path): + # Again, platform.architecture() fails us. On my machine + # git_bits = platform.architecture(executable=git_path, bits='default')[0] + # git_bits is just 'default', meaning the call failed. + file_output = self.run(['file', path]) + return re.search('x86_64', file_output) + + def _check_git_architecture(self): + if not self._machine_is_64bit(): + return + + # We could path-search entirely in python or with + # which.py (http://code.google.com/p/which), but this is easier: + git_path = self.run(['which', 'git']).rstrip() + if self._executable_is_64bit(git_path): + return + + webkit_dev_thead_url = "https://lists.webkit.org/pipermail/webkit-dev/2010-December/015249.html" + log("Warning: This machine is 64-bit, but the git binary (%s) does not support 64-bit.\nInstall a 64-bit git for better performance, see:\n%s\n" % (git_path, webkit_dev_thead_url)) + + @classmethod + def in_working_directory(cls, path): + return run_command(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true" + + @classmethod + def find_checkout_root(cls, path): + # "git rev-parse --show-cdup" would be another way to get to the root + (checkout_root, dot_git) = os.path.split(run_command(['git', 'rev-parse', '--git-dir'], cwd=(path or "./"))) + # If we were using 2.6 # checkout_root = os.path.relpath(checkout_root, path) + if not os.path.isabs(checkout_root): # Sometimes git returns relative paths + checkout_root = os.path.join(path, checkout_root) + return checkout_root + + @classmethod + def to_object_name(cls, filepath): + root_end_with_slash = os.path.join(cls.find_checkout_root(os.path.dirname(filepath)), '') + return filepath.replace(root_end_with_slash, '') + + @classmethod + def read_git_config(cls, key): + # FIXME: This should probably use cwd=self.checkout_root. 
+ # Pass --get-all for cases where the config has multiple values + return run_command(["git", "config", "--get-all", key], + error_handler=Executive.ignore_error).rstrip('\n') + + @staticmethod + def commit_success_regexp(): + return "^Committed r(?P<svn_revision>\d+)$" + + def discard_local_commits(self): + # FIXME: This should probably use cwd=self.checkout_root + self.run(['git', 'reset', '--hard', self.remote_branch_ref()]) + + def local_commits(self): + # FIXME: This should probably use cwd=self.checkout_root + return self.run(['git', 'log', '--pretty=oneline', 'HEAD...' + self.remote_branch_ref()]).splitlines() + + def rebase_in_progress(self): + return os.path.exists(os.path.join(self.checkout_root, '.git/rebase-apply')) + + def working_directory_is_clean(self): + # FIXME: This should probably use cwd=self.checkout_root + return self.run(['git', 'diff', 'HEAD', '--name-only']) == "" + + def clean_working_directory(self): + # FIXME: These should probably use cwd=self.checkout_root. + # Could run git clean here too, but that wouldn't match working_directory_is_clean + self.run(['git', 'reset', '--hard', 'HEAD']) + # Aborting rebase even though this does not match working_directory_is_clean + if self.rebase_in_progress(): + self.run(['git', 'rebase', '--abort']) + + def status_command(self): + # git status returns non-zero when there are changes, so we use git diff name --name-status HEAD instead. + # No file contents printed, thus utf-8 autodecoding in self.run is fine. + return ["git", "diff", "--name-status", "HEAD"] + + def _status_regexp(self, expected_types): + return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types + + def add(self, path, return_exit_code=False): + return self.run(["git", "add", path], return_exit_code=return_exit_code) + + def delete(self, path): + return self.run(["git", "rm", "-f", path]) + + def merge_base(self, git_commit): + if git_commit: + # Special-case HEAD.. to mean working-copy changes only. + if git_commit.upper() == 'HEAD..': + return 'HEAD' + + if '..' not in git_commit: + git_commit = git_commit + "^.." + git_commit + return git_commit + + return self.remote_merge_base() + + def changed_files(self, git_commit=None): + status_command = ['git', 'diff', '-r', '--name-status', '-C', '-M', "--no-ext-diff", "--full-index", self.merge_base(git_commit)] + return self.run_status_and_extract_filenames(status_command, self._status_regexp("ADM")) + + def _changes_files_for_commit(self, git_commit): + # --pretty="format:" makes git show not print the commit log header, + changed_files = self.run(["git", "show", "--pretty=format:", "--name-only", git_commit]).splitlines() + # instead it just prints a blank line at the top, so we skip the blank line: + return changed_files[1:] + + def changed_files_for_revision(self, revision): + commit_id = self.git_commit_from_svn_revision(revision) + return self._changes_files_for_commit(commit_id) + + def revisions_changing_file(self, path, limit=5): + # git rev-list head --remove-empty --limit=5 -- path would be equivalent. + commit_ids = self.run(["git", "log", "--remove-empty", "--pretty=format:%H", "-%s" % limit, "--", path]).splitlines() + return filter(lambda revision: revision, map(self.svn_revision_from_git_commit, commit_ids)) + + def conflicted_files(self): + # We do not need to pass decode_output for this diff command + # as we're passing --name-status which does not output any data. 
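Stepping back to merge_base() above, which normalizes the --git-commit argument, a few illustrative inputs (scm is a Git instance, names are made up):

    scm.merge_base(None)        # -> the merge base of the remote branch and HEAD
    scm.merge_base("HEAD..")    # -> "HEAD", i.e. working-copy changes only
    scm.merge_base("abc123")    # -> "abc123^..abc123", just that one commit
    scm.merge_base("A..B")      # -> "A..B", passed through unchanged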
+ status_command = ['git', 'diff', '--name-status', '-C', '-M', '--diff-filter=U'] + return self.run_status_and_extract_filenames(status_command, self._status_regexp("U")) + + def added_files(self): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A")) + + def deleted_files(self): + return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D")) + + @staticmethod + def supports_local_commits(): + return True + + def display_name(self): + return "git" + + def create_patch(self, git_commit=None, changed_files=[]): + """Returns a byte array (str()) representing the patch file. + Patch files are effectively binary since they may contain + files of multiple different encodings.""" + return self.run(['git', 'diff', '--binary', "--no-ext-diff", "--full-index", "-M", self.merge_base(git_commit), "--"] + changed_files, decode_output=False, cwd=self.checkout_root) + + def _run_git_svn_find_rev(self, arg): + # git svn find-rev always exits 0, even when the revision or commit is not found. + return self.run(['git', 'svn', 'find-rev', arg], cwd=self.checkout_root).rstrip() + + def _string_to_int_or_none(self, string): + try: + return int(string) + except ValueError, e: + return None + + @memoized + def git_commit_from_svn_revision(self, svn_revision): + git_commit = self._run_git_svn_find_rev('r%s' % svn_revision) + if not git_commit: + # FIXME: Alternatively we could offer to update the checkout? Or return None? + raise ScriptError(message='Failed to find git commit for revision %s, your checkout likely needs an update.' % svn_revision) + return git_commit + + @memoized + def svn_revision_from_git_commit(self, git_commit): + svn_revision = self._run_git_svn_find_rev(git_commit) + return self._string_to_int_or_none(svn_revision) + + def contents_at_revision(self, path, revision): + """Returns a byte array (str()) containing the contents + of path @ revision in the repository.""" + return self.run(["git", "show", "%s:%s" % (self.git_commit_from_svn_revision(revision), path)], decode_output=False) + + def diff_for_revision(self, revision): + git_commit = self.git_commit_from_svn_revision(revision) + return self.create_patch(git_commit) + + def diff_for_file(self, path, log=None): + return self.run(['git', 'diff', 'HEAD', '--', path]) + + def show_head(self, path): + return self.run(['git', 'show', 'HEAD:' + self.to_object_name(path)], decode_output=False) + + def committer_email_for_revision(self, revision): + git_commit = self.git_commit_from_svn_revision(revision) + committer_email = self.run(["git", "log", "-1", "--pretty=format:%ce", git_commit]) + # Git adds an extra @repository_hash to the end of every committer email, remove it: + return committer_email.rsplit("@", 1)[0] + + def apply_reverse_diff(self, revision): + # Assume the revision is an svn revision. + git_commit = self.git_commit_from_svn_revision(revision) + # I think this will always fail due to ChangeLogs. + self.run(['git', 'revert', '--no-commit', git_commit], error_handler=Executive.ignore_error) + + def revert_files(self, file_paths): + self.run(['git', 'checkout', 'HEAD'] + file_paths) + + def _assert_can_squash(self, working_directory_is_clean): + squash = Git.read_git_config('webkit-patch.commit-should-always-squash') + should_squash = squash and squash.lower() == "true" + + if not should_squash: + # Only warn if there are actually multiple commits to squash. 
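The two memoized git-svn lookups above map between SVN revisions and git commit ids; roughly (scm is a Git instance, the hash is made up):

    scm.git_commit_from_svn_revision(74534)      # -> "9fa1b6c..." or raises ScriptError if unmapped
    scm.svn_revision_from_git_commit("9fa1b6c")  # -> 74534, or None when git svn find-rev knows nothing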
+ num_local_commits = len(self.local_commits()) + if num_local_commits > 1 or (num_local_commits > 0 and not working_directory_is_clean): + raise AmbiguousCommitError(num_local_commits, working_directory_is_clean) + + def commit_with_message(self, message, username=None, git_commit=None, force_squash=False): + # Username is ignored during Git commits. + working_directory_is_clean = self.working_directory_is_clean() + + if git_commit: + # Special-case HEAD.. to mean working-copy changes only. + if git_commit.upper() == 'HEAD..': + if working_directory_is_clean: + raise ScriptError(message="The working copy is not modified. --git-commit=HEAD.. only commits working copy changes.") + self.commit_locally_with_message(message) + return self._commit_on_branch(message, 'HEAD') + + # Need working directory changes to be committed so we can checkout the merge branch. + if not working_directory_is_clean: + # FIXME: webkit-patch land will modify the ChangeLogs to correct the reviewer. + # That will modify the working-copy and cause us to hit this error. + # The ChangeLog modification could be made to modify the existing local commit. + raise ScriptError(message="Working copy is modified. Cannot commit individual git_commits.") + return self._commit_on_branch(message, git_commit) + + if not force_squash: + self._assert_can_squash(working_directory_is_clean) + self.run(['git', 'reset', '--soft', self.remote_merge_base()]) + self.commit_locally_with_message(message) + return self.push_local_commits_to_server() + + def _commit_on_branch(self, message, git_commit): + branch_ref = self.run(['git', 'symbolic-ref', 'HEAD']).strip() + branch_name = branch_ref.replace('refs/heads/', '') + commit_ids = self.commit_ids_from_commitish_arguments([git_commit]) + + # We want to squash all this branch's commits into one commit with the proper description. + # We do this by doing a "merge --squash" into a new commit branch, then dcommitting that. + MERGE_BRANCH_NAME = 'webkit-patch-land' + self.delete_branch(MERGE_BRANCH_NAME) + + # We might be in a directory that's present in this branch but not in the + # trunk. Move up to the top of the tree so that git commands that expect a + # valid CWD won't fail after we check out the merge branch. + os.chdir(self.checkout_root) + + # Stuff our change into the merge branch. + # We wrap in a try...finally block so if anything goes wrong, we clean up the branches. + commit_succeeded = True + try: + self.run(['git', 'checkout', '-q', '-b', MERGE_BRANCH_NAME, self.remote_branch_ref()]) + + for commit in commit_ids: + # We're on a different branch now, so convert "head" to the branch name. + commit = re.sub(r'(?i)head', branch_name, commit) + # FIXME: Once changed_files and create_patch are modified to separately handle each + # commit in a commit range, commit each cherry pick so they'll get dcommitted separately. + self.run(['git', 'cherry-pick', '--no-commit', commit]) + + self.run(['git', 'commit', '-m', message]) + output = self.push_local_commits_to_server() + except Exception, e: + log("COMMIT FAILED: " + str(e)) + output = "Commit failed." + commit_succeeded = False + finally: + # And then swap back to the original branch and clean up. 
+ self.clean_working_directory() + self.run(['git', 'checkout', '-q', branch_name]) + self.delete_branch(MERGE_BRANCH_NAME) + + return output + + def svn_commit_log(self, svn_revision): + svn_revision = self.strip_r_from_svn_revision(svn_revision) + return self.run(['git', 'svn', 'log', '-r', svn_revision]) + + def last_svn_commit_log(self): + return self.run(['git', 'svn', 'log', '--limit=1']) + + # Git-specific methods: + def _branch_ref_exists(self, branch_ref): + return self.run(['git', 'show-ref', '--quiet', '--verify', branch_ref], return_exit_code=True) == 0 + + def delete_branch(self, branch_name): + if self._branch_ref_exists('refs/heads/' + branch_name): + self.run(['git', 'branch', '-D', branch_name]) + + def remote_merge_base(self): + return self.run(['git', 'merge-base', self.remote_branch_ref(), 'HEAD']).strip() + + def remote_branch_ref(self): + # Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists. + remote_branch_refs = Git.read_git_config('svn-remote.svn.fetch') + if not remote_branch_refs: + remote_master_ref = 'refs/remotes/origin/master' + if not self._branch_ref_exists(remote_master_ref): + raise ScriptError(message="Can't find a branch to diff against. svn-remote.svn.fetch is not in the git config and %s does not exist" % remote_master_ref) + return remote_master_ref + + # FIXME: What's the right behavior when there are multiple svn-remotes listed? + # For now, just use the first one. + first_remote_branch_ref = remote_branch_refs.split('\n')[0] + return first_remote_branch_ref.split(':')[1] + + def commit_locally_with_message(self, message): + self.run(['git', 'commit', '--all', '-F', '-'], input=message) + + def push_local_commits_to_server(self): + dcommit_command = ['git', 'svn', 'dcommit'] + if self.dryrun: + dcommit_command.append('--dry-run') + output = self.run(dcommit_command, error_handler=commit_error_handler) + # Return a string which looks like a commit so that things which parse this output will succeed. + if self.dryrun: + output += "\nCommitted r0" + return output + + # This function supports the following argument formats: + # no args : rev-list trunk..HEAD + # A..B : rev-list A..B + # A...B : error! + # A B : [A, B] (different from git diff, which would use "rev-list A..B") + def commit_ids_from_commitish_arguments(self, args): + if not len(args): + args.append('%s..HEAD' % self.remote_branch_ref()) + + commit_ids = [] + for commitish in args: + if '...' in commitish: + raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish) + elif '..' in commitish: + commit_ids += reversed(self.run(['git', 'rev-list', commitish]).splitlines()) + else: + # Turn single commits or branch or tag names into commit ids. + commit_ids += self.run(['git', 'rev-parse', '--revs-only', commitish]).splitlines() + return commit_ids + + def commit_message_for_local_commit(self, commit_id): + commit_lines = self.run(['git', 'cat-file', 'commit', commit_id]).splitlines() + + # Skip the git headers. 
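To make the commitish argument formats listed above concrete (scm is a Git instance, names are made up):

    scm.commit_ids_from_commitish_arguments([])            # local commits on top of the remote branch, oldest first
    scm.commit_ids_from_commitish_arguments(["HEAD"])      # the single id HEAD resolves to
    scm.commit_ids_from_commitish_arguments(["r1..r2"])    # ids in that range, oldest first
    scm.commit_ids_from_commitish_arguments(["r1...r2"])   # raises ScriptError; '...' is not supported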
+ first_line_after_headers = 0 + for line in commit_lines: + first_line_after_headers += 1 + if line == "": + break + return CommitMessage(commit_lines[first_line_after_headers:]) + + def files_changed_summary_for_commit(self, commit_id): + return self.run(['git', 'diff-tree', '--shortstat', '--no-commit-id', commit_id]) diff --git a/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py new file mode 100644 index 0000000..8f24beb --- /dev/null +++ b/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py @@ -0,0 +1,1320 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import base64 +import codecs +import getpass +import os +import os.path +import re +import stat +import sys +import subprocess +import tempfile +import unittest +import urllib +import shutil + +from datetime import date +from webkitpy.common.checkout.api import Checkout +from webkitpy.common.checkout.scm import detect_scm_system, SCM, SVN, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError, AmbiguousCommitError, find_checkout_root, default_scm +from webkitpy.common.config.committers import Committer # FIXME: This should not be needed +from webkitpy.common.net.bugzilla import Attachment # FIXME: This should not be needed +from webkitpy.common.system.executive import Executive, run_command, ScriptError +from webkitpy.common.system.outputcapture import OutputCapture + +# Eventually we will want to write tests which work for both scms. (like update_webkit, changed_files, etc.) +# Perhaps through some SCMTest base-class which both SVNTest and GitTest inherit from. + +# FIXME: This should be unified into one of the executive.py commands! 
+# Callers could use run_and_throw_if_fail(args, cwd=cwd, quiet=True) +def run_silent(args, cwd=None): + # Note: Not thread safe: http://bugs.python.org/issue2320 + process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd) + process.communicate() # ignore output + exit_code = process.wait() + if exit_code: + raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd)) + + +def write_into_file_at_path(file_path, contents, encoding="utf-8"): + if encoding: + with codecs.open(file_path, "w", encoding) as file: + file.write(contents) + else: + with open(file_path, "w") as file: + file.write(contents) + + +def read_from_path(file_path, encoding="utf-8"): + with codecs.open(file_path, "r", encoding) as file: + return file.read() + + +def _make_diff(command, *args): + # We use this wrapper to disable output decoding. diffs should be treated as + # binary files since they may include text files of multiple differnet encodings. + return run_command([command, "diff"] + list(args), decode_output=False) + + +def _svn_diff(*args): + return _make_diff("svn", *args) + + +def _git_diff(*args): + return _make_diff("git", *args) + + +# Exists to share svn repository creation code between the git and svn tests +class SVNTestRepository: + @classmethod + def _svn_add(cls, path): + run_command(["svn", "add", path]) + + @classmethod + def _svn_commit(cls, message): + run_command(["svn", "commit", "--quiet", "--message", message]) + + @classmethod + def _setup_test_commits(cls, test_object): + # Add some test commits + os.chdir(test_object.svn_checkout_path) + + write_into_file_at_path("test_file", "test1") + cls._svn_add("test_file") + cls._svn_commit("initial commit") + + write_into_file_at_path("test_file", "test1test2") + # This used to be the last commit, but doing so broke + # GitTest.test_apply_git_patch which use the inverse diff of the last commit. + # svn-apply fails to remove directories in Git, see: + # https://bugs.webkit.org/show_bug.cgi?id=34871 + os.mkdir("test_dir") + # Slash should always be the right path separator since we use cygwin on Windows. + test_file3_path = "test_dir/test_file3" + write_into_file_at_path(test_file3_path, "third file") + cls._svn_add("test_dir") + cls._svn_commit("second commit") + + write_into_file_at_path("test_file", "test1test2test3\n") + write_into_file_at_path("test_file2", "second file") + cls._svn_add("test_file2") + cls._svn_commit("third commit") + + # This 4th commit is used to make sure that our patch file handling + # code correctly treats patches as binary and does not attempt to + # decode them assuming they're utf-8. + write_into_file_at_path("test_file", u"latin1 test: \u00A0\n", "latin1") + write_into_file_at_path("test_file2", u"utf-8 test: \u00A0\n", "utf-8") + cls._svn_commit("fourth commit") + + # svn does not seem to update after commit as I would expect. 
+ run_command(['svn', 'update']) + + @classmethod + def setup(cls, test_object): + # Create an test SVN repository + test_object.svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo") + test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path # Not sure this will work on windows + # git svn complains if we don't pass --pre-1.5-compatible, not sure why: + # Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477 + run_command(['svnadmin', 'create', '--pre-1.5-compatible', test_object.svn_repo_path]) + + # Create a test svn checkout + test_object.svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout") + run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url, test_object.svn_checkout_path]) + + # Create and checkout a trunk dir to match the standard svn configuration to match git-svn's expectations + os.chdir(test_object.svn_checkout_path) + os.mkdir('trunk') + cls._svn_add('trunk') + # We can add tags and branches as well if we ever need to test those. + cls._svn_commit('add trunk') + + # Change directory out of the svn checkout so we can delete the checkout directory. + # _setup_test_commits will CD back to the svn checkout directory. + os.chdir('/') + run_command(['rm', '-rf', test_object.svn_checkout_path]) + run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url + '/trunk', test_object.svn_checkout_path]) + + cls._setup_test_commits(test_object) + + @classmethod + def tear_down(cls, test_object): + run_command(['rm', '-rf', test_object.svn_repo_path]) + run_command(['rm', '-rf', test_object.svn_checkout_path]) + + # Now that we've deleted the checkout paths, cwddir may be invalid + # Change back to a valid directory so that later calls to os.getcwd() do not fail. + os.chdir(detect_scm_system(os.path.dirname(__file__)).checkout_root) + + +class StandaloneFunctionsTest(unittest.TestCase): + """This class tests any standalone/top-level functions in the package.""" + def setUp(self): + self.orig_cwd = os.path.abspath(os.getcwd()) + self.orig_abspath = os.path.abspath + + # We capture but ignore the output from stderr to reduce unwanted + # logging. + self.output = OutputCapture() + self.output.capture_output() + + def tearDown(self): + os.chdir(self.orig_cwd) + os.path.abspath = self.orig_abspath + self.output.restore_output() + + def test_find_checkout_root(self): + # Test from inside the tree. + os.chdir(sys.path[0]) + dir = find_checkout_root() + self.assertNotEqual(dir, None) + self.assertTrue(os.path.exists(dir)) + + # Test from outside the tree. + os.chdir(os.path.expanduser("~")) + dir = find_checkout_root() + self.assertNotEqual(dir, None) + self.assertTrue(os.path.exists(dir)) + + # Mock out abspath() to test being not in a checkout at all. + os.path.abspath = lambda x: "/" + self.assertRaises(SystemExit, find_checkout_root) + os.path.abspath = self.orig_abspath + + def test_default_scm(self): + # Test from inside the tree. + os.chdir(sys.path[0]) + scm = default_scm() + self.assertNotEqual(scm, None) + + # Test from outside the tree. + os.chdir(os.path.expanduser("~")) + dir = find_checkout_root() + self.assertNotEqual(dir, None) + + # Mock out abspath() to test being not in a checkout at all. + os.path.abspath = lambda x: "/" + self.assertRaises(SystemExit, default_scm) + os.path.abspath = self.orig_abspath + +# For testing the SCM baseclass directly. +class SCMClassTests(unittest.TestCase): + def setUp(self): + self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet. 
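SVNTestRepository above is the shared fixture for the SCM tests; a sketch of how a concrete test class might consume it (the class name is made up, and the real GitTest/SVNTest setup later in this file may differ):

    class ExampleSVNTest(SCMTest):
        def setUp(self):
            SVNTestRepository.setup(self)   # populates self.svn_repo_path and self.svn_checkout_path
            os.chdir(self.svn_checkout_path)
            self.scm = detect_scm_system(self.svn_checkout_path)

        def tearDown(self):
            SVNTestRepository.tear_down(self)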
+ + def tearDown(self): + self.dev_null.close() + + def test_run_command_with_pipe(self): + input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) + self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n") + + # Test the non-pipe case too: + self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n") + + command_returns_non_zero = ['/bin/sh', '--invalid-option'] + # Test when the input pipe process fails. + input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null) + self.assertTrue(input_process.poll() != 0) + self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout) + + # Test when the run_command process fails. + input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # grep shows usage and calls exit(2) when called w/o arguments. + self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout) + + def test_error_handlers(self): + git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469" + svn_failure_message="""svn: Commit failed (details follow): +svn: File or directory 'ChangeLog' is out of date; try updating +svn: resource out of date; try updating +""" + command_does_not_exist = ['does_not_exist', 'invalid_option'] + self.assertRaises(OSError, run_command, command_does_not_exist) + self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error) + + command_returns_non_zero = ['/bin/sh', '--invalid-option'] + self.assertRaises(ScriptError, run_command, command_returns_non_zero) + # Check if returns error text: + self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error)) + + self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message)) + self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message)) + self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah')) + + +# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass. +class SCMTest(unittest.TestCase): + def _create_patch(self, patch_contents): + # FIXME: This code is brittle if the Attachment API changes. + attachment = Attachment({"bug_id": 12345}, None) + attachment.contents = lambda: patch_contents + + joe_cool = Committer(name="Joe Cool", email_or_emails=None) + attachment.reviewer = lambda: joe_cool + + return attachment + + def _setup_webkittools_scripts_symlink(self, local_scm): + webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__))) + webkit_scripts_directory = webkit_scm.scripts_directory() + local_scripts_directory = local_scm.scripts_directory() + os.mkdir(os.path.dirname(local_scripts_directory)) + os.symlink(webkit_scripts_directory, local_scripts_directory) + + # Tests which both GitTest and SVNTest should run. 
+ # FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses + + def _shared_test_changed_files(self): + write_into_file_at_path("test_file", "changed content") + self.assertEqual(self.scm.changed_files(), ["test_file"]) + write_into_file_at_path("test_dir/test_file3", "new stuff") + self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"]) + old_cwd = os.getcwd() + os.chdir("test_dir") + # Validate that changed_files does not change with our cwd, see bug 37015. + self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"]) + os.chdir(old_cwd) + + def _shared_test_added_files(self): + write_into_file_at_path("test_file", "changed content") + self.assertEqual(self.scm.added_files(), []) + + write_into_file_at_path("added_file", "new stuff") + self.scm.add("added_file") + + os.mkdir("added_dir") + write_into_file_at_path("added_dir/added_file2", "new stuff") + self.scm.add("added_dir") + + # SVN reports directory changes, Git does not. + added_files = self.scm.added_files() + if "added_dir" in added_files: + added_files.remove("added_dir") + self.assertEqual(added_files, ["added_dir/added_file2", "added_file"]) + + # Test also to make sure clean_working_directory removes added files + self.scm.clean_working_directory() + self.assertEqual(self.scm.added_files(), []) + self.assertFalse(os.path.exists("added_file")) + self.assertFalse(os.path.exists("added_dir")) + + def _shared_test_changed_files_for_revision(self): + # SVN reports directory changes, Git does not. + changed_files = self.scm.changed_files_for_revision(3) + if "test_dir" in changed_files: + changed_files.remove("test_dir") + self.assertEqual(changed_files, ["test_dir/test_file3", "test_file"]) + self.assertEqual(sorted(self.scm.changed_files_for_revision(4)), sorted(["test_file", "test_file2"])) # Git and SVN return different orders. + self.assertEqual(self.scm.changed_files_for_revision(2), ["test_file"]) + + def _shared_test_contents_at_revision(self): + self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2") + self.assertEqual(self.scm.contents_at_revision("test_file", 4), "test1test2test3\n") + + # Verify that contents_at_revision returns a byte array, aka str(): + self.assertEqual(self.scm.contents_at_revision("test_file", 5), u"latin1 test: \u00A0\n".encode("latin1")) + self.assertEqual(self.scm.contents_at_revision("test_file2", 5), u"utf-8 test: \u00A0\n".encode("utf-8")) + + self.assertEqual(self.scm.contents_at_revision("test_file2", 4), "second file") + # Files which don't exist: + # Currently we raise instead of returning None because detecting the difference between + # "file not found" and any other error seems impossible with svn (git seems to expose such through the return code). 
+ self.assertRaises(ScriptError, self.scm.contents_at_revision, "test_file2", 2) + self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2) + + def _shared_test_revisions_changing_file(self): + self.assertEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2]) + self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file") + + def _shared_test_committer_email_for_revision(self): + self.assertEqual(self.scm.committer_email_for_revision(3), getpass.getuser()) # Committer "email" will be the current user + + def _shared_test_reverse_diff(self): + self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs + # Only test the simple case, as any other will end up with conflict markers. + self.scm.apply_reverse_diff('5') + self.assertEqual(read_from_path('test_file'), "test1test2test3\n") + + def _shared_test_diff_for_revision(self): + # Patch formats are slightly different between svn and git, so just regexp for things we know should be there. + r3_patch = self.scm.diff_for_revision(4) + self.assertTrue(re.search('test3', r3_patch)) + self.assertFalse(re.search('test4', r3_patch)) + self.assertTrue(re.search('test2', r3_patch)) + self.assertTrue(re.search('test2', self.scm.diff_for_revision(3))) + + def _shared_test_svn_apply_git_patch(self): + self._setup_webkittools_scripts_symlink(self.scm) + git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif +new file mode 100644 +index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90 +60151690 +GIT binary patch +literal 512 +zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c? +zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap +zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ +zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A +zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&) +zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b +zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB +z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X +z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4 +ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H + +literal 0 +HcmV?d00001 + +""" + self.checkout.apply_patch(self._create_patch(git_binary_addition)) + added = read_from_path('fizzbuzz7.gif', encoding=None) + self.assertEqual(512, len(added)) + self.assertTrue(added.startswith('GIF89a')) + self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files()) + + # The file already exists. + self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition)) + + git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif +index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7 +GIT binary patch +literal 7 +OcmYex&reD$;sO8*F9L)B + +literal 512 +zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c? 
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap +zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ +zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A +zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&) +zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b +zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB +z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X +z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4 +ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H + +""" + self.checkout.apply_patch(self._create_patch(git_binary_modification)) + modified = read_from_path('fizzbuzz7.gif', encoding=None) + self.assertEqual('foobar\n', modified) + self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files()) + + # Applying the same modification should fail. + self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification)) + + git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif +deleted file mode 100644 +index 323fae0..0000000 +GIT binary patch +literal 0 +HcmV?d00001 + +literal 7 +OcmYex&reD$;sO8*F9L)B + +""" + self.checkout.apply_patch(self._create_patch(git_binary_deletion)) + self.assertFalse(os.path.exists('fizzbuzz7.gif')) + self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files()) + + # Cannot delete again. + self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion)) + + def _shared_test_add_recursively(self): + os.mkdir("added_dir") + write_into_file_at_path("added_dir/added_file", "new stuff") + self.scm.add("added_dir/added_file") + self.assertTrue("added_dir/added_file" in self.scm.added_files()) + +class SVNTest(SCMTest): + + @staticmethod + def _set_date_and_reviewer(changelog_entry): + # Joe Cool matches the reviewer set in SCMTest._create_patch + changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool') + # svn-apply will update ChangeLog entries with today's date. + return changelog_entry.replace('DATE_HERE', date.today().isoformat()) + + def test_svn_apply(self): + first_entry = """2009-10-26 Eric Seidel <eric@webkit.org> + + Reviewed by Foo Bar. + + Most awesome change ever. + + * scm_unittest.py: +""" + intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org> + + Reviewed by Baz Bar. + + A more awesomer change yet! + + * scm_unittest.py: +""" + one_line_overlap_patch = """Index: ChangeLog +=================================================================== +--- ChangeLog (revision 5) ++++ ChangeLog (working copy) +@@ -1,5 +1,13 @@ + 2009-10-26 Eric Seidel <eric@webkit.org> + ++ Reviewed by NOBODY (OOPS!). ++ ++ Second most awesome change ever. ++ ++ * scm_unittest.py: ++ ++2009-10-26 Eric Seidel <eric@webkit.org> ++ + Reviewed by Foo Bar. + + Most awesome change ever. +""" + one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org> + + Reviewed by REVIEWER_HERE. + + Second most awesome change ever. + + * scm_unittest.py: +""" + two_line_overlap_patch = """Index: ChangeLog +=================================================================== +--- ChangeLog (revision 5) ++++ ChangeLog (working copy) +@@ -2,6 +2,14 @@ + + Reviewed by Foo Bar. + ++ Second most awesome change ever. ++ ++ * scm_unittest.py: ++ ++2009-10-26 Eric Seidel <eric@webkit.org> ++ ++ Reviewed by Foo Bar. ++ + Most awesome change ever. 
+ + * scm_unittest.py: +""" + two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org> + + Reviewed by Foo Bar. + + Second most awesome change ever. + + * scm_unittest.py: +""" + write_into_file_at_path('ChangeLog', first_entry) + run_command(['svn', 'add', 'ChangeLog']) + run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit']) + + # Patch files were created against just 'first_entry'. + # Add a second commit to make svn-apply have to apply the patches with fuzz. + changelog_contents = "%s\n%s" % (intermediate_entry, first_entry) + write_into_file_at_path('ChangeLog', changelog_contents) + run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit']) + + self._setup_webkittools_scripts_symlink(self.scm) + self.checkout.apply_patch(self._create_patch(one_line_overlap_patch)) + expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents) + self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents) + + self.scm.revert_files(['ChangeLog']) + self.checkout.apply_patch(self._create_patch(two_line_overlap_patch)) + expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents) + self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents) + + def setUp(self): + SVNTestRepository.setup(self) + os.chdir(self.svn_checkout_path) + self.scm = detect_scm_system(self.svn_checkout_path) + # For historical reasons, we test some checkout code here too. + self.checkout = Checkout(self.scm) + + def tearDown(self): + SVNTestRepository.tear_down(self) + + def test_detect_scm_system_relative_url(self): + scm = detect_scm_system(".") + # I wanted to assert that we got the right path, but there was some + # crazy magic with temp folder names that I couldn't figure out. + self.assertTrue(scm.checkout_root) + + def test_create_patch_is_full_patch(self): + test_dir_path = os.path.join(self.svn_checkout_path, "test_dir2") + os.mkdir(test_dir_path) + test_file_path = os.path.join(test_dir_path, 'test_file2') + write_into_file_at_path(test_file_path, 'test content') + run_command(['svn', 'add', 'test_dir2']) + + # create_patch depends on 'svn-create-patch', so make a dummy version. + scripts_path = os.path.join(self.svn_checkout_path, 'Tools', 'Scripts') + os.makedirs(scripts_path) + create_patch_path = os.path.join(scripts_path, 'svn-create-patch') + write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to prevent the \n, but not all echo accept -n. + os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR) + + # Change into our test directory and run the create_patch command. + os.chdir(test_dir_path) + scm = detect_scm_system(test_dir_path) + self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right. + patch_contents = scm.create_patch() + # Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo. + self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n. + + def test_detection(self): + scm = detect_scm_system(self.svn_checkout_path) + self.assertEqual(scm.display_name(), "svn") + self.assertEqual(scm.supports_local_commits(), False) + + def test_apply_small_binary_patch(self): + patch_contents = """Index: test_file.swf +=================================================================== +Cannot display: file marked as a binary type. 
+svn:mime-type = application/octet-stream + +Property changes on: test_file.swf +___________________________________________________________________ +Name: svn:mime-type + + application/octet-stream + + +Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA== +""" + expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==") + self._setup_webkittools_scripts_symlink(self.scm) + patch_file = self._create_patch(patch_contents) + self.checkout.apply_patch(patch_file) + actual_contents = read_from_path("test_file.swf", encoding=None) + self.assertEqual(actual_contents, expected_contents) + + def test_apply_svn_patch(self): + scm = detect_scm_system(self.svn_checkout_path) + patch = self._create_patch(_svn_diff("-r5:4")) + self._setup_webkittools_scripts_symlink(scm) + Checkout(scm).apply_patch(patch) + + def test_apply_svn_patch_force(self): + scm = detect_scm_system(self.svn_checkout_path) + patch = self._create_patch(_svn_diff("-r3:5")) + self._setup_webkittools_scripts_symlink(scm) + self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True) + + def test_commit_logs(self): + # Commits have dates and usernames in them, so we can't just direct compare. + self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log())) + self.assertTrue(re.search('second commit', self.scm.svn_commit_log(3))) + + def _shared_test_commit_with_message(self, username=None): + write_into_file_at_path('test_file', 'more test content') + commit_text = self.scm.commit_with_message("another test commit", username) + self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6') + + self.scm.dryrun = True + write_into_file_at_path('test_file', 'still more test content') + commit_text = self.scm.commit_with_message("yet another test commit", username) + self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0') + + def test_commit_text_parsing(self): + self._shared_test_commit_with_message() + + def test_commit_with_username(self): + self._shared_test_commit_with_message("dbates@webkit.org") + + def test_commit_without_authorization(self): + self.scm.has_authorization_for_realm = lambda: False + self.assertRaises(AuthenticationError, self._shared_test_commit_with_message) + + def test_has_authorization_for_realm(self): + scm = detect_scm_system(self.svn_checkout_path) + fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir") + svn_config_dir_path = os.path.join(fake_home_dir, ".subversion") + os.mkdir(svn_config_dir_path) + fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file") + write_into_file_at_path(fake_webkit_auth_file, SVN.svn_server_realm) + self.assertTrue(scm.has_authorization_for_realm(home_directory=fake_home_dir)) + os.remove(fake_webkit_auth_file) + os.rmdir(svn_config_dir_path) + os.rmdir(fake_home_dir) + + def test_not_have_authorization_for_realm(self): + scm = detect_scm_system(self.svn_checkout_path) + fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir") + svn_config_dir_path = os.path.join(fake_home_dir, ".subversion") + os.mkdir(svn_config_dir_path) + self.assertFalse(scm.has_authorization_for_realm(home_directory=fake_home_dir)) + os.rmdir(svn_config_dir_path) + os.rmdir(fake_home_dir) + + def test_reverse_diff(self): + self._shared_test_reverse_diff() + + def test_diff_for_revision(self): + self._shared_test_diff_for_revision() + + def test_svn_apply_git_patch(self): + self._shared_test_svn_apply_git_patch() + + def test_changed_files(self): + self._shared_test_changed_files() + + 
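# --- Editorial aside, not part of this patch ---------------------------------
# The two authorization tests above pin down only the observable contract of
# has_authorization_for_realm(): True when some file under <home>/.subversion
# mentions the server realm, False for an empty config directory. A minimal
# sketch consistent with that contract (the real implementation lives in the
# scm module elsewhere in this change and may well differ):
import os

def _has_authorization_for_realm_sketch(realm, home_directory):
    config_dir = os.path.join(home_directory, ".subversion")
    if not os.path.isdir(config_dir):
        return False
    for dirpath, _, filenames in os.walk(config_dir):
        for name in filenames:
            with open(os.path.join(dirpath, name)) as auth_file:
                if realm in auth_file.read():
                    return True
    return False
# ------------------------------------------------------------------------------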
def test_changed_files_for_revision(self): + self._shared_test_changed_files_for_revision() + + def test_added_files(self): + self._shared_test_added_files() + + def test_contents_at_revision(self): + self._shared_test_contents_at_revision() + + def test_revisions_changing_file(self): + self._shared_test_revisions_changing_file() + + def test_committer_email_for_revision(self): + self._shared_test_committer_email_for_revision() + + def test_add_recursively(self): + self._shared_test_add_recursively() + + def test_delete(self): + os.chdir(self.svn_checkout_path) + self.scm.delete("test_file") + self.assertTrue("test_file" in self.scm.deleted_files()) + + def test_propset_propget(self): + filepath = os.path.join(self.svn_checkout_path, "test_file") + expected_mime_type = "x-application/foo-bar" + self.scm.propset("svn:mime-type", expected_mime_type, filepath) + self.assertEqual(expected_mime_type, self.scm.propget("svn:mime-type", filepath)) + + def test_show_head(self): + write_into_file_at_path("test_file", u"Hello!", "utf-8") + SVNTestRepository._svn_commit("fourth commit") + self.assertEqual("Hello!", self.scm.show_head('test_file')) + + def test_show_head_binary(self): + data = "\244" + write_into_file_at_path("binary_file", data, encoding=None) + self.scm.add("binary_file") + self.scm.commit_with_message("a test commit") + self.assertEqual(data, self.scm.show_head('binary_file')) + + def do_test_diff_for_file(self): + write_into_file_at_path('test_file', 'some content') + self.scm.commit_with_message("a test commit") + diff = self.scm.diff_for_file('test_file') + self.assertEqual(diff, "") + + write_into_file_at_path("test_file", "changed content") + diff = self.scm.diff_for_file('test_file') + self.assertTrue("-some content" in diff) + self.assertTrue("+changed content" in diff) + + def clean_bogus_dir(self): + self.bogus_dir = self.scm._bogus_dir_name() + if os.path.exists(self.bogus_dir): + shutil.rmtree(self.bogus_dir) + + def test_diff_for_file_with_existing_bogus_dir(self): + self.clean_bogus_dir() + os.mkdir(self.bogus_dir) + self.do_test_diff_for_file() + self.assertTrue(os.path.exists(self.bogus_dir)) + shutil.rmtree(self.bogus_dir) + + def test_diff_for_file_with_missing_bogus_dir(self): + self.clean_bogus_dir() + self.do_test_diff_for_file() + self.assertFalse(os.path.exists(self.bogus_dir)) + + def test_svn_lock(self): + svn_root_lock_path = ".svn/lock" + write_into_file_at_path(svn_root_lock_path, "", "utf-8") + # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here. + self.assertRaises(ScriptError, run_command, ['svn', 'update']) + self.scm.clean_working_directory() + self.assertFalse(os.path.exists(svn_root_lock_path)) + run_command(['svn', 'update']) # Should succeed and not raise. + + +class GitTest(SCMTest): + + def setUp(self): + """Sets up fresh git repository with one commit. 
Then setups a second git + repo that tracks the first one.""" + self.original_dir = os.getcwd() + + self.untracking_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout2") + run_command(['git', 'init', self.untracking_checkout_path]) + + os.chdir(self.untracking_checkout_path) + write_into_file_at_path('foo_file', 'foo') + run_command(['git', 'add', 'foo_file']) + run_command(['git', 'commit', '-am', 'dummy commit']) + self.untracking_scm = detect_scm_system(self.untracking_checkout_path) + + self.tracking_git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout") + run_command(['git', 'clone', '--quiet', self.untracking_checkout_path, self.tracking_git_checkout_path]) + os.chdir(self.tracking_git_checkout_path) + self.tracking_scm = detect_scm_system(self.tracking_git_checkout_path) + + def tearDown(self): + # Change back to a valid directory so that later calls to os.getcwd() do not fail. + os.chdir(self.original_dir) + run_command(['rm', '-rf', self.tracking_git_checkout_path]) + run_command(['rm', '-rf', self.untracking_checkout_path]) + + def test_remote_branch_ref(self): + self.assertEqual(self.tracking_scm.remote_branch_ref(), 'refs/remotes/origin/master') + + os.chdir(self.untracking_checkout_path) + self.assertRaises(ScriptError, self.untracking_scm.remote_branch_ref) + + def test_multiple_remotes(self): + run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote1']) + run_command(['git', 'config', '--add', 'svn-remote.svn.fetch', 'trunk:remote2']) + self.assertEqual(self.tracking_scm.remote_branch_ref(), 'remote1') + +class GitSVNTest(SCMTest): + + def _setup_git_checkout(self): + self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout") + # --quiet doesn't make git svn silent, so we use run_silent to redirect output + run_silent(['git', 'svn', 'clone', '-T', 'trunk', self.svn_repo_url, self.git_checkout_path]) + os.chdir(self.git_checkout_path) + + def _tear_down_git_checkout(self): + # Change back to a valid directory so that later calls to os.getcwd() do not fail. + os.chdir(self.original_dir) + run_command(['rm', '-rf', self.git_checkout_path]) + + def setUp(self): + self.original_dir = os.getcwd() + + SVNTestRepository.setup(self) + self._setup_git_checkout() + self.scm = detect_scm_system(self.git_checkout_path) + # For historical reasons, we test some checkout code here too. 
+ self.checkout = Checkout(self.scm) + + def tearDown(self): + SVNTestRepository.tear_down(self) + self._tear_down_git_checkout() + + def test_detection(self): + scm = detect_scm_system(self.git_checkout_path) + self.assertEqual(scm.display_name(), "git") + self.assertEqual(scm.supports_local_commits(), True) + + def test_read_git_config(self): + key = 'test.git-config' + value = 'git-config value' + run_command(['git', 'config', key, value]) + self.assertEqual(self.scm.read_git_config(key), value) + + def test_local_commits(self): + test_file = os.path.join(self.git_checkout_path, 'test_file') + write_into_file_at_path(test_file, 'foo') + run_command(['git', 'commit', '-a', '-m', 'local commit']) + + self.assertEqual(len(self.scm.local_commits()), 1) + + def test_discard_local_commits(self): + test_file = os.path.join(self.git_checkout_path, 'test_file') + write_into_file_at_path(test_file, 'foo') + run_command(['git', 'commit', '-a', '-m', 'local commit']) + + self.assertEqual(len(self.scm.local_commits()), 1) + self.scm.discard_local_commits() + self.assertEqual(len(self.scm.local_commits()), 0) + + def test_delete_branch(self): + new_branch = 'foo' + + run_command(['git', 'checkout', '-b', new_branch]) + self.assertEqual(run_command(['git', 'symbolic-ref', 'HEAD']).strip(), 'refs/heads/' + new_branch) + + run_command(['git', 'checkout', '-b', 'bar']) + self.scm.delete_branch(new_branch) + + self.assertFalse(re.search(r'foo', run_command(['git', 'branch']))) + + def test_remote_merge_base(self): + # Diff to merge-base should include working-copy changes, + # which the diff to svn_branch.. doesn't. + test_file = os.path.join(self.git_checkout_path, 'test_file') + write_into_file_at_path(test_file, 'foo') + + diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..') + diff_to_merge_base = _git_diff(self.scm.remote_merge_base()) + + self.assertFalse(re.search(r'foo', diff_to_common_base)) + self.assertTrue(re.search(r'foo', diff_to_merge_base)) + + def test_rebase_in_progress(self): + svn_test_file = os.path.join(self.svn_checkout_path, 'test_file') + write_into_file_at_path(svn_test_file, "svn_checkout") + run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path) + + git_test_file = os.path.join(self.git_checkout_path, 'test_file') + write_into_file_at_path(git_test_file, "git_checkout") + run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort']) + + # --quiet doesn't make git svn silent, so use run_silent to redirect output + self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase. + + scm = detect_scm_system(self.git_checkout_path) + self.assertTrue(scm.rebase_in_progress()) + + # Make sure our cleanup works. + scm.clean_working_directory() + self.assertFalse(scm.rebase_in_progress()) + + # Make sure cleanup doesn't throw when no rebase is in progress. + scm.clean_working_directory() + + def test_commitish_parsing(self): + scm = detect_scm_system(self.git_checkout_path) + + # Multiple revisions are cherry-picked. + self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1) + self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2) + + # ... 
is an invalid range specifier + self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD']) + + def test_commitish_order(self): + scm = detect_scm_system(self.git_checkout_path) + + commit_range = 'HEAD~3..HEAD' + + actual_commits = scm.commit_ids_from_commitish_arguments([commit_range]) + expected_commits = [] + expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines()) + + self.assertEqual(actual_commits, expected_commits) + + def test_apply_git_patch(self): + scm = detect_scm_system(self.git_checkout_path) + # We carefullly pick a diff which does not have a directory addition + # as currently svn-apply will error out when trying to remove directories + # in Git: https://bugs.webkit.org/show_bug.cgi?id=34871 + patch = self._create_patch(_git_diff('HEAD..HEAD^')) + self._setup_webkittools_scripts_symlink(scm) + Checkout(scm).apply_patch(patch) + + def test_apply_git_patch_force(self): + scm = detect_scm_system(self.git_checkout_path) + patch = self._create_patch(_git_diff('HEAD~2..HEAD')) + self._setup_webkittools_scripts_symlink(scm) + self.assertRaises(ScriptError, Checkout(scm).apply_patch, patch, force=True) + + def test_commit_text_parsing(self): + write_into_file_at_path('test_file', 'more test content') + commit_text = self.scm.commit_with_message("another test commit") + self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6') + + self.scm.dryrun = True + write_into_file_at_path('test_file', 'still more test content') + commit_text = self.scm.commit_with_message("yet another test commit") + self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0') + + def test_commit_with_message_working_copy_only(self): + write_into_file_at_path('test_file_commit1', 'more test content') + run_command(['git', 'add', 'test_file_commit1']) + scm = detect_scm_system(self.git_checkout_path) + commit_text = scm.commit_with_message("yet another test commit") + + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def _local_commit(self, filename, contents, message): + write_into_file_at_path(filename, contents) + run_command(['git', 'add', filename]) + self.scm.commit_locally_with_message(message) + + def _one_local_commit(self): + self._local_commit('test_file_commit1', 'more test content', 'another test commit') + + def _one_local_commit_plus_working_copy_changes(self): + self._one_local_commit() + write_into_file_at_path('test_file_commit2', 'still more test content') + run_command(['git', 'add', 'test_file_commit2']) + + def _two_local_commits(self): + self._one_local_commit() + self._local_commit('test_file_commit2', 'still more test content', 'yet another test commit') + + def _three_local_commits(self): + self._local_commit('test_file_commit0', 'more test content', 'another test commit') + self._two_local_commits() + + def test_revisions_changing_files_with_local_commit(self): + self._one_local_commit() + self.assertEquals(self.scm.revisions_changing_file('test_file_commit1'), []) + + def test_commit_with_message(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit") + commit_text = scm.commit_with_message("yet another test commit", force_squash=True) + + 
self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def test_commit_with_message_git_commit(self): + self._two_local_commits() + + scm = detect_scm_system(self.git_checkout_path) + commit_text = scm.commit_with_message("another test commit", git_commit="HEAD^") + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + self.assertFalse(re.search(r'test_file_commit2', svn_log)) + + def test_commit_with_message_git_commit_range(self): + self._three_local_commits() + + scm = detect_scm_system(self.git_checkout_path) + commit_text = scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD") + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertFalse(re.search(r'test_file_commit0', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + + def test_changed_files_working_copy_only(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + commit_text = scm.commit_with_message("another test commit", git_commit="HEAD..") + self.assertFalse(re.search(r'test_file_commit1', svn_log)) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + + def test_commit_with_message_only_local_commit(self): + self._one_local_commit() + scm = detect_scm_system(self.git_checkout_path) + commit_text = scm.commit_with_message("another test commit") + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def test_commit_with_message_multiple_local_commits_and_working_copy(self): + self._two_local_commits() + write_into_file_at_path('test_file_commit1', 'working copy change') + scm = detect_scm_system(self.git_checkout_path) + + self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit") + commit_text = scm.commit_with_message("another test commit", force_squash=True) + + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def test_commit_with_message_git_commit_and_working_copy(self): + self._two_local_commits() + write_into_file_at_path('test_file_commit1', 'working copy change') + scm = detect_scm_system(self.git_checkout_path) + self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", git_commit="HEAD^") + + def test_commit_with_message_multiple_local_commits_always_squash(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + scm._assert_can_squash = lambda working_directory_is_clean: True + commit_text = scm.commit_with_message("yet another test commit") + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def 
test_commit_with_message_multiple_local_commits(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit") + commit_text = scm.commit_with_message("yet another test commit", force_squash=True) + + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def test_commit_with_message_not_synced(self): + run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3']) + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit") + commit_text = scm.commit_with_message("another test commit", force_squash=True) + + self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6') + + svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose']) + self.assertFalse(re.search(r'test_file2', svn_log)) + self.assertTrue(re.search(r'test_file_commit2', svn_log)) + self.assertTrue(re.search(r'test_file_commit1', svn_log)) + + def test_commit_with_message_not_synced_with_conflict(self): + run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3']) + self._local_commit('test_file2', 'asdf', 'asdf commit') + + scm = detect_scm_system(self.git_checkout_path) + # There's a conflict between trunk and the test_file2 modification. + self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", force_squash=True) + + def test_remote_branch_ref(self): + self.assertEqual(self.scm.remote_branch_ref(), 'refs/remotes/trunk') + + def test_reverse_diff(self): + self._shared_test_reverse_diff() + + def test_diff_for_revision(self): + self._shared_test_diff_for_revision() + + def test_svn_apply_git_patch(self): + self._shared_test_svn_apply_git_patch() + + def test_create_patch_local_plus_working_copy(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch() + self.assertTrue(re.search(r'test_file_commit1', patch)) + self.assertTrue(re.search(r'test_file_commit2', patch)) + + def test_create_patch(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch() + self.assertTrue(re.search(r'test_file_commit2', patch)) + self.assertTrue(re.search(r'test_file_commit1', patch)) + + def test_create_patch_with_changed_files(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch(changed_files=['test_file_commit2']) + self.assertTrue(re.search(r'test_file_commit2', patch)) + + def test_create_patch_with_rm_and_changed_files(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + os.remove('test_file_commit1') + patch = scm.create_patch() + patch_with_changed_files = scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2']) + self.assertEquals(patch, patch_with_changed_files) + + def test_create_patch_git_commit(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch(git_commit="HEAD^") + self.assertTrue(re.search(r'test_file_commit1', patch)) + self.assertFalse(re.search(r'test_file_commit2', patch)) + + 
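# --- Editorial aside, not part of this patch ---------------------------------
# The git_commit tests in this class exercise three spellings: a bare commit
# ("HEAD^") meaning just that commit's change, an explicit range
# ("HEAD~2..HEAD"), and "HEAD.." for working-copy-only changes. A hypothetical
# helper showing how such an argument could be normalised to a git diff range;
# the real parsing lives in the scm module and may handle more cases:
def _to_git_diff_range(git_commit):
    if ".." in git_commit:
        return git_commit  # already a range (or "X.." for the working copy); use verbatim
    return "%s^..%s" % (git_commit, git_commit)  # single commit -> the diff it introduced

# _to_git_diff_range("HEAD^")        -> "HEAD^^..HEAD^"
# _to_git_diff_range("HEAD~2..HEAD") -> unchanged
# ------------------------------------------------------------------------------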
def test_create_patch_git_commit_range(self): + self._three_local_commits() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch(git_commit="HEAD~2..HEAD") + self.assertFalse(re.search(r'test_file_commit0', patch)) + self.assertTrue(re.search(r'test_file_commit2', patch)) + self.assertTrue(re.search(r'test_file_commit1', patch)) + + def test_create_patch_working_copy_only(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch(git_commit="HEAD..") + self.assertFalse(re.search(r'test_file_commit1', patch)) + self.assertTrue(re.search(r'test_file_commit2', patch)) + + def test_create_patch_multiple_local_commits(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch() + self.assertTrue(re.search(r'test_file_commit2', patch)) + self.assertTrue(re.search(r'test_file_commit1', patch)) + + def test_create_patch_not_synced(self): + run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3']) + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + patch = scm.create_patch() + self.assertFalse(re.search(r'test_file2', patch)) + self.assertTrue(re.search(r'test_file_commit2', patch)) + self.assertTrue(re.search(r'test_file_commit1', patch)) + + def test_create_binary_patch(self): + # Create a git binary patch and check the contents. + scm = detect_scm_system(self.git_checkout_path) + test_file_name = 'binary_file' + test_file_path = os.path.join(self.git_checkout_path, test_file_name) + file_contents = ''.join(map(chr, range(256))) + write_into_file_at_path(test_file_path, file_contents, encoding=None) + run_command(['git', 'add', test_file_name]) + patch = scm.create_patch() + self.assertTrue(re.search(r'\nliteral 0\n', patch)) + self.assertTrue(re.search(r'\nliteral 256\n', patch)) + + # Check if we can apply the created patch. + run_command(['git', 'rm', '-f', test_file_name]) + self._setup_webkittools_scripts_symlink(scm) + self.checkout.apply_patch(self._create_patch(patch)) + self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None)) + + # Check if we can create a patch from a local commit. 
+ write_into_file_at_path(test_file_path, file_contents, encoding=None) + run_command(['git', 'add', test_file_name]) + run_command(['git', 'commit', '-m', 'binary diff']) + patch_from_local_commit = scm.create_patch('HEAD') + self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit)) + self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit)) + + def test_changed_files_local_plus_working_copy(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files() + self.assertTrue('test_file_commit1' in files) + self.assertTrue('test_file_commit2' in files) + + def test_changed_files_git_commit(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files(git_commit="HEAD^") + self.assertTrue('test_file_commit1' in files) + self.assertFalse('test_file_commit2' in files) + + def test_changed_files_git_commit_range(self): + self._three_local_commits() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files(git_commit="HEAD~2..HEAD") + self.assertTrue('test_file_commit0' not in files) + self.assertTrue('test_file_commit1' in files) + self.assertTrue('test_file_commit2' in files) + + def test_changed_files_working_copy_only(self): + self._one_local_commit_plus_working_copy_changes() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files(git_commit="HEAD..") + self.assertFalse('test_file_commit1' in files) + self.assertTrue('test_file_commit2' in files) + + def test_changed_files_multiple_local_commits(self): + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files() + self.assertTrue('test_file_commit2' in files) + self.assertTrue('test_file_commit1' in files) + + def test_changed_files_not_synced(self): + run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3']) + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files() + self.assertFalse('test_file2' in files) + self.assertTrue('test_file_commit2' in files) + self.assertTrue('test_file_commit1' in files) + + def test_changed_files_not_synced(self): + run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3']) + self._two_local_commits() + scm = detect_scm_system(self.git_checkout_path) + files = scm.changed_files() + self.assertFalse('test_file2' in files) + self.assertTrue('test_file_commit2' in files) + self.assertTrue('test_file_commit1' in files) + + def test_changed_files(self): + self._shared_test_changed_files() + + def test_changed_files_for_revision(self): + self._shared_test_changed_files_for_revision() + + def test_contents_at_revision(self): + self._shared_test_contents_at_revision() + + def test_revisions_changing_file(self): + self._shared_test_revisions_changing_file() + + def test_added_files(self): + self._shared_test_added_files() + + def test_committer_email_for_revision(self): + self._shared_test_committer_email_for_revision() + + def test_add_recursively(self): + self._shared_test_add_recursively() + + def test_delete(self): + self._two_local_commits() + self.scm.delete('test_file_commit1') + self.assertTrue("test_file_commit1" in self.scm.deleted_files()) + + def test_to_object_name(self): + relpath = 'test_file_commit1' + fullpath = os.path.join(self.git_checkout_path, relpath) + self._two_local_commits() + self.assertEqual(relpath, self.scm.to_object_name(fullpath)) + + def test_show_head(self): + self._two_local_commits() + 
self.assertEqual("more test content", self.scm.show_head('test_file_commit1')) + + def test_show_head_binary(self): + self._two_local_commits() + data = "\244" + write_into_file_at_path("binary_file", data, encoding=None) + self.scm.add("binary_file") + self.scm.commit_locally_with_message("a test commit") + self.assertEqual(data, self.scm.show_head('binary_file')) + + def test_diff_for_file(self): + self._two_local_commits() + write_into_file_at_path('test_file_commit1', "Updated", encoding=None) + + diff = self.scm.diff_for_file('test_file_commit1') + cached_diff = self.scm.diff_for_file('test_file_commit1') + self.assertTrue("+Updated" in diff) + self.assertTrue("-more test content" in diff) + + self.scm.add('test_file_commit1') + + cached_diff = self.scm.diff_for_file('test_file_commit1') + self.assertTrue("+Updated" in cached_diff) + self.assertTrue("-more test content" in cached_diff) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/config/__init__.py b/Tools/Scripts/webkitpy/common/config/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/common/config/build.py b/Tools/Scripts/webkitpy/common/config/build.py new file mode 100644 index 0000000..2a432ce --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/build.py @@ -0,0 +1,136 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Functions relating to building WebKit""" + +import re + + +def _should_file_trigger_build(target_platform, file): + # The directories and patterns lists below map directory names or + # regexp patterns to the bot platforms for which they should trigger a + # build. Mapping to the empty list means that no builds should be + # triggered on any platforms. Earlier directories/patterns take + # precendence over later ones. + + # FIXME: The patterns below have only been verified to be correct on + # Windows. We should implement this for other platforms and start using + # it for their bots. 
Someone familiar with each platform will have to + # figure out what the right set of directories/patterns is for that + # platform. + assert(target_platform == "win") + + directories = [ + # Directories that shouldn't trigger builds on any bots. + ("PageLoadTests", []), + ("WebCore/manual-tests", []), + ("Examples", []), + ("Websites", []), + ("android", []), + ("brew", []), + ("efl", []), + ("haiku", []), + ("iphone", []), + ("opengl", []), + ("opentype", []), + ("openvg", []), + ("wx", []), + ("wince", []), + + # Directories that should trigger builds on only some bots. + ("JavaScriptGlue", ["mac"]), + ("LayoutTests/platform/mac", ["mac", "win"]), + ("LayoutTests/platform/mac-snowleopard", ["mac-snowleopard", "win"]), + ("WebCore/image-decoders", ["chromium"]), + ("cairo", ["gtk", "wincairo"]), + ("cf", ["chromium-mac", "mac", "qt", "win"]), + ("chromium", ["chromium"]), + ("cocoa", ["chromium-mac", "mac"]), + ("curl", ["gtk", "wincairo"]), + ("gobject", ["gtk"]), + ("gpu", ["chromium", "mac"]), + ("gstreamer", ["gtk"]), + ("gtk", ["gtk"]), + ("mac", ["chromium-mac", "mac"]), + ("mac-leopard", ["mac-leopard"]), + ("mac-snowleopard", ["mac-snowleopard"]), + ("mac-wk2", ["mac-snowleopard", "win"]), + ("objc", ["mac"]), + ("qt", ["qt"]), + ("skia", ["chromium"]), + ("soup", ["gtk"]), + ("v8", ["chromium"]), + ("win", ["chromium-win", "win"]), + ] + patterns = [ + # Patterns that shouldn't trigger builds on any bots. + (r"(?:^|/)Makefile$", []), + (r"/ARM", []), + (r"/CMake.*", []), + (r"/ChangeLog.*$", []), + (r"/LICENSE[^/]+$", []), + (r"ARM(?:v7)?\.(?:cpp|h)$", []), + (r"MIPS\.(?:cpp|h)$", []), + (r"WinCE\.(?:cpp|h|mm)$", []), + (r"\.(?:bkl|mk)$", []), + + # Patterns that should trigger builds on only some bots. + (r"/GNUmakefile\.am$", ["gtk"]), + (r"/\w+Chromium\w*\.(?:cpp|h|mm)$", ["chromium"]), + (r"Mac\.(?:cpp|h|mm)$", ["mac"]), + (r"\.exp$", ["mac"]), + (r"\.gypi?", ["chromium"]), + (r"\.order$", ["mac"]), + (r"\.pr[io]$", ["qt"]), + (r"\.xcconfig$", ["mac"]), + (r"\.xcodeproj/", ["mac"]), + ] + + base_platform = target_platform.split("-")[0] + + # See if the file is in one of the known directories. + for directory, platforms in directories: + if re.search(r"(?:^|/)%s/" % directory, file): + return target_platform in platforms or base_platform in platforms + + # See if the file matches a known pattern. + for pattern, platforms in patterns: + if re.search(pattern, file): + return target_platform in platforms or base_platform in platforms + + # See if the file is a platform-specific test result. + match = re.match("LayoutTests/platform/(?P<platform>[^/]+)/", file) + if match: + # See if the file is a test result for this platform, our base + # platform, or one of our sub-platforms. + return match.group("platform") in (target_platform, base_platform) or match.group("platform").startswith("%s-" % target_platform) + + # The file isn't one we know about specifically, so we should assume we + # have to build. + return True + + +def should_build(target_platform, changed_files): + """Returns true if the changed files affect the given platform, and + thus a build should be performed. 
target_platform should be one of the + platforms used in the build.webkit.org master's config.json file.""" + return any(_should_file_trigger_build(target_platform, file) for file in changed_files) diff --git a/Tools/Scripts/webkitpy/common/config/build_unittest.py b/Tools/Scripts/webkitpy/common/config/build_unittest.py new file mode 100644 index 0000000..d833464 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/build_unittest.py @@ -0,0 +1,64 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
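# --- Editorial aside, not part of this patch ---------------------------------
# A small usage sketch for build.should_build() defined above. Only "win" is
# implemented so far (the helper asserts on other platforms). The file names
# are made up for illustration: per the tables above, a ChangeLog-only change
# maps to no bots, while a file under a "win" directory maps to the win bots.
from webkitpy.common.config import build

assert not build.should_build("win", ["WebCore/ChangeLog"])
assert build.should_build("win", ["WebCore/platform/graphics/win/FontCacheWin.cpp"])
# ------------------------------------------------------------------------------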
+ +import unittest + +from webkitpy.common.config import build + + +class ShouldBuildTest(unittest.TestCase): + _should_build_tests = [ + (["Websites/bugs.webkit.org/foo", "WebCore/bar"], ["*"]), + (["Websites/bugs.webkit.org/foo"], []), + (["JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-snowleopard"]), + (["JavaScriptGlue/foo", "WebCore/bar"], ["*"]), + (["JavaScriptGlue/foo"], ["mac-leopard", "mac-snowleopard"]), + (["LayoutTests/foo"], ["*"]), + (["LayoutTests/platform/chromium-linux/foo"], ["chromium-linux"]), + (["LayoutTests/platform/chromium-win/fast/compact/001-expected.txt"], ["chromium-win"]), + (["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]), + (["LayoutTests/platform/mac-snowleopard/foo"], ["mac-snowleopard", "win"]), + (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-snowleopard", "win"]), + (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-snowleopard", "win"]), + (["LayoutTests/platform/win-xp/foo"], ["win"]), + (["LayoutTests/platform/win-wk2/foo"], ["win"]), + (["LayoutTests/platform/win/foo"], ["win"]), + (["WebCore/mac/foo"], ["chromium-mac", "mac-leopard", "mac-snowleopard"]), + (["WebCore/win/foo"], ["chromium-win", "win"]), + (["WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-snowleopard"]), + (["WebCore/platform/wx/wxcode/win/foo"], []), + (["WebCore/rendering/RenderThemeMac.mm", "WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-snowleopard"]), + (["WebCore/rendering/RenderThemeChromiumLinux.h"], ["chromium-linux"]), + (["WebCore/rendering/RenderThemeWinCE.h"], []), + ] + + def test_should_build(self): + for files, platforms in self._should_build_tests: + # FIXME: We should test more platforms here once + # build._should_file_trigger_build is implemented for them. + for platform in ["win"]: + should_build = platform in platforms or "*" in platforms + self.assertEqual(build.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files))) + + +if __name__ == "__main__": + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py new file mode 100644 index 0000000..7c5bf8b --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/committers.py @@ -0,0 +1,335 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for committer and reviewer validation + + +class Committer: + + def __init__(self, name, email_or_emails, irc_nickname=None): + self.full_name = name + if isinstance(email_or_emails, str): + self.emails = [email_or_emails] + else: + self.emails = email_or_emails + self.irc_nickname = irc_nickname + self.can_review = False + + def bugzilla_email(self): + # FIXME: We're assuming the first email is a valid bugzilla email, + # which might not be right. + return self.emails[0] + + def __str__(self): + return '"%s" <%s>' % (self.full_name, self.emails[0]) + + +class Reviewer(Committer): + + def __init__(self, name, email_or_emails, irc_nickname=None): + Committer.__init__(self, name, email_or_emails, irc_nickname) + self.can_review = True + + +# This is intended as a canonical, machine-readable list of all non-reviewer +# committers for WebKit. If your name is missing here and you are a committer, +# please add it. No review needed. All reviewers are committers, so this list +# is only of committers who are not reviewers. + + +committers_unable_to_review = [ + Committer("Aaron Boodman", "aa@chromium.org", "aboodman"), + Committer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"), + Committer("Adam Langley", "agl@chromium.org", "agl"), + Committer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"), + Committer("Albert J. Wong", "ajwong@chromium.org"), + Committer("Alejandro G. 
Castro", ["alex@igalia.com", "alex@webkit.org"]), + Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"), + Committer("Alexander Pavlov", "apavlov@chromium.org", "apavlov"), + Committer("Andre Boule", "aboule@apple.com"), + Committer("Andrei Popescu", "andreip@google.com", "andreip"), + Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"], "proton"), + Committer("Andrew Scherkus", "scherkus@chromium.org", "scherkus"), + Committer("Andrey Kosyakov", "caseq@chromium.org", "caseq"), + Committer("Andras Becsi", ["abecsi@webkit.org", "abecsi@inf.u-szeged.hu"], "bbandix"), + Committer("Andy Estes", "aestes@apple.com", "estes"), + Committer("Anthony Ricaud", "rik@webkit.org", "rik"), + Committer("Anton Muhin", "antonm@chromium.org", "antonm"), + Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"), + Committer("Ben Murdoch", "benm@google.com", "benm"), + Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org"], "icefox"), + Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"], "otte"), + Committer("Benjamin Poulain", ["benjamin.poulain@nokia.com", "ikipou@gmail.com"]), + Committer("Brent Fulgham", "bfulgham@webkit.org", "bfulgham"), + Committer("Brett Wilson", "brettw@chromium.org", "brettx"), + Committer("Brian Weinstein", "bweinstein@apple.com", "bweinstein"), + Committer("Cameron McCormack", "cam@webkit.org", "heycam"), + Committer("Carol Szabo", "carol.szabo@nokia.com"), + Committer("Chang Shu", "Chang.Shu@nokia.com"), + Committer("Chris Evans", "cevans@google.com"), + Committer("Chris Petersen", "cpetersen@apple.com", "cpetersen"), + Committer("Chris Rogers", "crogers@google.com", "crogers"), + Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org"]), + Committer("Collin Jackson", "collinj@webkit.org"), + Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"], "catfishman"), + Committer("Dean Jackson", "dino@apple.com", "dino"), + Committer("Diego Gonzalez", ["diegohcg@webkit.org", "diego.gonzalez@openbossa.org"], "diegohcg"), + Committer("Dirk Pranke", "dpranke@chromium.org"), + Committer("Drew Wilson", "atwilson@chromium.org", "atwilson"), + Committer("Eli Fidler", "eli@staikos.net", "QBin"), + Committer("Enrica Casucci", "enrica@apple.com"), + Committer("Erik Arvidsson", "arv@chromium.org", "arv"), + Committer("Eric Roman", "eroman@chromium.org", "eroman"), + Committer("Evan Martin", "evan@chromium.org", "evmar"), + Committer("Evan Stade", "estade@chromium.org", "estade"), + Committer("Fady Samuel", "fsamuel@chromium.org", "fsamuel"), + Committer("Feng Qian", "feng@chromium.org"), + Committer("Fumitoshi Ukai", "ukai@chromium.org", "ukai"), + Committer("Gabor Loki", "loki@webkit.org", "loki04"), + Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"]), + Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]), + Committer("Greg Bolsinga", "bolsinga@apple.com"), + Committer("Gyuyoung Kim", ["gyuyoung.kim@samsung.com", "gyuyoung@gmail.com", "gyuyoung@webkit.org"], "gyuyoung"), + Committer("Hans Wennborg", "hans@chromium.org", "hwennborg"), + Committer("Hayato Ito", "hayato@chromium.org", "hayato"), + Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]), + Committer("Ilya Tikhonovsky", "loislo@chromium.org", "loislo"), + Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"], "jpetso"), + Committer("Jakub Wieczorek", "jwieczorek@webkit.org", "fawek"), 
+ Committer("James Hawkins", ["jhawkins@chromium.org", "jhawkins@google.com"], "jhawkins"), + Committer("Jay Civelli", "jcivelli@chromium.org", "jcivelli"), + Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]), + Committer("Jer Noble", "jer.noble@apple.com", "jernoble"), + Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"), + Committer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"]), + Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"), + Committer("Jocelyn Turcotte", "jocelyn.turcotte@nokia.com", "jturcotte"), + Committer("Jochen Eisinger", "jochen@chromium.org", "jochen__"), + Committer("John Abd-El-Malek", "jam@chromium.org", "jam"), + Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"), + Committer("Johnny Ding", ["jnd@chromium.org", "johnnyding.webkit@gmail.com"], "johnnyding"), + Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"], "Altha"), + Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"], "jparent"), + Committer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com"]), + Committer("Jungshik Shin", "jshin@chromium.org"), + Committer("Justin Schuh", "jschuh@chromium.org", "jschuh"), + Committer("Keishi Hattori", "keishi@webkit.org", "keishi"), + Committer("Kelly Norton", "knorton@google.com"), + Committer("Kent Hansen", "kent.hansen@nokia.com", "khansen"), + Committer("Kimmo Kinnunen", ["kimmo.t.kinnunen@nokia.com", "kimmok@iki.fi", "ktkinnun@webkit.org"], "kimmok"), + Committer("Kinuko Yasuda", "kinuko@chromium.org", "kinuko"), + Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"), + Committer("Kwang Yul Seo", ["kwangyul.seo@gmail.com", "skyul@company100.net", "kseo@webkit.org"], "kwangseo"), + Committer("Leandro Pereira", ["leandro@profusion.mobi", "leandro@webkit.org"], "acidx"), + Committer("Levi Weintraub", "lweintraub@apple.com"), + Committer("Lucas De Marchi", ["lucas.demarchi@profusion.mobi", "demarchi@webkit.org"], "demarchi"), + Committer("Luiz Agostini", ["luiz@webkit.org", "luiz.agostini@openbossa.org"], "lca"), + Committer("Mads Ager", "ager@chromium.org"), + Committer("Marcus Voltis Bulach", "bulach@chromium.org"), + Committer("Mario Sanchez Prada", ["msanchez@igalia.com", "mario@webkit.org"], "msanchez"), + Committer("Matt Delaney", "mdelaney@apple.com"), + Committer("Matt Lilek", ["webkit@mattlilek.com", "pewtermoose@webkit.org"]), + Committer("Matt Perry", "mpcomplete@chromium.org"), + Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]), + Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"], "maxime.simon"), + Committer("Michael Nordman", "michaeln@google.com", "michaeln"), + Committer("Michael Saboff", "msaboff@apple.com"), + Committer("Michelangelo De Simone", "michelangelo@webkit.org", "michelangelo"), + Committer("Mihai Parparita", "mihaip@chromium.org", "mihaip"), + Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]), + Committer("Mike Fenton", ["mifenton@rim.com", "mike.fenton@torchmobile.com"], "mfenton"), + Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]), + Committer("Mikhail Naganov", "mnaganov@chromium.org"), + Committer("MORITA Hajime", "morrita@google.com", "morrita"), + Committer("Nico Weber", ["thakis@chromium.org", "thakis@google.com"], "thakis"), + Committer("Noam Rosenthal", "noam.rosenthal@nokia.com", "noamr"), + Committer("Pam Greene", 
"pam@chromium.org", "pamg"), + Committer("Patrick Gansterer", ["paroga@paroga.com", "paroga@webkit.org"], "paroga"), + Committer("Pavel Podivilov", "podivilov@chromium.org", "podivilov"), + Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"], "pkasting"), + Committer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org"], "philn-tp"), + Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"], "pdherbemont"), + Committer("Pierre-Olivier Latour", "pol@apple.com", "pol"), + Committer("Renata Hodovan", "reni@webkit.org", "reni"), + Committer("Robert Hogan", ["robert@webkit.org", "robert@roberthogan.net", "lists@roberthogan.net"], "mwenge"), + Committer("Roland Steiner", "rolandsteiner@chromium.org"), + Committer("Satish Sampath", "satish@chromium.org"), + Committer("Scott Violet", "sky@chromium.org", "sky"), + Committer("Sergio Villar Senin", ["svillar@igalia.com", "sergio@webkit.org"], "svillar"), + Committer("Stephen White", "senorblanco@chromium.org", "senorblanco"), + Committer("Tony Gentilcore", "tonyg@chromium.org", "tonyg-cr"), + Committer("Trey Matteson", "trey@usa.net", "trey"), + Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]), + Committer("Vangelis Kokkevis", "vangelis@chromium.org", "vangelis"), + Committer("Victor Wang", "victorw@chromium.org", "victorw"), + Committer("Vitaly Repeshko", "vitalyr@chromium.org"), + Committer("William Siegrist", "wsiegrist@apple.com", "wms"), + Committer("Xiaomei Ji", "xji@chromium.org", "xji"), + Committer("Yael Aharon", "yael.aharon@nokia.com"), + Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]), + Committer("Yong Li", ["yong.li.webkit@gmail.com", "yong.li@torchmobile.com"], "yong"), + Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"), + Committer("Yuta Kitamura", "yutak@chromium.org", "yutak"), + Committer("Yuzo Fujishima", "yuzo@google.com", "yuzo"), + Committer("Zhenyao Mo", "zmo@google.com", "zhenyao"), + Committer("Zoltan Herczeg", "zherczeg@webkit.org", "zherczeg"), + Committer("Zoltan Horvath", ["zoltan@webkit.org", "hzoltan@inf.u-szeged.hu", "horvath.zoltan.6@stud.u-szeged.hu"], "zoltan"), +] + + +# This is intended as a canonical, machine-readable list of all reviewers for +# WebKit. If your name is missing here and you are a reviewer, please add it. +# No review needed. 
+ + +reviewers_list = [ + Reviewer("Ada Chan", "adachan@apple.com", "chanada"), + Reviewer("Adam Barth", "abarth@webkit.org", "abarth"), + Reviewer("Adam Roben", "aroben@apple.com", "aroben"), + Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org", "atreat@rim.com"], "manyoso"), + Reviewer("Adele Peterson", "adele@apple.com", "adele"), + Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"), + Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"), + Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"], "alp"), + Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"], "andersca"), + Reviewer("Andreas Kling", ["kling@webkit.org", "andreas.kling@nokia.com"], "kling"), + Reviewer("Antonio Gomes", ["tonikitoo@webkit.org", "agomes@rim.com"], "tonikitoo"), + Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com", "antti.j.koivisto@nokia.com"], "anttik"), + Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@sencha.com", "ariya@webkit.org"], "ariya"), + Reviewer("Beth Dakin", "bdakin@apple.com", "dethbakin"), + Reviewer("Brady Eidson", "beidson@apple.com", "bradee-oh"), + Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]), + Reviewer("Chris Blumenberg", "cblu@apple.com", "cblu"), + Reviewer("Chris Marrin", "cmarrin@apple.com", "cmarrin"), + Reviewer("Chris Fleizach", "cfleizach@apple.com", "cfleizach"), + Reviewer("Chris Jerdonek", "cjerdonek@webkit.org", "cjerdonek"), + Reviewer(u"Csaba Osztrogon\u00e1c", "ossy@webkit.org", "ossy"), + Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"], "mitzpettel"), + Reviewer("Daniel Bates", "dbates@webkit.org", "dydz"), + Reviewer("Darin Adler", "darin@apple.com", "darin"), + Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"], "fishd"), + Reviewer("David Harrison", "harrison@apple.com", "harrison"), + Reviewer("David Hyatt", "hyatt@apple.com", "hyatt"), + Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"], "ddkilzer"), + Reviewer("David Levin", "levin@chromium.org", "dave_levin"), + Reviewer("Dimitri Glazkov", "dglazkov@chromium.org", "dglazkov"), + Reviewer("Dirk Schulze", "krit@webkit.org", "krit"), + Reviewer("Dmitry Titov", "dimich@chromium.org", "dimich"), + Reviewer("Don Melton", "gramps@apple.com", "gramps"), + Reviewer("Dumitru Daniliuc", "dumi@chromium.org", "dumi"), + Reviewer("Eric Carlson", "eric.carlson@apple.com"), + Reviewer("Eric Seidel", "eric@webkit.org", "eseidel"), + Reviewer("Gavin Barraclough", "barraclough@apple.com", "gbarra"), + Reviewer("Geoffrey Garen", "ggaren@apple.com", "ggaren"), + Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]), + Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org", "gustavo.noronha@collabora.co.uk"], "kov"), + Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"], "zecke"), + Reviewer("James Robinson", ["jamesr@chromium.org", "jamesr@google.com"], "jamesr"), + Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"], "janm"), + Reviewer("Jeremy Orlow", "jorlow@chromium.org", "jorlow"), + Reviewer("Jian Li", "jianli@chromium.org", "jianli"), + Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"), + Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"), + Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"), + Reviewer("Justin Garcia", "justin.garcia@apple.com", "justing"), + Reviewer("Ken Kocienda", 
"kocienda@apple.com"), + Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.christiansen@openbossa.org", "kenneth.christiansen@gmail.com"], "kenne"), + Reviewer("Kenneth Russell", "kbr@google.com", "kbr_google"), + Reviewer("Kent Tamura", "tkent@chromium.org", "tkent"), + Reviewer("Kevin Decker", "kdecker@apple.com", "superkevin"), + Reviewer("Kevin McCullough", "kmccullough@apple.com", "maculloch"), + Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"], "kollivier"), + Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org", "lars.knoll@nokia.com"], "lars"), + Reviewer("Laszlo Gombos", "laszlo.1.gombos@nokia.com", "lgombos"), + Reviewer("Maciej Stachowiak", "mjs@apple.com", "othermaciej"), + Reviewer("Mark Rowe", "mrowe@apple.com", "bdash"), + Reviewer("Martin Robinson", ["mrobinson@webkit.org", "mrobinson@igalia.com", "martin.james.robinson@gmail.com"], "mrobinson"), + Reviewer("Nate Chapin", "japhet@chromium.org", "japhet"), + Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org"], "wildfox"), + Reviewer("Ojan Vafai", "ojan@chromium.org", "ojan"), + Reviewer("Oliver Hunt", "oliver@apple.com", "olliej"), + Reviewer("Pavel Feldman", "pfeldman@chromium.org", "pfeldman"), + Reviewer("Richard Williamson", "rjw@apple.com", "rjw"), + Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org"], "rwlbuis"), + Reviewer("Ryosuke Niwa", "rniwa@webkit.org", "rniwa"), + Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"], "weinig"), + Reviewer("Shinichiro Hamaji", "hamaji@chromium.org", "hamaji"), + Reviewer("Simon Fraser", "simon.fraser@apple.com", "smfr"), + Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@nokia.com"], "tronical"), + Reviewer("Stephanie Lewis", "slewis@apple.com", "sundiamonde"), + Reviewer("Steve Block", "steveblock@google.com", "steveblock"), + Reviewer("Steve Falkenburg", "sfalken@apple.com", "sfalken"), + Reviewer("Tim Omernick", "timo@apple.com"), + Reviewer("Timothy Hatcher", ["timothy@apple.com", "timothy@hatcher.name"], "xenon"), + Reviewer("Tony Chang", "tony@chromium.org", "tony^work"), + Reviewer(u"Tor Arne Vestb\u00f8", ["vestbo@webkit.org", "tor.arne.vestbo@nokia.com"], "torarne"), + Reviewer("Vicki Murley", "vicki@apple.com"), + Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org"], "xan"), + Reviewer("Yury Semikhatsky", "yurys@chromium.org", "yurys"), + Reviewer("Zack Rusin", "zack@kde.org", "zackr"), +] + + +class CommitterList: + + # Committers and reviewers are passed in to allow easy testing + + def __init__(self, + committers=committers_unable_to_review, + reviewers=reviewers_list): + self._committers = committers + reviewers + self._reviewers = reviewers + self._committers_by_email = {} + + def committers(self): + return self._committers + + def reviewers(self): + return self._reviewers + + def _email_to_committer_map(self): + if not len(self._committers_by_email): + for committer in self._committers: + for email in committer.emails: + self._committers_by_email[email] = committer + return self._committers_by_email + + def committer_by_name(self, name): + # This could be made into a hash lookup if callers need it to be fast. 
+ for committer in self.committers(): + if committer.full_name == name: + return committer + + def committer_by_email(self, email): + return self._email_to_committer_map().get(email) + + def reviewer_by_email(self, email): + committer = self.committer_by_email(email) + if committer and not committer.can_review: + return None + return committer diff --git a/Tools/Scripts/webkitpy/common/config/committers_unittest.py b/Tools/Scripts/webkitpy/common/config/committers_unittest.py new file mode 100644 index 0000000..068c0ee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/committers_unittest.py @@ -0,0 +1,72 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
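A short usage sketch of the CommitterList API just defined (illustrative only; the emails are taken from the lists in committers.py above, and the lookups below are assumptions about how those entries resolve):

from webkitpy.common.config.committers import CommitterList

committer_list = CommitterList()  # defaults to the two lists defined above
ben = committer_list.committer_by_email("benm@google.com")
assert ben.full_name == "Ben Murdoch" and not ben.can_review
assert committer_list.reviewer_by_email("benm@google.com") is None  # a committer, but not a reviewer
assert committer_list.reviewer_by_email("mjs@apple.com").can_review  # reviewers can review

The unit test that follows exercises the same lookups against small hand-built lists.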
+ +import unittest +from webkitpy.common.config.committers import CommitterList, Committer, Reviewer + +class CommittersTest(unittest.TestCase): + + def test_committer_lookup(self): + committer = Committer('Test One', 'one@test.com', 'one') + reviewer = Reviewer('Test Two', ['two@test.com', 'two@rad.com', 'so_two@gmail.com']) + committer_list = CommitterList(committers=[committer], reviewers=[reviewer]) + + # Test valid committer and reviewer lookup + self.assertEqual(committer_list.committer_by_email('one@test.com'), committer) + self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer) + self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer) + self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer) + self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer) + + # Test valid committer and reviewer lookup + self.assertEqual(committer_list.committer_by_name("Test One"), committer) + self.assertEqual(committer_list.committer_by_name("Test Two"), reviewer) + self.assertEqual(committer_list.committer_by_name("Test Three"), None) + + # Test that the first email is assumed to be the Bugzilla email address (for now) + self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com') + + # Test that a known committer is not returned during reviewer lookup + self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None) + + # Test that unknown email address fail both committer and reviewer lookup + self.assertEqual(committer_list.committer_by_email('bar@bar.com'), None) + self.assertEqual(committer_list.reviewer_by_email('bar@bar.com'), None) + + # Test that emails returns a list. + self.assertEqual(committer.emails, ['one@test.com']) + + self.assertEqual(committer.irc_nickname, 'one') + + # Test that committers returns committers and reviewers and reviewers() just reviewers. + self.assertEqual(committer_list.committers(), [committer, reviewer]) + self.assertEqual(committer_list.reviewers(), [reviewer]) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator.py b/Tools/Scripts/webkitpy/common/config/committervalidator.py new file mode 100644 index 0000000..9b1bbea --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/committervalidator.py @@ -0,0 +1,114 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# Copyright (c) 2010 Research In Motion Limited. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.common.system.ospath import relpath +from webkitpy.common.config import committers, urls + + +class CommitterValidator(object): + + def __init__(self, bugzilla): + self._bugzilla = bugzilla + + def _checkout_root(self): + # FIXME: This is a hack, we would have this from scm.checkout_root + # if we had any way to get to an scm object here. + components = __file__.split(os.sep) + tools_index = components.index("Tools") + return os.sep.join(components[:tools_index]) + + def _committers_py_path(self): + # extension can sometimes be .pyc, we always want .py + (path, extension) = os.path.splitext(committers.__file__) + # FIXME: When we're allowed to use python 2.6 we can use the real + # os.path.relpath + path = relpath(path, self._checkout_root()) + return ".".join([path, "py"]) + + def _flag_permission_rejection_message(self, setter_email, flag_name): + # This could be queried from the status_server. + queue_administrator = "eseidel@chromium.org" + # This could be queried from the tool. + queue_name = "commit-queue" + committers_list = self._committers_py_path() + message = "%s does not have %s permissions according to %s." % ( + setter_email, + flag_name, + urls.view_source_url(committers_list)) + message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % ( + flag_name, urls.contribution_guidelines) + message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed). " % ( + flag_name, committers_list) + message += "The %s restarts itself every 2 hours. After restart the %s will correctly respect your %s rights." % ( + queue_name, queue_name, flag_name) + return message + + def _validate_setter_email(self, patch, result_key, rejection_function): + committer = getattr(patch, result_key)() + # If the flag is set, and we don't recognize the setter, reject the + # flag! + setter_email = patch._attachment_dictionary.get("%s_email" % result_key) + if setter_email and not committer: + rejection_function(patch.id(), + self._flag_permission_rejection_message(setter_email, + result_key)) + return False + return True + + def _reject_patch_if_flags_are_invalid(self, patch): + return (self._validate_setter_email( + patch, "reviewer", self.reject_patch_from_review_queue) + and self._validate_setter_email( + patch, "committer", self.reject_patch_from_commit_queue)) + + def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches): + return [patch for patch in patches if self._reject_patch_if_flags_are_invalid(patch)] + + def reject_patch_from_commit_queue(self, + attachment_id, + additional_comment_text=None): + comment_text = "Rejecting attachment %s from commit-queue." 
% attachment_id + self._bugzilla.set_flag_on_attachment(attachment_id, + "commit-queue", + "-", + comment_text, + additional_comment_text) + + def reject_patch_from_review_queue(self, + attachment_id, + additional_comment_text=None): + comment_text = "Rejecting attachment %s from review queue." % attachment_id + self._bugzilla.set_flag_on_attachment(attachment_id, + 'review', + '-', + comment_text, + additional_comment_text) diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py new file mode 100644 index 0000000..58fd3a5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from .committervalidator import CommitterValidator + + +class CommitterValidatorTest(unittest.TestCase): + def test_flag_permission_rejection_message(self): + validator = CommitterValidator(bugzilla=None) + self.assertEqual(validator._committers_py_path(), "Tools/Scripts/webkitpy/common/config/committers.py") + expected_messsage = """foo@foo.com does not have review permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py. + +- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags. + +- If you have review rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. 
After restart the commit-queue will correctly respect your review rights.""" + self.assertEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_messsage) diff --git a/Tools/Scripts/webkitpy/common/config/irc.py b/Tools/Scripts/webkitpy/common/config/irc.py new file mode 100644 index 0000000..950c573 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/irc.py @@ -0,0 +1,31 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +server="irc.freenode.net" +port=6667 +channel="#webkit" diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py new file mode 100644 index 0000000..163d5ef --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/ports.py @@ -0,0 +1,249 @@ +# Copyright (C) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for understanding the various ports + +import os +import platform + +from webkitpy.common.system.executive import Executive + + +class WebKitPort(object): + + # We might need to pass scm into this function for scm.checkout_root + @classmethod + def script_path(cls, script_name): + return os.path.join("Tools", "Scripts", script_name) + + @staticmethod + def port(port_name): + ports = { + "chromium": ChromiumPort, + "chromium-xvfb": ChromiumXVFBPort, + "gtk": GtkPort, + "mac": MacPort, + "win": WinPort, + "qt": QtPort, + "efl": EflPort, + } + default_port = { + "Windows": WinPort, + "Darwin": MacPort, + } + # Do we really need MacPort as the ultimate default? + return ports.get(port_name, default_port.get(platform.system(), MacPort)) + + @staticmethod + def makeArgs(): + args = '--makeargs="-j%s"' % Executive().cpu_count() + if os.environ.has_key('MAKEFLAGS'): + args = '--makeargs="%s"' % os.environ['MAKEFLAGS'] + return args + + @classmethod + def name(cls): + raise NotImplementedError("subclasses must implement") + + @classmethod + def flag(cls): + raise NotImplementedError("subclasses must implement") + + @classmethod + def update_webkit_command(cls): + return [cls.script_path("update-webkit")] + + @classmethod + def build_webkit_command(cls, build_style=None): + command = [cls.script_path("build-webkit")] + if build_style == "debug": + command.append("--debug") + if build_style == "release": + command.append("--release") + return command + + @classmethod + def run_javascriptcore_tests_command(cls): + return [cls.script_path("run-javascriptcore-tests")] + + @classmethod + def run_webkit_tests_command(cls): + return [cls.script_path("run-webkit-tests")] + + @classmethod + def run_python_unittests_command(cls): + return [cls.script_path("test-webkitpy")] + + @classmethod + def run_perl_unittests_command(cls): + return [cls.script_path("test-webkitperl")] + + @classmethod + def layout_tests_results_path(cls): + return "/tmp/layout-test-results/results.html" + + +class MacPort(WebKitPort): + + @classmethod + def name(cls): + return "Mac" + + @classmethod + def flag(cls): + return "--port=mac" + + @classmethod + def _system_version(cls): + version_string = platform.mac_ver()[0] # e.g. "10.5.6" + version_tuple = version_string.split('.') + return map(int, version_tuple) + + @classmethod + def is_leopard(cls): + return tuple(cls._system_version()[:2]) == (10, 5) + + +class WinPort(WebKitPort): + + @classmethod + def name(cls): + return "Win" + + @classmethod + def flag(cls): + # FIXME: This is lame. We should autogenerate this from a codename or something. 
+ return "--port=win" + + +class GtkPort(WebKitPort): + + @classmethod + def name(cls): + return "Gtk" + + @classmethod + def flag(cls): + return "--port=gtk" + + @classmethod + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) + command.append("--gtk") + command.append(WebKitPort.makeArgs()) + return command + + @classmethod + def run_webkit_tests_command(cls): + command = WebKitPort.run_webkit_tests_command() + command.append("--gtk") + return command + + +class QtPort(WebKitPort): + + @classmethod + def name(cls): + return "Qt" + + @classmethod + def flag(cls): + return "--port=qt" + + @classmethod + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) + command.append("--qt") + command.append(WebKitPort.makeArgs()) + return command + + +class EflPort(WebKitPort): + + @classmethod + def name(cls): + return "Efl" + + @classmethod + def flag(cls): + return "--port=efl" + + @classmethod + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) + command.append("--efl") + command.append(WebKitPort.makeArgs()) + return command + + +class ChromiumPort(WebKitPort): + + @classmethod + def name(cls): + return "Chromium" + + @classmethod + def flag(cls): + return "--port=chromium" + + @classmethod + def update_webkit_command(cls): + command = WebKitPort.update_webkit_command() + command.append("--chromium") + return command + + @classmethod + def build_webkit_command(cls, build_style=None): + command = WebKitPort.build_webkit_command(build_style=build_style) + command.append("--chromium") + command.append("--update-chromium") + return command + + @classmethod + def run_webkit_tests_command(cls): + return [ + cls.script_path("new-run-webkit-tests"), + "--chromium", + "--no-pixel-tests", + ] + + @classmethod + def run_javascriptcore_tests_command(cls): + return None + + +class ChromiumXVFBPort(ChromiumPort): + + @classmethod + def flag(cls): + return "--port=chromium-xvfb" + + @classmethod + def run_webkit_tests_command(cls): + # FIXME: We should find a better way to do this. + return ["xvfb-run"] + ChromiumPort.run_webkit_tests_command() diff --git a/Tools/Scripts/webkitpy/common/config/ports_unittest.py b/Tools/Scripts/webkitpy/common/config/ports_unittest.py new file mode 100644 index 0000000..ba255c0 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/ports_unittest.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.config.ports import * + + +class WebKitPortTest(unittest.TestCase): + def test_mac_port(self): + self.assertEquals(MacPort.name(), "Mac") + self.assertEquals(MacPort.flag(), "--port=mac") + self.assertEquals(MacPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")]) + self.assertEquals(MacPort.build_webkit_command(), [WebKitPort.script_path("build-webkit")]) + self.assertEquals(MacPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug"]) + self.assertEquals(MacPort.build_webkit_command(build_style="release"), [WebKitPort.script_path("build-webkit"), "--release"]) + + class TestIsLeopard(MacPort): + @classmethod + def _system_version(cls): + return [10, 5] + self.assertTrue(TestIsLeopard.is_leopard()) + + def test_gtk_port(self): + self.assertEquals(GtkPort.name(), "Gtk") + self.assertEquals(GtkPort.flag(), "--port=gtk") + self.assertEquals(GtkPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests"), "--gtk"]) + self.assertEquals(GtkPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--gtk", WebKitPort.makeArgs()]) + self.assertEquals(GtkPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--gtk", WebKitPort.makeArgs()]) + + def test_qt_port(self): + self.assertEquals(QtPort.name(), "Qt") + self.assertEquals(QtPort.flag(), "--port=qt") + self.assertEquals(QtPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")]) + self.assertEquals(QtPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--qt", WebKitPort.makeArgs()]) + self.assertEquals(QtPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--qt", WebKitPort.makeArgs()]) + + def test_chromium_port(self): + self.assertEquals(ChromiumPort.name(), "Chromium") + self.assertEquals(ChromiumPort.flag(), "--port=chromium") + self.assertEquals(ChromiumPort.run_webkit_tests_command(), [WebKitPort.script_path("new-run-webkit-tests"), "--chromium", "--no-pixel-tests"]) + self.assertEquals(ChromiumPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--chromium", "--update-chromium"]) + self.assertEquals(ChromiumPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--chromium", "--update-chromium"]) + self.assertEquals(ChromiumPort.update_webkit_command(), [WebKitPort.script_path("update-webkit"), "--chromium"]) + + def test_chromium_xvfb_port(self): + self.assertEquals(ChromiumXVFBPort.run_webkit_tests_command(), ["xvfb-run", "Tools/Scripts/new-run-webkit-tests", "--chromium", "--no-pixel-tests"]) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/config/urls.py b/Tools/Scripts/webkitpy/common/config/urls.py new file mode 100644 index 0000000..dfa6d69 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/config/urls.py @@ -0,0 +1,38 @@ +# Copyright 
(c) 2010, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +def view_source_url(local_path): + return "http://trac.webkit.org/browser/trunk/%s" % local_path + + +def view_revision_url(revision_number): + return "http://trac.webkit.org/changeset/%s" % revision_number + + +contribution_guidelines = "http://webkit.org/coding/contributing.html" diff --git a/Tools/Scripts/webkitpy/common/memoized.py b/Tools/Scripts/webkitpy/common/memoized.py new file mode 100644 index 0000000..dc844a5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/memoized.py @@ -0,0 +1,55 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Python does not (yet) seem to provide automatic memoization. So we've +# written a small decorator to do so. + +import functools + + +class memoized(object): + def __init__(self, function): + self._function = function + self._results_cache = {} + + def __call__(self, *args): + try: + return self._results_cache[args] + except KeyError: + # If we didn't find the args in our cache, call and save the results. + result = self._function(*args) + self._results_cache[args] = result + return result + # FIXME: We may need to handle TypeError here in the case + # that "args" is not a valid dictionary key. + + # Use python "descriptor" protocol __get__ to appear + # invisible during property access. + def __get__(self, instance, owner): + # Return a function partial with obj already bound as self. + return functools.partial(self.__call__, instance) diff --git a/Tools/Scripts/webkitpy/common/memoized_unittest.py b/Tools/Scripts/webkitpy/common/memoized_unittest.py new file mode 100644 index 0000000..dd7c793 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/memoized_unittest.py @@ -0,0 +1,65 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
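A minimal sketch of the @memoized decorator defined above, applied to a plain function (illustrative; the function name and the call counter are made up for the example):

from webkitpy.common.memoized import memoized

call_count = [0]

@memoized
def slow_double(n):
    call_count[0] += 1  # track how often the real function body runs
    return 2 * n

assert slow_double(21) == 42 and call_count[0] == 1
assert slow_double(21) == 42 and call_count[0] == 1  # second call hits the cache

The unit test below checks the same caching behavior, plus the __get__ tear-off case for bound methods.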
+ +import unittest + +from webkitpy.common.memoized import memoized + + +class _TestObject(object): + def __init__(self): + self.callCount = 0 + + @memoized + def memoized_add(self, argument): + """testing docstring""" + self.callCount += 1 + if argument is None: + return None # Avoid the TypeError from None + 1 + return argument + 1 + + +class MemoizedTest(unittest.TestCase): + def test_caching(self): + test = _TestObject() + test.callCount = 0 + self.assertEqual(test.memoized_add(1), 2) + self.assertEqual(test.callCount, 1) + self.assertEqual(test.memoized_add(1), 2) + self.assertEqual(test.callCount, 1) + + # Validate that callCount is working as expected. + self.assertEqual(test.memoized_add(2), 3) + self.assertEqual(test.callCount, 2) + + def test_tearoff(self): + test = _TestObject() + # Make sure that get()/tear-offs work: + tearoff = test.memoized_add + self.assertEqual(tearoff(4), 5) + self.assertEqual(test.callCount, 1) diff --git a/Tools/Scripts/webkitpy/common/net/__init__.py b/Tools/Scripts/webkitpy/common/net/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py b/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py new file mode 100644 index 0000000..cfaf3b1 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/__init__.py @@ -0,0 +1,8 @@ +# Required for Python to search this directory for module files + +# We only export public API here. +# FIXME: parse_bug_id should not be a free function. +from .bugzilla import Bugzilla, parse_bug_id +# Unclear if Bug and Attachment need to be public classes. +from .bug import Bug +from .attachment import Attachment diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py b/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py new file mode 100644 index 0000000..85761fe --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/attachment.py @@ -0,0 +1,114 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# Copyright (c) 2010 Research In Motion Limited. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class Attachment(object):
+
+    rollout_preamble = "ROLLOUT of r"
+
+    def __init__(self, attachment_dictionary, bug):
+        self._attachment_dictionary = attachment_dictionary
+        self._bug = bug
+        self._reviewer = None
+        self._committer = None
+
+    def _bugzilla(self):
+        return self._bug._bugzilla
+
+    def id(self):
+        return int(self._attachment_dictionary.get("id"))
+
+    def attacher_is_committer(self):
+        return self._bugzilla().committers.committer_by_email(
+            self.attacher_email())
+
+    def attacher_email(self):
+        return self._attachment_dictionary.get("attacher_email")
+
+    def bug(self):
+        return self._bug
+
+    def bug_id(self):
+        return int(self._attachment_dictionary.get("bug_id"))
+
+    def is_patch(self):
+        return not not self._attachment_dictionary.get("is_patch")
+
+    def is_obsolete(self):
+        return not not self._attachment_dictionary.get("is_obsolete")
+
+    def is_rollout(self):
+        return self.name().startswith(self.rollout_preamble)
+
+    def name(self):
+        return self._attachment_dictionary.get("name")
+
+    def attach_date(self):
+        return self._attachment_dictionary.get("attach_date")
+
+    def review(self):
+        return self._attachment_dictionary.get("review")
+
+    def commit_queue(self):
+        return self._attachment_dictionary.get("commit-queue")
+
+    def url(self):
+        # FIXME: This should just return
+        # self._bugzilla().attachment_url_for_id(self.id()). scm_unittest.py
+        # depends on the current behavior.
+        return self._attachment_dictionary.get("url")
+
+    def contents(self):
+        # FIXME: We shouldn't be grabbing at _bugzilla.
+        return self._bug._bugzilla.fetch_attachment_contents(self.id())
+
+    def _validate_flag_value(self, flag):
+        email = self._attachment_dictionary.get("%s_email" % flag)
+        if not email:
+            return None
+        committer = getattr(self._bugzilla().committers,
+                            "%s_by_email" % flag)(email)
+        if committer:
+            return committer
+        log("Warning, attachment %s on bug %s has invalid %s (%s)" % (
+            self._attachment_dictionary['id'],
+            self._attachment_dictionary['bug_id'], flag, email))
+
+    def reviewer(self):
+        if not self._reviewer:
+            self._reviewer = self._validate_flag_value("reviewer")
+        return self._reviewer
+
+    def committer(self):
+        if not self._committer:
+            self._committer = self._validate_flag_value("committer")
+        return self._committer
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
new file mode 100644
index 0000000..af258eb
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from .attachment import Attachment + + +class Bug(object): + # FIXME: This class is kinda a hack for now. It exists so we have one + # place to hold bug logic, even if much of the code deals with + # dictionaries still. + + def __init__(self, bug_dictionary, bugzilla): + self.bug_dictionary = bug_dictionary + self._bugzilla = bugzilla + + def id(self): + return self.bug_dictionary["id"] + + def title(self): + return self.bug_dictionary["title"] + + def reporter_email(self): + return self.bug_dictionary["reporter_email"] + + def assigned_to_email(self): + return self.bug_dictionary["assigned_to_email"] + + # FIXME: This information should be stored in some sort of webkit_config.py instead of here. 
+ unassigned_emails = frozenset([ + "webkit-unassigned@lists.webkit.org", + "webkit-qt-unassigned@trolltech.com", + ]) + + def is_unassigned(self): + return self.assigned_to_email() in self.unassigned_emails + + def status(self): + return self.bug_dictionary["bug_status"] + + # Bugzilla has many status states we don't really use in WebKit: + # https://bugs.webkit.org/page.cgi?id=fields.html#status + _open_states = ["UNCONFIRMED", "NEW", "ASSIGNED", "REOPENED"] + _closed_states = ["RESOLVED", "VERIFIED", "CLOSED"] + + def is_open(self): + return self.status() in self._open_states + + def is_closed(self): + return not self.is_open() + + def duplicate_of(self): + return self.bug_dictionary.get('dup_id', None) + + # Rarely do we actually want obsolete attachments + def attachments(self, include_obsolete=False): + attachments = self.bug_dictionary["attachments"] + if not include_obsolete: + attachments = filter(lambda attachment: + not attachment["is_obsolete"], attachments) + return [Attachment(attachment, self) for attachment in attachments] + + def patches(self, include_obsolete=False): + return [patch for patch in self.attachments(include_obsolete) + if patch.is_patch()] + + def unreviewed_patches(self): + return [patch for patch in self.patches() if patch.review() == "?"] + + def reviewed_patches(self, include_invalid=False): + patches = [patch for patch in self.patches() if patch.review() == "+"] + if include_invalid: + return patches + # Checking reviewer() ensures that it was both reviewed and has a valid + # reviewer. + return filter(lambda patch: patch.reviewer(), patches) + + def commit_queued_patches(self, include_invalid=False): + patches = [patch for patch in self.patches() + if patch.commit_queue() == "+"] + if include_invalid: + return patches + # Checking committer() ensures that it was both commit-queue+'d and has + # a valid committer. + return filter(lambda patch: patch.committer(), patches) diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py new file mode 100644 index 0000000..d43d64f --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py @@ -0,0 +1,40 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from .bug import Bug + + +class BugTest(unittest.TestCase): + def test_is_unassigned(self): + for email in Bug.unassigned_emails: + bug = Bug({"assigned_to_email": email}, bugzilla=None) + self.assertTrue(bug.is_unassigned()) + bug = Bug({"assigned_to_email": "test@test.com"}, bugzilla=None) + self.assertFalse(bug.is_unassigned()) diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py new file mode 100644 index 0000000..d6210d5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py @@ -0,0 +1,761 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# Copyright (c) 2010 Research In Motion Limited. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for interacting with Bugzilla + +import os.path +import re +import StringIO +import urllib + +from datetime import datetime # used in timestamp() + +from .attachment import Attachment +from .bug import Bug + +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.config import committers +from webkitpy.common.net.credentials import Credentials +from webkitpy.common.system.user import User +from webkitpy.thirdparty.autoinstalled.mechanize import Browser +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer + + +# FIXME: parse_bug_id should not be a free function. 
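The free function defined just below accepts both short and full bug URLs and returns an int or None; a small sketch of its behaviour, mirroring the cases exercised by test_parse_bug_id in bugzilla_unittest.py further down (illustrative only):

    # Recognized forms return the bug id as an int.
    assert parse_bug_id("http://webkit.org/b/12345") == 12345
    assert parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345") == 12345
    # Anything else (unexpected host, missing input) returns None.
    assert parse_bug_id("http://www.webkit.org/b/12345") is None
    assert parse_bug_id(None) is None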
+def parse_bug_id(message): + if not message: + return None + match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message) + if match: + return int(match.group('bug_id')) + match = re.search( + Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)", + message) + if match: + return int(match.group('bug_id')) + return None + + +def timestamp(): + return datetime.now().strftime("%Y%m%d%H%M%S") + + +# A container for all of the logic for making and parsing buzilla queries. +class BugzillaQueries(object): + + def __init__(self, bugzilla): + self._bugzilla = bugzilla + + def _is_xml_bugs_form(self, form): + # ClientForm.HTMLForm.find_control throws if the control is not found, + # so we do a manual search instead: + return "xml" in [control.id for control in form.controls] + + # This is kinda a hack. There is probably a better way to get this information from bugzilla. + def _parse_result_count(self, results_page): + result_count_text = BeautifulSoup(results_page).find(attrs={'class': 'bz_result_count'}).string + result_count_parts = result_count_text.strip().split(" ") + if result_count_parts[0] == "Zarro": + return 0 + if result_count_parts[0] == "One": + return 1 + return int(result_count_parts[0]) + + # Note: _load_query, _fetch_bug and _fetch_bugs_from_advanced_query + # are the only methods which access self._bugzilla. + + def _load_query(self, query): + self._bugzilla.authenticate() + full_url = "%s%s" % (self._bugzilla.bug_server_url, query) + return self._bugzilla.browser.open(full_url) + + def _fetch_bugs_from_advanced_query(self, query): + results_page = self._load_query(query) + if not self._parse_result_count(results_page): + return [] + # Bugzilla results pages have an "XML" submit button at the bottom + # which can be used to get an XML page containing all of the <bug> elements. + # This is slighty lame that this assumes that _load_query used + # self._bugzilla.browser and that it's in an acceptable state. + self._bugzilla.browser.select_form(predicate=self._is_xml_bugs_form) + bugs_xml = self._bugzilla.browser.submit() + return self._bugzilla._parse_bugs_from_xml(bugs_xml) + + def _fetch_bug(self, bug_id): + return self._bugzilla.fetch_bug(bug_id) + + def _fetch_bug_ids_advanced_query(self, query): + soup = BeautifulSoup(self._load_query(query)) + # The contents of the <a> inside the cells in the first column happen + # to be the bug id. + return [int(bug_link_cell.find("a").string) + for bug_link_cell in soup('td', "first-child")] + + def _parse_attachment_ids_request_query(self, page): + digits = re.compile("\d+") + attachment_href = re.compile("attachment.cgi\?id=\d+&action=review") + attachment_links = SoupStrainer("a", href=attachment_href) + return [int(digits.search(tag["href"]).group(0)) + for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)] + + def _fetch_attachment_ids_request_query(self, query): + return self._parse_attachment_ids_request_query(self._load_query(query)) + + def _parse_quips(self, page): + soup = BeautifulSoup(page, convertEntities=BeautifulSoup.HTML_ENTITIES) + quips = soup.find(text=re.compile(r"Existing quips:")).findNext("ul").findAll("li") + return [unicode(quip_entry.string) for quip_entry in quips] + + def fetch_quips(self): + return self._parse_quips(self._load_query("/quips.cgi?action=show")) + + # List of all r+'d bugs. 
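The pending-commit helpers that follow are normally driven through a Bugzilla instance; a minimal sketch of the intended call pattern (illustrative only; credentials and network access are assumed):

    queries = Bugzilla().queries
    # One request: the ids of every bug currently carrying a review+ patch.
    review_plus_bug_ids = queries.fetch_bug_ids_from_pending_commit_list()
    # One extra request per bug: the reviewed patches themselves.
    reviewed_patches = queries.fetch_patches_from_pending_commit_list()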
+ def fetch_bug_ids_from_pending_commit_list(self): + needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B" + return self._fetch_bug_ids_advanced_query(needs_commit_query_url) + + def fetch_bugs_matching_quicksearch(self, search_string): + # We may want to use a more explicit query than "quicksearch". + # If quicksearch changes we should probably change to use + # a normal buglist.cgi?query_format=advanced query. + quicksearch_url = "buglist.cgi?quicksearch=%s" % urllib.quote(search_string) + return self._fetch_bugs_from_advanced_query(quicksearch_url) + + # Currently this returns all bugs across all components. + # In the future we may wish to extend this API to construct more restricted searches. + def fetch_bugs_matching_search(self, search_string, author_email=None): + query = "buglist.cgi?query_format=advanced" + if search_string: + query += "&short_desc_type=allwordssubstr&short_desc=%s" % urllib.quote(search_string) + if author_email: + query += "&emailreporter1=1&emailtype1=substring&email1=%s" % urllib.quote(search_string) + return self._fetch_bugs_from_advanced_query(query) + + def fetch_patches_from_pending_commit_list(self): + return sum([self._fetch_bug(bug_id).reviewed_patches() + for bug_id in self.fetch_bug_ids_from_pending_commit_list()], []) + + def fetch_bug_ids_from_commit_queue(self): + commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed" + return self._fetch_bug_ids_advanced_query(commit_queue_url) + + def fetch_patches_from_commit_queue(self): + # This function will only return patches which have valid committers + # set. It won't reject patches with invalid committers/reviewers. + return sum([self._fetch_bug(bug_id).commit_queued_patches() + for bug_id in self.fetch_bug_ids_from_commit_queue()], []) + + def fetch_bug_ids_from_review_queue(self): + review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?" + return self._fetch_bug_ids_advanced_query(review_queue_url) + + # This method will make several requests to bugzilla. + def fetch_patches_from_review_queue(self, limit=None): + # [:None] returns the whole array. + return sum([self._fetch_bug(bug_id).unreviewed_patches() + for bug_id in self.fetch_bug_ids_from_review_queue()[:limit]], []) + + # NOTE: This is the only client of _fetch_attachment_ids_request_query + # This method only makes one request to bugzilla. + def fetch_attachment_ids_from_review_queue(self): + review_queue_url = "request.cgi?action=queue&type=review&group=type" + return self._fetch_attachment_ids_request_query(review_queue_url) + + +class Bugzilla(object): + + def __init__(self, dryrun=False, committers=committers.CommitterList()): + self.dryrun = dryrun + self.authenticated = False + self.queries = BugzillaQueries(self) + self.committers = committers + self.cached_quips = [] + + # FIXME: We should use some sort of Browser mock object when in dryrun + # mode (to prevent any mistakes). + self.browser = Browser() + # Ignore bugs.webkit.org/robots.txt until we fix it to allow this + # script. 
+ self.browser.set_handle_robots(False) + + # FIXME: Much of this should go into some sort of config module: + bug_server_host = "bugs.webkit.org" + bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host) + bug_server_url = "https://%s/" % bug_server_host + + def quips(self): + # We only fetch and parse the list of quips once per instantiation + # so that we do not burden bugs.webkit.org. + if not self.cached_quips and not self.dryrun: + self.cached_quips = self.queries.fetch_quips() + return self.cached_quips + + def bug_url_for_bug_id(self, bug_id, xml=False): + if not bug_id: + return None + content_type = "&ctype=xml" if xml else "" + return "%sshow_bug.cgi?id=%s%s" % (self.bug_server_url, bug_id, content_type) + + def short_bug_url_for_bug_id(self, bug_id): + if not bug_id: + return None + return "http://webkit.org/b/%s" % bug_id + + def add_attachment_url(self, bug_id): + return "%sattachment.cgi?action=enter&bugid=%s" % (self.bug_server_url, bug_id) + + def attachment_url_for_id(self, attachment_id, action="view"): + if not attachment_id: + return None + action_param = "" + if action and action != "view": + action_param = "&action=%s" % action + return "%sattachment.cgi?id=%s%s" % (self.bug_server_url, + attachment_id, + action_param) + + def _parse_attachment_flag(self, + element, + flag_name, + attachment, + result_key): + flag = element.find('flag', attrs={'name': flag_name}) + if flag: + attachment[flag_name] = flag['status'] + if flag['status'] == '+': + attachment[result_key] = flag['setter'] + # Sadly show_bug.cgi?ctype=xml does not expose the flag modification date. + + def _string_contents(self, soup): + # WebKit's bugzilla instance uses UTF-8. + # BeautifulSoup always returns Unicode strings, however + # the .string method returns a (unicode) NavigableString. + # NavigableString can confuse other parts of the code, so we + # convert from NavigableString to a real unicode() object using unicode(). + return unicode(soup.string) + + # Example: 2010-01-20 14:31 PST + # FIXME: Some bugzilla dates seem to have seconds in them? + # Python does not support timezones out of the box. + # Assume that bugzilla always uses PST (which is true for bugs.webkit.org) + _bugzilla_date_format = "%Y-%m-%d %H:%M" + + @classmethod + def _parse_date(cls, date_string): + (date, time, time_zone) = date_string.split(" ") + # Ignore the timezone because python doesn't understand timezones out of the box. + date_string = "%s %s" % (date, time) + return datetime.strptime(date_string, cls._bugzilla_date_format) + + def _date_contents(self, soup): + return self._parse_date(self._string_contents(soup)) + + def _parse_attachment_element(self, element, bug_id): + attachment = {} + attachment['bug_id'] = bug_id + attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1") + attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1") + attachment['id'] = int(element.find('attachid').string) + # FIXME: No need to parse out the url here. 
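For context on the line that follows, attachment_url_for_id (defined earlier in this class) builds URLs of the following shape; a sketch matching the values expected by the unit tests below:

    bugs = Bugzilla()
    assert bugs.attachment_url_for_id(33721) == "https://bugs.webkit.org/attachment.cgi?id=33721"
    assert bugs.attachment_url_for_id(33721, action="edit") == "https://bugs.webkit.org/attachment.cgi?id=33721&action=edit"
    assert bugs.attachment_url_for_id(None) is None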
+ attachment['url'] = self.attachment_url_for_id(attachment['id']) + attachment["attach_date"] = self._date_contents(element.find("date")) + attachment['name'] = self._string_contents(element.find('desc')) + attachment['attacher_email'] = self._string_contents(element.find('attacher')) + attachment['type'] = self._string_contents(element.find('type')) + self._parse_attachment_flag( + element, 'review', attachment, 'reviewer_email') + self._parse_attachment_flag( + element, 'commit-queue', attachment, 'committer_email') + return attachment + + def _parse_bugs_from_xml(self, page): + soup = BeautifulSoup(page) + # Without the unicode() call, BeautifulSoup occasionally complains of being + # passed None for no apparent reason. + return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')] + + def _parse_bug_dictionary_from_xml(self, page): + soup = BeautifulSoup(page) + bug = {} + bug["id"] = int(soup.find("bug_id").string) + bug["title"] = self._string_contents(soup.find("short_desc")) + bug["bug_status"] = self._string_contents(soup.find("bug_status")) + dup_id = soup.find("dup_id") + if dup_id: + bug["dup_id"] = self._string_contents(dup_id) + bug["reporter_email"] = self._string_contents(soup.find("reporter")) + bug["assigned_to_email"] = self._string_contents(soup.find("assigned_to")) + bug["cc_emails"] = [self._string_contents(element) for element in soup.findAll('cc')] + bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')] + return bug + + # Makes testing fetch_*_from_bug() possible until we have a better + # BugzillaNetwork abstration. + + def _fetch_bug_page(self, bug_id): + bug_url = self.bug_url_for_bug_id(bug_id, xml=True) + log("Fetching: %s" % bug_url) + return self.browser.open(bug_url) + + def fetch_bug_dictionary(self, bug_id): + try: + return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id)) + except KeyboardInterrupt: + raise + except: + self.authenticate() + return self._parse_bug_dictionary_from_xml(self._fetch_bug_page(bug_id)) + + # FIXME: A BugzillaCache object should provide all these fetch_ methods. + + def fetch_bug(self, bug_id): + return Bug(self.fetch_bug_dictionary(bug_id), self) + + def fetch_attachment_contents(self, attachment_id): + attachment_url = self.attachment_url_for_id(attachment_id) + # We need to authenticate to download patches from security bugs. + self.authenticate() + return self.browser.open(attachment_url).read() + + def _parse_bug_id_from_attachment_page(self, page): + # The "Up" relation happens to point to the bug. + up_link = BeautifulSoup(page).find('link', rel='Up') + if not up_link: + # This attachment does not exist (or you don't have permissions to + # view it). + return None + match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href']) + return int(match.group('bug_id')) + + def bug_id_for_attachment_id(self, attachment_id): + self.authenticate() + + attachment_url = self.attachment_url_for_id(attachment_id, 'edit') + log("Fetching: %s" % attachment_url) + page = self.browser.open(attachment_url) + return self._parse_bug_id_from_attachment_page(page) + + # FIXME: This should just return Attachment(id), which should be able to + # lazily fetch needed data. + + def fetch_attachment(self, attachment_id): + # We could grab all the attachment details off of the attachment edit + # page but we already have working code to do so off of the bugs page, + # so re-use that. 
+ bug_id = self.bug_id_for_attachment_id(attachment_id) + if not bug_id: + return None + attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True) + for attachment in attachments: + if attachment.id() == int(attachment_id): + return attachment + return None # This should never be hit. + + def authenticate(self): + if self.authenticated: + return + + if self.dryrun: + log("Skipping log in for dry run...") + self.authenticated = True + return + + credentials = Credentials(self.bug_server_host, git_prefix="bugzilla") + + attempts = 0 + while not self.authenticated: + attempts += 1 + username, password = credentials.read_credentials() + + log("Logging in as %s..." % username) + self.browser.open(self.bug_server_url + + "index.cgi?GoAheadAndLogIn=1") + self.browser.select_form(name="login") + self.browser['Bugzilla_login'] = username + self.browser['Bugzilla_password'] = password + response = self.browser.submit() + + match = re.search("<title>(.+?)</title>", response.read()) + # If the resulting page has a title, and it contains the word + # "invalid" assume it's the login failure page. + if match and re.search("Invalid", match.group(1), re.IGNORECASE): + errorMessage = "Bugzilla login failed: %s" % match.group(1) + # raise an exception only if this was the last attempt + if attempts < 5: + log(errorMessage) + else: + raise Exception(errorMessage) + else: + self.authenticated = True + self.username = username + + def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue): + if mark_for_landing: + return '+' + elif mark_for_commit_queue: + return '?' + return 'X' + + # FIXME: mark_for_commit_queue and mark_for_landing should be joined into a single commit_flag argument. + def _fill_attachment_form(self, + description, + file_object, + mark_for_review=False, + mark_for_commit_queue=False, + mark_for_landing=False, + is_patch=False, + filename=None, + mimetype=None): + self.browser['description'] = description + if is_patch: + self.browser['ispatch'] = ("1",) + # FIXME: Should this use self._find_select_element_for_flag? + self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',) + self.browser['flag_type-3'] = (self._commit_queue_flag(mark_for_landing, mark_for_commit_queue),) + + filename = filename or "%s.patch" % timestamp() + mimetype = mimetype or "text/plain" + self.browser.add_file(file_object, mimetype, filename, 'data') + + def _file_object_for_upload(self, file_or_string): + if hasattr(file_or_string, 'read'): + return file_or_string + # Only if file_or_string is not already encoded do we want to encode it. + if isinstance(file_or_string, unicode): + file_or_string = file_or_string.encode('utf-8') + return StringIO.StringIO(file_or_string) + + # timestamp argument is just for unittests. 
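The helper defined just below chooses an upload filename; a sketch of its two behaviours, mirroring test_filename_for_upload in the unit tests (the stand-in class is hypothetical):

    import StringIO

    bugs = Bugzilla()

    class _NamedUpload(object):  # hypothetical stand-in for a file opened from disk
        name = "foo"

    # A file-like object with a name keeps it; anything else gets a generated name.
    assert bugs._filename_for_upload(_NamedUpload(), 1234) == "foo"
    assert bugs._filename_for_upload(StringIO.StringIO("patch body"), 1234,
                                     extension="patch",
                                     timestamp=lambda: "now") == "bug-1234-now.patch"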
+ def _filename_for_upload(self, file_object, bug_id, extension="txt", timestamp=timestamp): + if hasattr(file_object, "name"): + return file_object.name + return "bug-%s-%s.%s" % (bug_id, timestamp(), extension) + + def add_attachment_to_bug(self, + bug_id, + file_or_string, + description, + filename=None, + comment_text=None): + self.authenticate() + log('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id))) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.add_attachment_url(bug_id)) + self.browser.select_form(name="entryform") + file_object = self._file_object_for_upload(file_or_string) + filename = filename or self._filename_for_upload(file_object, bug_id) + self._fill_attachment_form(description, file_object, filename=filename) + if comment_text: + log(comment_text) + self.browser['comment'] = comment_text + self.browser.submit() + + # FIXME: The arguments to this function should be simplified and then + # this should be merged into add_attachment_to_bug + def add_patch_to_bug(self, + bug_id, + file_or_string, + description, + comment_text=None, + mark_for_review=False, + mark_for_commit_queue=False, + mark_for_landing=False): + self.authenticate() + log('Adding patch "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id))) + + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.add_attachment_url(bug_id)) + self.browser.select_form(name="entryform") + file_object = self._file_object_for_upload(file_or_string) + filename = self._filename_for_upload(file_object, bug_id, extension="patch") + self._fill_attachment_form(description, + file_object, + mark_for_review=mark_for_review, + mark_for_commit_queue=mark_for_commit_queue, + mark_for_landing=mark_for_landing, + is_patch=True, + filename=filename) + if comment_text: + log(comment_text) + self.browser['comment'] = comment_text + self.browser.submit() + + # FIXME: There has to be a more concise way to write this method. + def _check_create_bug_response(self, response_html): + match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>", + response_html) + if match: + return match.group('bug_id') + + match = re.search( + '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">', + response_html, + re.DOTALL) + error_message = "FAIL" + if match: + text_lines = BeautifulSoup( + match.group('error_message')).findAll(text=True) + error_message = "\n" + '\n'.join( + [" " + line.strip() + for line in text_lines if line.strip()]) + raise Exception("Bug not created: %s" % error_message) + + def create_bug(self, + bug_title, + bug_description, + component=None, + diff=None, + patch_description=None, + cc=None, + blocked=None, + assignee=None, + mark_for_review=False, + mark_for_commit_queue=False): + self.authenticate() + + log('Creating bug with title "%s"' % bug_title) + if self.dryrun: + log(bug_description) + # FIXME: This will make some paths fail, as they assume this returns an id. 
+ return + + self.browser.open(self.bug_server_url + "enter_bug.cgi?product=WebKit") + self.browser.select_form(name="Create") + component_items = self.browser.find_control('component').items + component_names = map(lambda item: item.name, component_items) + if not component: + component = "New Bugs" + if component not in component_names: + component = User.prompt_with_list("Please pick a component:", component_names) + self.browser["component"] = [component] + if cc: + self.browser["cc"] = cc + if blocked: + self.browser["blocked"] = unicode(blocked) + if not assignee: + assignee = self.username + if assignee and not self.browser.find_control("assigned_to").disabled: + self.browser["assigned_to"] = assignee + self.browser["short_desc"] = bug_title + self.browser["comment"] = bug_description + + if diff: + # _fill_attachment_form expects a file-like object + # Patch files are already binary, so no encoding needed. + assert(isinstance(diff, str)) + patch_file_object = StringIO.StringIO(diff) + self._fill_attachment_form( + patch_description, + patch_file_object, + mark_for_review=mark_for_review, + mark_for_commit_queue=mark_for_commit_queue, + is_patch=True) + + response = self.browser.submit() + + bug_id = self._check_create_bug_response(response.read()) + log("Bug %s created." % bug_id) + log("%sshow_bug.cgi?id=%s" % (self.bug_server_url, bug_id)) + return bug_id + + def _find_select_element_for_flag(self, flag_name): + # FIXME: This will break if we ever re-order attachment flags + if flag_name == "review": + return self.browser.find_control(type='select', nr=0) + elif flag_name == "commit-queue": + return self.browser.find_control(type='select', nr=1) + raise Exception("Don't know how to find flag named \"%s\"" % flag_name) + + def clear_attachment_flags(self, + attachment_id, + additional_comment_text=None): + self.authenticate() + + comment_text = "Clearing flags on attachment: %s" % attachment_id + if additional_comment_text: + comment_text += "\n\n%s" % additional_comment_text + log(comment_text) + + if self.dryrun: + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + self.browser.set_value(comment_text, name='comment', nr=0) + self._find_select_element_for_flag('review').value = ("X",) + self._find_select_element_for_flag('commit-queue').value = ("X",) + self.browser.submit() + + def set_flag_on_attachment(self, + attachment_id, + flag_name, + flag_value, + comment_text=None, + additional_comment_text=None): + # FIXME: We need a way to test this function on a live bugzilla + # instance. + + self.authenticate() + + if additional_comment_text: + comment_text += "\n\n%s" % additional_comment_text + log(comment_text) + + if self.dryrun: + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + + if comment_text: + self.browser.set_value(comment_text, name='comment', nr=0) + + self._find_select_element_for_flag(flag_name).value = (flag_value,) + self.browser.submit() + + # FIXME: All of these bug editing methods have a ridiculous amount of + # copy/paste code. 
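One possible shape for the prologue that the editing methods below keep repeating (a sketch only, not part of this patch; the helper name is hypothetical):

    def _open_bug_changeform(self, bug_id):
        # Shared prologue of the CC/comment/close/reassign/reopen methods:
        # authenticate, load the bug page and select its edit form.
        self.authenticate()
        self.browser.open(self.bug_url_for_bug_id(bug_id))
        self.browser.select_form(name="changeform")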
+ + def obsolete_attachment(self, attachment_id, comment_text=None): + self.authenticate() + + log("Obsoleting attachment: %s" % attachment_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.attachment_url_for_id(attachment_id, 'edit')) + self.browser.select_form(nr=1) + self.browser.find_control('isobsolete').items[0].selected = True + # Also clear any review flag (to remove it from review/commit queues) + self._find_select_element_for_flag('review').value = ("X",) + self._find_select_element_for_flag('commit-queue').value = ("X",) + if comment_text: + log(comment_text) + # Bugzilla has two textareas named 'comment', one is somehow + # hidden. We want the first. + self.browser.set_value(comment_text, name='comment', nr=0) + self.browser.submit() + + def add_cc_to_bug(self, bug_id, email_address_list): + self.authenticate() + + log("Adding %s to the CC list for bug %s" % (email_address_list, + bug_id)) + if self.dryrun: + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + self.browser["newcc"] = ", ".join(email_address_list) + self.browser.submit() + + def post_comment_to_bug(self, bug_id, comment_text, cc=None): + self.authenticate() + + log("Adding comment to bug %s" % bug_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + self.browser["comment"] = comment_text + if cc: + self.browser["newcc"] = ", ".join(cc) + self.browser.submit() + + def close_bug_as_fixed(self, bug_id, comment_text=None): + self.authenticate() + + log("Closing bug %s as fixed" % bug_id) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + if comment_text: + self.browser['comment'] = comment_text + self.browser['bug_status'] = ['RESOLVED'] + self.browser['resolution'] = ['FIXED'] + self.browser.submit() + + def reassign_bug(self, bug_id, assignee, comment_text=None): + self.authenticate() + + log("Assigning bug %s to %s" % (bug_id, assignee)) + if self.dryrun: + log(comment_text) + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + if comment_text: + log(comment_text) + self.browser["comment"] = comment_text + self.browser["assigned_to"] = assignee + self.browser.submit() + + def reopen_bug(self, bug_id, comment_text): + self.authenticate() + + log("Re-opening bug %s" % bug_id) + # Bugzilla requires a comment when re-opening a bug, so we know it will + # never be None. + log(comment_text) + if self.dryrun: + return + + self.browser.open(self.bug_url_for_bug_id(bug_id)) + self.browser.select_form(name="changeform") + bug_status = self.browser.find_control("bug_status", type="select") + # This is a hack around the fact that ClientForm.ListControl seems to + # have no simpler way to ask if a control has an item named "REOPENED" + # without using exceptions for control flow. + possible_bug_statuses = map(lambda item: item.name, bug_status.items) + if "REOPENED" in possible_bug_statuses: + bug_status.value = ["REOPENED"] + # If the bug was never confirmed it will not have a "REOPENED" + # state, but only an "UNCONFIRMED" state. + elif "UNCONFIRMED" in possible_bug_statuses: + bug_status.value = ["UNCONFIRMED"] + else: + # FIXME: This logic is slightly backwards. We won't print this + # message if the bug is already open with state "UNCONFIRMED". 
+ log("Did not reopen bug %s, it appears to already be open with status %s." % (bug_id, bug_status.value)) + self.browser['comment'] = comment_text + self.browser.submit() diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py new file mode 100644 index 0000000..1d08ca5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py @@ -0,0 +1,392 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest +import datetime +import StringIO + +from .bugzilla import Bugzilla, BugzillaQueries, parse_bug_id + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.mocktool import MockBrowser +from webkitpy.thirdparty.mock import Mock +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup + + +class BugzillaTest(unittest.TestCase): + _example_attachment = ''' + <attachment + isobsolete="1" + ispatch="1" + isprivate="0" + > + <attachid>33721</attachid> + <date>2009-07-29 10:23 PDT</date> + <desc>Fixed whitespace issue</desc> + <filename>patch</filename> + <type>text/plain</type> + <size>9719</size> + <attacher>christian.plesner.hansen@gmail.com</attacher> + <flag name="review" + id="17931" + status="+" + setter="one@test.com" + /> + <flag name="commit-queue" + id="17932" + status="+" + setter="two@test.com" + /> + </attachment> +''' + _expected_example_attachment_parsing = { + 'attach_date': datetime.datetime(2009, 07, 29, 10, 23), + 'bug_id' : 100, + 'is_obsolete' : True, + 'is_patch' : True, + 'id' : 33721, + 'url' : "https://bugs.webkit.org/attachment.cgi?id=33721", + 'name' : "Fixed whitespace issue", + 'type' : "text/plain", + 'review' : '+', + 'reviewer_email' : 'one@test.com', + 'commit-queue' : '+', + 'committer_email' : 'two@test.com', + 'attacher_email' : 'christian.plesner.hansen@gmail.com', + } + + def test_url_creation(self): + # FIXME: These would be all better as doctests + bugs = Bugzilla() + self.assertEquals(None, bugs.bug_url_for_bug_id(None)) + self.assertEquals(None, bugs.short_bug_url_for_bug_id(None)) + self.assertEquals(None, bugs.attachment_url_for_id(None)) + + def test_parse_bug_id(self): + # FIXME: These would be all better as doctests + bugs = Bugzilla() + self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345")) + self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345")) + self.assertEquals(12345, parse_bug_id(bugs.short_bug_url_for_bug_id(12345))) + self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345))) + self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True))) + + # Our bug parser is super-fragile, but at least we're testing it. + self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345")) + self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345")) + + _bug_xml = """ + <bug> + <bug_id>32585</bug_id> + <creation_ts>2009-12-15 15:17 PST</creation_ts> + <short_desc>bug to test webkit-patch and commit-queue failures</short_desc> + <delta_ts>2009-12-27 21:04:50 PST</delta_ts> + <reporter_accessible>1</reporter_accessible> + <cclist_accessible>1</cclist_accessible> + <classification_id>1</classification_id> + <classification>Unclassified</classification> + <product>WebKit</product> + <component>Tools / Tests</component> + <version>528+ (Nightly build)</version> + <rep_platform>PC</rep_platform> + <op_sys>Mac OS X 10.5</op_sys> + <bug_status>NEW</bug_status> + <priority>P2</priority> + <bug_severity>Normal</bug_severity> + <target_milestone>---</target_milestone> + <everconfirmed>1</everconfirmed> + <reporter name="Eric Seidel">eric@webkit.org</reporter> + <assigned_to name="Nobody">webkit-unassigned@lists.webkit.org</assigned_to> + <cc>foo@bar.com</cc> + <cc>example@example.com</cc> + <long_desc isprivate="0"> + <who name="Eric Seidel">eric@webkit.org</who> + <bug_when>2009-12-15 15:17:28 PST</bug_when> + <thetext>bug to test webkit-patch and commit-queue failures + +Ignore this bug. 
Just for testing failure modes of webkit-patch and the commit-queue.</thetext> + </long_desc> + <attachment + isobsolete="0" + ispatch="1" + isprivate="0" + > + <attachid>45548</attachid> + <date>2009-12-27 23:51 PST</date> + <desc>Patch</desc> + <filename>bug-32585-20091228005112.patch</filename> + <type>text/plain</type> + <size>10882</size> + <attacher>mjs@apple.com</attacher> + + <token>1261988248-dc51409e9c421a4358f365fa8bec8357</token> + <data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09 +removed-because-it-was-really-long +ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg== +</data> + + <flag name="review" + id="27602" + status="?" + setter="mjs@apple.com" + /> + </attachment> + </bug> +""" + + _single_bug_xml = """ +<?xml version="1.0" encoding="UTF-8" standalone="yes" ?> +<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd"> +<bugzilla version="3.2.3" + urlbase="https://bugs.webkit.org/" + maintainer="admin@webkit.org" + exporter="eric@webkit.org" +> +%s +</bugzilla> +""" % _bug_xml + + _expected_example_bug_parsing = { + "id" : 32585, + "title" : u"bug to test webkit-patch and commit-queue failures", + "cc_emails" : ["foo@bar.com", "example@example.com"], + "reporter_email" : "eric@webkit.org", + "assigned_to_email" : "webkit-unassigned@lists.webkit.org", + "bug_status": "NEW", + "attachments" : [{ + "attach_date": datetime.datetime(2009, 12, 27, 23, 51), + 'name': u'Patch', + 'url' : "https://bugs.webkit.org/attachment.cgi?id=45548", + 'is_obsolete': False, + 'review': '?', + 'is_patch': True, + 'attacher_email': 'mjs@apple.com', + 'bug_id': 32585, + 'type': 'text/plain', + 'id': 45548 + }], + } + + # FIXME: This should move to a central location and be shared by more unit tests. + def _assert_dictionaries_equal(self, actual, expected): + # Make sure we aren't parsing more or less than we expect + self.assertEquals(sorted(actual.keys()), sorted(expected.keys())) + + for key, expected_value in expected.items(): + self.assertEquals(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value))) + + def test_parse_bug_dictionary_from_xml(self): + bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml) + self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing) + + _sample_multi_bug_xml = """ +<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="admin@webkit.org" exporter="eric@webkit.org"> + %s + %s +</bugzilla> +""" % (_bug_xml, _bug_xml) + + def test_parse_bugs_from_xml(self): + bugzilla = Bugzilla() + bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml) + self.assertEquals(len(bugs), 2) + self.assertEquals(bugs[0].id(), self._expected_example_bug_parsing['id']) + bugs = bugzilla._parse_bugs_from_xml("") + self.assertEquals(len(bugs), 0) + + # This could be combined into test_bug_parsing later if desired. 
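test_attachment_parsing below also exercises the date handling; the essential conversion performed by Bugzilla._parse_date is (illustrative only):

    import datetime
    # The timezone suffix is dropped; bugs.webkit.org is assumed to report Pacific time.
    assert Bugzilla._parse_date("2009-07-29 10:23 PDT") == datetime.datetime(2009, 7, 29, 10, 23)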
+ def test_attachment_parsing(self): + bugzilla = Bugzilla() + soup = BeautifulSoup(self._example_attachment) + attachment_element = soup.find("attachment") + attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id']) + self.assertTrue(attachment) + self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing) + + _sample_attachment_detail_page = """ +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" + "http://www.w3.org/TR/html4/loose.dtd"> +<html> + <head> + <title> + Attachment 41073 Details for Bug 27314</title> +<link rel="Top" href="https://bugs.webkit.org/"> + <link rel="Up" href="show_bug.cgi?id=27314"> +""" + + def test_attachment_detail_bug_parsing(self): + bugzilla = Bugzilla() + self.assertEquals(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page)) + + def test_add_cc_to_bug(self): + bugzilla = Bugzilla() + bugzilla.browser = MockBrowser() + bugzilla.authenticate = lambda: None + expected_stderr = "Adding ['adam@example.com'] to the CC list for bug 42\n" + OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["adam@example.com"]], expected_stderr=expected_stderr) + + def _mock_control_item(self, name): + mock_item = Mock() + mock_item.name = name + return mock_item + + def _mock_find_control(self, item_names=[], selected_index=0): + mock_control = Mock() + mock_control.items = [self._mock_control_item(name) for name in item_names] + mock_control.value = [item_names[selected_index]] if item_names else None + return lambda name, type: mock_control + + def _assert_reopen(self, item_names=None, selected_index=None, extra_stderr=None): + bugzilla = Bugzilla() + bugzilla.browser = MockBrowser() + bugzilla.authenticate = lambda: None + + mock_find_control = self._mock_find_control(item_names, selected_index) + bugzilla.browser.find_control = mock_find_control + expected_stderr = "Re-opening bug 42\n['comment']\n" + if extra_stderr: + expected_stderr += extra_stderr + OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_stderr=expected_stderr) + + def test_reopen_bug(self): + self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1) + self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1) + extra_stderr = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n" + self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_stderr=extra_stderr) + + def test_file_object_for_upload(self): + bugzilla = Bugzilla() + file_object = StringIO.StringIO() + unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!" 
+ utf8_tor = unicode_tor.encode("utf-8") + self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object) + self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor) + self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor) + + def test_filename_for_upload(self): + bugzilla = Bugzilla() + mock_file = Mock() + mock_file.name = "foo" + self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo') + mock_timestamp = lambda: "now" + filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp) + self.assertEqual(filename, "bug-1234-now.patch") + + +class BugzillaQueriesTest(unittest.TestCase): + _sample_request_page = """ +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" + "http://www.w3.org/TR/html4/loose.dtd"> +<html> + <head> + <title>Request Queue</title> + </head> +<body> + +<h3>Flag: review</h3> + <table class="requests" cellspacing="0" cellpadding="4" border="1"> + <tr> + <th>Requester</th> + <th>Requestee</th> + <th>Bug</th> + <th>Attachment</th> + <th>Created</th> + </tr> + <tr> + <td>Shinichiro Hamaji <hamaji@chromium.org></td> + <td></td> + <td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td> + <td><a href="attachment.cgi?id=40511&action=review"> +40511: Patch v0</a></td> + <td>2009-10-02 04:58 PST</td> + </tr> + <tr> + <td>Zan Dobersek <zandobersek@gmail.com></td> + <td></td> + <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td> + <td><a href="attachment.cgi?id=40722&action=review"> +40722: Media controls, the simple approach</a></td> + <td>2009-10-06 09:13 PST</td> + </tr> + <tr> + <td>Zan Dobersek <zandobersek@gmail.com></td> + <td></td> + <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td> + <td><a href="attachment.cgi?id=40723&action=review"> +40723: Adjust the media slider thumb size</a></td> + <td>2009-10-06 09:15 PST</td> + </tr> + </table> +</body> +</html> +""" + _sample_quip_page = u""" +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" + "http://www.w3.org/TR/html4/loose.dtd"> +<html> + <head> + <title>Bugzilla Quip System</title> + </head> + <body> + <h2> + + Existing quips: + </h2> + <ul> + <li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li> + <li>Good artists copy. Great artists steal. - Pablo Picasso</li> + <li>\u00e7gua mole em pedra dura, tanto bate at\u008e que fura.</li> + + </ul> + </body> +</html> +""" + + def _assert_result_count(self, queries, html, count): + self.assertEquals(queries._parse_result_count(html), count) + + def test_parse_result_count(self): + queries = BugzillaQueries(None) + # Pages with results, always list the count at least twice. 
+ self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314) + self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0) + self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1) + self.assertRaises(Exception, queries._parse_result_count, ['Invalid']) + + def test_request_page_parsing(self): + queries = BugzillaQueries(None) + self.assertEquals([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page)) + + def test_quip_page_parsing(self): + queries = BugzillaQueries(None) + expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00e7gua mole em pedra dura, tanto bate at\u008e que fura."] + self.assertEquals(expected_quips, queries._parse_quips(self._sample_quip_page)) + + def test_load_query(self): + queries = BugzillaQueries(Mock()) + queries._load_query("request.cgi?action=queue&type=review&group=type") diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py b/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py new file mode 100644 index 0000000..631ef6b --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/buildbot/__init__.py @@ -0,0 +1,5 @@ +# Required for Python to search this directory for module files + +# We only export public API here. +# It's unclear if Builder and Build need to be public. +from .buildbot import BuildBot, Builder, Build diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py new file mode 100644 index 0000000..3cb6da5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py @@ -0,0 +1,463 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# WebKit's Python module for interacting with WebKit's buildbot + +try: + import json +except ImportError: + # python 2.5 compatibility + import webkitpy.thirdparty.simplejson as json + +import operator +import re +import urllib +import urllib2 + +from webkitpy.common.net.failuremap import FailureMap +from webkitpy.common.net.layouttestresults import LayoutTestResults +from webkitpy.common.net.regressionwindow import RegressionWindow +from webkitpy.common.system.logutils import get_logger +from webkitpy.thirdparty.autoinstalled.mechanize import Browser +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup + +_log = get_logger(__file__) + + +class Builder(object): + def __init__(self, name, buildbot): + self._name = name + self._buildbot = buildbot + self._builds_cache = {} + self._revision_to_build_number = None + self._browser = Browser() + self._browser.set_handle_robots(False) # The builder pages are excluded by robots.txt + + def name(self): + return self._name + + def results_url(self): + return "http://%s/results/%s" % (self._buildbot.buildbot_host, self.url_encoded_name()) + + def url_encoded_name(self): + return urllib.quote(self._name) + + def url(self): + return "http://%s/builders/%s" % (self._buildbot.buildbot_host, self.url_encoded_name()) + + # This provides a single place to mock + def _fetch_build(self, build_number): + build_dictionary = self._buildbot._fetch_build_dictionary(self, build_number) + if not build_dictionary: + return None + return Build(self, + build_number=int(build_dictionary['number']), + revision=int(build_dictionary['sourceStamp']['revision']), + is_green=(build_dictionary['results'] == 0) # Undocumented, 0 seems to mean "pass" + ) + + def build(self, build_number): + if not build_number: + return None + cached_build = self._builds_cache.get(build_number) + if cached_build: + return cached_build + + build = self._fetch_build(build_number) + self._builds_cache[build_number] = build + return build + + def force_build(self, username="webkit-patch", comments=None): + def predicate(form): + try: + return form.find_control("username") + except Exception, e: + return False + self._browser.open(self.url()) + self._browser.select_form(predicate=predicate) + self._browser["username"] = username + if comments: + self._browser["comments"] = comments + return self._browser.submit() + + file_name_regexp = re.compile(r"r(?P<revision>\d+) \((?P<build_number>\d+)\)") + def _revision_and_build_for_filename(self, filename): + # Example: "r47483 (1)/" or "r47483 (1).zip" + match = self.file_name_regexp.match(filename) + return (int(match.group("revision")), int(match.group("build_number"))) + + def _fetch_revision_to_build_map(self): + # All _fetch requests go through _buildbot for easier mocking + # FIXME: This should use NetworkTransaction's 404 handling instead. + try: + # FIXME: This method is horribly slow due to the huge network load. + # FIXME: This is a poor way to do revision -> build mapping. + # Better would be to ask buildbot through some sort of API. + print "Loading revision/build list from %s." % self.results_url() + print "This may take a while..." + result_files = self._buildbot._fetch_twisted_directory_listing(self.results_url()) + except urllib2.HTTPError, error: + if error.code != 404: + raise + result_files = [] + + # This assumes there was only one build per revision, which is false but we don't care for now. 
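The dictionary built on the next line relies on the results-directory naming convention parsed by _revision_and_build_for_filename above; a small sketch of that mapping (the builder name is hypothetical):

    builder = Builder("Example Builder", BuildBot())
    assert builder._revision_and_build_for_filename("r47483 (1).zip") == (47483, 1)
    assert builder._revision_and_build_for_filename("r47483 (1)/") == (47483, 1)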
+ return dict([self._revision_and_build_for_filename(file_info["filename"]) for file_info in result_files]) + + def _revision_to_build_map(self): + if not self._revision_to_build_number: + self._revision_to_build_number = self._fetch_revision_to_build_map() + return self._revision_to_build_number + + def revision_build_pairs_with_results(self): + return self._revision_to_build_map().items() + + # This assumes there can be only one build per revision, which is false, but we don't care for now. + def build_for_revision(self, revision, allow_failed_lookups=False): + # NOTE: This lookup will fail if that exact revision was never built. + build_number = self._revision_to_build_map().get(int(revision)) + if not build_number: + return None + build = self.build(build_number) + if not build and allow_failed_lookups: + # Builds for old revisions with fail to lookup via buildbot's json api. + build = Build(self, + build_number=build_number, + revision=revision, + is_green=False, + ) + return build + + def find_regression_window(self, red_build, look_back_limit=30): + if not red_build or red_build.is_green(): + return RegressionWindow(None, None) + common_failures = None + current_build = red_build + build_after_current_build = None + look_back_count = 0 + while current_build: + if current_build.is_green(): + # current_build can't possibly have any failures in common + # with red_build because it's green. + break + results = current_build.layout_test_results() + # We treat a lack of results as if all the test failed. + # This occurs, for example, when we can't compile at all. + if results: + failures = set(results.failing_tests()) + if common_failures == None: + common_failures = failures + else: + common_failures = common_failures.intersection(failures) + if not common_failures: + # current_build doesn't have any failures in common with + # the red build we're worried about. We assume that any + # failures in current_build were due to flakiness. + break + look_back_count += 1 + if look_back_count > look_back_limit: + return RegressionWindow(None, current_build, failing_tests=common_failures) + build_after_current_build = current_build + current_build = current_build.previous_build() + # We must iterate at least once because red_build is red. + assert(build_after_current_build) + # Current build must either be green or have no failures in common + # with red build, so we've found our failure transition. + return RegressionWindow(current_build, build_after_current_build, failing_tests=common_failures) + + def find_blameworthy_regression_window(self, red_build_number, look_back_limit=30, avoid_flakey_tests=True): + red_build = self.build(red_build_number) + regression_window = self.find_regression_window(red_build, look_back_limit) + if not regression_window.build_before_failure(): + return None # We ran off the limit of our search + # If avoid_flakey_tests, require at least 2 bad builds before we + # suspect a real failure transition. 
+ if avoid_flakey_tests and regression_window.failing_build() == red_build: + return None + return regression_window + + +class Build(object): + def __init__(self, builder, build_number, revision, is_green): + self._builder = builder + self._number = build_number + self._revision = revision + self._is_green = is_green + self._layout_test_results = None + + @staticmethod + def build_url(builder, build_number): + return "%s/builds/%s" % (builder.url(), build_number) + + def url(self): + return self.build_url(self.builder(), self._number) + + def results_url(self): + results_directory = "r%s (%s)" % (self.revision(), self._number) + return "%s/%s" % (self._builder.results_url(), urllib.quote(results_directory)) + + def _fetch_results_html(self): + results_html = "%s/results.html" % (self.results_url()) + # FIXME: This should use NetworkTransaction's 404 handling instead. + try: + # It seems this can return None if the url redirects and then returns 404. + return urllib2.urlopen(results_html) + except urllib2.HTTPError, error: + if error.code != 404: + raise + + def layout_test_results(self): + if not self._layout_test_results: + # FIXME: This should cache that the result was a 404 and stop hitting the network. + self._layout_test_results = LayoutTestResults.results_from_string(self._fetch_results_html()) + return self._layout_test_results + + def builder(self): + return self._builder + + def revision(self): + return self._revision + + def is_green(self): + return self._is_green + + def previous_build(self): + # previous_build() allows callers to avoid assuming build numbers are sequential. + # They may not be sequential across all master changes, or when non-trunk builds are made. + return self._builder.build(self._number - 1) + + +class BuildBot(object): + # FIXME: This should move into some sort of webkit_config.py + default_host = "build.webkit.org" + + def __init__(self, host=default_host): + self.buildbot_host = host + self._builder_by_name = {} + + # If any core builder is red we should not be landing patches. Other + # builders should be added to this list once they are known to be + # reliable. + # See https://bugs.webkit.org/show_bug.cgi?id=33296 and related bugs. + self.core_builder_names_regexps = [ + "SnowLeopard.*Build", + "SnowLeopard.*\(Test", # Exclude WebKit2 for now. + "Leopard", + "Tiger", + "Windows.*Build", + "GTK.*32", + "GTK.*64.*Debug", # Disallow the 64-bit Release bot which is broken. + "Qt", + "Chromium.*Release$", + ] + + def _parse_last_build_cell(self, builder, cell): + status_link = cell.find('a') + if status_link: + # Will be either a revision number or a build number + revision_string = status_link.string + # If revision_string has non-digits assume it's not a revision number. + builder['built_revision'] = int(revision_string) \ + if not re.match('\D', revision_string) \ + else None + + # FIXME: We treat slave lost as green even though it is not to + # work around the Qts bot being on a broken internet connection. + # The real fix is https://bugs.webkit.org/show_bug.cgi?id=37099 + builder['is_green'] = not re.search('fail', cell.renderContents()) or \ + not not re.search('lost', cell.renderContents()) + + status_link_regexp = r"builders/(?P<builder_name>.*)/builds/(?P<build_number>\d+)" + link_match = re.match(status_link_regexp, status_link['href']) + builder['build_number'] = int(link_match.group("build_number")) + else: + # We failed to find a link in the first cell, just give up. 
This + # can happen if a builder is just-added, the first cell will just + # be "no build" + # Other parts of the code depend on is_green being present. + builder['is_green'] = False + builder['built_revision'] = None + builder['build_number'] = None + + def _parse_current_build_cell(self, builder, cell): + activity_lines = cell.renderContents().split("<br />") + builder["activity"] = activity_lines[0] # normally "building" or "idle" + # The middle lines document how long left for any current builds. + match = re.match("(?P<pending_builds>\d) pending", activity_lines[-1]) + builder["pending_builds"] = int(match.group("pending_builds")) if match else 0 + + def _parse_builder_status_from_row(self, status_row): + status_cells = status_row.findAll('td') + builder = {} + + # First cell is the name + name_link = status_cells[0].find('a') + builder["name"] = unicode(name_link.string) + + self._parse_last_build_cell(builder, status_cells[1]) + self._parse_current_build_cell(builder, status_cells[2]) + return builder + + def _matches_regexps(self, builder_name, name_regexps): + for name_regexp in name_regexps: + if re.match(name_regexp, builder_name): + return True + return False + + # FIXME: Should move onto Builder + def _is_core_builder(self, builder_name): + return self._matches_regexps(builder_name, self.core_builder_names_regexps) + + # FIXME: This method needs to die, but is used by a unit test at the moment. + def _builder_statuses_with_names_matching_regexps(self, builder_statuses, name_regexps): + return [builder for builder in builder_statuses if self._matches_regexps(builder["name"], name_regexps)] + + def red_core_builders(self): + return [builder for builder in self.core_builder_statuses() if not builder["is_green"]] + + def red_core_builders_names(self): + return [builder["name"] for builder in self.red_core_builders()] + + def idle_red_core_builders(self): + return [builder for builder in self.red_core_builders() if builder["activity"] == "idle"] + + def core_builders_are_green(self): + return not self.red_core_builders() + + # FIXME: These _fetch methods should move to a networking class. 
+ def _fetch_build_dictionary(self, builder, build_number): + try: + base = "http://%s" % self.buildbot_host + path = urllib.quote("json/builders/%s/builds/%s" % (builder.name(), + build_number)) + url = "%s/%s" % (base, path) + jsondata = urllib2.urlopen(url) + return json.load(jsondata) + except urllib2.URLError, err: + build_url = Build.build_url(builder, build_number) + _log.error("Error fetching data for %s build %s (%s): %s" % (builder.name(), build_number, build_url, err)) + return None + except ValueError, err: + build_url = Build.build_url(builder, build_number) + _log.error("Error decoding json data from %s: %s" % (build_url, err)) + return None + + def _fetch_one_box_per_builder(self): + build_status_url = "http://%s/one_box_per_builder" % self.buildbot_host + return urllib2.urlopen(build_status_url) + + def _file_cell_text(self, file_cell): + """Traverses down through firstChild elements until one containing a string is found, then returns that string""" + element = file_cell + while element.string is None and element.contents: + element = element.contents[0] + return element.string + + def _parse_twisted_file_row(self, file_row): + string_or_empty = lambda string: unicode(string) if string else u"" + file_cells = file_row.findAll('td') + return { + "filename": string_or_empty(self._file_cell_text(file_cells[0])), + "size": string_or_empty(self._file_cell_text(file_cells[1])), + "type": string_or_empty(self._file_cell_text(file_cells[2])), + "encoding": string_or_empty(self._file_cell_text(file_cells[3])), + } + + def _parse_twisted_directory_listing(self, page): + soup = BeautifulSoup(page) + # HACK: Match only table rows with a class to ignore twisted header/footer rows. + file_rows = soup.find('table').findAll('tr', {'class': re.compile(r'\b(?:directory|file)\b')}) + return [self._parse_twisted_file_row(file_row) for file_row in file_rows] + + # FIXME: There should be a better way to get this information directly from twisted. + def _fetch_twisted_directory_listing(self, url): + return self._parse_twisted_directory_listing(urllib2.urlopen(url)) + + def builders(self): + return [self.builder_with_name(status["name"]) for status in self.builder_statuses()] + + # This method pulls from /one_box_per_builder as an efficient way to get information about + def builder_statuses(self): + soup = BeautifulSoup(self._fetch_one_box_per_builder()) + return [self._parse_builder_status_from_row(status_row) for status_row in soup.find('table').findAll('tr')] + + def core_builder_statuses(self): + return [builder for builder in self.builder_statuses() if self._is_core_builder(builder["name"])] + + def builder_with_name(self, name): + builder = self._builder_by_name.get(name) + if not builder: + builder = Builder(name, self) + self._builder_by_name[name] = builder + return builder + + def failure_map(self, only_core_builders=True): + builder_statuses = self.core_builder_statuses() if only_core_builders else self.builder_statuses() + failure_map = FailureMap() + revision_to_failing_bots = {} + for builder_status in builder_statuses: + if builder_status["is_green"]: + continue + builder = self.builder_with_name(builder_status["name"]) + regression_window = builder.find_blameworthy_regression_window(builder_status["build_number"]) + if regression_window: + failure_map.add_regression_window(builder, regression_window) + return failure_map + + # This makes fewer requests than calling Builder.latest_build would. 
It grabs all builder + # statuses in one request using self.builder_statuses (fetching /one_box_per_builder instead of builder pages). + def _latest_builds_from_builders(self, only_core_builders=True): + builder_statuses = self.core_builder_statuses() if only_core_builders else self.builder_statuses() + return [self.builder_with_name(status["name"]).build(status["build_number"]) for status in builder_statuses] + + def _build_at_or_before_revision(self, build, revision): + while build: + if build.revision() <= revision: + return build + build = build.previous_build() + + def last_green_revision(self, only_core_builders=True): + builds = self._latest_builds_from_builders(only_core_builders) + target_revision = builds[0].revision() + # An alternate way to do this would be to start at one revision and walk backwards + # checking builder.build_for_revision, however build_for_revision is very slow on first load. + while True: + # Make builds agree on revision + builds = [self._build_at_or_before_revision(build, target_revision) for build in builds] + if None in builds: # One of the builds failed to load from the server. + return None + min_revision = min(map(lambda build: build.revision(), builds)) + if min_revision != target_revision: + target_revision = min_revision + continue # Builds don't all agree on revision, keep searching + # Check to make sure they're all green + all_are_green = reduce(operator.and_, map(lambda build: build.is_green(), builds)) + if not all_are_green: + target_revision -= 1 + continue + return min_revision diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py new file mode 100644 index 0000000..a10e432 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py @@ -0,0 +1,413 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
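
The Builder, Build and RegressionWindow classes introduced in buildbot.py above form a small API for walking a builder's history backwards until the failing tests stop overlapping. A minimal usage sketch (not part of this patch), assuming network access to build.webkit.org; the builder name is a placeholder and the status lookup is just one way to pick a recent build number:

    from webkitpy.common.net.buildbot import BuildBot

    buildbot = BuildBot()
    builder = buildbot.builder_with_name("Qt Linux Release")  # example name only
    # Pick this builder's most recent build number from the one_box_per_builder status page.
    status = [s for s in buildbot.builder_statuses() if s["name"] == builder.name()][0]
    window = builder.find_blameworthy_regression_window(status["build_number"])
    if window:
        print "Suspect revisions: %s" % window.revisions()
        print "Failing tests: %s" % window.failing_tests()

Note that find_blameworthy_regression_window() returns None when the search runs past look_back_limit, or when only a single red build implicates a possibly flaky test, so callers have to handle the None case.
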
+ +import unittest + +from webkitpy.common.net.layouttestresults import LayoutTestResults +from webkitpy.common.net.buildbot import BuildBot, Builder, Build +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup + + +class BuilderTest(unittest.TestCase): + def _install_fetch_build(self, failure): + def _mock_fetch_build(build_number): + build = Build( + builder=self.builder, + build_number=build_number, + revision=build_number + 1000, + is_green=build_number < 4 + ) + parsed_results = {LayoutTestResults.fail_key: failure(build_number)} + build._layout_test_results = LayoutTestResults(parsed_results) + return build + self.builder._fetch_build = _mock_fetch_build + + def setUp(self): + self.buildbot = BuildBot() + self.builder = Builder(u"Test Builder \u2661", self.buildbot) + self._install_fetch_build(lambda build_number: ["test1", "test2"]) + + def test_find_regression_window(self): + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure().revision(), 1003) + self.assertEqual(regression_window.failing_build().revision(), 1004) + + regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2) + self.assertEqual(regression_window.build_before_failure(), None) + self.assertEqual(regression_window.failing_build().revision(), 1008) + + def test_none_build(self): + self.builder._fetch_build = lambda build_number: None + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure(), None) + self.assertEqual(regression_window.failing_build(), None) + + def test_flaky_tests(self): + self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"]) + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure().revision(), 1009) + self.assertEqual(regression_window.failing_build().revision(), 1010) + + def test_failure_and_flaky(self): + self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"]) + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure().revision(), 1003) + self.assertEqual(regression_window.failing_build().revision(), 1004) + + def test_no_results(self): + self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number % 2 else ["test2"]) + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure().revision(), 1003) + self.assertEqual(regression_window.failing_build().revision(), 1004) + + def test_failure_after_flaky(self): + self._install_fetch_build(lambda build_number: ["test1", "test2"] if build_number > 6 else ["test3"]) + regression_window = self.builder.find_regression_window(self.builder.build(10)) + self.assertEqual(regression_window.build_before_failure().revision(), 1006) + self.assertEqual(regression_window.failing_build().revision(), 1007) + + def test_find_blameworthy_regression_window(self): + self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004]) + self.assertEqual(self.builder.find_blameworthy_regression_window(10, look_back_limit=2), None) + # Flakey test avoidance requires at least 2 red builds: + self.assertEqual(self.builder.find_blameworthy_regression_window(4), None) + 
self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004]) + # Green builder: + self.assertEqual(self.builder.find_blameworthy_regression_window(3), None) + + def test_build_caching(self): + self.assertEqual(self.builder.build(10), self.builder.build(10)) + + def test_build_and_revision_for_filename(self): + expectations = { + "r47483 (1)/" : (47483, 1), + "r47483 (1).zip" : (47483, 1), + } + for filename, revision_and_build in expectations.items(): + self.assertEqual(self.builder._revision_and_build_for_filename(filename), revision_and_build) + + +class BuildTest(unittest.TestCase): + def test_layout_test_results(self): + build = Build(None, None, None, None) + build._fetch_results_html = lambda: None + # Test that layout_test_results() returns None if the fetch fails. + self.assertEqual(build.layout_test_results(), None) + + +class BuildBotTest(unittest.TestCase): + + _example_one_box_status = ''' + <table> + <tr> + <td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td> + <td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td> + <td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td> + <tr> + <td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td> + <td class="LastBuild box" >no build</td> + <td align="center" class="Activity building">building<br />< 1 min</td> + <tr> + <td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td> + <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td> + <td align="center" class="Activity idle">idle<br />3 pending</td> + <tr> + <td class="box"><a href="builders/Qt%20Windows%2032-bit%20Debug">Qt Windows 32-bit Debug</a></td> + <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Windows%2032-bit%20Debug/builds/2090">60563</a><br />failed<br />failed<br />slave<br />lost</td> + <td align="center" class="Activity building">building<br />ETA in<br />~ 5 mins<br />at 08:25</td> + </table> +''' + _expected_example_one_box_parsings = [ + { + 'is_green': True, + 'build_number' : 3693, + 'name': u'Windows Debug (Tests)', + 'built_revision': 47380, + 'activity': 'building', + 'pending_builds': 0, + }, + { + 'is_green': False, + 'build_number' : None, + 'name': u'SnowLeopard Intel Release', + 'built_revision': None, + 'activity': 'building', + 'pending_builds': 0, + }, + { + 'is_green': False, + 'build_number' : 654, + 'name': u'Qt Linux Release', + 'built_revision': 47383, + 'activity': 'idle', + 'pending_builds': 3, + }, + { + 'is_green': True, + 'build_number' : 2090, + 'name': u'Qt Windows 32-bit Debug', + 'built_revision': 60563, + 'activity': 'building', + 'pending_builds': 0, + }, + ] + + def test_status_parsing(self): + buildbot = BuildBot() + + soup = BeautifulSoup(self._example_one_box_status) + status_table = soup.find("table") + input_rows = status_table.findAll('tr') + + for x in range(len(input_rows)): + status_row = input_rows[x] + expected_parsing = self._expected_example_one_box_parsings[x] + + builder = buildbot._parse_builder_status_from_row(status_row) + + # Make sure we aren't parsing more or less than we expect + self.assertEquals(builder.keys(), expected_parsing.keys()) + + for key, expected_value in expected_parsing.items(): + 
self.assertEquals(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value))) + + def test_core_builder_methods(self): + buildbot = BuildBot() + + # Override builder_statuses function to not touch the network. + def example_builder_statuses(): # We could use instancemethod() to bind 'self' but we don't need to. + return BuildBotTest._expected_example_one_box_parsings + buildbot.builder_statuses = example_builder_statuses + + buildbot.core_builder_names_regexps = [ 'Leopard', "Windows.*Build" ] + self.assertEquals(buildbot.red_core_builders_names(), []) + self.assertTrue(buildbot.core_builders_are_green()) + + buildbot.core_builder_names_regexps = [ 'SnowLeopard', 'Qt' ] + self.assertEquals(buildbot.red_core_builders_names(), [ u'SnowLeopard Intel Release', u'Qt Linux Release' ]) + self.assertFalse(buildbot.core_builders_are_green()) + + def test_builder_name_regexps(self): + buildbot = BuildBot() + + # For complete testing, this list should match the list of builders at build.webkit.org: + example_builders = [ + {'name': u'Tiger Intel Release', }, + {'name': u'Leopard Intel Release (Build)', }, + {'name': u'Leopard Intel Release (Tests)', }, + {'name': u'Leopard Intel Debug (Build)', }, + {'name': u'Leopard Intel Debug (Tests)', }, + {'name': u'SnowLeopard Intel Release (Build)', }, + {'name': u'SnowLeopard Intel Release (Tests)', }, + {'name': u'SnowLeopard Intel Release (WebKit2 Tests)', }, + {'name': u'SnowLeopard Intel Leaks', }, + {'name': u'Windows Release (Build)', }, + {'name': u'Windows Release (Tests)', }, + {'name': u'Windows Debug (Build)', }, + {'name': u'Windows Debug (Tests)', }, + {'name': u'GTK Linux 32-bit Release', }, + {'name': u'GTK Linux 32-bit Debug', }, + {'name': u'GTK Linux 64-bit Debug', }, + {'name': u'GTK Linux 64-bit Release', }, + {'name': u'Qt Linux Release', }, + {'name': u'Qt Linux Release minimal', }, + {'name': u'Qt Linux ARMv5 Release', }, + {'name': u'Qt Linux ARMv7 Release', }, + {'name': u'Qt Windows 32-bit Release', }, + {'name': u'Qt Windows 32-bit Debug', }, + {'name': u'Chromium Linux Release', }, + {'name': u'Chromium Mac Release', }, + {'name': u'Chromium Win Release', }, + {'name': u'Chromium Linux Release (Tests)', }, + {'name': u'Chromium Mac Release (Tests)', }, + {'name': u'Chromium Win Release (Tests)', }, + {'name': u'New run-webkit-tests', }, + ] + name_regexps = [ + "SnowLeopard.*Build", + "SnowLeopard.*\(Test", + "Leopard", + "Tiger", + "Windows.*Build", + "GTK.*32", + "GTK.*64.*Debug", # Disallow the 64-bit Release bot which is broken. 
+ "Qt", + "Chromium.*Release$", + ] + expected_builders = [ + {'name': u'Tiger Intel Release', }, + {'name': u'Leopard Intel Release (Build)', }, + {'name': u'Leopard Intel Release (Tests)', }, + {'name': u'Leopard Intel Debug (Build)', }, + {'name': u'Leopard Intel Debug (Tests)', }, + {'name': u'SnowLeopard Intel Release (Build)', }, + {'name': u'SnowLeopard Intel Release (Tests)', }, + {'name': u'Windows Release (Build)', }, + {'name': u'Windows Debug (Build)', }, + {'name': u'GTK Linux 32-bit Release', }, + {'name': u'GTK Linux 32-bit Debug', }, + {'name': u'GTK Linux 64-bit Debug', }, + {'name': u'Qt Linux Release', }, + {'name': u'Qt Linux Release minimal', }, + {'name': u'Qt Linux ARMv5 Release', }, + {'name': u'Qt Linux ARMv7 Release', }, + {'name': u'Qt Windows 32-bit Release', }, + {'name': u'Qt Windows 32-bit Debug', }, + {'name': u'Chromium Linux Release', }, + {'name': u'Chromium Mac Release', }, + {'name': u'Chromium Win Release', }, + ] + + # This test should probably be updated if the default regexp list changes + self.assertEquals(buildbot.core_builder_names_regexps, name_regexps) + + builders = buildbot._builder_statuses_with_names_matching_regexps(example_builders, name_regexps) + self.assertEquals(builders, expected_builders) + + def test_builder_with_name(self): + buildbot = BuildBot() + + builder = buildbot.builder_with_name("Test Builder") + self.assertEqual(builder.name(), "Test Builder") + self.assertEqual(builder.url(), "http://build.webkit.org/builders/Test%20Builder") + self.assertEqual(builder.url_encoded_name(), "Test%20Builder") + self.assertEqual(builder.results_url(), "http://build.webkit.org/results/Test%20Builder") + + # Override _fetch_build_dictionary function to not touch the network. + def mock_fetch_build_dictionary(self, build_number): + build_dictionary = { + "sourceStamp": { + "revision" : 2 * build_number, + }, + "number" : int(build_number), + "results" : build_number % 2, # 0 means pass + } + return build_dictionary + buildbot._fetch_build_dictionary = mock_fetch_build_dictionary + + build = builder.build(10) + self.assertEqual(build.builder(), builder) + self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10") + self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29") + self.assertEqual(build.revision(), 20) + self.assertEqual(build.is_green(), True) + + build = build.previous_build() + self.assertEqual(build.builder(), builder) + self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9") + self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29") + self.assertEqual(build.revision(), 18) + self.assertEqual(build.is_green(), False) + + self.assertEqual(builder.build(None), None) + + _example_directory_listing = ''' +<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1> + +<table> + <tr class="alt"> + <th>Filename</th> + <th>Size</th> + <th>Content type</th> + <th>Content encoding</th> + </tr> +<tr class="directory "> + <td><a href="r47483%20%281%29/"><b>r47483 (1)/</b></a></td> + <td><b></b></td> + <td><b>[Directory]</b></td> + <td><b></b></td> +</tr> +<tr class="file alt"> + <td><a href="r47484%20%282%29.zip">r47484 (2).zip</a></td> + <td>89K</td> + <td>[application/zip]</td> + <td></td> +</tr> +''' + _expected_files = [ + { + "filename" : "r47483 (1)/", + "size" : "", + "type" : "[Directory]", + "encoding" : "", + }, + { + "filename" : "r47484 (2).zip", + "size" : "89K", + "type" 
: "[application/zip]", + "encoding" : "", + }, + ] + + def test_parse_build_to_revision_map(self): + buildbot = BuildBot() + files = buildbot._parse_twisted_directory_listing(self._example_directory_listing) + self.assertEqual(self._expected_files, files) + + # Revision, is_green + # Ordered from newest (highest number) to oldest. + fake_builder1 = [ + [2, False], + [1, True], + ] + fake_builder2 = [ + [2, False], + [1, True], + ] + fake_builders = [ + fake_builder1, + fake_builder2, + ] + def _build_from_fake(self, fake_builder, index): + if index >= len(fake_builder): + return None + fake_build = fake_builder[index] + build = Build( + builder=fake_builder, + build_number=index, + revision=fake_build[0], + is_green=fake_build[1], + ) + def mock_previous_build(): + return self._build_from_fake(fake_builder, index + 1) + build.previous_build = mock_previous_build + return build + + def _fake_builds_at_index(self, index): + return [self._build_from_fake(builder, index) for builder in self.fake_builders] + + def test_last_green_revision(self): + buildbot = BuildBot() + def mock_builds_from_builders(only_core_builders): + return self._fake_builds_at_index(0) + buildbot._latest_builds_from_builders = mock_builds_from_builders + self.assertEqual(buildbot.last_green_revision(), 1) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/net/credentials.py b/Tools/Scripts/webkitpy/common/net/credentials.py new file mode 100644 index 0000000..30480b3 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/credentials.py @@ -0,0 +1,155 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Python module for reading stored web credentials from the OS. 
+ +import getpass +import os +import platform +import re + +from webkitpy.common.checkout.scm import Git +from webkitpy.common.system.executive import Executive, ScriptError +from webkitpy.common.system.user import User +from webkitpy.common.system.deprecated_logging import log + +try: + # Use keyring, a cross platform keyring interface, as a fallback: + # http://pypi.python.org/pypi/keyring + import keyring +except ImportError: + keyring = None + + +class Credentials(object): + _environ_prefix = "webkit_bugzilla_" + + def __init__(self, host, git_prefix=None, executive=None, cwd=os.getcwd(), + keyring=keyring): + self.host = host + self.git_prefix = "%s." % git_prefix if git_prefix else "" + self.executive = executive or Executive() + self.cwd = cwd + self._keyring = keyring + + def _credentials_from_git(self): + try: + if not Git.in_working_directory(self.cwd): + return (None, None) + return (Git.read_git_config(self.git_prefix + "username"), + Git.read_git_config(self.git_prefix + "password")) + except OSError, e: + # Catch and ignore OSError exceptions such as "no such file + # or directory" (OSError errno 2), which imply that the Git + # command cannot be found/is not installed. + pass + return (None, None) + + def _keychain_value_with_label(self, label, source_text): + match = re.search("%s\"(?P<value>.+)\"" % label, + source_text, + re.MULTILINE) + if match: + return match.group('value') + + def _is_mac_os_x(self): + return platform.mac_ver()[0] + + def _parse_security_tool_output(self, security_output): + username = self._keychain_value_with_label("^\s*\"acct\"<blob>=", + security_output) + password = self._keychain_value_with_label("^password: ", + security_output) + return [username, password] + + def _run_security_tool(self, username=None): + security_command = [ + "/usr/bin/security", + "find-internet-password", + "-g", + "-s", + self.host, + ] + if username: + security_command += ["-a", username] + + log("Reading Keychain for %s account and password. " + "Click \"Allow\" to continue..." % self.host) + try: + return self.executive.run_command(security_command) + except ScriptError: + # Failed to either find a keychain entry or somekind of OS-related + # error occured (for instance, couldn't find the /usr/sbin/security + # command). + log("Could not find a keychain entry for %s." % self.host) + return None + + def _credentials_from_keychain(self, username=None): + if not self._is_mac_os_x(): + return [username, None] + + security_output = self._run_security_tool(username) + if security_output: + return self._parse_security_tool_output(security_output) + else: + return [None, None] + + def _read_environ(self, key): + environ_key = self._environ_prefix + key + return os.environ.get(environ_key.upper()) + + def _credentials_from_environment(self): + return (self._read_environ("username"), self._read_environ("password")) + + def _offer_to_store_credentials_in_keyring(self, username, password): + if not self._keyring: + return + if not User().confirm("Store password in system keyring?", User.DEFAULT_NO): + return + self._keyring.set_password(self.host, username, password) + + def read_credentials(self): + username, password = self._credentials_from_environment() + # FIXME: We don't currently support pulling the username from one + # source and the password from a separate source. 
+ if not username or not password: + username, password = self._credentials_from_git() + if not username or not password: + username, password = self._credentials_from_keychain(username) + + if username and not password and self._keyring: + password = self._keyring.get_password(self.host, username) + + if not username: + username = User.prompt("%s login: " % self.host) + if not password: + password = getpass.getpass("%s password for %s: " % (self.host, username)) + self._offer_to_store_credentials_in_keyring(username, password) + + return (username, password) diff --git a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py new file mode 100644 index 0000000..6f2d909 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py @@ -0,0 +1,176 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import os +import tempfile +import unittest +from webkitpy.common.net.credentials import Credentials +from webkitpy.common.system.executive import Executive +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock + + +# FIXME: Other unit tests probably want this class. 
+class _TemporaryDirectory(object): + def __init__(self, **kwargs): + self._kwargs = kwargs + self._directory_path = None + + def __enter__(self): + self._directory_path = tempfile.mkdtemp(**self._kwargs) + return self._directory_path + + def __exit__(self, type, value, traceback): + os.rmdir(self._directory_path) + + +class CredentialsTest(unittest.TestCase): + example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain" +class: "inet" +attributes: + 0x00000007 <blob>="bugs.webkit.org (test@webkit.org)" + 0x00000008 <blob>=<NULL> + "acct"<blob>="test@webkit.org" + "atyp"<blob>="form" + "cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000" + "crtr"<uint32>=<NULL> + "cusi"<sint32>=<NULL> + "desc"<blob>="Web form password" + "icmt"<blob>="default" + "invi"<sint32>=<NULL> + "mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000" + "nega"<sint32>=<NULL> + "path"<blob>=<NULL> + "port"<uint32>=0x00000000 + "prot"<blob>=<NULL> + "ptcl"<uint32>="htps" + "scrp"<sint32>=<NULL> + "sdmn"<blob>=<NULL> + "srvr"<blob>="bugs.webkit.org" + "type"<uint32>=<NULL> +password: "SECRETSAUCE" +""" + + def test_keychain_lookup_on_non_mac(self): + class FakeCredentials(Credentials): + def _is_mac_os_x(self): + return False + credentials = FakeCredentials("bugs.webkit.org") + self.assertEqual(credentials._is_mac_os_x(), False) + self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None]) + + def test_security_output_parse(self): + credentials = Credentials("bugs.webkit.org") + self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"]) + + def test_security_output_parse_entry_not_found(self): + credentials = Credentials("foo.example.com") + if not credentials._is_mac_os_x(): + return # This test does not run on a non-Mac. + + # Note, we ignore the captured output because it is already covered + # by the test case CredentialsTest._assert_security_call (below). + outputCapture = OutputCapture() + outputCapture.capture_output() + self.assertEqual(credentials._run_security_tool(), None) + outputCapture.restore_output() + + def _assert_security_call(self, username=None): + executive_mock = Mock() + credentials = Credentials("example.com", executive=executive_mock) + + expected_stderr = "Reading Keychain for example.com account and password. 
Click \"Allow\" to continue...\n" + OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr) + + security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"] + if username: + security_args += ["-a", username] + executive_mock.run_command.assert_called_with(security_args) + + def test_security_calls(self): + self._assert_security_call() + self._assert_security_call(username="foo") + + def test_credentials_from_environment(self): + executive_mock = Mock() + credentials = Credentials("example.com", executive=executive_mock) + + saved_environ = os.environ.copy() + os.environ['WEBKIT_BUGZILLA_USERNAME'] = "foo" + os.environ['WEBKIT_BUGZILLA_PASSWORD'] = "bar" + username, password = credentials._credentials_from_environment() + self.assertEquals(username, "foo") + self.assertEquals(password, "bar") + os.environ = saved_environ + + def test_read_credentials_without_git_repo(self): + # FIXME: This should share more code with test_keyring_without_git_repo + class FakeCredentials(Credentials): + def _is_mac_os_x(self): + return True + + def _credentials_from_keychain(self, username): + return ("test@webkit.org", "SECRETSAUCE") + + def _credentials_from_environment(self): + return (None, None) + + with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path: + credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path) + # FIXME: Using read_credentials here seems too broad as higher-priority + # credential source could be affected by the user's environment. + self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "SECRETSAUCE")) + + + def test_keyring_without_git_repo(self): + # FIXME: This should share more code with test_read_credentials_without_git_repo + class MockKeyring(object): + def get_password(self, host, username): + return "NOMNOMNOM" + + class FakeCredentials(Credentials): + def _is_mac_os_x(self): + return True + + def _credentials_from_keychain(self, username): + return ("test@webkit.org", None) + + def _credentials_from_environment(self): + return (None, None) + + with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path: + credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring()) + # FIXME: Using read_credentials here seems too broad as higher-priority + # credential source could be affected by the user's environment. + self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM")) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/net/failuremap.py b/Tools/Scripts/webkitpy/common/net/failuremap.py new file mode 100644 index 0000000..48cd3e6 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/failuremap.py @@ -0,0 +1,85 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# FIXME: This probably belongs in the buildbot module.
+class FailureMap(object):
+    def __init__(self):
+        self._failures = []
+
+    def add_regression_window(self, builder, regression_window):
+        self._failures.append({
+            'builder': builder,
+            'regression_window': regression_window,
+        })
+
+    def is_empty(self):
+        return not self._failures
+
+    def failing_revisions(self):
+        failing_revisions = [failure_info['regression_window'].revisions()
+                             for failure_info in self._failures]
+        return sorted(set(sum(failing_revisions, [])))
+
+    def builders_failing_for(self, revision):
+        return self._builders_failing_because_of([revision])
+
+    def tests_failing_for(self, revision):
+        tests = [failure_info['regression_window'].failing_tests()
+                 for failure_info in self._failures
+                 if revision in failure_info['regression_window'].revisions()
+                 and failure_info['regression_window'].failing_tests()]
+        result = set()
+        for test in tests:
+            result = result.union(test)
+        return sorted(result)
+
+    def _old_failures(self, is_old_failure):
+        return filter(lambda revision: is_old_failure(revision),
+                      self.failing_revisions())
+
+    def _builders_failing_because_of(self, revisions):
+        revision_set = set(revisions)
+        return [failure_info['builder'] for failure_info in self._failures
+                if revision_set.intersection(
+                    failure_info['regression_window'].revisions())]
+
+    # FIXME: We should re-process old failures after some time delay.
+    # https://bugs.webkit.org/show_bug.cgi?id=36581
+    def filter_out_old_failures(self, is_old_failure):
+        old_failures = self._old_failures(is_old_failure)
+        old_failing_builder_names = set([builder.name()
+            for builder in self._builders_failing_because_of(old_failures)])
+
+        # We filter out all the failing builders that could have been caused
+        # by old_failures.  We could miss some new failures this way, but
+        # empirically, this reduces the amount of spam we generate.
+        failures = self._failures
+        self._failures = [failure_info for failure_info in failures
+            if failure_info['builder'].name() not in old_failing_builder_names]
+        self._cache = {}
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
new file mode 100644
index 0000000..2f0b49d
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.buildbot import Build +from webkitpy.common.net.failuremap import * +from webkitpy.common.net.regressionwindow import RegressionWindow +from webkitpy.tool.mocktool import MockBuilder + + +class FailureMapTest(unittest.TestCase): + builder1 = MockBuilder("Builder1") + builder2 = MockBuilder("Builder2") + + build1a = Build(builder1, build_number=22, revision=1233, is_green=True) + build1b = Build(builder1, build_number=23, revision=1234, is_green=False) + build2a = Build(builder2, build_number=89, revision=1233, is_green=True) + build2b = Build(builder2, build_number=90, revision=1235, is_green=False) + + regression_window1 = RegressionWindow(build1a, build1b, failing_tests=[u'test1', u'test1']) + regression_window2 = RegressionWindow(build2a, build2b, failing_tests=[u'test1']) + + def _make_failure_map(self): + failure_map = FailureMap() + failure_map.add_regression_window(self.builder1, self.regression_window1) + failure_map.add_regression_window(self.builder2, self.regression_window2) + return failure_map + + def test_failing_revisions(self): + failure_map = self._make_failure_map() + self.assertEquals(failure_map.failing_revisions(), [1234, 1235]) + + def test_new_failures(self): + failure_map = self._make_failure_map() + failure_map.filter_out_old_failures(lambda revision: False) + self.assertEquals(failure_map.failing_revisions(), [1234, 1235]) + + def test_new_failures_with_old_revisions(self): + failure_map = self._make_failure_map() + failure_map.filter_out_old_failures(lambda revision: revision == 1234) + self.assertEquals(failure_map.failing_revisions(), []) + + def test_new_failures_with_more_old_revisions(self): + failure_map = self._make_failure_map() + failure_map.filter_out_old_failures(lambda revision: revision == 1235) + self.assertEquals(failure_map.failing_revisions(), [1234]) + + def test_tests_failing_for(self): + failure_map = self._make_failure_map() + self.assertEquals(failure_map.tests_failing_for(1234), [u'test1']) diff --git a/Tools/Scripts/webkitpy/common/net/irc/__init__.py b/Tools/Scripts/webkitpy/common/net/irc/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/irc/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git 
a/Tools/Scripts/webkitpy/common/net/irc/ircbot.py b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py new file mode 100644 index 0000000..f742867 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py @@ -0,0 +1,91 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import webkitpy.common.config.irc as config_irc + +from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate +from webkitpy.thirdparty.autoinstalled.irc import ircbot +from webkitpy.thirdparty.autoinstalled.irc import irclib + + +class IRCBotDelegate(object): + def irc_message_received(self, nick, message): + raise NotImplementedError, "subclasses must implement" + + def irc_nickname(self): + raise NotImplementedError, "subclasses must implement" + + def irc_password(self): + raise NotImplementedError, "subclasses must implement" + + +class IRCBot(ircbot.SingleServerIRCBot, MessagePumpDelegate): + # FIXME: We should get this information from a config file. 
+ def __init__(self, + message_queue, + delegate): + self._message_queue = message_queue + self._delegate = delegate + ircbot.SingleServerIRCBot.__init__( + self, + [( + config_irc.server, + config_irc.port, + self._delegate.irc_password() + )], + self._delegate.irc_nickname(), + self._delegate.irc_nickname()) + self._channel = config_irc.channel + + # ircbot.SingleServerIRCBot methods + + def on_nicknameinuse(self, connection, event): + connection.nick(connection.get_nickname() + "_") + + def on_welcome(self, connection, event): + connection.join(self._channel) + self._message_pump = MessagePump(self, self._message_queue) + + def on_pubmsg(self, connection, event): + nick = irclib.nm_to_n(event.source()) + request = event.arguments()[0].split(":", 1) + if len(request) > 1 and irclib.irc_lower(request[0]) == irclib.irc_lower(self.connection.get_nickname()): + response = self._delegate.irc_message_received(nick, request[1]) + if response: + connection.privmsg(self._channel, response) + + # MessagePumpDelegate methods + + def schedule(self, interval, callback): + self.connection.execute_delayed(interval, callback) + + def message_available(self, message): + self.connection.privmsg(self._channel, message) + + def final_message_delivered(self): + self.die() diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py b/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py new file mode 100644 index 0000000..13348b4 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/irc/ircproxy.py @@ -0,0 +1,62 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import threading + +from webkitpy.common.net.irc.ircbot import IRCBot +from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue +from webkitpy.common.system.deprecated_logging import log + + +class _IRCThread(threading.Thread): + def __init__(self, message_queue, irc_delegate, irc_bot): + threading.Thread.__init__(self) + self.setDaemon(True) + self._message_queue = message_queue + self._irc_delegate = irc_delegate + self._irc_bot = irc_bot + + def run(self): + bot = self._irc_bot(self._message_queue, self._irc_delegate) + bot.start() + + +class IRCProxy(object): + def __init__(self, irc_delegate, irc_bot=IRCBot): + log("Connecting to IRC") + self._message_queue = ThreadedMessageQueue() + self._child_thread = _IRCThread(self._message_queue, irc_delegate, irc_bot) + self._child_thread.start() + + def post(self, message): + self._message_queue.post(message) + + def disconnect(self): + log("Disconnecting from IRC...") + self._message_queue.stop() + self._child_thread.join() diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py new file mode 100644 index 0000000..b44ce40 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py @@ -0,0 +1,43 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
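
A minimal sketch (not part of this patch) of wiring the IRCProxy above to a delegate. IRCBotDelegate in ircbot.py requires the three methods shown; the nickname and password values are placeholders, and the bot connects to the server and channel configured in webkitpy.common.config.irc:

    from webkitpy.common.net.irc.ircproxy import IRCProxy

    class ExampleDelegate(object):
        def irc_message_received(self, nick, message):
            return "%s: got it" % nick  # a truthy return value is posted back to the channel

        def irc_nickname(self):
            return "examplebot"

        def irc_password(self):
            return "example-password"

    proxy = IRCProxy(ExampleDelegate())
    proxy.post("status update from the example delegate")
    proxy.disconnect()  # stops the message queue and joins the IRC thread

post() is safe to call from the caller's thread because the ThreadedMessageQueue hands messages to the MessagePump running on the IRC thread.
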
+ +import unittest + +from webkitpy.common.net.irc.ircproxy import IRCProxy +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock + +class IRCProxyTest(unittest.TestCase): + def test_trivial(self): + def fun(): + proxy = IRCProxy(Mock(), Mock()) + proxy.post("hello") + proxy.disconnect() + + expected_stderr = "Connecting to IRC\nDisconnecting from IRC...\n" + OutputCapture().assert_outputs(self, fun, expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py new file mode 100644 index 0000000..15e95ce --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py @@ -0,0 +1,92 @@ +# Copyright (c) 2010, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# A module for parsing results.html files generated by old-run-webkit-tests + +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer + + +# FIXME: This should be unified with all the layout test results code in the layout_tests package +# This doesn't belong in common.net, but we don't have a better place for it yet. +def path_for_layout_test(test_name): + return "LayoutTests/%s" % test_name + + +# FIXME: This should be unified with all the layout test results code in the layout_tests package +# This doesn't belong in common.net, but we don't have a better place for it yet. 
+class LayoutTestResults(object): + """This class knows how to parse old-run-webkit-tests results.html files.""" + + stderr_key = u'Tests that had stderr output:' + fail_key = u'Tests where results did not match expected results:' + timeout_key = u'Tests that timed out:' + crash_key = u'Tests that caused the DumpRenderTree tool to crash:' + missing_key = u'Tests that had no expected results (probably new):' + + expected_keys = [ + stderr_key, + fail_key, + crash_key, + timeout_key, + missing_key, + ] + + @classmethod + def _parse_results_html(cls, page): + if not page: + return None + parsed_results = {} + tables = BeautifulSoup(page).findAll("table") + for table in tables: + table_title = unicode(table.findPreviousSibling("p").string) + if table_title not in cls.expected_keys: + # This Exception should only ever be hit if run-webkit-tests changes its results.html format. + raise Exception("Unhandled title: %s" % table_title) + # We might want to translate table titles into identifiers before storing. + parsed_results[table_title] = [unicode(row.find("a").string) for row in table.findAll("tr")] + + return parsed_results + + @classmethod + def results_from_string(cls, string): + parsed_results = cls._parse_results_html(string) + if not parsed_results: + return None + return cls(parsed_results) + + def __init__(self, parsed_results): + self._parsed_results = parsed_results + + def parsed_results(self): + return self._parsed_results + + def results_matching_keys(self, result_keys): + return sorted(sum([tests for key, tests in self._parsed_results.items() if key in result_keys], [])) + + def failing_tests(self): + return self.results_matching_keys([self.fail_key, self.crash_key, self.timeout_key]) diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py new file mode 100644 index 0000000..8490eae --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py @@ -0,0 +1,77 @@ +# Copyright (c) 2010, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.layouttestresults import LayoutTestResults + + +class LayoutTestResultsTest(unittest.TestCase): + _example_results_html = """ +<html> +<head> +<title>Layout Test Results</title> +</head> +<body> +<p>Tests that had stderr output:</p> +<table> +<tr> +<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td> +<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td> +</tr> +<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td> +<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td> +</tr> +</table><p>Tests that had no expected results (probably new):</p> +<table> +<tr> +<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td> +<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td> +</tr> +</table></body> +</html> +""" + + _expected_layout_test_results = { + 'Tests that had stderr output:': [ + 'accessibility/aria-activedescendant-crash.html', + ], + 'Tests that had no expected results (probably new):': [ + 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html', + ], + } + + def test_parse_layout_test_results(self): + results = LayoutTestResults._parse_results_html(self._example_results_html) + self.assertEqual(self._expected_layout_test_results, results) + + def test_results_from_string(self): + self.assertEqual(LayoutTestResults.results_from_string(None), None) + self.assertEqual(LayoutTestResults.results_from_string(""), None) + results = LayoutTestResults.results_from_string(self._example_results_html) + self.assertEqual(len(results.failing_tests()), 0) diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction.py b/Tools/Scripts/webkitpy/common/net/networktransaction.py new file mode 100644 index 0000000..de19e94 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/networktransaction.py @@ -0,0 +1,72 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import time + +from webkitpy.thirdparty.autoinstalled import mechanize +from webkitpy.common.system.deprecated_logging import log + + +_log = logging.getLogger(__name__) + + +class NetworkTimeout(Exception): + pass + + +class NetworkTransaction(object): + def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False): + self._initial_backoff_seconds = initial_backoff_seconds + self._grown_factor = grown_factor + self._timeout_seconds = timeout_seconds + self._convert_404_to_None = convert_404_to_None + + def run(self, request): + self._total_sleep = 0 + self._backoff_seconds = self._initial_backoff_seconds + while True: + try: + return request() + # FIXME: We should catch urllib2.HTTPError here too. + except mechanize.HTTPError, e: + if self._convert_404_to_None and e.code == 404: + return None + self._check_for_timeout() + _log.warn("Received HTTP status %s from server. Retrying in " + "%s seconds..." % (e.code, self._backoff_seconds)) + self._sleep() + + def _check_for_timeout(self): + if self._total_sleep + self._backoff_seconds > self._timeout_seconds: + raise NetworkTimeout() + + def _sleep(self): + time.sleep(self._backoff_seconds) + self._total_sleep += self._backoff_seconds + self._backoff_seconds *= self._grown_factor diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py new file mode 100644 index 0000000..49aaeed --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py @@ -0,0 +1,93 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout +from webkitpy.common.system.logtesting import LoggingTestCase +from webkitpy.thirdparty.autoinstalled.mechanize import HTTPError + + +class NetworkTransactionTest(LoggingTestCase): + exception = Exception("Test exception") + + def test_success(self): + transaction = NetworkTransaction() + self.assertEqual(transaction.run(lambda: 42), 42) + + def _raise_exception(self): + raise self.exception + + def test_exception(self): + transaction = NetworkTransaction() + did_process_exception = False + did_throw_exception = True + try: + transaction.run(lambda: self._raise_exception()) + did_throw_exception = False + except Exception, e: + did_process_exception = True + self.assertEqual(e, self.exception) + self.assertTrue(did_throw_exception) + self.assertTrue(did_process_exception) + + def _raise_500_error(self): + self._run_count += 1 + if self._run_count < 3: + raise HTTPError("http://example.com/", 500, "internal server error", None, None) + return 42 + + def _raise_404_error(self): + raise HTTPError("http://foo.com/", 404, "not found", None, None) + + def test_retry(self): + self._run_count = 0 + transaction = NetworkTransaction(initial_backoff_seconds=0) + self.assertEqual(transaction.run(lambda: self._raise_500_error()), 42) + self.assertEqual(self._run_count, 3) + self.assertLog(['WARNING: Received HTTP status 500 from server. ' + 'Retrying in 0 seconds...\n', + 'WARNING: Received HTTP status 500 from server. ' + 'Retrying in 0.0 seconds...\n']) + + def test_convert_404_to_None(self): + transaction = NetworkTransaction(convert_404_to_None=True) + self.assertEqual(transaction.run(lambda: self._raise_404_error()), None) + + def test_timeout(self): + self._run_count = 0 + transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60) + did_process_exception = False + did_throw_exception = True + try: + transaction.run(lambda: self._raise_500_error()) + did_throw_exception = False + except NetworkTimeout, e: + did_process_exception = True + self.assertTrue(did_throw_exception) + self.assertTrue(did_process_exception) diff --git a/Tools/Scripts/webkitpy/common/net/regressionwindow.py b/Tools/Scripts/webkitpy/common/net/regressionwindow.py new file mode 100644 index 0000000..3960ba2 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/regressionwindow.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +# FIXME: This probably belongs in the buildbot module. +class RegressionWindow(object): + def __init__(self, build_before_failure, failing_build, failing_tests=None): + self._build_before_failure = build_before_failure + self._failing_build = failing_build + self._failing_tests = failing_tests + self._revisions = None + + def build_before_failure(self): + return self._build_before_failure + + def failing_build(self): + return self._failing_build + + def failing_tests(self): + return self._failing_tests + + def revisions(self): + # Cache revisions to avoid excessive allocations. + if not self._revisions: + self._revisions = range(self._failing_build.revision(), self._build_before_failure.revision(), -1) + self._revisions.reverse() + return self._revisions diff --git a/Tools/Scripts/webkitpy/common/net/statusserver.py b/Tools/Scripts/webkitpy/common/net/statusserver.py new file mode 100644 index 0000000..64dd77b --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/statusserver.py @@ -0,0 +1,160 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.net.networktransaction import NetworkTransaction +from webkitpy.common.system.deprecated_logging import log +from webkitpy.thirdparty.autoinstalled.mechanize import Browser +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup + +import logging +import urllib2 + + +_log = logging.getLogger("webkitpy.common.net.statusserver") + + +class StatusServer: + default_host = "queues.webkit.org" + + def __init__(self, host=default_host, browser=None, bot_id=None): + self.set_host(host) + self._browser = browser or Browser() + self.set_bot_id(bot_id) + + def set_host(self, host): + self.host = host + self.url = "http://%s" % self.host + + def set_bot_id(self, bot_id): + self.bot_id = bot_id + + def results_url_for_status(self, status_id): + return "%s/results/%s" % (self.url, status_id) + + def _add_patch(self, patch): + if not patch: + return + if patch.bug_id(): + self._browser["bug_id"] = unicode(patch.bug_id()) + if patch.id(): + self._browser["patch_id"] = unicode(patch.id()) + + def _add_results_file(self, results_file): + if not results_file: + return + self._browser.add_file(results_file, "text/plain", "results.txt", 'results_file') + + def _post_status_to_server(self, queue_name, status, patch, results_file): + if results_file: + # We might need to re-wind the file if we've already tried to post it. + results_file.seek(0) + + update_status_url = "%s/update-status" % self.url + self._browser.open(update_status_url) + self._browser.select_form(name="update_status") + self._browser["queue_name"] = queue_name + if self.bot_id: + self._browser["bot_id"] = self.bot_id + self._add_patch(patch) + self._browser["status"] = status + self._add_results_file(results_file) + return self._browser.submit().read() # This is the id of the newly created status object. 
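The public methods further down wrap these raw form posts in a NetworkTransaction (added earlier in this patch) so transient HTTP errors are retried with backoff. A minimal sketch of that calling pattern, with the queue name and status text chosen purely for illustration:

    from webkitpy.common.net.statusserver import StatusServer

    server = StatusServer()  # defaults to queues.webkit.org
    # update_status() logs the status, posts the update_status form through a
    # NetworkTransaction, and returns the id of the newly created status object.
    status_id = server.update_status("commit-queue", "Processing patch")
    print server.results_url_for_status(status_id)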
+ + def _post_svn_revision_to_server(self, svn_revision_number, broken_bot): + update_svn_revision_url = "%s/update-svn-revision" % self.url + self._browser.open(update_svn_revision_url) + self._browser.select_form(name="update_svn_revision") + self._browser["number"] = unicode(svn_revision_number) + self._browser["broken_bot"] = broken_bot + return self._browser.submit().read() + + def _post_work_items_to_server(self, queue_name, work_items): + update_work_items_url = "%s/update-work-items" % self.url + self._browser.open(update_work_items_url) + self._browser.select_form(name="update_work_items") + self._browser["queue_name"] = queue_name + work_items = map(unicode, work_items) # .join expects strings + self._browser["work_items"] = " ".join(work_items) + return self._browser.submit().read() + + def _post_work_item_to_ews(self, attachment_id): + submit_to_ews_url = "%s/submit-to-ews" % self.url + self._browser.open(submit_to_ews_url) + self._browser.select_form(name="submit_to_ews") + self._browser["attachment_id"] = unicode(attachment_id) + self._browser.submit() + + def submit_to_ews(self, attachment_id): + _log.info("Submitting attachment %s to EWS queues" % attachment_id) + return NetworkTransaction().run(lambda: self._post_work_item_to_ews(attachment_id)) + + def next_work_item(self, queue_name): + _log.debug("Fetching next work item for %s" % queue_name) + patch_status_url = "%s/next-patch/%s" % (self.url, queue_name) + return self._fetch_url(patch_status_url) + + def _post_release_work_item(self, queue_name, patch): + release_patch_url = "%s/release-patch" % (self.url) + self._browser.open(release_patch_url) + self._browser.select_form(name="release_patch") + self._browser["queue_name"] = queue_name + self._browser["attachment_id"] = unicode(patch.id()) + self._browser.submit() + + def release_work_item(self, queue_name, patch): + _log.info("Releasing work item %s from %s" % (patch.id(), queue_name)) + return NetworkTransaction(convert_404_to_None=True).run(lambda: self._post_release_work_item(queue_name, patch)) + + def update_work_items(self, queue_name, work_items): + _log.debug("Recording work items: %s for %s" % (work_items, queue_name)) + return NetworkTransaction().run(lambda: self._post_work_items_to_server(queue_name, work_items)) + + def update_status(self, queue_name, status, patch=None, results_file=None): + log(status) + return NetworkTransaction().run(lambda: self._post_status_to_server(queue_name, status, patch, results_file)) + + def update_svn_revision(self, svn_revision_number, broken_bot): + log("SVN revision: %s broke %s" % (svn_revision_number, broken_bot)) + return NetworkTransaction().run(lambda: self._post_svn_revision_to_server(svn_revision_number, broken_bot)) + + def _fetch_url(self, url): + # FIXME: This should use NetworkTransaction's 404 handling instead. 
+ try: + return urllib2.urlopen(url).read() + except urllib2.HTTPError, e: + if e.code == 404: + return None + raise e + + def patch_status(self, queue_name, patch_id): + patch_status_url = "%s/patch-status/%s/%s" % (self.url, queue_name, patch_id) + return self._fetch_url(patch_status_url) + + def svn_revision(self, svn_revision_number): + svn_revision_url = "%s/svn-revision/%s" % (self.url, svn_revision_number) + return self._fetch_url(svn_revision_url) diff --git a/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py new file mode 100644 index 0000000..1169ba0 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py @@ -0,0 +1,43 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.statusserver import StatusServer +from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase +from webkitpy.tool.mocktool import MockBrowser + + +class StatusServerTest(OutputCaptureTestCaseBase): + def test_url_for_issue(self): + mock_browser = MockBrowser() + status_server = StatusServer(browser=mock_browser, bot_id='123') + status_server.update_status('queue name', 'the status') + self.assertEqual('queue name', mock_browser.params['queue_name']) + self.assertEqual('the status', mock_browser.params['status']) + self.assertEqual('123', mock_browser.params['bot_id']) diff --git a/Tools/Scripts/webkitpy/common/newstringio.py b/Tools/Scripts/webkitpy/common/newstringio.py new file mode 100644 index 0000000..f6d08ec --- /dev/null +++ b/Tools/Scripts/webkitpy/common/newstringio.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""'with'-compliant StringIO implementation.""" + +import StringIO + + +class StringIO(StringIO.StringIO): + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + pass diff --git a/Tools/Scripts/webkitpy/common/newstringio_unittest.py b/Tools/Scripts/webkitpy/common/newstringio_unittest.py new file mode 100644 index 0000000..5755c98 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/newstringio_unittest.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Unit tests for newstringio module.""" + +from __future__ import with_statement + +import unittest + +import newstringio + + +class NewStringIOTest(unittest.TestCase): + def test_with(self): + with newstringio.StringIO("foo") as f: + contents = f.read() + self.assertEqual(contents, "foo") + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/prettypatch.py b/Tools/Scripts/webkitpy/common/prettypatch.py new file mode 100644 index 0000000..e8a913a --- /dev/null +++ b/Tools/Scripts/webkitpy/common/prettypatch.py @@ -0,0 +1,67 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import tempfile + + +class PrettyPatch(object): + # FIXME: PrettyPatch should not require checkout_root. + def __init__(self, executive, checkout_root): + self._executive = executive + self._checkout_root = checkout_root + + def pretty_diff_file(self, diff): + # Diffs can contain multiple text files of different encodings + # so we always deal with them as byte arrays, not unicode strings. + assert(isinstance(diff, str)) + pretty_diff = self.pretty_diff(diff) + diff_file = tempfile.NamedTemporaryFile(suffix=".html") + diff_file.write(pretty_diff) + diff_file.flush() + return diff_file + + def pretty_diff(self, diff): + # pretify.rb will hang forever if given no input. + # Avoid the hang by returning an empty string. + if not diff: + return "" + + pretty_patch_path = os.path.join(self._checkout_root, + "Websites", "bugs.webkit.org", + "PrettyPatch") + prettify_path = os.path.join(pretty_patch_path, "prettify.rb") + args = [ + "ruby", + "-I", + pretty_patch_path, + prettify_path, + ] + # PrettyPatch does not modify the encoding of the diff output + # so we can't expect it to be utf-8. 
+ return self._executive.run_command(args, input=diff, decode_output=False) diff --git a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py new file mode 100644 index 0000000..1307856 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py @@ -0,0 +1,70 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os.path +import unittest + +from webkitpy.common.system.executive import Executive +from webkitpy.common.prettypatch import PrettyPatch + + +class PrettyPatchTest(unittest.TestCase): + + _diff_with_multiple_encodings = """ +Index: utf8_test +=================================================================== +--- utf8_test\t(revision 0) ++++ utf8_test\t(revision 0) +@@ -0,0 +1 @@ ++utf-8 test: \xc2\xa0 +Index: latin1_test +=================================================================== +--- latin1_test\t(revision 0) ++++ latin1_test\t(revision 0) +@@ -0,0 +1 @@ ++latin1 test: \xa0 +""" + + def _webkit_root(self): + webkitpy_common = os.path.dirname(__file__) + webkitpy = os.path.dirname(webkitpy_common) + scripts = os.path.dirname(webkitpy) + webkit_tools = os.path.dirname(scripts) + webkit_root = os.path.dirname(webkit_tools) + return webkit_root + + def test_pretty_diff_encodings(self): + pretty_patch = PrettyPatch(Executive(), self._webkit_root()) + pretty = pretty_patch.pretty_diff(self._diff_with_multiple_encodings) + self.assertTrue(pretty) # We got some output + self.assertTrue(isinstance(pretty, str)) # It's a byte array, not unicode + + def test_pretty_print_empty_string(self): + # Make sure that an empty diff does not hang the process. 
+ pretty_patch = PrettyPatch(Executive(), self._webkit_root()) + self.assertEqual(pretty_patch.pretty_diff(""), "") diff --git a/Tools/Scripts/webkitpy/common/system/__init__.py b/Tools/Scripts/webkitpy/common/system/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/common/system/autoinstall.py b/Tools/Scripts/webkitpy/common/system/autoinstall.py new file mode 100755 index 0000000..9adab29 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/autoinstall.py @@ -0,0 +1,517 @@ +# Copyright (c) 2009, Daniel Krech All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Daniel Krech nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Support for automatically downloading Python packages from an URL.""" + + +from __future__ import with_statement + +import codecs +import logging +import new +import os +import shutil +import sys +import tarfile +import tempfile +import urllib +import urlparse +import zipfile +import zipimport + +_log = logging.getLogger(__name__) + + +class AutoInstaller(object): + + """Supports automatically installing Python packages from an URL. + + Supports uncompressed files, .tar.gz, and .zip formats. + + Basic usage: + + installer = AutoInstaller() + + installer.install(url="http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b", + url_subpath="pep8-0.5.0/pep8.py") + installer.install(url="http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip", + url_subpath="mechanize") + + """ + + def __init__(self, append_to_search_path=False, make_package=True, + target_dir=None, temp_dir=None): + """Create an AutoInstaller instance, and set up the target directory. + + Args: + append_to_search_path: A boolean value of whether to append the + target directory to the sys.path search path. + make_package: A boolean value of whether to make the target + directory a package. 
This adds an __init__.py file + to the target directory -- allowing packages and + modules within the target directory to be imported + explicitly using dotted module names. + target_dir: The directory path to which packages should be installed. + Defaults to a subdirectory of the folder containing + this module called "autoinstalled". + temp_dir: The directory path to use for any temporary files + generated while downloading, unzipping, and extracting + packages to install. Defaults to a standard temporary + location generated by the tempfile module. This + parameter should normally be used only for development + testing. + + """ + if target_dir is None: + this_dir = os.path.dirname(__file__) + target_dir = os.path.join(this_dir, "autoinstalled") + + # Ensure that the target directory exists. + self._set_up_target_dir(target_dir, append_to_search_path, make_package) + + self._target_dir = target_dir + self._temp_dir = temp_dir + + def _log_transfer(self, message, source, target, log_method=None): + """Log a debug message that involves a source and target.""" + if log_method is None: + log_method = _log.debug + + log_method("%s" % message) + log_method(' From: "%s"' % source) + log_method(' To: "%s"' % target) + + def _create_directory(self, path, name=None): + """Create a directory.""" + log = _log.debug + + name = name + " " if name is not None else "" + log('Creating %sdirectory...' % name) + log(' "%s"' % path) + + os.makedirs(path) + + def _write_file(self, path, text, encoding): + """Create a file at the given path with given text. + + This method overwrites any existing file. + + """ + _log.debug("Creating file...") + _log.debug(' "%s"' % path) + with codecs.open(path, "w", encoding) as file: + file.write(text) + + def _set_up_target_dir(self, target_dir, append_to_search_path, + make_package): + """Set up a target directory. + + Args: + target_dir: The path to the target directory to set up. + append_to_search_path: A boolean value of whether to append the + target directory to the sys.path search path. + make_package: A boolean value of whether to make the target + directory a package. This adds an __init__.py file + to the target directory -- allowing packages and + modules within the target directory to be imported + explicitly using dotted module names. + + """ + if not os.path.exists(target_dir): + self._create_directory(target_dir, "autoinstall target") + + if append_to_search_path: + sys.path.append(target_dir) + + if make_package: + init_path = os.path.join(target_dir, "__init__.py") + if not os.path.exists(init_path): + text = ("# This file is required for Python to search this " + "directory for modules.\n") + self._write_file(init_path, text, "ascii") + + def _create_scratch_directory_inner(self, prefix): + """Create a scratch directory without exception handling. + + Creates a scratch directory inside the AutoInstaller temp + directory self._temp_dir, or inside a platform-dependent temp + directory if self._temp_dir is None. Returns the path to the + created scratch directory. + + Raises: + OSError: [Errno 2] if the containing temp directory self._temp_dir + is not None and does not exist. + + """ + # The tempfile.mkdtemp() method function requires that the + # directory corresponding to the "dir" parameter already exist + # if it is not None. + scratch_dir = tempfile.mkdtemp(prefix=prefix, dir=self._temp_dir) + return scratch_dir + + def _create_scratch_directory(self, target_name): + """Create a temporary scratch directory, and return its path. 
+ + The scratch directory is generated inside the temp directory + of this AutoInstaller instance. This method also creates the + temp directory if it does not already exist. + + """ + prefix = target_name + "_" + try: + scratch_dir = self._create_scratch_directory_inner(prefix) + except OSError: + # Handle case of containing temp directory not existing-- + # OSError: [Errno 2] No such file or directory:... + temp_dir = self._temp_dir + if temp_dir is None or os.path.exists(temp_dir): + raise + # Else try again after creating the temp directory. + self._create_directory(temp_dir, "autoinstall temp") + scratch_dir = self._create_scratch_directory_inner(prefix) + + return scratch_dir + + def _url_downloaded_path(self, target_name): + """Return the path to the file containing the URL downloaded.""" + filename = ".%s.url" % target_name + path = os.path.join(self._target_dir, filename) + return path + + def _is_downloaded(self, target_name, url): + """Return whether a package version has been downloaded.""" + version_path = self._url_downloaded_path(target_name) + + _log.debug('Checking %s URL downloaded...' % target_name) + _log.debug(' "%s"' % version_path) + + if not os.path.exists(version_path): + # Then no package version has been downloaded. + _log.debug("No URL file found.") + return False + + with codecs.open(version_path, "r", "utf-8") as file: + version = file.read() + + return version.strip() == url.strip() + + def _record_url_downloaded(self, target_name, url): + """Record the URL downloaded to a file.""" + version_path = self._url_downloaded_path(target_name) + _log.debug("Recording URL downloaded...") + _log.debug(' URL: "%s"' % url) + _log.debug(' To: "%s"' % version_path) + + self._write_file(version_path, url, "utf-8") + + def _extract_targz(self, path, scratch_dir): + # tarfile.extractall() extracts to a path without the + # trailing ".tar.gz". + target_basename = os.path.basename(path[:-len(".tar.gz")]) + target_path = os.path.join(scratch_dir, target_basename) + + self._log_transfer("Starting gunzip/extract...", path, target_path) + + try: + tar_file = tarfile.open(path) + except tarfile.ReadError, err: + # Append existing Error message to new Error. + message = ("Could not open tar file: %s\n" + " The file probably does not have the correct format.\n" + " --> Inner message: %s" + % (path, err)) + raise Exception(message) + + try: + # This is helpful for debugging purposes. + _log.debug("Listing tar file contents...") + for name in tar_file.getnames(): + _log.debug(' * "%s"' % name) + _log.debug("Extracting gzipped tar file...") + tar_file.extractall(target_path) + finally: + tar_file.close() + + return target_path + + # This is a replacement for ZipFile.extractall(), which is + # available in Python 2.6 but not in earlier versions. + def _extract_all(self, zip_file, target_dir): + self._log_transfer("Extracting zip file...", zip_file, target_dir) + + # This is helpful for debugging purposes. + _log.debug("Listing zip file contents...") + for name in zip_file.namelist(): + _log.debug(' * "%s"' % name) + + for name in zip_file.namelist(): + path = os.path.join(target_dir, name) + self._log_transfer("Extracting...", name, path) + + if not os.path.basename(path): + # Then the path ends in a slash, so it is a directory. + self._create_directory(path) + continue + # Otherwise, it is a file. + + try: + # We open this file w/o encoding, as we're reading/writing + # the raw byte-stream from the zip file. 
+ outfile = open(path, 'wb') + except IOError, err: + # Not all zip files seem to list the directories explicitly, + # so try again after creating the containing directory. + _log.debug("Got IOError: retrying after creating directory...") + dir = os.path.dirname(path) + self._create_directory(dir) + outfile = open(path, 'wb') + + try: + outfile.write(zip_file.read(name)) + finally: + outfile.close() + + def _unzip(self, path, scratch_dir): + # zipfile.extractall() extracts to a path without the + # trailing ".zip". + target_basename = os.path.basename(path[:-len(".zip")]) + target_path = os.path.join(scratch_dir, target_basename) + + self._log_transfer("Starting unzip...", path, target_path) + + try: + zip_file = zipfile.ZipFile(path, "r") + except zipfile.BadZipfile, err: + message = ("Could not open zip file: %s\n" + " --> Inner message: %s" + % (path, err)) + raise Exception(message) + + try: + self._extract_all(zip_file, scratch_dir) + finally: + zip_file.close() + + return target_path + + def _prepare_package(self, path, scratch_dir): + """Prepare a package for use, if necessary, and return the new path. + + For example, this method unzips zipped files and extracts + tar files. + + Args: + path: The path to the downloaded URL contents. + scratch_dir: The scratch directory. Note that the scratch + directory contains the file designated by the + path parameter. + + """ + # FIXME: Add other natural extensions. + if path.endswith(".zip"): + new_path = self._unzip(path, scratch_dir) + elif path.endswith(".tar.gz"): + new_path = self._extract_targz(path, scratch_dir) + else: + # No preparation is needed. + new_path = path + + return new_path + + def _download_to_stream(self, url, stream): + """Download an URL to a stream, and return the number of bytes.""" + try: + netstream = urllib.urlopen(url) + except IOError, err: + # Append existing Error message to new Error. + message = ('Could not download Python modules from URL "%s".\n' + " Make sure you are connected to the internet.\n" + " You must be connected to the internet when " + "downloading needed modules for the first time.\n" + " --> Inner message: %s" + % (url, err)) + raise IOError(message) + code = 200 + if hasattr(netstream, "getcode"): + code = netstream.getcode() + if not 200 <= code < 300: + raise ValueError("HTTP Error code %s" % code) + + BUFSIZE = 2**13 # 8KB + bytes = 0 + while True: + data = netstream.read(BUFSIZE) + if not data: + break + stream.write(data) + bytes += len(data) + netstream.close() + return bytes + + def _download(self, url, scratch_dir): + """Download URL contents, and return the download path.""" + url_path = urlparse.urlsplit(url)[2] + url_path = os.path.normpath(url_path) # Removes trailing slash. + target_filename = os.path.basename(url_path) + target_path = os.path.join(scratch_dir, target_filename) + + self._log_transfer("Starting download...", url, target_path) + + with open(target_path, "wb") as stream: + bytes = self._download_to_stream(url, stream) + + _log.debug("Downloaded %s bytes." % bytes) + + return target_path + + def _install(self, scratch_dir, package_name, target_path, url, + url_subpath): + """Install a python package from an URL. + + This internal method overwrites the target path if the target + path already exists. 
+ + """ + path = self._download(url=url, scratch_dir=scratch_dir) + path = self._prepare_package(path, scratch_dir) + + if url_subpath is None: + source_path = path + else: + source_path = os.path.join(path, url_subpath) + + if os.path.exists(target_path): + _log.debug('Refreshing install: deleting "%s".' % target_path) + if os.path.isdir(target_path): + shutil.rmtree(target_path) + else: + os.remove(target_path) + + self._log_transfer("Moving files into place...", source_path, target_path) + + # The shutil.move() command creates intermediate directories if they + # do not exist, but we do not rely on this behavior since we + # need to create the __init__.py file anyway. + shutil.move(source_path, target_path) + + self._record_url_downloaded(package_name, url) + + def install(self, url, should_refresh=False, target_name=None, + url_subpath=None): + """Install a python package from an URL. + + Args: + url: The URL from which to download the package. + + Optional Args: + should_refresh: A boolean value of whether the package should be + downloaded again if the package is already present. + target_name: The name of the folder or file in the autoinstaller + target directory at which the package should be + installed. Defaults to the base name of the + URL sub-path. This parameter must be provided if + the URL sub-path is not specified. + url_subpath: The relative path of the URL directory that should + be installed. Defaults to the full directory, or + the entire URL contents. + + """ + if target_name is None: + if not url_subpath: + raise ValueError('The "target_name" parameter must be ' + 'provided if the "url_subpath" parameter ' + "is not provided.") + # Remove any trailing slashes. + url_subpath = os.path.normpath(url_subpath) + target_name = os.path.basename(url_subpath) + + target_path = os.path.join(self._target_dir, target_name) + if not should_refresh and self._is_downloaded(target_name, url): + _log.debug('URL for %s already downloaded. Skipping...' + % target_name) + _log.debug(' "%s"' % url) + return + + self._log_transfer("Auto-installing package: %s" % target_name, + url, target_path, log_method=_log.info) + + # The scratch directory is where we will download and prepare + # files specific to this install until they are ready to move + # into place. + scratch_dir = self._create_scratch_directory(target_name) + + try: + self._install(package_name=target_name, + target_path=target_path, + scratch_dir=scratch_dir, + url=url, + url_subpath=url_subpath) + except Exception, err: + # Append existing Error message to new Error. + message = ("Error auto-installing the %s package to:\n" + ' "%s"\n' + " --> Inner message: %s" + % (target_name, target_path, err)) + raise Exception(message) + finally: + _log.debug('Cleaning up: deleting "%s".' % scratch_dir) + shutil.rmtree(scratch_dir) + _log.debug('Auto-installed %s to:' % target_name) + _log.debug(' "%s"' % target_path) + + +if __name__=="__main__": + + # Configure the autoinstall logger to log DEBUG messages for + # development testing purposes. + console = logging.StreamHandler() + + formatter = logging.Formatter('%(name)s: %(levelname)-8s %(message)s') + console.setFormatter(formatter) + _log.addHandler(console) + _log.setLevel(logging.DEBUG) + + # Use a more visible temp directory for debug purposes. 
+ this_dir = os.path.dirname(__file__) + target_dir = os.path.join(this_dir, "autoinstalled") + temp_dir = os.path.join(target_dir, "Temp") + + installer = AutoInstaller(target_dir=target_dir, + temp_dir=temp_dir) + + installer.install(should_refresh=False, + target_name="pep8.py", + url="http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b", + url_subpath="pep8-0.5.0/pep8.py") + installer.install(should_refresh=False, + target_name="mechanize", + url="http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip", + url_subpath="mechanize") + diff --git a/Tools/Scripts/webkitpy/common/system/deprecated_logging.py b/Tools/Scripts/webkitpy/common/system/deprecated_logging.py new file mode 100644 index 0000000..9e6b529 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/deprecated_logging.py @@ -0,0 +1,91 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# WebKit's Python module for logging +# This module is now deprecated in favor of python's built-in logging.py. + +import codecs +import os +import sys + + +def log(string): + print >> sys.stderr, string + + +def error(string): + log("ERROR: %s" % string) + exit(1) + + +# Simple class to split output between multiple destinations +class tee: + def __init__(self, *files): + self.files = files + + # Callers should pass an already encoded string for writing. 
+ def write(self, bytes): + for file in self.files: + file.write(bytes) + + +class OutputTee: + def __init__(self): + self._original_stdout = None + self._original_stderr = None + self._files_for_output = [] + + def add_log(self, path): + log_file = self._open_log_file(path) + self._files_for_output.append(log_file) + self._tee_outputs_to_files(self._files_for_output) + return log_file + + def remove_log(self, log_file): + self._files_for_output.remove(log_file) + self._tee_outputs_to_files(self._files_for_output) + log_file.close() + + @staticmethod + def _open_log_file(log_path): + (log_directory, log_name) = os.path.split(log_path) + if log_directory and not os.path.exists(log_directory): + os.makedirs(log_directory) + return codecs.open(log_path, "a+", "utf-8") + + def _tee_outputs_to_files(self, files): + if not self._original_stdout: + self._original_stdout = sys.stdout + self._original_stderr = sys.stderr + if files and len(files): + sys.stdout = tee(self._original_stdout, *files) + sys.stderr = tee(self._original_stderr, *files) + else: + sys.stdout = self._original_stdout + sys.stderr = self._original_stderr diff --git a/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py b/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py new file mode 100644 index 0000000..3778162 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/deprecated_logging_unittest.py @@ -0,0 +1,60 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
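The OutputTee class defined above swaps sys.stdout and sys.stderr for tee objects so that console output is also appended to one or more log files, and it restores the original streams once the last log is removed. A minimal usage sketch, with a made-up log path:

    from webkitpy.common.system.deprecated_logging import OutputTee

    output_tee = OutputTee()
    log_file = output_tee.add_log("/tmp/queue.log")  # hypothetical path
    print "written to the console and appended to /tmp/queue.log"
    output_tee.remove_log(log_file)  # closes the file and restores sys.stdout/sys.stderr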
+ +import os +import StringIO +import tempfile +import unittest + +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.system.deprecated_logging import * + +class LoggingTest(unittest.TestCase): + + def assert_log_equals(self, log_input, expected_output): + original_stderr = sys.stderr + test_stderr = StringIO.StringIO() + sys.stderr = test_stderr + + try: + log(log_input) + actual_output = test_stderr.getvalue() + finally: + sys.stderr = original_stderr + + self.assertEquals(actual_output, expected_output, "log(\"%s\") expected: %s actual: %s" % (log_input, expected_output, actual_output)) + + def test_log(self): + self.assert_log_equals("test", "test\n") + + # Test that log() does not throw an exception when passed an object instead of a string. + self.assert_log_equals(ScriptError(message="ScriptError"), "ScriptError\n") + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py new file mode 100644 index 0000000..85a683a --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/executive.py @@ -0,0 +1,399 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +try: + # This API exists only in Python 2.6 and higher. 
:( + import multiprocessing +except ImportError: + multiprocessing = None + +import ctypes +import errno +import logging +import os +import platform +import StringIO +import signal +import subprocess +import sys +import time + +from webkitpy.common.system.deprecated_logging import tee +from webkitpy.python24 import versioning + + +_log = logging.getLogger("webkitpy.common.system") + + +class ScriptError(Exception): + + def __init__(self, + message=None, + script_args=None, + exit_code=None, + output=None, + cwd=None): + if not message: + message = 'Failed to run "%s"' % script_args + if exit_code: + message += " exit_code: %d" % exit_code + if cwd: + message += " cwd: %s" % cwd + + Exception.__init__(self, message) + self.script_args = script_args # 'args' is already used by Exception + self.exit_code = exit_code + self.output = output + self.cwd = cwd + + def message_with_output(self, output_limit=500): + if self.output: + if output_limit and len(self.output) > output_limit: + return u"%s\nLast %s characters of output:\n%s" % \ + (self, output_limit, self.output[-output_limit:]) + return u"%s\n%s" % (self, self.output) + return unicode(self) + + def command_name(self): + command_path = self.script_args + if type(command_path) is list: + command_path = command_path[0] + return os.path.basename(command_path) + + +def run_command(*args, **kwargs): + # FIXME: This should not be a global static. + # New code should use Executive.run_command directly instead + return Executive().run_command(*args, **kwargs) + + +class Executive(object): + + def _should_close_fds(self): + # We need to pass close_fds=True to work around Python bug #2320 + # (otherwise we can hang when we kill DumpRenderTree when we are running + # multiple threads). See http://bugs.python.org/issue2320 . + # Note that close_fds isn't supported on Windows, but this bug only + # shows up on Mac and Linux. + return sys.platform not in ('win32', 'cygwin') + + def _run_command_with_teed_output(self, args, teed_output): + args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int()) + args = map(self._encode_argument_if_needed, args) + + child_process = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + close_fds=self._should_close_fds()) + + # Use our own custom wait loop because Popen ignores a tee'd + # stderr/stdout. + # FIXME: This could be improved not to flatten output to stdout. + while True: + output_line = child_process.stdout.readline() + if output_line == "" and child_process.poll() != None: + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + return child_process.poll() + # We assume that the child process wrote to us in utf-8, + # so no re-encoding is necessary before writing here. + teed_output.write(output_line) + + # FIXME: Remove this deprecated method and move callers to run_command. + # FIXME: This method is a hack to allow running command which both + # capture their output and print out to stdin. Useful for things + # like "build-webkit" where we want to display to the user that we're building + # but still have the output to stuff into a log file. + def run_and_throw_if_fail(self, args, quiet=False, decode_output=True): + # Cache the child's output locally so it can be used for error reports. + child_out_file = StringIO.StringIO() + tee_stdout = sys.stdout + if quiet: + dev_null = open(os.devnull, "w") # FIXME: Does this need an encoding? 
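+ # In quiet mode the visible copy of the output is routed to /dev/null below;
+ # child_out_file still captures everything so a failure can raise ScriptError
+ # with the full output.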
+ tee_stdout = dev_null + child_stdout = tee(child_out_file, tee_stdout) + exit_code = self._run_command_with_teed_output(args, child_stdout) + if quiet: + dev_null.close() + + child_output = child_out_file.getvalue() + child_out_file.close() + + if decode_output: + child_output = child_output.decode(self._child_process_encoding()) + + if exit_code: + raise ScriptError(script_args=args, + exit_code=exit_code, + output=child_output) + return child_output + + def cpu_count(self): + if multiprocessing: + return multiprocessing.cpu_count() + # Darn. We don't have the multiprocessing package. + system_name = platform.system() + if system_name == "Darwin": + return int(self.run_command(["sysctl", "-n", "hw.ncpu"])) + elif system_name == "Windows": + return int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) + elif system_name == "Linux": + num_cores = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(num_cores, int) and num_cores > 0: + return num_cores + # This quantity is a lie but probably a reasonable guess for modern + # machines. + return 2 + + def kill_process(self, pid): + """Attempts to kill the given pid. + Will fail silently if pid does not exist or insufficient permisssions.""" + if sys.platform == "win32": + # We only use taskkill.exe on windows (not cygwin) because subprocess.pid + # is a CYGWIN pid and taskkill.exe expects a windows pid. + # Thankfully os.kill on CYGWIN handles either pid type. + command = ["taskkill.exe", "/f", "/pid", pid] + # taskkill will exit 128 if the process is not found. We should log. + self.run_command(command, error_handler=self.ignore_error) + return + + # According to http://docs.python.org/library/os.html + # os.kill isn't available on Windows. python 2.5.5 os.kill appears + # to work in cygwin, however it occasionally raises EAGAIN. + retries_left = 10 if sys.platform == "cygwin" else 1 + while retries_left > 0: + try: + retries_left -= 1 + os.kill(pid, signal.SIGKILL) + except OSError, e: + if e.errno == errno.EAGAIN: + if retries_left <= 0: + _log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid) + continue + if e.errno == errno.ESRCH: # The process does not exist. 
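+ # The process has already exited; warn below and return instead of re-raising.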
+ _log.warn("Called kill_process with a non-existant pid %s" % pid) + return + raise + + def _win32_check_running_pid(self, pid): + + class PROCESSENTRY32(ctypes.Structure): + _fields_ = [("dwSize", ctypes.c_ulong), + ("cntUsage", ctypes.c_ulong), + ("th32ProcessID", ctypes.c_ulong), + ("th32DefaultHeapID", ctypes.c_ulong), + ("th32ModuleID", ctypes.c_ulong), + ("cntThreads", ctypes.c_ulong), + ("th32ParentProcessID", ctypes.c_ulong), + ("pcPriClassBase", ctypes.c_ulong), + ("dwFlags", ctypes.c_ulong), + ("szExeFile", ctypes.c_char * 260)] + + CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot + Process32First = ctypes.windll.kernel32.Process32First + Process32Next = ctypes.windll.kernel32.Process32Next + CloseHandle = ctypes.windll.kernel32.CloseHandle + TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number + hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + pe32 = PROCESSENTRY32() + pe32.dwSize = ctypes.sizeof(PROCESSENTRY32) + result = False + if not Process32First(hProcessSnap, ctypes.byref(pe32)): + _log.debug("Failed getting first process.") + CloseHandle(hProcessSnap) + return result + while True: + if pe32.th32ProcessID == pid: + result = True + break + if not Process32Next(hProcessSnap, ctypes.byref(pe32)): + break + CloseHandle(hProcessSnap) + return result + + def check_running_pid(self, pid): + """Return True if pid is alive, otherwise return False.""" + if sys.platform in ('darwin', 'linux2', 'cygwin'): + try: + os.kill(pid, 0) + return True + except OSError: + return False + elif sys.platform == 'win32': + return self._win32_check_running_pid(pid) + + assert(False) + + def _windows_image_name(self, process_name): + name, extension = os.path.splitext(process_name) + if not extension: + # taskkill expects processes to end in .exe + # If necessary we could add a flag to disable appending .exe. + process_name = "%s.exe" % name + return process_name + + def kill_all(self, process_name): + """Attempts to kill processes matching process_name. + Will fail silently if no process are found.""" + if sys.platform in ("win32", "cygwin"): + image_name = self._windows_image_name(process_name) + command = ["taskkill.exe", "/f", "/im", image_name] + # taskkill will exit 128 if the process is not found. We should log. + self.run_command(command, error_handler=self.ignore_error) + return + + # FIXME: This is inconsistent that kill_all uses TERM and kill_process + # uses KILL. Windows is always using /f (which seems like -KILL). + # We should pick one mode, or add support for switching between them. + # Note: Mac OS X 10.6 requires -SIGNALNAME before -u USER + command = ["killall", "-TERM", "-u", os.getenv("USER"), process_name] + # killall returns 1 if no process can be found and 2 on command error. + # FIXME: We should pass a custom error_handler to allow only exit_code 1. + # We should log in exit_code == 1 + self.run_command(command, error_handler=self.ignore_error) + + # Error handlers do not need to be static methods once all callers are + # updated to use an Executive object. + + @staticmethod + def default_error_handler(error): + raise error + + @staticmethod + def ignore_error(error): + pass + + def _compute_stdin(self, input): + """Returns (stdin, string_to_communicate)""" + # FIXME: We should be returning /dev/null for stdin + # or closing stdin after process creation to prevent + # child processes from getting input from the user. + if not input: + return (None, None) + if hasattr(input, "read"): # Check if the input is a file. 
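+ # A file-like object is handed to Popen as stdin directly; there is no
+ # string to send through communicate().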
+ return (input, None) # Assume the file is in the right encoding. + + # Popen in Python 2.5 and before does not automatically encode unicode objects. + # http://bugs.python.org/issue5290 + # See https://bugs.webkit.org/show_bug.cgi?id=37528 + # for an example of a regresion caused by passing a unicode string directly. + # FIXME: We may need to encode differently on different platforms. + if isinstance(input, unicode): + input = input.encode(self._child_process_encoding()) + return (subprocess.PIPE, input) + + def _command_for_printing(self, args): + """Returns a print-ready string representing command args. + The string should be copy/paste ready for execution in a shell.""" + escaped_args = [] + for arg in args: + if isinstance(arg, unicode): + # Escape any non-ascii characters for easy copy/paste + arg = arg.encode("unicode_escape") + # FIXME: Do we need to fix quotes here? + escaped_args.append(arg) + return " ".join(escaped_args) + + # FIXME: run_and_throw_if_fail should be merged into this method. + def run_command(self, + args, + cwd=None, + input=None, + error_handler=None, + return_exit_code=False, + return_stderr=True, + decode_output=True): + """Popen wrapper for convenience and to work around python bugs.""" + assert(isinstance(args, list) or isinstance(args, tuple)) + start_time = time.time() + args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int()) + args = map(self._encode_argument_if_needed, args) + + stdin, string_to_communicate = self._compute_stdin(input) + stderr = subprocess.STDOUT if return_stderr else None + + process = subprocess.Popen(args, + stdin=stdin, + stdout=subprocess.PIPE, + stderr=stderr, + cwd=cwd, + close_fds=self._should_close_fds()) + output = process.communicate(string_to_communicate)[0] + + # run_command automatically decodes to unicode() unless explicitly told not to. + if decode_output: + output = output.decode(self._child_process_encoding()) + + # wait() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + exit_code = process.wait() + + _log.debug('"%s" took %.2fs' % (self._command_for_printing(args), time.time() - start_time)) + + if return_exit_code: + return exit_code + + if exit_code: + script_error = ScriptError(script_args=args, + exit_code=exit_code, + output=output, + cwd=cwd) + (error_handler or self.default_error_handler)(script_error) + return output + + def _child_process_encoding(self): + # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW + # to launch subprocesses, so we have to encode arguments using the + # current code page. + if sys.platform == 'win32' and versioning.compare_version(sys, '3.0')[0] < 0: + return 'mbcs' + # All other platforms use UTF-8. + # FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands + # which will expect arguments to be encoded using the current code + # page. + return 'utf-8' + + def _should_encode_child_process_arguments(self): + # Cygwin's Python's os.execv doesn't support unicode command + # arguments, and neither does Cygwin's execv itself. + if sys.platform == 'cygwin': + return True + + # Win32 Python 2.x uses CreateProcessA rather than CreateProcessW + # to launch subprocesses, so we have to encode arguments using the + # current code page. 
+ if sys.platform == 'win32' and versioning.compare_version(sys, '3.0')[0] < 0: + return True + + return False + + def _encode_argument_if_needed(self, argument): + if not self._should_encode_child_process_arguments(): + return argument + return argument.encode(self._child_process_encoding()) diff --git a/Tools/Scripts/webkitpy/common/system/executive_mock.py b/Tools/Scripts/webkitpy/common/system/executive_mock.py new file mode 100644 index 0000000..c1cf999 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/executive_mock.py @@ -0,0 +1,59 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# FIXME: Implement the rest of the interface as needed for testing :). + +# FIXME: Unify with tool/mocktool.MockExecutive. + + +class MockExecutive2(object): + def __init__(self, output='', exit_code=0, exception=None, + run_command_fn=None): + self._output = output + self._exit_code = exit_code + self._exception = exception + self._run_command_fn = run_command_fn + + def cpu_count(self): + return 2 + + def kill_all(self, process_name): + pass + + def kill_process(self, pid): + pass + + def run_command(self, arg_list, return_exit_code=False, + decode_output=False): + if self._exception: + raise self._exception + if return_exit_code: + return self._exit_code + if self._run_command_fn: + return self._run_command_fn(arg_list) + return self._output diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py new file mode 100644 index 0000000..b8fd82e --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py @@ -0,0 +1,151 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import signal +import subprocess +import sys +import unittest + +from webkitpy.common.system.executive import Executive, run_command, ScriptError +from webkitpy.test import cat, echo + + +def never_ending_command(): + """Arguments for a command that will never end (useful for testing process + killing). It should be a process that is unlikely to already be running + because all instances will be killed.""" + if sys.platform == 'win32': + return ['wmic'] + return ['yes'] + + +class ExecutiveTest(unittest.TestCase): + + def test_run_command_with_bad_command(self): + def run_bad_command(): + run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True) + self.failUnlessRaises(OSError, run_bad_command) + + def test_run_command_args_type(self): + executive = Executive() + self.assertRaises(AssertionError, executive.run_command, "echo") + self.assertRaises(AssertionError, executive.run_command, u"echo") + executive.run_command(echo.command_arguments('foo')) + executive.run_command(tuple(echo.command_arguments('foo'))) + + def test_run_command_with_unicode(self): + """Validate that it is safe to pass unicode() objects + to Executive.run* methods, and they will return unicode() + objects by default unless decode_output=False""" + unicode_tor_input = u"WebKit \u2661 Tor Arne Vestb\u00F8!" + if sys.platform == 'win32': + encoding = 'mbcs' + else: + encoding = 'utf-8' + encoded_tor = unicode_tor_input.encode(encoding) + # On Windows, we expect the unicode->mbcs->unicode roundtrip to be + # lossy. On other platforms, we expect a lossless roundtrip. 
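+ # 'mbcs' substitutes characters outside the ANSI code page (such as
+ # u'\u2661'), so the decoded string differs from the input on Windows.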
+ if sys.platform == 'win32': + unicode_tor_output = encoded_tor.decode(encoding) + else: + unicode_tor_output = unicode_tor_input + + executive = Executive() + + output = executive.run_command(cat.command_arguments(), input=unicode_tor_input) + self.assertEquals(output, unicode_tor_output) + + output = executive.run_command(echo.command_arguments("-n", unicode_tor_input)) + self.assertEquals(output, unicode_tor_output) + + output = executive.run_command(echo.command_arguments("-n", unicode_tor_input), decode_output=False) + self.assertEquals(output, encoded_tor) + + # Make sure that str() input also works. + output = executive.run_command(cat.command_arguments(), input=encoded_tor, decode_output=False) + self.assertEquals(output, encoded_tor) + + # FIXME: We should only have one run* method to test + output = executive.run_and_throw_if_fail(echo.command_arguments("-n", unicode_tor_input), quiet=True) + self.assertEquals(output, unicode_tor_output) + + output = executive.run_and_throw_if_fail(echo.command_arguments("-n", unicode_tor_input), quiet=True, decode_output=False) + self.assertEquals(output, encoded_tor) + + def test_kill_process(self): + executive = Executive() + process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE) + self.assertEqual(process.poll(), None) # Process is running + executive.kill_process(process.pid) + # Note: Can't use a ternary since signal.SIGKILL is undefined for sys.platform == "win32" + if sys.platform == "win32": + expected_exit_code = 1 + else: + expected_exit_code = -signal.SIGKILL + self.assertEqual(process.wait(), expected_exit_code) + # Killing again should fail silently. + executive.kill_process(process.pid) + + def _assert_windows_image_name(self, name, expected_windows_name): + executive = Executive() + windows_name = executive._windows_image_name(name) + self.assertEqual(windows_name, expected_windows_name) + + def test_windows_image_name(self): + self._assert_windows_image_name("foo", "foo.exe") + self._assert_windows_image_name("foo.exe", "foo.exe") + self._assert_windows_image_name("foo.com", "foo.com") + # If the name looks like an extension, even if it isn't + # supposed to, we have no choice but to return the original name. + self._assert_windows_image_name("foo.baz", "foo.baz") + self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe") + + def test_kill_all(self): + executive = Executive() + # We use "yes" because it loops forever. + process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE) + self.assertEqual(process.poll(), None) # Process is running + executive.kill_all(never_ending_command()[0]) + # Note: Can't use a ternary since signal.SIGTERM is undefined for sys.platform == "win32" + if sys.platform == "cygwin": + expected_exit_code = 0 # os.kill results in exit(0) for this process. + elif sys.platform == "win32": + expected_exit_code = 1 + else: + expected_exit_code = -signal.SIGTERM + self.assertEqual(process.wait(), expected_exit_code) + # Killing again should fail silently. 
+ executive.kill_all(never_ending_command()[0]) + + def test_check_running_pid(self): + executive = Executive() + self.assertTrue(executive.check_running_pid(os.getpid())) + # Maximum pid number on Linux is 32768 by default + self.assertFalse(executive.check_running_pid(100000)) diff --git a/Tools/Scripts/webkitpy/common/system/file_lock.py b/Tools/Scripts/webkitpy/common/system/file_lock.py new file mode 100644 index 0000000..7296958 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/file_lock.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
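# A minimal illustrative sketch of the Executive/ScriptError API defined in
# executive.py above; the failing command is made up for the example:
from webkitpy.common.system.executive import Executive, ScriptError

executive = Executive()
try:
    # run_command() raises ScriptError (via default_error_handler) when the
    # child exits with a non-zero status.
    output = executive.run_command(["ls", "/no-such-example-path"])
except ScriptError, e:
    print e.message_with_output()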
+ +"""This class helps to lock files exclusively across processes.""" + +import logging +import os +import sys +import time + + +_log = logging.getLogger("webkitpy.common.system.file_lock") + + +class FileLock(object): + + def __init__(self, lock_file_path, max_wait_time_sec=20): + self._lock_file_path = lock_file_path + self._lock_file_descriptor = None + self._max_wait_time_sec = max_wait_time_sec + + def _create_lock(self): + if sys.platform in ('darwin', 'linux2', 'cygwin'): + import fcntl + fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB) + elif sys.platform == 'win32': + import msvcrt + msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_NBLCK, 32) + + def _remove_lock(self): + if sys.platform in ('darwin', 'linux2', 'cygwin'): + import fcntl + fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_UN) + elif sys.platform == 'win32': + import msvcrt + msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_UNLCK, 32) + + def acquire_lock(self): + self._lock_file_descriptor = os.open(self._lock_file_path, os.O_TRUNC | os.O_CREAT) + start_time = time.time() + while True: + try: + self._create_lock() + return True + except IOError: + if time.time() - start_time > self._max_wait_time_sec: + _log.debug("File locking failed: %s" % str(sys.exc_info())) + os.close(self._lock_file_descriptor) + self._lock_file_descriptor = None + return False + + def release_lock(self): + try: + if self._lock_file_descriptor: + self._remove_lock() + os.close(self._lock_file_descriptor) + self._lock_file_descriptor = None + os.unlink(self._lock_file_path) + except (IOError, OSError): + _log.debug("Warning in release lock: %s" % str(sys.exc_info())) diff --git a/Tools/Scripts/webkitpy/common/system/file_lock_unittest.py b/Tools/Scripts/webkitpy/common/system/file_lock_unittest.py new file mode 100644 index 0000000..c5c1db3 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/file_lock_unittest.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
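# A minimal usage sketch of the FileLock class defined in file_lock.py above
# (the lock path below is illustrative):
from webkitpy.common.system.file_lock import FileLock

lock = FileLock("/tmp/example-webkitpy.lock", max_wait_time_sec=5)
if lock.acquire_lock():
    try:
        pass  # work that must not run concurrently with other processes
    finally:
        lock.release_lock()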
+ +import os +import tempfile +import unittest + +from webkitpy.common.system.file_lock import FileLock + + +class FileLockTest(unittest.TestCase): + + def setUp(self): + self._lock_name = "TestWebKit" + str(os.getpid()) + ".lock" + self._lock_path = os.path.join(tempfile.gettempdir(), self._lock_name) + self._file_lock1 = FileLock(self._lock_path, 1) + self._file_lock2 = FileLock(self._lock_path, 1) + + def tearDown(self): + self._file_lock1.release_lock() + self._file_lock2.release_lock() + + def test_lock_lifecycle(self): + # Create the lock. + self._file_lock1.acquire_lock() + self.assertTrue(os.path.exists(self._lock_path)) + + # Try to lock again. + self.assertFalse(self._file_lock2.acquire_lock()) + + # Release the lock. + self._file_lock1.release_lock() + self.assertFalse(os.path.exists(self._lock_path)) + + def test_stuck_lock(self): + open(self._lock_path, 'w').close() + self._file_lock1.acquire_lock() + self._file_lock1.release_lock() diff --git a/Tools/Scripts/webkitpy/common/system/filesystem.py b/Tools/Scripts/webkitpy/common/system/filesystem.py new file mode 100644 index 0000000..f0b5e44 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/filesystem.py @@ -0,0 +1,154 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Wrapper object for the file system / source tree.""" + +from __future__ import with_statement + +import codecs +import errno +import exceptions +import os +import shutil +import tempfile +import time + +class FileSystem(object): + """FileSystem interface for webkitpy. 
+ + Unless otherwise noted, all paths are allowed to be either absolute + or relative.""" + + def exists(self, path): + """Return whether the path exists in the filesystem.""" + return os.path.exists(path) + + def isfile(self, path): + """Return whether the path refers to a file.""" + return os.path.isfile(path) + + def isdir(self, path): + """Return whether the path refers to a directory.""" + return os.path.isdir(path) + + def join(self, *comps): + """Return the path formed by joining the components.""" + return os.path.join(*comps) + + def listdir(self, path): + """Return the contents of the directory pointed to by path.""" + return os.listdir(path) + + def mkdtemp(self, **kwargs): + """Create and return a uniquely named directory. + + This is like tempfile.mkdtemp, but if used in a with statement + the directory will self-delete at the end of the block (if the + directory is empty; non-empty directories raise errors). The + directory can be safely deleted inside the block as well, if so + desired.""" + class TemporaryDirectory(object): + def __init__(self, **kwargs): + self._kwargs = kwargs + self._directory_path = None + + def __enter__(self): + self._directory_path = tempfile.mkdtemp(**self._kwargs) + return self._directory_path + + def __exit__(self, type, value, traceback): + # Only self-delete if necessary. + + # FIXME: Should we delete non-empty directories? + if os.path.exists(self._directory_path): + os.rmdir(self._directory_path) + + return TemporaryDirectory(**kwargs) + + def maybe_make_directory(self, *path): + """Create the specified directory if it doesn't already exist.""" + try: + os.makedirs(os.path.join(*path)) + except OSError, e: + if e.errno != errno.EEXIST: + raise + + class _WindowsError(exceptions.OSError): + """Fake exception for Linux and Mac.""" + pass + + def remove(self, path, osremove=os.remove): + """On Windows, if a process was recently killed and it held on to a + file, the OS will hold on to the file for a short while. This makes + attempts to delete the file fail. To work around that, this method + will retry for a few seconds until Windows is done with the file.""" + try: + exceptions.WindowsError + except AttributeError: + exceptions.WindowsError = FileSystem._WindowsError + + retry_timeout_sec = 3.0 + sleep_interval = 0.1 + while True: + try: + osremove(path) + return True + except exceptions.WindowsError, e: + time.sleep(sleep_interval) + retry_timeout_sec -= sleep_interval + if retry_timeout_sec < 0: + raise e + + def read_binary_file(self, path): + """Return the contents of the file at the given path as a byte string.""" + with file(path, 'rb') as f: + return f.read() + + def read_text_file(self, path): + """Return the contents of the file at the given path as a Unicode string. + + The file is read assuming it is a UTF-8 encoded file with no BOM.""" + with codecs.open(path, 'r', 'utf8') as f: + return f.read() + + def write_binary_file(self, path, contents): + """Write the contents to the file at the given location.""" + with file(path, 'wb') as f: + f.write(contents) + + def write_text_file(self, path, contents): + """Write the contents to the file at the given location. 
+ + The file is written encoded as UTF-8 with no BOM.""" + with codecs.open(path, 'w', 'utf8') as f: + f.write(contents) + + def copyfile(self, source, destination): + """Copies the contents of the file at the given path to the destination + path.""" + shutil.copyfile(source, destination) diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py new file mode 100644 index 0000000..ea0f3f9 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py @@ -0,0 +1,109 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import errno +import os +import path + + +class MockFileSystem(object): + def __init__(self, files=None): + """Initializes a "mock" filesystem that can be used to completely + stub out a filesystem. + + Args: + files: a dict of filenames -> file contents. A file contents + value of None is used to indicate that the file should + not exist. + """ + self.files = files or {} + + def exists(self, path): + return self.isfile(path) or self.isdir(path) + + def isfile(self, path): + return path in self.files and self.files[path] is not None + + def isdir(self, path): + if path in self.files: + return False + if not path.endswith('/'): + path += '/' + return any(f.startswith(path) for f in self.files) + + def join(self, *comps): + return '/'.join(comps) + + def listdir(self, path): + if not self.isdir(path): + raise OSError("%s is not a directory" % path) + + if not path.endswith('/'): + path += '/' + + dirs = [] + files = [] + for f in self.files: + if self.exists(f) and f.startswith(path): + remaining = f[len(path):] + if '/' in remaining: + dir = remaining[:remaining.index('/')] + if not dir in dirs: + dirs.append(dir) + else: + files.append(remaining) + return dirs + files + + def maybe_make_directory(self, *path): + # FIXME: Implement such that subsequent calls to isdir() work? 
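+ # The mock only tracks file contents, so directory creation is currently a no-op.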
+ pass + + def read_text_file(self, path): + return self.read_binary_file(path) + + def read_binary_file(self, path): + if path in self.files: + if self.files[path] is None: + raise IOError(errno.ENOENT, path, os.strerror(errno.ENOENT)) + return self.files[path] + + def write_text_file(self, path, contents): + return self.write_binary_file(path, contents) + + def write_binary_file(self, path, contents): + self.files[path] = contents + + def copyfile(self, source, destination): + if not self.exists(source): + raise IOError(errno.ENOENT, source, os.strerror(errno.ENOENT)) + if self.isdir(source): + raise IOError(errno.EISDIR, source, os.strerror(errno.ISDIR)) + if self.isdir(destination): + raise IOError(errno.EISDIR, destination, os.strerror(errno.ISDIR)) + + self.files[destination] = self.files[source] diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py new file mode 100644 index 0000000..267ca13 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py @@ -0,0 +1,172 @@ +# vim: set fileencoding=utf-8 : +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# NOTE: The fileencoding comment on the first line of the file is +# important; without it, Python will choke while trying to parse the file, +# since it includes non-ASCII characters. 
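# A short illustrative sketch of the FileSystem wrapper defined in
# filesystem.py above; the file name is made up for the example:
from __future__ import with_statement

import os

from webkitpy.common.system.filesystem import FileSystem

filesystem = FileSystem()
with filesystem.mkdtemp(prefix='example_') as directory:
    path = filesystem.join(directory, 'example.txt')
    filesystem.write_text_file(path, u'hello')
    assert filesystem.read_text_file(path) == u'hello'
    # mkdtemp() only removes the directory on exit if it is empty.
    os.remove(path)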
+ +from __future__ import with_statement + +import os +import stat +import sys +import tempfile +import unittest + +from filesystem import FileSystem + + +class FileSystemTest(unittest.TestCase): + def setUp(self): + self._this_dir = os.path.dirname(os.path.abspath(__file__)) + self._missing_file = os.path.join(self._this_dir, 'missing_file.py') + self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py') + + def test_exists__true(self): + fs = FileSystem() + self.assertTrue(fs.exists(self._this_file)) + + def test_exists__false(self): + fs = FileSystem() + self.assertFalse(fs.exists(self._missing_file)) + + def test_isdir__true(self): + fs = FileSystem() + self.assertTrue(fs.isdir(self._this_dir)) + + def test_isdir__false(self): + fs = FileSystem() + self.assertFalse(fs.isdir(self._this_file)) + + def test_join(self): + fs = FileSystem() + self.assertEqual(fs.join('foo', 'bar'), + os.path.join('foo', 'bar')) + + def test_listdir(self): + fs = FileSystem() + with fs.mkdtemp(prefix='filesystem_unittest_') as d: + self.assertEqual(fs.listdir(d), []) + new_file = os.path.join(d, 'foo') + fs.write_text_file(new_file, u'foo') + self.assertEqual(fs.listdir(d), ['foo']) + os.remove(new_file) + + def test_maybe_make_directory__success(self): + fs = FileSystem() + + with fs.mkdtemp(prefix='filesystem_unittest_') as base_path: + sub_path = os.path.join(base_path, "newdir") + self.assertFalse(os.path.exists(sub_path)) + self.assertFalse(fs.isdir(sub_path)) + + fs.maybe_make_directory(sub_path) + self.assertTrue(os.path.exists(sub_path)) + self.assertTrue(fs.isdir(sub_path)) + + # Make sure we can re-create it. + fs.maybe_make_directory(sub_path) + self.assertTrue(os.path.exists(sub_path)) + self.assertTrue(fs.isdir(sub_path)) + + # Clean up. + os.rmdir(sub_path) + + self.assertFalse(os.path.exists(base_path)) + self.assertFalse(fs.isdir(base_path)) + + def test_maybe_make_directory__failure(self): + # FIXME: os.chmod() doesn't work on Windows to set directories + # as readonly, so we skip this test for now. + if sys.platform in ('win32', 'cygwin'): + return + + fs = FileSystem() + with fs.mkdtemp(prefix='filesystem_unittest_') as d: + # Remove write permissions on the parent directory. + os.chmod(d, stat.S_IRUSR) + + # Now try to create a sub directory - should fail. + sub_dir = fs.join(d, 'subdir') + self.assertRaises(OSError, fs.maybe_make_directory, sub_dir) + + # Clean up in case the test failed and we did create the + # directory. 
+ if os.path.exists(sub_dir): + os.rmdir(sub_dir) + + def test_read_and_write_file(self): + fs = FileSystem() + text_path = None + binary_path = None + + unicode_text_string = u'Ūnĭcōde̽' + hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD' + try: + text_path = tempfile.mktemp(prefix='tree_unittest_') + binary_path = tempfile.mktemp(prefix='tree_unittest_') + fs.write_text_file(text_path, unicode_text_string) + contents = fs.read_binary_file(text_path) + self.assertEqual(contents, hex_equivalent) + + fs.write_text_file(binary_path, hex_equivalent) + text_contents = fs.read_text_file(binary_path) + self.assertEqual(text_contents, unicode_text_string) + except: + if text_path: + os.remove(text_path) + if binary_path: + os.remove(binary_path) + + def test_read_binary_file__missing(self): + fs = FileSystem() + self.assertRaises(IOError, fs.read_binary_file, self._missing_file) + + def test_read_text_file__missing(self): + fs = FileSystem() + self.assertRaises(IOError, fs.read_text_file, self._missing_file) + + def test_remove_file_with_retry(self): + FileSystemTest._remove_failures = 2 + + def remove_with_exception(filename): + FileSystemTest._remove_failures -= 1 + if FileSystemTest._remove_failures >= 0: + try: + raise WindowsError + except NameError: + raise FileSystem._WindowsError + + fs = FileSystem() + self.assertTrue(fs.remove('filename', remove_with_exception)) + self.assertEquals(-1, FileSystemTest._remove_failures) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/system/fileutils.py b/Tools/Scripts/webkitpy/common/system/fileutils.py new file mode 100644 index 0000000..55821f8 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/fileutils.py @@ -0,0 +1,33 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
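# A short illustrative sketch of the MockFileSystem defined in
# filesystem_mock.py above; the paths and contents are made up:
from webkitpy.common.system.filesystem_mock import MockFileSystem

mock_filesystem = MockFileSystem(files={'/mock/dir/a.txt': 'contents of a'})
assert mock_filesystem.exists('/mock/dir/a.txt')
assert mock_filesystem.isdir('/mock/dir')
assert mock_filesystem.listdir('/mock/dir') == ['a.txt']
assert mock_filesystem.read_text_file('/mock/dir/a.txt') == 'contents of a'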
+ +import sys + + +def make_stdout_binary(): + """Puts sys.stdout into binary mode (on platforms that have a distinction + between text and binary mode).""" + if sys.platform != 'win32' or not hasattr(sys.stdout, 'fileno'): + return + import msvcrt + import os + msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) diff --git a/Tools/Scripts/webkitpy/common/system/logtesting.py b/Tools/Scripts/webkitpy/common/system/logtesting.py new file mode 100644 index 0000000..e361cb5 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/logtesting.py @@ -0,0 +1,258 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports the unit-testing of logging code. + +Provides support for unit-testing messages logged using the built-in +logging module. + +Inherit from the LoggingTestCase class for basic testing needs. For +more advanced needs (e.g. unit-testing methods that configure logging), +see the TestLogStream class, and perhaps also the LogTesting class. + +""" + +import logging +import unittest + + +class TestLogStream(object): + + """Represents a file-like object for unit-testing logging. + + This is meant for passing to the logging.StreamHandler constructor. + Log messages captured by instances of this object can be tested + using self.assertMessages() below. + + """ + + def __init__(self, test_case): + """Create an instance. + + Args: + test_case: A unittest.TestCase instance. + + """ + self._test_case = test_case + self.messages = [] + """A list of log messages written to the stream.""" + + # Python documentation says that any object passed to the StreamHandler + # constructor should support write() and flush(): + # + # http://docs.python.org/library/logging.html#module-logging.handlers + def write(self, message): + self.messages.append(message) + + def flush(self): + pass + + def assertMessages(self, messages): + """Assert that the given messages match the logged messages. + + messages: A list of log message strings. + + """ + self._test_case.assertEquals(messages, self.messages) + + +class LogTesting(object): + + """Supports end-to-end unit-testing of log messages. + + Sample usage: + + class SampleTest(unittest.TestCase): + + def setUp(self): + self._log = LogTesting.setUp(self) # Turn logging on. 
+ + def tearDown(self): + self._log.tearDown() # Turn off and reset logging. + + def test_logging_in_some_method(self): + call_some_method() # Contains calls to _log.info(), etc. + + # Check the resulting log messages. + self._log.assertMessages(["INFO: expected message #1", + "WARNING: expected message #2"]) + + """ + + def __init__(self, test_stream, handler): + """Create an instance. + + This method should never be called directly. Instances should + instead be created using the static setUp() method. + + Args: + test_stream: A TestLogStream instance. + handler: The handler added to the logger. + + """ + self._test_stream = test_stream + self._handler = handler + + @staticmethod + def _getLogger(): + """Return the logger being tested.""" + # It is possible we might want to return something other than + # the root logger in some special situation. For now, the + # root logger seems to suffice. + return logging.getLogger() + + @staticmethod + def setUp(test_case, logging_level=logging.INFO): + """Configure logging for unit testing. + + Configures the root logger to log to a testing log stream. + Only messages logged at or above the given level are logged + to the stream. Messages logged to the stream are formatted + in the following way, for example-- + + "INFO: This is a test log message." + + This method should normally be called in the setUp() method + of a unittest.TestCase. See the docstring of this class + for more details. + + Returns: + A LogTesting instance. + + Args: + test_case: A unittest.TestCase instance. + logging_level: An integer logging level that is the minimum level + of log messages you would like to test. + + """ + stream = TestLogStream(test_case) + handler = logging.StreamHandler(stream) + handler.setLevel(logging_level) + formatter = logging.Formatter("%(levelname)s: %(message)s") + handler.setFormatter(formatter) + + # Notice that we only change the root logger by adding a handler + # to it. In particular, we do not reset its level using + # logger.setLevel(). This ensures that we have not interfered + # with how the code being tested may have configured the root + # logger. + logger = LogTesting._getLogger() + logger.addHandler(handler) + + return LogTesting(stream, handler) + + def tearDown(self): + """Assert there are no remaining log messages, and reset logging. + + This method asserts that there are no more messages in the array of + log messages, and then restores logging to its original state. + This method should normally be called in the tearDown() method of a + unittest.TestCase. See the docstring of this class for more details. + + """ + self.assertMessages([]) + logger = LogTesting._getLogger() + logger.removeHandler(self._handler) + + def messages(self): + """Return the current list of log messages.""" + return self._test_stream.messages + + # FIXME: Add a clearMessages() method for cases where the caller + # deliberately doesn't want to assert every message. + + # We clear the log messages after asserting since they are no longer + # needed after asserting. This serves two purposes: (1) it simplifies + # the calling code when we want to check multiple logging calls in a + # single test method, and (2) it lets us check in the tearDown() method + # that there are no remaining log messages to be asserted. + # + # The latter ensures that no extra log messages are getting logged that + # the caller might not be aware of or may have forgotten to check for. + # This gets us a bit more mileage out of our tests without writing any + # additional code. 
+ def assertMessages(self, messages): + """Assert the current array of log messages, and clear its contents. + + Args: + messages: A list of log message strings. + + """ + try: + self._test_stream.assertMessages(messages) + finally: + # We want to clear the array of messages even in the case of + # an Exception (e.g. an AssertionError). Otherwise, another + # AssertionError can occur in the tearDown() because the + # array might not have gotten emptied. + self._test_stream.messages = [] + + +# This class needs to inherit from unittest.TestCase. Otherwise, the +# setUp() and tearDown() methods will not get fired for test case classes +# that inherit from this class -- even if the class inherits from *both* +# unittest.TestCase and LoggingTestCase. +# +# FIXME: Rename this class to LoggingTestCaseBase to be sure that +# the unittest module does not interpret this class as a unittest +# test case itself. +class LoggingTestCase(unittest.TestCase): + + """Supports end-to-end unit-testing of log messages. + + Sample usage: + + class SampleTest(LoggingTestCase): + + def test_logging_in_some_method(self): + call_some_method() # Contains calls to _log.info(), etc. + + # Check the resulting log messages. + self.assertLog(["INFO: expected message #1", + "WARNING: expected message #2"]) + + """ + + def setUp(self): + self._log = LogTesting.setUp(self) + + def tearDown(self): + self._log.tearDown() + + def logMessages(self): + """Return the current list of log messages.""" + return self._log.messages() + + # FIXME: Add a clearMessages() method for cases where the caller + # deliberately doesn't want to assert every message. + + # See the code comments preceding LogTesting.assertMessages() for + # an explanation of why we clear the array of messages after + # asserting its contents. + def assertLog(self, messages): + """Assert the current array of log messages, and clear its contents. + + Args: + messages: A list of log message strings. + + """ + self._log.assertMessages(messages) diff --git a/Tools/Scripts/webkitpy/common/system/logutils.py b/Tools/Scripts/webkitpy/common/system/logutils.py new file mode 100644 index 0000000..cd4e60f --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/logutils.py @@ -0,0 +1,207 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports webkitpy logging.""" + +# FIXME: Move this file to webkitpy/python24 since logging needs to +# be configured prior to running version-checking code. + +import logging +import os +import sys + +import webkitpy + + +_log = logging.getLogger(__name__) + +# We set these directory paths lazily in get_logger() below. +_scripts_dir = "" +"""The normalized, absolute path to the ...Scripts directory.""" + +_webkitpy_dir = "" +"""The normalized, absolute path to the ...Scripts/webkitpy directory.""" + + +def _normalize_path(path): + """Return the given path normalized. + + Converts a path to an absolute path, removes any trailing slashes, + removes any extension, and lower-cases it. + + """ + path = os.path.abspath(path) + path = os.path.normpath(path) + path = os.path.splitext(path)[0] # Remove the extension, if any. + path = path.lower() + + return path + + +# Observe that the implementation of this function does not require +# the use of any hard-coded strings like "webkitpy", etc. +# +# The main benefit this function has over using-- +# +# _log = logging.getLogger(__name__) +# +# is that get_logger() returns the same value even if __name__ is +# "__main__" -- i.e. even if the module is the script being executed +# from the command-line. +def get_logger(path): + """Return a logging.logger for the given path. + + Returns: + A logger whose name is the name of the module corresponding to + the given path. If the module is in webkitpy, the name is + the fully-qualified dotted module name beginning with webkitpy.... + Otherwise, the name is the base name of the module (i.e. without + any dotted module name prefix). + + Args: + path: The path of the module. Normally, this parameter should be + the __file__ variable of the module. + + Sample usage: + + import webkitpy.common.system.logutils as logutils + + _log = logutils.get_logger(__file__) + + """ + # Since we assign to _scripts_dir and _webkitpy_dir in this function, + # we need to declare them global. + global _scripts_dir + global _webkitpy_dir + + path = _normalize_path(path) + + # Lazily evaluate _webkitpy_dir and _scripts_dir. + if not _scripts_dir: + # The normalized, absolute path to ...Scripts/webkitpy/__init__. + webkitpy_path = _normalize_path(webkitpy.__file__) + + _webkitpy_dir = os.path.split(webkitpy_path)[0] + _scripts_dir = os.path.split(_webkitpy_dir)[0] + + if path.startswith(_webkitpy_dir): + # Remove the initial Scripts directory portion, so the path + # starts with /webkitpy, for example "/webkitpy/init/logutils". + path = path[len(_scripts_dir):] + + parts = [] + while True: + (path, tail) = os.path.split(path) + if not tail: + break + parts.insert(0, tail) + + logger_name = ".".join(parts) # For example, webkitpy.common.system.logutils. + else: + # The path is outside of webkitpy. Default to the basename + # without the extension. 
+ basename = os.path.basename(path) + logger_name = os.path.splitext(basename)[0] + + return logging.getLogger(logger_name) + + +def _default_handlers(stream): + """Return a list of the default logging handlers to use. + + Args: + stream: See the configure_logging() docstring. + + """ + # Create the filter. + def should_log(record): + """Return whether a logging.LogRecord should be logged.""" + # FIXME: Enable the logging of autoinstall messages once + # autoinstall is adjusted. Currently, autoinstall logs + # INFO messages when importing already-downloaded packages, + # which is too verbose. + if record.name.startswith("webkitpy.thirdparty.autoinstall"): + return False + return True + + logging_filter = logging.Filter() + logging_filter.filter = should_log + + # Create the handler. + handler = logging.StreamHandler(stream) + formatter = logging.Formatter("%(name)s: [%(levelname)s] %(message)s") + handler.setFormatter(formatter) + handler.addFilter(logging_filter) + + return [handler] + + +def configure_logging(logging_level=None, logger=None, stream=None, + handlers=None): + """Configure logging for standard purposes. + + Returns: + A list of references to the logging handlers added to the root + logger. This allows the caller to later remove the handlers + using logger.removeHandler. This is useful primarily during unit + testing where the caller may want to configure logging temporarily + and then undo the configuring. + + Args: + logging_level: The minimum logging level to log. Defaults to + logging.INFO. + logger: A logging.logger instance to configure. This parameter + should be used only in unit tests. Defaults to the + root logger. + stream: A file-like object to which to log used in creating the default + handlers. The stream must define an "encoding" data attribute, + or else logging raises an error. Defaults to sys.stderr. + handlers: A list of logging.Handler instances to add to the logger + being configured. If this parameter is provided, then the + stream parameter is not used. + + """ + # If the stream does not define an "encoding" data attribute, the + # logging module can throw an error like the following: + # + # Traceback (most recent call last): + # File "/System/Library/Frameworks/Python.framework/Versions/2.6/... + # lib/python2.6/logging/__init__.py", line 761, in emit + # self.stream.write(fs % msg.encode(self.stream.encoding)) + # LookupError: unknown encoding: unknown + if logging_level is None: + logging_level = logging.INFO + if logger is None: + logger = logging.getLogger() + if stream is None: + stream = sys.stderr + if handlers is None: + handlers = _default_handlers(stream) + + logger.setLevel(logging_level) + + for handler in handlers: + logger.addHandler(handler) + + _log.debug("Debug logging enabled.") + + return handlers diff --git a/Tools/Scripts/webkitpy/common/system/logutils_unittest.py b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py new file mode 100644 index 0000000..b77c284 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py @@ -0,0 +1,142 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for logutils.py.""" + +import logging +import os +import unittest + +from webkitpy.common.system.logtesting import LogTesting +from webkitpy.common.system.logtesting import TestLogStream +import webkitpy.common.system.logutils as logutils + + +class GetLoggerTest(unittest.TestCase): + + """Tests get_logger().""" + + def test_get_logger_in_webkitpy(self): + logger = logutils.get_logger(__file__) + self.assertEquals(logger.name, "webkitpy.common.system.logutils_unittest") + + def test_get_logger_not_in_webkitpy(self): + # Temporarily change the working directory so that we + # can test get_logger() for a path outside of webkitpy. + working_directory = os.getcwd() + root_dir = "/" + os.chdir(root_dir) + + logger = logutils.get_logger("/Tools/Scripts/test-webkitpy") + self.assertEquals(logger.name, "test-webkitpy") + + logger = logutils.get_logger("/Tools/Scripts/test-webkitpy.py") + self.assertEquals(logger.name, "test-webkitpy") + + os.chdir(working_directory) + + +class ConfigureLoggingTestBase(unittest.TestCase): + + """Base class for configure_logging() unit tests.""" + + def _logging_level(self): + raise Exception("Not implemented.") + + def setUp(self): + log_stream = TestLogStream(self) + + # Use a logger other than the root logger or one prefixed with + # "webkitpy." so as not to conflict with test-webkitpy logging. + logger = logging.getLogger("unittest") + + # Configure the test logger not to pass messages along to the + # root logger. This prevents test messages from being + # propagated to loggers used by test-webkitpy logging (e.g. + # the root logger). + logger.propagate = False + + logging_level = self._logging_level() + self._handlers = logutils.configure_logging(logging_level=logging_level, + logger=logger, + stream=log_stream) + self._log = logger + self._log_stream = log_stream + + def tearDown(self): + """Reset logging to its original state. + + This method ensures that the logging configuration set up + for a unit test does not affect logging in other unit tests. 
+ + """ + logger = self._log + for handler in self._handlers: + logger.removeHandler(handler) + + def _assert_log_messages(self, messages): + """Assert that the logged messages equal the given messages.""" + self._log_stream.assertMessages(messages) + + +class ConfigureLoggingTest(ConfigureLoggingTestBase): + + """Tests configure_logging() with the default logging level.""" + + def _logging_level(self): + return None + + def test_info_message(self): + self._log.info("test message") + self._assert_log_messages(["unittest: [INFO] test message\n"]) + + def test_below_threshold_message(self): + # We test the boundary case of a logging level equal to 19. + # In practice, we will probably only be calling log.debug(), + # which corresponds to a logging level of 10. + level = logging.INFO - 1 # Equals 19. + self._log.log(level, "test message") + self._assert_log_messages([]) + + def test_two_messages(self): + self._log.info("message1") + self._log.info("message2") + self._assert_log_messages(["unittest: [INFO] message1\n", + "unittest: [INFO] message2\n"]) + + +class ConfigureLoggingCustomLevelTest(ConfigureLoggingTestBase): + + """Tests configure_logging() with a custom logging level.""" + + _level = 36 + + def _logging_level(self): + return self._level + + def test_logged_message(self): + self._log.log(self._level, "test message") + self._assert_log_messages(["unittest: [Level 36] test message\n"]) + + def test_below_threshold_message(self): + self._log.log(self._level - 1, "test message") + self._assert_log_messages([]) diff --git a/Tools/Scripts/webkitpy/common/system/ospath.py b/Tools/Scripts/webkitpy/common/system/ospath.py new file mode 100644 index 0000000..aed7a3d --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/ospath.py @@ -0,0 +1,83 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains a substitute for Python 2.6's os.path.relpath().""" + +import os + + +# This function is a replacement for os.path.relpath(), which is only +# available in Python 2.6: +# +# http://docs.python.org/library/os.path.html#os.path.relpath +# +# It should behave essentially the same as os.path.relpath(), except for +# returning None on paths not contained in abs_start_path. 
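# A minimal usage sketch (editor's illustration, not part of the original
# patch), assuming a POSIX os.sep of "/":
#
#     relpath("/tmp/foo/bar/baz.txt", "/tmp/foo")  # -> "bar/baz.txt"
#     relpath("/tmp/foo", "/tmp/foo")              # -> ""
#     relpath("/tmp/foobar", "/tmp/foo")           # -> None (outside start_path)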
+def relpath(path, start_path, os_path_abspath=None): + """Return a path relative to the given start path, or None. + + Returns None if the path is not contained in the directory start_path. + + Args: + path: An absolute or relative path to convert to a relative path. + start_path: The path relative to which the given path should be + converted. + os_path_abspath: A replacement function for unit testing. This + function should strip trailing slashes just like + os.path.abspath(). Defaults to os.path.abspath. + + """ + if os_path_abspath is None: + os_path_abspath = os.path.abspath + + # Since os_path_abspath() calls os.path.normpath()-- + # + # (see http://docs.python.org/library/os.path.html#os.path.abspath ) + # + # it also removes trailing slashes and converts forward and backward + # slashes to the preferred slash os.sep. + start_path = os_path_abspath(start_path) + path = os_path_abspath(path) + + if not path.lower().startswith(start_path.lower()): + # Then path is outside the directory given by start_path. + return None + + rel_path = path[len(start_path):] + + if not rel_path: + # Then the paths are the same. + pass + elif rel_path[0] == os.sep: + # It is probably sufficient to remove just the first character + # since os.path.normpath() collapses separators, but we use + # lstrip() just to be sure. + rel_path = rel_path.lstrip(os.sep) + else: + # We are in the case typified by the following example: + # + # start_path = "/tmp/foo" + # path = "/tmp/foobar" + # rel_path = "bar" + return None + + return rel_path diff --git a/Tools/Scripts/webkitpy/common/system/ospath_unittest.py b/Tools/Scripts/webkitpy/common/system/ospath_unittest.py new file mode 100644 index 0000000..d84c2c6 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/ospath_unittest.py @@ -0,0 +1,62 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for ospath.py.""" + +import os +import unittest + +from webkitpy.common.system.ospath import relpath + + +# Make sure the tests in this class are platform independent. 
+class RelPathTest(unittest.TestCase): + + """Tests relpath().""" + + os_path_abspath = lambda self, path: path + + def _rel_path(self, path, abs_start_path): + return relpath(path, abs_start_path, self.os_path_abspath) + + def test_same_path(self): + rel_path = self._rel_path("WebKit", "WebKit") + self.assertEquals(rel_path, "") + + def test_long_rel_path(self): + start_path = "WebKit" + expected_rel_path = os.path.join("test", "Foo.txt") + path = os.path.join(start_path, expected_rel_path) + + rel_path = self._rel_path(path, start_path) + self.assertEquals(expected_rel_path, rel_path) + + def test_none_rel_path(self): + """Test _rel_path() with None return value.""" + start_path = "WebKit" + path = os.path.join("other_dir", "foo.txt") + + rel_path = self._rel_path(path, start_path) + self.assertTrue(rel_path is None) + + rel_path = self._rel_path("Tools", "WebKit") + self.assertTrue(rel_path is None) diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture.py b/Tools/Scripts/webkitpy/common/system/outputcapture.py new file mode 100644 index 0000000..45e0e3f --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/outputcapture.py @@ -0,0 +1,86 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Class for unittest support. Used for capturing stderr/stdout. 
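# A rough usage sketch (editor's illustration, not part of the original patch):
#
#     capture = OutputCapture()
#     capture.capture_output()                   # swaps in StringIO objects
#     print "captured"                           # lands in the fake sys.stdout
#     stdout, stderr = capture.restore_output()  # -> ("captured\n", "")
#
# assert_outputs() wraps this pattern around a single function call and
# compares the captured strings with expected_stdout/expected_stderr.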
+ +import sys +import unittest +from StringIO import StringIO + +class OutputCapture(object): + def __init__(self): + self.saved_outputs = dict() + + def _capture_output_with_name(self, output_name): + self.saved_outputs[output_name] = getattr(sys, output_name) + captured_output = StringIO() + setattr(sys, output_name, captured_output) + return captured_output + + def _restore_output_with_name(self, output_name): + captured_output = getattr(sys, output_name).getvalue() + setattr(sys, output_name, self.saved_outputs[output_name]) + del self.saved_outputs[output_name] + return captured_output + + def capture_output(self): + return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr")) + + def restore_output(self): + return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr")) + + def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr="", expected_exception=None): + self.capture_output() + if expected_exception: + return_value = testcase.assertRaises(expected_exception, function, *args, **kwargs) + else: + return_value = function(*args, **kwargs) + (stdout_string, stderr_string) = self.restore_output() + testcase.assertEqual(stdout_string, expected_stdout) + testcase.assertEqual(stderr_string, expected_stderr) + # This is a little strange, but I don't know where else to return this information. + return return_value + + +class OutputCaptureTestCaseBase(unittest.TestCase): + def setUp(self): + unittest.TestCase.setUp(self) + self.output_capture = OutputCapture() + (self.__captured_stdout, self.__captured_stderr) = self.output_capture.capture_output() + + def tearDown(self): + del self.__captured_stdout + del self.__captured_stderr + self.output_capture.restore_output() + unittest.TestCase.tearDown(self) + + def assertStdout(self, expected_stdout): + self.assertEquals(expected_stdout, self.__captured_stdout.getvalue()) + + def assertStderr(self, expected_stderr): + self.assertEquals(expected_stderr, self.__captured_stderr.getvalue()) diff --git a/Tools/Scripts/webkitpy/common/system/path.py b/Tools/Scripts/webkitpy/common/system/path.py new file mode 100644 index 0000000..09787d7 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/path.py @@ -0,0 +1,138 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""generic routines to convert platform-specific paths to URIs.""" +from __future__ import with_statement + +import atexit +import subprocess +import sys +import threading +import urllib + + +def abspath_to_uri(path, platform=None): + """Converts a platform-specific absolute path to a file: URL.""" + if platform is None: + platform = sys.platform + return "file:" + _escape(_convert_path(path, platform)) + + +def cygpath(path): + """Converts an absolute cygwin path to an absolute Windows path.""" + return _CygPath.convert_using_singleton(path) + + +# Note that this object is not threadsafe and must only be called +# from multiple threads under protection of a lock (as is done in cygpath()) +class _CygPath(object): + """Manages a long-running 'cygpath' process for file conversion.""" + _lock = None + _singleton = None + + @staticmethod + def stop_cygpath_subprocess(): + if not _CygPath._lock: + return + + with _CygPath._lock: + if _CygPath._singleton: + _CygPath._singleton.stop() + + @staticmethod + def convert_using_singleton(path): + if not _CygPath._lock: + _CygPath._lock = threading.Lock() + + with _CygPath._lock: + if not _CygPath._singleton: + _CygPath._singleton = _CygPath() + # Make sure the cygpath subprocess always gets shutdown cleanly. + atexit.register(_CygPath.stop_cygpath_subprocess) + + return _CygPath._singleton.convert(path) + + def __init__(self): + self._child_process = None + + def start(self): + assert(self._child_process is None) + args = ['cygpath', '-f', '-', '-wa'] + self._child_process = subprocess.Popen(args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + + def is_running(self): + if not self._child_process: + return False + return self._child_process.returncode is None + + def stop(self): + if self._child_process: + self._child_process.stdin.close() + self._child_process.wait() + self._child_process = None + + def convert(self, path): + if not self.is_running(): + self.start() + self._child_process.stdin.write("%s\r\n" % path) + self._child_process.stdin.flush() + windows_path = self._child_process.stdout.readline().rstrip() + # Some versions of cygpath use lowercase drive letters while others + # use uppercase. We always convert to uppercase for consistency. + windows_path = '%s%s' % (windows_path[0].upper(), windows_path[1:]) + return windows_path + + +def _escape(path): + """Handle any characters in the path that should be escaped.""" + # FIXME: web browsers don't appear to blindly quote every character + # when converting filenames to files. Instead of using urllib's default + # rules, we allow a small list of other characters through un-escaped. + # It's unclear if this is the best possible solution. 
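    # For example (editor's illustration, not part of the original patch):
    #   _escape("/foo/bar + baz%?.html") -> "/foo/bar%20+%20baz%25%3F.html"
    # '/', '+' and ':' pass through unescaped because of the safe= argument.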
+ return urllib.quote(path, safe='/+:') + + +def _convert_path(path, platform): + """Handles any os-specific path separators, mappings, etc.""" + if platform == 'win32': + return _winpath_to_uri(path) + if platform == 'cygwin': + return _winpath_to_uri(cygpath(path)) + return _unixypath_to_uri(path) + + +def _winpath_to_uri(path): + """Converts a window absolute path to a file: URL.""" + return "///" + path.replace("\\", "/") + + +def _unixypath_to_uri(path): + """Converts a unix-style path to a file: URL.""" + return "//" + path diff --git a/Tools/Scripts/webkitpy/common/system/path_unittest.py b/Tools/Scripts/webkitpy/common/system/path_unittest.py new file mode 100644 index 0000000..4dbd38a --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/path_unittest.py @@ -0,0 +1,105 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
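# Editor's summary of the conversions the tests below exercise (illustrative,
# not part of the original patch):
#
#     abspath_to_uri("/foo/bar.html", platform="linux2")
#         -> "file:///foo/bar.html"
#     abspath_to_uri("c:\\foo\\bar.html", platform="win32")
#         -> "file:///c:/foo/bar.html"
#     abspath_to_uri("/cygdrive/c/foo/bar.html", platform="cygwin")
#         -> "file:///C:/foo/bar.html"   (via the long-running cygpath helper)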
+ +import unittest +import sys + +import path + +class AbspathTest(unittest.TestCase): + def assertMatch(self, test_path, expected_uri, + platform=None): + if platform == 'cygwin' and sys.platform != 'cygwin': + return + self.assertEqual(path.abspath_to_uri(test_path, platform=platform), + expected_uri) + + def test_abspath_to_uri_cygwin(self): + if sys.platform != 'cygwin': + return + + self.assertMatch('/cygdrive/c/foo/bar.html', + 'file:///C:/foo/bar.html', + platform='cygwin') + self.assertEqual(path.abspath_to_uri('/cygdrive/c/foo/bar.html', + platform='cygwin'), + 'file:///C:/foo/bar.html') + + def test_abspath_to_uri_darwin(self): + self.assertMatch('/foo/bar.html', + 'file:///foo/bar.html', + platform='darwin') + self.assertEqual(path.abspath_to_uri("/foo/bar.html", + platform='darwin'), + "file:///foo/bar.html") + + def test_abspath_to_uri_linux2(self): + self.assertMatch('/foo/bar.html', + 'file:///foo/bar.html', + platform='darwin') + self.assertEqual(path.abspath_to_uri("/foo/bar.html", + platform='linux2'), + "file:///foo/bar.html") + + def test_abspath_to_uri_win(self): + self.assertMatch('c:\\foo\\bar.html', + 'file:///c:/foo/bar.html', + platform='win32') + self.assertEqual(path.abspath_to_uri("c:\\foo\\bar.html", + platform='win32'), + "file:///c:/foo/bar.html") + + def test_abspath_to_uri_escaping(self): + self.assertMatch('/foo/bar + baz%?.html', + 'file:///foo/bar%20+%20baz%25%3F.html', + platform='darwin') + self.assertMatch('/foo/bar + baz%?.html', + 'file:///foo/bar%20+%20baz%25%3F.html', + platform='linux2') + + # Note that you can't have '?' in a filename on windows. + self.assertMatch('/cygdrive/c/foo/bar + baz%.html', + 'file:///C:/foo/bar%20+%20baz%25.html', + platform='cygwin') + + def test_stop_cygpath_subprocess(self): + if sys.platform != 'cygwin': + return + + # Call cygpath to ensure the subprocess is running. + path.cygpath("/cygdrive/c/foo.txt") + self.assertTrue(path._CygPath._singleton.is_running()) + + # Stop it. + path._CygPath.stop_cygpath_subprocess() + + # Ensure that it is stopped. + self.assertFalse(path._CygPath._singleton.is_running()) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo.py b/Tools/Scripts/webkitpy/common/system/platforminfo.py new file mode 100644 index 0000000..cc370ba --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/platforminfo.py @@ -0,0 +1,43 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import platform + + +# We use this instead of calls to platform directly to allow mocking. +class PlatformInfo(object): + + def display_name(self): + # platform.platform() returns Darwin information for Mac, which is just confusing. + if platform.system() == "Darwin": + return "Mac OS X %s" % platform.mac_ver()[0] + + # Returns strings like: + # Linux-2.6.18-194.3.1.el5-i686-with-redhat-5.5-Final + # Windows-2008ServerR2-6.1.7600 + return platform.platform() diff --git a/Tools/Scripts/webkitpy/common/system/user.py b/Tools/Scripts/webkitpy/common/system/user.py new file mode 100644 index 0000000..b79536c --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/user.py @@ -0,0 +1,143 @@ +# Copyright (c) 2009, Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os +import re +import shlex +import subprocess +import sys +import webbrowser + + +_log = logging.getLogger("webkitpy.common.system.user") + + +try: + import readline +except ImportError: + if sys.platform != "win32": + # There is no readline module for win32, not much to do except cry. + _log.warn("Unable to import readline.") + # FIXME: We could give instructions for non-mac platforms. + # Lack of readline results in a very bad user experiance. 
+ if sys.platform == "darwin": + _log.warn("If you're using MacPorts, try running:") + _log.warn(" sudo port install py25-readline") + + +class User(object): + DEFAULT_NO = 'n' + DEFAULT_YES = 'y' + + # FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance). + @classmethod + def prompt(cls, message, repeat=1, raw_input=raw_input): + response = None + while (repeat and not response): + repeat -= 1 + response = raw_input(message) + return response + + @classmethod + def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input): + print list_title + i = 0 + for item in list_items: + i += 1 + print "%2d. %s" % (i, item) + + # Loop until we get valid input + while True: + if can_choose_multiple: + response = cls.prompt("Enter one or more numbers (comma-separated), or \"all\": ", raw_input=raw_input) + if not response.strip() or response == "all": + return list_items + try: + indices = [int(r) - 1 for r in re.split("\s*,\s*", response)] + except ValueError, err: + continue + return [list_items[i] for i in indices] + else: + try: + result = int(cls.prompt("Enter a number: ", raw_input=raw_input)) - 1 + except ValueError, err: + continue + return list_items[result] + + def edit(self, files): + editor = os.environ.get("EDITOR") or "vi" + args = shlex.split(editor) + # Note: Not thread safe: http://bugs.python.org/issue2320 + subprocess.call(args + files) + + def _warn_if_application_is_xcode(self, edit_application): + if "Xcode" in edit_application: + print "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\"." + + def edit_changelog(self, files): + edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION") + if edit_application and sys.platform == "darwin": + # On Mac we support editing ChangeLogs using an application. + args = shlex.split(edit_application) + print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable." + print "Please quit the editor application when done editing." + self._warn_if_application_is_xcode(edit_application) + subprocess.call(["open", "-W", "-n", "-a"] + args + files) + return + self.edit(files) + + def page(self, message): + pager = os.environ.get("PAGER") or "less" + try: + # Note: Not thread safe: http://bugs.python.org/issue2320 + child_process = subprocess.Popen([pager], stdin=subprocess.PIPE) + child_process.communicate(input=message) + except IOError, e: + pass + + def confirm(self, message=None, default=DEFAULT_YES, raw_input=raw_input): + if not message: + message = "Continue?" + choice = {'y': 'Y/n', 'n': 'y/N'}[default] + response = raw_input("%s [%s]: " % (message, choice)) + if not response: + response = default + return response.lower() == 'y' + + def can_open_url(self): + try: + webbrowser.get() + return True + except webbrowser.Error, e: + return False + + def open_url(self, url): + if not self.can_open_url(): + _log.warn("Failed to open %s" % url) + webbrowser.open(url) diff --git a/Tools/Scripts/webkitpy/common/system/user_unittest.py b/Tools/Scripts/webkitpy/common/system/user_unittest.py new file mode 100644 index 0000000..7ec9b34 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/system/user_unittest.py @@ -0,0 +1,109 @@ +# Copyright (C) 2010 Research in Motion Ltd. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Research in Motion Ltd. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.common.system.user import User + +class UserTest(unittest.TestCase): + + example_user_response = "example user response" + + def test_prompt_repeat(self): + self.repeatsRemaining = 2 + def mock_raw_input(message): + self.repeatsRemaining -= 1 + if not self.repeatsRemaining: + return UserTest.example_user_response + return None + self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), UserTest.example_user_response) + + def test_prompt_when_exceeded_repeats(self): + self.repeatsRemaining = 2 + def mock_raw_input(message): + self.repeatsRemaining -= 1 + return None + self.assertEqual(User.prompt("input", repeat=self.repeatsRemaining, raw_input=mock_raw_input), None) + + def test_prompt_with_list(self): + def run_prompt_test(inputs, expected_result, can_choose_multiple=False): + def mock_raw_input(message): + return inputs.pop(0) + output_capture = OutputCapture() + actual_result = output_capture.assert_outputs( + self, + User.prompt_with_list, + args=["title", ["foo", "bar"]], + kwargs={"can_choose_multiple": can_choose_multiple, "raw_input": mock_raw_input}, + expected_stdout="title\n 1. foo\n 2. bar\n") + self.assertEqual(actual_result, expected_result) + self.assertEqual(len(inputs), 0) + + run_prompt_test(["1"], "foo") + run_prompt_test(["badinput", "2"], "bar") + + run_prompt_test(["1,2"], ["foo", "bar"], can_choose_multiple=True) + run_prompt_test([" 1, 2 "], ["foo", "bar"], can_choose_multiple=True) + run_prompt_test(["all"], ["foo", "bar"], can_choose_multiple=True) + run_prompt_test([""], ["foo", "bar"], can_choose_multiple=True) + run_prompt_test([" "], ["foo", "bar"], can_choose_multiple=True) + run_prompt_test(["badinput", "all"], ["foo", "bar"], can_choose_multiple=True) + + def test_confirm(self): + test_cases = ( + (("Continue? [Y/n]: ", True), (User.DEFAULT_YES, 'y')), + (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'n')), + (("Continue? 
[Y/n]: ", True), (User.DEFAULT_YES, '')), + (("Continue? [Y/n]: ", False), (User.DEFAULT_YES, 'q')), + (("Continue? [y/N]: ", True), (User.DEFAULT_NO, 'y')), + (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'n')), + (("Continue? [y/N]: ", False), (User.DEFAULT_NO, '')), + (("Continue? [y/N]: ", False), (User.DEFAULT_NO, 'q')), + ) + for test_case in test_cases: + expected, inputs = test_case + + def mock_raw_input(message): + self.assertEquals(expected[0], message) + return inputs[1] + + result = User().confirm(default=inputs[0], + raw_input=mock_raw_input) + self.assertEquals(expected[1], result) + + def test_warn_if_application_is_xcode(self): + output = OutputCapture() + user = User() + output.assert_outputs(self, user._warn_if_application_is_xcode, ["TextMate"]) + output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Applications/TextMate.app"]) + output.assert_outputs(self, user._warn_if_application_is_xcode, ["XCode"]) # case sensitive matching + + xcode_warning = "Instead of using Xcode.app, consider using EDITOR=\"xed --wait\".\n" + output.assert_outputs(self, user._warn_if_application_is_xcode, ["Xcode"], expected_stdout=xcode_warning) + output.assert_outputs(self, user._warn_if_application_is_xcode, ["/Developer/Applications/Xcode.app"], expected_stdout=xcode_warning) diff --git a/Tools/Scripts/webkitpy/common/thread/__init__.py b/Tools/Scripts/webkitpy/common/thread/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/common/thread/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/common/thread/messagepump.py b/Tools/Scripts/webkitpy/common/thread/messagepump.py new file mode 100644 index 0000000..0e39285 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/thread/messagepump.py @@ -0,0 +1,59 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +class MessagePumpDelegate(object): + def schedule(self, interval, callback): + raise NotImplementedError, "subclasses must implement" + + def message_available(self, message): + raise NotImplementedError, "subclasses must implement" + + def final_message_delivered(self): + raise NotImplementedError, "subclasses must implement" + + +class MessagePump(object): + interval = 10 # seconds + + def __init__(self, delegate, message_queue): + self._delegate = delegate + self._message_queue = message_queue + self._schedule() + + def _schedule(self): + self._delegate.schedule(self.interval, self._callback) + + def _callback(self): + (messages, is_running) = self._message_queue.take_all() + for message in messages: + self._delegate.message_available(message) + if not is_running: + self._delegate.final_message_delivered() + return + self._schedule() diff --git a/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py new file mode 100644 index 0000000..f731db2 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py @@ -0,0 +1,83 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
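# Editor's note (illustrative, not part of the original patch): the test below
# uses a TestDelegate whose schedule() merely records the callback, so the
# pump can be "ticked" synchronously instead of waiting on a real timer:
#
#     delegate = TestDelegate()
#     pump = MessagePump(delegate, ThreadedMessageQueue())
#     delegate.callback()   # one synchronous tick of the pump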
+ +import unittest + +from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate +from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue + + +class TestDelegate(MessagePumpDelegate): + def __init__(self): + self.log = [] + + def schedule(self, interval, callback): + self.callback = callback + self.log.append("schedule") + + def message_available(self, message): + self.log.append("message_available: %s" % message) + + def final_message_delivered(self): + self.log.append("final_message_delivered") + + +class MessagePumpTest(unittest.TestCase): + + def test_basic(self): + queue = ThreadedMessageQueue() + delegate = TestDelegate() + pump = MessagePump(delegate, queue) + self.assertEqual(delegate.log, [ + 'schedule' + ]) + delegate.callback() + queue.post("Hello") + queue.post("There") + delegate.callback() + self.assertEqual(delegate.log, [ + 'schedule', + 'schedule', + 'message_available: Hello', + 'message_available: There', + 'schedule' + ]) + queue.post("More") + queue.post("Messages") + queue.stop() + delegate.callback() + self.assertEqual(delegate.log, [ + 'schedule', + 'schedule', + 'message_available: Hello', + 'message_available: There', + 'schedule', + 'message_available: More', + 'message_available: Messages', + 'final_message_delivered' + ]) diff --git a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py new file mode 100644 index 0000000..17b6277 --- /dev/null +++ b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue.py @@ -0,0 +1,54 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
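# Editor's sketch (illustrative, not part of the original patch): producers on
# any thread call post(), a single consumer periodically drains the queue with
# take_all(), and stop() signals that the batch returned by the next take_all()
# is the final one:
#
#     queue = ThreadedMessageQueue()
#     queue.post("work item")
#     messages, is_running = queue.take_all()   # -> (["work item"], True)
#     queue.stop()
#     messages, is_running = queue.take_all()   # -> ([], False)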
+ +from __future__ import with_statement + +import threading + + +class ThreadedMessageQueue(object): + def __init__(self): + self._messages = [] + self._is_running = True + self._lock = threading.Lock() + + def post(self, message): + with self._lock: + self._messages.append(message) + + def stop(self): + with self._lock: + self._is_running = False + + def take_all(self): + with self._lock: + messages = self._messages + is_running = self._is_running + self._messages = [] + return (messages, is_running) + diff --git a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py new file mode 100644 index 0000000..cb67c1e --- /dev/null +++ b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py @@ -0,0 +1,53 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest + +from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue + +class ThreadedMessageQueueTest(unittest.TestCase): + + def test_basic(self): + queue = ThreadedMessageQueue() + queue.post("Hello") + queue.post("There") + (messages, is_running) = queue.take_all() + self.assertEqual(messages, ["Hello", "There"]) + self.assertTrue(is_running) + (messages, is_running) = queue.take_all() + self.assertEqual(messages, []) + self.assertTrue(is_running) + queue.post("More") + queue.stop() + queue.post("Messages") + (messages, is_running) = queue.take_all() + self.assertEqual(messages, ["More", "Messages"]) + self.assertFalse(is_running) + (messages, is_running) = queue.take_all() + self.assertEqual(messages, []) + self.assertFalse(is_running) diff --git a/Tools/Scripts/webkitpy/layout_tests/__init__.py b/Tools/Scripts/webkitpy/layout_tests/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests.py b/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests.py new file mode 100644 index 0000000..51dcac8 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""deduplicate_tests -- lists duplicated between platforms. + +If platform/mac-leopard is missing an expected test output, we fall back on +platform/mac. This means it's possible to grow redundant test outputs, +where we have the same expected data in both a platform directory and another +platform it falls back on. +""" + +import collections +import fnmatch +import os +import subprocess +import sys +import re +import webkitpy.common.checkout.scm as scm +import webkitpy.common.system.executive as executive +import webkitpy.common.system.logutils as logutils +import webkitpy.common.system.ospath as ospath +import webkitpy.layout_tests.port.factory as port_factory + +_log = logutils.get_logger(__file__) + +_BASE_PLATFORM = 'base' + + +def port_fallbacks(): + """Get the port fallback information. 
+ Returns: + A dictionary mapping platform name to a list of other platforms to fall + back on. All platforms fall back on 'base'. + """ + fallbacks = {_BASE_PLATFORM: []} + platform_dir = os.path.join(scm.find_checkout_root(), 'LayoutTests', + 'platform') + for port_name in os.listdir(platform_dir): + try: + platforms = port_factory.get(port_name).baseline_search_path() + except NotImplementedError: + _log.error("'%s' lacks baseline_search_path(), please fix." + % port_name) + fallbacks[port_name] = [_BASE_PLATFORM] + continue + fallbacks[port_name] = [os.path.basename(p) for p in platforms][1:] + fallbacks[port_name].append(_BASE_PLATFORM) + return fallbacks + + +def parse_git_output(git_output, glob_pattern): + """Parses the output of git ls-tree and filters based on glob_pattern. + Args: + git_output: result of git ls-tree -r HEAD LayoutTests. + glob_pattern: a pattern to filter the files. + Returns: + A dictionary mapping (test name, hash of content) => [paths] + """ + hashes = collections.defaultdict(set) + for line in git_output.split('\n'): + if not line: + break + attrs, path = line.strip().split('\t') + if not fnmatch.fnmatch(path, glob_pattern): + continue + path = path[len('LayoutTests/'):] + match = re.match(r'^(platform/.*?/)?(.*)', path) + test = match.group(2) + _, _, hash = attrs.split(' ') + hashes[(test, hash)].add(path) + return hashes + + +def cluster_file_hashes(glob_pattern): + """Get the hashes of all the test expectations in the tree. + We cheat and use git's hashes. + Args: + glob_pattern: a pattern to filter the files. + Returns: + A dictionary mapping (test name, hash of content) => [paths] + """ + + # A map of file hash => set of all files with that hash. + hashes = collections.defaultdict(set) + + # Fill in the map. + cmd = ('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests') + try: + git_output = executive.Executive().run_command(cmd, + cwd=scm.find_checkout_root()) + except OSError, e: + if e.errno == 2: # No such file or directory. + _log.error("Error: 'No such file' when running git.") + _log.error("This script requires git.") + sys.exit(1) + raise e + return parse_git_output(git_output, glob_pattern) + + +def extract_platforms(paths): + """Extracts the platforms from a list of paths matching ^platform/(.*?)/. + Args: + paths: a list of paths. + Returns: + A dictionary containing all platforms from paths. + """ + platforms = {} + for path in paths: + match = re.match(r'^platform/(.*?)/', path) + if match: + platform = match.group(1) + else: + platform = _BASE_PLATFORM + platforms[platform] = path + return platforms + + +def has_intermediate_results(test, fallbacks, matching_platform, + path_exists=os.path.exists): + """Returns True if there is a test result that causes us to not delete + this duplicate. + + For example, chromium-linux may be a duplicate of the checked in result, + but chromium-win may have a different result checked in. In this case, + we need to keep the duplicate results. + + Args: + test: The test name. + fallbacks: A list of platforms we fall back on. + matching_platform: The platform that we found the duplicate test + result. We can stop checking here. + path_exists: Optional parameter that allows us to stub out + os.path.exists for testing. 
+ """ + for platform in fallbacks: + if platform == matching_platform: + return False + test_path = os.path.join('LayoutTests', 'platform', platform, test) + if path_exists(test_path): + return True + return False + + +def get_relative_test_path(filename, relative_to, + checkout_root=scm.find_checkout_root()): + """Constructs a relative path to |filename| from |relative_to|. + Args: + filename: The test file we're trying to get a relative path to. + relative_to: The absolute path we're relative to. + Returns: + A relative path to filename or None if |filename| is not below + |relative_to|. + """ + layout_test_dir = os.path.join(checkout_root, 'LayoutTests') + abs_path = os.path.join(layout_test_dir, filename) + return ospath.relpath(abs_path, relative_to) + + +def find_dups(hashes, port_fallbacks, relative_to): + """Yields info about redundant test expectations. + Args: + hashes: a list of hashes as returned by cluster_file_hashes. + port_fallbacks: a list of fallback information as returned by + get_port_fallbacks. + relative_to: the directory that we want the results relative to + Returns: + a tuple containing (test, platform, fallback, platforms) + """ + for (test, hash), cluster in hashes.items(): + if len(cluster) < 2: + continue # Common case: only one file with that hash. + + # Compute the list of platforms we have this particular hash for. + platforms = extract_platforms(cluster) + if len(platforms) == 1: + continue + + # See if any of the platforms are redundant with each other. + for platform in platforms.keys(): + for fallback in port_fallbacks[platform]: + if fallback not in platforms.keys(): + continue + # We have to verify that there isn't an intermediate result + # that causes this duplicate hash to exist. + if has_intermediate_results(test, port_fallbacks[platform], + fallback): + continue + # We print the relative path so it's easy to pipe the results + # to xargs rm. + path = get_relative_test_path(platforms[platform], relative_to) + if not path: + continue + yield { + 'test': test, + 'platform': platform, + 'fallback': fallback, + 'path': path, + } + + +def deduplicate(glob_pattern): + """Traverses LayoutTests and returns information about duplicated files. + Args: + glob pattern to filter the files in LayoutTests. + Returns: + a dictionary containing test, path, platform and fallback. + """ + fallbacks = port_fallbacks() + hashes = cluster_file_hashes(glob_pattern) + return list(find_dups(hashes, fallbacks, os.getcwd())) diff --git a/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py new file mode 100644 index 0000000..309bf8d --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/deduplicate_tests_unittest.py @@ -0,0 +1,210 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for deduplicate_tests.py.""" + +import deduplicate_tests +import os +import unittest +import webkitpy.common.checkout.scm as scm + + +class MockExecutive(object): + last_run_command = [] + response = '' + + class Executive(object): + def run_command(self, + args, + cwd=None, + input=None, + error_handler=None, + return_exit_code=False, + return_stderr=True, + decode_output=True): + MockExecutive.last_run_command += [args] + return MockExecutive.response + + +class ListDuplicatesTest(unittest.TestCase): + def setUp(self): + MockExecutive.last_run_command = [] + MockExecutive.response = '' + deduplicate_tests.executive = MockExecutive + self._original_cwd = os.getcwd() + checkout_root = scm.find_checkout_root() + self.assertNotEqual(checkout_root, None) + os.chdir(checkout_root) + + def tearDown(self): + os.chdir(self._original_cwd) + + def test_parse_git_output(self): + git_output = ( + '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' + '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' + '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' + '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/animage.png\n' + '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' + '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/animage.png\n' + '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') + hashes = deduplicate_tests.parse_git_output(git_output, '*') + expected = {('mac/foo-expected.txt', '5053240b3353f6eb39f7cb00259785f16d121df2'): set(['mac/foo-expected.txt']), + ('animage.png', 'abcdebc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/animage.png', 'platform/chromium-win/animage.png']), + ('foo-expected.txt', '4303df5389ca87cae83dd3236b8dd84e16606517'): set(['platform/mac/foo-expected.txt']), + ('foo-expected.txt', 'd6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/foo-expected.txt', 'platform/chromium-win/foo-expected.txt']), + ('foo-expected.txt', 'a004548d107ecc4e1ea08019daf0a14e8634a1ff'): set(['platform/chromium/foo-expected.txt'])} + self.assertEquals(expected, hashes) + + hashes = deduplicate_tests.parse_git_output(git_output, '*.png') + expected = {('animage.png', 'abcdebc762e3aec5df03b5c04485b2cb3b65ffb1'): set(['platform/chromium-linux/animage.png', 'platform/chromium-win/animage.png'])} + self.assertEquals(expected, hashes) + + def test_extract_platforms(self): + self.assertEquals({'foo': 'platform/foo/bar', + 'zoo': 'platform/zoo/com'}, + 
deduplicate_tests.extract_platforms(['platform/foo/bar', 'platform/zoo/com'])) + self.assertEquals({'foo': 'platform/foo/bar', + deduplicate_tests._BASE_PLATFORM: 'what/'}, + deduplicate_tests.extract_platforms(['platform/foo/bar', 'what/'])) + + def test_has_intermediate_results(self): + test_cases = ( + # If we found a duplicate in our first fallback, we have no + # intermediate results. + (False, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'chromium-win', + lambda path: True)), + # Since chromium-win has a result, we have an intermediate result. + (True, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'chromium', + lambda path: True)), + # There are no intermediate results. + (False, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'chromium', + lambda path: False)), + # There are no intermediate results since a result for chromium is + # our duplicate file. + (False, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'chromium', + lambda path: path == 'LayoutTests/platform/chromium/fast/foo-expected.txt')), + # We have an intermediate result in 'chromium' even though our + # duplicate is with the file in 'base'. + (True, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'base', + lambda path: path == 'LayoutTests/platform/chromium/fast/foo-expected.txt')), + # We have an intermediate result in 'chromium-win' even though our + # duplicate is in 'base'. + (True, ('fast/foo-expected.txt', + ['chromium-win', 'chromium', 'base'], + 'base', + lambda path: path == 'LayoutTests/platform/chromium-win/fast/foo-expected.txt')), + ) + for expected, inputs in test_cases: + self.assertEquals(expected, + deduplicate_tests.has_intermediate_results(*inputs)) + + def test_unique(self): + MockExecutive.response = ( + '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' + '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' + '100644 blob abcd0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' + '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' + '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') + result = deduplicate_tests.deduplicate('*') + self.assertEquals(1, len(MockExecutive.last_run_command)) + self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) + self.assertEquals(0, len(result)) + + def test_duplicates(self): + MockExecutive.response = ( + '100644 blob 5053240b3353f6eb39f7cb00259785f16d121df2\tLayoutTests/mac/foo-expected.txt\n' + '100644 blob a004548d107ecc4e1ea08019daf0a14e8634a1ff\tLayoutTests/platform/chromium/foo-expected.txt\n' + '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/foo-expected.txt\n' + '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-linux/animage.png\n' + '100644 blob d6bb0bc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/foo-expected.txt\n' + '100644 blob abcdebc762e3aec5df03b5c04485b2cb3b65ffb1\tLayoutTests/platform/chromium-win/animage.png\n' + '100644 blob 4303df5389ca87cae83dd3236b8dd84e16606517\tLayoutTests/platform/mac/foo-expected.txt\n') + + result = deduplicate_tests.deduplicate('*') + self.assertEquals(1, len(MockExecutive.last_run_command)) + self.assertEquals(('git', 'ls-tree', 
'-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) + self.assertEquals(2, len(result)) + self.assertEquals({'test': 'animage.png', + 'path': 'LayoutTests/platform/chromium-linux/animage.png', + 'fallback': 'chromium-win', + 'platform': 'chromium-linux'}, + result[0]) + self.assertEquals({'test': 'foo-expected.txt', + 'path': 'LayoutTests/platform/chromium-linux/foo-expected.txt', + 'fallback': 'chromium-win', + 'platform': 'chromium-linux'}, + result[1]) + + result = deduplicate_tests.deduplicate('*.txt') + self.assertEquals(2, len(MockExecutive.last_run_command)) + self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) + self.assertEquals(1, len(result)) + self.assertEquals({'test': 'foo-expected.txt', + 'path': 'LayoutTests/platform/chromium-linux/foo-expected.txt', + 'fallback': 'chromium-win', + 'platform': 'chromium-linux'}, + result[0]) + + result = deduplicate_tests.deduplicate('*.png') + self.assertEquals(3, len(MockExecutive.last_run_command)) + self.assertEquals(('git', 'ls-tree', '-r', 'HEAD', 'LayoutTests'), MockExecutive.last_run_command[-1]) + self.assertEquals(1, len(result)) + self.assertEquals({'test': 'animage.png', + 'path': 'LayoutTests/platform/chromium-linux/animage.png', + 'fallback': 'chromium-win', + 'platform': 'chromium-linux'}, + result[0]) + + def test_get_relative_test_path(self): + checkout_root = scm.find_checkout_root() + layout_test_dir = os.path.join(checkout_root, 'LayoutTests') + test_cases = ( + ('platform/mac/test.html', + ('platform/mac/test.html', layout_test_dir)), + ('LayoutTests/platform/mac/test.html', + ('platform/mac/test.html', checkout_root)), + (None, + ('platform/mac/test.html', os.path.join(checkout_root, 'WebCore'))), + ('test.html', + ('platform/mac/test.html', os.path.join(layout_test_dir, 'platform/mac'))), + (None, + ('platform/mac/test.html', os.path.join(layout_test_dir, 'platform/win'))), + ) + for expected, inputs in test_cases: + self.assertEquals(expected, + deduplicate_tests.get_relative_test_path(*inputs)) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py new file mode 100644 index 0000000..fdb8da6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py @@ -0,0 +1,569 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A Thread object for running DumpRenderTree and processing URLs from a +shared queue. + +Each thread runs a separate instance of the DumpRenderTree binary and validates +the output. When there are no more URLs to process in the shared queue, the +thread exits. +""" + +from __future__ import with_statement + +import codecs +import copy +import logging +import os +import Queue +import signal +import sys +import thread +import threading +import time + + +from webkitpy.layout_tests.test_types import image_diff +from webkitpy.layout_tests.test_types import test_type_base +from webkitpy.layout_tests.test_types import text_diff + +import test_failures +import test_output +import test_results + +_log = logging.getLogger("webkitpy.layout_tests.layout_package." + "dump_render_tree_thread") + + +def _expected_test_output(port, filename): + """Returns an expected TestOutput object.""" + return test_output.TestOutput(port.expected_text(filename), + port.expected_image(filename), + port.expected_checksum(filename)) + +def _process_output(port, options, test_input, test_types, test_args, + test_output, worker_name): + """Receives the output from a DumpRenderTree process, subjects it to a + number of tests, and returns a list of failure types the test produced. + + Args: + port: port-specific hooks + options: command line options argument from optparse + proc: an active DumpRenderTree process + test_input: Object containing the test filename and timeout + test_types: list of test types to subject the output to + test_args: arguments to be passed to each test + test_output: a TestOutput object containing the output of the test + worker_name: worker name for logging + + Returns: a TestResult object + """ + failures = [] + + if test_output.crash: + failures.append(test_failures.FailureCrash()) + if test_output.timeout: + failures.append(test_failures.FailureTimeout()) + + test_name = port.relative_test_filename(test_input.filename) + if test_output.crash: + _log.debug("%s Stacktrace for %s:\n%s" % (worker_name, test_name, + test_output.error)) + filename = os.path.join(options.results_directory, test_name) + filename = os.path.splitext(filename)[0] + "-stack.txt" + port.maybe_make_directory(os.path.split(filename)[0]) + with codecs.open(filename, "wb", "utf-8") as file: + file.write(test_output.error) + elif test_output.error: + _log.debug("%s %s output stderr lines:\n%s" % (worker_name, test_name, + test_output.error)) + + expected_test_output = _expected_test_output(port, test_input.filename) + + # Check the output and save the results. 
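+    # Each registered test type (e.g. TestTextDiff, ImageDiff) is timed
+    # separately below; the per-type timings are keyed by class name and
+    # returned in the TestResult's time_for_diffs dictionary.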
+ start_time = time.time() + time_for_diffs = {} + for test_type in test_types: + start_diff_time = time.time() + new_failures = test_type.compare_output(port, test_input.filename, + test_args, test_output, + expected_test_output) + # Don't add any more failures if we already have a crash, so we don't + # double-report those tests. We do double-report for timeouts since + # we still want to see the text and image output. + if not test_output.crash: + failures.extend(new_failures) + time_for_diffs[test_type.__class__.__name__] = ( + time.time() - start_diff_time) + + total_time_for_all_diffs = time.time() - start_diff_time + return test_results.TestResult(test_input.filename, failures, test_output.test_time, + total_time_for_all_diffs, time_for_diffs) + + +def _pad_timeout(timeout): + """Returns a safe multiple of the per-test timeout value to use + to detect hung test threads. + + """ + # When we're running one test per DumpRenderTree process, we can + # enforce a hard timeout. The DumpRenderTree watchdog uses 2.5x + # the timeout; we want to be larger than that. + return timeout * 3 + + +def _milliseconds_to_seconds(msecs): + return float(msecs) / 1000.0 + + +def _should_fetch_expected_checksum(options): + return options.pixel_tests and not (options.new_baseline or options.reset_results) + + +def _run_single_test(port, options, test_input, test_types, test_args, driver, worker_name): + # FIXME: Pull this into TestShellThread._run(). + + # The image hash is used to avoid doing an image dump if the + # checksums match, so it should be set to a blank value if we + # are generating a new baseline. (Otherwise, an image from a + # previous run will be copied into the baseline.""" + if _should_fetch_expected_checksum(options): + test_input.image_hash = port.expected_checksum(test_input.filename) + test_output = driver.run_test(test_input) + return _process_output(port, options, test_input, test_types, test_args, + test_output, worker_name) + + +class SingleTestThread(threading.Thread): + """Thread wrapper for running a single test file.""" + + def __init__(self, port, options, worker_number, worker_name, + test_input, test_types, test_args): + """ + Args: + port: object implementing port-specific hooks + options: command line argument object from optparse + worker_number: worker number for tests + worker_name: for logging + test_input: Object containing the test filename and timeout + test_types: A list of TestType objects to run the test output + against. + test_args: A TestArguments object to pass to each TestType. + """ + + threading.Thread.__init__(self) + self._port = port + self._options = options + self._test_input = test_input + self._test_types = test_types + self._test_args = test_args + self._driver = None + self._worker_number = worker_number + self._name = worker_name + + def run(self): + self._covered_run() + + def _covered_run(self): + # FIXME: this is a separate routine to work around a bug + # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. 
+ self._driver = self._port.create_driver(self._worker_number) + self._driver.start() + self._test_result = _run_single_test(self._port, self._options, + self._test_input, self._test_types, + self._test_args, self._driver, + self._name) + self._driver.stop() + + def get_test_result(self): + return self._test_result + + +class WatchableThread(threading.Thread): + """This class abstracts an interface used by + run_webkit_tests.TestRunner._wait_for_threads_to_finish for thread + management.""" + def __init__(self): + threading.Thread.__init__(self) + self._canceled = False + self._exception_info = None + self._next_timeout = None + self._thread_id = None + + def cancel(self): + """Set a flag telling this thread to quit.""" + self._canceled = True + + def clear_next_timeout(self): + """Mark a flag telling this thread to stop setting timeouts.""" + self._timeout = 0 + + def exception_info(self): + """If run() terminated on an uncaught exception, return it here + ((type, value, traceback) tuple). + Returns None if run() terminated normally. Meant to be called after + joining this thread.""" + return self._exception_info + + def id(self): + """Return a thread identifier.""" + return self._thread_id + + def next_timeout(self): + """Return the time the test is supposed to finish by.""" + return self._next_timeout + + +class TestShellThread(WatchableThread): + def __init__(self, port, options, worker_number, worker_name, + filename_list_queue, result_queue): + """Initialize all the local state for this DumpRenderTree thread. + + Args: + port: interface to port-specific hooks + options: command line options argument from optparse + worker_number: identifier for a particular worker thread. + worker_name: for logging. + filename_list_queue: A thread safe Queue class that contains lists + of tuples of (filename, uri) pairs. + result_queue: A thread safe Queue class that will contain + serialized TestResult objects. + """ + WatchableThread.__init__(self) + self._port = port + self._options = options + self._worker_number = worker_number + self._name = worker_name + self._filename_list_queue = filename_list_queue + self._result_queue = result_queue + self._filename_list = [] + self._driver = None + self._test_group_timing_stats = {} + self._test_results = [] + self._num_tests = 0 + self._start_time = 0 + self._stop_time = 0 + self._have_http_lock = False + self._http_lock_wait_begin = 0 + self._http_lock_wait_end = 0 + + self._test_types = [] + for cls in self._get_test_type_classes(): + self._test_types.append(cls(self._port, + self._options.results_directory)) + self._test_args = self._get_test_args(worker_number) + + # Current group of tests we're running. + self._current_group = None + # Number of tests in self._current_group. + self._num_tests_in_current_group = None + # Time at which we started running tests from self._current_group. 
+ self._current_group_start_time = None + + def _get_test_args(self, worker_number): + """Returns the tuple of arguments for tests and for DumpRenderTree.""" + test_args = test_type_base.TestArguments() + test_args.new_baseline = self._options.new_baseline + test_args.reset_results = self._options.reset_results + + return test_args + + def _get_test_type_classes(self): + classes = [text_diff.TestTextDiff] + if self._options.pixel_tests: + classes.append(image_diff.ImageDiff) + return classes + + def get_test_group_timing_stats(self): + """Returns a dictionary mapping test group to a tuple of + (number of tests in that group, time to run the tests)""" + return self._test_group_timing_stats + + def get_test_results(self): + """Return the list of all tests run on this thread. + + This is used to calculate per-thread statistics. + + """ + return self._test_results + + def get_total_time(self): + return max(self._stop_time - self._start_time - + self._http_lock_wait_time(), 0.0) + + def get_num_tests(self): + return self._num_tests + + def run(self): + """Delegate main work to a helper method and watch for uncaught + exceptions.""" + self._covered_run() + + def _covered_run(self): + # FIXME: this is a separate routine to work around a bug + # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. + self._thread_id = thread.get_ident() + self._start_time = time.time() + self._num_tests = 0 + try: + _log.debug('%s starting' % (self.getName())) + self._run(test_runner=None, result_summary=None) + _log.debug('%s done (%d tests)' % (self.getName(), + self.get_num_tests())) + except KeyboardInterrupt: + self._exception_info = sys.exc_info() + _log.debug("%s interrupted" % self.getName()) + except: + # Save the exception for our caller to see. + self._exception_info = sys.exc_info() + self._stop_time = time.time() + _log.error('%s dying, exception raised' % self.getName()) + + self._stop_time = time.time() + + def run_in_main_thread(self, test_runner, result_summary): + """This hook allows us to run the tests from the main thread if + --num-test-shells==1, instead of having to always run two or more + threads. This allows us to debug the test harness without having to + do multi-threaded debugging.""" + self._run(test_runner, result_summary) + + def cancel(self): + """Clean up http lock and set a flag telling this thread to quit.""" + self._stop_servers_with_lock() + WatchableThread.cancel(self) + + def next_timeout(self): + """Return the time the test is supposed to finish by.""" + if self._next_timeout: + return self._next_timeout + self._http_lock_wait_time() + return self._next_timeout + + def _http_lock_wait_time(self): + """Return the time what http locking takes.""" + if self._http_lock_wait_begin == 0: + return 0 + if self._http_lock_wait_end == 0: + return time.time() - self._http_lock_wait_begin + return self._http_lock_wait_end - self._http_lock_wait_begin + + def _run(self, test_runner, result_summary): + """Main work entry point of the thread. Basically we pull urls from the + filename queue and run the tests until we run out of urls. + + If test_runner is not None, then we call test_runner.UpdateSummary() + with the results of each test.""" + batch_size = self._options.batch_size + batch_count = 0 + + # Append tests we're running to the existing tests_run.txt file. + # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput. 
+ tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + tests_run_file = codecs.open(tests_run_filename, "a", "utf-8") + + while True: + if self._canceled: + _log.debug('Testing cancelled') + tests_run_file.close() + return + + if len(self._filename_list) is 0: + if self._current_group is not None: + self._test_group_timing_stats[self._current_group] = \ + (self._num_tests_in_current_group, + time.time() - self._current_group_start_time) + + try: + self._current_group, self._filename_list = \ + self._filename_list_queue.get_nowait() + except Queue.Empty: + self._stop_servers_with_lock() + self._kill_dump_render_tree() + tests_run_file.close() + return + + if self._current_group == "tests_to_http_lock": + self._start_servers_with_lock() + elif self._have_http_lock: + self._stop_servers_with_lock() + + self._num_tests_in_current_group = len(self._filename_list) + self._current_group_start_time = time.time() + + test_input = self._filename_list.pop() + + # We have a url, run tests. + batch_count += 1 + self._num_tests += 1 + if self._options.run_singly: + result = self._run_test_in_another_thread(test_input) + else: + result = self._run_test_in_this_thread(test_input) + + filename = test_input.filename + tests_run_file.write(filename + "\n") + if result.failures: + # Check and kill DumpRenderTree if we need to. + if len([1 for f in result.failures + if f.should_kill_dump_render_tree()]): + self._kill_dump_render_tree() + # Reset the batch count since the shell just bounced. + batch_count = 0 + # Print the error message(s). + error_str = '\n'.join([' ' + f.message() for + f in result.failures]) + _log.debug("%s %s failed:\n%s" % (self.getName(), + self._port.relative_test_filename(filename), + error_str)) + else: + _log.debug("%s %s passed" % (self.getName(), + self._port.relative_test_filename(filename))) + self._result_queue.put(result.dumps()) + + if batch_size > 0 and batch_count >= batch_size: + # Bounce the shell and reset count. + self._kill_dump_render_tree() + batch_count = 0 + + if test_runner: + test_runner.update_summary(result_summary) + + def _run_test_in_another_thread(self, test_input): + """Run a test in a separate thread, enforcing a hard time limit. + + Since we can only detect the termination of a thread, not any internal + state or progress, we can only run per-test timeouts when running test + files singly. + + Args: + test_input: Object containing the test filename and timeout + + Returns: + A TestResult + """ + worker = SingleTestThread(self._port, + self._options, + self._worker_number, + self._name, + test_input, + self._test_types, + self._test_args) + + worker.start() + + thread_timeout = _milliseconds_to_seconds( + _pad_timeout(int(test_input.timeout))) + thread._next_timeout = time.time() + thread_timeout + worker.join(thread_timeout) + if worker.isAlive(): + # If join() returned with the thread still running, the + # DumpRenderTree is completely hung and there's nothing + # more we can do with it. We have to kill all the + # DumpRenderTrees to free it up. If we're running more than + # one DumpRenderTree thread, we'll end up killing the other + # DumpRenderTrees too, introducing spurious crashes. We accept + # that tradeoff in order to avoid losing the rest of this + # thread's results. + _log.error('Test thread hung: killing all DumpRenderTrees') + if worker._driver: + worker._driver.stop() + + try: + result = worker.get_test_result() + except AttributeError, e: + # This gets raised if the worker thread has already exited. 
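+            # (The worker never assigned its _test_result because
+            # _covered_run() did not complete, so the attribute is missing.)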
+ failures = [] + _log.error('Cannot get results of test: %s' % + test_input.filename) + result = test_results.TestResult(test_input.filename, failures=[], + test_run_time=0, total_time_for_all_diffs=0, time_for_diffs={}) + + return result + + def _run_test_in_this_thread(self, test_input): + """Run a single test file using a shared DumpRenderTree process. + + Args: + test_input: Object containing the test filename, uri and timeout + + Returns: a TestResult object. + """ + self._ensure_dump_render_tree_is_running() + thread_timeout = _milliseconds_to_seconds( + _pad_timeout(int(test_input.timeout))) + self._next_timeout = time.time() + thread_timeout + test_result = _run_single_test(self._port, self._options, test_input, + self._test_types, self._test_args, + self._driver, self._name) + self._test_results.append(test_result) + return test_result + + def _ensure_dump_render_tree_is_running(self): + """Start the shared DumpRenderTree, if it's not running. + + This is not for use when running tests singly, since those each start + a separate DumpRenderTree in their own thread. + + """ + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + if not self._driver or self._driver.poll() is not None: + self._driver = self._port.create_driver(self._worker_number) + self._driver.start() + + def _start_servers_with_lock(self): + """Acquire http lock and start the servers.""" + self._http_lock_wait_begin = time.time() + _log.debug('Acquire http lock ...') + self._port.acquire_http_lock() + _log.debug('Starting HTTP server ...') + self._port.start_http_server() + _log.debug('Starting WebSocket server ...') + self._port.start_websocket_server() + self._http_lock_wait_end = time.time() + self._have_http_lock = True + + def _stop_servers_with_lock(self): + """Stop the servers and release http lock.""" + if self._have_http_lock: + _log.debug('Stopping HTTP server ...') + self._port.stop_http_server() + _log.debug('Stopping WebSocket server ...') + self._port.stop_websocket_server() + _log.debug('Release http lock ...') + self._port.release_http_lock() + self._have_http_lock = False + + def _kill_dump_render_tree(self): + """Kill the DumpRenderTree process if it's running.""" + if self._driver: + self._driver.stop() + self._driver = None diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py new file mode 100644 index 0000000..b054c5b --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py @@ -0,0 +1,212 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os + +from webkitpy.layout_tests.layout_package import json_results_generator +from webkitpy.layout_tests.layout_package import test_expectations +from webkitpy.layout_tests.layout_package import test_failures +import webkitpy.thirdparty.simplejson as simplejson + + +class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase): + """A JSON results generator for layout tests.""" + + LAYOUT_TESTS_PATH = "LayoutTests" + + # Additional JSON fields. + WONTFIX = "wontfixCounts" + + # Note that we omit test_expectations.FAIL from this list because + # it should never show up (it's a legacy input expectation, never + # an output expectation). + FAILURE_TO_CHAR = {test_expectations.CRASH: "C", + test_expectations.TIMEOUT: "T", + test_expectations.IMAGE: "I", + test_expectations.TEXT: "F", + test_expectations.MISSING: "O", + test_expectations.IMAGE_PLUS_TEXT: "Z"} + + def __init__(self, port, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_timings, expectations, result_summary, all_tests, + generate_incremental_results=False, test_results_server=None, + test_type="", master_name=""): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args: + result_summary: ResultsSummary object storing the summary of the test + results. + """ + super(JSONLayoutResultsGenerator, self).__init__( + builder_name, build_name, build_number, results_file_base_path, + builder_base_url, {}, port.test_repository_paths(), + generate_incremental_results, test_results_server, + test_type, master_name) + + self._port = port + self._expectations = expectations + + # We want relative paths to LayoutTest root for JSON output. + path_to_name = self._get_path_relative_to_layout_test_root + self._result_summary = result_summary + self._failures = dict( + (path_to_name(test), test_failures.determine_result_type(failures)) + for (test, failures) in result_summary.failures.iteritems()) + self._all_tests = [path_to_name(test) for test in all_tests] + self._test_timings = dict( + (path_to_name(test_tuple.filename), test_tuple.test_run_time) + for test_tuple in test_timings) + + self.generate_json_output() + + def _get_path_relative_to_layout_test_root(self, test): + """Returns the path of the test relative to the layout test root. + For example, for: + src/third_party/WebKit/LayoutTests/fast/forms/foo.html + We would return + fast/forms/foo.html + """ + index = test.find(self.LAYOUT_TESTS_PATH) + if index is not -1: + index += len(self.LAYOUT_TESTS_PATH) + + if index is -1: + # Already a relative path. + relativePath = test + else: + relativePath = test[index + 1:] + + # Make sure all paths are unix-style. 
+ return relativePath.replace('\\', '/') + + # override + def _get_test_timing(self, test_name): + if test_name in self._test_timings: + # Floor for now to get time in seconds. + return int(self._test_timings[test_name]) + return 0 + + # override + def _get_failed_test_names(self): + return set(self._failures.keys()) + + # override + def _get_modifier_char(self, test_name): + if test_name not in self._all_tests: + return self.NO_DATA_RESULT + + if test_name in self._failures: + return self.FAILURE_TO_CHAR[self._failures[test_name]] + + return self.PASS_RESULT + + # override + def _get_result_char(self, test_name): + return self._get_modifier_char(test_name) + + # override + def _convert_json_to_current_version(self, results_json): + archive_version = None + if self.VERSION_KEY in results_json: + archive_version = results_json[self.VERSION_KEY] + + super(JSONLayoutResultsGenerator, + self)._convert_json_to_current_version(results_json) + + # version 2->3 + if archive_version == 2: + for results_for_builder in results_json.itervalues(): + try: + test_results = results_for_builder[self.TESTS] + except: + continue + + for test in test_results: + # Make sure all paths are relative + test_path = self._get_path_relative_to_layout_test_root(test) + if test_path != test: + test_results[test_path] = test_results[test] + del test_results[test] + + # override + def _insert_failure_summaries(self, results_for_builder): + summary = self._result_summary + + self._insert_item_into_raw_list(results_for_builder, + len((set(summary.failures.keys()) | + summary.tests_by_expectation[test_expectations.SKIP]) & + summary.tests_by_timeline[test_expectations.NOW]), + self.FIXABLE_COUNT) + self._insert_item_into_raw_list(results_for_builder, + self._get_failure_summary_entry(test_expectations.NOW), + self.FIXABLE) + self._insert_item_into_raw_list(results_for_builder, + len(self._expectations.get_tests_with_timeline( + test_expectations.NOW)), self.ALL_FIXABLE_COUNT) + self._insert_item_into_raw_list(results_for_builder, + self._get_failure_summary_entry(test_expectations.WONTFIX), + self.WONTFIX) + + # override + def _normalize_results_json(self, test, test_name, tests): + super(JSONLayoutResultsGenerator, self)._normalize_results_json( + test, test_name, tests) + + # Remove tests that don't exist anymore. + full_path = os.path.join(self._port.layout_tests_dir(), test_name) + full_path = os.path.normpath(full_path) + if not os.path.exists(full_path): + del tests[test_name] + + def _get_failure_summary_entry(self, timeline): + """Creates a summary object to insert into the JSON. + + Args: + summary ResultSummary object with test results + timeline current test_expectations timeline to build entry for + (e.g., test_expectations.NOW, etc.) 
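+
+        Returns a dictionary keyed by single-character result codes, for
+        example (illustrative counts only) {'X': 3, 'P': 120, 'T': 2}: 'X'
+        is SKIP_RESULT, 'P' is PASS_RESULT, and the remaining keys come
+        from FAILURE_TO_CHAR.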
+ """ + entry = {} + summary = self._result_summary + timeline_tests = summary.tests_by_timeline[timeline] + entry[self.SKIP_RESULT] = len( + summary.tests_by_expectation[test_expectations.SKIP] & + timeline_tests) + entry[self.PASS_RESULT] = len( + summary.tests_by_expectation[test_expectations.PASS] & + timeline_tests) + for failure_type in summary.tests_by_expectation.keys(): + if failure_type not in self.FAILURE_TO_CHAR: + continue + count = len(summary.tests_by_expectation[failure_type] & + timeline_tests) + entry[self.FAILURE_TO_CHAR[failure_type]] = count + return entry diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py new file mode 100644 index 0000000..54d129b --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py @@ -0,0 +1,598 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import codecs +import logging +import os +import subprocess +import sys +import time +import urllib2 +import xml.dom.minidom + +from webkitpy.layout_tests.layout_package import test_results_uploader + +import webkitpy.thirdparty.simplejson as simplejson + +# A JSON results generator for generic tests. +# FIXME: move this code out of the layout_package directory. + +_log = logging.getLogger("webkitpy.layout_tests.layout_package.json_results_generator") + +class TestResult(object): + """A simple class that represents a single test result.""" + + # Test modifier constants. 
+ (NONE, FAILS, FLAKY, DISABLED) = range(4) + + def __init__(self, name, failed=False, elapsed_time=0): + self.name = name + self.failed = failed + self.time = elapsed_time + + test_name = name + try: + test_name = name.split('.')[1] + except IndexError: + _log.warn("Invalid test name: %s.", name) + pass + + if test_name.startswith('FAILS_'): + self.modifier = self.FAILS + elif test_name.startswith('FLAKY_'): + self.modifier = self.FLAKY + elif test_name.startswith('DISABLED_'): + self.modifier = self.DISABLED + else: + self.modifier = self.NONE + + def fixable(self): + return self.failed or self.modifier == self.DISABLED + + +class JSONResultsGeneratorBase(object): + """A JSON results generator for generic tests.""" + + MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750 + # Min time (seconds) that will be added to the JSON. + MIN_TIME = 1 + JSON_PREFIX = "ADD_RESULTS(" + JSON_SUFFIX = ");" + + # Note that in non-chromium tests those chars are used to indicate + # test modifiers (FAILS, FLAKY, etc) but not actual test results. + PASS_RESULT = "P" + SKIP_RESULT = "X" + FAIL_RESULT = "F" + FLAKY_RESULT = "L" + NO_DATA_RESULT = "N" + + MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT, + TestResult.DISABLED: SKIP_RESULT, + TestResult.FAILS: FAIL_RESULT, + TestResult.FLAKY: FLAKY_RESULT} + + VERSION = 3 + VERSION_KEY = "version" + RESULTS = "results" + TIMES = "times" + BUILD_NUMBERS = "buildNumbers" + TIME = "secondsSinceEpoch" + TESTS = "tests" + + FIXABLE_COUNT = "fixableCount" + FIXABLE = "fixableCounts" + ALL_FIXABLE_COUNT = "allFixableCount" + + RESULTS_FILENAME = "results.json" + INCREMENTAL_RESULTS_FILENAME = "incremental_results.json" + + URL_FOR_TEST_LIST_JSON = \ + "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s" + + def __init__(self, builder_name, build_name, build_number, + results_file_base_path, builder_base_url, + test_results_map, svn_repositories=None, + generate_incremental_results=False, + test_results_server=None, + test_type="", + master_name=""): + """Modifies the results.json file. Grabs it off the archive directory + if it is not found locally. + + Args + builder_name: the builder name (e.g. Webkit). + build_name: the build name (e.g. webkit-rel). + build_number: the build number. + results_file_base_path: Absolute path to the directory containing the + results json file. + builder_base_url: the URL where we have the archived test results. + If this is None no archived results will be retrieved. + test_results_map: A dictionary that maps test_name to TestResult. + svn_repositories: A (json_field_name, svn_path) pair for SVN + repositories that tests rely on. The SVN revision will be + included in the JSON with the given json_field_name. + generate_incremental_results: If true, generate incremental json file + from current run results. + test_results_server: server that hosts test results json. + test_type: test type string (e.g. 'layout-tests'). + master_name: the name of the buildbot master. 
+ """ + self._builder_name = builder_name + self._build_name = build_name + self._build_number = build_number + self._builder_base_url = builder_base_url + self._results_directory = results_file_base_path + self._results_file_path = os.path.join(results_file_base_path, + self.RESULTS_FILENAME) + self._incremental_results_file_path = os.path.join( + results_file_base_path, self.INCREMENTAL_RESULTS_FILENAME) + + self._test_results_map = test_results_map + self._test_results = test_results_map.values() + self._generate_incremental_results = generate_incremental_results + + self._svn_repositories = svn_repositories + if not self._svn_repositories: + self._svn_repositories = {} + + self._test_results_server = test_results_server + self._test_type = test_type + self._master_name = master_name + + self._json = None + self._archived_results = None + + def generate_json_output(self): + """Generates the JSON output file.""" + + # Generate the JSON output file that has full results. + # FIXME: stop writing out the full results file once all bots use + # incremental results. + if not self._json: + self._json = self.get_json() + if self._json: + self._generate_json_file(self._json, self._results_file_path) + + # Generate the JSON output file that only has incremental results. + if self._generate_incremental_results: + json = self.get_json(incremental=True) + if json: + self._generate_json_file( + json, self._incremental_results_file_path) + + def get_json(self, incremental=False): + """Gets the results for the results.json file.""" + results_json = {} + if not incremental: + if self._json: + return self._json + + if self._archived_results: + results_json = self._archived_results + + if not results_json: + results_json, error = self._get_archived_json_results(incremental) + if error: + # If there was an error don't write a results.json + # file at all as it would lose all the information on the + # bot. + _log.error("Archive directory is inaccessible. Not " + "modifying or clobbering the results.json " + "file: " + str(error)) + return None + + builder_name = self._builder_name + if results_json and builder_name not in results_json: + _log.debug("Builder name (%s) is not in the results.json file." + % builder_name) + + self._convert_json_to_current_version(results_json) + + if builder_name not in results_json: + results_json[builder_name] = ( + self._create_results_for_builder_json()) + + results_for_builder = results_json[builder_name] + + self._insert_generic_metadata(results_for_builder) + + self._insert_failure_summaries(results_for_builder) + + # Update the all failing tests with result type and time. + tests = results_for_builder[self.TESTS] + all_failing_tests = self._get_failed_test_names() + all_failing_tests.update(tests.iterkeys()) + for test in all_failing_tests: + self._insert_test_time_and_result(test, tests, incremental) + + return results_json + + def set_archived_results(self, archived_results): + self._archived_results = archived_results + + def upload_json_files(self, json_files): + """Uploads the given json_files to the test_results_server (if the + test_results_server is given).""" + if not self._test_results_server: + return + + if not self._master_name: + _log.error("--test-results-server was set, but --master-name was not. 
Not uploading JSON files.") + return + + _log.info("Uploading JSON files for builder: %s", self._builder_name) + attrs = [("builder", self._builder_name), + ("testtype", self._test_type), + ("master", self._master_name)] + + files = [(file, os.path.join(self._results_directory, file)) + for file in json_files] + + uploader = test_results_uploader.TestResultsUploader( + self._test_results_server) + try: + # Set uploading timeout in case appengine server is having problem. + # 120 seconds are more than enough to upload test results. + uploader.upload(attrs, files, 120) + except Exception, err: + _log.error("Upload failed: %s" % err) + return + + _log.info("JSON files uploaded.") + + def _generate_json_file(self, json, file_path): + # Specify separators in order to get compact encoding. + json_data = simplejson.dumps(json, separators=(',', ':')) + json_string = self.JSON_PREFIX + json_data + self.JSON_SUFFIX + + results_file = codecs.open(file_path, "w", "utf-8") + results_file.write(json_string) + results_file.close() + + def _get_test_timing(self, test_name): + """Returns test timing data (elapsed time) in second + for the given test_name.""" + if test_name in self._test_results_map: + # Floor for now to get time in seconds. + return int(self._test_results_map[test_name].time) + return 0 + + def _get_failed_test_names(self): + """Returns a set of failed test names.""" + return set([r.name for r in self._test_results if r.failed]) + + def _get_modifier_char(self, test_name): + """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, + PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier + for the given test_name. + """ + if test_name not in self._test_results_map: + return self.__class__.NO_DATA_RESULT + + test_result = self._test_results_map[test_name] + if test_result.modifier in self.MODIFIER_TO_CHAR.keys(): + return self.MODIFIER_TO_CHAR[test_result.modifier] + + return self.__class__.PASS_RESULT + + def _get_result_char(self, test_name): + """Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT, + PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result + for the given test_name. + """ + if test_name not in self._test_results_map: + return self.__class__.NO_DATA_RESULT + + test_result = self._test_results_map[test_name] + if test_result.modifier == TestResult.DISABLED: + return self.__class__.SKIP_RESULT + + if test_result.failed: + return self.__class__.FAIL_RESULT + + return self.__class__.PASS_RESULT + + # FIXME: Callers should use scm.py instead. + # FIXME: Identify and fix the run-time errors that were observed on Windows + # chromium buildbot when we had updated this code to use scm.py once before. + def _get_svn_revision(self, in_directory): + """Returns the svn revision for the given directory. + + Args: + in_directory: The directory where svn is to be run. + """ + if os.path.exists(os.path.join(in_directory, '.svn')): + # Note: Not thread safe: http://bugs.python.org/issue2320 + output = subprocess.Popen(["svn", "info", "--xml"], + cwd=in_directory, + shell=(sys.platform == 'win32'), + stdout=subprocess.PIPE).communicate()[0] + try: + dom = xml.dom.minidom.parseString(output) + return dom.getElementsByTagName('entry')[0].getAttribute( + 'revision') + except xml.parsers.expat.ExpatError: + return "" + return "" + + def _get_archived_json_results(self, for_incremental=False): + """Reads old results JSON file if it exists. + Returns (archived_results, error) tuple where error is None if results + were successfully read. 
+ + if for_incremental is True, download JSON file that only contains test + name list from test-results server. This is for generating incremental + JSON so the file generated has info for tests that failed before but + pass or are skipped from current run. + """ + results_json = {} + old_results = None + error = None + + if os.path.exists(self._results_file_path) and not for_incremental: + with codecs.open(self._results_file_path, "r", "utf-8") as file: + old_results = file.read() + elif self._builder_base_url or for_incremental: + if for_incremental: + if not self._test_results_server: + # starting from fresh if no test results server specified. + return {}, None + + results_file_url = (self.URL_FOR_TEST_LIST_JSON % + (urllib2.quote(self._test_results_server), + urllib2.quote(self._builder_name), + self.RESULTS_FILENAME, + urllib2.quote(self._test_type))) + else: + # Check if we have the archived JSON file on the buildbot + # server. + results_file_url = (self._builder_base_url + + self._build_name + "/" + self.RESULTS_FILENAME) + _log.error("Local results.json file does not exist. Grabbing " + "it off the archive at " + results_file_url) + + try: + results_file = urllib2.urlopen(results_file_url) + info = results_file.info() + old_results = results_file.read() + except urllib2.HTTPError, http_error: + # A non-4xx status code means the bot is hosed for some reason + # and we can't grab the results.json file off of it. + if (http_error.code < 400 and http_error.code >= 500): + error = http_error + except urllib2.URLError, url_error: + error = url_error + + if old_results: + # Strip the prefix and suffix so we can get the actual JSON object. + old_results = old_results[len(self.JSON_PREFIX): + len(old_results) - len(self.JSON_SUFFIX)] + + try: + results_json = simplejson.loads(old_results) + except: + _log.debug("results.json was not valid JSON. Clobbering.") + # The JSON file is not valid JSON. Just clobber the results. + results_json = {} + else: + _log.debug('Old JSON results do not exist. Starting fresh.') + results_json = {} + + return results_json, error + + def _insert_failure_summaries(self, results_for_builder): + """Inserts aggregate pass/failure statistics into the JSON. + This method reads self._test_results and generates + FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. + """ + # Insert the number of tests that failed or skipped. + fixable_count = len([r for r in self._test_results if r.fixable()]) + self._insert_item_into_raw_list(results_for_builder, + fixable_count, self.FIXABLE_COUNT) + + # Create a test modifiers (FAILS, FLAKY etc) summary dictionary. + entry = {} + for test_name in self._test_results_map.iterkeys(): + result_char = self._get_modifier_char(test_name) + entry[result_char] = entry.get(result_char, 0) + 1 + + # Insert the pass/skip/failure summary dictionary. + self._insert_item_into_raw_list(results_for_builder, entry, + self.FIXABLE) + + # Insert the number of all the tests that are supposed to pass. + all_test_count = len(self._test_results) + self._insert_item_into_raw_list(results_for_builder, + all_test_count, self.ALL_FIXABLE_COUNT) + + def _insert_item_into_raw_list(self, results_for_builder, item, key): + """Inserts the item into the list with the given key in the results for + this builder. Creates the list if no such list exists. + + Args: + results_for_builder: Dictionary containing the test results for a + single builder. 
+ item: Number or string to insert into the list. + key: Key in results_for_builder for the list to insert into. + """ + if key in results_for_builder: + raw_list = results_for_builder[key] + else: + raw_list = [] + + raw_list.insert(0, item) + raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG] + results_for_builder[key] = raw_list + + def _insert_item_run_length_encoded(self, item, encoded_results): + """Inserts the item into the run-length encoded results. + + Args: + item: String or number to insert. + encoded_results: run-length encoded results. An array of arrays, e.g. + [[3,'A'],[1,'Q']] encodes AAAQ. + """ + if len(encoded_results) and item == encoded_results[0][1]: + num_results = encoded_results[0][0] + if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + encoded_results[0][0] = num_results + 1 + else: + # Use a list instead of a class for the run-length encoding since + # we want the serialized form to be concise. + encoded_results.insert(0, [1, item]) + + def _insert_generic_metadata(self, results_for_builder): + """ Inserts generic metadata (such as version number, current time etc) + into the JSON. + + Args: + results_for_builder: Dictionary containing the test results for + a single builder. + """ + self._insert_item_into_raw_list(results_for_builder, + self._build_number, self.BUILD_NUMBERS) + + # Include SVN revisions for the given repositories. + for (name, path) in self._svn_repositories: + self._insert_item_into_raw_list(results_for_builder, + self._get_svn_revision(path), + name + 'Revision') + + self._insert_item_into_raw_list(results_for_builder, + int(time.time()), + self.TIME) + + def _insert_test_time_and_result(self, test_name, tests, incremental=False): + """ Insert a test item with its results to the given tests dictionary. + + Args: + tests: Dictionary containing test result entries. + """ + + result = self._get_result_char(test_name) + time = self._get_test_timing(test_name) + + if test_name not in tests: + tests[test_name] = self._create_results_and_times_json() + + thisTest = tests[test_name] + if self.RESULTS in thisTest: + self._insert_item_run_length_encoded(result, thisTest[self.RESULTS]) + else: + thisTest[self.RESULTS] = [[1, result]] + + if self.TIMES in thisTest: + self._insert_item_run_length_encoded(time, thisTest[self.TIMES]) + else: + thisTest[self.TIMES] = [[1, time]] + + # Don't normalize the incremental results json because we need results + # for tests that pass or have no data from current run. + if not incremental: + self._normalize_results_json(thisTest, test_name, tests) + + def _convert_json_to_current_version(self, results_json): + """If the JSON does not match the current version, converts it to the + current version and adds in the new version number. + """ + if (self.VERSION_KEY in results_json and + results_json[self.VERSION_KEY] == self.VERSION): + return + + results_json[self.VERSION_KEY] = self.VERSION + + def _create_results_and_times_json(self): + results_and_times = {} + results_and_times[self.RESULTS] = [] + results_and_times[self.TIMES] = [] + return results_and_times + + def _create_results_for_builder_json(self): + results_for_builder = {} + results_for_builder[self.TESTS] = {} + return results_for_builder + + def _remove_items_over_max_number_of_builds(self, encoded_list): + """Removes items from the run-length encoded list after the final + item that exceeds the max number of builds to track. + + Args: + encoded_results: run-length encoded results. An array of arrays, e.g. 
+ [[3,'A'],[1,'Q']] encodes AAAQ. + """ + num_builds = 0 + index = 0 + for result in encoded_list: + num_builds = num_builds + result[0] + index = index + 1 + if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG: + return encoded_list[:index] + return encoded_list + + def _normalize_results_json(self, test, test_name, tests): + """ Prune tests where all runs pass or tests that no longer exist and + truncate all results to maxNumberOfBuilds. + + Args: + test: ResultsAndTimes object for this test. + test_name: Name of the test. + tests: The JSON object with all the test results for this builder. + """ + test[self.RESULTS] = self._remove_items_over_max_number_of_builds( + test[self.RESULTS]) + test[self.TIMES] = self._remove_items_over_max_number_of_builds( + test[self.TIMES]) + + is_all_pass = self._is_results_all_of_type(test[self.RESULTS], + self.PASS_RESULT) + is_all_no_data = self._is_results_all_of_type(test[self.RESULTS], + self.NO_DATA_RESULT) + max_time = max([time[1] for time in test[self.TIMES]]) + + # Remove all passes/no-data from the results to reduce noise and + # filesize. If a test passes every run, but takes > MIN_TIME to run, + # don't throw away the data. + if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME): + del tests[test_name] + + def _is_results_all_of_type(self, results, type): + """Returns whether all the results are of the given type + (e.g. all passes).""" + return len(results) == 1 and results[0][1] == type + + +# Left here not to break anything. +class JSONResultsGenerator(JSONResultsGeneratorBase): + pass diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py new file mode 100644 index 0000000..dad549a --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py @@ -0,0 +1,220 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Unit tests for json_results_generator.py.""" + +import unittest +import optparse +import random +import shutil +import tempfile + +from webkitpy.layout_tests.layout_package import json_results_generator +from webkitpy.layout_tests.layout_package import test_expectations + + +class JSONGeneratorTest(unittest.TestCase): + def setUp(self): + self.builder_name = 'DUMMY_BUILDER_NAME' + self.build_name = 'DUMMY_BUILD_NAME' + self.build_number = 'DUMMY_BUILDER_NUMBER' + + # For archived results. + self._json = None + self._num_runs = 0 + self._tests_set = set([]) + self._test_timings = {} + self._failed_count_map = {} + + self._PASS_count = 0 + self._DISABLED_count = 0 + self._FLAKY_count = 0 + self._FAILS_count = 0 + self._fixable_count = 0 + + def _test_json_generation(self, passed_tests_list, failed_tests_list): + tests_set = set(passed_tests_list) | set(failed_tests_list) + + DISABLED_tests = set([t for t in tests_set + if t.startswith('DISABLED_')]) + FLAKY_tests = set([t for t in tests_set + if t.startswith('FLAKY_')]) + FAILS_tests = set([t for t in tests_set + if t.startswith('FAILS_')]) + PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests) + + failed_tests = set(failed_tests_list) - DISABLED_tests + failed_count_map = dict([(t, 1) for t in failed_tests]) + + test_timings = {} + i = 0 + for test in tests_set: + test_timings[test] = float(self._num_runs * 100 + i) + i += 1 + + test_results_map = dict() + for test in tests_set: + test_results_map[test] = json_results_generator.TestResult(test, + failed=(test in failed_tests), + elapsed_time=test_timings[test]) + + generator = json_results_generator.JSONResultsGeneratorBase( + self.builder_name, self.build_name, self.build_number, + '', + None, # don't fetch past json results archive + test_results_map) + + failed_count_map = dict([(t, 1) for t in failed_tests]) + + # Test incremental json results + incremental_json = generator.get_json(incremental=True) + self._verify_json_results( + tests_set, + test_timings, + failed_count_map, + len(PASS_tests), + len(DISABLED_tests), + len(FLAKY_tests), + len(DISABLED_tests | failed_tests), + incremental_json, + 1) + + # Test aggregated json results + generator.set_archived_results(self._json) + json = generator.get_json(incremental=False) + self._json = json + self._num_runs += 1 + self._tests_set |= tests_set + self._test_timings.update(test_timings) + self._PASS_count += len(PASS_tests) + self._DISABLED_count += len(DISABLED_tests) + self._FLAKY_count += len(FLAKY_tests) + self._fixable_count += len(DISABLED_tests | failed_tests) + + get = self._failed_count_map.get + for test in failed_count_map.iterkeys(): + self._failed_count_map[test] = get(test, 0) + 1 + + self._verify_json_results( + self._tests_set, + self._test_timings, + self._failed_count_map, + self._PASS_count, + self._DISABLED_count, + self._FLAKY_count, + self._fixable_count, + self._json, + self._num_runs) + + def _verify_json_results(self, tests_set, test_timings, failed_count_map, + PASS_count, DISABLED_count, FLAKY_count, + fixable_count, + json, num_runs): + # Aliasing to a short name for better access to its constants. 
+ JRG = json_results_generator.JSONResultsGeneratorBase + + self.assertTrue(JRG.VERSION_KEY in json) + self.assertTrue(self.builder_name in json) + + buildinfo = json[self.builder_name] + self.assertTrue(JRG.FIXABLE in buildinfo) + self.assertTrue(JRG.TESTS in buildinfo) + self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs) + self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number) + + if tests_set or DISABLED_count: + fixable = {} + for fixable_items in buildinfo[JRG.FIXABLE]: + for (type, count) in fixable_items.iteritems(): + if type in fixable: + fixable[type] = fixable[type] + count + else: + fixable[type] = count + + if PASS_count: + self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count) + else: + self.assertTrue(JRG.PASS_RESULT not in fixable or + fixable[JRG.PASS_RESULT] == 0) + if DISABLED_count: + self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count) + else: + self.assertTrue(JRG.SKIP_RESULT not in fixable or + fixable[JRG.SKIP_RESULT] == 0) + if FLAKY_count: + self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count) + else: + self.assertTrue(JRG.FLAKY_RESULT not in fixable or + fixable[JRG.FLAKY_RESULT] == 0) + + if failed_count_map: + tests = buildinfo[JRG.TESTS] + for test_name in failed_count_map.iterkeys(): + self.assertTrue(test_name in tests) + test = tests[test_name] + + failed = 0 + for result in test[JRG.RESULTS]: + if result[1] == JRG.FAIL_RESULT: + failed += result[0] + self.assertEqual(failed_count_map[test_name], failed) + + timing_count = 0 + for timings in test[JRG.TIMES]: + if timings[1] == test_timings[test_name]: + timing_count = timings[0] + self.assertEqual(1, timing_count) + + if fixable_count: + self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count) + + def test_json_generation(self): + self._test_json_generation([], []) + self._test_json_generation(['A1', 'B1'], []) + self._test_json_generation([], ['FAILS_A2', 'FAILS_B2']) + self._test_json_generation(['DISABLED_A3', 'DISABLED_B3'], []) + self._test_json_generation(['A4'], ['B4', 'FAILS_C4']) + self._test_json_generation(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5']) + self._test_json_generation( + ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'], + ['FAILS_D6']) + + # Generate JSON with the same test sets. (Both incremental results and + # archived results must be updated appropriately.) + self._test_json_generation( + ['A', 'FLAKY_B', 'DISABLED_C'], + ['FAILS_D', 'FLAKY_E']) + self._test_json_generation( + ['A', 'DISABLED_C', 'FLAKY_E'], + ['FLAKY_B', 'FAILS_D']) + self._test_json_generation( + ['FLAKY_B', 'DISABLED_C', 'FAILS_D'], + ['A', 'FLAKY_E']) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py new file mode 100644 index 0000000..e0ca8db --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py @@ -0,0 +1,197 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests. + +Testing is accomplished by having a manager (TestRunner) gather all of the +tests to be run, and sending messages to a pool of workers (TestShellThreads) +to run each test. Each worker communicates with one driver (usually +DumpRenderTree) to run one test at a time and then compare the output against +what we expected to get. + +This modules provides a message broker that connects the manager to the +workers: it provides a messaging abstraction and message loops, and +handles launching threads and/or processes depending on the +requested configuration. +""" + +import logging +import sys +import time +import traceback + +import dump_render_tree_thread + +_log = logging.getLogger(__name__) + + +def get(port, options): + """Return an instance of a WorkerMessageBroker.""" + worker_model = options.worker_model + if worker_model == 'old-inline': + return InlineBroker(port, options) + if worker_model == 'old-threads': + return MultiThreadedBroker(port, options) + raise ValueError('unsupported value for --worker-model: %s' % worker_model) + + +class _WorkerState(object): + def __init__(self, name): + self.name = name + self.thread = None + + +class WorkerMessageBroker(object): + def __init__(self, port, options): + self._port = port + self._options = options + self._num_workers = int(self._options.child_processes) + + # This maps worker names to their _WorkerState values. + self._workers = {} + + def _threads(self): + return tuple([w.thread for w in self._workers.values()]) + + def start_workers(self, test_runner): + """Starts up the pool of workers for running the tests. + + Args: + test_runner: a handle to the manager/TestRunner object + """ + self._test_runner = test_runner + for worker_number in xrange(self._num_workers): + worker = _WorkerState('worker-%d' % worker_number) + worker.thread = self._start_worker(worker_number, worker.name) + self._workers[worker.name] = worker + return self._threads() + + def _start_worker(self, worker_number, worker_name): + raise NotImplementedError + + def run_message_loop(self): + """Loop processing messages until done.""" + raise NotImplementedError + + def cancel_workers(self): + """Cancel/interrupt any workers that are still alive.""" + pass + + def cleanup(self): + """Perform any necessary cleanup on shutdown.""" + pass + + +class InlineBroker(WorkerMessageBroker): + def _start_worker(self, worker_number, worker_name): + # FIXME: Replace with something that isn't a thread. 
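
The worker/broker protocol described above (a get() factory keyed off --worker-model, start_workers() returning thread handles, and run_message_loop() polling until every worker is done) can be sketched with a self-contained toy analogue. None of the names below exist in webkitpy; they are invented for the sketch and only mirror the shape of WorkerMessageBroker and MultiThreadedBroker.

    import threading
    import time


    class ToyWorker(threading.Thread):
        # Stand-in for TestShellThread: drains a shared work list.
        def __init__(self, name, work_items):
            threading.Thread.__init__(self, name=name)
            self._work_items = work_items

        def run(self):
            while True:
                try:
                    self._work_items.pop()   # "run one test"
                except IndexError:
                    return
                time.sleep(0.001)


    class ToyBroker(object):
        # Mirrors start_workers()/run_message_loop() from the real broker.
        def __init__(self, num_workers, work_items):
            self._num_workers = num_workers
            self._work_items = work_items
            self._threads = []

        def start_workers(self):
            for worker_number in range(self._num_workers):
                worker = ToyWorker('worker-%d' % worker_number,
                                   self._work_items)
                worker.start()
                self._threads.append(worker)
            return self._threads

        def run_message_loop(self):
            # Like MultiThreadedBroker: poll the threads until none is alive.
            while any(thread.is_alive() for thread in self._threads):
                time.sleep(0.01)


    broker = ToyBroker(num_workers=2, work_items=list(range(20)))
    broker.start_workers()
    broker.run_message_loop()
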
+ thread = dump_render_tree_thread.TestShellThread(self._port, + self._options, worker_number, worker_name, + self._test_runner._current_filename_queue, + self._test_runner._result_queue) + # Note: Don't start() the thread! If we did, it would actually + # create another thread and start executing it, and we'd no longer + # be single-threaded. + return thread + + def run_message_loop(self): + thread = self._threads()[0] + thread.run_in_main_thread(self._test_runner, + self._test_runner._current_result_summary) + self._test_runner.update() + + +class MultiThreadedBroker(WorkerMessageBroker): + def _start_worker(self, worker_number, worker_name): + thread = dump_render_tree_thread.TestShellThread(self._port, + self._options, worker_number, worker_name, + self._test_runner._current_filename_queue, + self._test_runner._result_queue) + thread.start() + return thread + + def run_message_loop(self): + threads = self._threads() + + # Loop through all the threads waiting for them to finish. + some_thread_is_alive = True + while some_thread_is_alive: + some_thread_is_alive = False + t = time.time() + for thread in threads: + exception_info = thread.exception_info() + if exception_info is not None: + # Re-raise the thread's exception here to make it + # clear that testing was aborted. Otherwise, + # the tests that did not run would be assumed + # to have passed. + raise exception_info[0], exception_info[1], exception_info[2] + + if thread.isAlive(): + some_thread_is_alive = True + next_timeout = thread.next_timeout() + if next_timeout and t > next_timeout: + log_wedged_worker(thread.getName(), thread.id()) + thread.clear_next_timeout() + + self._test_runner.update() + + if some_thread_is_alive: + time.sleep(0.01) + + def cancel_workers(self): + threads = self._threads() + for thread in threads: + thread.cancel() + + +def log_wedged_worker(name, id): + """Log information about the given worker state.""" + stack = _find_thread_stack(id) + assert(stack is not None) + _log.error("") + _log.error("%s (tid %d) is wedged" % (name, id)) + _log_stack(stack) + _log.error("") + + +def _find_thread_stack(id): + """Returns a stack object that can be used to dump a stack trace for + the given thread id (or None if the id is not found).""" + for thread_id, stack in sys._current_frames().items(): + if thread_id == id: + return stack + return None + + +def _log_stack(stack): + """Log a stack trace to log.error().""" + for filename, lineno, name, line in traceback.extract_stack(stack): + _log.error('File: "%s", line %d, in %s' % (filename, lineno, name)) + if line: + _log.error(' %s' % line.strip()) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py new file mode 100644 index 0000000..6f04fd3 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py @@ -0,0 +1,183 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import Queue +import sys +import thread +import threading +import time +import unittest + +from webkitpy.common import array_stream +from webkitpy.common.system import outputcapture +from webkitpy.tool import mocktool + +from webkitpy.layout_tests import run_webkit_tests + +import message_broker + + +class TestThread(threading.Thread): + def __init__(self, started_queue, stopping_queue): + threading.Thread.__init__(self) + self._thread_id = None + self._started_queue = started_queue + self._stopping_queue = stopping_queue + self._timeout = False + self._timeout_queue = Queue.Queue() + self._exception_info = None + + def id(self): + return self._thread_id + + def getName(self): + return "worker-0" + + def run(self): + self._covered_run() + + def _covered_run(self): + # FIXME: this is a separate routine to work around a bug + # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. 
+ self._thread_id = thread.get_ident() + try: + self._started_queue.put('') + msg = self._stopping_queue.get() + if msg == 'KeyboardInterrupt': + raise KeyboardInterrupt + elif msg == 'Exception': + raise ValueError() + elif msg == 'Timeout': + self._timeout = True + self._timeout_queue.get() + except: + self._exception_info = sys.exc_info() + + def exception_info(self): + return self._exception_info + + def next_timeout(self): + if self._timeout: + self._timeout_queue.put('done') + return time.time() - 10 + return time.time() + + def clear_next_timeout(self): + self._next_timeout = None + +class TestHandler(logging.Handler): + def __init__(self, astream): + logging.Handler.__init__(self) + self._stream = astream + + def emit(self, record): + self._stream.write(self.format(record)) + + +class MultiThreadedBrokerTest(unittest.TestCase): + class MockTestRunner(object): + def __init__(self): + pass + + def __del__(self): + pass + + def update(self): + pass + + def run_one_thread(self, msg): + runner = self.MockTestRunner() + port = None + options = mocktool.MockOptions(child_processes='1') + starting_queue = Queue.Queue() + stopping_queue = Queue.Queue() + broker = message_broker.MultiThreadedBroker(port, options) + broker._test_runner = runner + child_thread = TestThread(starting_queue, stopping_queue) + broker._workers['worker-0'] = message_broker._WorkerState('worker-0') + broker._workers['worker-0'].thread = child_thread + child_thread.start() + started_msg = starting_queue.get() + stopping_queue.put(msg) + return broker.run_message_loop() + + def test_basic(self): + interrupted = self.run_one_thread('') + self.assertFalse(interrupted) + + def test_interrupt(self): + self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt') + + def test_timeout(self): + oc = outputcapture.OutputCapture() + oc.capture_output() + interrupted = self.run_one_thread('Timeout') + self.assertFalse(interrupted) + oc.restore_output() + + def test_exception(self): + self.assertRaises(ValueError, self.run_one_thread, 'Exception') + + +class Test(unittest.TestCase): + def test_find_thread_stack_found(self): + id, stack = sys._current_frames().items()[0] + found_stack = message_broker._find_thread_stack(id) + self.assertNotEqual(found_stack, None) + + def test_find_thread_stack_not_found(self): + found_stack = message_broker._find_thread_stack(0) + self.assertEqual(found_stack, None) + + def test_log_wedged_worker(self): + oc = outputcapture.OutputCapture() + oc.capture_output() + logger = message_broker._log + astream = array_stream.ArrayStream() + handler = TestHandler(astream) + logger.addHandler(handler) + + starting_queue = Queue.Queue() + stopping_queue = Queue.Queue() + child_thread = TestThread(starting_queue, stopping_queue) + child_thread.start() + msg = starting_queue.get() + + message_broker.log_wedged_worker(child_thread.getName(), + child_thread.id()) + stopping_queue.put('') + child_thread.join(timeout=1.0) + + self.assertFalse(astream.empty()) + self.assertFalse(child_thread.isAlive()) + oc.restore_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py new file mode 100644 index 0000000..20646a1 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +Package that implements a stream wrapper that has 'meters' as well as +regular output. A 'meter' is a single line of text that can be erased +and rewritten repeatedly, without producing multiple lines of output. It +can be used to produce effects like progress bars. + +This package should only be called by the printing module in the layout_tests +package. +""" + +import logging + +_log = logging.getLogger("webkitpy.layout_tests.metered_stream") + + +class MeteredStream: + """This class is a wrapper around a stream that allows you to implement + meters (progress bars, etc.). + + It can be used directly as a stream, by calling write(), but provides + two other methods for output, update(), and progress(). + + In normal usage, update() will overwrite the output of the immediately + preceding update() (write() also will overwrite update()). So, calling + multiple update()s in a row can provide an updating status bar (note that + if an update string contains newlines, only the text following the last + newline will be overwritten/erased). + + If the MeteredStream is constructed in "verbose" mode (i.e., by passing + verbose=true), then update() no longer overwrite a previous update(), and + instead the call is equivalent to write(), although the text is + actually sent to the logger rather than to the stream passed + to the constructor. + + progress() is just like update(), except that if you are in verbose mode, + progress messages are not output at all (they are dropped). This is + used for things like progress bars which are presumed to be unwanted in + verbose mode. + + Note that the usual usage for this class is as a destination for + a logger that can also be written to directly (i.e., some messages go + through the logger, some don't). We thus have to dance around a + layering inversion in update() for things to work correctly. 
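
A minimal usage sketch of the class described above, assuming a terminal-like stream (sys.stderr here) and verbose=False; the messages are invented for illustration:

    import sys
    from webkitpy.layout_tests.layout_package import metered_stream

    meter = metered_stream.MeteredStream(verbose=False, stream=sys.stderr)
    meter.progress("Testing (10%): 12 ran as expected, 0 didn't, 108 left")
    meter.progress("Testing (20%): 24 ran as expected, 0 didn't, 96 left")  # overwrites the previous meter
    meter.update("Retrying 3 unexpected failures")      # also overwrites; sent to the logger if verbose
    meter.write("fast/js/date.html -> unexpected failure\n")  # erases the meter, then persists
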
+ """ + + def __init__(self, verbose, stream): + """ + Args: + verbose: whether progress is a no-op and updates() aren't overwritten + stream: output stream to write to + """ + self._dirty = False + self._verbose = verbose + self._stream = stream + self._last_update = "" + + def write(self, txt): + """Write to the stream, overwriting and resetting the meter.""" + if self._dirty: + self._write(txt) + self._dirty = False + self._last_update = '' + else: + self._stream.write(txt) + + def flush(self): + """Flush any buffered output.""" + self._stream.flush() + + def progress(self, str): + """ + Write a message to the stream that will get overwritten. + + This is used for progress updates that don't need to be preserved in + the log. If the MeteredStream was initialized with verbose==True, + then this output is discarded. We have this in case we are logging + lots of output and the update()s will get lost or won't work + properly (typically because verbose streams are redirected to files). + + """ + if self._verbose: + return + self._write(str) + + def update(self, str): + """ + Write a message that is also included when logging verbosely. + + This routine preserves the same console logging behavior as progress(), + but will also log the message if verbose() was true. + + """ + # Note this is a separate routine that calls either into the logger + # or the metering stream. We have to be careful to avoid a layering + # inversion (stream calling back into the logger). + if self._verbose: + _log.info(str) + else: + self._write(str) + + def _write(self, str): + """Actually write the message to the stream.""" + + # FIXME: Figure out if there is a way to detect if we're writing + # to a stream that handles CRs correctly (e.g., terminals). That might + # be a cleaner way of handling this. + + # Print the necessary number of backspaces to erase the previous + # message. + if len(self._last_update): + self._stream.write("\b" * len(self._last_update) + + " " * len(self._last_update) + + "\b" * len(self._last_update)) + self._stream.write(str) + last_newline = str.rfind("\n") + self._last_update = str[(last_newline + 1):] + self._dirty = True diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py new file mode 100644 index 0000000..9421ff8 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/metered_stream_unittest.py @@ -0,0 +1,115 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for metered_stream.py.""" + +import os +import optparse +import pdb +import sys +import unittest + +from webkitpy.common.array_stream import ArrayStream +from webkitpy.layout_tests.layout_package import metered_stream + + +class TestMeteredStream(unittest.TestCase): + def test_regular(self): + a = ArrayStream() + m = metered_stream.MeteredStream(verbose=False, stream=a) + self.assertTrue(a.empty()) + + # basic test - note that the flush() is a no-op, but we include it + # for coverage. + m.write("foo") + m.flush() + exp = ['foo'] + self.assertEquals(a.get(), exp) + + # now check that a second write() does not overwrite the first. + m.write("bar") + exp.append('bar') + self.assertEquals(a.get(), exp) + + m.update("batter") + exp.append('batter') + self.assertEquals(a.get(), exp) + + # The next update() should overwrite the laste update() but not the + # other text. Note that the cursor is effectively positioned at the + # end of 'foo', even though we had to erase three more characters. + m.update("foo") + exp.append('\b\b\b\b\b\b \b\b\b\b\b\b') + exp.append('foo') + self.assertEquals(a.get(), exp) + + m.progress("progress") + exp.append('\b\b\b \b\b\b') + exp.append('progress') + self.assertEquals(a.get(), exp) + + # now check that a write() does overwrite the progress bar + m.write("foo") + exp.append('\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b') + exp.append('foo') + self.assertEquals(a.get(), exp) + + # Now test that we only back up to the most recent newline. + + # Note also that we do not back up to erase the most recent write(), + # i.e., write()s do not get erased. + a.reset() + m.update("foo\nbar") + m.update("baz") + self.assertEquals(a.get(), ['foo\nbar', '\b\b\b \b\b\b', 'baz']) + + def test_verbose(self): + a = ArrayStream() + m = metered_stream.MeteredStream(verbose=True, stream=a) + self.assertTrue(a.empty()) + m.write("foo") + self.assertEquals(a.get(), ['foo']) + + import logging + b = ArrayStream() + logger = logging.getLogger() + handler = logging.StreamHandler(b) + logger.addHandler(handler) + m.update("bar") + logger.handlers.remove(handler) + self.assertEquals(a.get(), ['foo']) + self.assertEquals(b.get(), ['bar\n']) + + m.progress("dropped") + self.assertEquals(a.get(), ['foo']) + self.assertEquals(b.get(), ['bar\n']) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py new file mode 100644 index 0000000..7a6aad1 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing.py @@ -0,0 +1,553 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Package that handles non-debug, non-file output for run-webkit-tests.""" + +import logging +import optparse +import os +import pdb + +from webkitpy.layout_tests.layout_package import metered_stream +from webkitpy.layout_tests.layout_package import test_expectations + +_log = logging.getLogger("webkitpy.layout_tests.printer") + +TestExpectationsFile = test_expectations.TestExpectationsFile + +NUM_SLOW_TESTS_TO_LOG = 10 + +PRINT_DEFAULT = ("misc,one-line-progress,one-line-summary,unexpected," + "unexpected-results,updates") +PRINT_EVERYTHING = ("actual,config,expected,misc,one-line-progress," + "one-line-summary,slowest,timing,unexpected," + "unexpected-results,updates") + +HELP_PRINTING = """ +Output for run-webkit-tests is controlled by a comma-separated list of +values passed to --print. Values either influence the overall output, or +the output at the beginning of the run, during the run, or at the end: + +Overall options: + nothing don't print anything. This overrides every other option + default include the default options. This is useful for logging + the default options plus additional settings. + everything print everything (except the trace-* options and the + detailed-progress option, see below for the full list ) + misc print miscellaneous things like blank lines + +At the beginning of the run: + config print the test run configuration + expected print a summary of what is expected to happen + (# passes, # failures, etc.) + +During the run: + detailed-progress print one dot per test completed + one-line-progress print a one-line progress bar + unexpected print any unexpected results as they occur + updates print updates on which stage is executing + trace-everything print detailed info on every test's results + (baselines, expectation, time it took to run). If + this is specified it will override the '*-progress' + options, the 'trace-unexpected' option, and the + 'unexpected' option. + trace-unexpected like 'trace-everything', but only for tests with + unexpected results. If this option is specified, + it will override the 'unexpected' option. 
+ +At the end of the run: + actual print a summary of the actual results + slowest print %(slowest)d slowest tests and the time they took + timing print timing statistics + unexpected-results print a list of the tests with unexpected results + one-line-summary print a one-line summary of the run + +Notes: + - 'detailed-progress' can only be used if running in a single thread + (using --child-processes=1) or a single queue of tests (using + --experimental-fully-parallel). If these conditions aren't true, + 'one-line-progress' will be used instead. + - If both 'detailed-progress' and 'one-line-progress' are specified (and + both are possible), 'detailed-progress' will be used. + - If 'nothing' is specified, it overrides all of the other options. + - Specifying --verbose is equivalent to --print everything plus it + changes the format of the log messages to add timestamps and other + information. If you specify --verbose and --print X, then X overrides + the --print everything implied by --verbose. + +--print 'everything' is equivalent to --print '%(everything)s'. + +The default (--print default) is equivalent to --print '%(default)s'. +""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING, + 'default': PRINT_DEFAULT} + + +def print_options(): + return [ + # Note: We use print_options rather than just 'print' because print + # is a reserved word. + # Note: Also, we don't specify a default value so we can detect when + # no flag is specified on the command line and use different defaults + # based on whether or not --verbose is specified (since --print + # overrides --verbose). + optparse.make_option("--print", dest="print_options", + help=("controls print output of test run. " + "Use --help-printing for more.")), + optparse.make_option("--help-printing", action="store_true", + help="show detailed help on controlling print output"), + optparse.make_option("-v", "--verbose", action="store_true", + default=False, help="include debug-level logging"), + ] + + +def parse_print_options(print_options, verbose, child_processes, + is_fully_parallel): + """Parse the options provided to --print and dedup and rank them. + + Returns + a set() of switches that govern how logging is done + + """ + if print_options: + switches = set(print_options.split(',')) + elif verbose: + switches = set(PRINT_EVERYTHING.split(',')) + else: + switches = set(PRINT_DEFAULT.split(',')) + + if 'nothing' in switches: + return set() + + if (child_processes != 1 and not is_fully_parallel and + 'detailed-progress' in switches): + _log.warn("Can only print 'detailed-progress' if running " + "with --child-processes=1 or " + "with --experimental-fully-parallel. 
" + "Using 'one-line-progress' instead.") + switches.discard('detailed-progress') + switches.add('one-line-progress') + + if 'everything' in switches: + switches.discard('everything') + switches.update(set(PRINT_EVERYTHING.split(','))) + + if 'default' in switches: + switches.discard('default') + switches.update(set(PRINT_DEFAULT.split(','))) + + if 'detailed-progress' in switches: + switches.discard('one-line-progress') + + if 'trace-everything' in switches: + switches.discard('detailed-progress') + switches.discard('one-line-progress') + switches.discard('trace-unexpected') + switches.discard('unexpected') + + if 'trace-unexpected' in switches: + switches.discard('unexpected') + + return switches + + +def _configure_logging(stream, verbose): + log_fmt = '%(message)s' + log_datefmt = '%y%m%d %H:%M:%S' + log_level = logging.INFO + if verbose: + log_fmt = ('%(asctime)s %(process)d %(filename)s:%(lineno)d ' + '%(levelname)s %(message)s') + log_level = logging.DEBUG + + root = logging.getLogger() + handler = logging.StreamHandler(stream) + handler.setFormatter(logging.Formatter(log_fmt, None)) + root.addHandler(handler) + root.setLevel(log_level) + return handler + + +def _restore_logging(handler_to_remove): + root = logging.getLogger() + root.handlers.remove(handler_to_remove) + + +class Printer(object): + """Class handling all non-debug-logging printing done by run-webkit-tests. + + Printing from run-webkit-tests falls into two buckets: general or + regular output that is read only by humans and can be changed at any + time, and output that is parsed by buildbots (and humans) and hence + must be changed more carefully and in coordination with the buildbot + parsing code (in chromium.org's buildbot/master.chromium/scripts/master/ + log_parser/webkit_test_command.py script). + + By default the buildbot-parsed code gets logged to stdout, and regular + output gets logged to stderr.""" + def __init__(self, port, options, regular_output, buildbot_output, + child_processes, is_fully_parallel): + """ + Args + port interface to port-specific routines + options OptionParser object with command line settings + regular_output stream to which output intended only for humans + should be written + buildbot_output stream to which output intended to be read by + the buildbots (and humans) should be written + child_processes number of parallel threads running (usually + controlled by --child-processes) + is_fully_parallel are the tests running in a single queue, or + in shards (usually controlled by + --experimental-fully-parallel) + + Note that the last two args are separate rather than bundled into + the options structure so that this object does not assume any flags + set in options that weren't returned from logging_options(), above. + The two are used to determine whether or not we can sensibly use + the 'detailed-progress' option, or can only use 'one-line-progress'. + """ + self._buildbot_stream = buildbot_output + self._options = options + self._port = port + self._stream = regular_output + + # These are used for --print detailed-progress to track status by + # directory. 
+ self._current_dir = None + self._current_progress_str = "" + self._current_test_number = 0 + + self._meter = metered_stream.MeteredStream(options.verbose, + regular_output) + self._logging_handler = _configure_logging(self._meter, + options.verbose) + + self.switches = parse_print_options(options.print_options, + options.verbose, child_processes, is_fully_parallel) + + def cleanup(self): + """Restore logging configuration to its initial settings.""" + if self._logging_handler: + _restore_logging(self._logging_handler) + self._logging_handler = None + + def __del__(self): + self.cleanup() + + # These two routines just hide the implementation of the switches. + def disabled(self, option): + return not option in self.switches + + def enabled(self, option): + return option in self.switches + + def help_printing(self): + self._write(HELP_PRINTING) + + def print_actual(self, msg): + if self.disabled('actual'): + return + self._buildbot_stream.write("%s\n" % msg) + + def print_config(self, msg): + self.write(msg, 'config') + + def print_expected(self, msg): + self.write(msg, 'expected') + + def print_timing(self, msg): + self.write(msg, 'timing') + + def print_one_line_summary(self, total, expected, unexpected): + """Print a one-line summary of the test run to stdout. + + Args: + total: total number of tests run + expected: number of expected results + unexpected: number of unexpected results + """ + if self.disabled('one-line-summary'): + return + + incomplete = total - expected - unexpected + if incomplete: + self._write("") + incomplete_str = " (%d didn't run)" % incomplete + expected_str = str(expected) + else: + incomplete_str = "" + expected_str = "All %d" % expected + + if unexpected == 0: + self._write("%s tests ran as expected%s." % + (expected_str, incomplete_str)) + elif expected == 1: + self._write("1 test ran as expected, %d didn't%s:" % + (unexpected, incomplete_str)) + else: + self._write("%d tests ran as expected, %d didn't%s:" % + (expected, unexpected, incomplete_str)) + self._write("") + + def print_test_result(self, result, expected, exp_str, got_str): + """Print the result of the test as determined by --print. + + This routine is used to print the details of each test as it completes. + + Args: + result - The actual TestResult object + expected - Whether the result we got was an expected result + exp_str - What we expected to get (used for tracing) + got_str - What we actually got (used for tracing) + + Note that we need all of these arguments even though they seem + somewhat redundant, in order to keep this routine from having to + known anything about the set of expectations. + """ + if (self.enabled('trace-everything') or + self.enabled('trace-unexpected') and not expected): + self._print_test_trace(result, exp_str, got_str) + elif (not expected and self.enabled('unexpected') and + self.disabled('detailed-progress')): + # Note: 'detailed-progress' handles unexpected results internally, + # so we skip it here. + self._print_unexpected_test_result(result) + + def _print_test_trace(self, result, exp_str, got_str): + """Print detailed results of a test (triggered by --print trace-*). 
+ For each test, print: + - location of the expected baselines + - expected results + - actual result + - timing info + """ + filename = result.filename + test_name = self._port.relative_test_filename(filename) + self._write('trace: %s' % test_name) + txt_file = self._port.expected_filename(filename, '.txt') + if self._port.path_exists(txt_file): + self._write(' txt: %s' % + self._port.relative_test_filename(txt_file)) + else: + self._write(' txt: <none>') + checksum_file = self._port.expected_filename(filename, '.checksum') + if self._port.path_exists(checksum_file): + self._write(' sum: %s' % + self._port.relative_test_filename(checksum_file)) + else: + self._write(' sum: <none>') + png_file = self._port.expected_filename(filename, '.png') + if self._port.path_exists(png_file): + self._write(' png: %s' % + self._port.relative_test_filename(png_file)) + else: + self._write(' png: <none>') + self._write(' exp: %s' % exp_str) + self._write(' got: %s' % got_str) + self._write(' took: %-.3f' % result.test_run_time) + self._write('') + + def _print_unexpected_test_result(self, result): + """Prints one unexpected test result line.""" + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result.type][0] + self.write(" %s -> unexpected %s" % + (self._port.relative_test_filename(result.filename), + desc), "unexpected") + + def print_progress(self, result_summary, retrying, test_list): + """Print progress through the tests as determined by --print.""" + if self.enabled('detailed-progress'): + self._print_detailed_progress(result_summary, test_list) + elif self.enabled('one-line-progress'): + self._print_one_line_progress(result_summary, retrying) + else: + return + + if result_summary.remaining == 0: + self._meter.update('') + + def _print_one_line_progress(self, result_summary, retrying): + """Displays the progress through the test run.""" + percent_complete = 100 * (result_summary.expected + + result_summary.unexpected) / result_summary.total + action = "Testing" + if retrying: + action = "Retrying" + self._meter.progress("%s (%d%%): %d ran as expected, %d didn't," + " %d left" % (action, percent_complete, result_summary.expected, + result_summary.unexpected, result_summary.remaining)) + + def _print_detailed_progress(self, result_summary, test_list): + """Display detailed progress output where we print the directory name + and one dot for each completed test. This is triggered by + "--log detailed-progress".""" + if self._current_test_number == len(test_list): + return + + next_test = test_list[self._current_test_number] + next_dir = os.path.dirname( + self._port.relative_test_filename(next_test)) + if self._current_progress_str == "": + self._current_progress_str = "%s: " % (next_dir) + self._current_dir = next_dir + + while next_test in result_summary.results: + if next_dir != self._current_dir: + self._meter.write("%s\n" % (self._current_progress_str)) + self._current_progress_str = "%s: ." % (next_dir) + self._current_dir = next_dir + else: + self._current_progress_str += "." 
+ + if (next_test in result_summary.unexpected_results and + self.enabled('unexpected')): + self._meter.write("%s\n" % self._current_progress_str) + test_result = result_summary.results[next_test] + self._print_unexpected_test_result(test_result) + self._current_progress_str = "%s: " % self._current_dir + + self._current_test_number += 1 + if self._current_test_number == len(test_list): + break + + next_test = test_list[self._current_test_number] + next_dir = os.path.dirname( + self._port.relative_test_filename(next_test)) + + if result_summary.remaining: + remain_str = " (%d)" % (result_summary.remaining) + self._meter.progress("%s%s" % (self._current_progress_str, + remain_str)) + else: + self._meter.progress("%s" % (self._current_progress_str)) + + def print_unexpected_results(self, unexpected_results): + """Prints a list of the unexpected results to the buildbot stream.""" + if self.disabled('unexpected-results'): + return + + passes = {} + flaky = {} + regressions = {} + + for test, results in unexpected_results['tests'].iteritems(): + actual = results['actual'].split(" ") + expected = results['expected'].split(" ") + if actual == ['PASS']: + if 'CRASH' in expected: + _add_to_dict_of_lists(passes, + 'Expected to crash, but passed', + test) + elif 'TIMEOUT' in expected: + _add_to_dict_of_lists(passes, + 'Expected to timeout, but passed', + test) + else: + _add_to_dict_of_lists(passes, + 'Expected to fail, but passed', + test) + elif len(actual) > 1: + # We group flaky tests by the first actual result we got. + _add_to_dict_of_lists(flaky, actual[0], test) + else: + _add_to_dict_of_lists(regressions, results['actual'], test) + + if len(passes) or len(flaky) or len(regressions): + self._buildbot_stream.write("\n") + + if len(passes): + for key, tests in passes.iteritems(): + self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests))) + tests.sort() + for test in tests: + self._buildbot_stream.write(" %s\n" % test) + self._buildbot_stream.write("\n") + self._buildbot_stream.write("\n") + + if len(flaky): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in flaky.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n" + % (descriptions[result][1], len(tests))) + tests.sort() + + for test in tests: + result = unexpected_results['tests'][test] + actual = result['actual'].split(" ") + expected = result['expected'].split(" ") + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + new_expectations_list = list(set(actual) | set(expected)) + self._buildbot_stream.write(" %s = %s\n" % + (test, " ".join(new_expectations_list))) + self._buildbot_stream.write("\n") + self._buildbot_stream.write("\n") + + if len(regressions): + descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS + for key, tests in regressions.iteritems(): + result = TestExpectationsFile.EXPECTATIONS[key.lower()] + self._buildbot_stream.write( + "Regressions: Unexpected %s : (%d)\n" % ( + descriptions[result][1], len(tests))) + tests.sort() + for test in tests: + self._buildbot_stream.write(" %s = %s\n" % (test, key)) + self._buildbot_stream.write("\n") + self._buildbot_stream.write("\n") + + if len(unexpected_results['tests']) and self._options.verbose: + self._buildbot_stream.write("%s\n" % ("-" * 78)) + + def print_update(self, msg): + if self.disabled('updates'): + return + self._meter.update(msg) + + def write(self, msg, option="misc"): + if self.disabled(option): + return + self._write(msg) + + def 
_write(self, msg): + # FIXME: we could probably get away with calling _log.info() all of + # the time, but there doesn't seem to be a good way to test the output + # from the logger :(. + if self._options.verbose: + _log.info(msg) + else: + self._meter.write("%s\n" % msg) + +# +# Utility routines used by the Controller class +# + + +def _add_to_dict_of_lists(dict, key, value): + dict.setdefault(key, []).append(value) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py new file mode 100644 index 0000000..0e478c8 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/printing_unittest.py @@ -0,0 +1,608 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
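
Before the unit tests, a short illustrative sketch of how the --print switches documented in HELP_PRINTING resolve. It only exercises the module-level parse_print_options() shown above and assumes webkitpy is on the import path:

    from webkitpy.layout_tests.layout_package import printing

    # 'everything' expands to the full PRINT_EVERYTHING set.
    switches = printing.parse_print_options('everything', verbose=False,
                                            child_processes=1,
                                            is_fully_parallel=False)
    assert 'one-line-summary' in switches

    # 'detailed-progress' is downgraded to 'one-line-progress' when more
    # than one child process is used without --experimental-fully-parallel.
    switches = printing.parse_print_options('detailed-progress', verbose=False,
                                            child_processes=4,
                                            is_fully_parallel=False)
    assert 'detailed-progress' not in switches
    assert 'one-line-progress' in switches
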
+ +"""Unit tests for printing.py.""" + +import os +import optparse +import pdb +import sys +import unittest +import logging + +from webkitpy.common import array_stream +from webkitpy.common.system import logtesting +from webkitpy.layout_tests import port + +from webkitpy.layout_tests.layout_package import printing +from webkitpy.layout_tests.layout_package import result_summary +from webkitpy.layout_tests.layout_package import test_expectations +from webkitpy.layout_tests.layout_package import test_failures +from webkitpy.layout_tests.layout_package import test_results +from webkitpy.layout_tests.layout_package import test_runner + + +def get_options(args): + print_options = printing.print_options() + option_parser = optparse.OptionParser(option_list=print_options) + return option_parser.parse_args(args) + + +class TestUtilityFunctions(unittest.TestCase): + def test_configure_logging(self): + options, args = get_options([]) + stream = array_stream.ArrayStream() + handler = printing._configure_logging(stream, options.verbose) + logging.info("this should be logged") + self.assertFalse(stream.empty()) + + stream.reset() + logging.debug("this should not be logged") + self.assertTrue(stream.empty()) + + printing._restore_logging(handler) + + stream.reset() + options, args = get_options(['--verbose']) + handler = printing._configure_logging(stream, options.verbose) + logging.debug("this should be logged") + self.assertFalse(stream.empty()) + printing._restore_logging(handler) + + def test_print_options(self): + options, args = get_options([]) + self.assertTrue(options is not None) + + def test_parse_print_options(self): + def test_switches(args, expected_switches_str, + verbose=False, child_processes=1, + is_fully_parallel=False): + options, args = get_options(args) + if expected_switches_str: + expected_switches = set(expected_switches_str.split(',')) + else: + expected_switches = set() + switches = printing.parse_print_options(options.print_options, + verbose, + child_processes, + is_fully_parallel) + self.assertEqual(expected_switches, switches) + + # test that we default to the default set of switches + test_switches([], printing.PRINT_DEFAULT) + + # test that verbose defaults to everything + test_switches([], printing.PRINT_EVERYTHING, verbose=True) + + # test that --print default does what it's supposed to + test_switches(['--print', 'default'], printing.PRINT_DEFAULT) + + # test that --print nothing does what it's supposed to + test_switches(['--print', 'nothing'], None) + + # test that --print everything does what it's supposed to + test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING) + + # this tests that '--print X' overrides '--verbose' + test_switches(['--print', 'actual'], 'actual', verbose=True) + + + +class Testprinter(unittest.TestCase): + def get_printer(self, args=None, single_threaded=False, + is_fully_parallel=False): + printing_options = printing.print_options() + option_parser = optparse.OptionParser(option_list=printing_options) + options, args = option_parser.parse_args(args) + self._port = port.get('test', options) + nproc = 2 + if single_threaded: + nproc = 1 + + regular_output = array_stream.ArrayStream() + buildbot_output = array_stream.ArrayStream() + printer = printing.Printer(self._port, options, regular_output, + buildbot_output, single_threaded, + is_fully_parallel) + return printer, regular_output, buildbot_output + + def get_result(self, test, result_type=test_expectations.PASS, run_time=0): + failures = [] + if result_type == 
test_expectations.TIMEOUT: + failures = [test_failures.FailureTimeout()] + elif result_type == test_expectations.CRASH: + failures = [test_failures.FailureCrash()] + path = os.path.join(self._port.layout_tests_dir(), test) + return test_results.TestResult(path, failures, run_time, + total_time_for_all_diffs=0, + time_for_diffs=0) + + def get_result_summary(self, tests, expectations_str): + test_paths = [os.path.join(self._port.layout_tests_dir(), test) for + test in tests] + expectations = test_expectations.TestExpectations( + self._port, test_paths, expectations_str, + self._port.test_platform_name(), is_debug_mode=False, + is_lint_mode=False) + + rs = result_summary.ResultSummary(expectations, test_paths) + return test_paths, rs, expectations + + def test_help_printer(self): + # Here and below we'll call the "regular" printer err and the + # buildbot printer out; this corresponds to how things run on the + # bots with stderr and stdout. + printer, err, out = self.get_printer() + + # This routine should print something to stdout. testing what it is + # is kind of pointless. + printer.help_printing() + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + def do_switch_tests(self, method_name, switch, to_buildbot, + message='hello', exp_err=None, exp_bot=None): + def do_helper(method_name, switch, message, exp_err, exp_bot): + printer, err, bot = self.get_printer(['--print', switch]) + getattr(printer, method_name)(message) + self.assertEqual(err.get(), exp_err) + self.assertEqual(bot.get(), exp_bot) + + if to_buildbot: + if exp_err is None: + exp_err = [] + if exp_bot is None: + exp_bot = [message + "\n"] + else: + if exp_err is None: + exp_err = [message + "\n"] + if exp_bot is None: + exp_bot = [] + do_helper(method_name, 'nothing', 'hello', [], []) + do_helper(method_name, switch, 'hello', exp_err, exp_bot) + do_helper(method_name, 'everything', 'hello', exp_err, exp_bot) + + def test_configure_and_cleanup(self): + # This test verifies that calling cleanup repeatedly and deleting + # the object is safe. + printer, err, out = self.get_printer(['--print', 'everything']) + printer.cleanup() + printer.cleanup() + printer = None + + def test_print_actual(self): + # Actual results need to be logged to the buildbot's stream. + self.do_switch_tests('print_actual', 'actual', to_buildbot=True) + + def test_print_actual_buildbot(self): + # FIXME: Test that the format of the actual results matches what the + # buildbot is expecting. + pass + + def test_print_config(self): + self.do_switch_tests('print_config', 'config', to_buildbot=False) + + def test_print_expected(self): + self.do_switch_tests('print_expected', 'expected', to_buildbot=False) + + def test_print_timing(self): + self.do_switch_tests('print_timing', 'timing', to_buildbot=False) + + def test_print_update(self): + # Note that there shouldn't be a carriage return here; updates() + # are meant to be overwritten. 
+ self.do_switch_tests('print_update', 'updates', to_buildbot=False, + message='hello', exp_err=['hello']) + + def test_print_one_line_summary(self): + printer, err, out = self.get_printer(['--print', 'nothing']) + printer.print_one_line_summary(1, 1, 0) + self.assertTrue(err.empty()) + + printer, err, out = self.get_printer(['--print', 'one-line-summary']) + printer.print_one_line_summary(1, 1, 0) + self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"]) + + printer, err, out = self.get_printer(['--print', 'everything']) + printer.print_one_line_summary(1, 1, 0) + self.assertEquals(err.get(), ["All 1 tests ran as expected.\n", "\n"]) + + err.reset() + printer.print_one_line_summary(2, 1, 1) + self.assertEquals(err.get(), + ["1 test ran as expected, 1 didn't:\n", "\n"]) + + err.reset() + printer.print_one_line_summary(3, 2, 1) + self.assertEquals(err.get(), + ["2 tests ran as expected, 1 didn't:\n", "\n"]) + + err.reset() + printer.print_one_line_summary(3, 2, 0) + self.assertEquals(err.get(), + ['\n', "2 tests ran as expected (1 didn't run).\n", + '\n']) + + + def test_print_test_result(self): + # Note here that we don't use meaningful exp_str and got_str values; + # the actual contents of the string are treated opaquely by + # print_test_result() when tracing, and usually we don't want + # to test what exactly is printed, just that something + # was printed (or that nothing was printed). + # + # FIXME: this is actually some goofy layering; it would be nice + # we could refactor it so that the args weren't redundant. Maybe + # the TestResult should contain what was expected, and the + # strings could be derived from the TestResult? + printer, err, out = self.get_printer(['--print', 'nothing']) + result = self.get_result('passes/image.html') + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertTrue(err.empty()) + + printer, err, out = self.get_printer(['--print', 'unexpected']) + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + self.assertTrue(err.empty()) + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertEquals(err.get(), + [' passes/image.html -> unexpected pass\n']) + + printer, err, out = self.get_printer(['--print', 'everything']) + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + self.assertTrue(err.empty()) + + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertEquals(err.get(), + [' passes/image.html -> unexpected pass\n']) + + printer, err, out = self.get_printer(['--print', 'nothing']) + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertTrue(err.empty()) + + printer, err, out = self.get_printer(['--print', + 'trace-unexpected']) + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + self.assertTrue(err.empty()) + + printer, err, out = self.get_printer(['--print', + 'trace-unexpected']) + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertFalse(err.empty()) + + printer, err, out = self.get_printer(['--print', + 'trace-unexpected']) + result = self.get_result("passes/text.html") + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertFalse(err.empty()) + + err.reset() + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + self.assertFalse(err.empty()) + + printer, err, out = self.get_printer(['--print', 'trace-everything']) + result = 
self.get_result('passes/image.html') + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + result = self.get_result('failures/expected/missing_text.html') + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + result = self.get_result('failures/expected/missing_check.html') + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + result = self.get_result('failures/expected/missing_image.html') + printer.print_test_result(result, expected=True, exp_str='', + got_str='') + self.assertFalse(err.empty()) + + err.reset() + printer.print_test_result(result, expected=False, exp_str='', + got_str='') + + def test_print_progress(self): + expectations = '' + + # test that we print nothing + printer, err, out = self.get_printer(['--print', 'nothing']) + tests = ['passes/text.html', 'failures/expected/timeout.html', + 'failures/expected/crash.html'] + paths, rs, exp = self.get_result_summary(tests, expectations) + + printer.print_progress(rs, False, paths) + self.assertTrue(out.empty()) + self.assertTrue(err.empty()) + + printer.print_progress(rs, True, paths) + self.assertTrue(out.empty()) + self.assertTrue(err.empty()) + + # test regular functionality + printer, err, out = self.get_printer(['--print', + 'one-line-progress']) + printer.print_progress(rs, False, paths) + self.assertTrue(out.empty()) + self.assertFalse(err.empty()) + + err.reset() + out.reset() + printer.print_progress(rs, True, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + def test_print_progress__detailed(self): + tests = ['passes/text.html', 'failures/expected/timeout.html', + 'failures/expected/crash.html'] + expectations = 'failures/expected/timeout.html = TIMEOUT' + + # first, test that it is disabled properly + # should still print one-line-progress + printer, err, out = self.get_printer( + ['--print', 'detailed-progress'], single_threaded=False) + paths, rs, exp = self.get_result_summary(tests, expectations) + printer.print_progress(rs, False, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + # now test the enabled paths + printer, err, out = self.get_printer( + ['--print', 'detailed-progress'], single_threaded=True) + paths, rs, exp = self.get_result_summary(tests, expectations) + printer.print_progress(rs, False, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + err.reset() + out.reset() + printer.print_progress(rs, True, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False) + rs.add(self.get_result('failures/expected/timeout.html'), True) + rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True) + err.reset() + out.reset() + printer.print_progress(rs, False, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + # We only clear the meter when retrying w/ detailed-progress. 
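# (Editor's note, not part of the committed file: the rs.add(result, expected)
# calls above go through ResultSummary.add() from result_summary.py, added
# later in this diff -- each call decrements `remaining`, files the result
# under its expectation type, and bumps the unexpected_* counters only when
# expected is False.)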
+ err.reset() + out.reset() + printer.print_progress(rs, True, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + printer, err, out = self.get_printer( + ['--print', 'detailed-progress,unexpected'], single_threaded=True) + paths, rs, exp = self.get_result_summary(tests, expectations) + printer.print_progress(rs, False, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + err.reset() + out.reset() + printer.print_progress(rs, True, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), False) + rs.add(self.get_result('failures/expected/timeout.html'), True) + rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), True) + err.reset() + out.reset() + printer.print_progress(rs, False, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + # We only clear the meter when retrying w/ detailed-progress. + err.reset() + out.reset() + printer.print_progress(rs, True, paths) + self.assertFalse(err.empty()) + self.assertTrue(out.empty()) + + def test_write_nothing(self): + printer, err, out = self.get_printer(['--print', 'nothing']) + printer.write("foo") + self.assertTrue(err.empty()) + + def test_write_misc(self): + printer, err, out = self.get_printer(['--print', 'misc']) + printer.write("foo") + self.assertFalse(err.empty()) + err.reset() + printer.write("foo", "config") + self.assertTrue(err.empty()) + + def test_write_everything(self): + printer, err, out = self.get_printer(['--print', 'everything']) + printer.write("foo") + self.assertFalse(err.empty()) + err.reset() + printer.write("foo", "config") + self.assertFalse(err.empty()) + + def test_write_verbose(self): + printer, err, out = self.get_printer(['--verbose']) + printer.write("foo") + self.assertTrue(not err.empty() and "foo" in err.get()[0]) + self.assertTrue(out.empty()) + + def test_print_unexpected_results(self): + # This routine is the only one that prints stuff that the bots + # care about. + # + # FIXME: there's some weird layering going on here. It seems + # like we shouldn't be both using an expectations string and + # having to specify whether or not the result was expected. + # This whole set of tests should probably be rewritten. + # + # FIXME: Plus, the fact that we're having to call into + # run_webkit_tests is clearly a layering inversion. + def get_unexpected_results(expected, passing, flaky): + """Return an unexpected results summary matching the input description. + + There are a lot of different combinations of test results that + can be tested; this routine produces various combinations based + on the values of the input flags. + + Args + expected: whether the tests ran as expected + passing: whether the tests should all pass + flaky: whether the tests should be flaky (if False, they + produce the same results on both runs; if True, they + all pass on the second run). 
+ + """ + paths, rs, exp = self.get_result_summary(tests, expectations) + if expected: + rs.add(self.get_result('passes/text.html', test_expectations.PASS), + expected) + rs.add(self.get_result('failures/expected/timeout.html', + test_expectations.TIMEOUT), expected) + rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), + expected) + elif passing: + rs.add(self.get_result('passes/text.html'), expected) + rs.add(self.get_result('failures/expected/timeout.html'), expected) + rs.add(self.get_result('failures/expected/crash.html'), expected) + else: + rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), + expected) + rs.add(self.get_result('failures/expected/timeout.html', + test_expectations.CRASH), expected) + rs.add(self.get_result('failures/expected/crash.html', + test_expectations.TIMEOUT), + expected) + retry = rs + if flaky: + paths, retry, exp = self.get_result_summary(tests, + expectations) + retry.add(self.get_result('passes/text.html'), True) + retry.add(self.get_result('failures/expected/timeout.html'), True) + retry.add(self.get_result('failures/expected/crash.html'), True) + unexpected_results = test_runner.summarize_unexpected_results( + self._port, exp, rs, retry) + return unexpected_results + + tests = ['passes/text.html', 'failures/expected/timeout.html', + 'failures/expected/crash.html'] + expectations = '' + + printer, err, out = self.get_printer(['--print', 'nothing']) + ur = get_unexpected_results(expected=False, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertTrue(out.empty()) + + printer, err, out = self.get_printer(['--print', + 'unexpected-results']) + + # test everything running as expected + ur = get_unexpected_results(expected=True, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertTrue(out.empty()) + + # test failures + err.reset() + out.reset() + ur = get_unexpected_results(expected=False, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + # test unexpected flaky results + err.reset() + out.reset() + ur = get_unexpected_results(expected=False, passing=True, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + # test unexpected passes + err.reset() + out.reset() + ur = get_unexpected_results(expected=False, passing=False, flaky=True) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + err.reset() + out.reset() + printer, err, out = self.get_printer(['--print', 'everything']) + ur = get_unexpected_results(expected=False, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + expectations = """ +failures/expected/crash.html = CRASH +failures/expected/timeout.html = TIMEOUT +""" + err.reset() + out.reset() + ur = get_unexpected_results(expected=False, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + err.reset() + out.reset() + ur = get_unexpected_results(expected=False, passing=True, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + # Test handling of --verbose as well. 
+ err.reset() + out.reset() + printer, err, out = self.get_printer(['--verbose']) + ur = get_unexpected_results(expected=False, passing=False, flaky=False) + printer.print_unexpected_results(ur) + self.assertTrue(err.empty()) + self.assertFalse(out.empty()) + + def test_print_unexpected_results_buildbot(self): + # FIXME: Test that print_unexpected_results() produces the printer the + # buildbot is expecting. + pass + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py new file mode 100644 index 0000000..80fd6ac --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/result_summary.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Run layout tests.""" + +import logging + +import test_expectations + +_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests") + +TestExpectationsFile = test_expectations.TestExpectationsFile + + +class ResultSummary(object): + """A class for partitioning the test results we get into buckets. 
+ + This class is basically a glorified struct and it's private to this file + so we don't bother with any information hiding.""" + + def __init__(self, expectations, test_files): + self.total = len(test_files) + self.remaining = self.total + self.expectations = expectations + self.expected = 0 + self.unexpected = 0 + self.unexpected_failures = 0 + self.unexpected_crashes_or_timeouts = 0 + self.tests_by_expectation = {} + self.tests_by_timeline = {} + self.results = {} + self.unexpected_results = {} + self.failures = {} + self.tests_by_expectation[test_expectations.SKIP] = set() + for expectation in TestExpectationsFile.EXPECTATIONS.values(): + self.tests_by_expectation[expectation] = set() + for timeline in TestExpectationsFile.TIMELINES.values(): + self.tests_by_timeline[timeline] = ( + expectations.get_tests_with_timeline(timeline)) + + def add(self, result, expected): + """Add a TestResult into the appropriate bin. + + Args: + result: TestResult + expected: whether the result was what we expected it to be. + """ + + self.tests_by_expectation[result.type].add(result.filename) + self.results[result.filename] = result + self.remaining -= 1 + if len(result.failures): + self.failures[result.filename] = result.failures + if expected: + self.expected += 1 + else: + self.unexpected_results[result.filename] = result.type + self.unexpected += 1 + if len(result.failures): + self.unexpected_failures += 1 + if result.type == test_expectations.CRASH or result.type == test_expectations.TIMEOUT: + self.unexpected_crashes_or_timeouts += 1 diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py new file mode 100644 index 0000000..8645fc1 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py @@ -0,0 +1,868 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A helper class for reading in and dealing with tests expectations +for layout tests. 
+""" + +import logging +import os +import re +import sys + +import webkitpy.thirdparty.simplejson as simplejson + +_log = logging.getLogger("webkitpy.layout_tests.layout_package." + "test_expectations") + +# Test expectation and modifier constants. +(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, + SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15) + +# Test expectation file update action constants +(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) + + +def result_was_expected(result, expected_results, test_needs_rebaselining, + test_is_skipped): + """Returns whether we got a result we were expecting. + Args: + result: actual result of a test execution + expected_results: set of results listed in test_expectations + test_needs_rebaselining: whether test was marked as REBASELINE + test_is_skipped: whether test was marked as SKIP""" + if result in expected_results: + return True + if result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and FAIL in expected_results: + return True + if result == MISSING and test_needs_rebaselining: + return True + if result == SKIP and test_is_skipped: + return True + return False + + +def remove_pixel_failures(expected_results): + """Returns a copy of the expected results for a test, except that we + drop any pixel failures and return the remaining expectations. For example, + if we're not running pixel tests, then tests expected to fail as IMAGE + will PASS.""" + expected_results = expected_results.copy() + if IMAGE in expected_results: + expected_results.remove(IMAGE) + expected_results.add(PASS) + if IMAGE_PLUS_TEXT in expected_results: + expected_results.remove(IMAGE_PLUS_TEXT) + expected_results.add(TEXT) + return expected_results + + +class TestExpectations: + TEST_LIST = "test_expectations.txt" + + def __init__(self, port, tests, expectations, test_platform_name, + is_debug_mode, is_lint_mode, overrides=None): + """Loads and parses the test expectations given in the string. + Args: + port: handle to object containing platform-specific functionality + test: list of all of the test files + expectations: test expectations as a string + test_platform_name: name of the platform to match expectations + against. Note that this may be different than + port.test_platform_name() when is_lint_mode is True. + is_debug_mode: whether to use the DEBUG or RELEASE modifiers + in the expectations + is_lint_mode: If True, just parse the expectations string + looking for errors. + overrides: test expectations that are allowed to override any + entries in |expectations|. This is used by callers + that need to manage two sets of expectations (e.g., upstream + and downstream expectations). + """ + self._expected_failures = TestExpectationsFile(port, expectations, + tests, test_platform_name, is_debug_mode, is_lint_mode, + overrides=overrides) + + # TODO(ojan): Allow for removing skipped tests when getting the list of + # tests to run, but not when getting metrics. + # TODO(ojan): Replace the Get* calls here with the more sane API exposed + # by TestExpectationsFile below. Maybe merge the two classes entirely? 
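# Illustrative sketch (editor's addition, not part of the committed file):
# the module-level helpers defined above behave as follows, assuming this
# module's names are importable as in the unit tests (e.g. via
# `from webkitpy.layout_tests.layout_package.test_expectations import *`):
#
#     # An IMAGE failure counts as expected whenever FAIL is listed.
#     assert result_was_expected(IMAGE, set([FAIL]), False, False)
#     assert not result_was_expected(CRASH, set([FAIL]), False, False)
#
#     # With pixel tests disabled, IMAGE degrades to PASS and
#     # IMAGE+TEXT degrades to TEXT.
#     assert remove_pixel_failures(set([IMAGE])) == set([PASS])
#     assert remove_pixel_failures(set([IMAGE_PLUS_TEXT])) == set([TEXT])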
+ + def get_expectations_json_for_all_platforms(self): + return ( + self._expected_failures.get_expectations_json_for_all_platforms()) + + def get_rebaselining_failures(self): + return (self._expected_failures.get_test_set(REBASELINE, FAIL) | + self._expected_failures.get_test_set(REBASELINE, IMAGE) | + self._expected_failures.get_test_set(REBASELINE, TEXT) | + self._expected_failures.get_test_set(REBASELINE, + IMAGE_PLUS_TEXT)) + + def get_options(self, test): + return self._expected_failures.get_options(test) + + def get_expectations(self, test): + return self._expected_failures.get_expectations(test) + + def get_expectations_string(self, test): + """Returns the expectatons for the given test as an uppercase string. + If there are no expectations for the test, then "PASS" is returned.""" + expectations = self.get_expectations(test) + retval = [] + + for expectation in expectations: + retval.append(self.expectation_to_string(expectation)) + + return " ".join(retval) + + def expectation_to_string(self, expectation): + """Return the uppercased string equivalent of a given expectation.""" + for item in TestExpectationsFile.EXPECTATIONS.items(): + if item[1] == expectation: + return item[0].upper() + raise ValueError(expectation) + + def get_tests_with_result_type(self, result_type): + return self._expected_failures.get_tests_with_result_type(result_type) + + def get_tests_with_timeline(self, timeline): + return self._expected_failures.get_tests_with_timeline(timeline) + + def matches_an_expected_result(self, test, result, + pixel_tests_are_enabled): + expected_results = self._expected_failures.get_expectations(test) + if not pixel_tests_are_enabled: + expected_results = remove_pixel_failures(expected_results) + return result_was_expected(result, expected_results, + self.is_rebaselining(test), self.has_modifier(test, SKIP)) + + def is_rebaselining(self, test): + return self._expected_failures.has_modifier(test, REBASELINE) + + def has_modifier(self, test, modifier): + return self._expected_failures.has_modifier(test, modifier) + + def remove_platform_from_expectations(self, tests, platform): + return self._expected_failures.remove_platform_from_expectations( + tests, platform) + + +def strip_comments(line): + """Strips comments from a line and return None if the line is empty + or else the contents of line with leading and trailing spaces removed + and all other whitespace collapsed""" + + commentIndex = line.find('//') + if commentIndex is -1: + commentIndex = len(line) + + line = re.sub(r'\s+', ' ', line[:commentIndex].strip()) + if line == '': + return None + else: + return line + + +class ParseError(Exception): + def __init__(self, fatal, errors): + self.fatal = fatal + self.errors = errors + + def __str__(self): + return '\n'.join(map(str, self.errors)) + + def __repr__(self): + return 'ParseError(fatal=%s, errors=%s)' % (fatal, errors) + + +class ModifiersAndExpectations: + """A holder for modifiers and expectations on a test that serializes to + JSON.""" + + def __init__(self, modifiers, expectations): + self.modifiers = modifiers + self.expectations = expectations + + +class ExpectationsJsonEncoder(simplejson.JSONEncoder): + """JSON encoder that can handle ModifiersAndExpectations objects.""" + def default(self, obj): + # A ModifiersAndExpectations object has two fields, each of which + # is a dict. Since JSONEncoders handle all the builtin types directly, + # the only time this routine should be called is on the top level + # object (i.e., the encoder shouldn't recurse). 
+ assert isinstance(obj, ModifiersAndExpectations) + return {"modifiers": obj.modifiers, + "expectations": obj.expectations} + + +class TestExpectationsFile: + """Test expectation files consist of lines with specifications of what + to expect from layout test cases. The test cases can be directories + in which case the expectations apply to all test cases in that + directory and any subdirectory. The format of the file is along the + lines of: + + LayoutTests/fast/js/fixme.js = FAIL + LayoutTests/fast/js/flaky.js = FAIL PASS + LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS + ... + + To add other options: + SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS + + SKIP: Doesn't run the test. + SLOW: The test takes a long time to run, but does not timeout indefinitely. + WONTFIX: For tests that we never intend to pass on a given platform. + DEBUG: Expectations apply only to the debug build. + RELEASE: Expectations apply only to release build. + LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these + platforms. + + Notes: + -A test cannot be both SLOW and TIMEOUT + -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is + a migratory state that currently means either IMAGE, TEXT, or + IMAGE+TEXT. Once we have finished migrating the expectations, we will + change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT + identifier. + -A test can be included twice, but not via the same path. + -If a test is included twice, then the more precise path wins. + -CRASH tests cannot be WONTFIX + """ + + EXPECTATIONS = {'pass': PASS, + 'fail': FAIL, + 'text': TEXT, + 'image': IMAGE, + 'image+text': IMAGE_PLUS_TEXT, + 'timeout': TIMEOUT, + 'crash': CRASH, + 'missing': MISSING} + + EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'), + PASS: ('pass', 'passes'), + FAIL: ('failure', 'failures'), + TEXT: ('text diff mismatch', + 'text diff mismatch'), + IMAGE: ('image mismatch', 'image mismatch'), + IMAGE_PLUS_TEXT: ('image and text mismatch', + 'image and text mismatch'), + CRASH: ('DumpRenderTree crash', + 'DumpRenderTree crashes'), + TIMEOUT: ('test timed out', 'tests timed out'), + MISSING: ('no expected result found', + 'no expected results found')} + + EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, + TEXT, IMAGE, FAIL, SKIP) + + BUILD_TYPES = ('debug', 'release') + + MODIFIERS = {'skip': SKIP, + 'wontfix': WONTFIX, + 'slow': SLOW, + 'rebaseline': REBASELINE, + 'none': NONE} + + TIMELINES = {'wontfix': WONTFIX, + 'now': NOW} + + RESULT_TYPES = {'skip': SKIP, + 'pass': PASS, + 'fail': FAIL, + 'flaky': FLAKY} + + def __init__(self, port, expectations, full_test_list, test_platform_name, + is_debug_mode, is_lint_mode, overrides=None): + """ + expectations: Contents of the expectations file + full_test_list: The list of all tests to be run pending processing of + the expections for those tests. + test_platform_name: name of the platform to match expectations + against. Note that this may be different than + port.test_platform_name() when is_lint_mode is True. + is_debug_mode: Whether we testing a test_shell built debug mode. + is_lint_mode: Whether this is just linting test_expecatations.txt. + overrides: test expectations that are allowed to override any + entries in |expectations|. 
This is used by callers + that need to manage two sets of expectations (e.g., upstream + and downstream expectations). + """ + + self._port = port + self._expectations = expectations + self._full_test_list = full_test_list + self._test_platform_name = test_platform_name + self._is_debug_mode = is_debug_mode + self._is_lint_mode = is_lint_mode + self._overrides = overrides + self._errors = [] + self._non_fatal_errors = [] + + # Maps relative test paths as listed in the expectations file to a + # list of maps containing modifiers and expectations for each time + # the test is listed in the expectations file. + self._all_expectations = {} + + # Maps a test to its list of expectations. + self._test_to_expectations = {} + + # Maps a test to its list of options (string values) + self._test_to_options = {} + + # Maps a test to its list of modifiers: the constants associated with + # the options minus any bug or platform strings + self._test_to_modifiers = {} + + # Maps a test to the base path that it was listed with in the list. + self._test_list_paths = {} + + self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS) + self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS) + self._timeline_to_tests = self._dict_of_sets(self.TIMELINES) + self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES) + + self._read(self._get_iterable_expectations(self._expectations), + overrides_allowed=False) + + # List of tests that are in the overrides file (used for checking for + # duplicates inside the overrides file itself). Note that just because + # a test is in this set doesn't mean it's necessarily overridding a + # expectation in the regular expectations; the test might not be + # mentioned in the regular expectations file at all. + self._overridding_tests = set() + + if overrides: + self._read(self._get_iterable_expectations(self._overrides), + overrides_allowed=True) + + self._handle_any_read_errors() + self._process_tests_without_expectations() + + def _handle_any_read_errors(self): + if len(self._errors) or len(self._non_fatal_errors): + if self._is_debug_mode: + build_type = 'DEBUG' + else: + build_type = 'RELEASE' + _log.error('') + _log.error("FAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" % + (self._test_platform_name.upper(), build_type)) + + for error in self._errors: + _log.error(error) + for error in self._non_fatal_errors: + _log.error(error) + + if len(self._errors): + raise ParseError(fatal=True, errors=self._errors) + if len(self._non_fatal_errors) and self._is_lint_mode: + raise ParseError(fatal=False, errors=self._non_fatal_errors) + + def _process_tests_without_expectations(self): + expectations = set([PASS]) + options = [] + modifiers = [] + if self._full_test_list: + for test in self._full_test_list: + if not test in self._test_list_paths: + self._add_test(test, modifiers, expectations, options, + overrides_allowed=False) + + def _dict_of_sets(self, strings_to_constants): + """Takes a dict of strings->constants and returns a dict mapping + each constant to an empty set.""" + d = {} + for c in strings_to_constants.values(): + d[c] = set() + return d + + def _get_iterable_expectations(self, expectations_str): + """Returns an object that can be iterated over. Allows for not caring + about whether we're iterating over a file or a new-line separated + string.""" + iterable = [x + "\n" for x in expectations_str.split("\n")] + # Strip final entry if it's empty to avoid added in an extra + # newline. 
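# (Editor's note, not part of the committed file: for example, the string
# "a = PASS\nb = FAIL\n" becomes ["a = PASS\n", "b = FAIL\n"] -- the empty
# trailing entry produced by split("\n") is dropped below -- and the same
# string without the final newline yields the same two entries.)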
+ if iterable[-1] == "\n": + return iterable[:-1] + return iterable + + def get_test_set(self, modifier, expectation=None, include_skips=True): + if expectation is None: + tests = self._modifier_to_tests[modifier] + else: + tests = (self._expectation_to_tests[expectation] & + self._modifier_to_tests[modifier]) + + if not include_skips: + tests = tests - self.get_test_set(SKIP, expectation) + + return tests + + def get_tests_with_result_type(self, result_type): + return self._result_type_to_tests[result_type] + + def get_tests_with_timeline(self, timeline): + return self._timeline_to_tests[timeline] + + def get_options(self, test): + """This returns the entire set of options for the given test + (the modifiers plus the BUGXXXX identifier). This is used by the + LTTF dashboard.""" + return self._test_to_options[test] + + def has_modifier(self, test, modifier): + return test in self._modifier_to_tests[modifier] + + def get_expectations(self, test): + return self._test_to_expectations[test] + + def get_expectations_json_for_all_platforms(self): + # Specify separators in order to get compact encoding. + return ExpectationsJsonEncoder(separators=(',', ':')).encode( + self._all_expectations) + + def get_non_fatal_errors(self): + return self._non_fatal_errors + + def remove_platform_from_expectations(self, tests, platform): + """Returns a copy of the expectations with the tests matching the + platform removed. + + If a test is in the test list and has an option that matches the given + platform, remove the matching platform and save the updated test back + to the file. If no other platforms remaining after removal, delete the + test from the file. + + Args: + tests: list of tests that need to update.. + platform: which platform option to remove. + + Returns: + the updated string. + """ + + assert(platform) + f_orig = self._get_iterable_expectations(self._expectations) + f_new = [] + + tests_removed = 0 + tests_updated = 0 + lineno = 0 + for line in f_orig: + lineno += 1 + action = self._get_platform_update_action(line, lineno, tests, + platform) + assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, + ADD_PLATFORMS_EXCEPT_THIS)) + if action == NO_CHANGE: + # Save the original line back to the file + _log.debug('No change to test: %s', line) + f_new.append(line) + elif action == REMOVE_TEST: + tests_removed += 1 + _log.info('Test removed: %s', line) + elif action == REMOVE_PLATFORM: + parts = line.split(':') + new_options = parts[0].replace(platform.upper() + ' ', '', 1) + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.append(new_line) + tests_updated += 1 + _log.info('Test updated: ') + _log.info(' old: %s', line) + _log.info(' new: %s', new_line) + elif action == ADD_PLATFORMS_EXCEPT_THIS: + parts = line.split(':') + new_options = parts[0] + for p in self._port.test_platform_names(): + p = p.upper() + # This is a temp solution for rebaselining tool. + # Do not add tags WIN-7 and WIN-VISTA to test expectations + # if the original line does not specify the platform + # option. + # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have + # reliable Win 7 and Win Vista buildbots setup. 
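# (Editor's note, not part of the committed file: per the RebaseliningTest
# cases later in this diff, removing 'mac' from
#     "BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT"
# takes the REMOVE_PLATFORM path and yields "BUGX REBASELINE WIN : ...",
# while a line with no platform options at all ends up in this
# ADD_PLATFORMS_EXCEPT_THIS branch and gains every other platform the
# port reports.)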
+ if not p in (platform.upper(), 'WIN-VISTA', 'WIN-7'): + new_options += p + ' ' + new_line = ('%s:%s' % (new_options, parts[1])) + f_new.append(new_line) + tests_updated += 1 + _log.info('Test updated: ') + _log.info(' old: %s', line) + _log.info(' new: %s', new_line) + + _log.info('Total tests removed: %d', tests_removed) + _log.info('Total tests updated: %d', tests_updated) + + return "".join(f_new) + + def parse_expectations_line(self, line, lineno): + """Parses a line from test_expectations.txt and returns a tuple + with the test path, options as a list, expectations as a list.""" + line = strip_comments(line) + if not line: + return (None, None, None) + + options = [] + if line.find(":") is -1: + test_and_expectation = line.split("=") + else: + parts = line.split(":") + options = self._get_options_list(parts[0]) + test_and_expectation = parts[1].split('=') + + test = test_and_expectation[0].strip() + if (len(test_and_expectation) is not 2): + self._add_error(lineno, "Missing expectations.", + test_and_expectation) + expectations = None + else: + expectations = self._get_options_list(test_and_expectation[1]) + + return (test, options, expectations) + + def _get_platform_update_action(self, line, lineno, tests, platform): + """Check the platform option and return the action needs to be taken. + + Args: + line: current line in test expectations file. + lineno: current line number of line + tests: list of tests that need to update.. + platform: which platform option to remove. + + Returns: + NO_CHANGE: no change to the line (comments, test not in the list etc) + REMOVE_TEST: remove the test from file. + REMOVE_PLATFORM: remove this platform option from the test. + ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. + """ + test, options, expectations = self.parse_expectations_line(line, + lineno) + if not test or test not in tests: + return NO_CHANGE + + has_any_platform = False + for option in options: + if option in self._port.test_platform_names(): + has_any_platform = True + if not option == platform: + return REMOVE_PLATFORM + + # If there is no platform specified, then it means apply to all + # platforms. Return the action to add all the platforms except this + # one. + if not has_any_platform: + return ADD_PLATFORMS_EXCEPT_THIS + + return REMOVE_TEST + + def _has_valid_modifiers_for_current_platform(self, options, lineno, + test_and_expectations, modifiers): + """Returns true if the current platform is in the options list or if + no platforms are listed and if there are no fatal errors in the + options list. + + Args: + options: List of lowercase options. + lineno: The line in the file where the test is listed. + test_and_expectations: The path and expectations for the test. + modifiers: The set to populate with modifiers. 
+ """ + has_any_platform = False + has_bug_id = False + for option in options: + if option in self.MODIFIERS: + modifiers.add(option) + elif option in self._port.test_platform_names(): + has_any_platform = True + elif re.match(r'bug\d', option) != None: + self._add_error(lineno, 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: %s' % + option, test_and_expectations) + elif option.startswith('bug'): + has_bug_id = True + elif option not in self.BUILD_TYPES: + self._add_error(lineno, 'Invalid modifier for test: %s' % + option, test_and_expectations) + + if has_any_platform and not self._match_platform(options): + return False + + if not has_bug_id and 'wontfix' not in options: + # TODO(ojan): Turn this into an AddError call once all the + # tests have BUG identifiers. + self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.', + test_and_expectations) + + if 'release' in options or 'debug' in options: + if self._is_debug_mode and 'debug' not in options: + return False + if not self._is_debug_mode and 'release' not in options: + return False + + if self._is_lint_mode and 'rebaseline' in options: + self._add_error(lineno, + 'REBASELINE should only be used for running rebaseline.py. ' + 'Cannot be checked in.', test_and_expectations) + + return True + + def _match_platform(self, options): + """Match the list of options against our specified platform. If any + of the options prefix-match self._platform, return True. This handles + the case where a test is marked WIN and the platform is WIN-VISTA. + + Args: + options: list of options + """ + for opt in options: + if self._test_platform_name.startswith(opt): + return True + return False + + def _add_to_all_expectations(self, test, options, expectations): + # Make all paths unix-style so the dashboard doesn't need to. + test = test.replace('\\', '/') + if not test in self._all_expectations: + self._all_expectations[test] = [] + self._all_expectations[test].append( + ModifiersAndExpectations(options, expectations)) + + def _read(self, expectations, overrides_allowed): + """For each test in an expectations iterable, generate the + expectations for it.""" + lineno = 0 + for line in expectations: + lineno += 1 + + test_list_path, options, expectations = \ + self.parse_expectations_line(line, lineno) + if not expectations: + continue + + self._add_to_all_expectations(test_list_path, + " ".join(options).upper(), + " ".join(expectations).upper()) + + modifiers = set() + if options and not self._has_valid_modifiers_for_current_platform( + options, lineno, test_list_path, modifiers): + continue + + expectations = self._parse_expectations(expectations, lineno, + test_list_path) + + if 'slow' in options and TIMEOUT in expectations: + self._add_error(lineno, + 'A test can not be both slow and timeout. If it times out ' + 'indefinitely, then it should be just timeout.', + test_list_path) + + full_path = os.path.join(self._port.layout_tests_dir(), + test_list_path) + full_path = os.path.normpath(full_path) + # WebKit's way of skipping tests is to add a -disabled suffix. + # So we should consider the path existing if the path or the + # -disabled version exists. 
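# (Editor's sketch, not part of the committed file: by this point
# parse_expectations_line() has decomposed a line such as
#     "BUGX WIN DEBUG : fast/js/foo.html = TEXT PASS"
# into test_list_path 'fast/js/foo.html', options ['bugx', 'win', 'debug']
# and expectations ['text', 'pass']; the check below then resolves that
# relative path against layout_tests_dir() and also accepts a '-disabled'
# sibling as existing.)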
+ if (not self._port.path_exists(full_path) + and not self._port.path_exists(full_path + '-disabled')): + # Log a non fatal error here since you hit this case any + # time you update test_expectations.txt without syncing + # the LayoutTests directory + self._log_non_fatal_error(lineno, 'Path does not exist.', + test_list_path) + continue + + if not self._full_test_list: + tests = [test_list_path] + else: + tests = self._expand_tests(test_list_path) + + self._add_tests(tests, expectations, test_list_path, lineno, + modifiers, options, overrides_allowed) + + def _get_options_list(self, listString): + return [part.strip().lower() for part in listString.strip().split(' ')] + + def _parse_expectations(self, expectations, lineno, test_list_path): + result = set() + for part in expectations: + if not part in self.EXPECTATIONS: + self._add_error(lineno, 'Unsupported expectation: %s' % part, + test_list_path) + continue + expectation = self.EXPECTATIONS[part] + result.add(expectation) + return result + + def _expand_tests(self, test_list_path): + """Convert the test specification to an absolute, normalized + path and make sure directories end with the OS path separator.""" + # FIXME: full_test_list can quickly contain a big amount of + # elements. We should consider at some point to use a more + # efficient structure instead of a list. Maybe a dictionary of + # lists to represent the tree of tests, leaves being test + # files and nodes being categories. + + path = os.path.join(self._port.layout_tests_dir(), test_list_path) + path = os.path.normpath(path) + if self._port.path_isdir(path): + # this is a test category, return all the tests of the category. + path = os.path.join(path, '') + + return [test for test in self._full_test_list if test.startswith(path)] + + # this is a test file, do a quick check if it's in the + # full test suite. + result = [] + if path in self._full_test_list: + result = [path, ] + return result + + def _add_tests(self, tests, expectations, test_list_path, lineno, + modifiers, options, overrides_allowed): + for test in tests: + if self._already_seen_test(test, test_list_path, lineno, + overrides_allowed): + continue + + self._clear_expectations_for_test(test, test_list_path) + self._add_test(test, modifiers, expectations, options, + overrides_allowed) + + def _add_test(self, test, modifiers, expectations, options, + overrides_allowed): + """Sets the expected state for a given test. + + This routine assumes the test has not been added before. If it has, + use _ClearExpectationsForTest() to reset the state prior to + calling this. + + Args: + test: test to add + modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.) + expectations: sequence of expectations (PASS, IMAGE, etc.) + options: sequence of keywords and bug identifiers. 
+ overrides_allowed: whether we're parsing the regular expectations + or the overridding expectations""" + self._test_to_expectations[test] = expectations + for expectation in expectations: + self._expectation_to_tests[expectation].add(test) + + self._test_to_options[test] = options + self._test_to_modifiers[test] = set() + for modifier in modifiers: + mod_value = self.MODIFIERS[modifier] + self._modifier_to_tests[mod_value].add(test) + self._test_to_modifiers[test].add(mod_value) + + if 'wontfix' in modifiers: + self._timeline_to_tests[WONTFIX].add(test) + else: + self._timeline_to_tests[NOW].add(test) + + if 'skip' in modifiers: + self._result_type_to_tests[SKIP].add(test) + elif expectations == set([PASS]): + self._result_type_to_tests[PASS].add(test) + elif len(expectations) > 1: + self._result_type_to_tests[FLAKY].add(test) + else: + self._result_type_to_tests[FAIL].add(test) + + if overrides_allowed: + self._overridding_tests.add(test) + + def _clear_expectations_for_test(self, test, test_list_path): + """Remove prexisting expectations for this test. + This happens if we are seeing a more precise path + than a previous listing. + """ + if test in self._test_list_paths: + self._test_to_expectations.pop(test, '') + self._remove_from_sets(test, self._expectation_to_tests) + self._remove_from_sets(test, self._modifier_to_tests) + self._remove_from_sets(test, self._timeline_to_tests) + self._remove_from_sets(test, self._result_type_to_tests) + + self._test_list_paths[test] = os.path.normpath(test_list_path) + + def _remove_from_sets(self, test, dict): + """Removes the given test from the sets in the dictionary. + + Args: + test: test to look for + dict: dict of sets of files""" + for set_of_tests in dict.itervalues(): + if test in set_of_tests: + set_of_tests.remove(test) + + def _already_seen_test(self, test, test_list_path, lineno, + allow_overrides): + """Returns true if we've already seen a more precise path for this test + than the test_list_path. + """ + if not test in self._test_list_paths: + return False + + prev_base_path = self._test_list_paths[test] + if (prev_base_path == os.path.normpath(test_list_path)): + if (not allow_overrides or test in self._overridding_tests): + if allow_overrides: + expectation_source = "override" + else: + expectation_source = "expectation" + self._add_error(lineno, 'Duplicate %s.' % expectation_source, + test) + return True + else: + # We have seen this path, but that's okay because its + # in the overrides and the earlier path was in the + # expectations. + return False + + # Check if we've already seen a more precise path. + return prev_base_path.startswith(os.path.normpath(test_list_path)) + + def _add_error(self, lineno, msg, path): + """Reports an error that will prevent running the tests. Does not + immediately raise an exception because we'd like to aggregate all the + errors so they can all be printed out.""" + self._errors.append('Line:%s %s %s' % (lineno, msg, path)) + + def _log_non_fatal_error(self, lineno, msg, path): + """Reports an error that will not prevent running the tests. 
These are + still errors, but not bad enough to warrant breaking test running.""" + self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path)) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py new file mode 100644 index 0000000..34771f3 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py @@ -0,0 +1,350 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Unit tests for test_expectations.py.""" + +import os +import sys +import unittest + +from webkitpy.layout_tests import port +from webkitpy.layout_tests.layout_package.test_expectations import * + +class FunctionsTest(unittest.TestCase): + def test_result_was_expected(self): + # test basics + self.assertEquals(result_was_expected(PASS, set([PASS]), + False, False), True) + self.assertEquals(result_was_expected(TEXT, set([PASS]), + False, False), False) + + # test handling of FAIL expectations + self.assertEquals(result_was_expected(IMAGE_PLUS_TEXT, set([FAIL]), + False, False), True) + self.assertEquals(result_was_expected(IMAGE, set([FAIL]), + False, False), True) + self.assertEquals(result_was_expected(TEXT, set([FAIL]), + False, False), True) + self.assertEquals(result_was_expected(CRASH, set([FAIL]), + False, False), False) + + # test handling of SKIPped tests and results + self.assertEquals(result_was_expected(SKIP, set([CRASH]), + False, True), True) + self.assertEquals(result_was_expected(SKIP, set([CRASH]), + False, False), False) + + # test handling of MISSING results and the REBASELINE modifier + self.assertEquals(result_was_expected(MISSING, set([PASS]), + True, False), True) + self.assertEquals(result_was_expected(MISSING, set([PASS]), + False, False), False) + + def test_remove_pixel_failures(self): + self.assertEquals(remove_pixel_failures(set([TEXT])), + set([TEXT])) + self.assertEquals(remove_pixel_failures(set([PASS])), + set([PASS])) + self.assertEquals(remove_pixel_failures(set([IMAGE])), + set([PASS])) + self.assertEquals(remove_pixel_failures(set([IMAGE_PLUS_TEXT])), + set([TEXT])) + self.assertEquals(remove_pixel_failures(set([PASS, IMAGE, CRASH])), + set([PASS, CRASH])) + + +class Base(unittest.TestCase): + def __init__(self, testFunc, setUp=None, tearDown=None, description=None): + self._port = port.get('test', None) + self._exp = None + unittest.TestCase.__init__(self, testFunc) + + def get_test(self, test_name): + return os.path.join(self._port.layout_tests_dir(), test_name) + + def get_basic_tests(self): + return [self.get_test('failures/expected/text.html'), + self.get_test('failures/expected/image_checksum.html'), + self.get_test('failures/expected/crash.html'), + self.get_test('failures/expected/missing_text.html'), + self.get_test('failures/expected/image.html'), + self.get_test('passes/text.html')] + + def get_basic_expectations(self): + return """ +BUG_TEST : failures/expected/text.html = TEXT +BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH +BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING +BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE +BUG_TEST WONTFIX WIN : failures/expected/image.html = IMAGE +""" + + def parse_exp(self, expectations, overrides=None, is_lint_mode=False, + is_debug_mode=False): + self._exp = TestExpectations(self._port, + tests=self.get_basic_tests(), + expectations=expectations, + test_platform_name=self._port.test_platform_name(), + is_debug_mode=is_debug_mode, + is_lint_mode=is_lint_mode, + overrides=overrides) + + def assert_exp(self, test, result): + self.assertEquals(self._exp.get_expectations(self.get_test(test)), + set([result])) + + +class TestExpectationsTest(Base): + def test_basic(self): + self.parse_exp(self.get_basic_expectations()) + self.assert_exp('failures/expected/text.html', TEXT) + self.assert_exp('failures/expected/image_checksum.html', IMAGE) + self.assert_exp('passes/text.html', PASS) + self.assert_exp('failures/expected/image.html', PASS) + + def 
test_multiple_results(self): + self.parse_exp('BUGX : failures/expected/text.html = TEXT CRASH') + self.assertEqual(self._exp.get_expectations( + self.get_test('failures/expected/text.html')), + set([TEXT, CRASH])) + + def test_precedence(self): + # This tests handling precedence of specific lines over directories + # and tests expectations covering entire directories. + exp_str = """ +BUGX : failures/expected/text.html = TEXT +BUGX WONTFIX : failures/expected = IMAGE +""" + self.parse_exp(exp_str) + self.assert_exp('failures/expected/text.html', TEXT) + self.assert_exp('failures/expected/crash.html', IMAGE) + + def test_category_expectations(self): + # This test checks unknown tests are not present in the + # expectations and that known test part of a test category is + # present in the expectations. + exp_str = """ +BUGX WONTFIX : failures/expected = IMAGE +""" + self.parse_exp(exp_str) + test_name = 'failures/expected/unknown-test.html' + unknown_test = self.get_test(test_name) + self.assertRaises(KeyError, self._exp.get_expectations, + unknown_test) + self.assert_exp('failures/expected/crash.html', IMAGE) + + def test_release_mode(self): + self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT', + is_debug_mode=True) + self.assert_exp('failures/expected/text.html', TEXT) + self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT', + is_debug_mode=True) + self.assert_exp('failures/expected/text.html', PASS) + self.parse_exp('BUGX DEBUG : failures/expected/text.html = TEXT', + is_debug_mode=False) + self.assert_exp('failures/expected/text.html', PASS) + self.parse_exp('BUGX RELEASE : failures/expected/text.html = TEXT', + is_debug_mode=False) + self.assert_exp('failures/expected/text.html', TEXT) + + def test_get_options(self): + self.parse_exp(self.get_basic_expectations()) + self.assertEqual(self._exp.get_options( + self.get_test('passes/text.html')), []) + + def test_expectations_json_for_all_platforms(self): + self.parse_exp(self.get_basic_expectations()) + json_str = self._exp.get_expectations_json_for_all_platforms() + # FIXME: test actual content? + self.assertTrue(json_str) + + def test_get_expectations_string(self): + self.parse_exp(self.get_basic_expectations()) + self.assertEquals(self._exp.get_expectations_string( + self.get_test('failures/expected/text.html')), + 'TEXT') + + def test_expectation_to_string(self): + # Normal cases are handled by other tests. + self.parse_exp(self.get_basic_expectations()) + self.assertRaises(ValueError, self._exp.expectation_to_string, + -1) + + def test_get_test_set(self): + # Handle some corner cases for this routine not covered by other tests. + self.parse_exp(self.get_basic_expectations()) + s = self._exp._expected_failures.get_test_set(WONTFIX) + self.assertEqual(s, + set([self.get_test('failures/expected/crash.html'), + self.get_test('failures/expected/image_checksum.html')])) + s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH) + self.assertEqual(s, + set([self.get_test('failures/expected/crash.html')])) + s = self._exp._expected_failures.get_test_set(WONTFIX, CRASH, + include_skips=False) + self.assertEqual(s, set([])) + + def test_parse_error_fatal(self): + try: + self.parse_exp("""FOO : failures/expected/text.html = TEXT +SKIP : failures/expected/image.html""") + self.assertFalse(True, "ParseError wasn't raised") + except ParseError, e: + self.assertTrue(e.fatal) + exp_errors = [u'Line:1 Invalid modifier for test: foo failures/expected/text.html', + u"Line:2 Missing expectations. 
[' failures/expected/image.html']"] + self.assertEqual(str(e), '\n'.join(map(str, exp_errors))) + self.assertEqual(e.errors, exp_errors) + + def test_parse_error_nonfatal(self): + try: + self.parse_exp('SKIP : failures/expected/text.html = TEXT', + is_lint_mode=True) + self.assertFalse(True, "ParseError wasn't raised") + except ParseError, e: + self.assertFalse(e.fatal) + exp_errors = [u'Line:1 Test lacks BUG modifier. failures/expected/text.html'] + self.assertEqual(str(e), '\n'.join(map(str, exp_errors))) + self.assertEqual(e.errors, exp_errors) + + def test_syntax_missing_expectation(self): + # This is missing the expectation. + self.assertRaises(ParseError, self.parse_exp, + 'BUG_TEST: failures/expected/text.html', + is_debug_mode=True) + + def test_syntax_invalid_option(self): + self.assertRaises(ParseError, self.parse_exp, + 'BUG_TEST FOO: failures/expected/text.html = PASS') + + def test_syntax_invalid_expectation(self): + # This is missing the expectation. + self.assertRaises(ParseError, self.parse_exp, + 'BUG_TEST: failures/expected/text.html = FOO') + + def test_syntax_missing_bugid(self): + # This should log a non-fatal error. + self.parse_exp('SLOW : failures/expected/text.html = TEXT') + self.assertEqual( + len(self._exp._expected_failures.get_non_fatal_errors()), 1) + + def test_semantic_slow_and_timeout(self): + # A test cannot be SLOW and expected to TIMEOUT. + self.assertRaises(ParseError, self.parse_exp, + 'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT') + + def test_semantic_rebaseline(self): + # Can't lint a file w/ 'REBASELINE' in it. + self.assertRaises(ParseError, self.parse_exp, + 'BUG_TEST REBASELINE : failures/expected/text.html = TEXT', + is_lint_mode=True) + + def test_semantic_duplicates(self): + self.assertRaises(ParseError, self.parse_exp, """ +BUG_TEST : failures/expected/text.html = TEXT +BUG_TEST : failures/expected/text.html = IMAGE""") + + self.assertRaises(ParseError, self.parse_exp, + self.get_basic_expectations(), """ +BUG_TEST : failures/expected/text.html = TEXT +BUG_TEST : failures/expected/text.html = IMAGE""") + + def test_semantic_missing_file(self): + # This should log a non-fatal error. 
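Editor's note: the syntax and semantic checks above pin down the shape of a test_expectations.txt line — optional modifiers, a colon, a test path, '=', one or more expected results — with a missing BUG modifier treated as a non-fatal error rather than a parse failure. Purely as an illustration of that shape (the regex, the helper name and the result list are simplifications, not the parser these tests exercise):

import re

# Simplified, illustrative grammar for one expectations line; the real parser
# in test_expectations.py is richer and also validates modifiers and platforms.
_LINE_RE = re.compile(r'^(?P<modifiers>[^:]*):(?P<test>[^=]+)=(?P<results>.+)$')
_KNOWN_RESULTS = set(['PASS', 'FAIL', 'TEXT', 'IMAGE', 'IMAGE+TEXT',
                      'CRASH', 'TIMEOUT', 'MISSING'])

def parse_expectation_line(line):
    """Returns (modifiers, test, results) or raises ValueError for the two
    fatal cases exercised above: no '= RESULT' part, or an unknown result."""
    match = _LINE_RE.match(line.strip())
    if not match:
        raise ValueError('Missing expectations: %s' % line)
    modifiers = match.group('modifiers').upper().split()
    test = match.group('test').strip()
    results = match.group('results').upper().split()
    unknown = [r for r in results if r not in _KNOWN_RESULTS]
    if unknown:
        raise ValueError('Unknown expectation(s) %s: %s' % (unknown, line))
    return modifiers, test, results

# Example: mirrors the kind of line used throughout these tests.
assert parse_expectation_line('BUGX WONTFIX : failures/expected = IMAGE') == \
    (['BUGX', 'WONTFIX'], 'failures/expected', ['IMAGE'])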
+ self.parse_exp('BUG_TEST : missing_file.html = TEXT') + self.assertEqual( + len(self._exp._expected_failures.get_non_fatal_errors()), 1) + + + def test_overrides(self): + self.parse_exp(self.get_basic_expectations(), """ +BUG_OVERRIDE : failures/expected/text.html = IMAGE""") + self.assert_exp('failures/expected/text.html', IMAGE) + + def test_matches_an_expected_result(self): + + def match(test, result, pixel_tests_enabled): + return self._exp.matches_an_expected_result( + self.get_test(test), result, pixel_tests_enabled) + + self.parse_exp(self.get_basic_expectations()) + self.assertTrue(match('failures/expected/text.html', TEXT, True)) + self.assertTrue(match('failures/expected/text.html', TEXT, False)) + self.assertFalse(match('failures/expected/text.html', CRASH, True)) + self.assertFalse(match('failures/expected/text.html', CRASH, False)) + self.assertTrue(match('failures/expected/image_checksum.html', IMAGE, + True)) + self.assertTrue(match('failures/expected/image_checksum.html', PASS, + False)) + self.assertTrue(match('failures/expected/crash.html', SKIP, False)) + self.assertTrue(match('passes/text.html', PASS, False)) + + +class RebaseliningTest(Base): + """Test rebaselining-specific functionality.""" + def assertRemove(self, platform, input_expectations, expected_expectations): + self.parse_exp(input_expectations) + test = self.get_test('failures/expected/text.html') + actual_expectations = self._exp.remove_platform_from_expectations( + test, platform) + self.assertEqual(expected_expectations, actual_expectations) + + def test_no_get_rebaselining_failures(self): + self.parse_exp(self.get_basic_expectations()) + self.assertEqual(len(self._exp.get_rebaselining_failures()), 0) + + def test_get_rebaselining_failures_expand(self): + self.parse_exp(""" +BUG_TEST REBASELINE : failures/expected/text.html = TEXT +""") + self.assertEqual(len(self._exp.get_rebaselining_failures()), 1) + + def test_remove_expand(self): + self.assertRemove('mac', + 'BUGX REBASELINE : failures/expected/text.html = TEXT\n', + 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n') + + def test_remove_mac_win(self): + self.assertRemove('mac', + 'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n', + 'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n') + + def test_remove_mac_mac(self): + self.assertRemove('mac', + 'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n', + '') + + def test_remove_nothing(self): + self.assertRemove('mac', + '\n\n', + '\n\n') + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py new file mode 100644 index 0000000..6d55761 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py @@ -0,0 +1,282 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Classes for failures that occur during tests.""" + +import os +import test_expectations + +import cPickle + + +def determine_result_type(failure_list): + """Takes a set of test_failures and returns which result type best fits + the list of failures. "Best fits" means we use the worst type of failure. + + Returns: + one of the test_expectations result types - PASS, TEXT, CRASH, etc.""" + + if not failure_list or len(failure_list) == 0: + return test_expectations.PASS + + failure_types = [type(f) for f in failure_list] + if FailureCrash in failure_types: + return test_expectations.CRASH + elif FailureTimeout in failure_types: + return test_expectations.TIMEOUT + elif (FailureMissingResult in failure_types or + FailureMissingImage in failure_types or + FailureMissingImageHash in failure_types): + return test_expectations.MISSING + else: + is_text_failure = FailureTextMismatch in failure_types + is_image_failure = (FailureImageHashIncorrect in failure_types or + FailureImageHashMismatch in failure_types) + if is_text_failure and is_image_failure: + return test_expectations.IMAGE_PLUS_TEXT + elif is_text_failure: + return test_expectations.TEXT + elif is_image_failure: + return test_expectations.IMAGE + else: + raise ValueError("unclassifiable set of failures: " + + str(failure_types)) + + +class TestFailure(object): + """Abstract base class that defines the failure interface.""" + + @staticmethod + def loads(s): + """Creates a TestFailure object from the specified string.""" + return cPickle.loads(s) + + @staticmethod + def message(): + """Returns a string describing the failure in more detail.""" + raise NotImplementedError + + def __eq__(self, other): + return self.__class__.__name__ == other.__class__.__name__ + + def __ne__(self, other): + return self.__class__.__name__ != other.__class__.__name__ + + def dumps(self): + """Returns the string/JSON representation of a TestFailure.""" + return cPickle.dumps(self) + + def result_html_output(self, filename): + """Returns an HTML string to be included on the results.html page.""" + raise NotImplementedError + + def should_kill_dump_render_tree(self): + """Returns True if we should kill DumpRenderTree before the next + test.""" + return False + + def relative_output_filename(self, filename, modifier): + """Returns a relative filename inside the output dir that contains + modifier. 
+ + For example, if filename is fast\dom\foo.html and modifier is + "-expected.txt", the return value is fast\dom\foo-expected.txt + + Args: + filename: relative filename to test file + modifier: a string to replace the extension of filename with + + Return: + The relative windows path to the output filename + """ + return os.path.splitext(filename)[0] + modifier + + +class FailureWithType(TestFailure): + """Base class that produces standard HTML output based on the test type. + + Subclasses may commonly choose to override the ResultHtmlOutput, but still + use the standard OutputLinks. + """ + + def __init__(self): + TestFailure.__init__(self) + + # Filename suffixes used by ResultHtmlOutput. + OUT_FILENAMES = () + + def output_links(self, filename, out_names): + """Returns a string holding all applicable output file links. + + Args: + filename: the test filename, used to construct the result file names + out_names: list of filename suffixes for the files. If three or more + suffixes are in the list, they should be [actual, expected, diff, + wdiff]. Two suffixes should be [actual, expected], and a + single item is the [actual] filename suffix. + If out_names is empty, returns the empty string. + """ + # FIXME: Seems like a bad idea to separate the display name data + # from the path data by hard-coding the display name here + # and passing in the path information via out_names. + # + # FIXME: Also, we don't know for sure that these files exist, + # and we shouldn't be creating links to files that don't exist + # (for example, if we don't actually have wdiff output). + links = [''] + uris = [self.relative_output_filename(filename, fn) for + fn in out_names] + if len(uris) > 1: + links.append("<a href='%s'>expected</a>" % uris[1]) + if len(uris) > 0: + links.append("<a href='%s'>actual</a>" % uris[0]) + if len(uris) > 2: + links.append("<a href='%s'>diff</a>" % uris[2]) + if len(uris) > 3: + links.append("<a href='%s'>wdiff</a>" % uris[3]) + if len(uris) > 4: + links.append("<a href='%s'>pretty diff</a>" % uris[4]) + return ' '.join(links) + + def result_html_output(self, filename): + return self.message() + self.output_links(filename, self.OUT_FILENAMES) + + +class FailureTimeout(TestFailure): + """Test timed out. We also want to restart DumpRenderTree if this + happens.""" + + @staticmethod + def message(): + return "Test timed out" + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() + + def should_kill_dump_render_tree(self): + return True + + +class FailureCrash(TestFailure): + """Test shell crashed.""" + + @staticmethod + def message(): + return "Test shell crashed" + + def result_html_output(self, filename): + # FIXME: create a link to the minidump file + stack = self.relative_output_filename(filename, "-stack.txt") + return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(), + stack) + + def should_kill_dump_render_tree(self): + return True + + +class FailureMissingResult(FailureWithType): + """Expected result was missing.""" + OUT_FILENAMES = ("-actual.txt",) + + @staticmethod + def message(): + return "No expected results found" + + def result_html_output(self, filename): + return ("<strong>%s</strong>" % self.message() + + self.output_links(filename, self.OUT_FILENAMES)) + + +class FailureTextMismatch(FailureWithType): + """Text diff output failed.""" + # Filename suffixes used by ResultHtmlOutput. + # FIXME: Why don't we use the constants from TestTypeBase here? 
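Editor's note: the OUT_FILENAMES tuple just below, like the others in this file, is consumed by output_links() above, which turns each suffix into a per-test result path before wrapping it in an anchor tag. A standalone sketch of that suffix-to-path step only (the helper name here is invented):

import os

def output_uris(test_filename, suffixes):
    """Maps a test path plus ('-actual.txt', '-expected.txt', ...) suffixes
    to per-test result filenames, as output_links() does before it builds
    the <a href=...> markup."""
    base = os.path.splitext(test_filename)[0]
    return [base + suffix for suffix in suffixes]

# Example with the actual/expected/diff ordering described in the docstring.
assert output_uris('fast/dom/foo.html',
                   ('-actual.txt', '-expected.txt', '-diff.txt')) == \
    ['fast/dom/foo-actual.txt', 'fast/dom/foo-expected.txt',
     'fast/dom/foo-diff.txt']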
+ OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt", + "-wdiff.html", "-pretty-diff.html") + + @staticmethod + def message(): + return "Text diff mismatch" + + +class FailureMissingImageHash(FailureWithType): + """Actual result hash was missing.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. + + @staticmethod + def message(): + return "No expected image hash found" + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() + + +class FailureMissingImage(FailureWithType): + """Actual result image was missing.""" + OUT_FILENAMES = ("-actual.png",) + + @staticmethod + def message(): + return "No expected image found" + + def result_html_output(self, filename): + return ("<strong>%s</strong>" % self.message() + + self.output_links(filename, self.OUT_FILENAMES)) + + +class FailureImageHashMismatch(FailureWithType): + """Image hashes didn't match.""" + OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png") + + @staticmethod + def message(): + # We call this a simple image mismatch to avoid confusion, since + # we link to the PNGs rather than the checksums. + return "Image mismatch" + + +class FailureImageHashIncorrect(FailureWithType): + """Actual result hash is incorrect.""" + # Chrome doesn't know to display a .checksum file as text, so don't bother + # putting in a link to the actual result. + + @staticmethod + def message(): + return "Images match, expected image hash incorrect. " + + def result_html_output(self, filename): + return "<strong>%s</strong>" % self.message() + +# Convenient collection of all failure classes for anything that might +# need to enumerate over them all. +ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult, + FailureTextMismatch, FailureMissingImageHash, + FailureMissingImage, FailureImageHashMismatch, + FailureImageHashIncorrect) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py new file mode 100644 index 0000000..3e3528d --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py @@ -0,0 +1,84 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""""Tests code paths not covered by the regular unit tests.""" + +import unittest + +from webkitpy.layout_tests.layout_package.test_failures import * + + +class Test(unittest.TestCase): + def assertResultHtml(self, failure_obj): + self.assertNotEqual(failure_obj.result_html_output('foo'), None) + + def assert_loads(self, cls): + failure_obj = cls() + s = failure_obj.dumps() + new_failure_obj = TestFailure.loads(s) + self.assertTrue(isinstance(new_failure_obj, cls)) + + self.assertEqual(failure_obj, new_failure_obj) + + # Also test that != is implemented. + self.assertFalse(failure_obj != new_failure_obj) + + def test_crash(self): + self.assertResultHtml(FailureCrash()) + + def test_hash_incorrect(self): + self.assertResultHtml(FailureImageHashIncorrect()) + + def test_missing(self): + self.assertResultHtml(FailureMissingResult()) + + def test_missing_image(self): + self.assertResultHtml(FailureMissingImage()) + + def test_missing_image_hash(self): + self.assertResultHtml(FailureMissingImageHash()) + + def test_timeout(self): + self.assertResultHtml(FailureTimeout()) + + def test_unknown_failure_type(self): + class UnknownFailure(TestFailure): + pass + + failure_obj = UnknownFailure() + self.assertRaises(ValueError, determine_result_type, [failure_obj]) + self.assertRaises(NotImplementedError, failure_obj.message) + self.assertRaises(NotImplementedError, failure_obj.result_html_output, + "foo.txt") + + def test_loads(self): + for c in ALL_FAILURE_CLASSES: + self.assert_loads(c) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py new file mode 100644 index 0000000..4b027c0 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_input.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class TestInput: + """Groups information about a test for easy passing of data.""" + + def __init__(self, filename, timeout): + """Holds the input parameters for a test. + Args: + filename: Full path to the test. + timeout: Timeout in msecs the driver should use while running the test + """ + # FIXME: filename should really be test_name as a relative path. + self.filename = filename + self.timeout = timeout + # The image_hash is used to avoid doing an image dump if the + # checksums match. The image_hash is set later, and only if it is needed + # for the test. + self.image_hash = None diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py new file mode 100644 index 0000000..e809be6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_output.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class TestOutput(object): + """Groups information about a test output for easy passing of data. + + This is used not only for a actual test output, but also for grouping + expected test output. + """ + + def __init__(self, text, image, image_hash, + crash=None, test_time=None, timeout=None, error=None): + """Initializes a TestOutput object. 
+ + Args: + text: a text output + image: an image output + image_hash: a string containing the checksum of the image + crash: a boolean indicating whether the driver crashed on the test + test_time: a time which the test has taken + timeout: a boolean indicating whehter the test timed out + error: any unexpected or additional (or error) text output + """ + self.text = text + self.image = image + self.image_hash = image_hash + self.crash = crash + self.test_time = test_time + self.timeout = timeout + self.error = error diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py new file mode 100644 index 0000000..2417fb7 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results.py @@ -0,0 +1,61 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
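Editor's note: the TestOutput objects defined in test_output.py just above carry both a test's actual output and the stored expected output, and the TestType implementations (not shown in this section) compare the two to produce the TestFailure objects from test_failures.py. The field names below are real, but the comparison itself is an invented toy, not the project's code:

class _Output(object):
    """Minimal stand-in for TestOutput, keeping only the two fields the toy
    comparison below looks at."""
    def __init__(self, text, image_hash):
        self.text = text
        self.image_hash = image_hash

def toy_compare(actual, expected):
    """Invented comparison returning labels that roughly correspond to
    FailureMissingResult / FailureTextMismatch / FailureMissingImageHash /
    FailureImageHashMismatch."""
    failures = []
    if expected.text is None:
        failures.append('missing result')
    elif actual.text != expected.text:
        failures.append('text mismatch')
    if expected.image_hash is None:
        failures.append('missing image hash')
    elif actual.image_hash != expected.image_hash:
        failures.append('image hash mismatch')
    return failures

assert toy_compare(_Output('PASS\n', 'abc'), _Output('PASS\n', 'abc')) == []
assert toy_compare(_Output('FAIL\n', 'abc'), _Output('PASS\n', None)) == \
    ['text mismatch', 'missing image hash']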
+ +import cPickle + +import test_failures + + +class TestResult(object): + """Data object containing the results of a single test.""" + + @staticmethod + def loads(str): + return cPickle.loads(str) + + def __init__(self, filename, failures, test_run_time, + total_time_for_all_diffs, time_for_diffs): + self.failures = failures + self.filename = filename + self.test_run_time = test_run_time + self.time_for_diffs = time_for_diffs + self.total_time_for_all_diffs = total_time_for_all_diffs + self.type = test_failures.determine_result_type(failures) + + def __eq__(self, other): + return (self.filename == other.filename and + self.failures == other.failures and + self.test_run_time == other.test_run_time and + self.time_for_diffs == other.time_for_diffs and + self.total_time_for_all_diffs == other.total_time_for_all_diffs) + + def __ne__(self, other): + return not (self == other) + + def dumps(self): + return cPickle.dumps(self) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py new file mode 100644 index 0000000..5921666 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_unittest.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from test_results import TestResult + + +class Test(unittest.TestCase): + def test_loads(self): + result = TestResult(filename='foo', + failures=[], + test_run_time=1.1, + total_time_for_all_diffs=0.5, + time_for_diffs=0.5) + s = result.dumps() + new_result = TestResult.loads(s) + self.assertTrue(isinstance(new_result, TestResult)) + + self.assertEqual(new_result, result) + + # Also check that != is implemented. 
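Editor's note: the "!= is implemented" assertions in these unit tests exist because, in Python 2, defining __eq__ alone does not give a class a matching __ne__; TestResult and TestFailure therefore spell out both. A standalone illustration of the pitfall:

class OnlyEq(object):
    def __init__(self, value):
        self.value = value
    def __eq__(self, other):
        return self.value == other.value
    # No __ne__: on Python 2, "!=" falls back to the default comparison,
    # so OnlyEq(1) != OnlyEq(1) is (surprisingly) True.

class EqAndNe(OnlyEq):
    def __ne__(self, other):
        return not self.__eq__(other)

a, b = EqAndNe(1), EqAndNe(1)
assert a == b
assert not (a != b)   # the behaviour the unit tests above guard for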
+ self.assertFalse(new_result != result) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py new file mode 100644 index 0000000..033c8c6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_results_uploader.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import codecs +import mimetypes +import socket +import urllib2 + +from webkitpy.common.net.networktransaction import NetworkTransaction + +def get_mime_type(filename): + return mimetypes.guess_type(filename)[0] or 'application/octet-stream' + + +def _encode_multipart_form_data(fields, files): + """Encode form fields for multipart/form-data. + + Args: + fields: A sequence of (name, value) elements for regular form fields. + files: A sequence of (name, filename, value) elements for data to be + uploaded as files. + Returns: + (content_type, body) ready for httplib.HTTP instance. 
+ + Source: + http://code.google.com/p/rietveld/source/browse/trunk/upload.py + """ + BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-' + CRLF = '\r\n' + lines = [] + + for key, value in fields: + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"' % key) + lines.append('') + if isinstance(value, unicode): + value = value.encode('utf-8') + lines.append(value) + + for key, filename, value in files: + lines.append('--' + BOUNDARY) + lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)) + lines.append('Content-Type: %s' % get_mime_type(filename)) + lines.append('') + if isinstance(value, unicode): + value = value.encode('utf-8') + lines.append(value) + + lines.append('--' + BOUNDARY + '--') + lines.append('') + body = CRLF.join(lines) + content_type = 'multipart/form-data; boundary=%s' % BOUNDARY + return content_type, body + + +class TestResultsUploader: + def __init__(self, host): + self._host = host + + def _upload_files(self, attrs, file_objs): + url = "http://%s/testfile/upload" % self._host + content_type, data = _encode_multipart_form_data(attrs, file_objs) + headers = {"Content-Type": content_type} + request = urllib2.Request(url, data, headers) + urllib2.urlopen(request) + + def upload(self, params, files, timeout_seconds): + file_objs = [] + for filename, path in files: + with codecs.open(path, "rb") as file: + file_objs.append(('file', filename, file.read())) + + orig_timeout = socket.getdefaulttimeout() + try: + socket.setdefaulttimeout(timeout_seconds) + NetworkTransaction(timeout_seconds=timeout_seconds).run( + lambda: self._upload_files(params, file_objs)) + finally: + socket.setdefaulttimeout(orig_timeout) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py new file mode 100644 index 0000000..24d04ca --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py @@ -0,0 +1,1218 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""" +The TestRunner class runs a series of tests (TestType interface) against a set +of test files. If a test file fails a TestType, it returns a list TestFailure +objects to the TestRunner. The TestRunner then aggregates the TestFailures to +create a final report. +""" + +from __future__ import with_statement + +import codecs +import errno +import logging +import math +import os +import Queue +import random +import shutil +import sys +import time + +from result_summary import ResultSummary +from test_input import TestInput + +import dump_render_tree_thread +import json_layout_results_generator +import message_broker +import printing +import test_expectations +import test_failures +import test_results +import test_results_uploader + +from webkitpy.thirdparty import simplejson +from webkitpy.tool import grammar + +_log = logging.getLogger("webkitpy.layout_tests.run_webkit_tests") + +# Builder base URL where we have the archived test results. +BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/" + +LAYOUT_TESTS_DIRECTORY = "LayoutTests" + os.sep + +TestExpectationsFile = test_expectations.TestExpectationsFile + + +def summarize_unexpected_results(port_obj, expectations, result_summary, + retry_summary): + """Summarize any unexpected results as a dict. + + FIXME: split this data structure into a separate class? + + Args: + port_obj: interface to port-specific hooks + expectations: test_expectations.TestExpectations object + result_summary: summary object from initial test runs + retry_summary: summary object from final test run of retried tests + Returns: + A dictionary containing a summary of the unexpected results from the + run, with the following fields: + 'version': a version indicator (1 in this version) + 'fixable': # of fixable tests (NOW - PASS) + 'skipped': # of skipped tests (NOW & SKIPPED) + 'num_regressions': # of non-flaky failures + 'num_flaky': # of flaky failures + 'num_passes': # of unexpected passes + 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} + """ + results = {} + results['version'] = 1 + + tbe = result_summary.tests_by_expectation + tbt = result_summary.tests_by_timeline + results['fixable'] = len(tbt[test_expectations.NOW] - + tbe[test_expectations.PASS]) + results['skipped'] = len(tbt[test_expectations.NOW] & + tbe[test_expectations.SKIP]) + + num_passes = 0 + num_flaky = 0 + num_regressions = 0 + keywords = {} + for k, v in TestExpectationsFile.EXPECTATIONS.iteritems(): + keywords[v] = k.upper() + + tests = {} + for filename, result in result_summary.unexpected_results.iteritems(): + # Note that if a test crashed in the original run, we ignore + # whether or not it crashed when we retried it (if we retried it), + # and always consider the result not flaky. 
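Editor's note: the loop that follows applies the rule stated in the comment above. Restated as a standalone helper (the names and string results are simplifications of the numeric expectation constants used by the real code):

def classify_unexpected(result, retry_results, filename):
    """Returns 'pass', 'regression' or 'flaky' for one unexpectedly-failing
    test, following the rules below: crashes are never treated as flaky,
    and anything that behaved differently on retry is."""
    if result == 'PASS':
        return 'pass'
    if result == 'CRASH':
        return 'regression'
    if filename not in retry_results:
        return 'flaky'            # failed once, behaved as expected on retry
    if retry_results[filename] != result:
        return 'flaky'            # failed differently on retry
    return 'regression'           # failed the same way twice

assert classify_unexpected('TEXT', {}, 'fast/a.html') == 'flaky'
assert classify_unexpected('TEXT', {'fast/a.html': 'TEXT'},
                           'fast/a.html') == 'regression'
assert classify_unexpected('CRASH', {}, 'fast/a.html') == 'regression'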
+ test = port_obj.relative_test_filename(filename) + expected = expectations.get_expectations_string(filename) + actual = [keywords[result]] + + if result == test_expectations.PASS: + num_passes += 1 + elif result == test_expectations.CRASH: + num_regressions += 1 + else: + if filename not in retry_summary.unexpected_results: + actual.extend(expectations.get_expectations_string( + filename).split(" ")) + num_flaky += 1 + else: + retry_result = retry_summary.unexpected_results[filename] + if result != retry_result: + actual.append(keywords[retry_result]) + num_flaky += 1 + else: + num_regressions += 1 + + tests[test] = {} + tests[test]['expected'] = expected + tests[test]['actual'] = " ".join(actual) + + results['tests'] = tests + results['num_passes'] = num_passes + results['num_flaky'] = num_flaky + results['num_regressions'] = num_regressions + + return results + + +class TestRunInterruptedException(Exception): + """Raised when a test run should be stopped immediately.""" + def __init__(self, reason): + self.reason = reason + + +class TestRunner: + """A class for managing running a series of tests on a series of layout + test files.""" + + HTTP_SUBDIR = os.sep.join(['', 'http', '']) + WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', '']) + + # The per-test timeout in milliseconds, if no --time-out-ms option was + # given to run_webkit_tests. This should correspond to the default timeout + # in DumpRenderTree. + DEFAULT_TEST_TIMEOUT_MS = 6 * 1000 + + def __init__(self, port, options, printer): + """Initialize test runner data structures. + + Args: + port: an object implementing port-specific + options: a dictionary of command line options + printer: a Printer object to record updates to. + """ + self._port = port + self._options = options + self._printer = printer + self._message_broker = None + + # disable wss server. need to install pyOpenSSL on buildbots. + # self._websocket_secure_server = websocket_server.PyWebSocket( + # options.results_directory, use_tls=True, port=9323) + + # a set of test files, and the same tests as a list + self._test_files = set() + self._test_files_list = None + self._result_queue = Queue.Queue() + self._retrying = False + + def collect_tests(self, args, last_unexpected_results): + """Find all the files to test. + + Args: + args: list of test arguments from the command line + last_unexpected_results: list of unexpected results to retest, if any + + """ + paths = [self._strip_test_dir_prefix(arg) for arg in args if arg and arg != ''] + paths += last_unexpected_results + if self._options.test_list: + paths += read_test_files(self._options.test_list) + self._test_files = self._port.tests(paths) + + def _strip_test_dir_prefix(self, path): + if path.startswith(LAYOUT_TESTS_DIRECTORY): + return path[len(LAYOUT_TESTS_DIRECTORY):] + return path + + def lint(self): + lint_failed = False + + # Creating the expecations for each platform/configuration pair does + # all the test list parsing and ensures it's correct syntax (e.g. no + # dupes). 
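Editor's note: in other words, lint mode parses every platform in both configurations and reports failure if any parse raises, which is exactly what the loop below does. A toy restatement (ValueError stands in for test_expectations.ParseError, and the callables are invented):

def lint_all_configurations(platform_names, parse):
    """Parses each platform in debug and release; any parse error fails
    the whole lint run, mirroring TestRunner.lint()."""
    lint_failed = False
    for platform_name in platform_names:
        for is_debug_mode in (True, False):
            try:
                parse(platform_name, is_debug_mode)
            except ValueError:
                lint_failed = True
    return lint_failed

def parse_ok(platform_name, is_debug_mode):
    return None

def parse_bad(platform_name, is_debug_mode):
    if platform_name == 'win':
        raise ValueError('duplicate expectation')

assert lint_all_configurations(['mac', 'win'], parse_ok) is False
assert lint_all_configurations(['mac', 'win'], parse_bad) is True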
+ for platform_name in self._port.test_platform_names(): + try: + self.parse_expectations(platform_name, is_debug_mode=True) + except test_expectations.ParseError: + lint_failed = True + try: + self.parse_expectations(platform_name, is_debug_mode=False) + except test_expectations.ParseError: + lint_failed = True + + self._printer.write("") + if lint_failed: + _log.error("Lint failed.") + return -1 + + _log.info("Lint succeeded.") + return 0 + + def parse_expectations(self, test_platform_name, is_debug_mode): + """Parse the expectations from the test_list files and return a data + structure holding them. Throws an error if the test_list files have + invalid syntax.""" + if self._options.lint_test_files: + test_files = None + else: + test_files = self._test_files + + expectations_str = self._port.test_expectations() + overrides_str = self._port.test_expectations_overrides() + self._expectations = test_expectations.TestExpectations( + self._port, test_files, expectations_str, test_platform_name, + is_debug_mode, self._options.lint_test_files, + overrides=overrides_str) + return self._expectations + + def prepare_lists_and_print_output(self): + """Create appropriate subsets of test lists and returns a + ResultSummary object. Also prints expected test counts. + """ + + # Remove skipped - both fixable and ignored - files from the + # top-level list of files to test. + num_all_test_files = len(self._test_files) + self._printer.print_expected("Found: %d tests" % + (len(self._test_files))) + if not num_all_test_files: + _log.critical('No tests to run.') + return None + + skipped = set() + if num_all_test_files > 1 and not self._options.force: + skipped = self._expectations.get_tests_with_result_type( + test_expectations.SKIP) + self._test_files -= skipped + + # Create a sorted list of test files so the subset chunk, + # if used, contains alphabetically consecutive tests. + self._test_files_list = list(self._test_files) + if self._options.randomize_order: + random.shuffle(self._test_files_list) + else: + self._test_files_list.sort() + + # If the user specifies they just want to run a subset of the tests, + # just grab a subset of the non-skipped tests. + if self._options.run_chunk or self._options.run_part: + chunk_value = self._options.run_chunk or self._options.run_part + test_files = self._test_files_list + try: + (chunk_num, chunk_len) = chunk_value.split(":") + chunk_num = int(chunk_num) + assert(chunk_num >= 0) + test_size = int(chunk_len) + assert(test_size > 0) + except: + _log.critical("invalid chunk '%s'" % chunk_value) + return None + + # Get the number of tests + num_tests = len(test_files) + + # Get the start offset of the slice. + if self._options.run_chunk: + chunk_len = test_size + # In this case chunk_num can be really large. We need + # to make the slave fit in the current number of tests. + slice_start = (chunk_num * chunk_len) % num_tests + else: + # Validate the data. + assert(test_size <= num_tests) + assert(chunk_num <= test_size) + + # To count the chunk_len, and make sure we don't skip + # some tests, we round to the next value that fits exactly + # all the parts. + rounded_tests = num_tests + if rounded_tests % test_size != 0: + rounded_tests = (num_tests + test_size - + (num_tests % test_size)) + + chunk_len = rounded_tests / test_size + slice_start = chunk_len * (chunk_num - 1) + # It does not mind if we go over test_size. + + # Get the end offset of the slice. 
+ slice_end = min(num_tests, slice_start + chunk_len) + + files = test_files[slice_start:slice_end] + + tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ( + (slice_end - slice_start), slice_start, slice_end, num_tests) + self._printer.print_expected(tests_run_msg) + + # If we reached the end and we don't have enough tests, we run some + # from the beginning. + if slice_end - slice_start < chunk_len: + extra = chunk_len - (slice_end - slice_start) + extra_msg = (' last chunk is partial, appending [0:%d]' % + extra) + self._printer.print_expected(extra_msg) + tests_run_msg += "\n" + extra_msg + files.extend(test_files[0:extra]) + tests_run_filename = os.path.join(self._options.results_directory, + "tests_run.txt") + with codecs.open(tests_run_filename, "w", "utf-8") as file: + file.write(tests_run_msg + "\n") + + len_skip_chunk = int(len(files) * len(skipped) / + float(len(self._test_files))) + skip_chunk_list = list(skipped)[0:len_skip_chunk] + skip_chunk = set(skip_chunk_list) + + # Update expectations so that the stats are calculated correctly. + # We need to pass a list that includes the right # of skipped files + # to ParseExpectations so that ResultSummary() will get the correct + # stats. So, we add in the subset of skipped files, and then + # subtract them back out. + self._test_files_list = files + skip_chunk_list + self._test_files = set(self._test_files_list) + + self._expectations = self.parse_expectations( + self._port.test_platform_name(), + self._options.configuration == 'Debug') + + self._test_files = set(files) + self._test_files_list = files + else: + skip_chunk = skipped + + result_summary = ResultSummary(self._expectations, + self._test_files | skip_chunk) + self._print_expected_results_of_type(result_summary, + test_expectations.PASS, "passes") + self._print_expected_results_of_type(result_summary, + test_expectations.FAIL, "failures") + self._print_expected_results_of_type(result_summary, + test_expectations.FLAKY, "flaky") + self._print_expected_results_of_type(result_summary, + test_expectations.SKIP, "skipped") + + if self._options.force: + self._printer.print_expected('Running all tests, including ' + 'skips (--force)') + else: + # Note that we don't actually run the skipped tests (they were + # subtracted out of self._test_files, above), but we stub out the + # results here so the statistics can remain accurate. + for test in skip_chunk: + result = test_results.TestResult(test, + failures=[], test_run_time=0, total_time_for_all_diffs=0, + time_for_diffs=0) + result.type = test_expectations.SKIP + result_summary.add(result, expected=True) + self._printer.print_expected('') + + return result_summary + + def _get_dir_for_test_file(self, test_file): + """Returns the highest-level directory by which to shard the given + test file.""" + index = test_file.rfind(os.sep + LAYOUT_TESTS_DIRECTORY) + + test_file = test_file[index + len(LAYOUT_TESTS_DIRECTORY):] + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + test_file = test_file_parts[1] + + # The http tests are very stable on mac/linux. + # TODO(ojan): Make the http server on Windows be apache so we can + # turn shard the http tests there as well. Switching to apache is + # what made them stable on linux/mac. 
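Editor's note: the --run-chunk handling above boils down to slicing an alphabetized test list and wrapping around to the front when the requested chunk runs past the end. A standalone restatement of just that arithmetic (the function name is invented; the real code also rounds --run-part chunks and adjusts the skipped-test statistics):

def chunk_slice(tests, chunk_num, chunk_len):
    """Returns the tests for chunk 'chunk_num' of length 'chunk_len',
    wrapping around to the start of the list when the slice would run
    past the end."""
    num_tests = len(tests)
    slice_start = (chunk_num * chunk_len) % num_tests
    slice_end = min(num_tests, slice_start + chunk_len)
    files = tests[slice_start:slice_end]
    if slice_end - slice_start < chunk_len:
        files.extend(tests[0:chunk_len - (slice_end - slice_start)])
    return files

tests = ['t%d' % i for i in range(10)]
assert chunk_slice(tests, 0, 4) == ['t0', 't1', 't2', 't3']
assert chunk_slice(tests, 2, 4) == ['t8', 't9', 't0', 't1']   # wraps around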
+ return_value = directory + while ((directory != 'http' or sys.platform in ('darwin', 'linux2')) + and test_file.find(os.sep) >= 0): + test_file_parts = test_file.split(os.sep, 1) + directory = test_file_parts[0] + return_value = os.path.join(return_value, directory) + test_file = test_file_parts[1] + + return return_value + + def _get_test_input_for_file(self, test_file): + """Returns the appropriate TestInput object for the file. Mostly this + is used for looking up the timeout value (in ms) to use for the given + test.""" + if self._test_is_slow(test_file): + return TestInput(test_file, self._options.slow_time_out_ms) + return TestInput(test_file, self._options.time_out_ms) + + def _test_requires_lock(self, test_file): + """Return True if the test needs to be locked when + running multiple copies of NRWTs.""" + split_path = test_file.split(os.sep) + return 'http' in split_path or 'websocket' in split_path + + def _test_is_slow(self, test_file): + return self._expectations.has_modifier(test_file, + test_expectations.SLOW) + + def _shard_tests(self, test_files, use_real_shards): + """Groups tests into batches. + This helps ensure that tests that depend on each other (aka bad tests!) + continue to run together as most cross-tests dependencies tend to + occur within the same directory. If use_real_shards is False, we + put each (non-HTTP/websocket) test into its own shard for maximum + concurrency instead of trying to do any sort of real sharding. + + Return: + A list of lists of TestInput objects. + """ + # FIXME: when we added http locking, we changed how this works such + # that we always lump all of the HTTP threads into a single shard. + # That will slow down experimental-fully-parallel, but it's unclear + # what the best alternative is completely revamping how we track + # when to grab the lock. + + test_lists = [] + tests_to_http_lock = [] + if not use_real_shards: + for test_file in test_files: + test_input = self._get_test_input_for_file(test_file) + if self._test_requires_lock(test_file): + tests_to_http_lock.append(test_input) + else: + test_lists.append((".", [test_input])) + else: + tests_by_dir = {} + for test_file in test_files: + directory = self._get_dir_for_test_file(test_file) + test_input = self._get_test_input_for_file(test_file) + if self._test_requires_lock(test_file): + tests_to_http_lock.append(test_input) + else: + tests_by_dir.setdefault(directory, []) + tests_by_dir[directory].append(test_input) + # Sort by the number of tests in the dir so that the ones with the + # most tests get run first in order to maximize parallelization. + # Number of tests is a good enough, but not perfect, approximation + # of how long that set of tests will take to run. We can't just use + # a PriorityQueue until we move to Python 2.6. + for directory in tests_by_dir: + test_list = tests_by_dir[directory] + # Keep the tests in alphabetical order. + # FIXME: Remove once tests are fixed so they can be run in any + # order. + test_list.reverse() + test_list_tuple = (directory, test_list) + test_lists.append(test_list_tuple) + test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1]))) + + # Put the http tests first. There are only a couple hundred of them, + # but each http test takes a very long time to run, so sorting by the + # number of tests doesn't accurately capture how long they take to run. 
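Editor's note: the sharding policy spelled out in _shard_tests() above — one shard per directory, largest shards first, every test that needs the HTTP lock collected into a single shard at the front — can be restated as a small standalone function. The directory extraction here is deliberately simplified to the first path component, unlike _get_dir_for_test_file():

def shard(test_files):
    """Simplified restatement of _shard_tests(): bucket by leading directory,
    put lock-requiring (http/websocket) tests in one shard at the front, and
    order the remaining shards by descending size."""
    by_dir = {}
    locked = []
    for test in test_files:
        parts = test.split('/')
        if 'http' in parts or 'websocket' in parts:
            locked.append(test)
        else:
            by_dir.setdefault(parts[0], []).append(test)
    shards = sorted(by_dir.items(), key=lambda item: -len(item[1]))
    if locked:
        shards.insert(0, ('tests_to_http_lock', locked))
    return shards

shards = shard(['fast/a.html', 'fast/b.html', 'svg/c.html',
                'http/tests/d.html'])
assert [name for name, tests in shards] == ['tests_to_http_lock', 'fast', 'svg']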
+ if tests_to_http_lock: + tests_to_http_lock.reverse() + test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock)) + + return test_lists + + def _contains_tests(self, subdir): + for test_file in self._test_files: + if test_file.find(subdir) >= 0: + return True + return False + + def _num_workers(self): + return int(self._options.child_processes) + + def _run_tests(self, file_list, result_summary): + """Runs the tests in the file_list. + + Return: A tuple (interrupted, keyboard_interrupted, thread_timings, + test_timings, individual_test_timings) + interrupted is whether the run was interrupted + keyboard_interrupted is whether the interruption was because someone + typed Ctrl^C + thread_timings is a list of dicts with the total runtime + of each thread with 'name', 'num_tests', 'total_time' properties + test_timings is a list of timings for each sharded subdirectory + of the form [time, directory_name, num_tests] + individual_test_timings is a list of run times for each test + in the form {filename:filename, test_run_time:test_run_time} + result_summary: summary object to populate with the results + """ + + self._printer.print_update('Sharding tests ...') + num_workers = self._num_workers() + test_lists = self._shard_tests(file_list, + num_workers > 1 and not self._options.experimental_fully_parallel) + filename_queue = Queue.Queue() + for item in test_lists: + filename_queue.put(item) + + self._printer.print_update('Starting %s ...' % + grammar.pluralize('worker', num_workers)) + self._message_broker = message_broker.get(self._port, self._options) + broker = self._message_broker + self._current_filename_queue = filename_queue + self._current_result_summary = result_summary + + if not self._options.dry_run: + threads = broker.start_workers(self) + else: + threads = {} + + self._printer.print_update("Starting testing ...") + keyboard_interrupted = False + interrupted = False + if not self._options.dry_run: + try: + broker.run_message_loop() + except KeyboardInterrupt: + _log.info("Interrupted, exiting") + broker.cancel_workers() + keyboard_interrupted = True + interrupted = True + except TestRunInterruptedException, e: + _log.info(e.reason) + broker.cancel_workers() + interrupted = True + except: + # Unexpected exception; don't try to clean up workers. + _log.info("Exception raised, exiting") + raise + + thread_timings, test_timings, individual_test_timings = \ + self._collect_timing_info(threads) + + broker.cleanup() + self._message_broker = None + return (interrupted, keyboard_interrupted, thread_timings, test_timings, + individual_test_timings) + + def update(self): + self.update_summary(self._current_result_summary) + + def _collect_timing_info(self, threads): + test_timings = {} + individual_test_timings = [] + thread_timings = [] + + for thread in threads: + thread_timings.append({'name': thread.getName(), + 'num_tests': thread.get_num_tests(), + 'total_time': thread.get_total_time()}) + test_timings.update(thread.get_test_group_timing_stats()) + individual_test_timings.extend(thread.get_test_results()) + + return (thread_timings, test_timings, individual_test_timings) + + def needs_http(self): + """Returns whether the test runner needs an HTTP server.""" + return self._contains_tests(self.HTTP_SUBDIR) + + def needs_websocket(self): + """Returns whether the test runner needs a WEBSOCKET server.""" + return self._contains_tests(self.WEBSOCKET_SUBDIR) + + def set_up_run(self): + """Configures the system to be ready to run tests. 
+ + Returns a ResultSummary object if we should continue to run tests, + or None if we should abort. + + """ + # This must be started before we check the system dependencies, + # since the helper may do things to make the setup correct. + self._printer.print_update("Starting helper ...") + self._port.start_helper() + + # Check that the system dependencies (themes, fonts, ...) are correct. + if not self._options.nocheck_sys_deps: + self._printer.print_update("Checking system dependencies ...") + if not self._port.check_sys_deps(self.needs_http()): + self._port.stop_helper() + return None + + if self._options.clobber_old_results: + self._clobber_old_results() + + # Create the output directory if it doesn't already exist. + self._port.maybe_make_directory(self._options.results_directory) + + self._port.setup_test_run() + + self._printer.print_update("Preparing tests ...") + result_summary = self.prepare_lists_and_print_output() + if not result_summary: + return None + + return result_summary + + def run(self, result_summary): + """Run all our tests on all our test files. + + For each test file, we run each test type. If there are any failures, + we collect them for reporting. + + Args: + result_summary: a summary object tracking the test results. + + Return: + The number of unexpected results (0 == success) + """ + # gather_test_files() must have been called first to initialize us. + # If we didn't find any files to test, we've errored out already in + # prepare_lists_and_print_output(). + assert(len(self._test_files)) + + start_time = time.time() + + interrupted, keyboard_interrupted, thread_timings, test_timings, \ + individual_test_timings = ( + self._run_tests(self._test_files_list, result_summary)) + + # We exclude the crashes from the list of results to retry, because + # we want to treat even a potentially flaky crash as an error. + failures = self._get_failures(result_summary, include_crashes=False) + retry_summary = result_summary + while (len(failures) and self._options.retry_failures and + not self._retrying and not interrupted): + _log.info('') + _log.info("Retrying %d unexpected failure(s) ..." % len(failures)) + _log.info('') + self._retrying = True + retry_summary = ResultSummary(self._expectations, failures.keys()) + # Note that we intentionally ignore the return value here. + self._run_tests(failures.keys(), retry_summary) + failures = self._get_failures(retry_summary, include_crashes=True) + + end_time = time.time() + + self._print_timing_statistics(end_time - start_time, + thread_timings, test_timings, + individual_test_timings, + result_summary) + + self._print_result_summary(result_summary) + + sys.stdout.flush() + sys.stderr.flush() + + self._printer.print_one_line_summary(result_summary.total, + result_summary.expected, + result_summary.unexpected) + + unexpected_results = summarize_unexpected_results(self._port, + self._expectations, result_summary, retry_summary) + self._printer.print_unexpected_results(unexpected_results) + + if (self._options.record_results and not self._options.dry_run and + not interrupted): + # Write the same data to log files and upload generated JSON files + # to appengine server. + self._upload_json_files(unexpected_results, result_summary, + individual_test_timings) + + # Write the summary to disk (results.html) and display it if requested. 
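Editor's note on the retry loop in run() above: unexpected failures are re-run at most once, crashes are excluded from the retry set so that even a potentially flaky crash still counts as an error, and the retry results feed only the flakiness summary. A toy restatement of that control flow (the runner callable and the result strings are invented):

def run_with_one_retry(run_tests, retry_enabled):
    """Runs everything once, then re-runs only the non-crash unexpected
    failures a single time; the second set of results is used just to
    decide what was flaky."""
    first = run_tests(None)                       # full run
    failures = [t for t, r in first.items() if r not in ('PASS', 'CRASH')]
    retry = first
    if failures and retry_enabled:
        retry = run_tests(failures)               # retry failed tests only
    return first, retry

calls = []
def fake_run(tests):
    calls.append(tests)
    if tests is None:
        return {'a.html': 'TEXT', 'b.html': 'CRASH'}
    return dict((t, 'PASS') for t in tests)

first, retry = run_with_one_retry(fake_run, True)
assert calls == [None, ['a.html']]   # the crash is never retried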
+ if not self._options.dry_run: + wrote_results = self._write_results_html_file(result_summary) + if self._options.show_results and wrote_results: + self._show_results_html_file() + + # Now that we've completed all the processing we can, we re-raise + # a KeyboardInterrupt if necessary so the caller can handle it. + if keyboard_interrupted: + raise KeyboardInterrupt + + # Ignore flaky failures and unexpected passes so we don't turn the + # bot red for those. + return unexpected_results['num_regressions'] + + def clean_up_run(self): + """Restores the system after we're done running tests.""" + + _log.debug("flushing stdout") + sys.stdout.flush() + _log.debug("flushing stderr") + sys.stderr.flush() + _log.debug("stopping helper") + self._port.stop_helper() + + def update_summary(self, result_summary): + """Update the summary and print results with any completed tests.""" + while True: + try: + result = test_results.TestResult.loads(self._result_queue.get_nowait()) + except Queue.Empty: + return + + expected = self._expectations.matches_an_expected_result( + result.filename, result.type, self._options.pixel_tests) + result_summary.add(result, expected) + exp_str = self._expectations.get_expectations_string( + result.filename) + got_str = self._expectations.expectation_to_string(result.type) + self._printer.print_test_result(result, expected, exp_str, got_str) + self._printer.print_progress(result_summary, self._retrying, + self._test_files_list) + + def interrupt_if_at_failure_limit(limit, count, message): + if limit and count >= limit: + raise TestRunInterruptedException(message % count) + + interrupt_if_at_failure_limit( + self._options.exit_after_n_failures, + result_summary.unexpected_failures, + "Aborting run since %d failures were reached") + interrupt_if_at_failure_limit( + self._options.exit_after_n_crashes_or_timeouts, + result_summary.unexpected_crashes_or_timeouts, + "Aborting run since %d crashes or timeouts were reached") + + def _clobber_old_results(self): + # Just clobber the actual test results directories since the other + # files in the results directory are explicitly used for cross-run + # tracking. + self._printer.print_update("Clobbering old results in %s" % + self._options.results_directory) + layout_tests_dir = self._port.layout_tests_dir() + possible_dirs = self._port.test_dirs() + for dirname in possible_dirs: + if os.path.isdir(os.path.join(layout_tests_dir, dirname)): + shutil.rmtree(os.path.join(self._options.results_directory, + dirname), + ignore_errors=True) + + def _get_failures(self, result_summary, include_crashes): + """Filters a dict of results and returns only the failures. + + Args: + result_summary: the results of the test run + include_crashes: whether crashes are included in the output. + We use False when finding the list of failures to retry + to see if the results were flaky. Although the crashes may also be + flaky, we treat them as if they aren't so that they're not ignored. + Returns: + a dict of files -> results + """ + failed_results = {} + for test, result in result_summary.unexpected_results.iteritems(): + if (result == test_expectations.PASS or + result == test_expectations.CRASH and not include_crashes): + continue + failed_results[test] = result + + return failed_results + + def _upload_json_files(self, unexpected_results, result_summary, + individual_test_timings): + """Writes the results of the test run as JSON files into the results + dir and upload the files to the appengine server. 
+ + There are three different files written into the results dir: + unexpected_results.json: A short list of any unexpected results. + This is used by the buildbots to display results. + expectations.json: This is used by the flakiness dashboard. + results.json: A full list of the results - used by the flakiness + dashboard and the aggregate results dashboard. + + Args: + unexpected_results: dict of unexpected results + result_summary: full summary object + individual_test_timings: list of test times (used by the flakiness + dashboard). + """ + results_directory = self._options.results_directory + _log.debug("Writing JSON files in %s." % results_directory) + unexpected_json_path = os.path.join(results_directory, "unexpected_results.json") + with codecs.open(unexpected_json_path, "w", "utf-8") as file: + simplejson.dump(unexpected_results, file, sort_keys=True, indent=2) + + # Write a json file of the test_expectations.txt file for the layout + # tests dashboard. + expectations_path = os.path.join(results_directory, "expectations.json") + expectations_json = \ + self._expectations.get_expectations_json_for_all_platforms() + with codecs.open(expectations_path, "w", "utf-8") as file: + file.write(u"ADD_EXPECTATIONS(%s);" % expectations_json) + + generator = json_layout_results_generator.JSONLayoutResultsGenerator( + self._port, self._options.builder_name, self._options.build_name, + self._options.build_number, self._options.results_directory, + BUILDER_BASE_URL, individual_test_timings, + self._expectations, result_summary, self._test_files_list, + not self._options.upload_full_results, + self._options.test_results_server, + "layout-tests", + self._options.master_name) + + _log.debug("Finished writing JSON files.") + + json_files = ["expectations.json"] + if self._options.upload_full_results: + json_files.append("results.json") + else: + json_files.append("incremental_results.json") + + generator.upload_json_files(json_files) + + def _print_config(self): + """Prints the configuration for the test run.""" + p = self._printer + p.print_config("Using port '%s'" % self._port.name()) + p.print_config("Placing test results in %s" % + self._options.results_directory) + if self._options.new_baseline: + p.print_config("Placing new baselines in %s" % + self._port.baseline_path()) + p.print_config("Using %s build" % self._options.configuration) + if self._options.pixel_tests: + p.print_config("Pixel tests enabled") + else: + p.print_config("Pixel tests disabled") + + p.print_config("Regular timeout: %s, slow test timeout: %s" % + (self._options.time_out_ms, + self._options.slow_time_out_ms)) + + if self._num_workers() == 1: + p.print_config("Running one %s" % self._port.driver_name()) + else: + p.print_config("Running %s %ss in parallel" % + (self._options.child_processes, + self._port.driver_name())) + p.print_config('Command line: ' + + ' '.join(self._port.driver_cmd_line())) + p.print_config("Worker model: %s" % self._options.worker_model) + p.print_config("") + + def _print_expected_results_of_type(self, result_summary, + result_type, result_type_str): + """Print the number of the tests in a given result class. + + Args: + result_summary - the object containing all the results to report on + result_type - the particular result type to report in the summary. + result_type_str - a string description of the result_type. 
+ """ + tests = self._expectations.get_tests_with_result_type(result_type) + now = result_summary.tests_by_timeline[test_expectations.NOW] + wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] + + # We use a fancy format string in order to print the data out in a + # nicely-aligned table. + fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" + % (self._num_digits(now), self._num_digits(wontfix))) + self._printer.print_expected(fmtstr % + (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) + + def _num_digits(self, num): + """Returns the number of digits needed to represent the length of a + sequence.""" + ndigits = 1 + if len(num): + ndigits = int(math.log10(len(num))) + 1 + return ndigits + + def _print_timing_statistics(self, total_time, thread_timings, + directory_test_timings, individual_test_timings, + result_summary): + """Record timing-specific information for the test run. + + Args: + total_time: total elapsed time (in seconds) for the test run + thread_timings: wall clock time each thread ran for + directory_test_timings: timing by directory + individual_test_timings: timing by file + result_summary: summary object for the test run + """ + self._printer.print_timing("Test timing:") + self._printer.print_timing(" %6.2f total testing time" % total_time) + self._printer.print_timing("") + self._printer.print_timing("Thread timing:") + cuml_time = 0 + for t in thread_timings: + self._printer.print_timing(" %10s: %5d tests, %6.2f secs" % + (t['name'], t['num_tests'], t['total_time'])) + cuml_time += t['total_time'] + self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" % + (cuml_time, cuml_time / int(self._options.child_processes))) + self._printer.print_timing("") + + self._print_aggregate_test_statistics(individual_test_timings) + self._print_individual_test_times(individual_test_timings, + result_summary) + self._print_directory_timings(directory_test_timings) + + def _print_aggregate_test_statistics(self, individual_test_timings): + """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. + Args: + individual_test_timings: List of TestResults for all tests. + """ + test_types = [] # Unit tests don't actually produce any timings. + if individual_test_timings: + test_types = individual_test_timings[0].time_for_diffs.keys() + times_for_dump_render_tree = [] + times_for_diff_processing = [] + times_per_test_type = {} + for test_type in test_types: + times_per_test_type[test_type] = [] + + for test_stats in individual_test_timings: + times_for_dump_render_tree.append(test_stats.test_run_time) + times_for_diff_processing.append( + test_stats.total_time_for_all_diffs) + time_for_diffs = test_stats.time_for_diffs + for test_type in test_types: + times_per_test_type[test_type].append( + time_for_diffs[test_type]) + + self._print_statistics_for_test_timings( + "PER TEST TIME IN TESTSHELL (seconds):", + times_for_dump_render_tree) + self._print_statistics_for_test_timings( + "PER TEST DIFF PROCESSING TIMES (seconds):", + times_for_diff_processing) + for test_type in test_types: + self._print_statistics_for_test_timings( + "PER TEST TIMES BY TEST TYPE: %s" % test_type, + times_per_test_type[test_type]) + + def _print_individual_test_times(self, individual_test_timings, + result_summary): + """Prints the run times for slow, timeout and crash tests. + Args: + individual_test_timings: List of TestStats for all tests. + result_summary: summary object for test run + """ + # Reverse-sort by the time spent in DumpRenderTree. 
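_print_expected_results_of_type() above builds its format string in two steps so the column widths match the data. A small self-contained illustration of the same technique, using made-up counts and a simplified digit helper:

import math

def num_digits(count):
    # Simplified variant of _num_digits(): width needed to print `count`.
    return int(math.log10(count)) + 1 if count else 1

now, wontfix = 1234, 56
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
          % (num_digits(now), num_digits(wontfix)))
print(fmtstr % (1290, "failures", now, wontfix))
# -> Expect:  1290 failures (1234 now, 56 wontfix)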
+ individual_test_timings.sort(lambda a, b: + cmp(b.test_run_time, a.test_run_time)) + + num_printed = 0 + slow_tests = [] + timeout_or_crash_tests = [] + unexpected_slow_tests = [] + for test_tuple in individual_test_timings: + filename = test_tuple.filename + is_timeout_crash_or_slow = False + if self._test_is_slow(filename): + is_timeout_crash_or_slow = True + slow_tests.append(test_tuple) + + if filename in result_summary.failures: + result = result_summary.results[filename].type + if (result == test_expectations.TIMEOUT or + result == test_expectations.CRASH): + is_timeout_crash_or_slow = True + timeout_or_crash_tests.append(test_tuple) + + if (not is_timeout_crash_or_slow and + num_printed < printing.NUM_SLOW_TESTS_TO_LOG): + num_printed = num_printed + 1 + unexpected_slow_tests.append(test_tuple) + + self._printer.print_timing("") + self._print_test_list_timing("%s slowest tests that are not " + "marked as SLOW and did not timeout/crash:" % + printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests) + self._printer.print_timing("") + self._print_test_list_timing("Tests marked as SLOW:", slow_tests) + self._printer.print_timing("") + self._print_test_list_timing("Tests that timed out or crashed:", + timeout_or_crash_tests) + self._printer.print_timing("") + + def _print_test_list_timing(self, title, test_list): + """Print timing info for each test. + + Args: + title: section heading + test_list: tests that fall in this section + """ + if self._printer.disabled('slowest'): + return + + self._printer.print_timing(title) + for test_tuple in test_list: + filename = test_tuple.filename[len( + self._port.layout_tests_dir()) + 1:] + filename = filename.replace('\\', '/') + test_run_time = round(test_tuple.test_run_time, 1) + self._printer.print_timing(" %s took %s seconds" % + (filename, test_run_time)) + + def _print_directory_timings(self, directory_test_timings): + """Print timing info by directory for any directories that + take > 10 seconds to run. + + Args: + directory_test_timing: time info for each directory + """ + timings = [] + for directory in directory_test_timings: + num_tests, time_for_directory = directory_test_timings[directory] + timings.append((round(time_for_directory, 1), directory, + num_tests)) + timings.sort() + + self._printer.print_timing("Time to process slowest subdirectories:") + min_seconds_to_print = 10 + for timing in timings: + if timing[0] > min_seconds_to_print: + self._printer.print_timing( + " %s took %s seconds to run %s tests." % (timing[1], + timing[0], timing[2])) + self._printer.print_timing("") + + def _print_statistics_for_test_timings(self, title, timings): + """Prints the median, mean and standard deviation of the values in + timings. + + Args: + title: Title for these timings. + timings: A list of floats representing times. 
+ """ + self._printer.print_timing(title) + timings.sort() + + num_tests = len(timings) + if not num_tests: + return + percentile90 = timings[int(.9 * num_tests)] + percentile99 = timings[int(.99 * num_tests)] + + if num_tests % 2 == 1: + median = timings[((num_tests - 1) / 2) - 1] + else: + lower = timings[num_tests / 2 - 1] + upper = timings[num_tests / 2] + median = (float(lower + upper)) / 2 + + mean = sum(timings) / num_tests + + for time in timings: + sum_of_deviations = math.pow(time - mean, 2) + + std_deviation = math.sqrt(sum_of_deviations / num_tests) + self._printer.print_timing(" Median: %6.3f" % median) + self._printer.print_timing(" Mean: %6.3f" % mean) + self._printer.print_timing(" 90th percentile: %6.3f" % percentile90) + self._printer.print_timing(" 99th percentile: %6.3f" % percentile99) + self._printer.print_timing(" Standard dev: %6.3f" % std_deviation) + self._printer.print_timing("") + + def _print_result_summary(self, result_summary): + """Print a short summary about how many tests passed. + + Args: + result_summary: information to log + """ + failed = len(result_summary.failures) + skipped = len( + result_summary.tests_by_expectation[test_expectations.SKIP]) + total = result_summary.total + passed = total - failed - skipped + pct_passed = 0.0 + if total > 0: + pct_passed = float(passed) * 100 / total + + self._printer.print_actual("") + self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" % + (passed, total, pct_passed)) + self._printer.print_actual("") + self._print_result_summary_entry(result_summary, + test_expectations.NOW, "Tests to be fixed") + + self._printer.print_actual("") + self._print_result_summary_entry(result_summary, + test_expectations.WONTFIX, + "Tests that will only be fixed if they crash (WONTFIX)") + self._printer.print_actual("") + + def _print_result_summary_entry(self, result_summary, timeline, + heading): + """Print a summary block of results for a particular timeline of test. + + Args: + result_summary: summary to print results for + timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
+ heading: a textual description of the timeline + """ + total = len(result_summary.tests_by_timeline[timeline]) + not_passing = (total - + len(result_summary.tests_by_expectation[test_expectations.PASS] & + result_summary.tests_by_timeline[timeline])) + self._printer.print_actual("=> %s (%d):" % (heading, not_passing)) + + for result in TestExpectationsFile.EXPECTATION_ORDER: + if result == test_expectations.PASS: + continue + results = (result_summary.tests_by_expectation[result] & + result_summary.tests_by_timeline[timeline]) + desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result] + if not_passing and len(results): + pct = len(results) * 100.0 / not_passing + self._printer.print_actual(" %5d %-24s (%4.1f%%)" % + (len(results), desc[len(results) != 1], pct)) + + def _results_html(self, test_files, failures, title="Test Failures", override_time=None): + """ + test_files = a list of file paths + failures = dictionary mapping test paths to failure objects + title = title printed at top of test + override_time = current time (used by unit tests) + """ + page = """<html> + <head> + <title>Layout Test Results (%(time)s)</title> + </head> + <body> + <h2>%(title)s (%(time)s)</h2> + """ % {'title': title, 'time': override_time or time.asctime()} + + for test_file in sorted(test_files): + test_name = self._port.relative_test_filename(test_file) + test_url = self._port.filename_to_uri(test_file) + page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name) + test_failures = failures.get(test_file, []) + for failure in test_failures: + page += (u" %s<br/>" % + failure.result_html_output(test_name)) + page += "</p>\n" + page += "</body></html>\n" + return page + + def _write_results_html_file(self, result_summary): + """Write results.html which is a summary of tests that failed. + + Args: + result_summary: a summary of the results :) + + Returns: + True if any results were written (since expected failures may be + omitted) + """ + # test failures + if self._options.full_results_html: + results_title = "Test Failures" + test_files = result_summary.failures.keys() + else: + results_title = "Unexpected Test Failures" + unexpected_failures = self._get_failures(result_summary, + include_crashes=True) + test_files = unexpected_failures.keys() + if not len(test_files): + return False + + out_filename = os.path.join(self._options.results_directory, + "results.html") + with codecs.open(out_filename, "w", "utf-8") as results_file: + html = self._results_html(test_files, result_summary.failures, results_title) + results_file.write(html) + + return True + + def _show_results_html_file(self): + """Shows the results.html page.""" + results_filename = os.path.join(self._options.results_directory, + "results.html") + self._port.show_results_html_file(results_filename) + + +def read_test_files(files): + tests = [] + for file in files: + try: + with codecs.open(file, 'r', 'utf-8') as file_contents: + # FIXME: This could be cleaner using a list comprehension. 
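One caution about _print_statistics_for_test_timings() above: the deviation loop assigns sum_of_deviations on every iteration instead of accumulating it, so the reported standard deviation only reflects the last timing, and the odd-count median index looks off by one. A corrected standalone sketch of the same statistics (an illustration, not a patch to the class):

import math

def timing_statistics(timings):
    # Returns (median, mean, std_deviation, 90th percentile, 99th percentile).
    timings = sorted(timings)
    n = len(timings)
    if not n:
        return None
    percentile90 = timings[int(.9 * n)]
    percentile99 = timings[int(.99 * n)]
    if n % 2 == 1:
        median = timings[(n - 1) // 2]
    else:
        median = (timings[n // 2 - 1] + timings[n // 2]) / 2.0
    mean = sum(timings) / float(n)
    sum_of_deviations = sum((t - mean) ** 2 for t in timings)
    return median, mean, math.sqrt(sum_of_deviations / n), percentile90, percentile99

# timing_statistics([1.0, 2.0, 2.0, 3.0, 7.0])
# -> (2.0, 3.0, 2.0976..., 7.0, 7.0)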
+ for line in file_contents: + line = test_expectations.strip_comments(line) + if line: + tests.append(line) + except IOError, e: + if e.errno == errno.ENOENT: + _log.critical('') + _log.critical('--test-list file "%s" not found' % file) + raise + return tests diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py new file mode 100644 index 0000000..3c564ae --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py @@ -0,0 +1,102 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for TestRunner().""" + +import unittest + +from webkitpy.thirdparty.mock import Mock + +import test_runner + + +class TestRunnerWrapper(test_runner.TestRunner): + def _get_test_input_for_file(self, test_file): + return test_file + + +class TestRunnerTest(unittest.TestCase): + def test_results_html(self): + mock_port = Mock() + mock_port.relative_test_filename = lambda name: name + mock_port.filename_to_uri = lambda name: name + + runner = test_runner.TestRunner(port=mock_port, options=Mock(), + printer=Mock()) + expected_html = u"""<html> + <head> + <title>Layout Test Results (time)</title> + </head> + <body> + <h2>Title (time)</h2> + <p><a href='test_path'>test_path</a><br /> +</p> +</body></html> +""" + html = runner._results_html(["test_path"], {}, "Title", override_time="time") + self.assertEqual(html, expected_html) + + def test_shard_tests(self): + # Test that _shard_tests in test_runner.TestRunner really + # put the http tests first in the queue. 
+ runner = TestRunnerWrapper(port=Mock(), options=Mock(), + printer=Mock()) + + test_list = [ + "LayoutTests/websocket/tests/unicode.htm", + "LayoutTests/animations/keyframes.html", + "LayoutTests/http/tests/security/view-source-no-refresh.html", + "LayoutTests/websocket/tests/websocket-protocol-ignored.html", + "LayoutTests/fast/css/display-none-inline-style-change-crash.html", + "LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html", + "LayoutTests/dom/html/level2/html/HTMLAnchorElement03.html", + "LayoutTests/ietestcenter/Javascript/11.1.5_4-4-c-1.html", + "LayoutTests/dom/html/level2/html/HTMLAnchorElement06.html", + ] + + expected_tests_to_http_lock = set([ + 'LayoutTests/websocket/tests/unicode.htm', + 'LayoutTests/http/tests/security/view-source-no-refresh.html', + 'LayoutTests/websocket/tests/websocket-protocol-ignored.html', + 'LayoutTests/http/tests/xmlhttprequest/supported-xml-content-types.html', + ]) + + # FIXME: Ideally the HTTP tests don't have to all be in one shard. + single_thread_results = runner._shard_tests(test_list, False) + multi_thread_results = runner._shard_tests(test_list, True) + + self.assertEqual("tests_to_http_lock", single_thread_results[0][0]) + self.assertEqual(expected_tests_to_http_lock, set(single_thread_results[0][1])) + self.assertEqual("tests_to_http_lock", multi_thread_results[0][0]) + self.assertEqual(expected_tests_to_http_lock, set(multi_thread_results[0][1])) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py new file mode 100644 index 0000000..e3ad6f4 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
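test_shard_tests above pins down the behaviour that matters: every test that needs the HTTP lock ends up in a single "tests_to_http_lock" shard at the front of the queue. A standalone sketch of that grouping, assuming the same path conventions (an illustration, not the TestRunner._shard_tests implementation):

def shard_tests(test_files):
    # Tests under http/tests/ or websocket/tests/ share one locked shard that
    # is queued first; everything else is grouped by directory.
    tests_to_http_lock = []
    shards = {}
    for test in test_files:
        if "/http/tests/" in test or "/websocket/tests/" in test:
            tests_to_http_lock.append(test)
        else:
            shards.setdefault(test.rsplit("/", 1)[0], []).append(test)
    test_lists = sorted(shards.items())
    if tests_to_http_lock:
        test_lists.insert(0, ("tests_to_http_lock", tests_to_http_lock))
    return test_lists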
+ +"""Port-specific entrypoints for the layout tests test infrastructure.""" + +from factory import get diff --git a/Tools/Scripts/webkitpy/layout_tests/port/apache_http_server.py b/Tools/Scripts/webkitpy/layout_tests/port/apache_http_server.py new file mode 100644 index 0000000..46617f6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/apache_http_server.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to start/stop the apache http server used by layout tests.""" + + +from __future__ import with_statement + +import codecs +import logging +import optparse +import os +import re +import subprocess +import sys + +import http_server_base + +_log = logging.getLogger("webkitpy.layout_tests.port.apache_http_server") + + +class LayoutTestApacheHttpd(http_server_base.HttpServerBase): + + def __init__(self, port_obj, output_dir): + """Args: + port_obj: handle to the platform-specific routines + output_dir: the absolute path to the layout test result directory + """ + http_server_base.HttpServerBase.__init__(self, port_obj) + self._output_dir = output_dir + self._httpd_proc = None + port_obj.maybe_make_directory(output_dir) + + self.mappings = [{'port': 8000}, + {'port': 8080}, + {'port': 8081}, + {'port': 8443, 'sslcert': True}] + + # The upstream .conf file assumed the existence of /tmp/WebKit for + # placing apache files like the lock file there. + self._runtime_path = os.path.join("/tmp", "WebKit") + port_obj.maybe_make_directory(self._runtime_path) + + # The PID returned when Apache is started goes away (due to dropping + # privileges?). The proper controlling PID is written to a file in the + # apache runtime directory. 
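As the comment above explains, the PID returned by Popen is not the one that ends up controlling Apache; the controlling PID has to be read back from httpd.pid, which is what stop() further down does. A minimal sketch of that read, assuming the same /tmp/WebKit layout:

import os

def read_httpd_pid(pid_file=os.path.join("/tmp", "WebKit", "httpd.pid")):
    # Returns the controlling httpd PID, or None if the server never started.
    if not os.path.exists(pid_file):
        return None
    with open(pid_file) as f:
        return int(f.readline())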
+ self._pid_file = os.path.join(self._runtime_path, 'httpd.pid') + + test_dir = self._port_obj.layout_tests_dir() + js_test_resources_dir = self._cygwin_safe_join(test_dir, "fast", "js", + "resources") + mime_types_path = self._cygwin_safe_join(test_dir, "http", "conf", + "mime.types") + cert_file = self._cygwin_safe_join(test_dir, "http", "conf", + "webkit-httpd.pem") + access_log = self._cygwin_safe_join(output_dir, "access_log.txt") + error_log = self._cygwin_safe_join(output_dir, "error_log.txt") + document_root = self._cygwin_safe_join(test_dir, "http", "tests") + + # FIXME: We shouldn't be calling a protected method of _port_obj! + executable = self._port_obj._path_to_apache() + if self._is_cygwin(): + executable = self._get_cygwin_path(executable) + + cmd = [executable, + '-f', "\"%s\"" % self._get_apache_config_file_path(test_dir, output_dir), + '-C', "\'DocumentRoot \"%s\"\'" % document_root, + '-c', "\'Alias /js-test-resources \"%s\"'" % js_test_resources_dir, + '-C', "\'Listen %s\'" % "127.0.0.1:8000", + '-C', "\'Listen %s\'" % "127.0.0.1:8081", + '-c', "\'TypesConfig \"%s\"\'" % mime_types_path, + '-c', "\'CustomLog \"%s\" common\'" % access_log, + '-c', "\'ErrorLog \"%s\"\'" % error_log, + '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME", + os.environ.get("USER", ""))] + + if self._is_cygwin(): + cygbin = self._port_obj._path_from_base('third_party', 'cygwin', + 'bin') + # Not entirely sure why, but from cygwin we need to run the + # httpd command through bash. + self._start_cmd = [ + os.path.join(cygbin, 'bash.exe'), + '-c', + 'PATH=%s %s' % (self._get_cygwin_path(cygbin), " ".join(cmd)), + ] + else: + # TODO(ojan): When we get cygwin using Apache 2, use set the + # cert file for cygwin as well. + cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file]) + # Join the string here so that Cygwin/Windows and Mac/Linux + # can use the same code. Otherwise, we could remove the single + # quotes above and keep cmd as a sequence. + self._start_cmd = " ".join(cmd) + + def _is_cygwin(self): + return sys.platform in ("win32", "cygwin") + + def _cygwin_safe_join(self, *parts): + """Returns a platform appropriate path.""" + path = os.path.join(*parts) + if self._is_cygwin(): + return self._get_cygwin_path(path) + return path + + def _get_cygwin_path(self, path): + """Convert a Windows path to a cygwin path. + + The cygpath utility insists on converting paths that it thinks are + Cygwin root paths to what it thinks the correct roots are. So paths + such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin" + are converted to plain "/usr/bin". To avoid this, we + do the conversion manually. + + The path is expected to be an absolute path, on any drive. + """ + drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE) + + def lower_drive(matchobj): + return '/cygdrive/%s/' % matchobj.group(1).lower() + path = drive_regexp.sub(lower_drive, path) + return path.replace('\\', '/') + + def _get_apache_config_file_path(self, test_dir, output_dir): + """Returns the path to the apache config file to use. + Args: + test_dir: absolute path to the LayoutTests directory. + output_dir: absolute path to the layout test results directory. 
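_get_cygwin_path() above avoids cygpath precisely so that checked-in paths are rewritten mechanically rather than remapped to Cygwin's own roots. A quick standalone check of the same drive-letter rewrite on a sample path (the method itself lives on the server object; this only exercises the regex idea):

import re

def to_cygwin_path(path):
    drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE)
    path = drive_regexp.sub(lambda m: '/cygdrive/%s/' % m.group(1).lower(), path)
    return path.replace('\\', '/')

print(to_cygwin_path('C:\\b\\slave\\webkit-release\\build'))
# -> /cygdrive/c/b/slave/webkit-release/build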
+ """ + httpd_config = self._port_obj._path_to_apache_config_file() + httpd_config_copy = os.path.join(output_dir, "httpd.conf") + # httpd.conf is always utf-8 according to http://archive.apache.org/gnats/10125 + with codecs.open(httpd_config, "r", "utf-8") as httpd_config_file: + httpd_conf = httpd_config_file.read() + if self._is_cygwin(): + # This is a gross hack, but it lets us use the upstream .conf file + # and our checked in cygwin. This tells the server the root + # directory to look in for .so modules. It will use this path + # plus the relative paths to the .so files listed in the .conf + # file. We have apache/cygwin checked into our tree so + # people don't have to install it into their cygwin. + cygusr = self._port_obj._path_from_base('third_party', 'cygwin', + 'usr') + httpd_conf = httpd_conf.replace('ServerRoot "/usr"', + 'ServerRoot "%s"' % self._get_cygwin_path(cygusr)) + + with codecs.open(httpd_config_copy, "w", "utf-8") as file: + file.write(httpd_conf) + + if self._is_cygwin(): + return self._get_cygwin_path(httpd_config_copy) + return httpd_config_copy + + def _get_virtual_host_config(self, document_root, port, ssl=False): + """Returns a <VirtualHost> directive block for an httpd.conf file. + It will listen to 127.0.0.1 on each of the given port. + """ + return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port, + 'DocumentRoot "%s"' % document_root, + ssl and 'SSLEngine On' or '', + '</VirtualHost>', '')) + + def _start_httpd_process(self): + """Starts the httpd process and returns whether there were errors.""" + # Use shell=True because we join the arguments into a string for + # the sake of Window/Cygwin and it needs quoting that breaks + # shell=False. + # FIXME: We should not need to be joining shell arguments into strings. + # shell=True is a trail of tears. + # Note: Not thread safe: http://bugs.python.org/issue2320 + self._httpd_proc = subprocess.Popen(self._start_cmd, + stderr=subprocess.PIPE, + shell=True) + err = self._httpd_proc.stderr.read() + if len(err): + _log.debug(err) + return False + return True + + def start(self): + """Starts the apache http server.""" + # Stop any currently running servers. + self.stop() + + _log.debug("Starting apache http server") + server_started = self.wait_for_action(self._start_httpd_process) + if server_started: + _log.debug("Apache started. Testing ports") + server_started = self.wait_for_action( + self.is_server_running_on_all_ports) + + if server_started: + _log.debug("Server successfully started") + else: + raise Exception('Failed to start http server') + + def stop(self): + """Stops the apache http server.""" + _log.debug("Shutting down any running http servers") + httpd_pid = None + if os.path.exists(self._pid_file): + httpd_pid = int(open(self._pid_file).readline()) + # FIXME: We shouldn't be calling a protected method of _port_obj! + self._port_obj._shut_down_http_server(httpd_pid) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py new file mode 100644 index 0000000..97b54c9 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py @@ -0,0 +1,862 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Abstract base class of Port-specific entrypoints for the layout tests +test infrastructure (the Port and Driver classes).""" + +import cgi +import difflib +import errno +import os +import shlex +import sys +import time + +import apache_http_server +import config as port_config +import http_lock +import http_server +import test_files +import websocket_server + +from webkitpy.common import system +from webkitpy.common.system import filesystem +from webkitpy.common.system import logutils +from webkitpy.common.system import path +from webkitpy.common.system.executive import Executive, ScriptError +from webkitpy.common.system.user import User + + +_log = logutils.get_logger(__file__) + + +class DummyOptions(object): + """Fake implementation of optparse.Values. Cloned from + webkitpy.tool.mocktool.MockOptions. + + """ + + def __init__(self, **kwargs): + # The caller can set option values using keyword arguments. We don't + # set any values by default because we don't know how this + # object will be used. Generally speaking unit tests should + # subclass this or provider wrapper functions that set a common + # set of options. + for key, value in kwargs.items(): + self.__dict__[key] = value + + +# FIXME: This class should merge with webkitpy.webkit_port at some point. +class Port(object): + """Abstract class for Port-specific hooks for the layout_test package.""" + + def __init__(self, port_name=None, options=None, + executive=None, + user=None, + filesystem=None, + config=None, + **kwargs): + self._name = port_name + self._options = options + if self._options is None: + # FIXME: Ideally we'd have a package-wide way to get a + # well-formed options object that had all of the necessary + # options defined on it. + self._options = DummyOptions() + self._executive = executive or Executive() + self._user = user or User() + self._filesystem = filesystem or system.filesystem.FileSystem() + self._config = config or port_config.Config(self._executive, + self._filesystem) + self._helper = None + self._http_server = None + self._webkit_base_dir = None + self._websocket_server = None + self._http_lock = None + + # Python's Popen has a bug that causes any pipes opened to a + # process that can't be executed to be leaked. 
Since this + # code is specifically designed to tolerate exec failures + # to gracefully handle cases where wdiff is not installed, + # the bug results in a massive file descriptor leak. As a + # workaround, if an exec failure is ever experienced for + # wdiff, assume it's not available. This will leak one + # file descriptor but that's better than leaking each time + # wdiff would be run. + # + # http://mail.python.org/pipermail/python-list/ + # 2008-August/505753.html + # http://bugs.python.org/issue3210 + self._wdiff_available = True + + self._pretty_patch_path = self.path_from_webkit_base("Websites", + "bugs.webkit.org", "PrettyPatch", "prettify.rb") + self._pretty_patch_available = True + self.set_option_default('configuration', None) + if self._options.configuration is None: + self._options.configuration = self.default_configuration() + + def default_child_processes(self): + """Return the number of DumpRenderTree instances to use for this + port.""" + return self._executive.cpu_count() + + def baseline_path(self): + """Return the absolute path to the directory to store new baselines + in for this port.""" + raise NotImplementedError('Port.baseline_path') + + def baseline_search_path(self): + """Return a list of absolute paths to directories to search under for + baselines. The directories are searched in order.""" + raise NotImplementedError('Port.baseline_search_path') + + def check_build(self, needs_http): + """This routine is used to ensure that the build is up to date + and all the needed binaries are present.""" + raise NotImplementedError('Port.check_build') + + def check_sys_deps(self, needs_http): + """If the port needs to do some runtime checks to ensure that the + tests can be run successfully, it should override this routine. + This step can be skipped with --nocheck-sys-deps. + + Returns whether the system is properly configured.""" + return True + + def check_image_diff(self, override_step=None, logging=True): + """This routine is used to check whether image_diff binary exists.""" + raise NotImplementedError('Port.check_image_diff') + + def check_pretty_patch(self): + """Checks whether we can use the PrettyPatch ruby script.""" + + # check if Ruby is installed + try: + result = self._executive.run_command(['ruby', '--version']) + except OSError, e: + if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]: + _log.error("Ruby is not installed; " + "can't generate pretty patches.") + _log.error('') + return False + + if not self.path_exists(self._pretty_patch_path): + _log.error('Unable to find %s .' % self._pretty_patch_path) + _log.error("Can't generate pretty patches.") + _log.error('') + return False + + return True + + def compare_text(self, expected_text, actual_text): + """Return whether or not the two strings are *not* equal. This + routine is used to diff text output. + + While this is a generic routine, we include it in the Port + interface so that it can be overriden for testing purposes.""" + return expected_text != actual_text + + def diff_image(self, expected_contents, actual_contents, + diff_filename=None, tolerance=0): + """Compare two images and produce a delta image file. + + Return True if the two images are different, False if they are the same. + Also produce a delta image of the two images and write that into + |diff_filename| if it is not None. + + |tolerance| should be a percentage value (0.0 - 100.0). + If it is omitted, the port default tolerance value is used. 
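check_pretty_patch() above probes for Ruby by simply trying to run it and treating ENOENT/EACCES as "not installed". A minimal standalone version of that probe (hypothetical helper name; the real code goes through Executive.run_command):

import errno
import subprocess

def tool_available(argv):
    # Run the tool once; a missing or unreadable binary raises OSError.
    try:
        proc = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        proc.communicate()
        return True
    except OSError, e:
        if e.errno in (errno.ENOENT, errno.EACCES):
            return False
        raise

# tool_available(['ruby', '--version']) -> True or False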
+ + """ + raise NotImplementedError('Port.diff_image') + + + def diff_text(self, expected_text, actual_text, + expected_filename, actual_filename): + """Returns a string containing the diff of the two text strings + in 'unified diff' format. + + While this is a generic routine, we include it in the Port + interface so that it can be overriden for testing purposes.""" + + # The filenames show up in the diff output, make sure they're + # raw bytes and not unicode, so that they don't trigger join() + # trying to decode the input. + def to_raw_bytes(str): + if isinstance(str, unicode): + return str.encode('utf-8') + return str + expected_filename = to_raw_bytes(expected_filename) + actual_filename = to_raw_bytes(actual_filename) + diff = difflib.unified_diff(expected_text.splitlines(True), + actual_text.splitlines(True), + expected_filename, + actual_filename) + return ''.join(diff) + + def driver_name(self): + """Returns the name of the actual binary that is performing the test, + so that it can be referred to in log messages. In most cases this + will be DumpRenderTree, but if a port uses a binary with a different + name, it can be overridden here.""" + return "DumpRenderTree" + + def expected_baselines(self, filename, suffix, all_baselines=False): + """Given a test name, finds where the baseline results are located. + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. + '.txt' or '.png'. This should not be None, but may be an empty + string. + all_baselines: If True, return an ordered list of all baseline paths + for the given platform. If False, return only the first one. + Returns + a list of ( platform_dir, results_filename ), where + platform_dir - abs path to the top of the results tree (or test + tree) + results_filename - relative path from top of tree to the results + file + (os.path.join of the two gives you the full path to the file, + unless None was returned.) + Return values will be in the format appropriate for the current + platform (e.g., "\\" for path separators on Windows). If the results + file is not found, then None will be returned for the directory, + but the expected relative pathname will still be returned. + + This routine is generic but lives here since it is used in + conjunction with the other baseline and filename routines that are + platform specific. + """ + testname = os.path.splitext(self.relative_test_filename(filename))[0] + + baseline_filename = testname + '-expected' + suffix + + baseline_search_path = self.baseline_search_path() + + baselines = [] + for platform_dir in baseline_search_path: + if self.path_exists(self._filesystem.join(platform_dir, + baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if not all_baselines and baselines: + return baselines + + # If it wasn't found in a platform directory, return the expected + # result in the test directory, even if no such file actually exists. + platform_dir = self.layout_tests_dir() + if self.path_exists(self._filesystem.join(platform_dir, + baseline_filename)): + baselines.append((platform_dir, baseline_filename)) + + if baselines: + return baselines + + return [(None, baseline_filename)] + + def expected_filename(self, filename, suffix): + """Given a test name, returns an absolute path to its expected results. + + If no expected results are found in any of the searched directories, + the directory in which the test itself is located will be returned. 
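diff_text() above is a thin wrapper over difflib.unified_diff; a quick standalone use of the same call with sample strings (the real method first coerces the filenames to raw bytes):

import difflib

expected = "line one\nline two\n"
actual = "line one\nline 2\n"
print(''.join(difflib.unified_diff(expected.splitlines(True),
                                   actual.splitlines(True),
                                   'expected.txt', 'actual.txt')))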
+ The return value is in the format appropriate for the platform + (e.g., "\\" for path separators on windows). + + Args: + filename: absolute filename to test file + suffix: file suffix of the expected results, including dot; e.g. '.txt' + or '.png'. This should not be None, but may be an empty string. + platform: the most-specific directory name to use to build the + search list of directories, e.g., 'chromium-win', or + 'chromium-mac-leopard' (we follow the WebKit format) + + This routine is generic but is implemented here to live alongside + the other baseline and filename manipulation routines. + """ + platform_dir, baseline_filename = self.expected_baselines( + filename, suffix)[0] + if platform_dir: + return self._filesystem.join(platform_dir, baseline_filename) + return self._filesystem.join(self.layout_tests_dir(), baseline_filename) + + def expected_checksum(self, test): + """Returns the checksum of the image we expect the test to produce, or None if it is a text-only test.""" + path = self.expected_filename(test, '.checksum') + if not self.path_exists(path): + return None + return self._filesystem.read_text_file(path) + + def expected_image(self, test): + """Returns the image we expect the test to produce.""" + path = self.expected_filename(test, '.png') + if not self.path_exists(path): + return None + return self._filesystem.read_binary_file(path) + + def expected_text(self, test): + """Returns the text output we expect the test to produce. + End-of-line characters are normalized to '\n'.""" + # FIXME: DRT output is actually utf-8, but since we don't decode the + # output from DRT (instead treating it as a binary string), we read the + # baselines as a binary string, too. + path = self.expected_filename(test, '.txt') + if not self.path_exists(path): + return '' + text = self._filesystem.read_binary_file(path) + return text.replace("\r\n", "\n") + + def filename_to_uri(self, filename): + """Convert a test file (which is an absolute path) to a URI.""" + LAYOUTTEST_HTTP_DIR = "http/tests/" + LAYOUTTEST_WEBSOCKET_DIR = "http/tests/websocket/tests/" + + relative_path = self.relative_test_filename(filename) + port = None + use_ssl = False + + if (relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR) + or relative_path.startswith(LAYOUTTEST_HTTP_DIR)): + relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):] + port = 8000 + + # Make http/tests/local run as local files. This is to mimic the + # logic in run-webkit-tests. + # + # TODO(dpranke): remove the media reference and the SSL reference? + if (port and not relative_path.startswith("local/") and + not relative_path.startswith("media/")): + if relative_path.startswith("ssl/"): + port += 443 + protocol = "https" + else: + protocol = "http" + return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path) + + return path.abspath_to_uri(os.path.abspath(filename)) + + def tests(self, paths): + """Return the list of tests found (relative to layout_tests_dir().""" + return test_files.find(self, paths) + + def test_dirs(self): + """Returns the list of top-level test directories. + + Used by --clobber-old-results.""" + layout_tests_dir = self.layout_tests_dir() + return filter(lambda x: self._filesystem.isdir(self._filesystem.join(layout_tests_dir, x)), + self._filesystem.listdir(layout_tests_dir)) + + def path_isdir(self, path): + """Return True if the path refers to a directory of tests.""" + # Used by test_expectations.py to apply rules to whole directories. 
+ return self._filesystem.isdir(path) + + def path_exists(self, path): + """Return True if the path refers to an existing test or baseline.""" + # Used by test_expectations.py to determine if an entry refers to a + # valid test and by printing.py to determine if baselines exist. + return self._filesystem.exists(path) + + def driver_cmd_line(self): + """Prints the DRT command line that will be used.""" + driver = self.create_driver(0) + return driver.cmd_line() + + def update_baseline(self, path, data, encoding): + """Updates the baseline for a test. + + Args: + path: the actual path to use for baseline, not the path to + the test. This function is used to update either generic or + platform-specific baselines, but we can't infer which here. + data: contents of the baseline. + encoding: file encoding to use for the baseline. + """ + # FIXME: remove the encoding parameter in favor of text/binary + # functions. + if encoding is None: + self._filesystem.write_binary_file(path, data) + else: + self._filesystem.write_text_file(path, data) + + def uri_to_test_name(self, uri): + """Return the base layout test name for a given URI. + + This returns the test name for a given URI, e.g., if you passed in + "file:///src/LayoutTests/fast/html/keygen.html" it would return + "fast/html/keygen.html". + + """ + test = uri + if uri.startswith("file:///"): + prefix = path.abspath_to_uri(self.layout_tests_dir()) + "/" + return test[len(prefix):] + + if uri.startswith("http://127.0.0.1:8880/"): + # websocket tests + return test.replace('http://127.0.0.1:8880/', '') + + if uri.startswith("http://"): + # regular HTTP test + return test.replace('http://127.0.0.1:8000/', 'http/tests/') + + if uri.startswith("https://"): + return test.replace('https://127.0.0.1:8443/', 'http/tests/') + + raise NotImplementedError('unknown url type: %s' % uri) + + def layout_tests_dir(self): + """Return the absolute path to the top of the LayoutTests directory.""" + return self.path_from_webkit_base('LayoutTests') + + def skips_layout_test(self, test_name): + """Figures out if the givent test is being skipped or not. + + Test categories are handled as well.""" + for test_or_category in self.skipped_layout_tests(): + if test_or_category == test_name: + return True + category = self._filesystem.join(self.layout_tests_dir(), + test_or_category) + if (self._filesystem.isdir(category) and + test_name.startswith(test_or_category)): + return True + return False + + def maybe_make_directory(self, *path): + """Creates the specified directory if it doesn't already exist.""" + self._filesystem.maybe_make_directory(*path) + + def name(self): + """Return the name of the port (e.g., 'mac', 'chromium-win-xp'). + + Note that this is different from the test_platform_name(), which + may be different (e.g., 'win-xp' instead of 'chromium-win-xp'.""" + return self._name + + def get_option(self, name, default_value=None): + # FIXME: Eventually we should not have to do a test for + # hasattr(), and we should be able to just do + # self.options.value. See additional FIXME in the constructor. 
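uri_to_test_name() above undoes the mapping that filename_to_uri() applies for the HTTP and websocket ports. A standalone sketch of just the HTTP cases (the file:// branch needs a real port object for layout_tests_dir(), so it is left out):

def http_uri_to_test_name(uri):
    if uri.startswith("http://127.0.0.1:8880/"):
        # websocket tests
        return uri.replace("http://127.0.0.1:8880/", "")
    if uri.startswith("https://127.0.0.1:8443/"):
        return uri.replace("https://127.0.0.1:8443/", "http/tests/")
    if uri.startswith("http://127.0.0.1:8000/"):
        # regular HTTP test
        return uri.replace("http://127.0.0.1:8000/", "http/tests/")
    raise ValueError("unknown url type: %s" % uri)

# http_uri_to_test_name("http://127.0.0.1:8000/security/view-source-no-refresh.html")
# -> "http/tests/security/view-source-no-refresh.html"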
+ if hasattr(self._options, name): + return getattr(self._options, name) + return default_value + + def set_option_default(self, name, default_value): + if not hasattr(self._options, name): + return setattr(self._options, name, default_value) + + def path_from_webkit_base(self, *comps): + """Returns the full path to path made by joining the top of the + WebKit source tree and the list of path components in |*comps|.""" + return self._config.path_from_webkit_base(*comps) + + def script_path(self, script_name): + return self._config.script_path(script_name) + + def path_to_test_expectations_file(self): + """Update the test expectations to the passed-in string. + + This is used by the rebaselining tool. Raises NotImplementedError + if the port does not use expectations files.""" + raise NotImplementedError('Port.path_to_test_expectations_file') + + def relative_test_filename(self, filename): + """Relative unix-style path for a filename under the LayoutTests + directory. Filenames outside the LayoutTests directory should raise + an error.""" + assert filename.startswith(self.layout_tests_dir()), "%s did not start with %s" % (filename, self.layout_tests_dir()) + return filename[len(self.layout_tests_dir()) + 1:] + + def results_directory(self): + """Absolute path to the place to store the test results.""" + raise NotImplementedError('Port.results_directory') + + def setup_test_run(self): + """Perform port-specific work at the beginning of a test run.""" + pass + + def setup_environ_for_server(self): + """Perform port-specific work at the beginning of a server launch. + + Returns: + Operating-system's environment. + """ + return os.environ.copy() + + def show_results_html_file(self, results_filename): + """This routine should display the HTML file pointed at by + results_filename in a users' browser.""" + return self._user.open_url(results_filename) + + def create_driver(self, worker_number): + """Return a newly created base.Driver subclass for starting/stopping + the test driver.""" + raise NotImplementedError('Port.create_driver') + + def start_helper(self): + """If a port needs to reconfigure graphics settings or do other + things to ensure a known test configuration, it should override this + method.""" + pass + + def start_http_server(self): + """Start a web server if it is available. Do nothing if + it isn't. This routine is allowed to (and may) fail if a server + is already running.""" + if self.get_option('use_apache'): + self._http_server = apache_http_server.LayoutTestApacheHttpd(self, + self.get_option('results_directory')) + else: + self._http_server = http_server.Lighttpd(self, + self.get_option('results_directory')) + self._http_server.start() + + def start_websocket_server(self): + """Start a websocket server if it is available. Do nothing if + it isn't. This routine is allowed to (and may) fail if a server + is already running.""" + self._websocket_server = websocket_server.PyWebSocket(self, + self.get_option('results_directory')) + self._websocket_server.start() + + def acquire_http_lock(self): + self._http_lock = http_lock.HttpLock(None) + self._http_lock.wait_for_httpd_lock() + + def stop_helper(self): + """Shut down the test helper if it is running. Do nothing if + it isn't, or it isn't available. If a port overrides start_helper() + it must override this routine as well.""" + pass + + def stop_http_server(self): + """Shut down the http server if it is running. 
Do nothing if + it isn't, or it isn't available.""" + if self._http_server: + self._http_server.stop() + + def stop_websocket_server(self): + """Shut down the websocket server if it is running. Do nothing if + it isn't, or it isn't available.""" + if self._websocket_server: + self._websocket_server.stop() + + def release_http_lock(self): + if self._http_lock: + self._http_lock.cleanup_http_lock() + + def test_expectations(self): + """Returns the test expectations for this port. + + Basically this string should contain the equivalent of a + test_expectations file. See test_expectations.py for more details.""" + raise NotImplementedError('Port.test_expectations') + + def test_expectations_overrides(self): + """Returns an optional set of overrides for the test_expectations. + + This is used by ports that have code in two repositories, and where + it is possible that you might need "downstream" expectations that + temporarily override the "upstream" expectations until the port can + sync up the two repos.""" + return None + + def test_base_platform_names(self): + """Return a list of the 'base' platforms on your port. The base + platforms represent different architectures, operating systems, + or implementations (as opposed to different versions of a single + platform). For example, 'mac' and 'win' might be different base + platforms, wherease 'mac-tiger' and 'mac-leopard' might be + different platforms. This routine is used by the rebaselining tool + and the dashboards, and the strings correspond to the identifiers + in your test expectations (*not* necessarily the platform names + themselves).""" + raise NotImplementedError('Port.base_test_platforms') + + def test_platform_name(self): + """Returns the string that corresponds to the given platform name + in the test expectations. This may be the same as name(), or it + may be different. For example, chromium returns 'mac' for + 'chromium-mac'.""" + raise NotImplementedError('Port.test_platform_name') + + def test_platforms(self): + """Returns the list of test platform identifiers as used in the + test_expectations and on dashboards, the rebaselining tool, etc. + + Note that this is not necessarily the same as the list of ports, + which must be globally unique (e.g., both 'chromium-mac' and 'mac' + might return 'mac' as a test_platform name'.""" + raise NotImplementedError('Port.platforms') + + def test_platform_name_to_name(self, test_platform_name): + """Returns the Port platform name that corresponds to the name as + referenced in the expectations file. E.g., "mac" returns + "chromium-mac" on the Chromium ports.""" + raise NotImplementedError('Port.test_platform_name_to_name') + + def version(self): + """Returns a string indicating the version of a given platform, e.g. + '-leopard' or '-xp'. + + This is used to help identify the exact port when parsing test + expectations, determining search paths, and logging information.""" + raise NotImplementedError('Port.version') + + def test_repository_paths(self): + """Returns a list of (repository_name, repository_path) tuples + of its depending code base. By default it returns a list that only + contains a ('webkit', <webkitRepossitoryPath>) tuple. 
+ """ + return [('webkit', self.layout_tests_dir())] + + + _WDIFF_DEL = '##WDIFF_DEL##' + _WDIFF_ADD = '##WDIFF_ADD##' + _WDIFF_END = '##WDIFF_END##' + + def _format_wdiff_output_as_html(self, wdiff): + wdiff = cgi.escape(wdiff) + wdiff = wdiff.replace(self._WDIFF_DEL, "<span class=del>") + wdiff = wdiff.replace(self._WDIFF_ADD, "<span class=add>") + wdiff = wdiff.replace(self._WDIFF_END, "</span>") + html = "<head><style>.del { background: #faa; } " + html += ".add { background: #afa; }</style></head>" + html += "<pre>%s</pre>" % wdiff + return html + + def _wdiff_command(self, actual_filename, expected_filename): + executable = self._path_to_wdiff() + return [executable, + "--start-delete=%s" % self._WDIFF_DEL, + "--end-delete=%s" % self._WDIFF_END, + "--start-insert=%s" % self._WDIFF_ADD, + "--end-insert=%s" % self._WDIFF_END, + actual_filename, + expected_filename] + + @staticmethod + def _handle_wdiff_error(script_error): + # Exit 1 means the files differed, any other exit code is an error. + if script_error.exit_code != 1: + raise script_error + + def _run_wdiff(self, actual_filename, expected_filename): + """Runs wdiff and may throw exceptions. + This is mostly a hook for unit testing.""" + # Diffs are treated as binary as they may include multiple files + # with conflicting encodings. Thus we do not decode the output. + command = self._wdiff_command(actual_filename, expected_filename) + wdiff = self._executive.run_command(command, decode_output=False, + error_handler=self._handle_wdiff_error) + return self._format_wdiff_output_as_html(wdiff) + + def wdiff_text(self, actual_filename, expected_filename): + """Returns a string of HTML indicating the word-level diff of the + contents of the two filenames. Returns an empty string if word-level + diffing isn't available.""" + if not self._wdiff_available: + return "" + try: + # It's possible to raise a ScriptError we pass wdiff invalid paths. + return self._run_wdiff(actual_filename, expected_filename) + except OSError, e: + if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]: + # Silently ignore cases where wdiff is missing. + self._wdiff_available = False + return "" + raise + + # This is a class variable so we can test error output easily. + _pretty_patch_error_html = "Failed to run PrettyPatch, see error log." + + def pretty_patch_text(self, diff_path): + if not self._pretty_patch_available: + return self._pretty_patch_error_html + command = ("ruby", "-I", os.path.dirname(self._pretty_patch_path), + self._pretty_patch_path, diff_path) + try: + # Diffs are treated as binary (we pass decode_output=False) as they + # may contain multiple files of conflicting encodings. + return self._executive.run_command(command, decode_output=False) + except OSError, e: + # If the system is missing ruby log the error and stop trying. + self._pretty_patch_available = False + _log.error("Failed to run PrettyPatch (%s): %s" % (command, e)) + return self._pretty_patch_error_html + except ScriptError, e: + # If ruby failed to run for some reason, log the command + # output and stop trying. + self._pretty_patch_available = False + _log.error("Failed to run PrettyPatch (%s):\n%s" % (command, + e.message_with_output())) + return self._pretty_patch_error_html + + def default_configuration(self): + return self._config.default_configuration() + + # + # PROTECTED ROUTINES + # + # The routines below should only be called by routines in this class + # or any of its subclasses. 
+ # + def _webkit_build_directory(self, args): + return self._config.build_directory(args[0]) + + def _path_to_apache(self): + """Returns the full path to the apache binary. + + This is needed only by ports that use the apache_http_server module.""" + raise NotImplementedError('Port.path_to_apache') + + def _path_to_apache_config_file(self): + """Returns the full path to the apache config file. + + This is needed only by ports that use the apache_http_server module.""" + raise NotImplementedError('Port.path_to_apache_config_file') + + def _path_to_driver(self, configuration=None): + """Returns the full path to the test driver (DumpRenderTree).""" + raise NotImplementedError('Port._path_to_driver') + + def _path_to_webcore_library(self): + """Returns the full path to a built copy of WebCore.""" + raise NotImplementedError('Port.path_to_webcore_library') + + def _path_to_helper(self): + """Returns the full path to the layout_test_helper binary, which + is used to help configure the system for the test run, or None + if no helper is needed. + + This is likely only used by start/stop_helper().""" + raise NotImplementedError('Port._path_to_helper') + + def _path_to_image_diff(self): + """Returns the full path to the image_diff binary, or None if it + is not available. + + This is likely used only by diff_image().""" + raise NotImplementedError('Port.path_to_image_diff') + + def _path_to_lighttpd(self): + """Returns the path to the LigHTTPd binary. + + This is needed only by ports that use the http_server.py module.""" + raise NotImplementedError('Port._path_to_lighttpd') + + def _path_to_lighttpd_modules(self): + """Returns the path to the LigHTTPd modules directory. + + This is needed only by ports that use the http_server.py module.""" + raise NotImplementedError('Port._path_to_lighttpd_modules') + + def _path_to_lighttpd_php(self): + """Returns the path to the LigHTTPd PHP executable. + + This is needed only by ports that use the http_server.py module.""" + raise NotImplementedError('Port._path_to_lighttpd_php') + + def _path_to_wdiff(self): + """Returns the full path to the wdiff binary, or None if it is + not available. + + This is likely used only by wdiff_text().""" + raise NotImplementedError('Port._path_to_wdiff') + + def _shut_down_http_server(self, pid): + """Forcefully and synchronously kills the web server. + + This routine should only be called from http_server.py or its + subclasses.""" + raise NotImplementedError('Port._shut_down_http_server') + + def _webkit_baseline_path(self, platform): + """Return the full path to the top of the baseline tree for a + given platform.""" + return self._filesystem.join(self.layout_tests_dir(), 'platform', + platform) + + +class Driver: + """Abstract interface to the DumpRenderTree test driver.""" + + def __init__(self, port, worker_number): + """Initialize a Driver to subsequently run tests. + + Typically this routine will spawn DumpRenderTree in a config + ready for subsequent input. + + port - reference back to the port object. + worker_number - identifier for a particular worker/driver instance + """ + raise NotImplementedError('Driver.__init__') + + def run_test(self, test_input): + """Run a single test and return the results. + + Note that it is okay if a test times out or crashes and leaves + the driver in an indeterminate state. The upper layers of the program + are responsible for cleaning up and ensuring things are okay. + + Args: + test_input: a TestInput object + + Returns a TestOutput object.
+ """ + raise NotImplementedError('Driver.run_test') + + # FIXME: This is static so we can test it w/o creating a Base instance. + @classmethod + def _command_wrapper(cls, wrapper_option): + # Hook for injecting valgrind or other runtime instrumentation, + # used by e.g. tools/valgrind/valgrind_tests.py. + wrapper = [] + browser_wrapper = os.environ.get("BROWSER_WRAPPER", None) + if browser_wrapper: + # FIXME: There seems to be no reason to use BROWSER_WRAPPER over --wrapper. + # Remove this code any time after the date listed below. + _log.error("BROWSER_WRAPPER is deprecated, please use --wrapper instead.") + _log.error("BROWSER_WRAPPER will be removed any time after June 1st 2010 and your scripts will break.") + wrapper += [browser_wrapper] + + if wrapper_option: + wrapper += shlex.split(wrapper_option) + return wrapper + + def poll(self): + """Returns None if the Driver is still running. Returns the returncode + if it has exited.""" + raise NotImplementedError('Driver.poll') + + def stop(self): + raise NotImplementedError('Driver.stop') diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py new file mode 100644 index 0000000..8d586e3 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py @@ -0,0 +1,315 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import optparse +import os +import sys +import tempfile +import unittest + +from webkitpy.common.system.executive import Executive, ScriptError +from webkitpy.common.system import executive_mock +from webkitpy.common.system import filesystem +from webkitpy.common.system import outputcapture +from webkitpy.common.system.path import abspath_to_uri +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool import mocktool + +import base +import config +import config_mock + + +class PortTest(unittest.TestCase): + def test_format_wdiff_output_as_html(self): + output = "OUTPUT %s %s %s" % (base.Port._WDIFF_DEL, base.Port._WDIFF_ADD, base.Port._WDIFF_END) + html = base.Port()._format_wdiff_output_as_html(output) + expected_html = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre>OUTPUT <span class=del> <span class=add> </span></pre>" + self.assertEqual(html, expected_html) + + def test_wdiff_command(self): + port = base.Port() + port._path_to_wdiff = lambda: "/path/to/wdiff" + command = port._wdiff_command("/actual/path", "/expected/path") + expected_command = [ + "/path/to/wdiff", + "--start-delete=##WDIFF_DEL##", + "--end-delete=##WDIFF_END##", + "--start-insert=##WDIFF_ADD##", + "--end-insert=##WDIFF_END##", + "/actual/path", + "/expected/path", + ] + self.assertEqual(command, expected_command) + + def _file_with_contents(self, contents, encoding="utf-8"): + new_file = tempfile.NamedTemporaryFile() + new_file.write(contents.encode(encoding)) + new_file.flush() + return new_file + + def test_pretty_patch_os_error(self): + port = base.Port(executive=executive_mock.MockExecutive2(exception=OSError)) + oc = outputcapture.OutputCapture() + oc.capture_output() + self.assertEqual(port.pretty_patch_text("patch.txt"), + port._pretty_patch_error_html) + + # This tests repeated calls to make sure we cache the result. + self.assertEqual(port.pretty_patch_text("patch.txt"), + port._pretty_patch_error_html) + oc.restore_output() + + def test_pretty_patch_script_error(self): + # FIXME: This is some ugly white-box test hacking ... + base._pretty_patch_available = True + port = base.Port(executive=executive_mock.MockExecutive2(exception=ScriptError)) + self.assertEqual(port.pretty_patch_text("patch.txt"), + port._pretty_patch_error_html) + + # This tests repeated calls to make sure we cache the result. + self.assertEqual(port.pretty_patch_text("patch.txt"), + port._pretty_patch_error_html) + + def test_run_wdiff(self): + executive = Executive() + # This may fail on some systems. We could ask the port + # object for the wdiff path, but since we don't know what + # port object to use, this is sufficient for now. + try: + wdiff_path = executive.run_command(["which", "wdiff"]).rstrip() + except Exception, e: + wdiff_path = None + + port = base.Port() + port._path_to_wdiff = lambda: wdiff_path + + if wdiff_path: + # "with tempfile.NamedTemporaryFile() as actual" does not seem to work in Python 2.5 + actual = self._file_with_contents(u"foo") + expected = self._file_with_contents(u"bar") + wdiff = port._run_wdiff(actual.name, expected.name) + expected_wdiff = "<head><style>.del { background: #faa; } .add { background: #afa; }</style></head><pre><span class=del>foo</span><span class=add>bar</span></pre>" + self.assertEqual(wdiff, expected_wdiff) + # Running the full wdiff_text method should give the same result. + port._wdiff_available = True # In case it's somehow already disabled. 
+ wdiff = port.wdiff_text(actual.name, expected.name) + self.assertEqual(wdiff, expected_wdiff) + # wdiff should still be available after running wdiff_text with a valid diff. + self.assertTrue(port._wdiff_available) + actual.close() + expected.close() + + # Bogus paths should raise a script error. + self.assertRaises(ScriptError, port._run_wdiff, "/does/not/exist", "/does/not/exist2") + self.assertRaises(ScriptError, port.wdiff_text, "/does/not/exist", "/does/not/exist2") + # wdiff will still be available after running wdiff_text with invalid paths. + self.assertTrue(port._wdiff_available) + base._wdiff_available = True + + # If wdiff does not exist _run_wdiff should throw an OSError. + port._path_to_wdiff = lambda: "/invalid/path/to/wdiff" + self.assertRaises(OSError, port._run_wdiff, "foo", "bar") + + # wdiff_text should not throw an error if wdiff does not exist. + self.assertEqual(port.wdiff_text("foo", "bar"), "") + # However wdiff should not be available after running wdiff_text if wdiff is missing. + self.assertFalse(port._wdiff_available) + + def test_diff_text(self): + port = base.Port() + # Make sure that we don't run into decoding exceptions when the + # filenames are unicode, with regular or malformed input (expected or + # actual input is always raw bytes, not unicode). + port.diff_text('exp', 'act', 'exp.txt', 'act.txt') + port.diff_text('exp', 'act', u'exp.txt', 'act.txt') + port.diff_text('exp', 'act', u'a\xac\u1234\u20ac\U00008000', 'act.txt') + + port.diff_text('exp' + chr(255), 'act', 'exp.txt', 'act.txt') + port.diff_text('exp' + chr(255), 'act', u'exp.txt', 'act.txt') + + # Though expected and actual files should always be read in with no + # encoding (and be stored as str objects), test unicode inputs just to + # be safe. + port.diff_text(u'exp', 'act', 'exp.txt', 'act.txt') + port.diff_text( + u'a\xac\u1234\u20ac\U00008000', 'act', 'exp.txt', 'act.txt') + + # And make sure we actually get diff output. + diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt') + self.assertTrue('foo' in diff) + self.assertTrue('bar' in diff) + self.assertTrue('exp.txt' in diff) + self.assertTrue('act.txt' in diff) + self.assertFalse('nosuchthing' in diff) + + def test_default_configuration_notfound(self): + # Test that we delegate to the config object properly. + port = base.Port(config=config_mock.MockConfig(default_configuration='default')) + self.assertEqual(port.default_configuration(), 'default') + + def test_layout_tests_skipping(self): + port = base.Port() + port.skipped_layout_tests = lambda: ['foo/bar.html', 'media'] + self.assertTrue(port.skips_layout_test('foo/bar.html')) + self.assertTrue(port.skips_layout_test('media/video-zoom.html')) + self.assertFalse(port.skips_layout_test('foo/foo.html')) + + def test_setup_test_run(self): + port = base.Port() + # This routine is a no-op. We just test it for coverage. + port.setup_test_run() + + def test_test_dirs(self): + port = base.Port() + dirs = port.test_dirs() + self.assertTrue('canvas' in dirs) + self.assertTrue('css2.1' in dirs) + + def test_filename_to_uri(self): + port = base.Port() + layout_test_dir = port.layout_tests_dir() + test_file = os.path.join(layout_test_dir, "foo", "bar.html") + + # On Windows, absolute paths are of the form "c:\foo.txt". However, + # all current browsers (except for Opera) normalize file URLs by + # prepending an additional "/" as if the absolute path was + # "/c:/foo.txt". This means that all file URLs end up with "file:///" + # at the beginning. 
+ if sys.platform == 'win32': + prefix = "file:///" + path = test_file.replace("\\", "/") + else: + prefix = "file://" + path = test_file + + self.assertEqual(port.filename_to_uri(test_file), + abspath_to_uri(test_file)) + + def test_get_option__set(self): + options, args = optparse.OptionParser().parse_args([]) + options.foo = 'bar' + port = base.Port(options=options) + self.assertEqual(port.get_option('foo'), 'bar') + + def test_get_option__unset(self): + port = base.Port() + self.assertEqual(port.get_option('foo'), None) + + def test_get_option__default(self): + port = base.Port() + self.assertEqual(port.get_option('foo', 'bar'), 'bar') + + def test_set_option_default__unset(self): + port = base.Port() + port.set_option_default('foo', 'bar') + self.assertEqual(port.get_option('foo'), 'bar') + + def test_set_option_default__set(self): + options, args = optparse.OptionParser().parse_args([]) + options.foo = 'bar' + port = base.Port(options=options) + # This call should have no effect. + port.set_option_default('foo', 'new_bar') + self.assertEqual(port.get_option('foo'), 'bar') + + def test_name__unset(self): + port = base.Port() + self.assertEqual(port.name(), None) + + def test_name__set(self): + port = base.Port(port_name='foo') + self.assertEqual(port.name(), 'foo') + + +class VirtualTest(unittest.TestCase): + """Tests that various methods expected to be virtual are.""" + def assertVirtual(self, method, *args, **kwargs): + self.assertRaises(NotImplementedError, method, *args, **kwargs) + + def test_virtual_methods(self): + port = base.Port() + self.assertVirtual(port.baseline_path) + self.assertVirtual(port.baseline_search_path) + self.assertVirtual(port.check_build, None) + self.assertVirtual(port.check_image_diff) + self.assertVirtual(port.create_driver, 0) + self.assertVirtual(port.diff_image, None, None) + self.assertVirtual(port.path_to_test_expectations_file) + self.assertVirtual(port.test_platform_name) + self.assertVirtual(port.results_directory) + self.assertVirtual(port.test_expectations) + self.assertVirtual(port.test_base_platform_names) + self.assertVirtual(port.test_platform_name) + self.assertVirtual(port.test_platforms) + self.assertVirtual(port.test_platform_name_to_name, None) + self.assertVirtual(port.version) + self.assertVirtual(port._path_to_apache) + self.assertVirtual(port._path_to_apache_config_file) + self.assertVirtual(port._path_to_driver) + self.assertVirtual(port._path_to_helper) + self.assertVirtual(port._path_to_image_diff) + self.assertVirtual(port._path_to_lighttpd) + self.assertVirtual(port._path_to_lighttpd_modules) + self.assertVirtual(port._path_to_lighttpd_php) + self.assertVirtual(port._path_to_wdiff) + self.assertVirtual(port._shut_down_http_server, None) + + def test_virtual_driver_method(self): + self.assertRaises(NotImplementedError, base.Driver, base.Port(), + 0) + + def test_virtual_driver_methods(self): + class VirtualDriver(base.Driver): + def __init__(self): + pass + + driver = VirtualDriver() + self.assertVirtual(driver.run_test, None) + self.assertVirtual(driver.poll) + self.assertVirtual(driver.stop) + + +class DriverTest(unittest.TestCase): + + def _assert_wrapper(self, wrapper_string, expected_wrapper): + wrapper = base.Driver._command_wrapper(wrapper_string) + self.assertEqual(wrapper, expected_wrapper) + + def test_command_wrapper(self): + self._assert_wrapper(None, []) + self._assert_wrapper("valgrind", ["valgrind"]) + + # Validate that shlex works as expected. 
+ command_with_spaces = "valgrind --smc-check=\"check with spaces!\" --foo" + expected_parse = ["valgrind", "--smc-check=check with spaces!", "--foo"] + self._assert_wrapper(command_with_spaces, expected_parse) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py new file mode 100644 index 0000000..012e9cc --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py @@ -0,0 +1,546 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Chromium implementations of the Port interface.""" + +from __future__ import with_statement + +import codecs +import errno +import logging +import os +import re +import shutil +import signal +import subprocess +import sys +import tempfile +import time +import webbrowser + +from webkitpy.common.system.path import cygpath +from webkitpy.layout_tests.layout_package import test_expectations +from webkitpy.layout_tests.layout_package import test_output + +import base +import http_server + +# Chromium DRT on OSX uses WebKitDriver. +if sys.platform == 'darwin': + import webkit + +import websocket_server + +_log = logging.getLogger("webkitpy.layout_tests.port.chromium") + + +# FIXME: This function doesn't belong in this package. +def check_file_exists(path_to_file, file_description, override_step=None, + logging=True): + """Verify the file is present where expected or log an error. + + Args: + path_to_file: The full path of the file to check for. + file_description: The (human friendly) name or description of the file + you're looking for (e.g., "HTTP Server"). Used for error logging. + override_step: An optional string to be logged if the check fails.
+ logging: Whether or not log the error messages.""" + if not os.path.exists(path_to_file): + if logging: + _log.error('Unable to find %s' % file_description) + _log.error(' at %s' % path_to_file) + if override_step: + _log.error(' %s' % override_step) + _log.error('') + return False + return True + + +class ChromiumPort(base.Port): + """Abstract base class for Chromium implementations of the Port class.""" + + def __init__(self, **kwargs): + base.Port.__init__(self, **kwargs) + self._chromium_base_dir = None + + def baseline_path(self): + return self._webkit_baseline_path(self._name) + + def check_build(self, needs_http): + result = True + + dump_render_tree_binary_path = self._path_to_driver() + result = check_file_exists(dump_render_tree_binary_path, + 'test driver') and result + if result and self.get_option('build'): + result = self._check_driver_build_up_to_date( + self.get_option('configuration')) + else: + _log.error('') + + helper_path = self._path_to_helper() + if helper_path: + result = check_file_exists(helper_path, + 'layout test helper') and result + + if self.get_option('pixel_tests'): + result = self.check_image_diff( + 'To override, invoke with --no-pixel-tests') and result + + # It's okay if pretty patch isn't available, but we will at + # least log a message. + self.check_pretty_patch() + + return result + + def check_sys_deps(self, needs_http): + cmd = [self._path_to_driver(), '--check-layout-test-sys-deps'] + if self._executive.run_command(cmd, return_exit_code=True): + _log.error('System dependencies check failed.') + _log.error('To override, invoke with --nocheck-sys-deps') + _log.error('') + return False + return True + + def check_image_diff(self, override_step=None, logging=True): + image_diff_path = self._path_to_image_diff() + return check_file_exists(image_diff_path, 'image diff exe', + override_step, logging) + + def diff_image(self, expected_contents, actual_contents, + diff_filename=None): + executable = self._path_to_image_diff() + + tempdir = tempfile.mkdtemp() + expected_filename = os.path.join(tempdir, "expected.png") + with open(expected_filename, 'w+b') as file: + file.write(expected_contents) + actual_filename = os.path.join(tempdir, "actual.png") + with open(actual_filename, 'w+b') as file: + file.write(actual_contents) + + if diff_filename: + cmd = [executable, '--diff', expected_filename, + actual_filename, diff_filename] + else: + cmd = [executable, expected_filename, actual_filename] + + result = True + try: + exit_code = self._executive.run_command(cmd, return_exit_code=True) + if exit_code == 0: + # The images are the same. + result = False + elif exit_code != 1: + _log.error("image diff returned an exit code of " + + str(exit_code)) + # Returning False here causes the script to think that we + # successfully created the diff even though we didn't. If + # we return True, we think that the images match but the hashes + # don't match. + # FIXME: Figure out why image_diff returns other values. 
+ result = False + except OSError, e: + if e.errno == errno.ENOENT or e.errno == errno.EACCES: + _compare_available = False + else: + raise e + finally: + shutil.rmtree(tempdir, ignore_errors=True) + return result + + def driver_name(self): + if self._options.use_test_shell: + return "test_shell" + return "DumpRenderTree" + + def path_from_chromium_base(self, *comps): + """Returns the full path to path made by joining the top of the + Chromium source tree and the list of path components in |*comps|.""" + if not self._chromium_base_dir: + abspath = os.path.abspath(__file__) + offset = abspath.find('third_party') + if offset == -1: + self._chromium_base_dir = os.path.join( + abspath[0:abspath.find('Tools')], + 'WebKit', 'chromium') + else: + self._chromium_base_dir = abspath[0:offset] + return os.path.join(self._chromium_base_dir, *comps) + + def path_to_test_expectations_file(self): + return self.path_from_webkit_base('LayoutTests', 'platform', + 'chromium', 'test_expectations.txt') + + def results_directory(self): + try: + return self.path_from_chromium_base('webkit', + self.get_option('configuration'), + self.get_option('results_directory')) + except AssertionError: + return self._build_path(self.get_option('configuration'), + self.get_option('results_directory')) + + def setup_test_run(self): + # Delete the disk cache if any to ensure a clean test run. + dump_render_tree_binary_path = self._path_to_driver() + cachedir = os.path.split(dump_render_tree_binary_path)[0] + cachedir = os.path.join(cachedir, "cache") + if os.path.exists(cachedir): + shutil.rmtree(cachedir) + + def create_driver(self, worker_number): + """Starts a new Driver and returns a handle to it.""" + if not self.get_option('use_test_shell') and sys.platform == 'darwin': + return webkit.WebKitDriver(self, worker_number) + return ChromiumDriver(self, worker_number) + + def start_helper(self): + helper_path = self._path_to_helper() + if helper_path: + _log.debug("Starting layout helper %s" % helper_path) + # Note: Not thread safe: http://bugs.python.org/issue2320 + self._helper = subprocess.Popen([helper_path], + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None) + is_ready = self._helper.stdout.readline() + if not is_ready.startswith('ready'): + _log.error("layout_test_helper failed to be ready") + + def stop_helper(self): + if self._helper: + _log.debug("Stopping layout test helper") + self._helper.stdin.write("x\n") + self._helper.stdin.close() + # wait() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + self._helper.wait() + + def test_base_platform_names(self): + return ('linux', 'mac', 'win') + + def test_expectations(self): + """Returns the test expectations for this port. + + Basically this string should contain the equivalent of a + test_expectations file. 
See test_expectations.py for more details.""" + expectations_path = self.path_to_test_expectations_file() + with codecs.open(expectations_path, "r", "utf-8") as file: + return file.read() + + def test_expectations_overrides(self): + try: + overrides_path = self.path_from_chromium_base('webkit', 'tools', + 'layout_tests', 'test_expectations.txt') + except AssertionError: + return None + if not os.path.exists(overrides_path): + return None + with codecs.open(overrides_path, "r", "utf-8") as file: + return file.read() + + def skipped_layout_tests(self, extra_test_files=None): + expectations_str = self.test_expectations() + overrides_str = self.test_expectations_overrides() + test_platform_name = self.test_platform_name() + is_debug_mode = False + + all_test_files = self.tests([]) + if extra_test_files: + all_test_files.update(extra_test_files) + + expectations = test_expectations.TestExpectations( + self, all_test_files, expectations_str, test_platform_name, + is_debug_mode, is_lint_mode=False, overrides=overrides_str) + tests_dir = self.layout_tests_dir() + return [self.relative_test_filename(test) + for test in expectations.get_tests_with_result_type(test_expectations.SKIP)] + + def test_platform_names(self): + return self.test_base_platform_names() + ('win-xp', + 'win-vista', 'win-7') + + def test_platform_name_to_name(self, test_platform_name): + if test_platform_name in self.test_platform_names(): + return 'chromium-' + test_platform_name + raise ValueError('Unsupported test_platform_name: %s' % + test_platform_name) + + def test_repository_paths(self): + # Note: for JSON file's backward-compatibility we use 'chrome' rather + # than 'chromium' here. + repos = super(ChromiumPort, self).test_repository_paths() + repos.append(('chrome', self.path_from_chromium_base())) + return repos + + # + # PROTECTED METHODS + # + # These routines should only be called by other methods in this file + # or any subclasses. + # + + def _check_driver_build_up_to_date(self, configuration): + if configuration in ('Debug', 'Release'): + try: + debug_path = self._path_to_driver('Debug') + release_path = self._path_to_driver('Release') + + debug_mtime = os.stat(debug_path).st_mtime + release_mtime = os.stat(release_path).st_mtime + + if (debug_mtime > release_mtime and configuration == 'Release' or + release_mtime > debug_mtime and configuration == 'Debug'): + _log.warning('You are not running the most ' + 'recent DumpRenderTree binary. You need to ' + 'pass --debug or not to select between ' + 'Debug and Release.') + _log.warning('') + # This will fail if we don't have both a debug and release binary. + # That's fine because, in this case, we must already be running the + # most up-to-date one. + except OSError: + pass + return True + + def _chromium_baseline_path(self, platform): + if platform is None: + platform = self.name() + return self.path_from_webkit_base('LayoutTests', 'platform', platform) + + def _convert_path(self, path): + """Handles filename conversion for subprocess command line args.""" + # See note above in diff_image() for why we need this. 
+ if sys.platform == 'cygwin': + return cygpath(path) + return path + + def _path_to_image_diff(self): + binary_name = 'ImageDiff' + if self.get_option('use_test_shell'): + binary_name = 'image_diff' + return self._build_path(self.get_option('configuration'), binary_name) + + +class ChromiumDriver(base.Driver): + """Abstract interface for test_shell.""" + + def __init__(self, port, worker_number): + self._port = port + self._worker_number = worker_number + self._image_path = None + if self._port.get_option('pixel_tests'): + self._image_path = os.path.join( + self._port.get_option('results_directory'), + 'png_result%s.png' % self._worker_number) + + def cmd_line(self): + cmd = self._command_wrapper(self._port.get_option('wrapper')) + cmd.append(self._port._path_to_driver()) + if self._port.get_option('pixel_tests'): + # See note above in diff_image() for why we need _convert_path(). + cmd.append("--pixel-tests=" + + self._port._convert_path(self._image_path)) + + if self._port.get_option('use_test_shell'): + cmd.append('--layout-tests') + else: + cmd.append('--test-shell') + + if self._port.get_option('startup_dialog'): + cmd.append('--testshell-startup-dialog') + + if self._port.get_option('gp_fault_error_box'): + cmd.append('--gp-fault-error-box') + + if self._port.get_option('js_flags') is not None: + cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"') + + if self._port.get_option('multiple_loads') > 0: + cmd.append('--multiple-loads=' + str(self._port.get_option('multiple_loads'))) + + # test_shell does not support accelerated compositing. + if not self._port.get_option("use_test_shell"): + if self._port.get_option('accelerated_compositing'): + cmd.append('--enable-accelerated-compositing') + if self._port.get_option('accelerated_2d_canvas'): + cmd.append('--enable-accelerated-2d-canvas') + return cmd + + def start(self): + # FIXME: Should be an error to call this method twice. + cmd = self.cmd_line() + + # We need to pass close_fds=True to work around Python bug #2320 + # (otherwise we can hang when we kill DumpRenderTree when we are running + # multiple threads). See http://bugs.python.org/issue2320 . + # Note that close_fds isn't supported on Windows, but this bug only + # shows up on Mac and Linux. + close_flag = sys.platform not in ('win32', 'cygwin') + self._proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + close_fds=close_flag) + + def poll(self): + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + return self._proc.poll() + + def _write_command_and_read_line(self, input=None): + """Returns a tuple: (line, did_crash)""" + try: + if input: + if isinstance(input, unicode): + # TestShell expects utf-8 + input = input.encode("utf-8") + self._proc.stdin.write(input) + # DumpRenderTree text output is always UTF-8. However some tests + # (e.g. webarchive) may spit out binary data instead of text so we + # don't bother to decode the output (for either DRT or test_shell). + line = self._proc.stdout.readline() + # We could assert() here that line correctly decodes as UTF-8. 
+ return (line, False) + except IOError, e: + _log.error("IOError communicating w/ test_shell: " + str(e)) + return (None, True) + + def _test_shell_command(self, uri, timeoutms, checksum): + cmd = uri + if timeoutms: + cmd += ' ' + str(timeoutms) + if checksum: + cmd += ' ' + checksum + cmd += "\n" + return cmd + + def _output_image(self): + """Returns the image output which driver generated.""" + png_path = self._image_path + if png_path and os.path.isfile(png_path): + with open(png_path, 'rb') as image_file: + return image_file.read() + else: + return None + + def _output_image_with_retry(self): + # Retry a few more times because open() sometimes fails on Windows, + # raising "IOError: [Errno 13] Permission denied:" + retry_num = 50 + timeout_seconds = 5.0 + for i in range(retry_num): + try: + return self._output_image() + except IOError, e: + if e.errno == errno.EACCES: + time.sleep(timeout_seconds / retry_num) + else: + raise e + return self._output_image() + + def run_test(self, test_input): + output = [] + error = [] + crash = False + timeout = False + actual_uri = None + actual_checksum = None + + start_time = time.time() + + uri = self._port.filename_to_uri(test_input.filename) + cmd = self._test_shell_command(uri, test_input.timeout, + test_input.image_hash) + (line, crash) = self._write_command_and_read_line(input=cmd) + + while not crash and line.rstrip() != "#EOF": + # Make sure we haven't crashed. + if line == '' and self.poll() is not None: + # This is hex code 0xc000001d, which is used for abrupt + # termination. This happens if we hit ctrl+c from the prompt + # and we happen to be waiting on test_shell. + # sdoyon: Not sure for which OS and in what circumstances the + # above code is valid. What works for me under Linux to detect + # ctrl+c is for the subprocess returncode to be negative + # SIGINT. And that agrees with the subprocess documentation. + if (-1073741510 == self._proc.returncode or + - signal.SIGINT == self._proc.returncode): + raise KeyboardInterrupt + crash = True + break + + # Don't include #URL lines in our output + if line.startswith("#URL:"): + actual_uri = line.rstrip()[5:] + if uri != actual_uri: + # GURL capitalizes the drive letter of a file URL. + if (not re.search("^file:///[a-z]:", uri) or + uri.lower() != actual_uri.lower()): + _log.fatal("Test got out of sync:\n|%s|\n|%s|" % + (uri, actual_uri)) + raise AssertionError("test out of sync") + elif line.startswith("#MD5:"): + actual_checksum = line.rstrip()[5:] + elif line.startswith("#TEST_TIMED_OUT"): + timeout = True + # Test timed out, but we still need to read until #EOF. + elif actual_uri: + output.append(line) + else: + error.append(line) + + (line, crash) = self._write_command_and_read_line(input=None) + + run_time = time.time() - start_time + return test_output.TestOutput( + ''.join(output), self._output_image_with_retry(), actual_checksum, + crash, run_time, timeout, ''.join(error)) + + def stop(self): + if self._proc: + self._proc.stdin.close() + self._proc.stdout.close() + if self._proc.stderr: + self._proc.stderr.close() + if sys.platform not in ('win32', 'cygwin'): + # Closing stdin/stdout/stderr hangs sometimes on OS X, + # (see __init__(), above), and anyway we don't want to hang + # the harness if test_shell is buggy, so we wait a couple + # seconds to give test_shell a chance to clean up, but then + # force-kill the process if necessary. 
+ KILL_TIMEOUT = 3.0 + timeout = time.time() + KILL_TIMEOUT + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + while self._proc.poll() is None and time.time() < timeout: + time.sleep(0.1) + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + if self._proc.poll() is None: + _log.warning('stopping test driver timed out, ' + 'killing it') + self._port._executive.kill_process(self._proc.pid) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py new file mode 100644 index 0000000..c1f5c8d --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py @@ -0,0 +1,152 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import codecs +import os +import sys + +import chromium_linux +import chromium_mac +import chromium_win + + +def get(**kwargs): + """Some tests have slightly different results when run while using + hardware acceleration. In those cases, we prepend an additional directory + to the baseline paths.""" + port_name = kwargs.get('port_name', None) + if port_name == 'chromium-gpu': + if sys.platform in ('cygwin', 'win32'): + port_name = 'chromium-gpu-win' + elif sys.platform == 'linux2': + port_name = 'chromium-gpu-linux' + elif sys.platform == 'darwin': + port_name = 'chromium-gpu-mac' + else: + raise NotImplementedError('unsupported platform: %s' % + sys.platform) + + if port_name == 'chromium-gpu-linux': + return ChromiumGpuLinuxPort(**kwargs) + + if port_name.startswith('chromium-gpu-mac'): + return ChromiumGpuMacPort(**kwargs) + + if port_name.startswith('chromium-gpu-win'): + return ChromiumGpuWinPort(**kwargs) + + raise NotImplementedError('unsupported port: %s' % port_name) + + +def _set_gpu_options(options): + if options: + if options.accelerated_compositing is None: + options.accelerated_compositing = True + if options.accelerated_2d_canvas is None: + options.accelerated_2d_canvas = True + + # FIXME: Remove this after http://codereview.chromium.org/5133001/ is enabled + # on the bots. 
+ if options.builder_name is not None and not ' - GPU' in options.builder_name: + options.builder_name = options.builder_name + ' - GPU' + + +def _gpu_overrides(port): + try: + overrides_path = port.path_from_chromium_base('webkit', 'tools', + 'layout_tests', 'test_expectations_gpu.txt') + except AssertionError: + return None + if not os.path.exists(overrides_path): + return None + with codecs.open(overrides_path, "r", "utf-8") as file: + return file.read() + + +class ChromiumGpuLinuxPort(chromium_linux.ChromiumLinuxPort): + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-gpu-linux') + _set_gpu_options(kwargs.get('options')) + chromium_linux.ChromiumLinuxPort.__init__(self, **kwargs) + + def baseline_search_path(self): + # Mimic the Linux -> Win expectations fallback in the ordinary Chromium port. + return (map(self._webkit_baseline_path, ['chromium-gpu-linux', 'chromium-gpu-win', 'chromium-gpu']) + + chromium_linux.ChromiumLinuxPort.baseline_search_path(self)) + + def default_child_processes(self): + return 1 + + def path_to_test_expectations_file(self): + return self.path_from_webkit_base('LayoutTests', 'platform', + 'chromium-gpu', 'test_expectations.txt') + + def test_expectations_overrides(self): + return _gpu_overrides(self) + + +class ChromiumGpuMacPort(chromium_mac.ChromiumMacPort): + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-gpu-mac') + _set_gpu_options(kwargs.get('options')) + chromium_mac.ChromiumMacPort.__init__(self, **kwargs) + + def baseline_search_path(self): + return (map(self._webkit_baseline_path, ['chromium-gpu-mac', 'chromium-gpu']) + + chromium_mac.ChromiumMacPort.baseline_search_path(self)) + + def default_child_processes(self): + return 1 + + def path_to_test_expectations_file(self): + return self.path_from_webkit_base('LayoutTests', 'platform', + 'chromium-gpu', 'test_expectations.txt') + + def test_expectations_overrides(self): + return _gpu_overrides(self) + + +class ChromiumGpuWinPort(chromium_win.ChromiumWinPort): + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-gpu-win' + self.version()) + _set_gpu_options(kwargs.get('options')) + chromium_win.ChromiumWinPort.__init__(self, **kwargs) + + def baseline_search_path(self): + return (map(self._webkit_baseline_path, ['chromium-gpu-win', 'chromium-gpu']) + + chromium_win.ChromiumWinPort.baseline_search_path(self)) + + def default_child_processes(self): + return 1 + + def path_to_test_expectations_file(self): + return self.path_from_webkit_base('LayoutTests', 'platform', + 'chromium-gpu', 'test_expectations.txt') + + def test_expectations_overrides(self): + return _gpu_overrides(self) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py new file mode 100644 index 0000000..ad0404c --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu_unittest.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import unittest + +from webkitpy.tool import mocktool +import chromium_gpu + + +class ChromiumGpuTest(unittest.TestCase): + def test_get_chromium_gpu_linux(self): + self.assertOverridesWorked('chromium-gpu-linux') + + def test_get_chromium_gpu_mac(self): + self.assertOverridesWorked('chromium-gpu-mac') + + def test_get_chromium_gpu_win(self): + self.assertOverridesWorked('chromium-gpu-win') + + def assertOverridesWorked(self, port_name): + # test that we got the right port + mock_options = mocktool.MockOptions(accelerated_compositing=None, + accelerated_2d_canvas=None, + builder_name='foo', + child_processes=None) + port = chromium_gpu.get(port_name=port_name, options=mock_options) + self.assertTrue(port._options.accelerated_compositing) + self.assertTrue(port._options.accelerated_2d_canvas) + self.assertEqual(port.default_child_processes(), 1) + self.assertEqual(port._options.builder_name, 'foo - GPU') + + # we use startswith() instead of Equal to gloss over platform versions. + self.assertTrue(port.name().startswith(port_name)) + + # test that it has the right directories in front of the search path. + paths = port.baseline_search_path() + self.assertEqual(port._webkit_baseline_path(port_name), paths[0]) + if port_name == 'chromium-gpu-linux': + self.assertEqual(port._webkit_baseline_path('chromium-gpu-win'), paths[1]) + self.assertEqual(port._webkit_baseline_path('chromium-gpu'), paths[2]) + else: + self.assertEqual(port._webkit_baseline_path('chromium-gpu'), paths[1]) + + # Test that we have the right expectations file. + self.assertTrue('chromium-gpu' in + port.path_to_test_expectations_file()) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py new file mode 100644 index 0000000..5d9dd87 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Chromium Linux implementation of the Port interface.""" + +import logging +import os +import signal + +import chromium + +_log = logging.getLogger("webkitpy.layout_tests.port.chromium_linux") + + +class ChromiumLinuxPort(chromium.ChromiumPort): + """Chromium Linux implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-linux') + chromium.ChromiumPort.__init__(self, **kwargs) + + def baseline_search_path(self): + port_names = ["chromium-linux", "chromium-win", "chromium", "win", "mac"] + return map(self._webkit_baseline_path, port_names) + + def check_build(self, needs_http): + result = chromium.ChromiumPort.check_build(self, needs_http) + if needs_http: + if self.get_option('use_apache'): + result = self._check_apache_install() and result + else: + result = self._check_lighttpd_install() and result + result = self._check_wdiff_install() and result + + if not result: + _log.error('For complete Linux build requirements, please see:') + _log.error('') + _log.error(' http://code.google.com/p/chromium/wiki/' + 'LinuxBuildInstructions') + return result + + def test_platform_name(self): + # We use 'linux' instead of 'chromium-linux' in test_expectations.txt. + return 'linux' + + def version(self): + # We don't have different versions on linux. 
+ return '' + + # + # PROTECTED METHODS + # + + def _build_path(self, *comps): + base = self.path_from_chromium_base() + if os.path.exists(os.path.join(base, 'sconsbuild')): + return os.path.join(base, 'sconsbuild', *comps) + if os.path.exists(os.path.join(base, 'out', *comps)) or self.get_option('use_test_shell'): + return os.path.join(base, 'out', *comps) + base = self.path_from_webkit_base() + if os.path.exists(os.path.join(base, 'sconsbuild')): + return os.path.join(base, 'sconsbuild', *comps) + return os.path.join(base, 'out', *comps) + + def _check_apache_install(self): + result = chromium.check_file_exists(self._path_to_apache(), + "apache2") + result = chromium.check_file_exists(self._path_to_apache_config_file(), + "apache2 config file") and result + if not result: + _log.error(' Please install using: "sudo apt-get install ' + 'apache2 libapache2-mod-php5"') + _log.error('') + return result + + def _check_lighttpd_install(self): + result = chromium.check_file_exists( + self._path_to_lighttpd(), "LigHTTPd executable") + result = chromium.check_file_exists(self._path_to_lighttpd_php(), + "PHP CGI executable") and result + result = chromium.check_file_exists(self._path_to_lighttpd_modules(), + "LigHTTPd modules") and result + if not result: + _log.error(' Please install using: "sudo apt-get install ' + 'lighttpd php5-cgi"') + _log.error('') + return result + + def _check_wdiff_install(self): + result = chromium.check_file_exists(self._path_to_wdiff(), 'wdiff') + if not result: + _log.error(' Please install using: "sudo apt-get install ' + 'wdiff"') + _log.error('') + # FIXME: The ChromiumMac port always returns True. + return result + + def _path_to_apache(self): + if self._is_redhat_based(): + return '/usr/sbin/httpd' + else: + return '/usr/sbin/apache2' + + def _path_to_apache_config_file(self): + if self._is_redhat_based(): + config_name = 'fedora-httpd.conf' + else: + config_name = 'apache2-debian-httpd.conf' + + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + config_name) + + def _path_to_lighttpd(self): + return "/usr/sbin/lighttpd" + + def _path_to_lighttpd_modules(self): + return "/usr/lib/lighttpd" + + def _path_to_lighttpd_php(self): + return "/usr/bin/php-cgi" + + def _path_to_driver(self, configuration=None): + if not configuration: + configuration = self.get_option('configuration') + binary_name = 'DumpRenderTree' + if self.get_option('use_test_shell'): + binary_name = 'test_shell' + return self._build_path(configuration, binary_name) + + def _path_to_helper(self): + return None + + def _path_to_wdiff(self): + if self._is_redhat_based(): + return '/usr/bin/dwdiff' + else: + return '/usr/bin/wdiff' + + def _is_redhat_based(self): + return os.path.exists(os.path.join('/etc', 'redhat-release')) + + def _shut_down_http_server(self, server_pid): + """Shut down the lighttpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # TODO(mmoss) This isn't ideal, since it could conflict with + # lighttpd processes not started by http_server.py, + # but good enough for now. + self._executive.kill_all("lighttpd") + self._executive.kill_all("apache2") + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. 
from a stale httpd.pid + # file), so if kill fails on the given PID, just try to + # 'killall' web servers. + self._shut_down_http_server(None) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py new file mode 100644 index 0000000..f638e01 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py @@ -0,0 +1,182 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Chromium Mac implementation of the Port interface.""" + +import logging +import os +import platform +import signal + +import chromium + +from webkitpy.common.system.executive import Executive + +_log = logging.getLogger("webkitpy.layout_tests.port.chromium_mac") + + +class ChromiumMacPort(chromium.ChromiumPort): + """Chromium Mac implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-mac') + chromium.ChromiumPort.__init__(self, **kwargs) + + def baseline_search_path(self): + port_names = [ + "chromium-mac" + self.version(), + "chromium-mac", + "chromium", + "mac" + self.version(), + "mac", + ] + return map(self._webkit_baseline_path, port_names) + + def check_build(self, needs_http): + result = chromium.ChromiumPort.check_build(self, needs_http) + result = self._check_wdiff_install() and result + if not result: + _log.error('For complete Mac build requirements, please see:') + _log.error('') + _log.error(' http://code.google.com/p/chromium/wiki/' + 'MacBuildInstructions') + return result + + def default_child_processes(self): + # FIXME: we need to run single-threaded for now. See + # https://bugs.webkit.org/show_bug.cgi?id=38553. Unfortunately this + # routine is called right before the logger is configured, so if we + # try to _log.warning(), it gets thrown away. 
+ import sys + sys.stderr.write("Defaulting to one child - see https://bugs.webkit.org/show_bug.cgi?id=38553\n") + return 1 + + def driver_name(self): + """name for this port's equivalent of DumpRenderTree.""" + if self.get_option('use_test_shell'): + return "TestShell" + return "DumpRenderTree" + + def test_platform_name(self): + # We use 'mac' instead of 'chromium-mac' + return 'mac' + + def version(self): + # FIXME: It's strange that this string is -version, not just version. + os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" + if not os_version_string: + return '-leopard' + release_version = int(os_version_string.split('.')[1]) + # we don't support 'tiger' or earlier releases + if release_version == 5: + return '-leopard' + elif release_version == 6: + return '-snowleopard' + return '' + + # + # PROTECTED METHODS + # + + def _build_path(self, *comps): + path = self.path_from_chromium_base('xcodebuild', *comps) + if os.path.exists(path) or self.get_option('use_test_shell'): + return path + return self.path_from_webkit_base('WebKit', 'chromium', 'xcodebuild', + *comps) + + def _check_wdiff_install(self): + try: + # We're ignoring the return and always returning True + self._executive.run_command([self._path_to_wdiff()], error_handler=Executive.ignore_error) + except OSError: + _log.warning('wdiff not found. Install using MacPorts or some ' + 'other means') + return True + + def _lighttpd_path(self, *comps): + return self.path_from_chromium_base('third_party', 'lighttpd', + 'mac', *comps) + + def _path_to_apache(self): + return '/usr/sbin/httpd' + + def _path_to_apache_config_file(self): + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'apache2-httpd.conf') + + def _path_to_lighttpd(self): + return self._lighttpd_path('bin', 'lighttpd') + + def _path_to_lighttpd_modules(self): + return self._lighttpd_path('lib') + + def _path_to_lighttpd_php(self): + return self._lighttpd_path('bin', 'php-cgi') + + def _path_to_driver(self, configuration=None): + # FIXME: make |configuration| happy with case-sensitive file + # systems. + if not configuration: + configuration = self.get_option('configuration') + return self._build_path(configuration, self.driver_name() + '.app', + 'Contents', 'MacOS', self.driver_name()) + + def _path_to_helper(self): + binary_name = 'LayoutTestHelper' + if self.get_option('use_test_shell'): + binary_name = 'layout_test_helper' + return self._build_path(self.get_option('configuration'), binary_name) + + def _path_to_wdiff(self): + return 'wdiff' + + def _shut_down_http_server(self, server_pid): + """Shut down the lighttpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # TODO(mmoss) This isn't ideal, since it could conflict with + # lighttpd processes not started by http_server.py, + # but good enough for now. + self._executive.kill_all('lighttpd') + self._executive.kill_all('httpd') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid + # file), so if kill fails on the given PID, just try to + # 'killall' web servers. 
+ self._shut_down_http_server(None) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py new file mode 100644 index 0000000..d63faa0 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py @@ -0,0 +1,40 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import chromium_mac +import unittest + +from webkitpy.thirdparty.mock import Mock + + +class ChromiumMacPortTest(unittest.TestCase): + + def test_check_wdiff_install(self): + port = chromium_mac.ChromiumMacPort() + # Currently is always true, just logs if missing. + self.assertTrue(port._check_wdiff_install()) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py new file mode 100644 index 0000000..c87984f --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py @@ -0,0 +1,193 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import unittest +import StringIO + +from webkitpy.tool import mocktool +from webkitpy.thirdparty.mock import Mock + +import chromium +import chromium_linux +import chromium_mac +import chromium_win + +class ChromiumDriverTest(unittest.TestCase): + + def setUp(self): + mock_port = Mock() + mock_port.get_option = lambda option_name: '' + self.driver = chromium.ChromiumDriver(mock_port, worker_number=0) + + def test_test_shell_command(self): + expected_command = "test.html 2 checksum\n" + self.assertEqual(self.driver._test_shell_command("test.html", 2, "checksum"), expected_command) + + def _assert_write_command_and_read_line(self, input=None, expected_line=None, expected_stdin=None, expected_crash=False): + if not expected_stdin: + if input: + expected_stdin = input + else: + # We reset stdin, so we should expect stdin.getValue = "" + expected_stdin = "" + self.driver._proc.stdin = StringIO.StringIO() + line, did_crash = self.driver._write_command_and_read_line(input) + self.assertEqual(self.driver._proc.stdin.getvalue(), expected_stdin) + self.assertEqual(line, expected_line) + self.assertEqual(did_crash, expected_crash) + + def test_write_command_and_read_line(self): + self.driver._proc = Mock() + # Set up to read 3 lines before we get an IOError + self.driver._proc.stdout = StringIO.StringIO("first\nsecond\nthird\n") + + unicode_input = u"I \u2661 Unicode" + utf8_input = unicode_input.encode("utf-8") + # Test unicode input conversion to utf-8 + self._assert_write_command_and_read_line(input=unicode_input, expected_stdin=utf8_input, expected_line="first\n") + # Test str() input. + self._assert_write_command_and_read_line(input="foo", expected_line="second\n") + # Test input=None + self._assert_write_command_and_read_line(expected_line="third\n") + # Test reading from a closed/empty stream. + # reading from a StringIO does not raise IOError like a real file would, so raise IOError manually. 
+ def mock_readline(): + raise IOError + self.driver._proc.stdout.readline = mock_readline + self._assert_write_command_and_read_line(expected_crash=True) + + +class ChromiumPortTest(unittest.TestCase): + class TestMacPort(chromium_mac.ChromiumMacPort): + def __init__(self, options): + chromium_mac.ChromiumMacPort.__init__(self, + port_name='test-port', + options=options) + + def default_configuration(self): + self.default_configuration_called = True + return 'default' + + class TestLinuxPort(chromium_linux.ChromiumLinuxPort): + def __init__(self, options): + chromium_linux.ChromiumLinuxPort.__init__(self, + port_name='test-port', + options=options) + + def default_configuration(self): + self.default_configuration_called = True + return 'default' + + def test_path_to_image_diff(self): + mock_options = mocktool.MockOptions() + port = ChromiumPortTest.TestLinuxPort(options=mock_options) + self.assertTrue(port._path_to_image_diff().endswith( + '/out/default/ImageDiff'), msg=port._path_to_image_diff()) + port = ChromiumPortTest.TestMacPort(options=mock_options) + self.assertTrue(port._path_to_image_diff().endswith( + '/xcodebuild/default/ImageDiff')) + mock_options = mocktool.MockOptions(use_test_shell=True) + port = ChromiumPortTest.TestLinuxPort(options=mock_options) + self.assertTrue(port._path_to_image_diff().endswith( + '/out/default/image_diff'), msg=port._path_to_image_diff()) + port = ChromiumPortTest.TestMacPort(options=mock_options) + self.assertTrue(port._path_to_image_diff().endswith( + '/xcodebuild/default/image_diff')) + # FIXME: Figure out how this is going to work on Windows. + #port = chromium_win.ChromiumWinPort('test-port', options=MockOptions()) + + def test_skipped_layout_tests(self): + mock_options = mocktool.MockOptions() + port = ChromiumPortTest.TestLinuxPort(options=mock_options) + + fake_test = os.path.join(port.layout_tests_dir(), "fast/js/not-good.js") + + port.test_expectations = lambda: """BUG_TEST SKIP : fast/js/not-good.js = TEXT +LINUX WIN : fast/js/very-good.js = TIMEOUT PASS""" + port.test_expectations_overrides = lambda: '' + port.tests = lambda paths: set() + port.path_exists = lambda test: True + + skipped_tests = port.skipped_layout_tests(extra_test_files=[fake_test, ]) + self.assertTrue("fast/js/not-good.js" in skipped_tests) + + def test_default_configuration(self): + mock_options = mocktool.MockOptions() + port = ChromiumPortTest.TestLinuxPort(options=mock_options) + self.assertEquals(mock_options.configuration, 'default') + self.assertTrue(port.default_configuration_called) + + mock_options = mocktool.MockOptions(configuration=None) + port = ChromiumPortTest.TestLinuxPort(mock_options) + self.assertEquals(mock_options.configuration, 'default') + self.assertTrue(port.default_configuration_called) + + def test_diff_image(self): + class TestPort(ChromiumPortTest.TestLinuxPort): + def _path_to_image_diff(self): + return "/path/to/image_diff" + + class MockExecute: + def __init__(self, result): + self._result = result + + def run_command(self, + args, + cwd=None, + input=None, + error_handler=None, + return_exit_code=False, + return_stderr=True, + decode_output=False): + if return_exit_code: + return self._result + return '' + + mock_options = mocktool.MockOptions() + port = ChromiumPortTest.TestLinuxPort(mock_options) + + # Images are different. + port._executive = MockExecute(0) + self.assertEquals(False, port.diff_image("EXPECTED", "ACTUAL")) + + # Images are the same. 
+ port._executive = MockExecute(1) + self.assertEquals(True, port.diff_image("EXPECTED", "ACTUAL")) + + # There was some error running image_diff. + port._executive = MockExecute(2) + exception_raised = False + try: + port.diff_image("EXPECTED", "ACTUAL") + except ValueError, e: + exception_raised = True + self.assertFalse(exception_raised) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py new file mode 100644 index 0000000..d080f82 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Chromium Win implementation of the Port interface.""" + +import logging +import os +import sys + +import chromium + +_log = logging.getLogger("webkitpy.layout_tests.port.chromium_win") + + +class ChromiumWinPort(chromium.ChromiumPort): + """Chromium Win implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'chromium-win' + self.version()) + chromium.ChromiumPort.__init__(self, **kwargs) + + def setup_environ_for_server(self): + env = chromium.ChromiumPort.setup_environ_for_server(self) + # Put the cygwin directory first in the path to find cygwin1.dll. + env["PATH"] = "%s;%s" % ( + self.path_from_chromium_base("third_party", "cygwin", "bin"), + env["PATH"]) + # Configure the cygwin directory so that pywebsocket finds proper + # python executable to run cgi program. 
+ env["CYGWIN_PATH"] = self.path_from_chromium_base( + "third_party", "cygwin", "bin") + if (sys.platform == "win32" and self.get_option('register_cygwin')): + setup_mount = self.path_from_chromium_base("third_party", + "cygwin", + "setup_mount.bat") + self._executive.run_command([setup_mount]) + return env + + def baseline_search_path(self): + port_names = [] + if self._name.endswith('-win-xp'): + port_names.append("chromium-win-xp") + if self._name.endswith('-win-xp') or self._name.endswith('-win-vista'): + port_names.append("chromium-win-vista") + # FIXME: This may need to include mac-snowleopard like win.py. + port_names.extend(["chromium-win", "chromium", "win", "mac"]) + return map(self._webkit_baseline_path, port_names) + + def check_build(self, needs_http): + result = chromium.ChromiumPort.check_build(self, needs_http) + if not result: + _log.error('For complete Windows build requirements, please ' + 'see:') + _log.error('') + _log.error(' http://dev.chromium.org/developers/how-tos/' + 'build-instructions-windows') + return result + + def relative_test_filename(self, filename): + path = filename[len(self.layout_tests_dir()) + 1:] + return path.replace('\\', '/') + + def test_platform_name(self): + # We return 'win-xp', not 'chromium-win-xp' here, for convenience. + return 'win' + self.version() + + def version(self): + if not hasattr(sys, 'getwindowsversion'): + return '' + winver = sys.getwindowsversion() + if winver[0] == 6 and (winver[1] == 1): + return '-7' + if winver[0] == 6 and (winver[1] == 0): + return '-vista' + if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2): + return '-xp' + return '' + + # + # PROTECTED ROUTINES + # + def _build_path(self, *comps): + p = self.path_from_chromium_base('webkit', *comps) + if os.path.exists(p): + return p + p = self.path_from_chromium_base('chrome', *comps) + if os.path.exists(p) or self.get_option('use_test_shell'): + return p + return os.path.join(self.path_from_webkit_base(), 'WebKit', 'chromium', + *comps) + + def _lighttpd_path(self, *comps): + return self.path_from_chromium_base('third_party', 'lighttpd', 'win', + *comps) + + def _path_to_apache(self): + return self.path_from_chromium_base('third_party', 'cygwin', 'usr', + 'sbin', 'httpd') + + def _path_to_apache_config_file(self): + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'cygwin-httpd.conf') + + def _path_to_lighttpd(self): + return self._lighttpd_path('LightTPD.exe') + + def _path_to_lighttpd_modules(self): + return self._lighttpd_path('lib') + + def _path_to_lighttpd_php(self): + return self._lighttpd_path('php5', 'php-cgi.exe') + + def _path_to_driver(self, configuration=None): + if not configuration: + configuration = self.get_option('configuration') + binary_name = 'DumpRenderTree.exe' + if self.get_option('use_test_shell'): + binary_name = 'test_shell.exe' + return self._build_path(configuration, binary_name) + + def _path_to_helper(self): + binary_name = 'LayoutTestHelper.exe' + if self.get_option('use_test_shell'): + binary_name = 'layout_test_helper.exe' + return self._build_path(self.get_option('configuration'), binary_name) + + def _path_to_image_diff(self): + binary_name = 'ImageDiff.exe' + if self.get_option('use_test_shell'): + binary_name = 'image_diff.exe' + return self._build_path(self.get_option('configuration'), binary_name) + + def _path_to_wdiff(self): + return self.path_from_chromium_base('third_party', 'cygwin', 'bin', + 'wdiff.exe') + + def _shut_down_http_server(self, server_pid): + """Shut down the lighttpd web server. 
Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # FIXME: Why are we ignoring server_pid and calling + # _kill_all instead of Executive.kill_process(pid)? + self._executive.kill_all("LightTPD.exe") + self._executive.kill_all("httpd.exe") diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py new file mode 100644 index 0000000..36f3c6b --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py @@ -0,0 +1,74 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
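A minimal standalone sketch (illustrative only, not part of the patch) of the version-suffix mapping implemented by ChromiumWinPort.version() above; the helper name is hypothetical, but the mapping mirrors the sys.getwindowsversion() checks in the port:

    def windows_version_suffix(major, minor):
        # Mirrors ChromiumWinPort.version(): 6.1 -> Windows 7, 6.0 -> Vista,
        # 5.1/5.2 -> XP; anything else (or a non-Windows host) gets no suffix.
        if (major, minor) == (6, 1):
            return '-7'
        if (major, minor) == (6, 0):
            return '-vista'
        if major == 5 and minor in (1, 2):
            return '-xp'
        return ''

    assert windows_version_suffix(5, 1) == '-xp'
    assert windows_version_suffix(6, 0) == '-vista'
    assert windows_version_suffix(6, 1) == '-7'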
+ +import os +import sys +import unittest +import chromium_win +from webkitpy.common.system import outputcapture +from webkitpy.tool import mocktool + + +class ChromiumWinTest(unittest.TestCase): + + class RegisterCygwinOption(object): + def __init__(self): + self.register_cygwin = True + + def setUp(self): + self.orig_platform = sys.platform + + def tearDown(self): + sys.platform = self.orig_platform + + def _mock_path_from_chromium_base(self, *comps): + return os.path.join("/chromium/src", *comps) + + def test_setup_environ_for_server(self): + port = chromium_win.ChromiumWinPort() + port._executive = mocktool.MockExecutive(should_log=True) + port.path_from_chromium_base = self._mock_path_from_chromium_base + output = outputcapture.OutputCapture() + orig_environ = os.environ.copy() + env = output.assert_outputs(self, port.setup_environ_for_server) + self.assertEqual(orig_environ["PATH"], os.environ["PATH"]) + self.assertNotEqual(env["PATH"], os.environ["PATH"]) + + def test_setup_environ_for_server_register_cygwin(self): + sys.platform = "win32" + port = chromium_win.ChromiumWinPort( + options=ChromiumWinTest.RegisterCygwinOption()) + port._executive = mocktool.MockExecutive(should_log=True) + port.path_from_chromium_base = self._mock_path_from_chromium_base + setup_mount = self._mock_path_from_chromium_base("third_party", + "cygwin", + "setup_mount.bat") + expected_stderr = "MOCK run_command: %s\n" % [setup_mount] + output = outputcapture.OutputCapture() + output.assert_outputs(self, port.setup_environ_for_server, + expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config.py b/Tools/Scripts/webkitpy/layout_tests/port/config.py new file mode 100644 index 0000000..e08ed9d --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/config.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
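The port unit tests above all follow the same pattern: collaborators are swapped out on the port instance (the executive, the Chromium base path) before the method under test runs, so no real processes are spawned and no Chromium checkout is required. A small self-contained sketch of that pattern under those assumptions; the fake class and function names are illustrative, not webkitpy APIs:

    class RecordingExecutive(object):
        """Stand-in executive that records commands instead of running them."""
        def __init__(self):
            self.commands = []

        def run_command(self, args, **kwargs):
            self.commands.append(list(args))
            return ''

    def exercise_setup_environ(port):
        # Swap in fakes the same way the tests above do, then call the method.
        port._executive = RecordingExecutive()
        port.path_from_chromium_base = lambda *comps: '/chromium/src/' + '/'.join(comps)
        env = port.setup_environ_for_server()
        return env, port._executive.commands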
+ +"""Wrapper objects for WebKit-specific utility routines.""" + +# FIXME: This file needs to be unified with common/checkout/scm.py and +# common/config/ports.py . + +import os + +from webkitpy.common.system import logutils +from webkitpy.common.system import executive + + +_log = logutils.get_logger(__file__) + +# +# FIXME: This is used to record if we've already hit the filesystem to look +# for a default configuration. We cache this to speed up the unit tests, +# but this can be reset with clear_cached_configuration(). This should be +# replaced with us consistently using MockConfigs() for tests that don't +# hit the filesystem at all and provide a reliable value. +# +_have_determined_configuration = False +_configuration = "Release" + + +def clear_cached_configuration(): + global _have_determined_configuration, _configuration + _have_determined_configuration = False + _configuration = "Release" + + +class Config(object): + _FLAGS_FROM_CONFIGURATIONS = { + "Debug": "--debug", + "Release": "--release", + } + + def __init__(self, executive, filesystem): + self._executive = executive + self._filesystem = filesystem + self._webkit_base_dir = None + self._default_configuration = None + self._build_directories = {} + + def build_directory(self, configuration): + """Returns the path to the build directory for the configuration.""" + if configuration: + flags = ["--configuration", + self._FLAGS_FROM_CONFIGURATIONS[configuration]] + else: + configuration = "" + flags = ["--top-level"] + + if not self._build_directories.get(configuration): + args = ["perl", self._script_path("webkit-build-directory")] + flags + self._build_directories[configuration] = ( + self._executive.run_command(args).rstrip()) + + return self._build_directories[configuration] + + def build_dumprendertree(self, configuration): + """Builds DRT in the given configuration. + + Returns True if the build was successful and up-to-date.""" + flag = self._FLAGS_FROM_CONFIGURATIONS[configuration] + exit_code = self._executive.run_command([ + self._script_path("build-dumprendertree"), flag], + return_exit_code=True) + if exit_code != 0: + _log.error("Failed to build DumpRenderTree") + return False + return True + + def default_configuration(self): + """Returns the default configuration for the user. + + Returns the value set by 'set-webkit-configuration', or "Release" + if that has not been set. This mirrors the logic in webkitdirs.pm.""" + if not self._default_configuration: + self._default_configuration = self._determine_configuration() + if not self._default_configuration: + self._default_configuration = 'Release' + if self._default_configuration not in self._FLAGS_FROM_CONFIGURATIONS: + _log.warn("Configuration \"%s\" is not a recognized value.\n" % + self._default_configuration) + _log.warn("Scripts may fail. " + "See 'set-webkit-configuration --help'.") + return self._default_configuration + + def path_from_webkit_base(self, *comps): + return self._filesystem.join(self.webkit_base_dir(), *comps) + + def webkit_base_dir(self): + """Returns the absolute path to the top of the WebKit tree. + + Raises an AssertionError if the top dir can't be determined.""" + # Note: this code somewhat duplicates the code in + # scm.find_checkout_root(). However, that code only works if the top + # of the SCM repository also matches the top of the WebKit tree. The + # Chromium ports, for example, only check out subdirectories like + # Tools/Scripts, and so we still have to do additional work + # to find the top of the tree. 
+ # + # This code will also work if there is no SCM system at all. + if not self._webkit_base_dir: + abspath = os.path.abspath(__file__) + self._webkit_base_dir = abspath[0:abspath.find('Tools') - 1] + return self._webkit_base_dir + + def _script_path(self, script_name): + return self._filesystem.join(self.webkit_base_dir(), "Tools", + "Scripts", script_name) + + def _determine_configuration(self): + # This mirrors the logic in webkitdirs.pm:determineConfiguration(). + # + # FIXME: See the comment at the top of the file regarding unit tests + # and our use of global mutable static variables. + global _have_determined_configuration, _configuration + if not _have_determined_configuration: + contents = self._read_configuration() + if not contents: + contents = "Release" + if contents == "Deployment": + contents = "Release" + if contents == "Development": + contents = "Debug" + _configuration = contents + _have_determined_configuration = True + return _configuration + + def _read_configuration(self): + try: + configuration_path = self._filesystem.join(self.build_directory(None), + "Configuration") + if not self._filesystem.exists(configuration_path): + return None + except (OSError, executive.ScriptError): + return None + + return self._filesystem.read_text_file(configuration_path).rstrip() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py new file mode 100644 index 0000000..af71fa3 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
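Config.default_configuration() above resolves the user's setting by reading the Configuration file from the top-level build directory and normalizing the legacy Xcode-style names, falling back to Release when nothing is found. A standalone sketch of just that normalization step, assuming the same mapping as _determine_configuration(); the helper name is illustrative:

    def normalize_configuration(contents):
        # Missing/empty file -> Release; legacy names map onto Release/Debug;
        # anything else (e.g. an unrecognized value) passes through unchanged.
        if not contents:
            return 'Release'
        return {'Deployment': 'Release', 'Development': 'Debug'}.get(contents, contents)

    assert normalize_configuration(None) == 'Release'
    assert normalize_configuration('Deployment') == 'Release'
    assert normalize_configuration('Development') == 'Debug'
    assert normalize_configuration('Debug') == 'Debug'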
+ +"""Wrapper objects for WebKit-specific utility routines.""" + + +class MockConfig(object): + def __init__(self, default_configuration='Release'): + self._default_configuration = default_configuration + + def build_directory(self, configuration): + return "/build" + + def build_dumprendertree(self, configuration): + return True + + def default_configuration(self): + return self._default_configuration + + def path_from_webkit_base(self, *comps): + return "/" + "/".join(list(comps)) + + def webkit_base_dir(self): + return "/" diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py b/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py new file mode 100644 index 0000000..3dec3b9 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py @@ -0,0 +1,70 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""FIXME: This script is used by +config_unittest.test_default_configuration__standalone() to read the +default configuration to work around any possible caching / reset bugs. See +https://bugs.webkit.org/show_bug?id=49360 for the motivation. We can remove +this test when we remove the global configuration cache in config.py.""" + +import os +import unittest +import sys + + +# Ensure that webkitpy is in PYTHONPATH. 
+this_dir = os.path.abspath(sys.path[0]) +up = os.path.dirname +script_dir = up(up(up(this_dir))) +if script_dir not in sys.path: + sys.path.append(script_dir) + +from webkitpy.common.system import executive +from webkitpy.common.system import executive_mock +from webkitpy.common.system import filesystem +from webkitpy.common.system import filesystem_mock + +import config + + +def main(argv=None): + if not argv: + argv = sys.argv + + if len(argv) == 3 and argv[1] == '--mock': + e = executive_mock.MockExecutive2(output='foo') + fs = filesystem_mock.MockFileSystem({'foo/Configuration': argv[2]}) + else: + e = executive.Executive() + fs = filesystem.FileSystem() + + c = config.Config(e, fs) + print c.default_configuration() + +if __name__ == '__main__': + main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py new file mode 100644 index 0000000..2cce3cc --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py @@ -0,0 +1,202 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import sys +import unittest + +from webkitpy.common.system import executive +from webkitpy.common.system import executive_mock +from webkitpy.common.system import filesystem +from webkitpy.common.system import filesystem_mock +from webkitpy.common.system import outputcapture + +import config + + +def mock_run_command(arg_list): + # Set this to True to test actual output (where possible). 
+ integration_test = False + if integration_test: + return executive.Executive().run_command(arg_list) + + if 'webkit-build-directory' in arg_list[1]: + return mock_webkit_build_directory(arg_list[2:]) + return 'Error' + + +def mock_webkit_build_directory(arg_list): + if arg_list == ['--top-level']: + return '/WebKitBuild' + elif arg_list == ['--configuration', '--debug']: + return '/WebKitBuild/Debug' + elif arg_list == ['--configuration', '--release']: + return '/WebKitBuild/Release' + return 'Error' + + +class ConfigTest(unittest.TestCase): + def tearDown(self): + config.clear_cached_configuration() + + def make_config(self, output='', files={}, exit_code=0, exception=None, + run_command_fn=None): + e = executive_mock.MockExecutive2(output=output, exit_code=exit_code, + exception=exception, + run_command_fn=run_command_fn) + fs = filesystem_mock.MockFileSystem(files) + return config.Config(e, fs) + + def assert_configuration(self, contents, expected): + # This tests that a configuration file containing + # _contents_ ends up being interpreted as _expected_. + c = self.make_config('foo', {'foo/Configuration': contents}) + self.assertEqual(c.default_configuration(), expected) + + def test_build_directory(self): + # --top-level + c = self.make_config(run_command_fn=mock_run_command) + self.assertTrue(c.build_directory(None).endswith('WebKitBuild')) + + # Test again to check caching + self.assertTrue(c.build_directory(None).endswith('WebKitBuild')) + + # Test other values + self.assertTrue(c.build_directory('Release').endswith('/Release')) + self.assertTrue(c.build_directory('Debug').endswith('/Debug')) + self.assertRaises(KeyError, c.build_directory, 'Unknown') + + def test_build_dumprendertree__success(self): + c = self.make_config(exit_code=0) + self.assertTrue(c.build_dumprendertree("Debug")) + self.assertTrue(c.build_dumprendertree("Release")) + self.assertRaises(KeyError, c.build_dumprendertree, "Unknown") + + def test_build_dumprendertree__failure(self): + c = self.make_config(exit_code=-1) + + # FIXME: Build failures should log errors. However, the message we + # get depends on how we're being called; as a standalone test, + # we'll get the "no handlers found" message. As part of + # test-webkitpy, we get the actual message. Really, we need + # outputcapture to install its own handler. + oc = outputcapture.OutputCapture() + oc.capture_output() + self.assertFalse(c.build_dumprendertree('Debug')) + oc.restore_output() + + oc.capture_output() + self.assertFalse(c.build_dumprendertree('Release')) + oc.restore_output() + + def test_default_configuration__release(self): + self.assert_configuration('Release', 'Release') + + def test_default_configuration__debug(self): + self.assert_configuration('Debug', 'Debug') + + def test_default_configuration__deployment(self): + self.assert_configuration('Deployment', 'Release') + + def test_default_configuration__development(self): + self.assert_configuration('Development', 'Debug') + + def test_default_configuration__notfound(self): + # This tests what happens if the default configuration file + # doesn't exist. + c = self.make_config(output='foo', files={'foo/Configuration': None}) + self.assertEqual(c.default_configuration(), "Release") + + def test_default_configuration__unknown(self): + # Ignore the warning about an unknown configuration value. 
+ oc = outputcapture.OutputCapture() + oc.capture_output() + self.assert_configuration('Unknown', 'Unknown') + oc.restore_output() + + def test_default_configuration__standalone(self): + # FIXME: This test runs a standalone python script to test + # reading the default configuration to work around any possible + # caching / reset bugs. See https://bugs.webkit.org/show_bug?id=49360 + # for the motivation. We can remove this test when we remove the + # global configuration cache in config.py. + e = executive.Executive() + fs = filesystem.FileSystem() + c = config.Config(e, fs) + script = c.path_from_webkit_base('Tools', 'Scripts', + 'webkitpy', 'layout_tests', 'port', 'config_standalone.py') + + # Note: don't use 'Release' here, since that's the normal default. + expected = 'Debug' + + args = [sys.executable, script, '--mock', expected] + actual = e.run_command(args).rstrip() + self.assertEqual(actual, expected) + + def test_default_configuration__no_perl(self): + # We need perl to run webkit-build-directory to find out where the + # default configuration file is. See what happens if perl isn't + # installed. (We should get the default value, 'Release'). + c = self.make_config(exception=OSError) + actual = c.default_configuration() + self.assertEqual(actual, 'Release') + + def test_default_configuration__scripterror(self): + # We run webkit-build-directory to find out where the default + # configuration file is. See what happens if that script fails. + # (We should get the default value, 'Release'). + c = self.make_config(exception=executive.ScriptError()) + actual = c.default_configuration() + self.assertEqual(actual, 'Release') + + def test_path_from_webkit_base(self): + # FIXME: We use a real filesystem here. Should this move to a + # mocked one? + c = config.Config(executive.Executive(), filesystem.FileSystem()) + self.assertTrue(c.path_from_webkit_base('foo')) + + def test_webkit_base_dir(self): + # FIXME: We use a real filesystem here. Should this move to a + # mocked one? + c = config.Config(executive.Executive(), filesystem.FileSystem()) + base_dir = c.webkit_base_dir() + self.assertTrue(base_dir) + self.assertNotEqual(base_dir[-1], '/') + + orig_cwd = os.getcwd() + os.chdir(os.environ['HOME']) + c = config.Config(executive.Executive(), filesystem.FileSystem()) + try: + base_dir_2 = c.webkit_base_dir() + self.assertEqual(base_dir, base_dir_2) + finally: + os.chdir(orig_cwd) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py b/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py new file mode 100644 index 0000000..4ed34e6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This is a test implementation of the Port interface that generates the + correct output for every test. It can be used for perf testing, because + it is pretty much a lower limit on how fast a port can possibly run. + + This implementation acts as a wrapper around a real port (the real port + is held as a delegate object). To specify which port, use the port name + 'dryrun-XXX' (e.g., 'dryrun-chromium-mac-leopard'). If you use just + 'dryrun', it uses the default port. + + Note that because this is really acting as a wrapper around the underlying + port, you must be able to run the underlying port as well + (check_build() and check_sys_deps() must pass and auxiliary binaries + like layout_test_helper and httpd must work). + + This implementation also modifies the test expectations so that all + tests are either SKIPPED or expected to PASS.""" + +from __future__ import with_statement + +import os +import sys +import time + +from webkitpy.layout_tests.layout_package import test_output + +import base +import factory + + +class DryRunPort(object): + """DryRun implementation of the Port interface.""" + + def __init__(self, **kwargs): + pfx = 'dryrun-' + if 'port_name' in kwargs: + if kwargs['port_name'].startswith(pfx): + kwargs['port_name'] = kwargs['port_name'][len(pfx):] + else: + kwargs['port_name'] = None + self.__delegate = factory.get(**kwargs) + + def __getattr__(self, name): + return getattr(self.__delegate, name) + + def check_build(self, needs_http): + return True + + def check_sys_deps(self, needs_http): + return True + + def start_helper(self): + pass + + def start_http_server(self): + pass + + def start_websocket_server(self): + pass + + def stop_helper(self): + pass + + def stop_http_server(self): + pass + + def stop_websocket_server(self): + pass + + def create_driver(self, worker_number): + return DryrunDriver(self, worker_number) + + +class DryrunDriver(base.Driver): + """Dryrun implementation of the DumpRenderTree / Driver interface.""" + + def __init__(self, port, worker_number): + self._port = port + self._worker_number = worker_number + + def cmd_line(self): + return ['None'] + + def poll(self): + return None + + def run_test(self, test_input): + start_time = time.time() + text_output = self._port.expected_text(test_input.filename) + + if test_input.image_hash is not None: + image = self._port.expected_image(test_input.filename) + hash = self._port.expected_checksum(test_input.filename) + else: + image = None + hash = None + return test_output.TestOutput(text_output, image, hash, False, + time.time() - start_time, False, None) + + def start(self): + pass + + def stop(self): + pass diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory.py 
b/Tools/Scripts/webkitpy/layout_tests/port/factory.py new file mode 100644 index 0000000..6935744 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/factory.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Factory method to retrieve the appropriate port implementation.""" + + +import sys + +ALL_PORT_NAMES = ['test', 'dryrun', 'mac', 'win', 'gtk', 'qt', 'chromium-mac', + 'chromium-linux', 'chromium-win', 'google-chrome-win', + 'google-chrome-mac', 'google-chrome-linux32', 'google-chrome-linux64'] + + +def get(port_name=None, options=None, **kwargs): + """Returns an object implementing the Port interface. 
If + port_name is None, this routine attempts to guess at the most + appropriate port on this platform.""" + # Wrapped for backwards-compatibility + if port_name: + kwargs['port_name'] = port_name + if options: + kwargs['options'] = options + return _get_kwargs(**kwargs) + + +def _get_kwargs(**kwargs): + port_to_use = kwargs.get('port_name', None) + options = kwargs.get('options', None) + if port_to_use is None: + if sys.platform == 'win32' or sys.platform == 'cygwin': + if options and hasattr(options, 'chromium') and options.chromium: + port_to_use = 'chromium-win' + else: + port_to_use = 'win' + elif sys.platform == 'linux2': + port_to_use = 'chromium-linux' + elif sys.platform == 'darwin': + if options and hasattr(options, 'chromium') and options.chromium: + port_to_use = 'chromium-mac' + else: + port_to_use = 'mac' + + if port_to_use is None: + raise NotImplementedError('unknown port; sys.platform = "%s"' % + sys.platform) + + if port_to_use == 'test': + import test + maker = test.TestPort + elif port_to_use.startswith('dryrun'): + import dryrun + maker = dryrun.DryRunPort + elif port_to_use.startswith('mac'): + import mac + maker = mac.MacPort + elif port_to_use.startswith('win'): + import win + maker = win.WinPort + elif port_to_use.startswith('gtk'): + import gtk + maker = gtk.GtkPort + elif port_to_use.startswith('qt'): + import qt + maker = qt.QtPort + elif port_to_use.startswith('chromium-gpu'): + import chromium_gpu + maker = chromium_gpu.get + elif port_to_use.startswith('chromium-mac'): + import chromium_mac + maker = chromium_mac.ChromiumMacPort + elif port_to_use.startswith('chromium-linux'): + import chromium_linux + maker = chromium_linux.ChromiumLinuxPort + elif port_to_use.startswith('chromium-win'): + import chromium_win + maker = chromium_win.ChromiumWinPort + elif port_to_use.startswith('google-chrome'): + import google_chrome + maker = google_chrome.GetGoogleChromePort + else: + raise NotImplementedError('unsupported port: %s' % port_to_use) + return maker(**kwargs) + +def get_all(options=None): + """Returns all the objects implementing the Port interface.""" + return dict([(port_name, get(port_name, options=options)) + for port_name in ALL_PORT_NAMES]) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py new file mode 100644 index 0000000..978a557 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py @@ -0,0 +1,188 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import sys +import unittest + +from webkitpy.tool import mocktool + +import chromium_gpu +import chromium_linux +import chromium_mac +import chromium_win +import dryrun +import factory +import google_chrome +import gtk +import mac +import qt +import test +import win + + +class FactoryTest(unittest.TestCase): + """Test factory creates proper port object for the target. + + Target is specified by port_name, sys.platform and options. + + """ + # FIXME: The ports themselves should expose what options they require, + # instead of passing generic "options". + + def setUp(self): + self.real_sys_platform = sys.platform + self.webkit_options = mocktool.MockOptions(pixel_tests=False) + self.chromium_options = mocktool.MockOptions(pixel_tests=False, + chromium=True) + + def tearDown(self): + sys.platform = self.real_sys_platform + + def assert_port(self, port_name, expected_port, port_obj=None): + """Helper assert for port_name. + + Args: + port_name: port name to get port object. + expected_port: class of expected port object. + port_obj: optional port object + """ + port_obj = port_obj or factory.get(port_name=port_name) + self.assertTrue(isinstance(port_obj, expected_port)) + + def assert_platform_port(self, platform, options, expected_port): + """Helper assert for platform and options. + + Args: + platform: sys.platform. + options: options to get port object. + expected_port: class of expected port object. + + """ + orig_platform = sys.platform + sys.platform = platform + self.assertTrue(isinstance(factory.get(options=options), + expected_port)) + sys.platform = orig_platform + + def test_test(self): + self.assert_port("test", test.TestPort) + + def test_dryrun(self): + self.assert_port("dryrun-test", dryrun.DryRunPort) + self.assert_port("dryrun-mac", dryrun.DryRunPort) + + def test_mac(self): + self.assert_port("mac", mac.MacPort) + self.assert_platform_port("darwin", None, mac.MacPort) + self.assert_platform_port("darwin", self.webkit_options, mac.MacPort) + + def test_win(self): + self.assert_port("win", win.WinPort) + self.assert_platform_port("win32", None, win.WinPort) + self.assert_platform_port("win32", self.webkit_options, win.WinPort) + self.assert_platform_port("cygwin", None, win.WinPort) + self.assert_platform_port("cygwin", self.webkit_options, win.WinPort) + + def test_google_chrome(self): + # The actual Chrome class names aren't available so we test that the + # objects we get are at least subclasses of the Chromium versions. 
+ self.assert_port("google-chrome-linux32", + chromium_linux.ChromiumLinuxPort) + self.assert_port("google-chrome-linux64", + chromium_linux.ChromiumLinuxPort) + self.assert_port("google-chrome-win", + chromium_win.ChromiumWinPort) + self.assert_port("google-chrome-mac", + chromium_mac.ChromiumMacPort) + + def test_gtk(self): + self.assert_port("gtk", gtk.GtkPort) + + def test_qt(self): + self.assert_port("qt", qt.QtPort) + + def test_chromium_gpu_linux(self): + self.assert_port("chromium-gpu-linux", chromium_gpu.ChromiumGpuLinuxPort) + + def test_chromium_gpu_mac(self): + self.assert_port("chromium-gpu-mac", chromium_gpu.ChromiumGpuMacPort) + + def test_chromium_gpu_win(self): + self.assert_port("chromium-gpu-win", chromium_gpu.ChromiumGpuWinPort) + + def test_chromium_mac(self): + self.assert_port("chromium-mac", chromium_mac.ChromiumMacPort) + self.assert_platform_port("darwin", self.chromium_options, + chromium_mac.ChromiumMacPort) + + def test_chromium_linux(self): + self.assert_port("chromium-linux", chromium_linux.ChromiumLinuxPort) + self.assert_platform_port("linux2", self.chromium_options, + chromium_linux.ChromiumLinuxPort) + + def test_chromium_win(self): + self.assert_port("chromium-win", chromium_win.ChromiumWinPort) + self.assert_platform_port("win32", self.chromium_options, + chromium_win.ChromiumWinPort) + self.assert_platform_port("cygwin", self.chromium_options, + chromium_win.ChromiumWinPort) + + def test_get_all_ports(self): + ports = factory.get_all() + for name in factory.ALL_PORT_NAMES: + self.assertTrue(name in ports.keys()) + self.assert_port("test", test.TestPort, ports["test"]) + self.assert_port("dryrun-test", dryrun.DryRunPort, ports["dryrun"]) + self.assert_port("dryrun-mac", dryrun.DryRunPort, ports["dryrun"]) + self.assert_port("mac", mac.MacPort, ports["mac"]) + self.assert_port("win", win.WinPort, ports["win"]) + self.assert_port("gtk", gtk.GtkPort, ports["gtk"]) + self.assert_port("qt", qt.QtPort, ports["qt"]) + self.assert_port("chromium-mac", chromium_mac.ChromiumMacPort, + ports["chromium-mac"]) + self.assert_port("chromium-linux", chromium_linux.ChromiumLinuxPort, + ports["chromium-linux"]) + self.assert_port("chromium-win", chromium_win.ChromiumWinPort, + ports["chromium-win"]) + + def test_unknown_specified(self): + # Test what happens when you specify an unknown port. + orig_platform = sys.platform + self.assertRaises(NotImplementedError, factory.get, + port_name='unknown') + + def test_unknown_default(self): + # Test what happens when you're running on an unknown platform. + orig_platform = sys.platform + sys.platform = 'unknown' + self.assertRaises(NotImplementedError, factory.get) + sys.platform = orig_platform + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/google_chrome.py b/Tools/Scripts/webkitpy/layout_tests/port/google_chrome.py new file mode 100644 index 0000000..8d94bb5 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/google_chrome.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import codecs +import os + + +def _test_expectations_overrides(port, super): + # The chrome ports use the regular overrides plus anything in the + # official test_expectations as well. Hopefully we don't get collisions. + chromium_overrides = super.test_expectations_overrides(port) + + # FIXME: It used to be that AssertionError would get raised by + # path_from_chromium_base() if we weren't in a Chromium checkout, but + # this changed in r60427. This should probably be changed back. + overrides_path = port.path_from_chromium_base('webkit', 'tools', + 'layout_tests', 'test_expectations_chrome.txt') + if not os.path.exists(overrides_path): + return chromium_overrides + + with codecs.open(overrides_path, "r", "utf-8") as file: + if chromium_overrides: + return chromium_overrides + file.read() + else: + return file.read() + +def GetGoogleChromePort(**kwargs): + """Some tests have slightly different results when compiled as Google + Chrome vs Chromium. 
In those cases, we prepend an additional directory to + to the baseline paths.""" + port_name = kwargs['port_name'] + del kwargs['port_name'] + if port_name == 'google-chrome-linux32': + import chromium_linux + + class GoogleChromeLinux32Port(chromium_linux.ChromiumLinuxPort): + def baseline_search_path(self): + paths = chromium_linux.ChromiumLinuxPort.baseline_search_path( + self) + paths.insert(0, self._webkit_baseline_path( + 'google-chrome-linux32')) + return paths + + def test_expectations_overrides(self): + return _test_expectations_overrides(self, + chromium_linux.ChromiumLinuxPort) + + return GoogleChromeLinux32Port(**kwargs) + elif port_name == 'google-chrome-linux64': + import chromium_linux + + class GoogleChromeLinux64Port(chromium_linux.ChromiumLinuxPort): + def baseline_search_path(self): + paths = chromium_linux.ChromiumLinuxPort.baseline_search_path( + self) + paths.insert(0, self._webkit_baseline_path( + 'google-chrome-linux64')) + return paths + + def test_expectations_overrides(self): + return _test_expectations_overrides(self, + chromium_linux.ChromiumLinuxPort) + + return GoogleChromeLinux64Port(**kwargs) + elif port_name.startswith('google-chrome-mac'): + import chromium_mac + + class GoogleChromeMacPort(chromium_mac.ChromiumMacPort): + def baseline_search_path(self): + paths = chromium_mac.ChromiumMacPort.baseline_search_path( + self) + paths.insert(0, self._webkit_baseline_path( + 'google-chrome-mac')) + return paths + + def test_expectations_overrides(self): + return _test_expectations_overrides(self, + chromium_mac.ChromiumMacPort) + + return GoogleChromeMacPort(**kwargs) + elif port_name.startswith('google-chrome-win'): + import chromium_win + + class GoogleChromeWinPort(chromium_win.ChromiumWinPort): + def baseline_search_path(self): + paths = chromium_win.ChromiumWinPort.baseline_search_path( + self) + paths.insert(0, self._webkit_baseline_path( + 'google-chrome-win')) + return paths + + def test_expectations_overrides(self): + return _test_expectations_overrides(self, + chromium_win.ChromiumWinPort) + + return GoogleChromeWinPort(**kwargs) + raise NotImplementedError('unsupported port: %s' % port_name) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py new file mode 100644 index 0000000..e60c274 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/google_chrome_unittest.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. + +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import codecs +import os +import unittest + +from webkitpy.common import newstringio + +import factory +import google_chrome + + +class GetGoogleChromePortTest(unittest.TestCase): + def test_get_google_chrome_port(self): + test_ports = ('google-chrome-linux32', 'google-chrome-linux64', + 'google-chrome-mac', 'google-chrome-win') + for port in test_ports: + self._verify_baseline_path(port, port) + self._verify_expectations_overrides(port) + + self._verify_baseline_path('google-chrome-mac', 'google-chrome-mac-leopard') + self._verify_baseline_path('google-chrome-win', 'google-chrome-win-xp') + self._verify_baseline_path('google-chrome-win', 'google-chrome-win-vista') + + def _verify_baseline_path(self, expected_path, port_name): + port = google_chrome.GetGoogleChromePort(port_name=port_name, + options=None) + path = port.baseline_search_path()[0] + self.assertEqual(expected_path, os.path.split(path)[1]) + + def _verify_expectations_overrides(self, port_name): + # FIXME: make this more robust when we have the Tree() abstraction. + # we should be able to test for the files existing or not, and + # be able to control the contents better. + + chromium_port = factory.get("chromium-mac") + chromium_overrides = chromium_port.test_expectations_overrides() + port = google_chrome.GetGoogleChromePort(port_name=port_name, + options=None) + + orig_exists = os.path.exists + orig_open = codecs.open + expected_string = "// hello, world\n" + + def mock_exists_chrome_not_found(path): + if 'test_expectations_chrome.txt' in path: + return False + return orig_exists(path) + + def mock_exists_chrome_found(path): + if 'test_expectations_chrome.txt' in path: + return True + return orig_exists(path) + + def mock_open(path, mode, encoding): + if 'test_expectations_chrome.txt' in path: + return newstringio.StringIO(expected_string) + return orig_open(path, mode, encoding) + + try: + os.path.exists = mock_exists_chrome_not_found + chrome_overrides = port.test_expectations_overrides() + self.assertEqual(chromium_overrides, chrome_overrides) + + os.path.exists = mock_exists_chrome_found + codecs.open = mock_open + chrome_overrides = port.test_expectations_overrides() + if chromium_overrides: + self.assertEqual(chrome_overrides, + chromium_overrides + expected_string) + else: + self.assertEqual(chrome_overrides, expected_string) + finally: + os.path.exists = orig_exists + codecs.open = orig_open + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py new file mode 100644 index 0000000..a18fdff --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py @@ -0,0 +1,116 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""WebKit Gtk implementation of the Port interface.""" + +import logging +import os +import signal + +from webkitpy.layout_tests.port.webkit import WebKitPort + +_log = logging.getLogger("webkitpy.layout_tests.port.gtk") + + +class GtkPort(WebKitPort): + """WebKit Gtk implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'gtk') + WebKitPort.__init__(self, **kwargs) + + def _tests_for_other_platforms(self): + # FIXME: This list could be dynamic based on platform name and + # pushed into base.Port. + # This really need to be automated. + return [ + "platform/chromium", + "platform/win", + "platform/qt", + "platform/mac", + ] + + def _path_to_apache_config_file(self): + # FIXME: This needs to detect the distribution and change config files. + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'apache2-debian-httpd.conf') + + def _shut_down_http_server(self, server_pid): + """Shut down the httpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # FIXME: This isn't ideal, since it could conflict with + # lighttpd processes not started by http_server.py, + # but good enough for now. + self._executive.kill_all('apache2') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid + # file), so if kill fails on the given PID, just try to + # 'killall' web servers. 
+ self._shut_down_http_server(None) + + def _path_to_driver(self): + return self._build_path('Programs', 'DumpRenderTree') + + def check_build(self, needs_http): + if not self._check_driver(): + return False + return True + + def _path_to_apache(self): + if self._is_redhat_based(): + return '/usr/sbin/httpd' + else: + return '/usr/sbin/apache2' + + def _path_to_apache_config_file(self): + if self._is_redhat_based(): + config_name = 'fedora-httpd.conf' + else: + config_name = 'apache2-debian-httpd.conf' + + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + config_name) + + def _path_to_wdiff(self): + if self._is_redhat_based(): + return '/usr/bin/dwdiff' + else: + return '/usr/bin/wdiff' + + def _is_redhat_based(self): + return os.path.exists(os.path.join('/etc', 'redhat-release')) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py new file mode 100644 index 0000000..f5946b6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
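# The module below serializes concurrent new-run-webkit-tests instances with
# numbered lock files plus a guard lock, handing out slots in
# first-come-first-served order. A rough, standalone sketch of that
# numbered-lock-file idea using only the standard library (the prefix and
# helper names here are illustrative, not the ones webkitpy uses):

import glob
import os
import tempfile

EXAMPLE_LOCK_PREFIX = os.path.join(tempfile.gettempdir(), "Example.lock.")


def example_next_lock_number():
    # Existing locks are ordered by numeric suffix; the next slot is one past
    # the highest number currently on disk (or 0 if there are none).
    numbers = [int(path[len(EXAMPLE_LOCK_PREFIX):])
               for path in glob.glob(EXAMPLE_LOCK_PREFIX + "*")]
    return max(numbers) + 1 if numbers else 0


def example_create_lock_file():
    # Record our pid in the newly claimed slot so a stale lock can be spotted.
    path = EXAMPLE_LOCK_PREFIX + str(example_next_lock_number())
    lock_file = open(path, "w")
    lock_file.write(str(os.getpid()))
    lock_file.close()
    return path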
+ +"""This class helps to block NRWT threads when more NRWTs run +http and websocket tests in a same time.""" + +import glob +import logging +import os +import sys +import tempfile +import time + +from webkitpy.common.system.executive import Executive +from webkitpy.common.system.file_lock import FileLock +from webkitpy.common.system.filesystem import FileSystem + + +_log = logging.getLogger("webkitpy.layout_tests.port.http_lock") + + +class HttpLock(object): + + def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", + guard_lock="WebKit.lock"): + self._lock_path = lock_path + if not self._lock_path: + self._lock_path = tempfile.gettempdir() + self._lock_file_prefix = lock_file_prefix + self._lock_file_path_prefix = os.path.join(self._lock_path, + self._lock_file_prefix) + self._guard_lock_file = os.path.join(self._lock_path, guard_lock) + self._guard_lock = FileLock(self._guard_lock_file) + self._process_lock_file_name = "" + self._executive = Executive() + + def cleanup_http_lock(self): + """Delete the lock file if exists.""" + if os.path.exists(self._process_lock_file_name): + _log.debug("Removing lock file: %s" % self._process_lock_file_name) + FileSystem().remove(self._process_lock_file_name) + + def _extract_lock_number(self, lock_file_name): + """Return the lock number from lock file.""" + prefix_length = len(self._lock_file_path_prefix) + return int(lock_file_name[prefix_length:]) + + def _lock_file_list(self): + """Return the list of lock files sequentially.""" + lock_list = glob.glob(self._lock_file_path_prefix + '*') + lock_list.sort(key=self._extract_lock_number) + return lock_list + + def _next_lock_number(self): + """Return the next available lock number.""" + lock_list = self._lock_file_list() + if not lock_list: + return 0 + return self._extract_lock_number(lock_list[-1]) + 1 + + def _curent_lock_pid(self): + """Return with the current lock pid. If the lock is not valid + it deletes the lock file.""" + lock_list = self._lock_file_list() + if not lock_list: + return + try: + current_lock_file = open(lock_list[0], 'r') + current_pid = current_lock_file.readline() + current_lock_file.close() + if not (current_pid and self._executive.check_running_pid(int(current_pid))): + _log.debug("Removing stuck lock file: %s" % lock_list[0]) + FileSystem().remove(lock_list[0]) + return + except (IOError, OSError): + return + return int(current_pid) + + def _create_lock_file(self): + """The lock files are used to schedule the running test sessions in first + come first served order. The guard lock ensures that the lock numbers are + sequential.""" + if not os.path.exists(self._lock_path): + _log.debug("Lock directory does not exist: %s" % self._lock_path) + return False + + if not self._guard_lock.acquire_lock(): + _log.debug("Guard lock timed out!") + return False + + self._process_lock_file_name = (self._lock_file_path_prefix + + str(self._next_lock_number())) + _log.debug("Creating lock file: %s" % self._process_lock_file_name) + lock_file = open(self._process_lock_file_name, 'w') + lock_file.write(str(os.getpid())) + lock_file.close() + self._guard_lock.release_lock() + return True + + + def wait_for_httpd_lock(self): + """Create a lock file and wait until it's turn comes. 
If something goes wrong + it wont do any locking.""" + if not self._create_lock_file(): + _log.debug("Warning, http locking failed!") + return + + while self._curent_lock_pid() != os.getpid(): + time.sleep(1) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py new file mode 100644 index 0000000..85c760a --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UNIVERSITY OF SZEGED OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import glob +import http_lock +import os +import unittest + + +class HttpLockTest(unittest.TestCase): + + def __init__(self, testFunc): + self.http_lock_obj = http_lock.HttpLock(None, "WebKitTestHttpd.lock.", "WebKitTest.lock") + self.lock_file_path_prefix = os.path.join(self.http_lock_obj._lock_path, + self.http_lock_obj._lock_file_prefix) + self.lock_file_name = self.lock_file_path_prefix + "0" + self.guard_lock_file = self.http_lock_obj._guard_lock_file + self.clean_all_lockfile() + unittest.TestCase.__init__(self, testFunc) + + def clean_all_lockfile(self): + if os.path.exists(self.guard_lock_file): + os.unlink(self.guard_lock_file) + lock_list = glob.glob(self.lock_file_path_prefix + '*') + for file_name in lock_list: + os.unlink(file_name) + + def assertEqual(self, first, second): + if first != second: + self.clean_all_lockfile() + unittest.TestCase.assertEqual(self, first, second) + + def _check_lock_file(self): + if os.path.exists(self.lock_file_name): + pid = os.getpid() + lock_file = open(self.lock_file_name, 'r') + lock_file_pid = lock_file.readline() + lock_file.close() + self.assertEqual(pid, int(lock_file_pid)) + return True + return False + + def test_lock_lifecycle(self): + self.http_lock_obj._create_lock_file() + + self.assertEqual(True, self._check_lock_file()) + self.assertEqual(1, self.http_lock_obj._next_lock_number()) + + self.http_lock_obj.cleanup_http_lock() + + self.assertEqual(False, self._check_lock_file()) + self.assertEqual(0, self.http_lock_obj._next_lock_number()) + + def test_extract_lock_number(self,): + lock_file_list = ( + self.lock_file_path_prefix + "00", + self.lock_file_path_prefix + "9", + self.lock_file_path_prefix + "001", + self.lock_file_path_prefix + "021", + ) + + expected_number_list = (0, 9, 1, 21) + + for lock_file, expected in zip(lock_file_list, expected_number_list): + self.assertEqual(self.http_lock_obj._extract_lock_number(lock_file), expected) + + def test_lock_file_list(self): + lock_file_list = [ + self.lock_file_path_prefix + "6", + self.lock_file_path_prefix + "1", + self.lock_file_path_prefix + "4", + self.lock_file_path_prefix + "3", + ] + + expected_file_list = [ + self.lock_file_path_prefix + "1", + self.lock_file_path_prefix + "3", + self.lock_file_path_prefix + "4", + self.lock_file_path_prefix + "6", + ] + + for file_name in lock_file_list: + open(file_name, 'w') + + self.assertEqual(self.http_lock_obj._lock_file_list(), expected_file_list) + + for file_name in lock_file_list: + os.unlink(file_name) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_server.py b/Tools/Scripts/webkitpy/layout_tests/port/http_server.py new file mode 100755 index 0000000..bd75e27 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/http_server.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to help start/stop the lighttpd server used by layout tests.""" + +from __future__ import with_statement + +import codecs +import logging +import optparse +import os +import shutil +import subprocess +import sys +import tempfile +import time +import urllib + +import factory +import http_server_base + +_log = logging.getLogger("webkitpy.layout_tests.port.http_server") + + +class HttpdNotStarted(Exception): + pass + + +class Lighttpd(http_server_base.HttpServerBase): + + def __init__(self, port_obj, output_dir, background=False, port=None, + root=None, run_background=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + # Webkit tests + http_server_base.HttpServerBase.__init__(self, port_obj) + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._run_background = run_background + if self._port: + self._port = int(self._port) + + try: + self._webkit_tests = os.path.join( + self._port_obj.layout_tests_dir(), 'http', 'tests') + self._js_test_resource = os.path.join( + self._port_obj.layout_tests_dir(), 'fast', 'js', 'resources') + except: + self._webkit_tests = None + self._js_test_resource = None + + # Self generated certificate for SSL server (for client cert get + # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt) + self._pem_file = os.path.join( + os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem') + + # One mapping where we can get to everything + self.VIRTUALCONFIG = [] + + if self._webkit_tests: + self.VIRTUALCONFIG.extend( + # Three mappings (one with SSL) for LayoutTests http tests + [{'port': 8000, 'docroot': self._webkit_tests}, + {'port': 8080, 'docroot': self._webkit_tests}, + {'port': 8443, 'docroot': self._webkit_tests, + 'sslcert': self._pem_file}]) + + def is_running(self): + return self._process != None + + def start(self): + if self.is_running(): + raise 'Lighttpd already running' + + base_conf_file = self._port_obj.path_from_webkit_base('Tools', + 'Scripts', 'webkitpy', 'layout_tests', 'port', 'lighttpd.conf') + out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf') + time_str = time.strftime("%d%b%Y-%H%M%S") + access_file_name = "access.log-" + time_str + ".txt" + access_log = os.path.join(self._output_dir, access_file_name) + log_file_name = "error.log-" + time_str + ".txt" + error_log = os.path.join(self._output_dir, log_file_name) + + # Remove old log files. We only need to keep the last ones. 
+ self.remove_log_files(self._output_dir, "access.log-") + self.remove_log_files(self._output_dir, "error.log-") + + # Write out the config + with codecs.open(base_conf_file, "r", "utf-8") as file: + base_conf = file.read() + + # FIXME: This should be re-worked so that this block can + # use with open() instead of a manual file.close() call. + # lighttpd.conf files seem to be UTF-8 without BOM: + # http://redmine.lighttpd.net/issues/992 + f = codecs.open(out_conf_file, "w", "utf-8") + f.write(base_conf) + + # Write out our cgi handlers. Run perl through env so that it + # processes the #! line and runs perl with the proper command + # line arguments. Emulate apache's mod_asis with a cat cgi handler. + f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n' + ' ".pl" => "/usr/bin/env",\n' + ' ".asis" => "/bin/cat",\n' + ' ".php" => "%s" )\n\n') % + self._port_obj._path_to_lighttpd_php()) + + # Setup log files + f.write(('server.errorlog = "%s"\n' + 'accesslog.filename = "%s"\n\n') % (error_log, access_log)) + + # Setup upload folders. Upload folder is to hold temporary upload files + # and also POST data. This is used to support XHR layout tests that + # does POST. + f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir)) + + # Setup a link to where the js test templates are stored + f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') % + (self._js_test_resource)) + + # dump out of virtual host config at the bottom. + if self._root: + if self._port: + # Have both port and root dir. + mappings = [{'port': self._port, 'docroot': self._root}] + else: + # Have only a root dir - set the ports as for LayoutTests. + # This is used in ui_tests to run http tests against a browser. + + # default set of ports as for LayoutTests but with a + # specified root. + mappings = [{'port': 8000, 'docroot': self._root}, + {'port': 8080, 'docroot': self._root}, + {'port': 8443, 'docroot': self._root, + 'sslcert': self._pem_file}] + else: + mappings = self.VIRTUALCONFIG + for mapping in mappings: + ssl_setup = '' + if 'sslcert' in mapping: + ssl_setup = (' ssl.engine = "enable"\n' + ' ssl.pemfile = "%s"\n' % mapping['sslcert']) + + f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n' + ' server.document-root = "%s"\n' + + ssl_setup + + '}\n\n') % (mapping['port'], mapping['docroot'])) + f.close() + + executable = self._port_obj._path_to_lighttpd() + module_path = self._port_obj._path_to_lighttpd_modules() + start_cmd = [executable, + # Newly written config file + '-f', os.path.join(self._output_dir, 'lighttpd.conf'), + # Where it can find its module dynamic libraries + '-m', module_path] + + if not self._run_background: + start_cmd.append(# Don't background + '-D') + + # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the + # bug that mod_alias.so loads it from the hard coded path. + if sys.platform == 'darwin': + tmp_module_path = '/tmp/lighttpd/lib' + if not os.path.exists(tmp_module_path): + os.makedirs(tmp_module_path) + lib_file = 'liblightcomp.dylib' + shutil.copyfile(os.path.join(module_path, lib_file), + os.path.join(tmp_module_path, lib_file)) + + env = self._port_obj.setup_environ_for_server() + _log.debug('Starting http server') + # FIXME: Should use Executive.run_command + self._process = subprocess.Popen(start_cmd, env=env) + + # Wait for server to start. 
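        # The wait below uses the simple poll-until-deadline pattern provided
        # by wait_for_action() in http_server_base. A generic standalone
        # version of that pattern looks roughly like this (hypothetical
        # helper, defined here only as an illustration and never called):
        def _example_poll_until(predicate, timeout_seconds=20, interval=1):
            deadline = time.time() + timeout_seconds
            while time.time() < deadline:
                if predicate():
                    return True
                time.sleep(interval)
            return False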
+ self.mappings = mappings + server_started = self.wait_for_action( + self.is_server_running_on_all_ports) + + # Our process terminated already + if not server_started or self._process.returncode != None: + raise google.httpd_utils.HttpdNotStarted('Failed to start httpd.') + + _log.debug("Server successfully started") + + # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are + # probably not being flushed, etc... why doesn't our python have os.kill ? + + def stop(self, force=False): + if not force and not self.is_running(): + return + + httpd_pid = None + if self._process: + httpd_pid = self._process.pid + self._port_obj._shut_down_http_server(httpd_pid) + + if self._process: + # wait() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + self._process.wait() + self._process = None diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_server_base.py b/Tools/Scripts/webkitpy/layout_tests/port/http_server_base.py new file mode 100644 index 0000000..52a0403 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/http_server_base.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Base class with common routines between the Apache and Lighttpd servers.""" + +import logging +import os +import time +import urllib + +from webkitpy.common.system import filesystem + +_log = logging.getLogger("webkitpy.layout_tests.port.http_server_base") + + +class HttpServerBase(object): + + def __init__(self, port_obj): + self._port_obj = port_obj + + def wait_for_action(self, action): + """Repeat the action for 20 seconds or until it succeeds. 
Returns + whether it succeeded.""" + start_time = time.time() + while time.time() - start_time < 20: + if action(): + return True + _log.debug("Waiting for action: %s" % action) + time.sleep(1) + + return False + + def is_server_running_on_all_ports(self): + """Returns whether the server is running on all the desired ports.""" + for mapping in self.mappings: + if 'sslcert' in mapping: + http_suffix = 's' + else: + http_suffix = '' + + url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port']) + + try: + response = urllib.urlopen(url) + _log.debug("Server running at %s" % url) + except IOError, e: + _log.debug("Server NOT running at %s: %s" % (url, e)) + return False + + return True + + def remove_log_files(self, folder, starts_with): + files = os.listdir(folder) + for file in files: + if file.startswith(starts_with): + full_path = os.path.join(folder, file) + filesystem.FileSystem().remove(full_path) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/httpd2.pem b/Tools/Scripts/webkitpy/layout_tests/port/httpd2.pem new file mode 100644 index 0000000..6349b78 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/httpd2.pem @@ -0,0 +1,41 @@ +-----BEGIN CERTIFICATE----- +MIIEZDCCAkygAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMRAwDgYDVQQDEwdUZXN0 +IENBMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMN +TW91bnRhaW4gVmlldzESMBAGA1UEChMJQ2VydCBUZXN0MB4XDTA4MDcyODIyMzIy +OFoXDTEzMDcyNzIyMzIyOFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm +b3JuaWExEjAQBgNVBAoTCUNlcnQgVGVzdDESMBAGA1UEAxMJMTI3LjAuMC4xMIGf +MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU3 +3BdRCd67DFM44GRrsjDSH4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYG +qgNiV2ywxTlMj7NlN2C7SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a +3JPJe1TaIab5GwIDAQABo4HCMIG/MAkGA1UdEwQCMAAwHQYDVR0OBBYEFCYLBv5K +x5sLNVlpLh5FwTwhdDl7MIGSBgNVHSMEgYowgYeAFF3Of5nj1BlBMU/Gz7El9Vqv +45cxoWSkYjBgMRAwDgYDVQQDEwdUZXN0IENBMQswCQYDVQQGEwJVUzETMBEGA1UE +CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzESMBAGA1UEChMJ +Q2VydCBUZXN0ggkA1FGT1D/e2U4wDQYJKoZIhvcNAQEFBQADggIBAEtkVmLObUgk +b2cIA2S+QDtifq1UgVfBbytvR2lFmnADOR55mo0gHQG3HHqq4g034LmoVXDHhUk8 +Gb6aFiv4QubmVhLXcUelTRXwiNvGzkW7pC6Jrq105hdPjzXMKTcmiLaopm5Fqfc7 +hj5Cn1Sjspc8pdeQjrbeMdvca7KlFrGP8YkwCU2xOOX9PiN9G0966BWfjnr/fZZp ++OQVuUFHdiAZwthEMuDpAAXHqYXIsermgdOpgJaA53cf8NqBV2QGhtFgtsJCRoiu +7DKqhyRWBGyz19VIH2b7y+6qvQVxuHk19kKRM0nftw/yNcJnm7gtttespMUPsOMa +a2SD1G0hm0TND6vxaBhgR3cVqpl/qIpAdFi00Tm7hTyYE7I43zPW03t+/DpCt3Um +EMRZsQ90co5q+bcx/vQ7YAtwUh30uMb0wpibeyCwDp8cqNmSiRkEuc/FjTYes5t8 +5gR//WX1l0+qjrjusO9NmoLnq2Yk6UcioX+z+q6Z/dudGfqhLfeWD2Q0LWYA242C +d7km5Y3KAt1PJdVsof/aiVhVdddY/OIEKTRQhWEdDbosy2eh16BCKXT2FFvhNDg1 +AYFvn6I8nj9IldMJiIc3DdhacEAEzRMeRgPdzAa1griKUGknxsyTyRii8ru0WS6w +DCNrlDOVXdzYGEZooBI76BDVY0W0akjV +-----END CERTIFICATE----- +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU33BdRCd67DFM44GRrsjDS +H4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYGqgNiV2ywxTlMj7NlN2C7 +SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a3JPJe1TaIab5GwIDAQAB +AoGANHXu8z2YIzlhE+bwhGm8MGBpKL3qhRuKjeriqMA36tWezOw8lY4ymEAU+Ulv +BsCdaxqydQoTYou57m4TyUHEcxq9pq3H0zB0qL709DdHi/t4zbV9XIoAzC5v0/hG +9+Ca29TwC02FCw+qLkNrtwCpwOcQmc+bPxqvFu1iMiahURECQQD2I/Hi2413CMZz +TBjl8fMiVO9GhA2J0sc8Qi+YcgJakaLD9xcbaiLkTzPZDlA389C1b6Ia+poAr4YA +Ve0FFbxpAkEA2OobayyHE/QtPEqoy6NLR57jirmVBNmSWWd4lAyL5UIHIYVttJZg +8CLvbzaU/iDGwR+wKsM664rKPHEmtlyo4wJBAMeSqYO5ZOCJGu9NWjrHjM3fdAsG +8zs2zhiLya+fcU0iHIksBW5TBmt71Jw/wMc9R5J1K0kYvFml98653O5si1ECQBCk +RV4/mE1rmlzZzYFyEcB47DQkcM5ictvxGEsje0gnfKyRtAz6zI0f4QbDRUMJ+LWw 
+XK+rMsYHa+SfOb0b9skCQQCLdeonsIpFDv/Uv+flHISy0WA+AFkLXrRkBKh6G/OD +dMHaNevkJgUnpceVEnkrdenp5CcEoFTI17pd+nBgDm/B +-----END RSA PRIVATE KEY----- diff --git a/Tools/Scripts/webkitpy/layout_tests/port/lighttpd.conf b/Tools/Scripts/webkitpy/layout_tests/port/lighttpd.conf new file mode 100644 index 0000000..26ca22f --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/lighttpd.conf @@ -0,0 +1,90 @@ +server.tag = "LightTPD/1.4.19 (Win32)" +server.modules = ( "mod_accesslog", + "mod_alias", + "mod_cgi", + "mod_rewrite" ) + +# default document root required +server.document-root = "." + +# files to check for if .../ is requested +index-file.names = ( "index.php", "index.pl", "index.cgi", + "index.html", "index.htm", "default.htm" ) +# mimetype mapping +mimetype.assign = ( + ".gif" => "image/gif", + ".jpg" => "image/jpeg", + ".jpeg" => "image/jpeg", + ".png" => "image/png", + ".svg" => "image/svg+xml", + ".css" => "text/css", + ".html" => "text/html", + ".htm" => "text/html", + ".xhtml" => "application/xhtml+xml", + ".xhtmlmp" => "application/vnd.wap.xhtml+xml", + ".js" => "application/x-javascript", + ".log" => "text/plain", + ".conf" => "text/plain", + ".text" => "text/plain", + ".txt" => "text/plain", + ".dtd" => "text/xml", + ".xml" => "text/xml", + ".manifest" => "text/cache-manifest", + ) + +# Use the "Content-Type" extended attribute to obtain mime type if possible +mimetype.use-xattr = "enable" + +## +# which extensions should not be handle via static-file transfer +# +# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi +static-file.exclude-extensions = ( ".php", ".pl", ".cgi" ) + +server.bind = "localhost" +server.port = 8001 + +## virtual directory listings +dir-listing.activate = "enable" +#dir-listing.encoding = "iso-8859-2" +#dir-listing.external-css = "style/oldstyle.css" + +## enable debugging +#debug.log-request-header = "enable" +#debug.log-response-header = "enable" +#debug.log-request-handling = "enable" +#debug.log-file-not-found = "enable" + +#### SSL engine +#ssl.engine = "enable" +#ssl.pemfile = "server.pem" + +# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html) +# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess +# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html. +# See the apache rewrite rule at +# LayoutTests/http/tests/appcache/resources/intercept/.htaccess +url.rewrite-once = ( + "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php", + "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php" +) + +# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess +# to override charset for reply2.txt, reply2.xml, and reply4.txt. +$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" { + mimetype.assign = ( + ".txt" => "text/plain; charset=windows-1251", + ".xml" => "text/xml; charset=windows-1251" + ) +} +$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" { + mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" ) +} + +# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess +# to override mime type for wrong-content-type.manifest. +$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" { + mimetype.assign = ( ".manifest" => "text/plain" ) +} + +# Autogenerated test-specific config follows. 
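The http_server.py module shown earlier appends per-run virtual host sections after the static configuration above. The following is a simplified, standalone sketch of rendering such $SERVER["socket"] blocks from a list of port/docroot mappings; the function name, paths, and exact formatting are illustrative, not the precise strings webkitpy writes.

def example_virtual_host_blocks(mappings):
    """Render a lighttpd $SERVER["socket"] section for each port mapping."""
    blocks = []
    for mapping in mappings:
        ssl_setup = ''
        if 'sslcert' in mapping:
            # Enable SSL only for mappings that carry a certificate.
            ssl_setup = ('  ssl.engine = "enable"\n'
                         '  ssl.pemfile = "%s"\n' % mapping['sslcert'])
        blocks.append('$SERVER["socket"] == "127.0.0.1:%d" {\n'
                      '  server.document-root = "%s"\n'
                      '%s'
                      '}\n' % (mapping['port'], mapping['docroot'], ssl_setup))
    return '\n'.join(blocks)


# Example usage with the same three-port layout used for the LayoutTests
# http tests (paths are placeholders):
print example_virtual_host_blocks(
    [{'port': 8000, 'docroot': '/tmp/docroot'},
     {'port': 8080, 'docroot': '/tmp/docroot'},
     {'port': 8443, 'docroot': '/tmp/docroot', 'sslcert': '/tmp/httpd2.pem'}])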
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py new file mode 100644 index 0000000..696e339 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py @@ -0,0 +1,152 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""WebKit Mac implementation of the Port interface.""" + +import logging +import os +import platform +import signal + +import webkitpy.common.system.ospath as ospath +import webkitpy.layout_tests.port.server_process as server_process +from webkitpy.layout_tests.port.webkit import WebKitPort, WebKitDriver + +_log = logging.getLogger("webkitpy.layout_tests.port.mac") + + +class MacPort(WebKitPort): + """WebKit Mac implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'mac' + self.version()) + WebKitPort.__init__(self, **kwargs) + + def default_child_processes(self): + # FIXME: new-run-webkit-tests is unstable on Mac running more than + # four threads in parallel. + # See https://bugs.webkit.org/show_bug.cgi?id=36622 + child_processes = WebKitPort.default_child_processes(self) + if child_processes > 4: + return 4 + return child_processes + + def baseline_search_path(self): + port_names = [] + if self._name == 'mac-tiger': + port_names.append("mac-tiger") + if self._name in ('mac-tiger', 'mac-leopard'): + port_names.append("mac-leopard") + if self._name in ('mac-tiger', 'mac-leopard', 'mac-snowleopard'): + port_names.append("mac-snowleopard") + port_names.append("mac") + return map(self._webkit_baseline_path, port_names) + + def path_to_test_expectations_file(self): + return self.path_from_webkit_base('LayoutTests', 'platform', + 'mac', 'test_expectations.txt') + + def _skipped_file_paths(self): + # FIXME: This method will need to be made work for non-mac + # platforms and moved into base.Port. 
+ skipped_files = [] + if self._name in ('mac-tiger', 'mac-leopard', 'mac-snowleopard'): + skipped_files.append(os.path.join( + self._webkit_baseline_path(self._name), 'Skipped')) + skipped_files.append(os.path.join(self._webkit_baseline_path('mac'), + 'Skipped')) + return skipped_files + + def test_platform_name(self): + return 'mac' + self.version() + + def version(self): + os_version_string = platform.mac_ver()[0] # e.g. "10.5.6" + if not os_version_string: + return '-leopard' + release_version = int(os_version_string.split('.')[1]) + if release_version == 4: + return '-tiger' + elif release_version == 5: + return '-leopard' + elif release_version == 6: + return '-snowleopard' + return '' + + def _build_java_test_support(self): + java_tests_path = os.path.join(self.layout_tests_dir(), "java") + build_java = ["/usr/bin/make", "-C", java_tests_path] + if self._executive.run_command(build_java, return_exit_code=True): + _log.error("Failed to build Java support files: %s" % build_java) + return False + return True + + def _check_port_build(self): + return self._build_java_test_support() + + def _tests_for_other_platforms(self): + # The original run-webkit-tests builds up a "whitelist" of tests to + # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes + # we run *all* tests and test_expectations.txt functions as a + # blacklist. + # FIXME: This list could be dynamic based on platform name and + # pushed into base.Port. + return [ + "platform/chromium", + "platform/gtk", + "platform/qt", + "platform/win", + ] + + def _path_to_apache_config_file(self): + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'apache2-httpd.conf') + + # FIXME: This doesn't have anything to do with WebKit. + def _shut_down_http_server(self, server_pid): + """Shut down the lighttpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # FIXME: This isn't ideal, since it could conflict with + # lighttpd processes not started by http_server.py, + # but good enough for now. + self._executive.kill_all('httpd') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # FIXME: Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid + # file), so if kill fails on the given PID, just try to + # 'killall' web servers. + self._shut_down_http_server(None) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py new file mode 100644 index 0000000..d383a4c --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py @@ -0,0 +1,81 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import StringIO +import sys +import unittest + +import mac +import port_testcase + + +class MacTest(port_testcase.PortTestCase): + def make_port(self, options=port_testcase.mock_options): + if sys.platform != 'darwin': + return None + port_obj = mac.MacPort(options=options) + port_obj._options.results_directory = port_obj.results_directory() + port_obj._options.configuration = 'Release' + return port_obj + + def test_skipped_file_paths(self): + port = self.make_port() + if not port: + return + skipped_paths = port._skipped_file_paths() + # FIXME: _skipped_file_paths should return WebKit-relative paths. + # So to make it unit testable, we strip the WebKit directory from the path. + relative_paths = [path[len(port.path_from_webkit_base()):] for path in skipped_paths] + self.assertEqual(relative_paths, ['LayoutTests/platform/mac-leopard/Skipped', 'LayoutTests/platform/mac/Skipped']) + + example_skipped_file = u""" +# <rdar://problem/5647952> fast/events/mouseout-on-window.html needs mac DRT to issue mouse out events +fast/events/mouseout-on-window.html + +# <rdar://problem/5643675> window.scrollTo scrolls a window with no scrollbars +fast/events/attempt-scroll-with-no-scrollbars.html + +# see bug <rdar://problem/5646437> REGRESSION (r28015): svg/batik/text/smallFonts fails +svg/batik/text/smallFonts.svg +""" + example_skipped_tests = [ + "fast/events/mouseout-on-window.html", + "fast/events/attempt-scroll-with-no-scrollbars.html", + "svg/batik/text/smallFonts.svg", + ] + + def test_skipped_file_paths(self): + port = self.make_port() + if not port: + return + skipped_file = StringIO.StringIO(self.example_skipped_file) + self.assertEqual(port._tests_from_skipped_file(skipped_file), self.example_skipped_tests) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py new file mode 100644 index 0000000..c4b36ac --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py @@ -0,0 +1,97 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit testing base class for Port implementations.""" + +import os +import tempfile +import unittest + +from webkitpy.tool import mocktool +mock_options = mocktool.MockOptions(results_directory='layout-test-results', + use_apache=True, + configuration='Release') + +# FIXME: This should be used for all ports, not just WebKit Mac. See +# https://bugs.webkit.org/show_bug.cgi?id=50043 . + +class PortTestCase(unittest.TestCase): + """Tests the WebKit port implementation.""" + def make_port(self, options=mock_options): + """Override in subclass.""" + raise NotImplementedError() + + def test_driver_cmd_line(self): + port = self.make_port() + if not port: + return + self.assertTrue(len(port.driver_cmd_line())) + + def test_http_server(self): + port = self.make_port() + if not port: + return + port.start_http_server() + port.stop_http_server() + + def test_image_diff(self): + port = self.make_port() + if not port: + return + + # FIXME: not sure why this shouldn't always be True + #self.assertTrue(port.check_image_diff()) + if not port.check_image_diff(): + return + + dir = port.layout_tests_dir() + file1 = os.path.join(dir, 'fast', 'css', 'button_center.png') + fh1 = file(file1) + contents1 = fh1.read() + file2 = os.path.join(dir, 'fast', 'css', + 'remove-shorthand-expected.png') + fh2 = file(file2) + contents2 = fh2.read() + tmpfile = tempfile.mktemp() + + self.assertFalse(port.diff_image(contents1, contents1)) + self.assertTrue(port.diff_image(contents1, contents2)) + + self.assertTrue(port.diff_image(contents1, contents2, tmpfile)) + fh1.close() + fh2.close() + # FIXME: this may not be being written? + # self.assertTrue(os.path.exists(tmpfile)) + # os.remove(tmpfile) + + def test_websocket_server(self): + port = self.make_port() + if not port: + return + port.start_websocket_server() + port.stop_websocket_server() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/layout_tests/port/qt.py new file mode 100644 index 0000000..af94acc --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/qt.py @@ -0,0 +1,119 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""QtWebKit implementation of the Port interface.""" + +import logging +import os +import signal +import sys + +import webkit + +from webkitpy.layout_tests.port.webkit import WebKitPort + +_log = logging.getLogger("webkitpy.layout_tests.port.qt") + + +class QtPort(WebKitPort): + """QtWebKit implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'qt') + WebKitPort.__init__(self, **kwargs) + + def baseline_search_path(self): + port_names = [] + if sys.platform == 'linux2': + port_names.append("qt-linux") + elif sys.platform in ('win32', 'cygwin'): + port_names.append("qt-win") + elif sys.platform == 'darwin': + port_names.append("qt-mac") + port_names.append("qt") + return map(self._webkit_baseline_path, port_names) + + def _tests_for_other_platforms(self): + # FIXME: This list could be dynamic based on platform name and + # pushed into base.Port. + # This really need to be automated. + return [ + "platform/chromium", + "platform/win", + "platform/gtk", + "platform/mac", + ] + + def _path_to_apache_config_file(self): + # FIXME: This needs to detect the distribution and change config files. + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'apache2-debian-httpd.conf') + + def _shut_down_http_server(self, server_pid): + """Shut down the httpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # server_pid is not set when "http_server.py stop" is run manually. + if server_pid is None: + # FIXME: This isn't ideal, since it could conflict with + # lighttpd processes not started by http_server.py, + # but good enough for now. + self._executive.kill_all('apache2') + else: + try: + os.kill(server_pid, signal.SIGTERM) + # TODO(mmoss) Maybe throw in a SIGKILL just to be sure? + except OSError: + # Sometimes we get a bad PID (e.g. from a stale httpd.pid + # file), so if kill fails on the given PID, just try to + # 'killall' web servers. 
+ self._shut_down_http_server(None) + + def _build_driver(self): + # The Qt port builds DRT as part of the main build step + return True + + def _path_to_driver(self): + return self._build_path('bin/DumpRenderTree') + + def _path_to_image_diff(self): + return self._build_path('bin/ImageDiff') + + def _path_to_webcore_library(self): + return self._build_path('lib/libQtWebKit.so') + + def _runtime_feature_list(self): + return None + + def setup_environ_for_server(self): + env = webkit.WebKitPort.setup_environ_for_server(self) + env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins') + return env diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py new file mode 100644 index 0000000..5a0a40c --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Package that implements the ServerProcess wrapper class""" + +import logging +import os +import select +import signal +import subprocess +import sys +import time +if sys.platform != 'win32': + import fcntl + +from webkitpy.common.system.executive import Executive + +_log = logging.getLogger("webkitpy.layout_tests.port.server_process") + + +class ServerProcess: + """This class provides a wrapper around a subprocess that + implements a simple request/response usage model. The primary benefit + is that reading responses takes a timeout, so that we don't ever block + indefinitely. 
The class also handles transparently restarting processes + as necessary to keep issuing commands.""" + + def __init__(self, port_obj, name, cmd, env=None, executive=Executive()): + self._port = port_obj + self._name = name + self._cmd = cmd + self._env = env + self._reset() + self._executive = executive + + def _reset(self): + self._proc = None + self._output = '' + self.crashed = False + self.timed_out = False + self.error = '' + + def _start(self): + if self._proc: + raise ValueError("%s already running" % self._name) + self._reset() + # close_fds is a workaround for http://bugs.python.org/issue2320 + close_fds = sys.platform not in ('win32', 'cygwin') + self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=close_fds, + env=self._env) + fd = self._proc.stdout.fileno() + fl = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) + fd = self._proc.stderr.fileno() + fl = fcntl.fcntl(fd, fcntl.F_GETFL) + fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK) + + def handle_interrupt(self): + """This routine checks to see if the process crashed or exited + because of a keyboard interrupt and raises KeyboardInterrupt + accordingly.""" + if self.crashed: + # This is hex code 0xc000001d, which is used for abrupt + # termination. This happens if we hit ctrl+c from the prompt + # and we happen to be waiting on the DumpRenderTree. + # sdoyon: Not sure for which OS and in what circumstances the + # above code is valid. What works for me under Linux to detect + # ctrl+c is for the subprocess returncode to be negative + # SIGINT. And that agrees with the subprocess documentation. + if (-1073741510 == self._proc.returncode or + - signal.SIGINT == self._proc.returncode): + raise KeyboardInterrupt + return + + def poll(self): + """Check to see if the underlying process is running; returns None + if it still is (wrapper around subprocess.poll).""" + if self._proc: + # poll() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + return self._proc.poll() + return None + + def write(self, input): + """Write a request to the subprocess. The subprocess is (re-)start()'ed + if is not already running.""" + if not self._proc: + self._start() + self._proc.stdin.write(input) + + def read_line(self, timeout): + """Read a single line from the subprocess, waiting until the deadline. + If the deadline passes, the call times out. Note that even if the + subprocess has crashed or the deadline has passed, if there is output + pending, it will be returned. + + Args: + timeout: floating-point number of seconds the call is allowed + to block for. A zero or negative number will attempt to read + any existing data, but will not block. There is no way to + block indefinitely. + Returns: + output: data returned, if any. If no data is available and the + call times out or crashes, an empty string is returned. Note + that the returned string includes the newline ('\n').""" + return self._read(timeout, size=0) + + def read(self, timeout, size): + """Attempts to read size characters from the subprocess, waiting until + the deadline passes. If the deadline passes, any available data will be + returned. Note that even if the deadline has passed or if the + subprocess has crashed, any available data will still be returned. + + Args: + timeout: floating-point number of seconds the call is allowed + to block for. A zero or negative number will attempt to read + any existing data, but will not block. 
+ There is no way to
+ block indefinitely.
+ size: amount of data to read. Must be a positive integer.
+ Returns:
+ output: data returned, if any. If no data is available, an empty
+ string is returned.
+ """
+ if size <= 0:
+ raise ValueError('ServerProcess.read() called with a '
+ 'non-positive size: %d ' % size)
+ return self._read(timeout, size)
+
+ def _read(self, timeout, size):
+ """Internal routine that actually does the read."""
+ index = -1
+ out_fd = self._proc.stdout.fileno()
+ err_fd = self._proc.stderr.fileno()
+ select_fds = (out_fd, err_fd)
+ deadline = time.time() + timeout
+ while not self.timed_out and not self.crashed:
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ if self._proc.poll() != None:
+ self.crashed = True
+ self.handle_interrupt()
+
+ now = time.time()
+ if now > deadline:
+ self.timed_out = True
+
+ # Check to see if we have any output we can return.
+ if size and len(self._output) >= size:
+ index = size
+ elif size == 0:
+ index = self._output.find('\n') + 1
+
+ if index > 0 or self.crashed or self.timed_out:
+ output = self._output[0:index]
+ self._output = self._output[index:]
+ return output
+
+ # Nope - wait for more data.
+ (read_fds, write_fds, err_fds) = select.select(select_fds, [],
+ select_fds,
+ deadline - now)
+ try:
+ if out_fd in read_fds:
+ self._output += self._proc.stdout.read()
+ if err_fd in read_fds:
+ self.error += self._proc.stderr.read()
+ except IOError, e:
+ pass
+
+ def stop(self):
+ """Stop (shut down) the subprocess, if it is running."""
+ pid = self._proc.pid
+ self._proc.stdin.close()
+ self._proc.stdout.close()
+ if self._proc.stderr:
+ self._proc.stderr.close()
+ if sys.platform not in ('win32', 'cygwin'):
+ # Closing stdin/stdout/stderr hangs sometimes on OS X,
+ # (see restart(), above), and anyway we don't want to hang
+ # the harness if DumpRenderTree is buggy, so we wait a couple
+ # seconds to give DumpRenderTree a chance to clean up, but then
+ # force-kill the process if necessary.
+ KILL_TIMEOUT = 3.0
+ timeout = time.time() + KILL_TIMEOUT
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ while self._proc.poll() is None and time.time() < timeout:
+ time.sleep(0.1)
+ # poll() is not threadsafe and can throw OSError due to:
+ # http://bugs.python.org/issue1731717
+ if self._proc.poll() is None:
+ _log.warning('stopping %s timed out, killing it' %
+ self._name)
+ self._executive.kill_process(self._proc.pid)
+ _log.warning('killed')
+ self._reset()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
new file mode 100644
index 0000000..935881c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -0,0 +1,343 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Dummy Port implementation used for testing.""" +from __future__ import with_statement + +import codecs +import fnmatch +import os +import sys +import time + +from webkitpy.layout_tests.layout_package import test_output + +import base + + +# This sets basic expectations for a test. Each individual expectation +# can be overridden by a keyword argument in TestList.add(). +class TestInstance: + def __init__(self, name): + self.name = name + self.base = name[(name.rfind("/") + 1):name.rfind(".html")] + self.crash = False + self.exception = False + self.hang = False + self.keyboard = False + self.error = '' + self.timeout = False + self.actual_text = self.base + '-txt\n' + self.actual_checksum = self.base + '-checksum\n' + self.actual_image = self.base + '-png\n' + self.expected_text = self.actual_text + self.expected_checksum = self.actual_checksum + self.expected_image = self.actual_image + + +# This is an in-memory list of tests, what we want them to produce, and +# what we want to claim are the expected results. 
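# Illustrative sketch (editor's aside, not part of the patch above): how
# TestInstance fills in its defaults from a test name; the name below is
# borrowed from the list registered further down in TestPort.__init__.
def _example_test_instance():
    t = TestInstance('failures/expected/image.html')
    assert t.base == 'image'
    assert t.actual_text == 'image-txt\n'
    assert t.expected_checksum == 'image-checksum\n'
    return t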
+class TestList: + def __init__(self, port): + self.port = port + self.tests = {} + + def add(self, name, **kwargs): + test = TestInstance(name) + for key, value in kwargs.items(): + test.__dict__[key] = value + self.tests[name] = test + + def keys(self): + return self.tests.keys() + + def __contains__(self, item): + return item in self.tests + + def __getitem__(self, item): + return self.tests[item] + + +class TestPort(base.Port): + """Test implementation of the Port interface.""" + + def __init__(self, **kwargs): + base.Port.__init__(self, **kwargs) + tests = TestList(self) + tests.add('failures/expected/checksum.html', + actual_checksum='checksum_fail-checksum') + tests.add('failures/expected/crash.html', crash=True) + tests.add('failures/expected/exception.html', exception=True) + tests.add('failures/expected/timeout.html', timeout=True) + tests.add('failures/expected/hang.html', hang=True) + tests.add('failures/expected/missing_text.html', + expected_text=None) + tests.add('failures/expected/image.html', + actual_image='image_fail-png', + expected_image='image-png') + tests.add('failures/expected/image_checksum.html', + actual_checksum='image_checksum_fail-checksum', + actual_image='image_checksum_fail-png') + tests.add('failures/expected/keyboard.html', + keyboard=True) + tests.add('failures/expected/missing_check.html', + expected_checksum=None) + tests.add('failures/expected/missing_image.html', + expected_image=None) + tests.add('failures/expected/missing_text.html', + expected_text=None) + tests.add('failures/expected/newlines_leading.html', + expected_text="\nfoo\n", + actual_text="foo\n") + tests.add('failures/expected/newlines_trailing.html', + expected_text="foo\n\n", + actual_text="foo\n") + tests.add('failures/expected/newlines_with_excess_CR.html', + expected_text="foo\r\r\r\n", + actual_text="foo\n") + tests.add('failures/expected/text.html', + actual_text='text_fail-png') + tests.add('failures/unexpected/crash.html', crash=True) + tests.add('failures/unexpected/text-image-checksum.html', + actual_text='text-image-checksum_fail-txt', + actual_checksum='text-image-checksum_fail-checksum') + tests.add('failures/unexpected/timeout.html', timeout=True) + tests.add('http/tests/passes/text.html') + tests.add('http/tests/ssl/text.html') + tests.add('passes/error.html', error='stuff going to stderr') + tests.add('passes/image.html') + tests.add('passes/platform_image.html') + # Text output files contain "\r\n" on Windows. This may be + # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling. 
+ tests.add('passes/text.html', + expected_text='\nfoo\n\n', + actual_text='\nfoo\r\n\r\r\n') + tests.add('websocket/tests/passes/text.html') + self._tests = tests + + def baseline_path(self): + return os.path.join(self.layout_tests_dir(), 'platform', + self.name() + self.version()) + + def baseline_search_path(self): + return [self.baseline_path()] + + def check_build(self, needs_http): + return True + + def diff_image(self, expected_contents, actual_contents, + diff_filename=None): + diffed = actual_contents != expected_contents + if diffed and diff_filename: + with codecs.open(diff_filename, "w", "utf-8") as diff_fh: + diff_fh.write("< %s\n---\n> %s\n" % + (expected_contents, actual_contents)) + return diffed + + def expected_checksum(self, test): + test = self.relative_test_filename(test) + return self._tests[test].expected_checksum + + def expected_image(self, test): + test = self.relative_test_filename(test) + return self._tests[test].expected_image + + def expected_text(self, test): + test = self.relative_test_filename(test) + text = self._tests[test].expected_text + if not text: + text = '' + return text + + def tests(self, paths): + # Test the idea of port-specific overrides for test lists. Also + # keep in memory to speed up the test harness. + if not paths: + paths = ['*'] + + matched_tests = [] + for p in paths: + if self.path_isdir(p): + matched_tests.extend(fnmatch.filter(self._tests.keys(), p + '*')) + else: + matched_tests.extend(fnmatch.filter(self._tests.keys(), p)) + layout_tests_dir = self.layout_tests_dir() + return set([os.path.join(layout_tests_dir, p) for p in matched_tests]) + + def path_exists(self, path): + # used by test_expectations.py and printing.py + rpath = self.relative_test_filename(path) + if rpath in self._tests: + return True + if self.path_isdir(rpath): + return True + if rpath.endswith('-expected.txt'): + test = rpath.replace('-expected.txt', '.html') + return (test in self._tests and + self._tests[test].expected_text) + if rpath.endswith('-expected.checksum'): + test = rpath.replace('-expected.checksum', '.html') + return (test in self._tests and + self._tests[test].expected_checksum) + if rpath.endswith('-expected.png'): + test = rpath.replace('-expected.png', '.html') + return (test in self._tests and + self._tests[test].expected_image) + return False + + def layout_tests_dir(self): + return self.path_from_webkit_base('Tools', 'Scripts', + 'webkitpy', 'layout_tests', 'data') + + def path_isdir(self, path): + # Used by test_expectations.py + # + # We assume that a path is a directory if we have any tests + # that whose prefix matches the path plus a directory modifier + # and not a file extension. + if path[-1] != '/': + path += '/' + + # FIXME: Directories can have a dot in the name. We should + # probably maintain a white list of known cases like CSS2.1 + # and check it here in the future. + if path.find('.') != -1: + # extension separator found, assume this is a file + return False + + # strip out layout tests directory path if found. The tests + # keys are relative to it. 
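# Illustrative sketch (editor's aside, not part of the patch above): the
# directory heuristic used by path_isdir(), restated on plain data. The
# test names here are hypothetical.
def _example_path_isdir(path, tests=('fast/html/keygen.html', 'fast/css/a.html')):
    if not path.endswith('/'):
        path += '/'
    if '.' in path:
        return False  # an extension separator means "this is a file"
    return any(t.startswith(path) for t in tests)
# _example_path_isdir('fast/html') -> True
# _example_path_isdir('fast/html/keygen.html') -> False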
+ tests_dir = self.layout_tests_dir() + if path.startswith(tests_dir): + path = path[len(tests_dir) + 1:] + + return any([t.startswith(path) for t in self._tests.keys()]) + + def test_dirs(self): + return ['passes', 'failures'] + + def name(self): + return self._name + + def _path_to_wdiff(self): + return None + + def results_directory(self): + return '/tmp/' + self.get_option('results_directory') + + def setup_test_run(self): + pass + + def create_driver(self, worker_number): + return TestDriver(self, worker_number) + + def start_http_server(self): + pass + + def start_websocket_server(self): + pass + + def stop_http_server(self): + pass + + def stop_websocket_server(self): + pass + + def test_expectations(self): + """Returns the test expectations for this port. + + Basically this string should contain the equivalent of a + test_expectations file. See test_expectations.py for more details.""" + return """ +WONTFIX : failures/expected/checksum.html = IMAGE +WONTFIX : failures/expected/crash.html = CRASH +// This one actually passes because the checksums will match. +WONTFIX : failures/expected/image.html = PASS +WONTFIX : failures/expected/image_checksum.html = IMAGE +WONTFIX : failures/expected/missing_check.html = MISSING PASS +WONTFIX : failures/expected/missing_image.html = MISSING PASS +WONTFIX : failures/expected/missing_text.html = MISSING PASS +WONTFIX : failures/expected/newlines_leading.html = TEXT +WONTFIX : failures/expected/newlines_trailing.html = TEXT +WONTFIX : failures/expected/newlines_with_excess_CR.html = TEXT +WONTFIX : failures/expected/text.html = TEXT +WONTFIX : failures/expected/timeout.html = TIMEOUT +WONTFIX SKIP : failures/expected/hang.html = TIMEOUT +WONTFIX SKIP : failures/expected/keyboard.html = CRASH +WONTFIX SKIP : failures/expected/exception.html = CRASH +""" + + def test_base_platform_names(self): + return ('mac', 'win') + + def test_platform_name(self): + return 'mac' + + def test_platform_names(self): + return self.test_base_platform_names() + + def test_platform_name_to_name(self, test_platform_name): + return test_platform_name + + def version(self): + return '' + + +class TestDriver(base.Driver): + """Test/Dummy implementation of the DumpRenderTree interface.""" + + def __init__(self, port, worker_number): + self._port = port + + def cmd_line(self): + return ['None'] + + def poll(self): + return True + + def run_test(self, test_input): + start_time = time.time() + test_name = self._port.relative_test_filename(test_input.filename) + test = self._port._tests[test_name] + if test.keyboard: + raise KeyboardInterrupt + if test.exception: + raise ValueError('exception from ' + test_name) + if test.hang: + time.sleep((float(test_input.timeout) * 4) / 1000.0) + return test_output.TestOutput(test.actual_text, test.actual_image, + test.actual_checksum, test.crash, + time.time() - start_time, test.timeout, + test.error) + + def start(self): + pass + + def stop(self): + pass diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test_files.py b/Tools/Scripts/webkitpy/layout_tests/port/test_files.py new file mode 100644 index 0000000..2c0a7b6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/test_files.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""This module is used to find all of the layout test files used by +run-webkit-tests. It exposes one public function - find() - +which takes an optional list of paths. If a list is passed in, the returned +list of test files is constrained to those found under the paths passed in, +i.e. calling find(["LayoutTests/fast"]) will only return files +under that directory.""" + +import glob +import os +import time + +from webkitpy.common.system import logutils + + +_log = logutils.get_logger(__file__) + + +# When collecting test cases, we include any file with these extensions. +_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.xhtmlmp', '.pl', + '.php', '.svg']) +# When collecting test cases, skip these directories +_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests']) + + +def find(port, paths): + """Finds the set of tests under port.layout_tests_dir(). + + Args: + paths: a list of command line paths relative to the layout_tests_dir() + to limit the search to. glob patterns are ok. + """ + gather_start_time = time.time() + paths_to_walk = set() + # if paths is empty, provide a pre-defined list. + if paths: + _log.debug("Gathering tests from: %s relative to %s" % (paths, port.layout_tests_dir())) + for path in paths: + # If there's an * in the name, assume it's a glob pattern. + path = os.path.join(port.layout_tests_dir(), path) + if path.find('*') > -1: + filenames = glob.glob(path) + paths_to_walk.update(filenames) + else: + paths_to_walk.add(path) + else: + _log.debug("Gathering tests from: %s" % port.layout_tests_dir()) + paths_to_walk.add(port.layout_tests_dir()) + + # Now walk all the paths passed in on the command line and get filenames + test_files = set() + for path in paths_to_walk: + if os.path.isfile(path) and _is_test_file(path): + test_files.add(os.path.normpath(path)) + continue + + for root, dirs, files in os.walk(path): + # Don't walk skipped directories or their sub-directories. 
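# Illustrative sketch (editor's aside, not part of the patch above): the
# same traversal pruning can be written as a single in-place filter, since
# os.walk() re-reads 'dirs' before descending. _skipped_directories is the
# module-level set defined above.
def _example_walk(top):
    found = []
    for root, dirs, files in os.walk(top):
        dirs[:] = [d for d in dirs if d not in _skipped_directories]
        found.extend(os.path.join(root, f) for f in files)
    return found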
+ if os.path.basename(root) in _skipped_directories: + del dirs[:] + continue + # This copy and for-in is slightly inefficient, but + # the extra walk avoidance consistently shaves .5 seconds + # off of total walk() time on my MacBook Pro. + for directory in dirs[:]: + if directory in _skipped_directories: + dirs.remove(directory) + + for filename in files: + if _is_test_file(filename): + filename = os.path.join(root, filename) + filename = os.path.normpath(filename) + test_files.add(filename) + + gather_time = time.time() - gather_start_time + _log.debug("Test gathering took %f seconds" % gather_time) + + return test_files + + +def _has_supported_extension(filename): + """Return true if filename is one of the file extensions we want to run a + test on.""" + extension = os.path.splitext(filename)[1] + return extension in _supported_file_extensions + + +def _is_reference_html_file(filename): + """Return true if the filename points to a reference HTML file.""" + if (filename.endswith('-expected.html') or + filename.endswith('-expected-mismatch.html')): + _log.warn("Reftests are not supported - ignoring %s" % filename) + return True + return False + + +def _is_test_file(filename): + """Return true if the filename points to a test file.""" + return (_has_supported_extension(filename) and + not _is_reference_html_file(filename)) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py new file mode 100644 index 0000000..83525c8 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/test_files_unittest.py @@ -0,0 +1,75 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
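# Illustrative sketch (editor's aside, not part of the patch above): a
# minimal call to test_files.find(), mirroring the unit tests below - a
# bare base.Port() plus relative paths, with glob patterns allowed.
def _example_find():
    import base
    import test_files
    port = base.Port()
    return test_files.find(port, ['fast/html/key*'])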
+ +import os +import unittest + +import base +import test_files + + +class TestFilesTest(unittest.TestCase): + def test_find_no_paths_specified(self): + port = base.Port() + layout_tests_dir = port.layout_tests_dir() + port.layout_tests_dir = lambda: os.path.join(layout_tests_dir, + 'fast', 'html') + tests = test_files.find(port, []) + self.assertNotEqual(tests, 0) + + def test_find_one_test(self): + port = base.Port() + # This is just a test picked at random but known to exist. + tests = test_files.find(port, ['fast/html/keygen.html']) + self.assertEqual(len(tests), 1) + + def test_find_glob(self): + port = base.Port() + tests = test_files.find(port, ['fast/html/key*']) + self.assertEqual(len(tests), 1) + + def test_find_with_skipped_directories(self): + port = base.Port() + tests = port.tests('userscripts') + self.assertTrue('userscripts/resources/frame1.html' not in tests) + + def test_find_with_skipped_directories_2(self): + port = base.Port() + tests = test_files.find(port, ['userscripts/resources']) + self.assertEqual(tests, set([])) + + def test_is_test_file(self): + self.assertTrue(test_files._is_test_file('foo.html')) + self.assertTrue(test_files._is_test_file('foo.shtml')) + self.assertFalse(test_files._is_test_file('foo.png')) + self.assertFalse(test_files._is_test_file('foo-expected.html')) + self.assertFalse(test_files._is_test_file('foo-expected-mismatch.html')) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py new file mode 100644 index 0000000..afdebeb --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py @@ -0,0 +1,497 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""WebKit implementations of the Port interface.""" + + +from __future__ import with_statement + +import codecs +import logging +import os +import re +import shutil +import signal +import sys +import time +import webbrowser +import operator +import tempfile +import shutil + +import webkitpy.common.system.ospath as ospath +import webkitpy.layout_tests.layout_package.test_output as test_output +import webkitpy.layout_tests.port.base as base +import webkitpy.layout_tests.port.server_process as server_process + +_log = logging.getLogger("webkitpy.layout_tests.port.webkit") + + +class WebKitPort(base.Port): + """WebKit implementation of the Port class.""" + + def __init__(self, **kwargs): + base.Port.__init__(self, **kwargs) + self._cached_apache_path = None + + # FIXME: disable pixel tests until they are run by default on the + # build machines. + self.set_option_default('pixel_tests', False) + + def baseline_path(self): + return self._webkit_baseline_path(self._name) + + def baseline_search_path(self): + return [self._webkit_baseline_path(self._name)] + + def path_to_test_expectations_file(self): + return os.path.join(self._webkit_baseline_path(self._name), + 'test_expectations.txt') + + # Only needed by ports which maintain versioned test expectations (like mac-tiger vs. mac-leopard) + def version(self): + return '' + + def _build_driver(self): + configuration = self.get_option('configuration') + return self._config.build_dumprendertree(configuration) + + def _check_driver(self): + driver_path = self._path_to_driver() + if not os.path.exists(driver_path): + _log.error("DumpRenderTree was not found at %s" % driver_path) + return False + return True + + def check_build(self, needs_http): + if self.get_option('build') and not self._build_driver(): + return False + if not self._check_driver(): + return False + if self.get_option('pixel_tests'): + if not self.check_image_diff(): + return False + if not self._check_port_build(): + return False + return True + + def _check_port_build(self): + # Ports can override this method to do additional checks. + return True + + def check_image_diff(self, override_step=None, logging=True): + image_diff_path = self._path_to_image_diff() + if not os.path.exists(image_diff_path): + _log.error("ImageDiff was not found at %s" % image_diff_path) + return False + return True + + def diff_image(self, expected_contents, actual_contents, + diff_filename=None): + """Return True if the two files are different. Also write a delta + image of the two images into |diff_filename| if it is not None.""" + + # Handle the case where the test didn't actually generate an image. + if not actual_contents: + return True + + sp = self._diff_image_request(expected_contents, actual_contents) + return self._diff_image_reply(sp, diff_filename) + + def _diff_image_request(self, expected_contents, actual_contents): + # FIXME: use self.get_option('tolerance') and + # self.set_option_default('tolerance', 0.1) once that behaves correctly + # with default values. 
+ if self.get_option('tolerance') is not None: + tolerance = self.get_option('tolerance') + else: + tolerance = 0.1 + command = [self._path_to_image_diff(), '--tolerance', str(tolerance)] + sp = server_process.ServerProcess(self, 'ImageDiff', command) + + sp.write('Content-Length: %d\n%sContent-Length: %d\n%s' % + (len(actual_contents), actual_contents, + len(expected_contents), expected_contents)) + + return sp + + def _diff_image_reply(self, sp, diff_filename): + timeout = 2.0 + deadline = time.time() + timeout + output = sp.read_line(timeout) + while not sp.timed_out and not sp.crashed and output: + if output.startswith('Content-Length'): + m = re.match('Content-Length: (\d+)', output) + content_length = int(m.group(1)) + timeout = deadline - time.time() + output = sp.read(timeout, content_length) + break + elif output.startswith('diff'): + break + else: + timeout = deadline - time.time() + output = sp.read_line(deadline) + + result = True + if output.startswith('diff'): + m = re.match('diff: (.+)% (passed|failed)', output) + if m.group(2) == 'passed': + result = False + elif output and diff_filename: + with open(diff_filename, 'w') as file: + file.write(output) + elif sp.timed_out: + _log.error("ImageDiff timed out") + elif sp.crashed: + _log.error("ImageDiff crashed") + sp.stop() + return result + + def results_directory(self): + # Results are store relative to the built products to make it easy + # to have multiple copies of webkit checked out and built. + return self._build_path(self.get_option('results_directory')) + + def setup_test_run(self): + # This port doesn't require any specific configuration. + pass + + def create_driver(self, worker_number): + return WebKitDriver(self, worker_number) + + def test_base_platform_names(self): + # At the moment we don't use test platform names, but we have + # to return something. + return ('mac', 'win') + + def _tests_for_other_platforms(self): + raise NotImplementedError('WebKitPort._tests_for_other_platforms') + # The original run-webkit-tests builds up a "whitelist" of tests to + # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes + # we run *all* tests and test_expectations.txt functions as a + # blacklist. + # FIXME: This list could be dynamic based on platform name and + # pushed into base.Port. + return [ + "platform/chromium", + "platform/gtk", + "platform/qt", + "platform/win", + ] + + def _runtime_feature_list(self): + """Return the supported features of DRT. If a port doesn't support + this DRT switch, it has to override this method to return None""" + driver_path = self._path_to_driver() + feature_list = ' '.join(os.popen(driver_path + " --print-supported-features 2>&1").readlines()) + if "SupportedFeatures:" in feature_list: + return feature_list + return None + + def _supported_symbol_list(self): + """Return the supported symbols of WebCore.""" + webcore_library_path = self._path_to_webcore_library() + if not webcore_library_path: + return None + symbol_list = ' '.join(os.popen("nm " + webcore_library_path).readlines()) + return symbol_list + + def _directories_for_features(self): + """Return the supported feature dictionary. The keys are the + features and the values are the directories in lists.""" + directories_for_features = { + "Accelerated Compositing": ["compositing"], + "3D Rendering": ["animations/3d", "transforms/3d"], + } + return directories_for_features + + def _directories_for_symbols(self): + """Return the supported feature dictionary. 
The keys are the + symbols and the values are the directories in lists.""" + directories_for_symbol = { + "MathMLElement": ["mathml"], + "GraphicsLayer": ["compositing"], + "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"], + "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"], + "WMLElement": ["http/tests/wml", "fast/wml", "wml"], + "parseWCSSInputProperty": ["fast/wcss"], + "isXHTMLMPDocument": ["fast/xhtmlmp"], + } + return directories_for_symbol + + def _skipped_tests_for_unsupported_features(self): + """Return the directories of unsupported tests. Search for the + symbols in the symbol_list, if found add the corresponding + directories to the skipped directory list.""" + feature_list = self._runtime_feature_list() + directories = self._directories_for_features() + + # if DRT feature detection not supported + if not feature_list: + feature_list = self._supported_symbol_list() + directories = self._directories_for_symbols() + + if not feature_list: + return [] + + skipped_directories = [directories[feature] + for feature in directories.keys() + if feature not in feature_list] + return reduce(operator.add, skipped_directories) + + def _tests_for_disabled_features(self): + # FIXME: This should use the feature detection from + # webkitperl/features.pm to match run-webkit-tests. + # For now we hard-code a list of features known to be disabled on + # the Mac platform. + disabled_feature_tests = [ + "fast/xhtmlmp", + "http/tests/wml", + "mathml", + "wml", + ] + # FIXME: webarchive tests expect to read-write from + # -expected.webarchive files instead of .txt files. + # This script doesn't know how to do that yet, so pretend they're + # just "disabled". + webarchive_tests = [ + "webarchive", + "svg/webarchive", + "http/tests/webarchive", + "svg/custom/image-with-prefix-in-webarchive.svg", + ] + unsupported_feature_tests = self._skipped_tests_for_unsupported_features() + return disabled_feature_tests + webarchive_tests + unsupported_feature_tests + + def _tests_from_skipped_file(self, skipped_file): + tests_to_skip = [] + for line in skipped_file.readlines(): + line = line.strip() + if line.startswith('#') or not len(line): + continue + tests_to_skip.append(line) + return tests_to_skip + + def _skipped_file_paths(self): + return [os.path.join(self._webkit_baseline_path(self._name), + 'Skipped')] + + def _expectations_from_skipped_files(self): + tests_to_skip = [] + for filename in self._skipped_file_paths(): + if not os.path.exists(filename): + _log.warn("Failed to open Skipped file: %s" % filename) + continue + with codecs.open(filename, "r", "utf-8") as skipped_file: + tests_to_skip.extend(self._tests_from_skipped_file(skipped_file)) + return tests_to_skip + + def test_expectations(self): + # The WebKit mac port uses a combination of a test_expectations file + # and 'Skipped' files. + expectations_path = self.path_to_test_expectations_file() + with codecs.open(expectations_path, "r", "utf-8") as file: + return file.read() + self._skips() + + def _skips(self): + # Each Skipped file contains a list of files + # or directories to be skipped during the test run. The total list + # of tests to skipped is given by the contents of the generic + # Skipped file found in platform/X plus a version-specific file + # found in platform/X-version. Duplicate entries are allowed. + # This routine reads those files and turns contents into the + # format expected by test_expectations. 
+ + tests_to_skip = self.skipped_layout_tests() + skip_lines = map(lambda test_path: "BUG_SKIPPED SKIP : %s = FAIL" % + test_path, tests_to_skip) + return "\n".join(skip_lines) + + def skipped_layout_tests(self): + # Use a set to allow duplicates + tests_to_skip = set(self._expectations_from_skipped_files()) + tests_to_skip.update(self._tests_for_other_platforms()) + tests_to_skip.update(self._tests_for_disabled_features()) + return tests_to_skip + + def test_platform_name(self): + return self._name + self.version() + + def test_platform_names(self): + return self.test_base_platform_names() + ( + 'mac-tiger', 'mac-leopard', 'mac-snowleopard') + + def _build_path(self, *comps): + return self._filesystem.join(self._config.build_directory( + self.get_option('configuration')), *comps) + + def _path_to_driver(self): + return self._build_path('DumpRenderTree') + + def _path_to_webcore_library(self): + return None + + def _path_to_helper(self): + return None + + def _path_to_image_diff(self): + return self._build_path('ImageDiff') + + def _path_to_wdiff(self): + # FIXME: This does not exist on a default Mac OS X Leopard install. + return 'wdiff' + + def _path_to_apache(self): + if not self._cached_apache_path: + # The Apache binary path can vary depending on OS and distribution + # See http://wiki.apache.org/httpd/DistrosDefaultLayout + for path in ["/usr/sbin/httpd", "/usr/sbin/apache2"]: + if os.path.exists(path): + self._cached_apache_path = path + break + + if not self._cached_apache_path: + _log.error("Could not find apache. Not installed or unknown path.") + + return self._cached_apache_path + + +class WebKitDriver(base.Driver): + """WebKit implementation of the DumpRenderTree interface.""" + + def __init__(self, port, worker_number): + self._worker_number = worker_number + self._port = port + self._driver_tempdir = tempfile.mkdtemp(prefix='DumpRenderTree-') + + def __del__(self): + shutil.rmtree(self._driver_tempdir) + + def cmd_line(self): + cmd = self._command_wrapper(self._port.get_option('wrapper')) + cmd += [self._port._path_to_driver(), '-'] + + if self._port.get_option('pixel_tests'): + cmd.append('--pixel-tests') + + return cmd + + def start(self): + environment = self._port.setup_environ_for_server() + environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path() + environment['DUMPRENDERTREE_TEMP'] = self._driver_tempdir + self._server_process = server_process.ServerProcess(self._port, + "DumpRenderTree", self.cmd_line(), environment) + + def poll(self): + return self._server_process.poll() + + def restart(self): + self._server_process.stop() + self._server_process.start() + return + + # FIXME: This function is huge. + def run_test(self, test_input): + uri = self._port.filename_to_uri(test_input.filename) + if uri.startswith("file:///"): + command = uri[7:] + else: + command = uri + + if test_input.image_hash: + command += "'" + test_input.image_hash + command += "\n" + + start_time = time.time() + self._server_process.write(command) + + have_seen_content_type = False + actual_image_hash = None + output = str() # Use a byte array for output, even though it should be UTF-8. 
+ image = str() + + timeout = int(test_input.timeout) / 1000.0 + deadline = time.time() + timeout + line = self._server_process.read_line(timeout) + while (not self._server_process.timed_out + and not self._server_process.crashed + and line.rstrip() != "#EOF"): + if (line.startswith('Content-Type:') and not + have_seen_content_type): + have_seen_content_type = True + else: + # Note: Text output from DumpRenderTree is always UTF-8. + # However, some tests (e.g. webarchives) spit out binary + # data instead of text. So to make things simple, we + # always treat the output as binary. + output += line + line = self._server_process.read_line(timeout) + timeout = deadline - time.time() + + # Now read a second block of text for the optional image data + remaining_length = -1 + HASH_HEADER = 'ActualHash: ' + LENGTH_HEADER = 'Content-Length: ' + line = self._server_process.read_line(timeout) + while (not self._server_process.timed_out + and not self._server_process.crashed + and line.rstrip() != "#EOF"): + if line.startswith(HASH_HEADER): + actual_image_hash = line[len(HASH_HEADER):].strip() + elif line.startswith('Content-Type:'): + pass + elif line.startswith(LENGTH_HEADER): + timeout = deadline - time.time() + content_length = int(line[len(LENGTH_HEADER):]) + image = self._server_process.read(timeout, content_length) + timeout = deadline - time.time() + line = self._server_process.read_line(timeout) + + error_lines = self._server_process.error.splitlines() + # FIXME: This is a hack. It is unclear why sometimes + # we do not get any error lines from the server_process + # probably we are not flushing stderr. + if error_lines and error_lines[-1] == "#EOF": + error_lines.pop() # Remove the expected "#EOF" + error = "\n".join(error_lines) + # FIXME: This seems like the wrong section of code to be doing + # this reset in. + self._server_process.error = "" + return test_output.TestOutput(output, image, actual_image_hash, + self._server_process.crashed, + time.time() - start_time, + self._server_process.timed_out, + error) + + def stop(self): + if self._server_process: + self._server_process.stop() + self._server_process = None diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py new file mode 100644 index 0000000..7b68310 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged +# +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL UNIVERSITY OF SZEGED OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.layout_tests.port.webkit import WebKitPort + + +class TestWebKitPort(WebKitPort): + def __init__(self, symbol_list=None, feature_list=None): + self.symbol_list = symbol_list + self.feature_list = feature_list + + def _runtime_feature_list(self): + return self.feature_list + + def _supported_symbol_list(self): + return self.symbol_list + + def _tests_for_other_platforms(self): + return ["media", ] + + def _tests_for_disabled_features(self): + return ["accessibility", ] + + def _skipped_file_paths(self): + return [] + +class WebKitPortTest(unittest.TestCase): + + def test_skipped_directories_for_symbols(self): + supported_symbols = ["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"] + expected_directories = set(["mathml", "fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl", "http/tests/wml", "fast/wml", "wml", "fast/wcss"]) + result_directories = set(TestWebKitPort(supported_symbols, None)._skipped_tests_for_unsupported_features()) + self.assertEqual(result_directories, expected_directories) + + def test_skipped_directories_for_features(self): + supported_features = ["Accelerated Compositing", "Foo Feature"] + expected_directories = set(["animations/3d", "transforms/3d"]) + result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features()) + self.assertEqual(result_directories, expected_directories) + + def test_skipped_layout_tests(self): + self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(), + set(["media", "accessibility"])) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/websocket_server.py b/Tools/Scripts/webkitpy/layout_tests/port/websocket_server.py new file mode 100644 index 0000000..926bc04 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/websocket_server.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A class to help start/stop the PyWebSocket server used by layout tests.""" + + +from __future__ import with_statement + +import codecs +import logging +import optparse +import os +import subprocess +import sys +import tempfile +import time +import urllib + +import factory +import http_server + +from webkitpy.common.system.executive import Executive +from webkitpy.thirdparty.autoinstalled.pywebsocket import mod_pywebsocket + + +_log = logging.getLogger("webkitpy.layout_tests.port.websocket_server") + +_WS_LOG_PREFIX = 'pywebsocket.ws.log-' +_WSS_LOG_PREFIX = 'pywebsocket.wss.log-' + +_DEFAULT_WS_PORT = 8880 +_DEFAULT_WSS_PORT = 9323 + + +def url_is_alive(url): + """Checks to see if we get an http response from |url|. + We poll the url 20 times with a 0.5 second delay. If we don't + get a reply in that time, we give up and assume the httpd + didn't start properly. + + Args: + url: The URL to check. + Return: + True if the url is alive. + """ + sleep_time = 0.5 + wait_time = 10 + while wait_time > 0: + try: + response = urllib.urlopen(url) + # Server is up and responding. + return True + except IOError: + pass + # Wait for sleep_time before trying again. + wait_time -= sleep_time + time.sleep(sleep_time) + + return False + + +class PyWebSocketNotStarted(Exception): + pass + + +class PyWebSocketNotFound(Exception): + pass + + +class PyWebSocket(http_server.Lighttpd): + + def __init__(self, port_obj, output_dir, port=_DEFAULT_WS_PORT, + root=None, use_tls=False, + pidfile=None): + """Args: + output_dir: the absolute path to the layout test result directory + """ + http_server.Lighttpd.__init__(self, port_obj, output_dir, + port=_DEFAULT_WS_PORT, + root=root) + self._output_dir = output_dir + self._process = None + self._port = port + self._root = root + self._use_tls = use_tls + self._private_key = self._pem_file + self._certificate = self._pem_file + if self._port: + self._port = int(self._port) + if self._use_tls: + self._server_name = 'PyWebSocket(Secure)' + else: + self._server_name = 'PyWebSocket' + self._pidfile = pidfile + self._wsout = None + + # Webkit tests + if self._root: + self._layout_tests = os.path.abspath(self._root) + self._web_socket_tests = os.path.abspath( + os.path.join(self._root, 'http', 'tests', + 'websocket', 'tests')) + else: + try: + self._layout_tests = self._port_obj.layout_tests_dir() + self._web_socket_tests = os.path.join(self._layout_tests, + 'http', 'tests', 'websocket', 'tests') + except: + self._web_socket_tests = None + + def start(self): + if not self._web_socket_tests: + _log.info('No need to start %s server.' % self._server_name) + return + if self.is_running(): + raise PyWebSocketNotStarted('%s is already running.' % + self._server_name) + + time_str = time.strftime('%d%b%Y-%H%M%S') + if self._use_tls: + log_prefix = _WSS_LOG_PREFIX + else: + log_prefix = _WS_LOG_PREFIX + log_file_name = log_prefix + time_str + + # Remove old log files. We only need to keep the last ones. 
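# Illustrative sketch (editor's aside, not part of the patch above): a
# hypothetical standalone use of this class - build it with a Port object
# and an output directory, then bracket a test run with start()/stop().
def _example_websocket_run(port_obj, output_dir):
    server = PyWebSocket(port_obj, output_dir)
    server.start()
    try:
        pass  # drive the websocket layout tests here
    finally:
        server.stop(force=True)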
+ self.remove_log_files(self._output_dir, log_prefix) + + error_log = os.path.join(self._output_dir, log_file_name + "-err.txt") + + output_log = os.path.join(self._output_dir, log_file_name + "-out.txt") + self._wsout = codecs.open(output_log, "w", "utf-8") + + python_interp = sys.executable + pywebsocket_base = os.path.join( + os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))), 'thirdparty', + 'autoinstalled', 'pywebsocket') + pywebsocket_script = os.path.join(pywebsocket_base, 'mod_pywebsocket', + 'standalone.py') + start_cmd = [ + python_interp, '-u', pywebsocket_script, + '--server-host', '127.0.0.1', + '--port', str(self._port), + '--document-root', os.path.join(self._layout_tests, 'http', 'tests'), + '--scan-dir', self._web_socket_tests, + '--cgi-paths', '/websocket/tests', + '--log-file', error_log, + ] + + handler_map_file = os.path.join(self._web_socket_tests, + 'handler_map.txt') + if os.path.exists(handler_map_file): + _log.debug('Using handler_map_file: %s' % handler_map_file) + start_cmd.append('--websock-handlers-map-file') + start_cmd.append(handler_map_file) + else: + _log.warning('No handler_map_file found') + + if self._use_tls: + start_cmd.extend(['-t', '-k', self._private_key, + '-c', self._certificate]) + + env = self._port_obj.setup_environ_for_server() + env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep + + env.get('PYTHONPATH', '')) + + _log.debug('Starting %s server on %d.' % ( + self._server_name, self._port)) + _log.debug('cmdline: %s' % ' '.join(start_cmd)) + # FIXME: We should direct this call through Executive for testing. + # Note: Not thread safe: http://bugs.python.org/issue2320 + self._process = subprocess.Popen(start_cmd, + stdin=open(os.devnull, 'r'), + stdout=self._wsout, + stderr=subprocess.STDOUT, + env=env) + + if self._use_tls: + url = 'https' + else: + url = 'http' + url = url + '://127.0.0.1:%d/' % self._port + if not url_is_alive(url): + if self._process.returncode == None: + # FIXME: We should use a non-static Executive for easier + # testing. + Executive().kill_process(self._process.pid) + with codecs.open(output_log, "r", "utf-8") as fp: + for line in fp: + _log.error(line) + raise PyWebSocketNotStarted( + 'Failed to start %s server on port %s.' % + (self._server_name, self._port)) + + # Our process terminated already + if self._process.returncode != None: + raise PyWebSocketNotStarted( + 'Failed to start %s server.' % self._server_name) + if self._pidfile: + with codecs.open(self._pidfile, "w", "ascii") as file: + file.write("%d" % self._process.pid) + + def stop(self, force=False): + if not force and not self.is_running(): + return + + pid = None + if self._process: + pid = self._process.pid + elif self._pidfile: + with codecs.open(self._pidfile, "r", "ascii") as file: + pid = int(file.read().strip()) + + if not pid: + raise PyWebSocketNotFound( + 'Failed to find %s server pid.' % self._server_name) + + _log.debug('Shutting down %s server %d.' % (self._server_name, pid)) + # FIXME: We should use a non-static Executive for easier testing. 
+ Executive().kill_process(pid) + + if self._process: + # wait() is not threadsafe and can throw OSError due to: + # http://bugs.python.org/issue1731717 + self._process.wait() + self._process = None + + if self._wsout: + self._wsout.close() + self._wsout = None diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py new file mode 100644 index 0000000..9e30155 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/port/win.py @@ -0,0 +1,75 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the Google name nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""WebKit Win implementation of the Port interface.""" + +import logging +import os + +from webkitpy.layout_tests.port.webkit import WebKitPort + +_log = logging.getLogger("webkitpy.layout_tests.port.win") + + +class WinPort(WebKitPort): + """WebKit Win implementation of the Port class.""" + + def __init__(self, **kwargs): + kwargs.setdefault('port_name', 'win') + WebKitPort.__init__(self, **kwargs) + + def baseline_search_path(self): + # Based on code from old-run-webkit-tests expectedDirectoryForTest() + port_names = ["win", "mac-snowleopard", "mac"] + return map(self._webkit_baseline_path, port_names) + + def _tests_for_other_platforms(self): + # FIXME: This list could be dynamic based on platform name and + # pushed into base.Port. + # This really need to be automated. + return [ + "platform/chromium", + "platform/gtk", + "platform/qt", + "platform/mac", + ] + + def _path_to_apache_config_file(self): + return os.path.join(self.layout_tests_dir(), 'http', 'conf', + 'cygwin-httpd.conf') + + def _shut_down_http_server(self, server_pid): + """Shut down the httpd web server. Blocks until it's fully + shut down. + + Args: + server_pid: The process ID of the running server. + """ + # Looks like we ignore server_pid. + # Copy/pasted from chromium-win. 
+ self._executive.kill_all("httpd.exe") diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py new file mode 100644 index 0000000..4d8b7c9 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py @@ -0,0 +1,966 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Rebaselining tool that automatically produces baselines for all platforms. + +The script does the following for each platform specified: + 1. Compile a list of tests that need rebaselining. + 2. Download test result archive from buildbot for the platform. + 3. Extract baselines from the archive file for all identified files. + 4. Add new baselines to SVN repository. + 5. For each test that has been rebaselined, remove this platform option from + the test in test_expectation.txt. If no other platforms remain after + removal, delete the rebaselined test from the file. + +At the end, the script generates a html that compares old and new baselines. +""" + +from __future__ import with_statement + +import codecs +import copy +import logging +import optparse +import os +import re +import shutil +import subprocess +import sys +import tempfile +import time +import urllib +import zipfile + +from webkitpy.common.system import path +from webkitpy.common.system import user +from webkitpy.common.system.executive import Executive, ScriptError +import webkitpy.common.checkout.scm as scm + +import port +from layout_package import test_expectations + +_log = logging.getLogger("webkitpy.layout_tests." 
+ "rebaseline_chromium_webkit_tests") + +BASELINE_SUFFIXES = ['.txt', '.png', '.checksum'] +REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux'] +ARCHIVE_DIR_NAME_DICT = {'win': 'Webkit_Win', + 'win-vista': 'webkit-dbg-vista', + 'win-xp': 'Webkit_Win', + 'mac': 'Webkit_Mac10_5', + 'linux': 'webkit-rel-linux64', + 'win-canary': 'webkit-rel-webkit-org', + 'win-vista-canary': 'webkit-dbg-vista', + 'win-xp-canary': 'webkit-rel-webkit-org', + 'mac-canary': 'webkit-rel-mac-webkit-org', + 'linux-canary': 'webkit-rel-linux-webkit-org'} + + +def log_dashed_string(text, platform, logging_level=logging.INFO): + """Log text message with dashes on both sides.""" + + msg = text + if platform: + msg += ': ' + platform + if len(msg) < 78: + dashes = '-' * ((78 - len(msg)) / 2) + msg = '%s %s %s' % (dashes, msg, dashes) + + if logging_level == logging.ERROR: + _log.error(msg) + elif logging_level == logging.WARNING: + _log.warn(msg) + else: + _log.info(msg) + + +def setup_html_directory(html_directory): + """Setup the directory to store html results. + + All html related files are stored in the "rebaseline_html" subdirectory. + + Args: + html_directory: parent directory that stores the rebaselining results. + If None, a temp directory is created. + + Returns: + the directory that stores the html related rebaselining results. + """ + + if not html_directory: + html_directory = tempfile.mkdtemp() + elif not os.path.exists(html_directory): + os.mkdir(html_directory) + + html_directory = os.path.join(html_directory, 'rebaseline_html') + _log.info('Html directory: "%s"', html_directory) + + if os.path.exists(html_directory): + shutil.rmtree(html_directory, True) + _log.info('Deleted file at html directory: "%s"', html_directory) + + if not os.path.exists(html_directory): + os.mkdir(html_directory) + return html_directory + + +def get_result_file_fullpath(html_directory, baseline_filename, platform, + result_type): + """Get full path of the baseline result file. + + Args: + html_directory: directory that stores the html related files. + baseline_filename: name of the baseline file. + platform: win, linux or mac + result_type: type of the baseline result: '.txt', '.png'. + + Returns: + Full path of the baseline file for rebaselining result comparison. + """ + + base, ext = os.path.splitext(baseline_filename) + result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext) + fullpath = os.path.join(html_directory, result_filename) + _log.debug(' Result file full path: "%s".', fullpath) + return fullpath + + +class Rebaseliner(object): + """Class to produce new baselines for a given platform.""" + + REVISION_REGEX = r'<a href=\"(\d+)/\">' + + def __init__(self, running_port, target_port, platform, options): + """ + Args: + running_port: the Port the script is running on. + target_port: the Port the script uses to find port-specific + configuration information like the test_expectations.txt + file location and the list of test platforms. + platform: the test platform to rebaseline + options: the command-line options object.""" + self._platform = platform + self._options = options + self._port = running_port + self._target_port = target_port + self._rebaseline_port = port.get( + self._target_port.test_platform_name_to_name(platform), options) + self._rebaselining_tests = [] + self._rebaselined_tests = [] + + # Create tests and expectations helper which is used to: + # -. compile list of tests that need rebaselining. + # -. 
update the tests in test_expectations file after rebaseline + # is done. + expectations_str = self._rebaseline_port.test_expectations() + self._test_expectations = \ + test_expectations.TestExpectations(self._rebaseline_port, + None, + expectations_str, + self._platform, + False, + False) + self._scm = scm.default_scm() + + def run(self, backup): + """Run rebaseline process.""" + + log_dashed_string('Compiling rebaselining tests', self._platform) + if not self._compile_rebaselining_tests(): + return True + + log_dashed_string('Downloading archive', self._platform) + archive_file = self._download_buildbot_archive() + _log.info('') + if not archive_file: + _log.error('No archive found.') + return False + + log_dashed_string('Extracting and adding new baselines', + self._platform) + if not self._extract_and_add_new_baselines(archive_file): + return False + + log_dashed_string('Updating rebaselined tests in file', + self._platform) + self._update_rebaselined_tests_in_file(backup) + _log.info('') + + if len(self._rebaselining_tests) != len(self._rebaselined_tests): + _log.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN ' + 'REBASELINED.') + _log.warning(' Total tests needing rebaselining: %d', + len(self._rebaselining_tests)) + _log.warning(' Total tests rebaselined: %d', + len(self._rebaselined_tests)) + return False + + _log.warning('All tests needing rebaselining were successfully ' + 'rebaselined.') + + return True + + def get_rebaselining_tests(self): + return self._rebaselining_tests + + def _compile_rebaselining_tests(self): + """Compile list of tests that need rebaselining for the platform. + + Returns: + List of tests that need rebaselining or + None if there is no such test. + """ + + self._rebaselining_tests = \ + self._test_expectations.get_rebaselining_failures() + if not self._rebaselining_tests: + _log.warn('No tests found that need rebaselining.') + return None + + _log.info('Total number of tests needing rebaselining ' + 'for "%s": "%d"', self._platform, + len(self._rebaselining_tests)) + + test_no = 1 + for test in self._rebaselining_tests: + _log.info(' %d: %s', test_no, test) + test_no += 1 + + return self._rebaselining_tests + + def _get_latest_revision(self, url): + """Get the latest layout test revision number from buildbot. + + Args: + url: Url to retrieve layout test revision numbers. + + Returns: + latest revision or + None on failure. + """ + + _log.debug('Url to retrieve revision: "%s"', url) + + f = urllib.urlopen(url) + content = f.read() + f.close() + + revisions = re.findall(self.REVISION_REGEX, content) + if not revisions: + _log.error('Failed to find revision, content: "%s"', content) + return None + + revisions.sort(key=int) + _log.info('Latest revision: "%s"', revisions[len(revisions) - 1]) + return revisions[len(revisions) - 1] + + def _get_archive_dir_name(self, platform, webkit_canary): + """Get name of the layout test archive directory. + + Returns: + Directory name or + None on failure + """ + + if webkit_canary: + platform += '-canary' + + if platform in ARCHIVE_DIR_NAME_DICT: + return ARCHIVE_DIR_NAME_DICT[platform] + else: + _log.error('Cannot find platform key %s in archive ' + 'directory name dictionary', platform) + return None + + def _get_archive_url(self): + """Generate the url to download latest layout test archive. 
+ + Returns: + Url to download archive or + None on failure + """ + + if self._options.force_archive_url: + return self._options.force_archive_url + + dir_name = self._get_archive_dir_name(self._platform, + self._options.webkit_canary) + if not dir_name: + return None + + _log.debug('Buildbot platform dir name: "%s"', dir_name) + + url_base = '%s/%s/' % (self._options.archive_url, dir_name) + latest_revision = self._get_latest_revision(url_base) + if latest_revision is None or latest_revision <= 0: + return None + archive_url = ('%s%s/layout-test-results.zip' % (url_base, + latest_revision)) + _log.info('Archive url: "%s"', archive_url) + return archive_url + + def _download_buildbot_archive(self): + """Download layout test archive file from buildbot. + + Returns: + True if download succeeded or + False otherwise. + """ + + url = self._get_archive_url() + if url is None: + return None + + fn = urllib.urlretrieve(url)[0] + _log.info('Archive downloaded and saved to file: "%s"', fn) + return fn + + def _extract_and_add_new_baselines(self, archive_file): + """Extract new baselines from archive and add them to SVN repository. + + Args: + archive_file: full path to the archive file. + + Returns: + List of tests that have been rebaselined or + None on failure. + """ + + zip_file = zipfile.ZipFile(archive_file, 'r') + zip_namelist = zip_file.namelist() + + _log.debug('zip file namelist:') + for name in zip_namelist: + _log.debug(' ' + name) + + platform = self._rebaseline_port.test_platform_name_to_name( + self._platform) + _log.debug('Platform dir: "%s"', platform) + + test_no = 1 + self._rebaselined_tests = [] + for test in self._rebaselining_tests: + _log.info('Test %d: %s', test_no, test) + + found = False + scm_error = False + test_basename = os.path.splitext(test)[0] + for suffix in BASELINE_SUFFIXES: + archive_test_name = ('layout-test-results/%s-actual%s' % + (test_basename, suffix)) + _log.debug(' Archive test file name: "%s"', + archive_test_name) + if not archive_test_name in zip_namelist: + _log.info(' %s file not in archive.', suffix) + continue + + found = True + _log.info(' %s file found in archive.', suffix) + + # Extract new baseline from archive and save it to a temp file. + data = zip_file.read(archive_test_name) + temp_fd, temp_name = tempfile.mkstemp(suffix) + f = os.fdopen(temp_fd, 'wb') + f.write(data) + f.close() + + expected_filename = '%s-expected%s' % (test_basename, suffix) + expected_fullpath = os.path.join( + self._rebaseline_port.baseline_path(), expected_filename) + expected_fullpath = os.path.normpath(expected_fullpath) + _log.debug(' Expected file full path: "%s"', + expected_fullpath) + + # TODO(victorw): for now, the rebaselining tool checks whether + # or not THIS baseline is duplicate and should be skipped. + # We could improve the tool to check all baselines in upper + # and lower + # levels and remove all duplicated baselines. + if self._is_dup_baseline(temp_name, + expected_fullpath, + test, + suffix, + self._platform): + os.remove(temp_name) + self._delete_baseline(expected_fullpath) + continue + + # Create the new baseline directory if it doesn't already + # exist. 
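[Editor's note: _get_latest_revision() and _get_archive_url() above combine to turn a buildbot directory listing into a concrete archive URL: scrape the numbered subdirectories with REVISION_REGEX, take the highest, and append layout-test-results.zip. A compressed sketch of that flow follows; the helper name and error handling are illustrative, not webkitpy API.]

    import re
    import urllib

    def latest_results_url(url_base):
        # url_base is '<archive_url>/<dir_name>/' as built in _get_archive_url().
        listing = urllib.urlopen(url_base).read()
        revisions = re.findall(r'<a href=\"(\d+)/\">', listing)  # REVISION_REGEX
        if not revisions:
            return None
        return '%s%s/layout-test-results.zip' % (url_base, max(revisions, key=int))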
+ self._port.maybe_make_directory( + os.path.dirname(expected_fullpath)) + + shutil.move(temp_name, expected_fullpath) + + if 0 != self._scm.add(expected_fullpath, return_exit_code=True): + # FIXME: print detailed diagnose messages + scm_error = True + elif suffix != '.checksum': + self._create_html_baseline_files(expected_fullpath) + + if not found: + _log.warn(' No new baselines found in archive.') + else: + if scm_error: + _log.warn(' Failed to add baselines to your repository.') + else: + _log.info(' Rebaseline succeeded.') + self._rebaselined_tests.append(test) + + test_no += 1 + + zip_file.close() + os.remove(archive_file) + + return self._rebaselined_tests + + def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix, + platform): + """Check whether a baseline is duplicate and can fallback to same + baseline for another platform. For example, if a test has same + baseline on linux and windows, then we only store windows + baseline and linux baseline will fallback to the windows version. + + Args: + expected_filename: baseline expectation file name. + test: test name. + suffix: file suffix of the expected results, including dot; + e.g. '.txt' or '.png'. + platform: baseline platform 'mac', 'win' or 'linux'. + + Returns: + True if the baseline is unnecessary. + False otherwise. + """ + test_filepath = os.path.join(self._target_port.layout_tests_dir(), + test) + all_baselines = self._rebaseline_port.expected_baselines( + test_filepath, suffix, True) + for (fallback_dir, fallback_file) in all_baselines: + if fallback_dir and fallback_file: + fallback_fullpath = os.path.normpath( + os.path.join(fallback_dir, fallback_file)) + if fallback_fullpath.lower() != baseline_path.lower(): + with codecs.open(new_baseline, "r", + None) as file_handle1: + new_output = file_handle1.read() + with codecs.open(fallback_fullpath, "r", + None) as file_handle2: + fallback_output = file_handle2.read() + is_image = baseline_path.lower().endswith('.png') + if not self._diff_baselines(new_output, fallback_output, + is_image): + _log.info(' Found same baseline at %s', + fallback_fullpath) + return True + else: + return False + + return False + + def _diff_baselines(self, output1, output2, is_image): + """Check whether two baselines are different. + + Args: + output1, output2: contents of the baselines to compare. + + Returns: + True if two files are different or have different extensions. + False otherwise. + """ + + if is_image: + return self._port.diff_image(output1, output2, None) + else: + return self._port.compare_text(output1, output2) + + def _delete_baseline(self, filename): + """Remove the file from repository and delete it from disk. + + Args: + filename: full path of the file to delete. + """ + + if not filename or not os.path.isfile(filename): + return + self._scm.delete(filename) + + def _update_rebaselined_tests_in_file(self, backup): + """Update the rebaselined tests in test expectations file. + + Args: + backup: if True, backup the original test expectations file. 
+ + Returns: + no + """ + + if self._rebaselined_tests: + new_expectations = ( + self._test_expectations.remove_platform_from_expectations( + self._rebaselined_tests, self._platform)) + path = self._target_port.path_to_test_expectations_file() + if backup: + date_suffix = time.strftime('%Y%m%d%H%M%S', + time.localtime(time.time())) + backup_file = ('%s.orig.%s' % (path, date_suffix)) + if os.path.exists(backup_file): + os.remove(backup_file) + _log.info('Saving original file to "%s"', backup_file) + os.rename(path, backup_file) + # FIXME: What encoding are these files? + # Or is new_expectations always a byte array? + with open(path, "w") as file: + file.write(new_expectations) + # self._scm.add(path) + else: + _log.info('No test was rebaselined so nothing to remove.') + + def _create_html_baseline_files(self, baseline_fullpath): + """Create baseline files (old, new and diff) in html directory. + + The files are used to compare the rebaselining results. + + Args: + baseline_fullpath: full path of the expected baseline file. + """ + + if not baseline_fullpath or not os.path.exists(baseline_fullpath): + return + + # Copy the new baseline to html directory for result comparison. + baseline_filename = os.path.basename(baseline_fullpath) + new_file = get_result_file_fullpath(self._options.html_directory, + baseline_filename, self._platform, + 'new') + shutil.copyfile(baseline_fullpath, new_file) + _log.info(' Html: copied new baseline file from "%s" to "%s".', + baseline_fullpath, new_file) + + # Get the old baseline from the repository and save to the html directory. + try: + output = self._scm.show_head(baseline_fullpath) + except ScriptError, e: + _log.info(e) + output = "" + + if (not output) or (output.upper().rstrip().endswith( + 'NO SUCH FILE OR DIRECTORY')): + _log.info(' No base file: "%s"', baseline_fullpath) + return + base_file = get_result_file_fullpath(self._options.html_directory, + baseline_filename, self._platform, + 'old') + # We should be using an explicit encoding here. + with open(base_file, "wb") as file: + file.write(output) + _log.info(' Html: created old baseline file: "%s".', + base_file) + + # Get the diff between old and new baselines and save to the html dir. 
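[Editor's note: get_result_file_fullpath(), used above for the 'new' and 'old' copies, splices the platform and result type in front of the file extension. A worked example; the /tmp path is purely illustrative.]

    path = get_result_file_fullpath('/tmp/rebaseline_html',
                                    'foo-expected.txt', 'mac', 'old')
    # base='foo-expected', ext='.txt', so the result is
    # '/tmp/rebaseline_html/foo-expected-mac-old.txt'; the unit test later in
    # this patch relies on exactly this naming when building comparison links.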
+ if baseline_filename.upper().endswith('.TXT'): + output = self._scm.diff_for_file(baseline_fullpath, log=_log) + if output: + diff_file = get_result_file_fullpath( + self._options.html_directory, baseline_filename, + self._platform, 'diff') + with open(diff_file, 'wb') as file: + file.write(output) + _log.info(' Html: created baseline diff file: "%s".', + diff_file) + + +class HtmlGenerator(object): + """Class to generate rebaselining result comparison html.""" + + HTML_REBASELINE = ('<html>' + '<head>' + '<style>' + 'body {font-family: sans-serif;}' + '.mainTable {background: #666666;}' + '.mainTable td , .mainTable th {background: white;}' + '.detail {margin-left: 10px; margin-top: 3px;}' + '</style>' + '<title>Rebaselining Result Comparison (%(time)s)' + '</title>' + '</head>' + '<body>' + '<h2>Rebaselining Result Comparison (%(time)s)</h2>' + '%(body)s' + '</body>' + '</html>') + HTML_NO_REBASELINING_TESTS = ( + '<p>No tests found that need rebaselining.</p>') + HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>' + '%s</table><br>') + HTML_TR_TEST = ('<tr>' + '<th style="background-color: #CDECDE; border-bottom: ' + '1px solid black; font-size: 18pt; font-weight: bold" ' + 'colspan="5">' + '<a href="%s">%s</a>' + '</th>' + '</tr>') + HTML_TEST_DETAIL = ('<div class="detail">' + '<tr>' + '<th width="100">Baseline</th>' + '<th width="100">Platform</th>' + '<th width="200">Old</th>' + '<th width="200">New</th>' + '<th width="150">Difference</th>' + '</tr>' + '%s' + '</div>') + HTML_TD_NOLINK = '<td align=center><a>%s</a></td>' + HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>' + HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">' + '<img style="width: 200" src="%(uri)s" /></a></td>') + HTML_TR = '<tr>%s</tr>' + + def __init__(self, target_port, options, platforms, rebaselining_tests, + executive): + self._html_directory = options.html_directory + self._target_port = target_port + self._platforms = platforms + self._rebaselining_tests = rebaselining_tests + self._executive = executive + self._html_file = os.path.join(options.html_directory, + 'rebaseline.html') + + def abspath_to_uri(self, filename): + """Converts an absolute path to a file: URI.""" + return path.abspath_to_uri(filename, self._executive) + + def generate_html(self): + """Generate html file for rebaselining result comparison.""" + + _log.info('Generating html file') + + html_body = '' + if not self._rebaselining_tests: + html_body += self.HTML_NO_REBASELINING_TESTS + else: + tests = list(self._rebaselining_tests) + tests.sort() + + test_no = 1 + for test in tests: + _log.info('Test %d: %s', test_no, test) + html_body += self._generate_html_for_one_test(test) + + html = self.HTML_REBASELINE % ({'time': time.asctime(), + 'body': html_body}) + _log.debug(html) + + with codecs.open(self._html_file, "w", "utf-8") as file: + file.write(html) + + _log.info('Baseline comparison html generated at "%s"', + self._html_file) + + def show_html(self): + """Launch the rebaselining html in brwoser.""" + + _log.info('Launching html: "%s"', self._html_file) + user.User().open_url(self._html_file) + _log.info('Html launched.') + + def _generate_baseline_links(self, test_basename, suffix, platform): + """Generate links for baseline results (old, new and diff). 
+ + Args: + test_basename: base filename of the test + suffix: baseline file suffixes: '.txt', '.png' + platform: win, linux or mac + + Returns: + html links for showing baseline results (old, new and diff) + """ + + baseline_filename = '%s-expected%s' % (test_basename, suffix) + _log.debug(' baseline filename: "%s"', baseline_filename) + + new_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, 'new') + _log.info(' New baseline file: "%s"', new_file) + if not os.path.exists(new_file): + _log.info(' No new baseline file: "%s"', new_file) + return '' + + old_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, 'old') + _log.info(' Old baseline file: "%s"', old_file) + if suffix == '.png': + html_td_link = self.HTML_TD_LINK_IMG + else: + html_td_link = self.HTML_TD_LINK + + links = '' + if os.path.exists(old_file): + links += html_td_link % { + 'uri': self.abspath_to_uri(old_file), + 'name': baseline_filename} + else: + _log.info(' No old baseline file: "%s"', old_file) + links += self.HTML_TD_NOLINK % '' + + links += html_td_link % {'uri': self.abspath_to_uri(new_file), + 'name': baseline_filename} + + diff_file = get_result_file_fullpath(self._html_directory, + baseline_filename, platform, + 'diff') + _log.info(' Baseline diff file: "%s"', diff_file) + if os.path.exists(diff_file): + links += html_td_link % {'uri': self.abspath_to_uri(diff_file), + 'name': 'Diff'} + else: + _log.info(' No baseline diff file: "%s"', diff_file) + links += self.HTML_TD_NOLINK % '' + + return links + + def _generate_html_for_one_test(self, test): + """Generate html for one rebaselining test. + + Args: + test: layout test name + + Returns: + html that compares baseline results for the test. + """ + + test_basename = os.path.basename(os.path.splitext(test)[0]) + _log.info(' basename: "%s"', test_basename) + rows = [] + for suffix in BASELINE_SUFFIXES: + if suffix == '.checksum': + continue + + _log.info(' Checking %s files', suffix) + for platform in self._platforms: + links = self._generate_baseline_links(test_basename, suffix, + platform) + if links: + row = self.HTML_TD_NOLINK % self._get_baseline_result_type( + suffix) + row += self.HTML_TD_NOLINK % platform + row += links + _log.debug(' html row: %s', row) + + rows.append(self.HTML_TR % row) + + if rows: + test_path = os.path.join(self._target_port.layout_tests_dir(), + test) + html = self.HTML_TR_TEST % (self.abspath_to_uri(test_path), test) + html += self.HTML_TEST_DETAIL % ' '.join(rows) + + _log.debug(' html for test: %s', html) + return self.HTML_TABLE_TEST % html + + return '' + + def _get_baseline_result_type(self, suffix): + """Name of the baseline result type.""" + + if suffix == '.png': + return 'Pixel' + elif suffix == '.txt': + return 'Render Tree' + else: + return 'Other' + + +def get_host_port_object(options): + """Return a port object for the platform we're running on.""" + # The only thing we really need on the host is a way to diff + # text files and image files, which means we need to check that some + # version of ImageDiff has been built. We will look for either Debug + # or Release versions of the default port on the platform. 
+ options.configuration = "Release" + port_obj = port.get(None, options) + if not port_obj.check_image_diff(override_step=None, logging=False): + _log.debug('No release version of the image diff binary was found.') + options.configuration = "Debug" + port_obj = port.get(None, options) + if not port_obj.check_image_diff(override_step=None, logging=False): + _log.error('No version of image diff was found. Check your build.') + return None + else: + _log.debug('Found the debug version of the image diff binary.') + else: + _log.debug('Found the release version of the image diff binary.') + return port_obj + + +def parse_options(args): + """Parse options and return a pair of host options and target options.""" + option_parser = optparse.OptionParser() + option_parser.add_option('-v', '--verbose', + action='store_true', + default=False, + help='include debug-level logging.') + + option_parser.add_option('-q', '--quiet', + action='store_true', + help='Suppress result HTML viewing') + + option_parser.add_option('-p', '--platforms', + default='mac,win,win-xp,win-vista,linux', + help=('Comma delimited list of platforms ' + 'that need rebaselining.')) + + option_parser.add_option('-u', '--archive_url', + default=('http://build.chromium.org/f/chromium/' + 'layout_test_results'), + help=('Url to find the layout test result archive' + ' file.')) + option_parser.add_option('-U', '--force_archive_url', + help=('Url of result zip file. This option is for debugging ' + 'purposes')) + + option_parser.add_option('-w', '--webkit_canary', + action='store_true', + default=False, + help=('If True, pull baselines from webkit.org ' + 'canary bot.')) + + option_parser.add_option('-b', '--backup', + action='store_true', + default=False, + help=('Whether or not to backup the original test' + ' expectations file after rebaseline.')) + + option_parser.add_option('-d', '--html_directory', + default='', + help=('The directory that stores the results for ' + 'rebaselining comparison.')) + + option_parser.add_option('', '--use_drt', + action='store_true', + default=False, + help=('Use ImageDiff from DumpRenderTree instead ' + 'of image_diff for pixel tests.')) + + option_parser.add_option('', '--target-platform', + default='chromium', + help=('The target platform to rebaseline ' + '("mac", "chromium", "qt", etc.). Defaults ' + 'to "chromium".')) + options = option_parser.parse_args(args)[0] + + target_options = copy.copy(options) + if options.target_platform == 'chromium': + target_options.chromium = True + options.tolerance = 0 + + return (options, target_options) + + +def main(executive=Executive()): + """Main function to produce new baselines.""" + + (options, target_options) = parse_options(sys.argv[1:]) + + # We need to create three different Port objects over the life of this + # script. |target_port_obj| is used to determine configuration information: + # location of the expectations file, names of ports to rebaseline, etc. + # |port_obj| is used for runtime functionality like actually diffing + # Then we create a rebaselining port to actual find and manage the + # baselines. + target_port_obj = port.get(None, target_options) + + # Set up our logging format. + log_level = logging.INFO + if options.verbose: + log_level = logging.DEBUG + logging.basicConfig(level=log_level, + format=('%(asctime)s %(filename)s:%(lineno)-3d ' + '%(levelname)s %(message)s'), + datefmt='%y%m%d %H:%M:%S') + + host_port_obj = get_host_port_object(options) + if not host_port_obj: + sys.exit(1) + + # Verify 'platforms' option is valid. 
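[Editor's note: parse_options() above always returns a (host, target) pair and special-cases the default 'chromium' target. A usage sketch; the argument values are illustrative, and the behaviour matches the unit tests later in this patch.]

    options, target_options = parse_options(['--platforms', 'mac,win',
                                             '--target-platform', 'chromium'])
    # For the 'chromium' target the parser copies the options, sets
    # target_options.chromium = True and forces options.tolerance = 0; for any
    # other --target-platform the copied options are left untouched.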
+ if not options.platforms: + _log.error('Invalid "platforms" option. --platforms must be ' + 'specified in order to rebaseline.') + sys.exit(1) + platforms = [p.strip().lower() for p in options.platforms.split(',')] + for platform in platforms: + if not platform in REBASELINE_PLATFORM_ORDER: + _log.error('Invalid platform: "%s"' % (platform)) + sys.exit(1) + + # Adjust the platform order so rebaseline tool is running at the order of + # 'mac', 'win' and 'linux'. This is in same order with layout test baseline + # search paths. It simplifies how the rebaseline tool detects duplicate + # baselines. Check _IsDupBaseline method for details. + rebaseline_platforms = [] + for platform in REBASELINE_PLATFORM_ORDER: + if platform in platforms: + rebaseline_platforms.append(platform) + + options.html_directory = setup_html_directory(options.html_directory) + + rebaselining_tests = set() + backup = options.backup + for platform in rebaseline_platforms: + rebaseliner = Rebaseliner(host_port_obj, target_port_obj, + platform, options) + + _log.info('') + log_dashed_string('Rebaseline started', platform) + if rebaseliner.run(backup): + # Only need to backup one original copy of test expectation file. + backup = False + log_dashed_string('Rebaseline done', platform) + else: + log_dashed_string('Rebaseline failed', platform, logging.ERROR) + + rebaselining_tests |= set(rebaseliner.get_rebaselining_tests()) + + _log.info('') + log_dashed_string('Rebaselining result comparison started', None) + html_generator = HtmlGenerator(target_port_obj, + options, + rebaseline_platforms, + rebaselining_tests, + executive=executive) + html_generator.generate_html() + if not options.quiet: + html_generator.show_html() + log_dashed_string('Rebaselining result comparison done', None) + + sys.exit(0) + +if '__main__' == __name__: + main() diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py new file mode 100644 index 0000000..7c55b94 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py @@ -0,0 +1,157 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for rebaseline_chromium_webkit_tests.py.""" + +import os +import sys +import unittest + +from webkitpy.tool import mocktool +from webkitpy.layout_tests import port +from webkitpy.layout_tests import rebaseline_chromium_webkit_tests +from webkitpy.common.system.executive import Executive, ScriptError + + +class MockPort(object): + def __init__(self, image_diff_exists): + self.image_diff_exists = image_diff_exists + + def check_image_diff(self, override_step, logging): + return self.image_diff_exists + + +def get_mock_get(config_expectations): + def mock_get(port_name, options): + return MockPort(config_expectations[options.configuration]) + return mock_get + + +class TestGetHostPortObject(unittest.TestCase): + def assert_result(self, release_present, debug_present, valid_port_obj): + # Tests whether we get a valid port object returned when we claim + # that Image diff is (or isn't) present in the two configs. + port.get = get_mock_get({'Release': release_present, + 'Debug': debug_present}) + options = mocktool.MockOptions(configuration=None, + html_directory=None) + port_obj = rebaseline_chromium_webkit_tests.get_host_port_object( + options) + if valid_port_obj: + self.assertNotEqual(port_obj, None) + else: + self.assertEqual(port_obj, None) + + def test_get_host_port_object(self): + # Save the normal port.get() function for future testing. + old_get = port.get + + # Test whether we get a valid port object back for the four + # possible cases of having ImageDiffs built. It should work when + # there is at least one binary present. + self.assert_result(False, False, False) + self.assert_result(True, False, True) + self.assert_result(False, True, True) + self.assert_result(True, True, True) + + # Restore the normal port.get() function. + port.get = old_get + + +class TestRebaseliner(unittest.TestCase): + def make_rebaseliner(self): + options = mocktool.MockOptions(configuration=None, + html_directory=None) + host_port_obj = port.get('test', options) + target_options = options + target_port_obj = port.get('test', target_options) + platform = 'test' + return rebaseline_chromium_webkit_tests.Rebaseliner( + host_port_obj, target_port_obj, platform, options) + + def test_parse_options(self): + (options, target_options) = rebaseline_chromium_webkit_tests.parse_options([]) + self.assertTrue(target_options.chromium) + self.assertEqual(options.tolerance, 0) + + (options, target_options) = rebaseline_chromium_webkit_tests.parse_options(['--target-platform', 'qt']) + self.assertFalse(hasattr(target_options, 'chromium')) + self.assertEqual(options.tolerance, 0) + + def test_noop(self): + # this method tests that was can at least instantiate an object, even + # if there is nothing to do. 
+ rebaseliner = self.make_rebaseliner() + self.assertNotEqual(rebaseliner, None) + + def test_diff_baselines_txt(self): + rebaseliner = self.make_rebaseliner() + output = rebaseliner._port.expected_text( + os.path.join(rebaseliner._port.layout_tests_dir(), + 'passes/text.html')) + self.assertFalse(rebaseliner._diff_baselines(output, output, + is_image=False)) + + def test_diff_baselines_png(self): + rebaseliner = self.make_rebaseliner() + image = rebaseliner._port.expected_image( + os.path.join(rebaseliner._port.layout_tests_dir(), + 'passes/image.html')) + self.assertFalse(rebaseliner._diff_baselines(image, image, + is_image=True)) + + +class TestHtmlGenerator(unittest.TestCase): + def make_generator(self, tests): + return rebaseline_chromium_webkit_tests.HtmlGenerator( + target_port=None, + options=mocktool.MockOptions(configuration=None, + html_directory='/tmp'), + platforms=['mac'], + rebaselining_tests=tests, + executive=Executive()) + + def test_generate_baseline_links(self): + orig_platform = sys.platform + orig_exists = os.path.exists + + try: + sys.platform = 'darwin' + os.path.exists = lambda x: True + generator = self.make_generator(["foo.txt"]) + links = generator._generate_baseline_links("foo", ".txt", "mac") + expected_links = '<td align=center><a href="file:///tmp/foo-expected-mac-old.txt">foo-expected.txt</a></td><td align=center><a href="file:///tmp/foo-expected-mac-new.txt">foo-expected.txt</a></td><td align=center><a href="file:///tmp/foo-expected-mac-diff.txt">Diff</a></td>' + self.assertEqual(links, expected_links) + finally: + sys.platform = orig_platform + os.path.exists = orig_exists + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py new file mode 100755 index 0000000..f7e5330 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py @@ -0,0 +1,434 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Run layout tests.""" + +from __future__ import with_statement + +import codecs +import errno +import logging +import optparse +import os +import signal +import sys + +from layout_package import printing +from layout_package import test_runner + +from webkitpy.common.system import user +from webkitpy.thirdparty import simplejson + +import port + +_log = logging.getLogger(__name__) + + +def run(port, options, args, regular_output=sys.stderr, + buildbot_output=sys.stdout): + """Run the tests. + + Args: + port: Port object for port-specific behavior + options: a dictionary of command line options + args: a list of sub directories or files to test + regular_output: a stream-like object that we can send logging/debug + output to + buildbot_output: a stream-like object that we can write all output that + is intended to be parsed by the buildbot to + Returns: + the number of unexpected results that occurred, or -1 if there is an + error. + + """ + warnings = _set_up_derived_options(port, options) + + printer = printing.Printer(port, options, regular_output, buildbot_output, + int(options.child_processes), options.experimental_fully_parallel) + for w in warnings: + _log.warning(w) + + if options.help_printing: + printer.help_printing() + printer.cleanup() + return 0 + + last_unexpected_results = _gather_unexpected_results(options) + if options.print_last_failures: + printer.write("\n".join(last_unexpected_results) + "\n") + printer.cleanup() + return 0 + + # We wrap any parts of the run that are slow or likely to raise exceptions + # in a try/finally to ensure that we clean up the logging configuration. + num_unexpected_results = -1 + try: + runner = test_runner.TestRunner(port, options, printer) + runner._print_config() + + printer.print_update("Collecting tests ...") + try: + runner.collect_tests(args, last_unexpected_results) + except IOError, e: + if e.errno == errno.ENOENT: + return -1 + raise + + printer.print_update("Parsing expectations ...") + if options.lint_test_files: + return runner.lint() + runner.parse_expectations(port.test_platform_name(), + options.configuration == 'Debug') + + printer.print_update("Checking build ...") + if not port.check_build(runner.needs_http()): + _log.error("Build check failed") + return -1 + + result_summary = runner.set_up_run() + if result_summary: + num_unexpected_results = runner.run(result_summary) + runner.clean_up_run() + _log.debug("Testing completed, Exit status: %d" % + num_unexpected_results) + finally: + printer.cleanup() + + return num_unexpected_results + + +def _set_up_derived_options(port_obj, options): + """Sets the options values that depend on other options values.""" + # We return a list of warnings to print after the printer is initialized. 
+ warnings = [] + + if options.worker_model == 'old-inline': + if options.child_processes and int(options.child_processes) > 1: + warnings.append("--worker-model=old-inline overrides --child-processes") + options.child_processes = "1" + if not options.child_processes: + options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", + str(port_obj.default_child_processes())) + + if not options.configuration: + options.configuration = port_obj.default_configuration() + + if options.pixel_tests is None: + options.pixel_tests = True + + if not options.use_apache: + options.use_apache = sys.platform in ('darwin', 'linux2') + + if not os.path.isabs(options.results_directory): + # This normalizes the path to the build dir. + # FIXME: how this happens is not at all obvious; this is a dumb + # interface and should be cleaned up. + options.results_directory = port_obj.results_directory() + + if not options.time_out_ms: + if options.configuration == "Debug": + options.time_out_ms = str(2 * test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS) + else: + options.time_out_ms = str(test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS) + + options.slow_time_out_ms = str(5 * int(options.time_out_ms)) + return warnings + + +def _gather_unexpected_results(options): + """Returns the unexpected results from the previous run, if any.""" + last_unexpected_results = [] + if options.print_last_failures or options.retest_last_failures: + unexpected_results_filename = os.path.join( + options.results_directory, "unexpected_results.json") + with codecs.open(unexpected_results_filename, "r", "utf-8") as file: + results = simplejson.load(file) + last_unexpected_results = results['tests'].keys() + return last_unexpected_results + + +def _compat_shim_callback(option, opt_str, value, parser): + print "Ignoring unsupported option: %s" % opt_str + + +def _compat_shim_option(option_name, **kwargs): + return optparse.make_option(option_name, action="callback", + callback=_compat_shim_callback, + help="Ignored, for old-run-webkit-tests compat only.", **kwargs) + + +def parse_args(args=None): + """Provides a default set of command line args. + + Returns a tuple of options, args from optparse""" + + # FIXME: All of these options should be stored closer to the code which + # FIXME: actually uses them. configuration_options should move + # FIXME: to WebKitPort and be shared across all scripts. + configuration_options = [ + optparse.make_option("-t", "--target", dest="configuration", + help="(DEPRECATED)"), + # FIXME: --help should display which configuration is default. + optparse.make_option('--debug', action='store_const', const='Debug', + dest="configuration", + help='Set the configuration to Debug'), + optparse.make_option('--release', action='store_const', + const='Release', dest="configuration", + help='Set the configuration to Release'), + # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION. + ] + + print_options = printing.print_options() + + # FIXME: These options should move onto the ChromiumPort. 
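[Editor's note: the timeout handling in _set_up_derived_options() above reduces to a small rule: Debug builds double the per-test timeout, and slow tests always get five times the per-test timeout. A restatement of just that rule; DEFAULT_TEST_TIMEOUT_MS lives on test_runner.TestRunner and its value is not shown in this patch.]

    def derived_timeouts(configuration, default_ms):
        # Illustrative restatement, not the webkitpy function itself.
        time_out_ms = 2 * default_ms if configuration == 'Debug' else default_ms
        slow_time_out_ms = 5 * time_out_ms
        return str(time_out_ms), str(slow_time_out_ms)  # stored as strings, as above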
+ chromium_options = [ + optparse.make_option("--chromium", action="store_true", default=False, + help="use the Chromium port"), + optparse.make_option("--startup-dialog", action="store_true", + default=False, help="create a dialog on DumpRenderTree startup"), + optparse.make_option("--gp-fault-error-box", action="store_true", + default=False, help="enable Windows GP fault error box"), + optparse.make_option("--multiple-loads", + type="int", help="turn on multiple loads of each test"), + optparse.make_option("--js-flags", + type="string", help="JavaScript flags to pass to tests"), + optparse.make_option("--nocheck-sys-deps", action="store_true", + default=False, + help="Don't check the system dependencies (themes)"), + optparse.make_option("--use-test-shell", action="store_true", + default=False, + help="Use test_shell instead of DRT"), + optparse.make_option("--accelerated-compositing", + action="store_true", + help="Use hardware-accelated compositing for rendering"), + optparse.make_option("--no-accelerated-compositing", + action="store_false", + dest="accelerated_compositing", + help="Don't use hardware-accelerated compositing for rendering"), + optparse.make_option("--accelerated-2d-canvas", + action="store_true", + help="Use hardware-accelerated 2D Canvas calls"), + optparse.make_option("--no-accelerated-2d-canvas", + action="store_false", + dest="accelerated_2d_canvas", + help="Don't use hardware-accelerated 2D Canvas calls"), + ] + + # Missing Mac-specific old-run-webkit-tests options: + # FIXME: Need: -g, --guard for guard malloc support on Mac. + # FIXME: Need: -l --leaks Enable leaks checking. + # FIXME: Need: --sample-on-timeout Run sample on timeout + + old_run_webkit_tests_compat = [ + # NRWT doesn't generate results by default anyway. + _compat_shim_option("--no-new-test-results"), + # NRWT doesn't sample on timeout yet anyway. + _compat_shim_option("--no-sample-on-timeout"), + # FIXME: NRWT needs to support remote links eventually. + _compat_shim_option("--use-remote-links-to-tests"), + ] + + results_options = [ + # NEED for bots: --use-remote-links-to-tests Link to test files + # within the SVN repository in the results. + optparse.make_option("-p", "--pixel-tests", action="store_true", + dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"), + optparse.make_option("--no-pixel-tests", action="store_false", + dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"), + optparse.make_option("--tolerance", + help="Ignore image differences less than this percentage (some " + "ports may ignore this option)", type="float"), + optparse.make_option("--results-directory", + default="layout-test-results", + help="Output results directory source dir, relative to Debug or " + "Release"), + optparse.make_option("--new-baseline", action="store_true", + default=False, help="Save all generated results as new baselines " + "into the platform directory, overwriting whatever's " + "already there."), + optparse.make_option("--reset-results", action="store_true", + default=False, help="Reset any existing baselines to the " + "generated results"), + optparse.make_option("--no-show-results", action="store_false", + default=True, dest="show_results", + help="Don't launch a browser with results after the tests " + "are done"), + # FIXME: We should have a helper function to do this sort of + # deprectated mapping and automatically log, etc. 
+ optparse.make_option("--noshow-results", action="store_false", + dest="show_results", + help="Deprecated, same as --no-show-results."), + optparse.make_option("--no-launch-safari", action="store_false", + dest="show_results", + help="old-run-webkit-tests compat, same as --noshow-results."), + # old-run-webkit-tests: + # --[no-]launch-safari Launch (or do not launch) Safari to display + # test results (default: launch) + optparse.make_option("--full-results-html", action="store_true", + default=False, + help="Show all failures in results.html, rather than only " + "regressions"), + optparse.make_option("--clobber-old-results", action="store_true", + default=False, help="Clobbers test results from previous runs."), + optparse.make_option("--platform", + help="Override the platform for expected results"), + optparse.make_option("--no-record-results", action="store_false", + default=True, dest="record_results", + help="Don't record the results."), + # old-run-webkit-tests also has HTTP toggle options: + # --[no-]http Run (or do not run) http tests + # (default: run) + ] + + test_options = [ + optparse.make_option("--build", dest="build", + action="store_true", default=True, + help="Check to ensure the DumpRenderTree build is up-to-date " + "(default)."), + optparse.make_option("--no-build", dest="build", + action="store_false", help="Don't check to see if the " + "DumpRenderTree build is up-to-date."), + optparse.make_option("-n", "--dry-run", action="store_true", + default=False, + help="Do everything but actually run the tests or upload results."), + # old-run-webkit-tests has --valgrind instead of wrapper. + optparse.make_option("--wrapper", + help="wrapper command to insert before invocations of " + "DumpRenderTree; option is split on whitespace before " + "running. 
(Example: --wrapper='valgrind --smc-check=all')"), + # old-run-webkit-tests: + # -i|--ignore-tests Comma-separated list of directories + # or tests to ignore + optparse.make_option("--test-list", action="append", + help="read list of tests to run from file", metavar="FILE"), + # old-run-webkit-tests uses --skipped==[default|ignore|only] + # instead of --force: + optparse.make_option("--force", action="store_true", default=False, + help="Run all tests, even those marked SKIP in the test list"), + optparse.make_option("--use-apache", action="store_true", + default=False, help="Whether to use apache instead of lighttpd."), + optparse.make_option("--time-out-ms", + help="Set the timeout for each test"), + # old-run-webkit-tests calls --randomize-order --random: + optparse.make_option("--randomize-order", action="store_true", + default=False, help=("Run tests in random order (useful " + "for tracking down corruption)")), + optparse.make_option("--run-chunk", + help=("Run a specified chunk (n:l), the nth of len l, " + "of the layout tests")), + optparse.make_option("--run-part", help=("Run a specified part (n:m), " + "the nth of m parts, of the layout tests")), + # old-run-webkit-tests calls --batch-size: --nthly n + # Restart DumpRenderTree every n tests (default: 1000) + optparse.make_option("--batch-size", + help=("Run a the tests in batches (n), after every n tests, " + "DumpRenderTree is relaunched."), type="int", default=0), + # old-run-webkit-tests calls --run-singly: -1|--singly + # Isolate each test case run (implies --nthly 1 --verbose) + optparse.make_option("--run-singly", action="store_true", + default=False, help="run a separate DumpRenderTree for each test"), + optparse.make_option("--child-processes", + help="Number of DumpRenderTrees to run in parallel."), + # FIXME: Display default number of child processes that will run. + optparse.make_option("--worker-model", action="store", + default="old-threads", help=("controls worker model. Valid values " + "are 'old-inline', 'old-threads'.")), + optparse.make_option("--experimental-fully-parallel", + action="store_true", default=False, + help="run all tests in parallel"), + optparse.make_option("--exit-after-n-failures", type="int", nargs=1, + help="Exit after the first N failures instead of running all " + "tests"), + optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int", + nargs=1, help="Exit after the first N crashes instead of running " + "all tests"), + # FIXME: consider: --iterations n + # Number of times to run the set of tests (e.g. ABCABCABC) + optparse.make_option("--print-last-failures", action="store_true", + default=False, help="Print the tests in the last run that " + "had unexpected failures (or passes) and then exit."), + optparse.make_option("--retest-last-failures", action="store_true", + default=False, help="re-test the tests in the last run that " + "had unexpected failures (or passes)."), + optparse.make_option("--retry-failures", action="store_true", + default=True, + help="Re-try any tests that produce unexpected results (default)"), + optparse.make_option("--no-retry-failures", action="store_false", + dest="retry_failures", + help="Don't re-try any tests that produce unexpected results."), + ] + + misc_options = [ + optparse.make_option("--lint-test-files", action="store_true", + default=False, help=("Makes sure the test files parse for all " + "configurations. 
Does not run any tests.")), + ] + + # FIXME: Move these into json_results_generator.py + results_json_options = [ + optparse.make_option("--master-name", help="The name of the buildbot master."), + optparse.make_option("--builder-name", default="DUMMY_BUILDER_NAME", + help=("The name of the builder shown on the waterfall running " + "this script e.g. WebKit.")), + optparse.make_option("--build-name", default="DUMMY_BUILD_NAME", + help=("The name of the builder used in its path, e.g. " + "webkit-rel.")), + optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER", + help=("The build number of the builder running this script.")), + optparse.make_option("--test-results-server", default="", + help=("If specified, upload results json files to this appengine " + "server.")), + optparse.make_option("--upload-full-results", + action="store_true", + default=False, + help="If true, upload full json results to server."), + ] + + option_list = (configuration_options + print_options + + chromium_options + results_options + test_options + + misc_options + results_json_options + + old_run_webkit_tests_compat) + option_parser = optparse.OptionParser(option_list=option_list) + + return option_parser.parse_args(args) + + +def main(): + options, args = parse_args() + port_obj = port.get(options.platform, options) + return run(port_obj, options, args) + + +if '__main__' == __name__: + try: + sys.exit(main()) + except KeyboardInterrupt: + # this mirrors what the shell normally does + sys.exit(signal.SIGINT + 128) diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py new file mode 100644 index 0000000..2bfac2f --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py @@ -0,0 +1,545 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
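[Editor's note: the option groups defined in run_webkit_tests.py above all feed a single optparse parser, so its parse_args() behaves like a superset of the old-run-webkit-tests command line. A usage sketch; the test paths and values are illustrative.]

    options, args = parse_args(['--platform', 'mac', '--debug',
                                'fast/forms', 'fast/tables'])
    # options.platform == 'mac', options.configuration == 'Debug',
    # args == ['fast/forms', 'fast/tables']; flags recognised only by the old
    # harness (e.g. --use-remote-links-to-tests) are accepted by the compat
    # shim callbacks, which print an "Ignoring unsupported option" notice
    # instead of failing.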
+ +"""Unit tests for run_webkit_tests.""" + +import codecs +import itertools +import logging +import os +import Queue +import shutil +import sys +import tempfile +import thread +import time +import threading +import unittest + +from webkitpy.common import array_stream +from webkitpy.common.system import outputcapture +from webkitpy.common.system import user +from webkitpy.layout_tests import port +from webkitpy.layout_tests import run_webkit_tests +from webkitpy.layout_tests.layout_package import dump_render_tree_thread +from webkitpy.layout_tests.port.test import TestPort, TestDriver +from webkitpy.python24.versioning import compare_version +from webkitpy.test.skip import skip_if + +from webkitpy.thirdparty.mock import Mock + + +class MockUser(): + def __init__(self): + self.url = None + + def open_url(self, url): + self.url = url + + +def parse_args(extra_args=None, record_results=False, tests_included=False, + print_nothing=True): + extra_args = extra_args or [] + if print_nothing: + args = ['--print', 'nothing'] + else: + args = [] + if not '--platform' in extra_args: + args.extend(['--platform', 'test']) + if not record_results: + args.append('--no-record-results') + if not '--child-processes' in extra_args: + args.extend(['--worker-model', 'old-inline']) + args.extend(extra_args) + if not tests_included: + # We use the glob to test that globbing works. + args.extend(['passes', + 'http/tests', + 'websocket/tests', + 'failures/expected/*']) + return run_webkit_tests.parse_args(args) + + +def passing_run(extra_args=None, port_obj=None, record_results=False, + tests_included=False): + options, parsed_args = parse_args(extra_args, record_results, + tests_included) + if not port_obj: + port_obj = port.get(port_name=options.platform, options=options, + user=MockUser()) + res = run_webkit_tests.run(port_obj, options, parsed_args) + return res == 0 + + +def logging_run(extra_args=None, port_obj=None, tests_included=False): + options, parsed_args = parse_args(extra_args=extra_args, + record_results=False, + tests_included=tests_included, + print_nothing=False) + user = MockUser() + if not port_obj: + port_obj = port.get(port_name=options.platform, options=options, + user=user) + + res, buildbot_output, regular_output = run_and_capture(port_obj, options, + parsed_args) + return (res, buildbot_output, regular_output, user) + + +def run_and_capture(port_obj, options, parsed_args): + oc = outputcapture.OutputCapture() + try: + oc.capture_output() + buildbot_output = array_stream.ArrayStream() + regular_output = array_stream.ArrayStream() + res = run_webkit_tests.run(port_obj, options, parsed_args, + buildbot_output=buildbot_output, + regular_output=regular_output) + finally: + oc.restore_output() + return (res, buildbot_output, regular_output) + + +def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False): + extra_args = extra_args or [] + if not tests_included: + # Not including http tests since they get run out of order (that + # behavior has its own test, see test_get_test_file_queue) + extra_args = ['passes', 'failures'] + extra_args + options, parsed_args = parse_args(extra_args, tests_included=True) + + user = MockUser() + + test_batches = [] + + class RecordingTestDriver(TestDriver): + def __init__(self, port, worker_number): + TestDriver.__init__(self, port, worker_number) + self._current_test_batch = None + + def poll(self): + # So that we don't create a new driver for every test + return None + + def stop(self): + self._current_test_batch = None + + def 
run_test(self, test_input): + if self._current_test_batch is None: + self._current_test_batch = [] + test_batches.append(self._current_test_batch) + test_name = self._port.relative_test_filename(test_input.filename) + self._current_test_batch.append(test_name) + return TestDriver.run_test(self, test_input) + + class RecordingTestPort(TestPort): + def create_driver(self, worker_number): + return RecordingTestDriver(self, worker_number) + + recording_port = RecordingTestPort(options=options, user=user) + run_and_capture(recording_port, options, parsed_args) + + if flatten_batches: + return list(itertools.chain(*test_batches)) + + return test_batches + + +class MainTest(unittest.TestCase): + def test_accelerated_compositing(self): + # This just tests that we recognize the command line args + self.assertTrue(passing_run(['--accelerated-compositing'])) + self.assertTrue(passing_run(['--no-accelerated-compositing'])) + + def test_accelerated_2d_canvas(self): + # This just tests that we recognize the command line args + self.assertTrue(passing_run(['--accelerated-2d-canvas'])) + self.assertTrue(passing_run(['--no-accelerated-2d-canvas'])) + + def test_basic(self): + self.assertTrue(passing_run()) + + def test_batch_size(self): + batch_tests_run = get_tests_run(['--batch-size', '2']) + for batch in batch_tests_run: + self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch)) + + def test_child_process_1(self): + (res, buildbot_output, regular_output, user) = logging_run( + ['--print', 'config', '--child-processes', '1']) + self.assertTrue('Running one DumpRenderTree\n' + in regular_output.get()) + + def test_child_processes_2(self): + (res, buildbot_output, regular_output, user) = logging_run( + ['--print', 'config', '--child-processes', '2']) + self.assertTrue('Running 2 DumpRenderTrees in parallel\n' + in regular_output.get()) + + def test_dryrun(self): + batch_tests_run = get_tests_run(['--dry-run']) + self.assertEqual(batch_tests_run, []) + + batch_tests_run = get_tests_run(['-n']) + self.assertEqual(batch_tests_run, []) + + def test_exception_raised(self): + self.assertRaises(ValueError, logging_run, + ['failures/expected/exception.html'], tests_included=True) + + def test_full_results_html(self): + # FIXME: verify html? + self.assertTrue(passing_run(['--full-results-html'])) + + def test_help_printing(self): + res, out, err, user = logging_run(['--help-printing']) + self.assertEqual(res, 0) + self.assertTrue(out.empty()) + self.assertFalse(err.empty()) + + def test_hung_thread(self): + res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50', + 'failures/expected/hang.html'], + tests_included=True) + self.assertEqual(res, 0) + self.assertFalse(out.empty()) + self.assertFalse(err.empty()) + + def test_keyboard_interrupt(self): + # Note that this also tests running a test marked as SKIP if + # you specify it explicitly. 
+ self.assertRaises(KeyboardInterrupt, logging_run, + ['failures/expected/keyboard.html'], tests_included=True) + + def test_last_results(self): + passing_run(['--clobber-old-results'], record_results=True) + (res, buildbot_output, regular_output, user) = logging_run( + ['--print-last-failures']) + self.assertEqual(regular_output.get(), ['\n\n']) + self.assertEqual(buildbot_output.get(), []) + + def test_lint_test_files(self): + res, out, err, user = logging_run(['--lint-test-files']) + self.assertEqual(res, 0) + self.assertTrue(out.empty()) + self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()])) + + def test_lint_test_files__errors(self): + options, parsed_args = parse_args(['--lint-test-files']) + user = MockUser() + port_obj = port.get(options.platform, options=options, user=user) + port_obj.test_expectations = lambda: "# syntax error" + res, out, err = run_and_capture(port_obj, options, parsed_args) + + self.assertEqual(res, -1) + self.assertTrue(out.empty()) + self.assertTrue(any(['Lint failed' in msg for msg in err.get()])) + + def test_no_tests_found(self): + res, out, err, user = logging_run(['resources'], tests_included=True) + self.assertEqual(res, -1) + self.assertTrue(out.empty()) + self.assertTrue('No tests to run.\n' in err.get()) + + def test_no_tests_found_2(self): + res, out, err, user = logging_run(['foo'], tests_included=True) + self.assertEqual(res, -1) + self.assertTrue(out.empty()) + self.assertTrue('No tests to run.\n' in err.get()) + + def test_randomize_order(self): + # FIXME: verify order was shuffled + self.assertTrue(passing_run(['--randomize-order'])) + + def test_run_chunk(self): + # Test that we actually select the right chunk + all_tests_run = get_tests_run(flatten_batches=True) + chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True) + self.assertEquals(all_tests_run[4:8], chunk_tests_run) + + # Test that we wrap around if the number of tests is not evenly divisible by the chunk size + tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html'] + chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True) + self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run) + + def test_run_force(self): + # This raises an exception because we run + # failures/expected/exception.html, which is normally SKIPped. + self.assertRaises(ValueError, logging_run, ['--force']) + + def test_run_part(self): + # Test that we actually select the right part + tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html'] + tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True) + self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run) + + # Test that we wrap around if the number of tests is not evenly divisible by the chunk size + # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the + # last part repeats the first two tests). 
+ chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True) + self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run) + + def test_run_singly(self): + batch_tests_run = get_tests_run(['--run-singly']) + for batch in batch_tests_run: + self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch)) + + def test_single_file(self): + tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True) + self.assertEquals(['passes/text.html'], tests_run) + + def test_test_list(self): + filename = tempfile.mktemp() + tmpfile = file(filename, mode='w+') + tmpfile.write('passes/text.html') + tmpfile.close() + tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True) + self.assertEquals(['passes/text.html'], tests_run) + os.remove(filename) + res, out, err, user = logging_run(['--test-list=%s' % filename], + tests_included=True) + self.assertEqual(res, -1) + self.assertFalse(err.empty()) + + def test_unexpected_failures(self): + # Run tests including the unexpected failures. + self._url_opened = None + res, out, err, user = logging_run(tests_included=True) + self.assertEqual(res, 3) + self.assertFalse(out.empty()) + self.assertFalse(err.empty()) + self.assertEqual(user.url, '/tmp/layout-test-results/results.html') + + def test_exit_after_n_failures(self): + # Unexpected failures should result in tests stopping. + tests_run = get_tests_run([ + 'failures/unexpected/text-image-checksum.html', + 'passes/text.html', + '--exit-after-n-failures', '1', + ], + tests_included=True, + flatten_batches=True) + self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run) + + # But we'll keep going for expected ones. + tests_run = get_tests_run([ + 'failures/expected/text.html', + 'passes/text.html', + '--exit-after-n-failures', '1', + ], + tests_included=True, + flatten_batches=True) + self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run) + + def test_exit_after_n_crashes(self): + # Unexpected crashes should result in tests stopping. + tests_run = get_tests_run([ + 'failures/unexpected/crash.html', + 'passes/text.html', + '--exit-after-n-crashes-or-timeouts', '1', + ], + tests_included=True, + flatten_batches=True) + self.assertEquals(['failures/unexpected/crash.html'], tests_run) + + # Same with timeouts. + tests_run = get_tests_run([ + 'failures/unexpected/timeout.html', + 'passes/text.html', + '--exit-after-n-crashes-or-timeouts', '1', + ], + tests_included=True, + flatten_batches=True) + self.assertEquals(['failures/unexpected/timeout.html'], tests_run) + + # But we'll keep going for expected ones. + tests_run = get_tests_run([ + 'failures/expected/crash.html', + 'passes/text.html', + '--exit-after-n-crashes-or-timeouts', '1', + ], + tests_included=True, + flatten_batches=True) + self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run) + + def test_results_directory_absolute(self): + # We run a configuration that should fail, to generate output, then + # look for what the output results url was. 
+ + tmpdir = tempfile.mkdtemp() + res, out, err, user = logging_run(['--results-directory=' + tmpdir], + tests_included=True) + self.assertEqual(user.url, os.path.join(tmpdir, 'results.html')) + shutil.rmtree(tmpdir, ignore_errors=True) + + def test_results_directory_default(self): + # We run a configuration that should fail, to generate output, then + # look for what the output results url was. + + # This is the default location. + res, out, err, user = logging_run(tests_included=True) + self.assertEqual(user.url, '/tmp/layout-test-results/results.html') + + def test_results_directory_relative(self): + # We run a configuration that should fail, to generate output, then + # look for what the output results url was. + + res, out, err, user = logging_run(['--results-directory=foo'], + tests_included=True) + self.assertEqual(user.url, '/tmp/foo/results.html') + + def test_tolerance(self): + class ImageDiffTestPort(TestPort): + def diff_image(self, expected_contents, actual_contents, + diff_filename=None): + self.tolerance_used_for_diff_image = self._options.tolerance + return True + + def get_port_for_run(args): + options, parsed_args = run_webkit_tests.parse_args(args) + test_port = ImageDiffTestPort(options=options, user=MockUser()) + passing_run(args, port_obj=test_port, tests_included=True) + return test_port + + base_args = ['--pixel-tests', 'failures/expected/*'] + + # If we pass in an explicit tolerance argument, then that will be used. + test_port = get_port_for_run(base_args + ['--tolerance', '.1']) + self.assertEqual(0.1, test_port.tolerance_used_for_diff_image) + test_port = get_port_for_run(base_args + ['--tolerance', '0']) + self.assertEqual(0, test_port.tolerance_used_for_diff_image) + + # Otherwise the port's default tolerance behavior (including ignoring it) + # should be used. + test_port = get_port_for_run(base_args) + self.assertEqual(None, test_port.tolerance_used_for_diff_image) + + def test_worker_model__inline(self): + self.assertTrue(passing_run(['--worker-model', 'old-inline'])) + + def test_worker_model__threads(self): + self.assertTrue(passing_run(['--worker-model', 'old-threads'])) + + def test_worker_model__unknown(self): + self.assertRaises(ValueError, logging_run, + ['--worker-model', 'unknown']) + +MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2') + + + +def _mocked_open(original_open, file_list): + def _wrapper(name, mode, encoding): + if name.find("-expected.") != -1 and mode.find("w") != -1: + # we don't want to actually write new baselines, so stub these out + name.replace('\\', '/') + file_list.append(name) + return original_open(os.devnull, mode, encoding) + return original_open(name, mode, encoding) + return _wrapper + + +class RebaselineTest(unittest.TestCase): + def assertBaselines(self, file_list, file): + "assert that the file_list contains the baselines.""" + for ext in [".txt", ".png", ".checksum"]: + baseline = file + "-expected" + ext + self.assertTrue(any(f.find(baseline) != -1 for f in file_list)) + + # FIXME: Add tests to ensure that we're *not* writing baselines when we're not + # supposed to be. + + def disabled_test_reset_results(self): + # FIXME: This test is disabled until we can rewrite it to use a + # mock filesystem. + # + # Test that we update expectations in place. If the expectation + # is missing, update the expected generic location. 
+ file_list = [] + passing_run(['--pixel-tests', + '--reset-results', + 'passes/image.html', + 'failures/expected/missing_image.html'], + tests_included=True) + self.assertEqual(len(file_list), 6) + self.assertBaselines(file_list, + "data/passes/image") + self.assertBaselines(file_list, + "data/failures/expected/missing_image") + + def disabled_test_new_baseline(self): + # FIXME: This test is disabled until we can rewrite it to use a + # mock filesystem. + # + # Test that we update the platform expectations. If the expectation + # is mssing, then create a new expectation in the platform dir. + file_list = [] + original_open = codecs.open + try: + # Test that we update the platform expectations. If the expectation + # is mssing, then create a new expectation in the platform dir. + file_list = [] + codecs.open = _mocked_open(original_open, file_list) + passing_run(['--pixel-tests', + '--new-baseline', + 'passes/image.html', + 'failures/expected/missing_image.html'], + tests_included=True) + self.assertEqual(len(file_list), 6) + self.assertBaselines(file_list, + "data/platform/test/passes/image") + self.assertBaselines(file_list, + "data/platform/test/failures/expected/missing_image") + finally: + codecs.open = original_open + + +class DryrunTest(unittest.TestCase): + # FIXME: it's hard to know which platforms are safe to test; the + # chromium platforms require a chromium checkout, and the mac platform + # requires fcntl, so it can't be tested on win32, etc. There is + # probably a better way of handling this. + def test_darwin(self): + if sys.platform != "darwin": + return + + self.assertTrue(passing_run(['--platform', 'test'])) + self.assertTrue(passing_run(['--platform', 'dryrun', + 'fast/html'])) + self.assertTrue(passing_run(['--platform', 'dryrun-mac', + 'fast/html'])) + + def test_test(self): + self.assertTrue(passing_run(['--platform', 'dryrun-test', + '--pixel-tests'])) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/__init__.py b/Tools/Scripts/webkitpy/layout_tests/test_types/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/test_types/__init__.py diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/Tools/Scripts/webkitpy/layout_tests/test_types/image_diff.py new file mode 100644 index 0000000..da466c8 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/test_types/image_diff.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Compares the image output of a test to the expected image output. + +Compares hashes for the generated and expected images. If the output doesn't +match, returns FailureImageHashMismatch and outputs both hashes into the layout +test results directory. +""" + +from __future__ import with_statement + +import codecs +import errno +import logging +import os +import shutil + +from webkitpy.layout_tests.layout_package import test_failures +from webkitpy.layout_tests.test_types import test_type_base + +# Cache whether we have the image_diff executable available. +_compare_available = True +_compare_msg_printed = False + +_log = logging.getLogger("webkitpy.layout_tests.test_types.image_diff") + + +class ImageDiff(test_type_base.TestTypeBase): + + def _save_baseline_files(self, filename, image, image_hash, + generate_new_baseline): + """Saves new baselines for the PNG and checksum. + + Args: + filename: test filename + image: a image output + image_hash: a checksum of the image + generate_new_baseline: whether to generate a new, platform-specific + baseline, or update the existing one + """ + self._save_baseline_data(filename, image, ".png", encoding=None, + generate_new_baseline=generate_new_baseline) + self._save_baseline_data(filename, image_hash, ".checksum", + encoding="ascii", + generate_new_baseline=generate_new_baseline) + + def _copy_image(self, filename, actual_image, expected_image): + self.write_output_files(filename, '.png', + output=actual_image, expected=expected_image, + encoding=None, print_text_diffs=False) + + def _copy_image_hash(self, filename, actual_image_hash, expected_image_hash): + self.write_output_files(filename, '.checksum', + actual_image_hash, expected_image_hash, + encoding="ascii", print_text_diffs=False) + + def _create_diff_image(self, port, filename, actual_image, expected_image): + """Creates the visual diff of the expected/actual PNGs. + + Returns True if the images are different. + """ + diff_filename = self.output_filename(filename, + self.FILENAME_SUFFIX_COMPARE) + return port.diff_image(actual_image, expected_image, diff_filename) + + def compare_output(self, port, filename, test_args, actual_test_output, + expected_test_output): + """Implementation of CompareOutput that checks the output image and + checksum against the expected files from the LayoutTest directory. + """ + failures = [] + + # If we didn't produce a hash file, this test must be text-only. + if actual_test_output.image_hash is None: + return failures + + # If we're generating a new baseline, we pass. + if test_args.new_baseline or test_args.reset_results: + self._save_baseline_files(filename, actual_test_output.image, + actual_test_output.image_hash, + test_args.new_baseline) + return failures + + if not expected_test_output.image: + # Report a missing expected PNG file. 
+ self._copy_image(filename, actual_test_output.image, expected_image=None) + self._copy_image_hash(filename, actual_test_output.image_hash, + expected_test_output.image_hash) + failures.append(test_failures.FailureMissingImage()) + return failures + if not expected_test_output.image_hash: + # Report a missing expected checksum file. + self._copy_image(filename, actual_test_output.image, + expected_test_output.image) + self._copy_image_hash(filename, actual_test_output.image_hash, + expected_image_hash=None) + failures.append(test_failures.FailureMissingImageHash()) + return failures + + if actual_test_output.image_hash == expected_test_output.image_hash: + # Hash matched (no diff needed, okay to return). + return failures + + self._copy_image(filename, actual_test_output.image, + expected_test_output.image) + self._copy_image_hash(filename, actual_test_output.image_hash, + expected_test_output.image_hash) + + # Even though we only use the result in one codepath below but we + # still need to call CreateImageDiff for other codepaths. + images_are_different = self._create_diff_image(port, filename, + actual_test_output.image, + expected_test_output.image) + if not images_are_different: + failures.append(test_failures.FailureImageHashIncorrect()) + else: + failures.append(test_failures.FailureImageHashMismatch()) + + return failures diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py new file mode 100644 index 0000000..4b96b3a --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines the interface TestTypeBase which other test types inherit from. + +Also defines the TestArguments "struct" to pass them additional arguments. 
+""" + +from __future__ import with_statement + +import codecs +import cgi +import errno +import logging +import os.path + +_log = logging.getLogger("webkitpy.layout_tests.test_types.test_type_base") + + +class TestArguments(object): + """Struct-like wrapper for additional arguments needed by + specific tests.""" + # Whether to save new baseline results. + new_baseline = False + + # Path to the actual PNG file generated by pixel tests + png_path = None + + # Value of checksum generated by pixel tests. + hash = None + + # Whether to use wdiff to generate by-word diffs. + wdiff = False + +# Python bug workaround. See the wdiff code in WriteOutputFiles for an +# explanation. +_wdiff_available = True + + +class TestTypeBase(object): + + # Filename pieces when writing failures to the test results directory. + FILENAME_SUFFIX_ACTUAL = "-actual" + FILENAME_SUFFIX_EXPECTED = "-expected" + FILENAME_SUFFIX_DIFF = "-diff" + FILENAME_SUFFIX_WDIFF = "-wdiff.html" + FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html" + FILENAME_SUFFIX_COMPARE = "-diff.png" + + def __init__(self, port, root_output_dir): + """Initialize a TestTypeBase object. + + Args: + port: object implementing port-specific information and methods + root_output_dir: The unix style path to the output dir. + """ + self._root_output_dir = root_output_dir + self._port = port + + def _make_output_directory(self, filename): + """Creates the output directory (if needed) for a given test + filename.""" + output_filename = os.path.join(self._root_output_dir, + self._port.relative_test_filename(filename)) + self._port.maybe_make_directory(os.path.split(output_filename)[0]) + + def _save_baseline_data(self, filename, data, modifier, encoding, + generate_new_baseline=True): + """Saves a new baseline file into the port's baseline directory. + + The file will be named simply "<test>-expected<modifier>", suitable for + use as the expected results in a later run. + + Args: + filename: path to the test file + data: result to be saved as the new baseline + modifier: type of the result file, e.g. ".txt" or ".png" + encoding: file encoding (none, "utf-8", etc.) + generate_new_baseline: whether to enerate a new, platform-specific + baseline, or update the existing one + """ + + if generate_new_baseline: + relative_dir = os.path.dirname( + self._port.relative_test_filename(filename)) + baseline_path = self._port.baseline_path() + output_dir = os.path.join(baseline_path, relative_dir) + output_file = os.path.basename(os.path.splitext(filename)[0] + + self.FILENAME_SUFFIX_EXPECTED + modifier) + self._port.maybe_make_directory(output_dir) + output_path = os.path.join(output_dir, output_file) + _log.debug('writing new baseline result "%s"' % (output_path)) + else: + output_path = self._port.expected_filename(filename, modifier) + _log.debug('resetting baseline result "%s"' % output_path) + + self._port.update_baseline(output_path, data, encoding) + + def output_filename(self, filename, modifier): + """Returns a filename inside the output dir that contains modifier. 
+ + For example, if filename is c:/.../fast/dom/foo.html and modifier is + "-expected.txt", the return value is + c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt + + Args: + filename: absolute filename to test file + modifier: a string to replace the extension of filename with + + Return: + The absolute windows path to the output filename + """ + output_filename = os.path.join(self._root_output_dir, + self._port.relative_test_filename(filename)) + return os.path.splitext(output_filename)[0] + modifier + + def compare_output(self, port, filename, test_args, actual_test_output, + expected_test_output): + """Method that compares the output from the test with the + expected value. + + This is an abstract method to be implemented by all sub classes. + + Args: + port: object implementing port-specific information and methods + filename: absolute filename to test file + test_args: a TestArguments object holding optional additional + arguments + actual_test_output: a TestOutput object which represents actual test + output + expected_test_output: a TestOutput object which represents a expected + test output + + Return: + a list of TestFailure objects, empty if the test passes + """ + raise NotImplementedError + + def _write_into_file_at_path(self, file_path, contents, encoding): + """This method assumes that byte_array is already encoded + into the right format.""" + open_mode = 'w' + if encoding is None: + open_mode = 'w+b' + with codecs.open(file_path, open_mode, encoding=encoding) as file: + file.write(contents) + + def write_output_files(self, filename, file_type, + output, expected, encoding, + print_text_diffs=False): + """Writes the test output, the expected output and optionally the diff + between the two to files in the results directory. + + The full output filename of the actual, for example, will be + <filename>-actual<file_type> + For instance, + my_test-actual.txt + + Args: + filename: The test filename + file_type: A string describing the test output file type, e.g. ".txt" + output: A string containing the test output + expected: A string containing the expected test output + print_text_diffs: True for text diffs. (FIXME: We should be able to get this from the file type?) + """ + self._make_output_directory(filename) + actual_filename = self.output_filename(filename, self.FILENAME_SUFFIX_ACTUAL + file_type) + expected_filename = self.output_filename(filename, self.FILENAME_SUFFIX_EXPECTED + file_type) + # FIXME: This function is poorly designed. We should be passing in some sort of + # encoding information from the callers. + if output: + self._write_into_file_at_path(actual_filename, output, encoding) + if expected: + self._write_into_file_at_path(expected_filename, expected, encoding) + + if not output or not expected: + return + + if not print_text_diffs: + return + + # Note: We pass encoding=None for all diff writes, as we treat diff + # output as binary. Diff output may contain multiple files in + # conflicting encodings. + diff = self._port.diff_text(expected, output, expected_filename, actual_filename) + diff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_DIFF + file_type) + self._write_into_file_at_path(diff_filename, diff, encoding=None) + + # Shell out to wdiff to get colored inline diffs. 
+ wdiff = self._port.wdiff_text(expected_filename, actual_filename) + wdiff_filename = self.output_filename(filename, self.FILENAME_SUFFIX_WDIFF) + self._write_into_file_at_path(wdiff_filename, wdiff, encoding=None) + + # Use WebKit's PrettyPatch.rb to get an HTML diff. + pretty_patch = self._port.pretty_patch_text(diff_filename) + pretty_patch_filename = self.output_filename(filename, self.FILENAME_SUFFIX_PRETTY_PATCH) + self._write_into_file_at_path(pretty_patch_filename, pretty_patch, encoding=None) diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py new file mode 100644 index 0000000..5dbfcb6 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/test_types/test_type_base_unittest.py @@ -0,0 +1,47 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +""""Tests stray tests not covered by regular code paths.""" + +import test_type_base +import unittest + +from webkitpy.thirdparty.mock import Mock + + +class Test(unittest.TestCase): + + def test_compare_output_notimplemented(self): + test_type = test_type_base.TestTypeBase(None, None) + self.assertRaises(NotImplementedError, test_type.compare_output, + None, "foo.txt", '', + test_type_base.TestArguments(), 'Debug') + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/Tools/Scripts/webkitpy/layout_tests/test_types/text_diff.py new file mode 100644 index 0000000..ad25262 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/test_types/text_diff.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Compares the text output of a test to the expected text output. + +If the output doesn't match, returns FailureTextMismatch and outputs the diff +files into the layout test results directory. +""" + +from __future__ import with_statement + +import codecs +import errno +import logging +import os.path + +from webkitpy.layout_tests.layout_package import test_failures +from webkitpy.layout_tests.test_types import test_type_base + +_log = logging.getLogger("webkitpy.layout_tests.test_types.text_diff") + + +class TestTextDiff(test_type_base.TestTypeBase): + + def _get_normalized_output_text(self, output): + """Returns the normalized text output, i.e. the output in which + the end-of-line characters are normalized to "\n".""" + # Running tests on Windows produces "\r\n". The "\n" part is helpfully + # changed to "\r\n" by our system (Python/Cygwin), resulting in + # "\r\r\n", when, in fact, we wanted to compare the text output with + # the normalized text expectation files. + return output.replace("\r\r\n", "\r\n").replace("\r\n", "\n") + + def compare_output(self, port, filename, test_args, actual_test_output, + expected_test_output): + """Implementation of CompareOutput that checks the output text against + the expected text from the LayoutTest directory.""" + failures = [] + + # If we're generating a new baseline, we pass. + if test_args.new_baseline or test_args.reset_results: + # Although all test_shell/DumpRenderTree output should be utf-8, + # we do not ever decode it inside run-webkit-tests. For some tests + # DumpRenderTree may not output utf-8 text (e.g. webarchives). + self._save_baseline_data(filename, actual_test_output.text, + ".txt", encoding=None, + generate_new_baseline=test_args.new_baseline) + return failures + + # Normalize text to diff + actual_text = self._get_normalized_output_text(actual_test_output.text) + # Assuming expected_text is already normalized. + expected_text = expected_test_output.text + + # Write output files for new tests, too. + if port.compare_text(actual_text, expected_text): + # Text doesn't match, write output files. 
+ self.write_output_files(filename, ".txt", actual_text, + expected_text, encoding=None, + print_text_diffs=True) + + if expected_text == '': + failures.append(test_failures.FailureMissingResult()) + else: + failures.append(test_failures.FailureTextMismatch()) + + return failures diff --git a/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py b/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py new file mode 100755 index 0000000..f4c8098 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python + +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR +# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from __future__ import with_statement + +import glob +import logging +import optparse +import os +import re +import sys +import webkitpy.common.checkout.scm as scm + +_log = logging.getLogger("webkitpy.layout_tests." + "update-webgl-conformance-tests") + + +def remove_first_line_comment(text): + return re.compile(r'^<!--.*?-->\s*', re.DOTALL).sub('', text) + + +def translate_includes(text): + # Mapping of single filename to relative path under WebKit root. + # Assumption: these filenames are globally unique. + include_mapping = { + "js-test-style.css": "../../js/resources", + "js-test-pre.js": "../../js/resources", + "js-test-post.js": "../../js/resources", + "desktop-gl-constants.js": "resources", + } + + for filename, path in include_mapping.items(): + search = r'(?:[^"\'= ]*/)?' + re.escape(filename) + replace = os.path.join(path, filename) + text = re.sub(search, replace, text) + + return text + + +def translate_khronos_test(text): + """ + This method translates the contents of a Khronos test to a WebKit test. 
+ """ + + translateFuncs = [ + remove_first_line_comment, + translate_includes, + ] + + for f in translateFuncs: + text = f(text) + + return text + + +def update_file(in_filename, out_dir): + # check in_filename exists + # check out_dir exists + out_filename = os.path.join(out_dir, os.path.basename(in_filename)) + + _log.debug("Processing " + in_filename) + with open(in_filename, 'r') as in_file: + with open(out_filename, 'w') as out_file: + out_file.write(translate_khronos_test(in_file.read())) + + +def update_directory(in_dir, out_dir): + for filename in glob.glob(os.path.join(in_dir, '*.html')): + update_file(os.path.join(in_dir, filename), out_dir) + + +def default_out_dir(): + current_scm = scm.detect_scm_system(os.path.dirname(sys.argv[0])) + if not current_scm: + return os.getcwd() + root_dir = current_scm.checkout_root + if not root_dir: + return os.getcwd() + out_dir = os.path.join(root_dir, "LayoutTests/fast/canvas/webgl") + if os.path.isdir(out_dir): + return out_dir + return os.getcwd() + + +def configure_logging(options): + """Configures the logging system.""" + log_fmt = '%(levelname)s: %(message)s' + log_datefmt = '%y%m%d %H:%M:%S' + log_level = logging.INFO + if options.verbose: + log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s ' + '%(message)s') + log_level = logging.DEBUG + logging.basicConfig(level=log_level, format=log_fmt, + datefmt=log_datefmt) + + +def option_parser(): + usage = "usage: %prog [options] (input file or directory)" + parser = optparse.OptionParser(usage=usage) + parser.add_option('-v', '--verbose', + action='store_true', + default=False, + help='include debug-level logging') + parser.add_option('-o', '--output', + action='store', + type='string', + default=default_out_dir(), + metavar='DIR', + help='specify an output directory to place files ' + 'in [default: %default]') + return parser + + +def main(): + parser = option_parser() + (options, args) = parser.parse_args() + configure_logging(options) + + if len(args) == 0: + _log.error("Must specify an input directory or filename.") + parser.print_help() + return 1 + + in_name = args[0] + if os.path.isfile(in_name): + update_file(in_name, options.output) + elif os.path.isdir(in_name): + update_directory(in_name, options.output) + else: + _log.error("'%s' is not a directory or a file.", in_name) + return 2 + + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py new file mode 100644 index 0000000..7393b70 --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/update_webgl_conformance_tests_unittest.py @@ -0,0 +1,102 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for update_webgl_conformance_tests.""" + +import unittest +from webkitpy.layout_tests import update_webgl_conformance_tests as webgl + + +def construct_script(name): + return "<script src=\"" + name + "\"></script>\n" + + +def construct_style(name): + return "<link rel=\"stylesheet\" href=\"" + name + "\">" + + +class TestTranslation(unittest.TestCase): + def assert_unchanged(self, text): + self.assertEqual(text, webgl.translate_khronos_test(text)) + + def assert_translate(self, input, output): + self.assertEqual(output, webgl.translate_khronos_test(input)) + + def test_simple_unchanged(self): + self.assert_unchanged("") + self.assert_unchanged("<html></html>") + + def test_header_strip(self): + single_line_header = "<!-- single line header. -->" + multi_line_header = """<!-- this is a multi-line + header. it should all be removed too. + -->""" + text = "<html></html>" + self.assert_translate(single_line_header, "") + self.assert_translate(single_line_header + text, text) + self.assert_translate(multi_line_header + text, text) + + def dont_strip_other_headers(self): + self.assert_unchanged("<html>\n<!-- don't remove comments on other lines. 
-->\n</html>") + + def test_include_rewriting(self): + # Mappings to None are unchanged + styles = { + "../resources/js-test-style.css": "../../js/resources/js-test-style.css", + "fail.css": None, + "resources/stylesheet.css": None, + "../resources/style.css": None, + } + scripts = { + "../resources/js-test-pre.js": "../../js/resources/js-test-pre.js", + "../resources/js-test-post.js": "../../js/resources/js-test-post.js", + "../resources/desktop-gl-constants.js": "resources/desktop-gl-constants.js", + + "resources/shadow-offset.js": None, + "../resources/js-test-post-async.js": None, + } + + input_text = "" + output_text = "" + for input, output in styles.items(): + input_text += construct_style(input) + output_text += construct_style(output if output else input) + for input, output in scripts.items(): + input_text += construct_script(input) + output_text += construct_script(output if output else input) + + head = '<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">\n<html>\n<head>\n' + foot = '</head>\n<body>\n</body>\n</html>' + input_text = head + input_text + foot + output_text = head + output_text + foot + self.assert_translate(input_text, output_text) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/python24/__init__.py b/Tools/Scripts/webkitpy/python24/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/python24/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/python24/versioning.py b/Tools/Scripts/webkitpy/python24/versioning.py new file mode 100644 index 0000000..8b1f21b --- /dev/null +++ b/Tools/Scripts/webkitpy/python24/versioning.py @@ -0,0 +1,133 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports Python version checking.""" + +import logging +import sys + +_log = logging.getLogger("webkitpy.python24.versioning") + +# The minimum Python version the webkitpy package supports. +_MINIMUM_SUPPORTED_PYTHON_VERSION = "2.5" + + +def compare_version(sysmodule=None, target_version=None): + """Compare the current Python version with a target version. 
+ + Args: + sysmodule: An object with version and version_info data attributes + used to detect the current Python version. The attributes + should have the same semantics as sys.version and + sys.version_info. This parameter should only be used + for unit testing. Defaults to sys. + target_version: A string representing the Python version to compare + the current version against. The string should have + one of the following three forms: 2, 2.5, or 2.5.3. + Defaults to the minimum version that the webkitpy + package supports. + + Returns: + A triple of (comparison, current_version, target_version). + + comparison: An integer representing the result of comparing the + current version with the target version. A positive + number means the current version is greater than the + target, 0 means they are the same, and a negative number + means the current version is less than the target. + This method compares version information only up + to the precision of the given target version. For + example, if the target version is 2.6 and the current + version is 2.5.3, this method uses 2.5 for the purposes + of comparing with the target. + current_version: A string representing the current Python version, for + example 2.5.3. + target_version: A string representing the version that the current + version was compared against, for example 2.5. + + """ + if sysmodule is None: + sysmodule = sys + if target_version is None: + target_version = _MINIMUM_SUPPORTED_PYTHON_VERSION + + # The number of version parts to compare. + precision = len(target_version.split(".")) + + # We use sys.version_info rather than sys.version since its first + # three elements are guaranteed to be integers. + current_version_info_to_compare = sysmodule.version_info[:precision] + # Convert integers to strings. + current_version_info_to_compare = map(str, current_version_info_to_compare) + current_version_to_compare = ".".join(current_version_info_to_compare) + + # Compare version strings lexicographically. + if current_version_to_compare > target_version: + comparison = 1 + elif current_version_to_compare == target_version: + comparison = 0 + else: + comparison = -1 + + # The version number portion of the current version string, for + # example "2.6.4". + current_version = sysmodule.version.split()[0] + + return (comparison, current_version, target_version) + + +# FIXME: Add a logging level parameter to allow the version message +# to be logged at levels other than WARNING, for example CRITICAL. +def check_version(log=None, sysmodule=None, target_version=None): + """Check the current Python version against a target version. + + Logs a warning message if the current version is less than the + target version. + + Args: + log: A logging.logger instance to use when logging the version warning. + Defaults to the logger of this module. + sysmodule: See the compare_version() docstring. + target_version: See the compare_version() docstring. + + Returns: + A boolean value of whether the current version is greater than + or equal to the target version. + + """ + if log is None: + log = _log + + (comparison, current_version, target_version) = \ + compare_version(sysmodule, target_version) + + if comparison >= 0: + # Then the current version is at least the minimum version. + return True + + message = ("WebKit Python scripts do not support your current Python " + "version (%s). 
The minimum supported version is %s.\n" + " See the following page to upgrade your Python version:\n\n" + " http://trac.webkit.org/wiki/PythonGuidelines\n" + % (current_version, target_version)) + log.warn(message) + return False diff --git a/Tools/Scripts/webkitpy/python24/versioning_unittest.py b/Tools/Scripts/webkitpy/python24/versioning_unittest.py new file mode 100644 index 0000000..6939e2d --- /dev/null +++ b/Tools/Scripts/webkitpy/python24/versioning_unittest.py @@ -0,0 +1,134 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains unit tests for versioning.py.""" + +import logging +import unittest + +from webkitpy.common.system.logtesting import LogTesting +from webkitpy.python24.versioning import check_version +from webkitpy.python24.versioning import compare_version + +class MockSys(object): + + """A mock sys module for passing to version-checking methods.""" + + def __init__(self, current_version): + """Create an instance. + + current_version: A version string with major, minor, and micro + version parts. + + """ + version_info = current_version.split(".") + version_info = map(int, version_info) + + self.version = current_version + " Version details." 
+ self.version_info = version_info + + +class CompareVersionTest(unittest.TestCase): + + """Tests compare_version().""" + + def _mock_sys(self, current_version): + return MockSys(current_version) + + def test_default_minimum_version(self): + """Test the configured minimum version that webkitpy supports.""" + (comparison, current_version, min_version) = compare_version() + self.assertEquals(min_version, "2.5") + + def compare_version(self, target_version, current_version=None): + """Call compare_version().""" + if current_version is None: + current_version = "2.5.3" + mock_sys = self._mock_sys(current_version) + return compare_version(mock_sys, target_version) + + def compare(self, target_version, current_version=None): + """Call compare_version(), and return the comparison.""" + return self.compare_version(target_version, current_version)[0] + + def test_returned_current_version(self): + """Test the current_version return value.""" + current_version = self.compare_version("2.5")[1] + self.assertEquals(current_version, "2.5.3") + + def test_returned_target_version(self): + """Test the current_version return value.""" + target_version = self.compare_version("2.5")[2] + self.assertEquals(target_version, "2.5") + + def test_target_version_major(self): + """Test major version for target.""" + self.assertEquals(-1, self.compare("3")) + self.assertEquals(0, self.compare("2")) + self.assertEquals(1, self.compare("2", "3.0.0")) + + def test_target_version_minor(self): + """Test minor version for target.""" + self.assertEquals(-1, self.compare("2.6")) + self.assertEquals(0, self.compare("2.5")) + self.assertEquals(1, self.compare("2.4")) + + def test_target_version_micro(self): + """Test minor version for target.""" + self.assertEquals(-1, self.compare("2.5.4")) + self.assertEquals(0, self.compare("2.5.3")) + self.assertEquals(1, self.compare("2.5.2")) + + +class CheckVersionTest(unittest.TestCase): + + """Tests check_version().""" + + def setUp(self): + self._log = LogTesting.setUp(self) + + def tearDown(self): + self._log.tearDown() + + def _check_version(self, minimum_version): + """Call check_version().""" + mock_sys = MockSys("2.5.3") + return check_version(sysmodule=mock_sys, target_version=minimum_version) + + def test_true_return_value(self): + """Test the configured minimum version that webkitpy supports.""" + is_current = self._check_version("2.4") + self.assertEquals(True, is_current) + self._log.assertMessages([]) # No warning was logged. + + def test_false_return_value(self): + """Test the configured minimum version that webkitpy supports.""" + is_current = self._check_version("2.6") + self.assertEquals(False, is_current) + expected_message = ('WARNING: WebKit Python scripts do not support ' + 'your current Python version (2.5.3). ' + 'The minimum supported version is 2.6.\n ' + 'See the following page to upgrade your Python ' + 'version:\n\n ' + 'http://trac.webkit.org/wiki/PythonGuidelines\n\n') + self._log.assertMessages([expected_message]) + diff --git a/Tools/Scripts/webkitpy/style/__init__.py b/Tools/Scripts/webkitpy/style/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/style/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/style/checker.py b/Tools/Scripts/webkitpy/style/checker.py new file mode 100644 index 0000000..6f1beb0 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checker.py @@ -0,0 +1,749 @@ +# Copyright (C) 2009 Google Inc. 
All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) 2010 ProFUSION embedded systems +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Front end of some style-checker modules.""" + +import logging +import os.path +import sys + +from checkers.common import categories as CommonCategories +from checkers.common import CarriageReturnChecker +from checkers.cpp import CppChecker +from checkers.python import PythonChecker +from checkers.test_expectations import TestExpectationsChecker +from checkers.text import TextChecker +from checkers.xml import XMLChecker +from error_handlers import DefaultStyleErrorHandler +from filter import FilterConfiguration +from optparser import ArgumentParser +from optparser import DefaultCommandOptionValues +from webkitpy.style_references import configure_logging as _configure_logging + +_log = logging.getLogger("webkitpy.style.checker") + +# These are default option values for the command-line option parser. +_DEFAULT_MIN_CONFIDENCE = 1 +_DEFAULT_OUTPUT_FORMAT = 'emacs' + + +# FIXME: For style categories we will never want to have, remove them. +# For categories for which we want to have similar functionality, +# modify the implementation and enable them. +# +# Throughout this module, we use "filter rule" rather than "filter" +# for an individual boolean filter flag like "+foo". This allows us to +# reserve "filter" for what one gets by collectively applying all of +# the filter rules. +# +# The base filter rules are the filter rules that begin the list of +# filter rules used to check style. For example, these rules precede +# any user-specified filter rules. Since by default all categories are +# checked, this list should normally include only rules that begin +# with a "-" sign. 
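The rules below feed the FilterConfiguration class defined in filter.py, whose should_check() method evaluates rules in order, so a later matching rule overrides an earlier one (the unit tests further down rely on this with base_rules=["-whitespace", "+whitespace/tab"]). A minimal sketch, assuming the webkitpy.style package layout used by this patch:

    from webkitpy.style.filter import FilterConfiguration

    # Disable the whole "whitespace" group, then re-enable a single category.
    config = FilterConfiguration(base_rules=["-whitespace", "+whitespace/tab"])

    config.should_check("whitespace/tab", "foo.txt")   # True: "+whitespace/tab" matches last
    config.should_check("whitespace/line", "foo.txt")  # False: filtered by "-whitespace"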
+_BASE_FILTER_RULES = [ + '-build/endif_comment', + '-build/include_what_you_use', # <string> for std::string + '-build/storage_class', # const static + '-legal/copyright', + '-readability/multiline_comment', + '-readability/braces', # int foo() {}; + '-readability/fn_size', + '-readability/casting', + '-readability/function', + '-runtime/arrays', # variable length array + '-runtime/casting', + '-runtime/sizeof', + '-runtime/explicit', # explicit + '-runtime/virtual', # virtual dtor + '-runtime/printf', + '-runtime/threadsafe_fn', + '-runtime/rtti', + '-whitespace/blank_line', + '-whitespace/end_of_line', + '-whitespace/labels', + # List Python pep8 categories last. + # + # Because much of WebKit's Python code base does not abide by the + # PEP8 79 character limit, we ignore the 79-character-limit category + # pep8/E501 for now. + # + # FIXME: Consider bringing WebKit's Python code base into conformance + # with the 79 character limit, or some higher limit that is + # agreeable to the WebKit project. + '-pep8/E501', + ] + + +# The path-specific filter rules. +# +# This list is order sensitive. Only the first path substring match +# is used. See the FilterConfiguration documentation in filter.py +# for more information on this list. +# +# Each string appearing in this nested list should have at least +# one associated unit test assertion. These assertions are located, +# for example, in the test_path_rules_specifier() unit test method of +# checker_unittest.py. +_PATH_RULES_SPECIFIER = [ + # Files in these directories are consumers of the WebKit + # API and therefore do not follow the same header including + # discipline as WebCore. + + ([# TestNetscapePlugIn has no config.h and uses funny names like + # NPP_SetWindow. + "Tools/DumpRenderTree/TestNetscapePlugIn/", + # The API test harnesses have no config.h and use funny macros like + # TEST_CLASS_NAME. + "Tools/WebKitAPITest/", + "Tools/TestWebKitAPI/"], + ["-build/include", + "-readability/naming"]), + ([# The EFL APIs use EFL naming style, which includes + # both lower-cased and camel-cased, underscore-sparated + # values. + "WebKit/efl/ewk/", + # There is no clean way to avoid "yy_*" names used by flex. + "WebCore/css/CSSParser.cpp", + # Qt code uses '_' in some places (such as private slots + # and on test xxx_data methos on tests) + "JavaScriptCore/qt/api/", + "WebKit/qt/Api/", + "WebKit/qt/tests/", + "WebKit/qt/declarative/", + "WebKit/qt/examples/"], + ["-readability/naming"]), + ([# The GTK+ APIs use GTK+ naming style, which includes + # lower-cased, underscore-separated values. + # Also, GTK+ allows the use of NULL. + "WebCore/bindings/scripts/test/GObject", + "WebKit/gtk/webkit/", + "Tools/DumpRenderTree/gtk/"], + ["-readability/naming", + "-readability/null"]), + ([# Header files in ForwardingHeaders have no header guards or + # exceptional header guards (e.g., WebCore_FWD_Debugger_h). + "/ForwardingHeaders/"], + ["-build/header_guard"]), + ([# assembler has lots of opcodes that use underscores, so + # we don't check for underscores in that directory. + "/JavaScriptCore/assembler/"], + ["-readability/naming"]), + + # WebKit2 rules: + # WebKit2 doesn't use config.h, and certain directories have other + # idiosyncracies. + ([# NPAPI has function names with underscores. + "WebKit2/WebProcess/Plugins/Netscape"], + ["-build/include_order", + "-readability/naming"]), + ([# The WebKit2 C API has names with underscores and whitespace-aligned + # struct members. 
+ "WebKit2/UIProcess/API/C/", + "WebKit2/WebProcess/InjectedBundle/API/c/"], + ["-build/include_order", + "-readability/naming", + "-whitespace/declaration"]), + ([# Nothing in WebKit2 uses config.h. + "WebKit2/"], + ["-build/include_order"]), + + # For third-party Python code, keep only the following checks-- + # + # No tabs: to avoid having to set the SVN allow-tabs property. + # No trailing white space: since this is easy to correct. + # No carriage-return line endings: since this is easy to correct. + # + (["webkitpy/thirdparty/"], + ["-", + "+pep8/W191", # Tabs + "+pep8/W291", # Trailing white space + "+whitespace/carriage_return"]), +] + + +_CPP_FILE_EXTENSIONS = [ + 'c', + 'cpp', + 'h', + ] + +_PYTHON_FILE_EXTENSION = 'py' + +_TEXT_FILE_EXTENSIONS = [ + 'ac', + 'cc', + 'cgi', + 'css', + 'exp', + 'flex', + 'gyp', + 'gypi', + 'html', + 'idl', + 'in', + 'js', + 'mm', + 'php', + 'pl', + 'pm', + 'pri', + 'pro', + 'rb', + 'sh', + 'txt', + 'wm', + 'xhtml', + 'y', + ] + +_XML_FILE_EXTENSIONS = [ + 'vcproj', + 'vsprops', + ] + +# Files to skip that are less obvious. +# +# Some files should be skipped when checking style. For example, +# WebKit maintains some files in Mozilla style on purpose to ease +# future merges. +_SKIPPED_FILES_WITH_WARNING = [ + "gtk2drawing.c", # WebCore/platform/gtk/gtk2drawing.c + "gtkdrawing.h", # WebCore/platform/gtk/gtkdrawing.h + "WebKit/gtk/tests/", + # Soup API that is still being cooked, will be removed from WebKit + # in a few months when it is merged into soup proper. The style + # follows the libsoup style completely. + "WebCore/platform/network/soup/cache/", + ] + + +# Files to skip that are more common or obvious. +# +# This list should be in addition to files with FileType.NONE. Files +# with FileType.NONE are automatically skipped without warning. +_SKIPPED_FILES_WITHOUT_WARNING = [ + "LayoutTests/", + ] + +# Extensions of files which are allowed to contain carriage returns. +_CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS = [ + 'vcproj', + 'vsprops', + ] + +# The maximum number of errors to report per file, per category. +# If a category is not a key, then it has no maximum. +_MAX_REPORTS_PER_CATEGORY = { + "whitespace/carriage_return": 1 +} + + +def _all_categories(): + """Return the set of all categories used by check-webkit-style.""" + # Take the union across all checkers. + categories = CommonCategories.union(CppChecker.categories) + categories = categories.union(TestExpectationsChecker.categories) + + # FIXME: Consider adding all of the pep8 categories. Since they + # are not too meaningful for documentation purposes, for + # now we add only the categories needed for the unit tests + # (which validate the consistency of the configuration + # settings against the known categories, etc). + categories = categories.union(["pep8/W191", "pep8/W291", "pep8/E501"]) + + return categories + + +def _check_webkit_style_defaults(): + """Return the default command-line options for check-webkit-style.""" + return DefaultCommandOptionValues(min_confidence=_DEFAULT_MIN_CONFIDENCE, + output_format=_DEFAULT_OUTPUT_FORMAT) + + +# This function assists in optparser not having to import from checker. +def check_webkit_style_parser(): + all_categories = _all_categories() + default_options = _check_webkit_style_defaults() + return ArgumentParser(all_categories=all_categories, + base_filter_rules=_BASE_FILTER_RULES, + default_options=default_options) + + +def check_webkit_style_configuration(options): + """Return a StyleProcessorConfiguration instance for check-webkit-style. 
+ + Args: + options: A CommandOptionValues instance. + + """ + filter_configuration = FilterConfiguration( + base_rules=_BASE_FILTER_RULES, + path_specific=_PATH_RULES_SPECIFIER, + user_rules=options.filter_rules) + + return StyleProcessorConfiguration(filter_configuration=filter_configuration, + max_reports_per_category=_MAX_REPORTS_PER_CATEGORY, + min_confidence=options.min_confidence, + output_format=options.output_format, + stderr_write=sys.stderr.write) + + +def _create_log_handlers(stream): + """Create and return a default list of logging.Handler instances. + + Format WARNING messages and above to display the logging level, and + messages strictly below WARNING not to display it. + + Args: + stream: See the configure_logging() docstring. + + """ + # Handles logging.WARNING and above. + error_handler = logging.StreamHandler(stream) + error_handler.setLevel(logging.WARNING) + formatter = logging.Formatter("%(levelname)s: %(message)s") + error_handler.setFormatter(formatter) + + # Create a logging.Filter instance that only accepts messages + # below WARNING (i.e. filters out anything WARNING or above). + non_error_filter = logging.Filter() + # The filter method accepts a logging.LogRecord instance. + non_error_filter.filter = lambda record: record.levelno < logging.WARNING + + non_error_handler = logging.StreamHandler(stream) + non_error_handler.addFilter(non_error_filter) + formatter = logging.Formatter("%(message)s") + non_error_handler.setFormatter(formatter) + + return [error_handler, non_error_handler] + + +def _create_debug_log_handlers(stream): + """Create and return a list of logging.Handler instances for debugging. + + Args: + stream: See the configure_logging() docstring. + + """ + handler = logging.StreamHandler(stream) + formatter = logging.Formatter("%(name)s: %(levelname)-8s %(message)s") + handler.setFormatter(formatter) + + return [handler] + + +def configure_logging(stream, logger=None, is_verbose=False): + """Configure logging, and return the list of handlers added. + + Returns: + A list of references to the logging handlers added to the root + logger. This allows the caller to later remove the handlers + using logger.removeHandler. This is useful primarily during unit + testing where the caller may want to configure logging temporarily + and then undo the configuring. + + Args: + stream: A file-like object to which to log. The stream must + define an "encoding" data attribute, or else logging + raises an error. + logger: A logging.logger instance to configure. This parameter + should be used only in unit tests. Defaults to the + root logger. + is_verbose: A boolean value of whether logging should be verbose. + + """ + # If the stream does not define an "encoding" data attribute, the + # logging module can throw an error like the following: + # + # Traceback (most recent call last): + # File "/System/Library/Frameworks/Python.framework/Versions/2.6/... + # lib/python2.6/logging/__init__.py", line 761, in emit + # self.stream.write(fs % msg.encode(self.stream.encoding)) + # LookupError: unknown encoding: unknown + if logger is None: + logger = logging.getLogger() + + if is_verbose: + logging_level = logging.DEBUG + handlers = _create_debug_log_handlers(stream) + else: + logging_level = logging.INFO + handlers = _create_log_handlers(stream) + + handlers = _configure_logging(logging_level=logging_level, logger=logger, + handlers=handlers) + + return handlers + + +# Enum-like idiom +class FileType: + + NONE = 0 # FileType.NONE evaluates to False. 
+ # Alphabetize remaining types + CPP = 1 + PYTHON = 2 + TEXT = 3 + XML = 4 + + +class CheckerDispatcher(object): + + """Supports determining whether and how to check style, based on path.""" + + def _file_extension(self, file_path): + """Return the file extension without the leading dot.""" + return os.path.splitext(file_path)[1].lstrip(".") + + def should_skip_with_warning(self, file_path): + """Return whether the given file should be skipped with a warning.""" + for skipped_file in _SKIPPED_FILES_WITH_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def should_skip_without_warning(self, file_path): + """Return whether the given file should be skipped without a warning.""" + if not self._file_type(file_path): # FileType.NONE. + return True + # Since "LayoutTests" is in _SKIPPED_FILES_WITHOUT_WARNING, make + # an exception to prevent files like "LayoutTests/ChangeLog" and + # "LayoutTests/ChangeLog-2009-06-16" from being skipped. + # Files like 'test_expectations.txt' and 'drt_expectations.txt' + # are also should not be skipped. + # + # FIXME: Figure out a good way to avoid having to add special logic + # for this special case. + basename = os.path.basename(file_path) + if basename.startswith('ChangeLog'): + return False + elif basename == 'test_expectations.txt' or basename == 'drt_expectations.txt': + return False + for skipped_file in _SKIPPED_FILES_WITHOUT_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def should_check_and_strip_carriage_returns(self, file_path): + return self._file_extension(file_path) not in _CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS + + def _file_type(self, file_path): + """Return the file type corresponding to the given file.""" + file_extension = self._file_extension(file_path) + + if (file_extension in _CPP_FILE_EXTENSIONS) or (file_path == '-'): + # FIXME: Do something about the comment below and the issue it + # raises since cpp_style already relies on the extension. + # + # Treat stdin as C++. Since the extension is unknown when + # reading from stdin, cpp_style tests should not rely on + # the extension. + return FileType.CPP + elif file_extension == _PYTHON_FILE_EXTENSION: + return FileType.PYTHON + elif file_extension in _XML_FILE_EXTENSIONS: + return FileType.XML + elif (os.path.basename(file_path).startswith('ChangeLog') or + (not file_extension and "Tools/Scripts/" in file_path) or + file_extension in _TEXT_FILE_EXTENSIONS): + return FileType.TEXT + else: + return FileType.NONE + + def _create_checker(self, file_type, file_path, handle_style_error, + min_confidence): + """Instantiate and return a style checker based on file type.""" + if file_type == FileType.NONE: + checker = None + elif file_type == FileType.CPP: + file_extension = self._file_extension(file_path) + checker = CppChecker(file_path, file_extension, + handle_style_error, min_confidence) + elif file_type == FileType.PYTHON: + checker = PythonChecker(file_path, handle_style_error) + elif file_type == FileType.XML: + checker = XMLChecker(file_path, handle_style_error) + elif file_type == FileType.TEXT: + basename = os.path.basename(file_path) + if basename == 'test_expectations.txt' or basename == 'drt_expectations.txt': + checker = TestExpectationsChecker(file_path, handle_style_error) + else: + checker = TextChecker(file_path, handle_style_error) + else: + raise ValueError('Invalid file type "%(file_type)s": the only valid file types ' + "are %(NONE)s, %(CPP)s, and %(TEXT)s." 
+ % {"file_type": file_type, + "NONE": FileType.NONE, + "CPP": FileType.CPP, + "TEXT": FileType.TEXT}) + + return checker + + def dispatch(self, file_path, handle_style_error, min_confidence): + """Instantiate and return a style checker based on file path.""" + file_type = self._file_type(file_path) + + checker = self._create_checker(file_type, + file_path, + handle_style_error, + min_confidence) + return checker + + +# FIXME: Remove the stderr_write attribute from this class and replace +# its use with calls to a logging module logger. +class StyleProcessorConfiguration(object): + + """Stores configuration values for the StyleProcessor class. + + Attributes: + min_confidence: An integer between 1 and 5 inclusive that is the + minimum confidence level of style errors to report. + + max_reports_per_category: The maximum number of errors to report + per category, per file. + + stderr_write: A function that takes a string as a parameter and + serves as stderr.write. + + """ + + def __init__(self, + filter_configuration, + max_reports_per_category, + min_confidence, + output_format, + stderr_write): + """Create a StyleProcessorConfiguration instance. + + Args: + filter_configuration: A FilterConfiguration instance. The default + is the "empty" filter configuration, which + means that all errors should be checked. + + max_reports_per_category: The maximum number of errors to report + per category, per file. + + min_confidence: An integer between 1 and 5 inclusive that is the + minimum confidence level of style errors to report. + The default is 1, which reports all style errors. + + output_format: A string that is the output format. The supported + output formats are "emacs" which emacs can parse + and "vs7" which Microsoft Visual Studio 7 can parse. + + stderr_write: A function that takes a string as a parameter and + serves as stderr.write. + + """ + self._filter_configuration = filter_configuration + self._output_format = output_format + + self.max_reports_per_category = max_reports_per_category + self.min_confidence = min_confidence + self.stderr_write = stderr_write + + def is_reportable(self, category, confidence_in_error, file_path): + """Return whether an error is reportable. + + An error is reportable if both the confidence in the error is + at least the minimum confidence level and the current filter + says the category should be checked for the given path. + + Args: + category: A string that is a style category. + confidence_in_error: An integer between 1 and 5 inclusive that is + the application's confidence in the error. + A higher number means greater confidence. + file_path: The path of the file being checked + + """ + if confidence_in_error < self.min_confidence: + return False + + return self._filter_configuration.should_check(category, file_path) + + def write_style_error(self, + category, + confidence_in_error, + file_path, + line_number, + message): + """Write a style error to the configured stderr.""" + if self._output_format == 'vs7': + format_string = "%s(%s): %s [%s] [%d]\n" + else: + format_string = "%s:%s: %s [%s] [%d]\n" + + self.stderr_write(format_string % (file_path, + line_number, + message, + category, + confidence_in_error)) + + +class ProcessorBase(object): + + """The base class for processors of lists of lines.""" + + def should_process(self, file_path): + """Return whether the file at file_path should be processed. + + The TextFileReader class calls this method prior to reading in + the lines of a file. 
Use this method, for example, to prevent + the style checker from reading binary files into memory. + + """ + raise NotImplementedError('Subclasses should implement.') + + def process(self, lines, file_path, **kwargs): + """Process lines of text read from a file. + + Args: + lines: A list of lines of text to process. + file_path: The path from which the lines were read. + **kwargs: This argument signifies that the process() method of + subclasses of ProcessorBase may support additional + keyword arguments. + For example, a style checker's check() method + may support a "reportable_lines" parameter that represents + the line numbers of the lines for which style errors + should be reported. + + """ + raise NotImplementedError('Subclasses should implement.') + + +class StyleProcessor(ProcessorBase): + + """A ProcessorBase for checking style. + + Attributes: + error_count: An integer that is the total number of reported + errors for the lifetime of this instance. + + """ + + def __init__(self, configuration, mock_dispatcher=None, + mock_increment_error_count=None, + mock_carriage_checker_class=None): + """Create an instance. + + Args: + configuration: A StyleProcessorConfiguration instance. + mock_dispatcher: A mock CheckerDispatcher instance. This + parameter is for unit testing. Defaults to a + CheckerDispatcher instance. + mock_increment_error_count: A mock error-count incrementer. + mock_carriage_checker_class: A mock class for checking and + transforming carriage returns. + This parameter is for unit testing. + Defaults to CarriageReturnChecker. + + """ + if mock_dispatcher is None: + dispatcher = CheckerDispatcher() + else: + dispatcher = mock_dispatcher + + if mock_increment_error_count is None: + # The following blank line is present to avoid flagging by pep8.py. + + def increment_error_count(): + """Increment the total count of reported errors.""" + self.error_count += 1 + else: + increment_error_count = mock_increment_error_count + + if mock_carriage_checker_class is None: + # This needs to be a class rather than an instance since the + # process() method instantiates one using parameters. + carriage_checker_class = CarriageReturnChecker + else: + carriage_checker_class = mock_carriage_checker_class + + self.error_count = 0 + + self._carriage_checker_class = carriage_checker_class + self._configuration = configuration + self._dispatcher = dispatcher + self._increment_error_count = increment_error_count + + def should_process(self, file_path): + """Return whether the file should be checked for style.""" + if self._dispatcher.should_skip_without_warning(file_path): + return False + if self._dispatcher.should_skip_with_warning(file_path): + _log.warn('File exempt from style guide. Skipping: "%s"' + % file_path) + return False + return True + + def process(self, lines, file_path, line_numbers=None): + """Check the given lines for style. + + Arguments: + lines: A list of all lines in the file to check. + file_path: The path of the file to process. If possible, the path + should be relative to the source root. Otherwise, + path-specific logic may not behave as expected. + line_numbers: A list of line numbers of the lines for which + style errors should be reported, or None if errors + for all lines should be reported. When not None, this + list normally contains the line numbers corresponding + to the modified lines of a patch. 
+ + """ + _log.debug("Checking style: " + file_path) + + style_error_handler = DefaultStyleErrorHandler( + configuration=self._configuration, + file_path=file_path, + increment_error_count=self._increment_error_count, + line_numbers=line_numbers) + + carriage_checker = self._carriage_checker_class(style_error_handler) + + # Check for and remove trailing carriage returns ("\r"). + if self._dispatcher.should_check_and_strip_carriage_returns(file_path): + lines = carriage_checker.check(lines) + + min_confidence = self._configuration.min_confidence + checker = self._dispatcher.dispatch(file_path, + style_error_handler, + min_confidence) + + if checker is None: + raise AssertionError("File should not be checked: '%s'" % file_path) + + _log.debug("Using class: " + checker.__class__.__name__) + + checker.check(lines) diff --git a/Tools/Scripts/webkitpy/style/checker_unittest.py b/Tools/Scripts/webkitpy/style/checker_unittest.py new file mode 100755 index 0000000..d9057a8 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checker_unittest.py @@ -0,0 +1,832 @@ +#!/usr/bin/python +# -*- coding: utf-8; -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
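Before the unit tests, here is a minimal usage sketch that ties together the checker.py pieces above; it mirrors the StyleProcessor_EndToEndTest case later in this file. The fully qualified import paths and the "foo.txt" input are illustrative assumptions.

    import sys

    from webkitpy.style.checker import StyleProcessor, StyleProcessorConfiguration
    from webkitpy.style.filter import FilterConfiguration

    configuration = StyleProcessorConfiguration(
        filter_configuration=FilterConfiguration(),
        max_reports_per_category={},
        min_confidence=3,
        output_format="vs7",
        stderr_write=sys.stderr.write)
    processor = StyleProcessor(configuration)

    if processor.should_process("foo.txt"):
        # CheckerDispatcher maps the .txt extension to a TextChecker, which
        # reports: foo.txt(2): Line contains tab character. [whitespace/tab] [5]
        processor.process(lines=["line1", "Line with tab:\t"], file_path="foo.txt")

    assert processor.error_count == 1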
+ +"""Unit tests for style.py.""" + +import logging +import os +import unittest + +import checker as style +from webkitpy.style_references import LogTesting +from webkitpy.style_references import TestLogStream +from checker import _BASE_FILTER_RULES +from checker import _MAX_REPORTS_PER_CATEGORY +from checker import _PATH_RULES_SPECIFIER as PATH_RULES_SPECIFIER +from checker import _all_categories +from checker import check_webkit_style_configuration +from checker import check_webkit_style_parser +from checker import configure_logging +from checker import CheckerDispatcher +from checker import ProcessorBase +from checker import StyleProcessor +from checker import StyleProcessorConfiguration +from checkers.cpp import CppChecker +from checkers.python import PythonChecker +from checkers.text import TextChecker +from checkers.xml import XMLChecker +from error_handlers import DefaultStyleErrorHandler +from filter import validate_filter_rules +from filter import FilterConfiguration +from optparser import ArgumentParser +from optparser import CommandOptionValues +from webkitpy.common.system.logtesting import LoggingTestCase +from webkitpy.style.filereader import TextFileReader + + +class ConfigureLoggingTestBase(unittest.TestCase): + + """Base class for testing configure_logging(). + + Sub-classes should implement: + + is_verbose: The is_verbose value to pass to configure_logging(). + + """ + + def setUp(self): + is_verbose = self.is_verbose + + log_stream = TestLogStream(self) + + # Use a logger other than the root logger or one prefixed with + # webkit so as not to conflict with test-webkitpy logging. + logger = logging.getLogger("unittest") + + # Configure the test logger not to pass messages along to the + # root logger. This prevents test messages from being + # propagated to loggers used by test-webkitpy logging (e.g. + # the root logger). + logger.propagate = False + + self._handlers = configure_logging(stream=log_stream, logger=logger, + is_verbose=is_verbose) + self._log = logger + self._log_stream = log_stream + + def tearDown(self): + """Reset logging to its original state. + + This method ensures that the logging configuration set up + for a unit test does not affect logging in other unit tests. + + """ + logger = self._log + for handler in self._handlers: + logger.removeHandler(handler) + + def assert_log_messages(self, messages): + """Assert that the logged messages equal the given messages.""" + self._log_stream.assertMessages(messages) + + +class ConfigureLoggingTest(ConfigureLoggingTestBase): + + """Tests the configure_logging() function.""" + + is_verbose = False + + def test_warning_message(self): + self._log.warn("test message") + self.assert_log_messages(["WARNING: test message\n"]) + + def test_below_warning_message(self): + # We test the boundary case of a logging level equal to 29. + # In practice, we will probably only be calling log.info(), + # which corresponds to a logging level of 20. + level = logging.WARNING - 1 # Equals 29. 
+ self._log.log(level, "test message") + self.assert_log_messages(["test message\n"]) + + def test_debug_message(self): + self._log.debug("test message") + self.assert_log_messages([]) + + def test_two_messages(self): + self._log.info("message1") + self._log.info("message2") + self.assert_log_messages(["message1\n", "message2\n"]) + + +class ConfigureLoggingVerboseTest(ConfigureLoggingTestBase): + + """Tests the configure_logging() function with is_verbose True.""" + + is_verbose = True + + def test_debug_message(self): + self._log.debug("test message") + self.assert_log_messages(["unittest: DEBUG test message\n"]) + + +class GlobalVariablesTest(unittest.TestCase): + + """Tests validity of the global variables.""" + + def _all_categories(self): + return _all_categories() + + def defaults(self): + return style._check_webkit_style_defaults() + + def test_webkit_base_filter_rules(self): + base_filter_rules = _BASE_FILTER_RULES + defaults = self.defaults() + already_seen = [] + validate_filter_rules(base_filter_rules, self._all_categories()) + # Also do some additional checks. + for rule in base_filter_rules: + # Check no leading or trailing white space. + self.assertEquals(rule, rule.strip()) + # All categories are on by default, so defaults should + # begin with -. + self.assertTrue(rule.startswith('-')) + # Check no rule occurs twice. + self.assertFalse(rule in already_seen) + already_seen.append(rule) + + def test_defaults(self): + """Check that default arguments are valid.""" + default_options = self.defaults() + + # FIXME: We should not need to call parse() to determine + # whether the default arguments are valid. + parser = ArgumentParser(all_categories=self._all_categories(), + base_filter_rules=[], + default_options=default_options) + # No need to test the return value here since we test parse() + # on valid arguments elsewhere. + # + # The default options are valid: no error or SystemExit. + parser.parse(args=[]) + + def test_path_rules_specifier(self): + all_categories = self._all_categories() + for (sub_paths, path_rules) in PATH_RULES_SPECIFIER: + validate_filter_rules(path_rules, self._all_categories()) + + config = FilterConfiguration(path_specific=PATH_RULES_SPECIFIER) + + def assertCheck(path, category): + """Assert that the given category should be checked.""" + message = ('Should check category "%s" for path "%s".' + % (category, path)) + self.assertTrue(config.should_check(category, path)) + + def assertNoCheck(path, category): + """Assert that the given category should not be checked.""" + message = ('Should not check category "%s" for path "%s".' 
+ % (category, path)) + self.assertFalse(config.should_check(category, path), message) + + assertCheck("random_path.cpp", + "build/include") + assertNoCheck("Tools/WebKitAPITest/main.cpp", + "build/include") + assertCheck("random_path.cpp", + "readability/naming") + assertNoCheck("WebKit/gtk/webkit/webkit.h", + "readability/naming") + assertNoCheck("Tools/DumpRenderTree/gtk/DumpRenderTree.cpp", + "readability/null") + assertNoCheck("WebKit/efl/ewk/ewk_view.h", + "readability/naming") + assertNoCheck("WebCore/css/CSSParser.cpp", + "readability/naming") + + # Test if Qt exceptions are indeed working + assertCheck("JavaScriptCore/qt/api/qscriptengine.cpp", + "readability/braces") + assertCheck("WebKit/qt/Api/qwebpage.cpp", + "readability/braces") + assertCheck("WebKit/qt/tests/qwebelement/tst_qwebelement.cpp", + "readability/braces") + assertCheck("WebKit/qt/declarative/platformplugin/WebPlugin.cpp", + "readability/braces") + assertCheck("WebKit/qt/examples/platformplugin/WebPlugin.cpp", + "readability/braces") + assertNoCheck("JavaScriptCore/qt/api/qscriptengine.cpp", + "readability/naming") + assertNoCheck("WebKit/qt/Api/qwebpage.cpp", + "readability/naming") + assertNoCheck("WebKit/qt/tests/qwebelement/tst_qwebelement.cpp", + "readability/naming") + assertNoCheck("WebKit/qt/declarative/platformplugin/WebPlugin.cpp", + "readability/naming") + assertNoCheck("WebKit/qt/examples/platformplugin/WebPlugin.cpp", + "readability/naming") + + assertNoCheck("WebCore/ForwardingHeaders/debugger/Debugger.h", + "build/header_guard") + + # Third-party Python code: webkitpy/thirdparty + path = "Tools/Scripts/webkitpy/thirdparty/mock.py" + assertNoCheck(path, "build/include") + assertNoCheck(path, "pep8/E401") # A random pep8 category. + assertCheck(path, "pep8/W191") + assertCheck(path, "pep8/W291") + assertCheck(path, "whitespace/carriage_return") + + def test_max_reports_per_category(self): + """Check that _MAX_REPORTS_PER_CATEGORY is valid.""" + all_categories = self._all_categories() + for category in _MAX_REPORTS_PER_CATEGORY.iterkeys(): + self.assertTrue(category in all_categories, + 'Key "%s" is not a category' % category) + + +class CheckWebKitStyleFunctionTest(unittest.TestCase): + + """Tests the functions with names of the form check_webkit_style_*.""" + + def test_check_webkit_style_configuration(self): + # Exercise the code path to make sure the function does not error out. + option_values = CommandOptionValues() + configuration = check_webkit_style_configuration(option_values) + + def test_check_webkit_style_parser(self): + # Exercise the code path to make sure the function does not error out. + parser = check_webkit_style_parser() + + +class CheckerDispatcherSkipTest(unittest.TestCase): + + """Tests the "should skip" methods of the CheckerDispatcher class.""" + + def setUp(self): + self._dispatcher = CheckerDispatcher() + + def test_should_skip_with_warning(self): + """Test should_skip_with_warning().""" + # Check a non-skipped file. + self.assertFalse(self._dispatcher.should_skip_with_warning("foo.txt")) + + # Check skipped files. + paths_to_skip = [ + "gtk2drawing.c", + "gtkdrawing.h", + "WebCore/platform/gtk/gtk2drawing.c", + "WebCore/platform/gtk/gtkdrawing.h", + "WebKit/gtk/tests/testatk.c", + ] + + for path in paths_to_skip: + self.assertTrue(self._dispatcher.should_skip_with_warning(path), + "Checking: " + path) + + def _assert_should_skip_without_warning(self, path, is_checker_none, + expected): + # Check the file type before asserting the return value. 
+ checker = self._dispatcher.dispatch(file_path=path, + handle_style_error=None, + min_confidence=3) + message = 'while checking: %s' % path + self.assertEquals(checker is None, is_checker_none, message) + self.assertEquals(self._dispatcher.should_skip_without_warning(path), + expected, message) + + def test_should_skip_without_warning__true(self): + """Test should_skip_without_warning() for True return values.""" + # Check a file with NONE file type. + path = 'foo.asdf' # Non-sensical file extension. + self._assert_should_skip_without_warning(path, + is_checker_none=True, + expected=True) + + # Check files with non-NONE file type. These examples must be + # drawn from the _SKIPPED_FILES_WITHOUT_WARNING configuration + # variable. + path = os.path.join('LayoutTests', 'foo.txt') + self._assert_should_skip_without_warning(path, + is_checker_none=False, + expected=True) + + def test_should_skip_without_warning__false(self): + """Test should_skip_without_warning() for False return values.""" + paths = ['foo.txt', + os.path.join('LayoutTests', 'ChangeLog'), + ] + + for path in paths: + self._assert_should_skip_without_warning(path, + is_checker_none=False, + expected=False) + + +class CheckerDispatcherCarriageReturnTest(unittest.TestCase): + def test_should_check_and_strip_carriage_returns(self): + files = { + 'foo.txt': True, + 'foo.cpp': True, + 'foo.vcproj': False, + 'foo.vsprops': False, + } + + dispatcher = CheckerDispatcher() + for file_path, expected_result in files.items(): + self.assertEquals(dispatcher.should_check_and_strip_carriage_returns(file_path), expected_result, 'Checking: %s' % file_path) + + +class CheckerDispatcherDispatchTest(unittest.TestCase): + + """Tests dispatch() method of CheckerDispatcher class.""" + + def mock_handle_style_error(self): + pass + + def dispatch(self, file_path): + """Call dispatch() with the given file path.""" + dispatcher = CheckerDispatcher() + checker = dispatcher.dispatch(file_path, + self.mock_handle_style_error, + min_confidence=3) + return checker + + def assert_checker_none(self, file_path): + """Assert that the dispatched checker is None.""" + checker = self.dispatch(file_path) + self.assertTrue(checker is None, 'Checking: "%s"' % file_path) + + def assert_checker(self, file_path, expected_class): + """Assert the type of the dispatched checker.""" + checker = self.dispatch(file_path) + got_class = checker.__class__ + self.assertEquals(got_class, expected_class, + 'For path "%(file_path)s" got %(got_class)s when ' + "expecting %(expected_class)s." + % {"file_path": file_path, + "got_class": got_class, + "expected_class": expected_class}) + + def assert_checker_cpp(self, file_path): + """Assert that the dispatched checker is a CppChecker.""" + self.assert_checker(file_path, CppChecker) + + def assert_checker_python(self, file_path): + """Assert that the dispatched checker is a PythonChecker.""" + self.assert_checker(file_path, PythonChecker) + + def assert_checker_text(self, file_path): + """Assert that the dispatched checker is a TextChecker.""" + self.assert_checker(file_path, TextChecker) + + def assert_checker_xml(self, file_path): + """Assert that the dispatched checker is a XMLChecker.""" + self.assert_checker(file_path, XMLChecker) + + def test_cpp_paths(self): + """Test paths that should be checked as C++.""" + paths = [ + "-", + "foo.c", + "foo.cpp", + "foo.h", + ] + + for path in paths: + self.assert_checker_cpp(path) + + # Check checker attributes on a typical input. 
+ file_base = "foo" + file_extension = "c" + file_path = file_base + "." + file_extension + self.assert_checker_cpp(file_path) + checker = self.dispatch(file_path) + self.assertEquals(checker.file_extension, file_extension) + self.assertEquals(checker.file_path, file_path) + self.assertEquals(checker.handle_style_error, self.mock_handle_style_error) + self.assertEquals(checker.min_confidence, 3) + # Check "-" for good measure. + file_base = "-" + file_extension = "" + file_path = file_base + self.assert_checker_cpp(file_path) + checker = self.dispatch(file_path) + self.assertEquals(checker.file_extension, file_extension) + self.assertEquals(checker.file_path, file_path) + + def test_python_paths(self): + """Test paths that should be checked as Python.""" + paths = [ + "foo.py", + "Tools/Scripts/modules/text_style.py", + ] + + for path in paths: + self.assert_checker_python(path) + + # Check checker attributes on a typical input. + file_base = "foo" + file_extension = "css" + file_path = file_base + "." + file_extension + self.assert_checker_text(file_path) + checker = self.dispatch(file_path) + self.assertEquals(checker.file_path, file_path) + self.assertEquals(checker.handle_style_error, + self.mock_handle_style_error) + + def test_text_paths(self): + """Test paths that should be checked as text.""" + paths = [ + "ChangeLog", + "ChangeLog-2009-06-16", + "foo.ac", + "foo.cc", + "foo.cgi", + "foo.css", + "foo.exp", + "foo.flex", + "foo.gyp", + "foo.gypi", + "foo.html", + "foo.idl", + "foo.in", + "foo.js", + "foo.mm", + "foo.php", + "foo.pl", + "foo.pm", + "foo.pri", + "foo.pro", + "foo.rb", + "foo.sh", + "foo.txt", + "foo.wm", + "foo.xhtml", + "foo.y", + os.path.join("WebCore", "ChangeLog"), + os.path.join("WebCore", "inspector", "front-end", "inspector.js"), + os.path.join("Tools", "Scripts", "check-webkit-style"), + ] + + for path in paths: + self.assert_checker_text(path) + + # Check checker attributes on a typical input. + file_base = "foo" + file_extension = "css" + file_path = file_base + "." + file_extension + self.assert_checker_text(file_path) + checker = self.dispatch(file_path) + self.assertEquals(checker.file_path, file_path) + self.assertEquals(checker.handle_style_error, self.mock_handle_style_error) + + def test_xml_paths(self): + """Test paths that should be checked as XML.""" + paths = [ + "WebCore/WebCore.vcproj/WebCore.vcproj", + "WebKitLibraries/win/tools/vsprops/common.vsprops", + ] + + for path in paths: + self.assert_checker_xml(path) + + # Check checker attributes on a typical input. + file_base = "foo" + file_extension = "vcproj" + file_path = file_base + "." + file_extension + self.assert_checker_xml(file_path) + checker = self.dispatch(file_path) + self.assertEquals(checker.file_path, file_path) + self.assertEquals(checker.handle_style_error, + self.mock_handle_style_error) + + def test_none_paths(self): + """Test paths that have no file type..""" + paths = [ + "Makefile", + "foo.asdf", # Non-sensical file extension. 
+ "foo.png", + "foo.exe", + ] + + for path in paths: + self.assert_checker_none(path) + + +class StyleProcessorConfigurationTest(unittest.TestCase): + + """Tests the StyleProcessorConfiguration class.""" + + def setUp(self): + self._error_messages = [] + """The messages written to _mock_stderr_write() of this class.""" + + def _mock_stderr_write(self, message): + self._error_messages.append(message) + + def _style_checker_configuration(self, output_format="vs7"): + """Return a StyleProcessorConfiguration instance for testing.""" + base_rules = ["-whitespace", "+whitespace/tab"] + filter_configuration = FilterConfiguration(base_rules=base_rules) + + return StyleProcessorConfiguration( + filter_configuration=filter_configuration, + max_reports_per_category={"whitespace/newline": 1}, + min_confidence=3, + output_format=output_format, + stderr_write=self._mock_stderr_write) + + def test_init(self): + """Test the __init__() method.""" + configuration = self._style_checker_configuration() + + # Check that __init__ sets the "public" data attributes correctly. + self.assertEquals(configuration.max_reports_per_category, + {"whitespace/newline": 1}) + self.assertEquals(configuration.stderr_write, self._mock_stderr_write) + self.assertEquals(configuration.min_confidence, 3) + + def test_is_reportable(self): + """Test the is_reportable() method.""" + config = self._style_checker_configuration() + + self.assertTrue(config.is_reportable("whitespace/tab", 3, "foo.txt")) + + # Test the confidence check code path by varying the confidence. + self.assertFalse(config.is_reportable("whitespace/tab", 2, "foo.txt")) + + # Test the category check code path by varying the category. + self.assertFalse(config.is_reportable("whitespace/line", 4, "foo.txt")) + + def _call_write_style_error(self, output_format): + config = self._style_checker_configuration(output_format=output_format) + config.write_style_error(category="whitespace/tab", + confidence_in_error=5, + file_path="foo.h", + line_number=100, + message="message") + + def test_write_style_error_emacs(self): + """Test the write_style_error() method.""" + self._call_write_style_error("emacs") + self.assertEquals(self._error_messages, + ["foo.h:100: message [whitespace/tab] [5]\n"]) + + def test_write_style_error_vs7(self): + """Test the write_style_error() method.""" + self._call_write_style_error("vs7") + self.assertEquals(self._error_messages, + ["foo.h(100): message [whitespace/tab] [5]\n"]) + + +class StyleProcessor_EndToEndTest(LoggingTestCase): + + """Test the StyleProcessor class with an emphasis on end-to-end tests.""" + + def setUp(self): + LoggingTestCase.setUp(self) + self._messages = [] + + def _mock_stderr_write(self, message): + """Save a message so it can later be asserted.""" + self._messages.append(message) + + def test_init(self): + """Test __init__ constructor.""" + configuration = StyleProcessorConfiguration( + filter_configuration=FilterConfiguration(), + max_reports_per_category={}, + min_confidence=3, + output_format="vs7", + stderr_write=self._mock_stderr_write) + processor = StyleProcessor(configuration) + + self.assertEquals(processor.error_count, 0) + self.assertEquals(self._messages, []) + + def test_process(self): + configuration = StyleProcessorConfiguration( + filter_configuration=FilterConfiguration(), + max_reports_per_category={}, + min_confidence=3, + output_format="vs7", + stderr_write=self._mock_stderr_write) + processor = StyleProcessor(configuration) + + processor.process(lines=['line1', 'Line with tab:\t'], + 
file_path='foo.txt') + self.assertEquals(processor.error_count, 1) + expected_messages = ['foo.txt(2): Line contains tab character. ' + '[whitespace/tab] [5]\n'] + self.assertEquals(self._messages, expected_messages) + + +class StyleProcessor_CodeCoverageTest(LoggingTestCase): + + """Test the StyleProcessor class with an emphasis on code coverage. + + This class makes heavy use of mock objects. + + """ + + class MockDispatchedChecker(object): + + """A mock checker dispatched by the MockDispatcher.""" + + def __init__(self, file_path, min_confidence, style_error_handler): + self.file_path = file_path + self.min_confidence = min_confidence + self.style_error_handler = style_error_handler + + def check(self, lines): + self.lines = lines + + class MockDispatcher(object): + + """A mock CheckerDispatcher class.""" + + def __init__(self): + self.dispatched_checker = None + + def should_skip_with_warning(self, file_path): + return file_path.endswith('skip_with_warning.txt') + + def should_skip_without_warning(self, file_path): + return file_path.endswith('skip_without_warning.txt') + + def should_check_and_strip_carriage_returns(self, file_path): + return not file_path.endswith('carriage_returns_allowed.txt') + + def dispatch(self, file_path, style_error_handler, min_confidence): + if file_path.endswith('do_not_process.txt'): + return None + + checker = StyleProcessor_CodeCoverageTest.MockDispatchedChecker( + file_path, + min_confidence, + style_error_handler) + + # Save the dispatched checker so the current test case has a + # way to access and check it. + self.dispatched_checker = checker + + return checker + + def setUp(self): + LoggingTestCase.setUp(self) + # We can pass an error-message swallower here because error message + # output is tested instead in the end-to-end test case above. + configuration = StyleProcessorConfiguration( + filter_configuration=FilterConfiguration(), + max_reports_per_category={"whitespace/newline": 1}, + min_confidence=3, + output_format="vs7", + stderr_write=self._swallow_stderr_message) + + mock_carriage_checker_class = self._create_carriage_checker_class() + mock_dispatcher = self.MockDispatcher() + # We do not need to use a real incrementer here because error-count + # incrementing is tested instead in the end-to-end test case above. + mock_increment_error_count = self._do_nothing + + processor = StyleProcessor(configuration=configuration, + mock_carriage_checker_class=mock_carriage_checker_class, + mock_dispatcher=mock_dispatcher, + mock_increment_error_count=mock_increment_error_count) + + self._configuration = configuration + self._mock_dispatcher = mock_dispatcher + self._processor = processor + + def _do_nothing(self): + # We provide this function so the caller can pass it to the + # StyleProcessor constructor. This lets us assert the equality of + # the DefaultStyleErrorHandler instance generated by the process() + # method with an expected instance. + pass + + def _swallow_stderr_message(self, message): + """Swallow a message passed to stderr.write().""" + # This is a mock stderr.write() for passing to the constructor + # of the StyleProcessorConfiguration class. + pass + + def _create_carriage_checker_class(self): + + # Create a reference to self with a new name so its name does not + # conflict with the self introduced below. 
+ test_case = self + + class MockCarriageChecker(object): + + """A mock carriage-return checker.""" + + def __init__(self, style_error_handler): + self.style_error_handler = style_error_handler + + # This gives the current test case access to the + # instantiated carriage checker. + test_case.carriage_checker = self + + def check(self, lines): + # Save the lines so the current test case has a way to access + # and check them. + self.lines = lines + + return lines + + return MockCarriageChecker + + def test_should_process__skip_without_warning(self): + """Test should_process() for a skip-without-warning file.""" + file_path = "foo/skip_without_warning.txt" + + self.assertFalse(self._processor.should_process(file_path)) + + def test_should_process__skip_with_warning(self): + """Test should_process() for a skip-with-warning file.""" + file_path = "foo/skip_with_warning.txt" + + self.assertFalse(self._processor.should_process(file_path)) + + self.assertLog(['WARNING: File exempt from style guide. ' + 'Skipping: "foo/skip_with_warning.txt"\n']) + + def test_should_process__true_result(self): + """Test should_process() for a file that should be processed.""" + file_path = "foo/skip_process.txt" + + self.assertTrue(self._processor.should_process(file_path)) + + def test_process__checker_dispatched(self): + """Test the process() method for a path with a dispatched checker.""" + file_path = 'foo.txt' + lines = ['line1', 'line2'] + line_numbers = [100] + + expected_error_handler = DefaultStyleErrorHandler( + configuration=self._configuration, + file_path=file_path, + increment_error_count=self._do_nothing, + line_numbers=line_numbers) + + self._processor.process(lines=lines, + file_path=file_path, + line_numbers=line_numbers) + + # Check that the carriage-return checker was instantiated correctly + # and was passed lines correctly. + carriage_checker = self.carriage_checker + self.assertEquals(carriage_checker.style_error_handler, + expected_error_handler) + self.assertEquals(carriage_checker.lines, ['line1', 'line2']) + + # Check that the style checker was dispatched correctly and was + # passed lines correctly. + checker = self._mock_dispatcher.dispatched_checker + self.assertEquals(checker.file_path, 'foo.txt') + self.assertEquals(checker.min_confidence, 3) + self.assertEquals(checker.style_error_handler, expected_error_handler) + + self.assertEquals(checker.lines, ['line1', 'line2']) + + def test_process__no_checker_dispatched(self): + """Test the process() method for a path with no dispatched checker.""" + path = os.path.join('foo', 'do_not_process.txt') + self.assertRaises(AssertionError, self._processor.process, + lines=['line1', 'line2'], file_path=path, + line_numbers=[100]) + + def test_process__carriage_returns_not_stripped(self): + """Test that carriage returns aren't stripped from files that are allowed to contain them.""" + file_path = 'carriage_returns_allowed.txt' + lines = ['line1\r', 'line2\r'] + line_numbers = [100] + self._processor.process(lines=lines, + file_path=file_path, + line_numbers=line_numbers) + # The carriage return checker should never have been invoked, and so + # should not have saved off any lines. 
+ self.assertFalse(hasattr(self.carriage_checker, 'lines')) diff --git a/Tools/Scripts/webkitpy/style/checkers/__init__.py b/Tools/Scripts/webkitpy/style/checkers/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/style/checkers/common.py b/Tools/Scripts/webkitpy/style/checkers/common.py new file mode 100644 index 0000000..76aa956 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/common.py @@ -0,0 +1,74 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports style checking not specific to any one file type.""" + + +# FIXME: Test this list in the same way that the list of CppChecker +# categories is tested, for example by checking that all of its +# elements appear in the unit tests. This should probably be done +# after moving the relevant cpp_unittest.ErrorCollector code +# into a shared location and refactoring appropriately. +categories = set([ + "whitespace/carriage_return", + "whitespace/tab"]) + + +class CarriageReturnChecker(object): + + """Supports checking for and handling carriage returns.""" + + def __init__(self, handle_style_error): + self._handle_style_error = handle_style_error + + def check(self, lines): + """Check for and strip trailing carriage returns from lines.""" + for line_number in range(len(lines)): + if not lines[line_number].endswith("\r"): + continue + + self._handle_style_error(line_number + 1, # Correct for offset. + "whitespace/carriage_return", + 1, + "One or more unexpected \\r (^M) found; " + "better to use only a \\n") + + lines[line_number] = lines[line_number].rstrip("\r") + + return lines + + +class TabChecker(object): + + """Supports checking for and handling tabs.""" + + def __init__(self, file_path, handle_style_error): + self.file_path = file_path + self.handle_style_error = handle_style_error + + def check(self, lines): + # FIXME: share with cpp_style. 
+ for line_number, line in enumerate(lines): + if "\t" in line: + self.handle_style_error(line_number + 1, + "whitespace/tab", 5, + "Line contains tab character.") diff --git a/Tools/Scripts/webkitpy/style/checkers/common_unittest.py b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py new file mode 100644 index 0000000..1fe1263 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py @@ -0,0 +1,124 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for common.py.""" + +import unittest + +from common import CarriageReturnChecker +from common import TabChecker + +# FIXME: The unit tests for the cpp, text, and common checkers should +# share supporting test code. This can include, for example, the +# mock style error handling code and the code to check that all +# of a checker's categories are covered by the unit tests. +# Such shared code can be located in a shared test file, perhaps +# even this file. +class CarriageReturnCheckerTest(unittest.TestCase): + + """Tests check_no_carriage_return().""" + + _category = "whitespace/carriage_return" + _confidence = 1 + _expected_message = ("One or more unexpected \\r (^M) found; " + "better to use only a \\n") + + def setUp(self): + self._style_errors = [] # The list of accumulated style errors. + + def _mock_style_error_handler(self, line_number, category, confidence, + message): + """Append the error information to the list of style errors.""" + error = (line_number, category, confidence, message) + self._style_errors.append(error) + + def assert_carriage_return(self, input_lines, expected_lines, error_lines): + """Process the given line and assert that the result is correct.""" + handle_style_error = self._mock_style_error_handler + + checker = CarriageReturnChecker(handle_style_error) + output_lines = checker.check(input_lines) + + # Check both the return value and error messages. 
+ self.assertEquals(output_lines, expected_lines) + + expected_errors = [(line_number, self._category, self._confidence, + self._expected_message) + for line_number in error_lines] + self.assertEquals(self._style_errors, expected_errors) + + def test_ends_with_carriage(self): + self.assert_carriage_return(["carriage return\r"], + ["carriage return"], + [1]) + + def test_ends_with_nothing(self): + self.assert_carriage_return(["no carriage return"], + ["no carriage return"], + []) + + def test_ends_with_newline(self): + self.assert_carriage_return(["no carriage return\n"], + ["no carriage return\n"], + []) + + def test_carriage_in_middle(self): + # The CarriageReturnChecker checks only the final character + # of each line. + self.assert_carriage_return(["carriage\r in a string"], + ["carriage\r in a string"], + []) + + def test_multiple_errors(self): + self.assert_carriage_return(["line1", "line2\r", "line3\r"], + ["line1", "line2", "line3"], + [2, 3]) + + +class TabCheckerTest(unittest.TestCase): + + """Tests for TabChecker.""" + + def assert_tab(self, input_lines, error_lines): + """Assert when the given lines contain tabs.""" + self._error_lines = [] + + def style_error_handler(line_number, category, confidence, message): + self.assertEqual(category, 'whitespace/tab') + self.assertEqual(confidence, 5) + self.assertEqual(message, 'Line contains tab character.') + self._error_lines.append(line_number) + + checker = TabChecker('', style_error_handler) + checker.check(input_lines) + self.assertEquals(self._error_lines, error_lines) + + def test_notab(self): + self.assert_tab([''], []) + self.assert_tab(['foo', 'bar'], []) + + def test_tab(self): + self.assert_tab(['\tfoo'], [1]) + self.assert_tab(['line1', '\tline2', 'line3\t'], [2, 3]) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py new file mode 100644 index 0000000..94e5bdd --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py @@ -0,0 +1,3171 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is the modified version of Google's cpplint. The original code is +# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py + +"""Support for check-webkit-style.""" + +import codecs +import math # for log +import os +import os.path +import re +import sre_compile +import string +import sys +import unicodedata + +# The key to use to provide a class to fake loading a header file. +INCLUDE_IO_INJECTION_KEY = 'include_header_io' + +# Headers that we consider STL headers. +_STL_HEADERS = frozenset([ + 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception', + 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set', + 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h', + 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack', + 'stl_alloc.h', 'stl_relops.h', 'type_traits.h', + 'utility', 'vector', 'vector.h', + ]) + + +# Non-STL C++ system headers. +_CPP_HEADERS = frozenset([ + 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype', + 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath', + 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef', + 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype', + 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream', + 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip', + 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h', + 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h', + 'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h', + 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', + 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept', + 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string', + 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray', + ]) + + +# Assertion macros. These are defined in base/logging.h and +# testing/base/gunit.h. Note that the _M versions need to come first +# for substring matching to work. 
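The ordering requirement in the comment above is easiest to see with a small sketch. The scan() helper below is hypothetical (the real checker does its own matching); it only illustrates why a first-match scan must see 'EXPECT_TRUE_M' before 'EXPECT_TRUE':

def scan(line, macros):
    # Return the first macro whose name starts the line (first match wins).
    for macro in macros:
        if line.startswith(macro):
            return macro
    return None

statement = 'EXPECT_TRUE_M(a == b, "message")'
assert scan(statement, ['EXPECT_TRUE_M', 'EXPECT_TRUE']) == 'EXPECT_TRUE_M'
# With the plain form listed first, the _M variant would be misreported:
assert scan(statement, ['EXPECT_TRUE', 'EXPECT_TRUE_M']) == 'EXPECT_TRUE'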
+_CHECK_MACROS = [ + 'DCHECK', 'CHECK', + 'EXPECT_TRUE_M', 'EXPECT_TRUE', + 'ASSERT_TRUE_M', 'ASSERT_TRUE', + 'EXPECT_FALSE_M', 'EXPECT_FALSE', + 'ASSERT_FALSE_M', 'ASSERT_FALSE', + ] + +# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE +_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) + +for op, replacement in [('==', 'EQ'), ('!=', 'NE'), + ('>=', 'GE'), ('>', 'GT'), + ('<=', 'LE'), ('<', 'LT')]: + _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement + _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement + +for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), + ('>=', 'LT'), ('>', 'LE'), + ('<=', 'GT'), ('<', 'GE')]: + _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement + _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement + + +# These constants define types of headers for use with +# _IncludeState.check_next_include_order(). +_CONFIG_HEADER = 0 +_PRIMARY_HEADER = 1 +_OTHER_HEADER = 2 +_MOC_HEADER = 3 + + +# A dictionary of items customize behavior for unit test. For example, +# INCLUDE_IO_INJECTION_KEY allows providing a custom io class which allows +# for faking a header file. +_unit_test_config = {} + + +# The regexp compilation caching is inlined in all regexp functions for +# performance reasons; factoring it out into a separate function turns out +# to be noticeably expensive. +_regexp_compile_cache = {} + + +def match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) + + +def search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) + + +def sub(pattern, replacement, s): + """Substitutes occurrences of a pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].sub(replacement, s) + + +def subn(pattern, replacement, s): + """Substitutes occurrences of a pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].subn(replacement, s) + + +def iteratively_replace_matches_with_char(pattern, char_replacement, s): + """Returns the string with replacement done. + + Every character in the match is replaced with char. + Due to the iterative nature, pattern should not match char or + there will be an infinite loop. + + Example: + pattern = r'<[^>]>' # template parameters + char_replacement = '_' + s = 'A<B<C, D>>' + Returns 'A_________' + + Args: + pattern: The regex to match. + char_replacement: The character to put in place of every + character of the match. + s: The string on which to do the replacements. + + Returns: + True, if the given line is blank. 
+ """ + while True: + matched = search(pattern, s) + if not matched: + return s + start_match_index = matched.start(0) + end_match_index = matched.end(0) + match_length = end_match_index - start_match_index + s = s[:start_match_index] + char_replacement * match_length + s[end_match_index:] + + +def up_to_unmatched_closing_paren(s): + """Splits a string into two parts up to first unmatched ')'. + + Args: + s: a string which is a substring of line after '(' + (e.g., "a == (b + c))"). + + Returns: + A pair of strings (prefix before first unmatched ')', + remainder of s after first unmatched ')'), e.g., + up_to_unmatched_closing_paren("a == (b + c)) { ") + returns "a == (b + c)", " {". + Returns None, None if there is no unmatched ')' + + """ + i = 1 + for pos, c in enumerate(s): + if c == '(': + i += 1 + elif c == ')': + i -= 1 + if i == 0: + return s[:pos], s[pos + 1:] + return None, None + +class _IncludeState(dict): + """Tracks line numbers for includes, and the order in which includes appear. + + As a dict, an _IncludeState object serves as a mapping between include + filename and line number on which that file was included. + + Call check_next_include_order() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. + + """ + # self._section will move monotonically through this set. If it ever + # needs to move backwards, check_next_include_order will raise an error. + _INITIAL_SECTION = 0 + _CONFIG_SECTION = 1 + _PRIMARY_SECTION = 2 + _OTHER_SECTION = 3 + + _TYPE_NAMES = { + _CONFIG_HEADER: 'WebCore config.h', + _PRIMARY_HEADER: 'header this file implements', + _OTHER_HEADER: 'other header', + _MOC_HEADER: 'moc file', + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing.", + _CONFIG_SECTION: "WebCore config.h.", + _PRIMARY_SECTION: 'a header this file implements.', + _OTHER_SECTION: 'other header.', + } + + def __init__(self): + dict.__init__(self) + self._section = self._INITIAL_SECTION + self._visited_primary_section = False + self.header_types = dict(); + + def visited_primary_section(self): + return self._visited_primary_section + + def check_next_include_order(self, header_type, file_is_header): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. + file_is_header: Whether the file that owns this _IncludeState is itself a header + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. + + """ + if header_type == _CONFIG_HEADER and file_is_header: + return 'Header file should not contain WebCore config.h.' + if header_type == _PRIMARY_HEADER and file_is_header: + return 'Header file should not contain itself.' 
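Aside: the low-level string helpers defined earlier in this file can be sanity-checked on their own docstring inputs. A minimal sketch, assuming the module has been imported:

prefix, remainder = up_to_unmatched_closing_paren("a == (b + c)) { ")
assert prefix == "a == (b + c)"
assert remainder == " { "

# detect_functions() later passes this pattern to blank out template arguments.
collapsed = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', 'A<B<C, D>>')
assert collapsed == 'A_________'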
+ if header_type == _MOC_HEADER: + return '' + + error_message = '' + if self._section != self._OTHER_SECTION: + before_error_message = ('Found %s before %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section + 1])) + after_error_message = ('Found %s after %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section])) + + if header_type == _CONFIG_HEADER: + if self._section >= self._CONFIG_SECTION: + error_message = after_error_message + self._section = self._CONFIG_SECTION + elif header_type == _PRIMARY_HEADER: + if self._section >= self._PRIMARY_SECTION: + error_message = after_error_message + elif self._section < self._CONFIG_SECTION: + error_message = before_error_message + self._section = self._PRIMARY_SECTION + self._visited_primary_section = True + else: + assert header_type == _OTHER_HEADER + if not file_is_header and self._section < self._PRIMARY_SECTION: + error_message = before_error_message + self._section = self._OTHER_SECTION + + return error_message + + +class _FunctionState(object): + """Tracks current function name and the number of lines in its body. + + Attributes: + min_confidence: The minimum confidence level to use while checking style. + + """ + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. + _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self, min_confidence): + self.min_confidence = min_confidence + self.current_function = '' + self.in_a_function = False + self.lines_in_function = 0 + # Make sure these will not be mistaken for real lines (even when a + # small amount is added to them). + self.body_start_line_number = -1000 + self.ending_line_number = -1000 + + def begin(self, function_name, body_start_line_number, ending_line_number, is_declaration): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + body_start_line_number: The line number of the { or the ; for a protoype. + ending_line_number: The line number where the function ends. + is_declaration: True if this is a prototype. + """ + self.in_a_function = True + self.lines_in_function = -1 # Don't count the open brace line. + self.current_function = function_name + self.body_start_line_number = body_start_line_number + self.ending_line_number = ending_line_number + self.is_declaration = is_declaration + + def count(self, line_number): + """Count line in current function body.""" + if self.in_a_function and line_number >= self.body_start_line_number: + self.lines_in_function += 1 + + def check(self, error, line_number): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. + line_number: The number of the line to check. + """ + if match(r'T(EST|est)', self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2 ** self.min_confidence + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... + if error_level > 5: + error_level = 5 + error(line_number, 'readability/fn_size', error_level, + 'Small and focused functions are preferred:' + ' %s has %d non-comment lines' + ' (error triggered by exceeding %d lines).' 
% ( + self.current_function, self.lines_in_function, trigger)) + + def end(self): + """Stop analyzing function body.""" + self.in_a_function = False + + +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" + pass + + +class FileInfo: + """Provides utility functions for filenames. + + FileInfo provides easy access to the components of a file's path + relative to the project root. + """ + + def __init__(self, filename): + self._filename = filename + + def full_name(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace('\\', '/') + + def repository_name(self): + """Full name after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. + """ + fullname = self.full_name() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we + # recursively look up the directory tree for the top + # of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Not SVN? Try to find a git top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while (root_dir != os.path.dirname(root_dir) + and not os.path.exists(os.path.join(root_dir, ".git"))): + root_dir = os.path.dirname(root_dir) + if os.path.exists(os.path.join(root_dir, ".git")): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def split(self): + """Splits the file into the directory, basename, and extension. + + For 'chrome/browser/browser.cpp', Split() would + return ('chrome/browser', 'browser', '.cpp') + + Returns: + A tuple of (directory, basename, extension). + """ + + googlename = self.repository_name() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def base_name(self): + """File base name - text after the final slash, before the final period.""" + return self.split()[1] + + def extension(self): + """File extension - text following the final period.""" + return self.split()[2] + + def no_extension(self): + """File has no source file extension.""" + return '/'.join(self.split()[0:2]) + + def is_source(self): + """File has a source file extension.""" + return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + + +# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard. +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( + r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +# Matches strings. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') +# Matches characters. Escape codes should already be removed by ESCAPES. 
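Aside: the include-order contract that _IncludeState enforces (config.h, then the primary header, then everything else) can be exercised directly. A small sketch:

state = _IncludeState()
assert state.check_next_include_order(_CONFIG_HEADER, file_is_header=False) == ''
assert state.check_next_include_order(_PRIMARY_HEADER, file_is_header=False) == ''
assert state.check_next_include_order(_OTHER_HEADER, file_is_header=False) == ''

# Seeing the primary header first, with no config.h yet, yields an error string.
late = _IncludeState()
message = late.check_next_include_order(_PRIMARY_HEADER, file_is_header=False)
assert message.startswith('Found header this file implements before')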
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") +# Matches multi-line C++ comments. +# This RE is a little bit more complicated than one might expect, because we +# have to take care of space removals tools so we can handle comments inside +# statements better. +# The current rule is: We only clear spaces from both sides when we're at the +# end of the line. Otherwise, we try to remove spaces from the right side, +# if this doesn't work we try on left side but only if there's a non-character +# on the right. +_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( + r"""(\s*/\*.*\*/\s*$| + /\*.*\*/\s+| + \s+/\*.*\*/(?=\W)| + /\*.*\*/)""", re.VERBOSE) + + +def is_cpp_string(line): + """Does line terminate so, that the next symbol is in string constant. + + This function does not consider single-line nor multi-line comments. + + Args: + line: is a partial line of code starting from the 0..n. + + Returns: + True, if next character appended to 'line' is inside a + string constant. + """ + + line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" + return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + + +def find_next_multi_line_comment_start(lines, line_index): + """Find the beginning marker for a multiline comment.""" + while line_index < len(lines): + if lines[line_index].strip().startswith('/*'): + # Only return this marker if the comment goes beyond this line + if lines[line_index].strip().find('*/', 2) < 0: + return line_index + line_index += 1 + return len(lines) + + +def find_next_multi_line_comment_end(lines, line_index): + """We are inside a comment, find the end marker.""" + while line_index < len(lines): + if lines[line_index].strip().endswith('*/'): + return line_index + line_index += 1 + return len(lines) + + +def remove_multi_line_comments_from_range(lines, begin, end): + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code. + for i in range(begin, end): + lines[i] = '// dummy' + + +def remove_multi_line_comments(lines, error): + """Removes multiline (c-style) comments from lines.""" + line_index = 0 + while line_index < len(lines): + line_index_begin = find_next_multi_line_comment_start(lines, line_index) + if line_index_begin >= len(lines): + return + line_index_end = find_next_multi_line_comment_end(lines, line_index_begin) + if line_index_end >= len(lines): + error(line_index_begin + 1, 'readability/multiline_comment', 5, + 'Could not find end of multi-line comment') + return + remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1) + line_index = line_index_end + 1 + + +def cleanse_comments(line): + """Removes //-comments and single-line C-style /* */ comments. + + Args: + line: A line of C++ source. + + Returns: + The line with single-line comments removed. + """ + comment_position = line.find('//') + if comment_position != -1 and not is_cpp_string(line[:comment_position]): + line = line[:comment_position] + # get rid of /* ... */ + return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) + + +class CleansedLines(object): + """Holds 3 copies of all lines with different preprocessing applied to them. + + 1) elided member contains lines without strings and comments, + 2) lines member contains lines without comments, and + 3) raw member contains all the lines without processing. + All these three members are of <type 'list'>, and of the same length. 
+ """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self._num_lines = len(lines) + for line_number in range(len(lines)): + self.lines.append(cleanse_comments(lines[line_number])) + elided = self.collapse_strings(lines[line_number]) + self.elided.append(cleanse_comments(elided)) + + def num_lines(self): + """Returns the number of lines represented.""" + return self._num_lines + + @staticmethod + def collapse_strings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if not _RE_PATTERN_INCLUDE.match(elided): + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) + elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) + elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) + return elided + + +def close_expression(clean_lines, line_number, pos): + """If input points to ( or { or [, finds the position that closes it. + + If clean_lines.elided[line_number][pos] points to a '(' or '{' or '[', finds + the line_number/pos that correspond to the closing of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + pos: A position on the line. + + Returns: + A tuple (line, line_number, pos) pointer *past* the closing brace, or + ('', len(clean_lines.elided), -1) if we never find a close. Note we + ignore strings and comments when matching; and the line we return is the + 'cleansed' line at line_number. + """ + + line = clean_lines.elided[line_number] + start_character = line[pos] + if start_character not in '({[': + return (line, clean_lines.num_lines(), -1) + if start_character == '(': + end_character = ')' + if start_character == '[': + end_character = ']' + if start_character == '{': + end_character = '}' + + num_open = line.count(start_character) - line.count(end_character) + while num_open > 0: + line_number += 1 + if line_number >= clean_lines.num_lines(): + return ('', len(clean_lines.elided), -1) + line = clean_lines.elided[line_number] + num_open += line.count(start_character) - line.count(end_character) + # OK, now find the end_character that actually got us back to even + endpos = len(line) + while num_open >= 0: + endpos = line.rfind(')', 0, endpos) + num_open -= 1 # chopped off another ) + return (line, line_number, endpos + 1) + + +def check_for_copyright(lines, error): + """Logs an error if no Copyright message appears at the top of the file.""" + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in xrange(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): + break + else: # means no copyright line was found + error(0, 'legal/copyright', 5, + 'No copyright message found. ' + 'You should have a line: "Copyright [year] <Copyright Owner>"') + + +def get_header_guard_cpp_variable(filename): + """Returns the CPP variable that should be used as a header guard. + + Args: + filename: The name of a C++ header file. + + Returns: + The CPP variable that should be used as a header guard in the + named file. 
+ + """ + + # Restores original filename in case that style checker is invoked from Emacs's + # flymake. + filename = re.sub(r'_flymake\.h$', '.h', filename) + + standard_name = sub(r'[-.\s]', '_', os.path.basename(filename)) + + # Files under WTF typically have header guards that start with WTF_. + if filename.find('/wtf/'): + special_name = "WTF_" + standard_name + else: + special_name = standard_name + return (special_name, standard_name) + + +def check_for_header_guard(filename, lines, error): + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + cppvar = get_header_guard_cpp_variable(filename) + + ifndef = None + ifndef_line_number = 0 + define = None + for line_number, line in enumerate(lines): + line_split = line.split() + if len(line_split) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and line_split[0] == '#ifndef': + # set ifndef to the header guard presented on the #ifndef line. + ifndef = line_split[1] + ifndef_line_number = line_number + if not define and line_split[0] == '#define': + define = line_split[1] + if define and ifndef: + break + + if not ifndef or not define or ifndef != define: + error(0, 'build/header_guard', 5, + 'No #ifndef header guard found, suggested CPP variable is: %s' % + cppvar[0]) + return + + # The guard should be File_h. + if ifndef not in cppvar: + error(ifndef_line_number, 'build/header_guard', 5, + '#ifndef header guard has wrong style, please use: %s' % cppvar[0]) + + +def check_for_unicode_replacement_characters(lines, error): + """Logs an error for each line containing Unicode replacement characters. + + These indicate that either the file contained invalid UTF-8 (likely) + or Unicode replacement characters (which it shouldn't). Note that + it's possible for this to throw off line numbering if the invalid + UTF-8 occurred adjacent to a newline. + + Args: + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + for line_number, line in enumerate(lines): + if u'\ufffd' in line: + error(line_number, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + + +def check_for_new_line_at_eof(lines, error): + """Logs an error if there is no newline char at the end of the file. + + Args: + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error(len(lines) - 2, 'whitespace/ending_newline', 5, + 'Could not find a newline character at the end of the file.') + + +def check_for_multiline_comments_and_strings(clean_lines, line_number, error): + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. 
Likewise, it's ok for strings to extend across multiple + lines, as long as a line continuation character (backslash) + terminates each line. Although not currently prohibited by the C++ + style guide, it's ugly and unnecessary. We don't do well with either + in this lint program, so we warn about both. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + + # Remove all \\ (escaped backslashes) from the line. They are OK, and the + # second (escaped) slash may trigger later \" detection erroneously. + line = line.replace('\\\\', '') + + if line.count('/*') > line.count('*/'): + error(line_number, 'readability/multiline_comment', 5, + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. ' + 'Consider replacing these with //-style comments, ' + 'with #if 0...#endif, ' + 'or with more clearly structured multi-line comments.') + + if (line.count('"') - line.count('\\"')) % 2: + error(line_number, 'readability/multiline_string', 5, + 'Multi-line string ("...") found. This lint script doesn\'t ' + 'do well with such strings, and may give bogus warnings. They\'re ' + 'ugly and unnecessary, and you should use concatenation instead".') + + +_THREADING_LIST = ( + ('asctime(', 'asctime_r('), + ('ctime(', 'ctime_r('), + ('getgrgid(', 'getgrgid_r('), + ('getgrnam(', 'getgrnam_r('), + ('getlogin(', 'getlogin_r('), + ('getpwnam(', 'getpwnam_r('), + ('getpwuid(', 'getpwuid_r('), + ('gmtime(', 'gmtime_r('), + ('localtime(', 'localtime_r('), + ('rand(', 'rand_r('), + ('readdir(', 'readdir_r('), + ('strtok(', 'strtok_r('), + ('ttyname(', 'ttyname_r('), + ) + + +def check_posix_threading(clean_lines, line_number, error): + """Checks for calls to thread-unsafe functions. + + Much code has been originally written without consideration of + multi-threading. Also, engineers are relying on their old experience; + they have learned posix before threading extensions were added. These + tests guide the engineers to use thread-safe functions (when using + posix directly). + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + for single_thread_function, multithread_safe_function in _THREADING_LIST: + index = line.find(single_thread_function) + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if index >= 0 and (index == 0 or (not line[index - 1].isalnum() + and line[index - 1] not in ('_', '.', '>'))): + error(line_number, 'runtime/threadsafe_fn', 2, + 'Consider using ' + multithread_safe_function + + '...) instead of ' + single_thread_function + + '...) for improved thread safety.') + + +# Matches invalid increment: *count++, which moves pointer instead of +# incrementing a value. +_RE_PATTERN_INVALID_INCREMENT = re.compile( + r'^\s*\*\w+(\+\+|--);') + + +def check_invalid_increment(clean_lines, line_number, error): + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. 
+ error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error(line_number, 'runtime/invalid_increment', 5, + 'Changing pointer instead of value (or unused value of operator*).') + + +class _ClassInfo(object): + """Stores information about a class.""" + + def __init__(self, name, line_number): + self.name = name + self.line_number = line_number + self.seen_open_brace = False + self.is_derived = False + self.virtual_method_line_number = None + self.has_virtual_destructor = False + self.brace_depth = 0 + + +class _ClassState(object): + """Holds the current state of the parse relating to class declarations. + + It maintains a stack of _ClassInfos representing the parser's guess + as to the current nesting of class declarations. The innermost class + is at the top (back) of the stack. Typically, the stack will either + be empty or have exactly one entry. + """ + + def __init__(self): + self.classinfo_stack = [] + + def check_finished(self, error): + """Checks that all classes have been completely parsed. + + Call this when all lines in a file have been processed. + Args: + error: The function to call with any errors found. + """ + if self.classinfo_stack: + # Note: This test can result in false positives if #ifdef constructs + # get in the way of brace matching. See the testBuildClass test in + # cpp_style_unittest.py for an example of this. + error(self.classinfo_stack[0].line_number, 'build/class', 5, + 'Failed to find complete declaration of class %s' % + self.classinfo_stack[0].name) + + +class _FileState(object): + def __init__(self, clean_lines, file_extension): + self._did_inside_namespace_indent_warning = False + self._clean_lines = clean_lines + if file_extension in ['m', 'mm']: + self._is_objective_c = True + elif file_extension == 'h': + # In the case of header files, it is unknown if the file + # is objective c or not, so set this value to None and then + # if it is requested, use heuristics to guess the value. + self._is_objective_c = None + else: + self._is_objective_c = False + self._is_c = file_extension == 'c' + + def set_did_inside_namespace_indent_warning(self): + self._did_inside_namespace_indent_warning = True + + def did_inside_namespace_indent_warning(self): + return self._did_inside_namespace_indent_warning + + def is_objective_c(self): + if self._is_objective_c is None: + for line in self._clean_lines.elided: + # Starting with @ or #import seem like the best indications + # that we have an Objective C file. + if line.startswith("@") or line.startswith("#import"): + self._is_objective_c = True + break + else: + self._is_objective_c = False + return self._is_objective_c + + def is_c_or_objective_c(self): + """Return whether the file extension corresponds to C or Objective-C.""" + return self._is_c or self.is_objective_c() + + +def check_for_non_standard_constructs(clean_lines, line_number, + class_state, error): + """Logs an error if we see certain non-ANSI constructs ignored by gcc-2. + + Complain about several constructs which gcc-2 accepts, but which are + not standard C++. Warning about these in lint is one way to ease the + transition to new compilers. + - put storage class first (e.g. "static const" instead of "const static"). + - "%lld" instead of %qd" in printf-type functions. + - "%1$d" is non-standard in printf-type functions. + - "\%" is an undefined character escape sequence. + - text after #endif is not allowed. + - invalid inner-style forward declaration. + - >? 
and <? operators, and their >?= and <?= cousins. + - classes with virtual methods need virtual destructors (compiler warning + available, but not turned on yet.) + + Additionally, check for constructor/destructor style violations as it + is very convenient to do so while checking for gcc-2 compliance. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + class_state: A _ClassState instance which maintains information about + the current stack of nested class declarations being parsed. + error: A callable to which errors are reported, which takes parameters: + line number, error level, and message + """ + + # Remove comments from the line, but leave in strings for now. + line = clean_lines.lines[line_number] + + if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): + error(line_number, 'runtime/printf_format', 3, + '%q in format strings is deprecated. Use %ll instead.') + + if search(r'printf\s*\(.*".*%\d+\$', line): + error(line_number, 'runtime/printf_format', 2, + '%N$ formats are unconventional. Try rewriting to avoid them.') + + # Remove escaped backslashes before looking for undefined escapes. + line = line.replace('\\\\', '') + + if search(r'("|\').*\\(%|\[|\(|{)', line): + error(line_number, 'build/printf_format', 3, + '%, [, (, and { are undefined character escapes. Unescape them.') + + # For the rest, work with both comments and strings removed. + line = clean_lines.elided[line_number] + + if search(r'\b(const|volatile|void|char|short|int|long' + r'|float|double|signed|unsigned' + r'|schar|u?int8|u?int16|u?int32|u?int64)' + r'\s+(auto|register|static|extern|typedef)\b', + line): + error(line_number, 'build/storage_class', 5, + 'Storage class (static, extern, typedef, etc) should be first.') + + if match(r'\s*#\s*endif\s*[^/\s]+', line): + error(line_number, 'build/endif_comment', 5, + 'Uncommented text after #endif is non-standard. Use a comment.') + + if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): + error(line_number, 'build/forward_decl', 5, + 'Inner-style forward declarations are invalid. Remove this line.') + + if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): + error(line_number, 'build/deprecated', 3, + '>? and <? (max and min) operators are non-standard and deprecated.') + + # Track class entry and exit, and attempt to find cases within the + # class declaration that don't meet the C++ style + # guidelines. Tracking is very dependent on the code matching Google + # style guidelines, but it seems to perform well enough in testing + # to be a worthwhile addition to the checks. + classinfo_stack = class_state.classinfo_stack + # Look for a class declaration + class_decl_match = match( + r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line) + if class_decl_match: + classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number)) + + # Everything else in this function uses the top of the stack if it's + # not empty. + if not classinfo_stack: + return + + classinfo = classinfo_stack[-1] + + # If the opening brace hasn't been seen look for it and also + # parent class declarations. + if not classinfo.seen_open_brace: + # If the line has a ';' in it, assume it's a forward declaration or + # a single-line class declaration, which we won't process. 
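Most of the checks in this file work from a CleansedLines object rather than the raw text, and _FileState is built on top of it; both are cheap to construct by hand. A small illustrative sketch (the sample source lines are made up):

source = ['int a = 1; // count', 'call("string with // slashes");']
clean = CleansedLines(source)

assert clean.raw_lines[0] == 'int a = 1; // count'
assert clean.lines[0] == 'int a = 1; '    # comment stripped, code kept
assert clean.elided[1] == 'call("");'     # string contents collapsed as well

# _FileState guesses Objective-C for headers by looking at the elided lines.
objc = CleansedLines(['#import <Foundation/Foundation.h>'])
assert _FileState(objc, 'h').is_objective_c()
assert not _FileState(objc, 'cpp').is_objective_c()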
+ if line.find(';') != -1: + classinfo_stack.pop() + return + classinfo.seen_open_brace = (line.find('{') != -1) + # Look for a bare ':' + if search('(^|[^:]):($|[^:])', line): + classinfo.is_derived = True + if not classinfo.seen_open_brace: + return # Everything else in this function is for after open brace + + # The class may have been declared with namespace or classname qualifiers. + # The constructor and destructor will not have those qualifiers. + base_classname = classinfo.name.split('::')[-1] + + # Look for single-argument constructors that aren't marked explicit. + # Technically a valid construct, but against style. + args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)' + % re.escape(base_classname), + line) + if (args + and args.group(1) != 'void' + and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname), + args.group(1).strip())): + error(line_number, 'runtime/explicit', 5, + 'Single-argument constructors should be marked explicit.') + + # Look for methods declared virtual. + if search(r'\bvirtual\b', line): + classinfo.virtual_method_line_number = line_number + # Only look for a destructor declaration on the same line. It would + # be extremely unlikely for the destructor declaration to occupy + # more than one line. + if search(r'~%s\s*\(' % base_classname, line): + classinfo.has_virtual_destructor = True + + # Look for class end. + brace_depth = classinfo.brace_depth + brace_depth = brace_depth + line.count('{') - line.count('}') + if brace_depth <= 0: + classinfo = classinfo_stack.pop() + # Try to detect missing virtual destructor declarations. + # For now, only warn if a non-derived class with virtual methods lacks + # a virtual destructor. This is to make it less likely that people will + # declare derived virtual destructors without declaring the base + # destructor virtual. + if ((classinfo.virtual_method_line_number is not None) + and (not classinfo.has_virtual_destructor) + and (not classinfo.is_derived)): # Only warn for base classes + error(classinfo.line_number, 'runtime/virtual', 4, + 'The class %s probably needs a virtual destructor due to ' + 'having virtual method(s), one declared at line %d.' + % (classinfo.name, classinfo.virtual_method_line_number)) + else: + classinfo.brace_depth = brace_depth + + +def check_spacing_for_function_call(line, line_number, error): + """Checks for the correctness of various spacing around function calls. + + Args: + line: The text of the line to check. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + # Since function calls often occur inside if/for/foreach/while/switch + # expressions - which have their own, more liberal conventions - we + # first see if we should be looking inside such an expression for a + # function call, to which we can apply more strict standards. + function_call = line # if there's no control flow construct, look at whole line + for pattern in (r'\bif\s*\((.*)\)\s*{', + r'\bfor\s*\((.*)\)\s*{', + r'\bforeach\s*\((.*)\)\s*{', + r'\bwhile\s*\((.*)\)\s*[{;]', + r'\bswitch\s*\((.*)\)\s*{'): + matched = search(pattern, line) + if matched: + function_call = matched.group(1) # look inside the parens for function calls + break + + # Except in if/for/foreach/while/switch, there should never be space + # immediately inside parens (eg "f( 3, 4 )"). We make an exception + # for nested parens ( (a+b) + c ). Likewise, there should never be + # a space before a ( when it's a function argument. 
I assume it's a + # function argument when the char before the whitespace is legal in + # a function name (alnum + _) and we're not starting a macro. Also ignore + # pointers and references to arrays and functions coz they're too tricky: + # we use a very simple way to recognize these: + # " (something)(maybe-something)" or + # " (something)(maybe-something," or + # " (something)[something]" + # Note that we assume the contents of [] to be short enough that + # they'll never need to wrap. + if ( # Ignore control structures. + not search(r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call) + # Ignore pointers/references to functions. + and not search(r' \([^)]+\)\([^)]*(\)|,$)', function_call) + # Ignore pointers/references to arrays. + and not search(r' \([^)]+\)\[[^\]]+\]', function_call)): + if search(r'\w\s*\([ \t](?!\s*\\$)', function_call): # a ( used for a fn call + error(line_number, 'whitespace/parens', 4, + 'Extra space after ( in function call') + elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call): + error(line_number, 'whitespace/parens', 2, + 'Extra space after (') + if (search(r'\w\s+\(', function_call) + and not search(r'#\s*define|typedef', function_call)): + error(line_number, 'whitespace/parens', 4, + 'Extra space before ( in function call') + # If the ) is followed only by a newline or a { + newline, assume it's + # part of a control statement (if/while/etc), and don't complain + if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call): + error(line_number, 'whitespace/parens', 2, + 'Extra space before )') + + +def is_blank_line(line): + """Returns true if the given line is blank. + + We consider a line to be blank if the line is empty or consists of + only white spaces. + + Args: + line: A line of a string. + + Returns: + True, if the given line is blank. + """ + return not line or line.isspace() + + +def detect_functions(clean_lines, line_number, function_state, error): + """Finds where functions start and end. + + Uses a simplistic algorithm assuming other style guidelines + (especially spacing) are followed. + Trivial bodies are unchecked, so constructors with huge initializer lists + may be missed. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + function_state: Current function name and lines in body so far. + error: The function to call with any errors found. + """ + # Are we now past the end of a function? + if function_state.ending_line_number + 1 == line_number: + function_state.end() + + # If we're in a function, don't try to detect a new one. + if function_state.in_a_function: + return + + lines = clean_lines.lines + line = lines[line_number] + raw = clean_lines.raw_lines + raw_line = raw[line_number] + + regexp = r'\s*(\w(\w|::|\*|\&|\s|<|>|,|~)*)\(' # decls * & space::name( ... + match_result = match(regexp, line) + if not match_result: + return + + # If the name is all caps and underscores, figure it's a macro and + # ignore it, unless it's TEST or TEST_F. 
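check_spacing_for_function_call() only needs the text of one line plus an error callback, so the parenthesis rules described above are easy to probe in isolation. A minimal sketch with a throwaway collector and made-up lines:

reports = []

def collect(line_number, category, confidence, message):
    reports.append(message)

check_spacing_for_function_call('foo( bar, baz);', 1, collect)
check_spacing_for_function_call('foo (bar, baz);', 2, collect)

assert reports == ['Extra space after ( in function call',
                   'Extra space before ( in function call']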
+ function_name = match_result.group(1).split()[-1] + if function_name != 'TEST' and function_name != 'TEST_F' and match(r'[A-Z_]+$', function_name): + return + + joined_line = '' + for start_line_number in xrange(line_number, clean_lines.num_lines()): + start_line = clean_lines.elided[start_line_number] + joined_line += ' ' + start_line.lstrip() + if search(r'{|;', start_line): + # Replace template constructs with _ so that no spaces remain in the function name, + # while keeping the column numbers of other characters the same as "line". + line_with_no_templates = iteratively_replace_matches_with_char(r'<[^<>]*>', '_', line) + match_function = search(r'((\w|:|<|>|,|~)*)\(', line_with_no_templates) + if not match_function: + return # The '(' must have been inside of a template. + + # Use the column numbers from the modified line to find the + # function name in the original line. + function = line[match_function.start(1):match_function.end(1)] + + if match(r'TEST', function): # Handle TEST... macros + parameter_regexp = search(r'(\(.*\))', joined_line) + if parameter_regexp: # Ignore bad syntax + function += parameter_regexp.group(1) + else: + function += '()' + is_declaration = bool(search(r'^[^{]*;', start_line)) + if is_declaration: + ending_line_number = start_line_number + else: + open_brace_index = start_line.find('{') + ending_line_number = close_expression(clean_lines, start_line_number, open_brace_index)[1] + function_state.begin(function, start_line_number, ending_line_number, is_declaration) + return + + # No body for the function (or evidence of a non-function) was found. + error(line_number, 'readability/fn_size', 5, + 'Lint failed to find start of function body.') + + +def check_for_function_lengths(clean_lines, line_number, function_state, error): + """Reports for long function bodies. + + For an overview why this is done, see: + http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions + + Blank/comment lines are not counted so as to avoid encouraging the removal + of vertical space and commments just to get through a lint check. + NOLINT *on the last line of a function* disables this check. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + function_state: Current function name and lines in body so far. + error: The function to call with any errors found. + """ + lines = clean_lines.lines + line = lines[line_number] + raw = clean_lines.raw_lines + raw_line = raw[line_number] + + if function_state.ending_line_number == line_number: # last line + if not search(r'\bNOLINT\b', raw_line): + function_state.check(error, line_number) + elif not match(r'^\s*$', line): + function_state.count(line_number) # Count non-blank/non-comment lines. + + +def check_pass_ptr_usage(clean_lines, line_number, function_state, error): + """Check for proper usage of Pass*Ptr. + + Currently this is limited to detecting declarations of Pass*Ptr + variables inside of functions. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + function_state: Current function name and lines in body so far. + error: The function to call with any errors found. 
+ """ + if not function_state.in_a_function: + return + + lines = clean_lines.lines + line = lines[line_number] + if line_number > function_state.body_start_line_number: + matched_pass_ptr = match(r'^\s*Pass([A-Z][A-Za-z]*)Ptr<', line) + if matched_pass_ptr: + type_name = 'Pass%sPtr' % matched_pass_ptr.group(1) + error(line_number, 'readability/pass_ptr', 5, + 'Local variables should never be %s (see ' + 'http://webkit.org/coding/RefPtr.html).' % type_name) + + +def check_spacing(file_extension, clean_lines, line_number, error): + """Checks for the correctness of various spacing issues in the code. + + Things we check for: spaces around operators, spaces after + if/for/while/switch, no spaces around parens in function calls, two + spaces between code and comment, don't start a block with a blank + line, don't end a function with a blank line, don't have too many + blank lines in a row. + + Args: + file_extension: The current file extension, without the leading dot. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + raw = clean_lines.raw_lines + line = raw[line_number] + + # Before nixing comments, check if the line is blank for no good + # reason. This includes the first line after a block is opened, and + # blank lines at the end of a function (ie, right before a line like '}'). + if is_blank_line(line): + elided = clean_lines.elided + previous_line = elided[line_number - 1] + previous_brace = previous_line.rfind('{') + # FIXME: Don't complain if line before blank line, and line after, + # both start with alnums and are indented the same amount. + # This ignores whitespace at the start of a namespace block + # because those are not usually indented. + if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1 + and previous_line[:previous_brace].find('namespace') == -1): + # OK, we have a blank line at the start of a code block. Before we + # complain, we check if it is an exception to the rule: The previous + # non-empty line has the parameters of a function header that are indented + # 4 spaces (because they did not fit in a 80 column line when placed on + # the same line as the function name). We also check for the case where + # the previous line is indented 6 spaces, which may happen when the + # initializers of a constructor do not fit into a 80 column line. + exception = False + if match(r' {6}\w', previous_line): # Initializer list? + # We are looking for the opening column of initializer list, which + # should be indented 4 spaces to cause 6 space indentation afterwards. + search_position = line_number - 2 + while (search_position >= 0 + and match(r' {6}\w', elided[search_position])): + search_position -= 1 + exception = (search_position >= 0 + and elided[search_position][:5] == ' :') + else: + # Search for the function arguments or an initializer list. We use a + # simple heuristic here: If the line is indented 4 spaces; and we have a + # closing paren, without the opening paren, followed by an opening brace + # or colon (for initializer lists) we assume that it is the last line of + # a function header. If we have a colon indented 4 spaces, it is an + # initializer list. + exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', + previous_line) + or match(r' {4}:', previous_line)) + + if not exception: + error(line_number, 'whitespace/blank_line', 2, + 'Blank line at the start of a code block. 
Is this needed?') + # This doesn't ignore whitespace at the end of a namespace block + # because that is too hard without pairing open/close braces; + # however, a special exception is made for namespace closing + # brackets which have a comment containing "namespace". + # + # Also, ignore blank lines at the end of a block in a long if-else + # chain, like this: + # if (condition1) { + # // Something followed by a blank line + # + # } else if (condition2) { + # // Something else + # } + if line_number + 1 < clean_lines.num_lines(): + next_line = raw[line_number + 1] + if (next_line + and match(r'\s*}', next_line) + and next_line.find('namespace') == -1 + and next_line.find('} else ') == -1): + error(line_number, 'whitespace/blank_line', 3, + 'Blank line at the end of a code block. Is this needed?') + + # Next, we complain if there's a comment too near the text + comment_position = line.find('//') + if comment_position != -1: + # Check if the // may be in quotes. If so, ignore it + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes + # Allow one space before end of line comment. + if (not match(r'^\s*$', line[:comment_position]) + and (comment_position >= 1 + and ((line[comment_position - 1] not in string.whitespace) + or (comment_position >= 2 + and line[comment_position - 2] in string.whitespace)))): + error(line_number, 'whitespace/comments', 5, + 'One space before end of line comments') + # There should always be a space between the // and the comment + commentend = comment_position + 2 + if commentend < len(line) and not line[commentend] == ' ': + # but some lines are exceptions -- e.g. if they're big + # comment delimiters like: + # //---------------------------------------------------------- + # or they begin with multiple slashes followed by a space: + # //////// Header comment + matched = (search(r'[=/-]{4,}\s*$', line[commentend:]) + or search(r'^/+ ', line[commentend:])) + if not matched: + error(line_number, 'whitespace/comments', 4, + 'Should have a space between // and comment') + + line = clean_lines.elided[line_number] # get rid of comments and strings + + # Don't try to do spacing checks for operator methods + line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>|\+=|-=|\*=|/=|%=|&=|\|=|^=|<<=|>>=)\(', 'operator\(', line) + # Don't try to do spacing checks for #include or #import statements at + # minimum because it messes up checks for spacing around / + if match(r'\s*#\s*(?:include|import)', line): + return + if search(r'[\w.]=[\w.]', line): + error(line_number, 'whitespace/operators', 4, + 'Missing spaces around =') + + # FIXME: It's not ok to have spaces around binary operators like . + + # You should always have whitespace around binary operators. + # Alas, we can't test < or > because they're legitimately used sans spaces + # (a->b, vector<int> a). The only time we can tell is a < with no >, and + # only if it's not template params list spilling into the next line. + matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line) + if not matched: + # Note that while it seems that the '<[^<]*' term in the following + # regexp could be simplified to '<.*', which would indeed match + # the same class of strings, the [^<] means that searching for the + # regexp takes linear rather than quadratic time. 
+ if not search(r'<[^<]*,\s*$', line): # template params spill + matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line) + if matched: + error(line_number, 'whitespace/operators', 3, + 'Missing spaces around %s' % matched.group(1)) + + # There shouldn't be space around unary operators + matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) + if matched: + error(line_number, 'whitespace/operators', 4, + 'Extra space for operator %s' % matched.group(1)) + + # A pet peeve of mine: no spaces after an if, while, switch, or for + matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line) + if matched: + error(line_number, 'whitespace/parens', 5, + 'Missing space before ( in %s' % matched.group(1)) + + # For if/for/foreach/while/switch, the left and right parens should be + # consistent about how many spaces are inside the parens, and + # there should either be zero or one spaces inside the parens. + # We don't want: "if ( foo)" or "if ( foo )". + # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. + matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<remainder>.*)$', line) + if matched: + statement = matched.group('statement') + condition, rest = up_to_unmatched_closing_paren(matched.group('remainder')) + if condition is not None: + condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition) + if condition_match: + n_leading = len(condition_match.group('leading')) + n_trailing = len(condition_match.group('trailing')) + if n_leading != 0: + for_exception = statement == 'for' and condition.startswith(' ;') + if not for_exception: + error(line_number, 'whitespace/parens', 5, + 'Extra space after ( in %s' % statement) + if n_trailing != 0: + for_exception = statement == 'for' and condition.endswith('; ') + if not for_exception: + error(line_number, 'whitespace/parens', 5, + 'Extra space before ) in %s' % statement) + + # Do not check for more than one command in macros + in_preprocessor_directive = match(r'\s*#', line) + if not in_preprocessor_directive and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest): + error(line_number, 'whitespace/parens', 4, + 'More than one command on the same line in %s' % statement) + + # You should always have a space after a comma (either as fn arg or operator) + if search(r',[^\s]', line): + error(line_number, 'whitespace/comma', 3, + 'Missing space after ,') + + matched = search(r'^\s*(?P<token1>[a-zA-Z0-9_\*&]+)\s\s+(?P<token2>[a-zA-Z0-9_\*&]+)', line) + if matched: + error(line_number, 'whitespace/declaration', 3, + 'Extra space between %s and %s' % (matched.group('token1'), matched.group('token2'))) + + if file_extension == 'cpp': + # C++ should have the & or * beside the type not the variable name. + matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line) + if matched: + error(line_number, 'whitespace/declaration', 3, + 'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip())) + + elif file_extension == 'c': + # C Pointer declaration should have the * beside the variable not the type name. + matched = search(r'^\s*\w+\*\s+\w+', line) + if matched: + error(line_number, 'whitespace/declaration', 3, + 'Declaration has space between * and variable name in %s' % matched.group(0).strip()) + + # Next we will look for issues with function calls. + check_spacing_for_function_call(line, line_number, error) + + # Except after an opening paren, you should have spaces before your braces. 
+ # And since you should never have braces at the beginning of a line, this is + # an easy test. + if search(r'[^ ({]{', line): + error(line_number, 'whitespace/braces', 5, + 'Missing space before {') + + # Make sure '} else {' has spaces. + if search(r'}else', line): + error(line_number, 'whitespace/braces', 5, + 'Missing space before else') + + # You shouldn't have spaces before your brackets, except maybe after + # 'delete []' or 'new char * []'. + if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line): + error(line_number, 'whitespace/braces', 5, + 'Extra space before [') + + # You shouldn't have a space before a semicolon at the end of the line. + # There's a special case for "for" since the style guide allows space before + # the semicolon there. + if search(r':\s*;\s*$', line): + error(line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement. Use { } instead.') + elif search(r'^\s*;\s*$', line): + error(line_number, 'whitespace/semicolon', 5, + 'Line contains only semicolon. If this should be an empty statement, ' + 'use { } instead.') + elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)): + error(line_number, 'whitespace/semicolon', 5, + 'Extra space before last semicolon. If this should be an empty ' + 'statement, use { } instead.') + elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line) + and line.count('(') == line.count(')') + # Allow do {} while(); + and not search(r'}\s*while', line)): + error(line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement for this loop. Use { } instead.') + + +def get_previous_non_blank_line(clean_lines, line_number): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + line_number: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. + """ + + previous_line_number = line_number - 1 + while previous_line_number >= 0: + previous_line = clean_lines.elided[previous_line_number] + if not is_blank_line(previous_line): # if not a blank line... + return (previous_line, previous_line_number) + previous_line_number -= 1 + return ('', -1) + + +def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error): + """Looks for indentation errors inside of namespaces. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + file_extension: The extension (dot not included) of the file. + file_state: A _FileState instance which maintains information about + the state of things in the file. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line) + if not namespace_match: + return + + current_indentation_level = len(namespace_match.group('namespace_indentation')) + if current_indentation_level > 0: + # Don't warn about an indented namespace if we already warned about indented code. 
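As a quick illustration of the get_previous_non_blank_line helper defined above (the input lines are invented, and the snippet assumes this module has been imported):

    clean = CleansedLines(['int a;', '', 'int b;'])
    get_previous_non_blank_line(clean, 2)  # -> ('int a;', 0): the blank line is skipped
    get_previous_non_blank_line(clean, 0)  # -> ('', -1): no earlier non-blank line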
+ if not file_state.did_inside_namespace_indent_warning():
+ error(line_number, 'whitespace/indent', 4,
+ 'namespace should never be indented.')
+ return
+ looking_for_semicolon = False;
+ line_offset = 0
+ in_preprocessor_directive = False;
+ for current_line in clean_lines.elided[line_number + 1:]:
+ line_offset += 1
+ if not current_line.strip():
+ continue
+ if not current_indentation_level:
+ if not (in_preprocessor_directive or looking_for_semicolon):
+ if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
+ file_state.set_did_inside_namespace_indent_warning()
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'Code inside a namespace should not be indented.')
+ if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
+ in_preprocessor_directive = current_line[-1] == '\\'
+ else:
+ looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
+ else:
+ looking_for_semicolon = False; # If we have a brace we may not need a semicolon.
+ current_indentation_level += current_line.count('{') - current_line.count('}')
+ if current_indentation_level < 0:
+ break;
+
+
+def check_using_std(clean_lines, line_number, file_state, error):
+ """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if file_state.is_c_or_objective_c():
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
+ if not using_std_match:
+ return
+
+ method_name = using_std_match.group('method_name')
+ error(line_number, 'build/using_std', 4,
+ "Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+
+
+def check_max_min_macros(clean_lines, line_number, file_state, error):
+ """Looks for use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if file_state.is_c_or_objective_c():
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
+ if not max_min_macros_search:
+ return
+
+ max_min_macro = max_min_macros_search.group('max_min_macro')
+ max_min_macro_lower = max_min_macro.lower()
+ error(line_number, 'runtime/max_min_macros', 4,
+ 'Use std::%s() or std::%s<type>() instead of the %s() macro.'
+ % (max_min_macro_lower, max_min_macro_lower, max_min_macro))
+
+
+def check_switch_indentation(clean_lines, line_number, error):
+ """Looks for indentation errors inside of switch statements.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line) + if not switch_match: + return + + switch_indentation = switch_match.group('switch_indentation') + inner_indentation = switch_indentation + ' ' * 4 + line_offset = 0 + encountered_nested_switch = False + + for current_line in clean_lines.elided[line_number + 1:]: + line_offset += 1 + + # Skip not only empty lines but also those with preprocessor directives. + if current_line.strip() == '' or current_line.startswith('#'): + continue + + if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line): + # Complexity alarm - another switch statement nested inside the one + # that we're currently testing. We'll need to track the extent of + # that inner switch if the upcoming label tests are still supposed + # to work correctly. Let's not do that; instead, we'll finish + # checking this line, and then leave it like that. Assuming the + # indentation is done consistently (even if incorrectly), this will + # still catch all indentation issues in practice. + encountered_nested_switch = True + + current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line); + current_indentation = current_indentation_match.group('indentation') + remaining_line = current_indentation_match.group('remaining_line') + + # End the check at the end of the switch statement. + if remaining_line.startswith('}') and current_indentation == switch_indentation: + break + # Case and default branches should not be indented. The regexp also + # catches single-line cases like "default: break;" but does not trigger + # on stuff like "Document::Foo();". + elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line): + if current_indentation != switch_indentation: + error(line_number + line_offset, 'whitespace/indent', 4, + 'A case label should not be indented, but line up with its switch statement.') + # Don't throw an error for multiple badly indented labels, + # one should be enough to figure out the problem. + break + # We ignore goto labels at the very beginning of a line. + elif match(r'\w+\s*:\s*$', remaining_line): + continue + # It's not a goto label, so check if it's indented at least as far as + # the switch statement plus one more level of indentation. + elif not current_indentation.startswith(inner_indentation): + error(line_number + line_offset, 'whitespace/indent', 4, + 'Non-label code inside switch statements should be indented.') + # Don't throw an error for multiple badly indented statements, + # one should be enough to figure out the problem. + break + + if encountered_nested_switch: + break + + +def check_braces(clean_lines, line_number, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + if match(r'\s*{\s*$', line): + # We allow an open brace to start a line in the case where someone + # is using braces for function definition or in a block to + # explicitly create a new scope, which is commonly used to control + # the lifetime of stack-allocated variables. 
We don't detect this + # perfectly: we just don't complain if the last non-whitespace + # character on the previous non-blank line is ';', ':', '{', '}', + # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'. + # We also allow '#' for #endif and '=' for array initialization. + previous_line = get_previous_non_blank_line(clean_lines, line_number)[0] + if ((not search(r'[;:}{)=]\s*$|\)\s*const\s*$', previous_line) + or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line)) + and previous_line.find('#') < 0): + error(line_number, 'whitespace/braces', 4, + 'This { should be at the end of the previous line') + elif (search(r'\)\s*(const\s*)?{\s*$', line) + and line.count('(') == line.count(')') + and not search(r'\b(if|for|foreach|while|switch)\b', line) + and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)): + error(line_number, 'whitespace/braces', 4, + 'Place brace on its own line for function definitions.') + + if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1): + # We check if a closed brace has started a line to see if a + # one line control statement was previous. + previous_line = clean_lines.elided[line_number - 2] + if (previous_line.find('{') > 0 and previous_line.find('}') < 0 + and search(r'\b(if|for|foreach|while|else)\b', previous_line)): + error(line_number, 'whitespace/braces', 4, + 'One line control clauses should not use braces.') + + # An else clause should be on the same line as the preceding closing brace. + if match(r'\s*else\s*', line): + previous_line = get_previous_non_blank_line(clean_lines, line_number)[0] + if match(r'\s*}\s*$', previous_line): + error(line_number, 'whitespace/newline', 4, + 'An else should appear on the same line as the preceding }') + + # Likewise, an else should never have the else clause on the same line + if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line): + error(line_number, 'whitespace/newline', 4, + 'Else clause should never be on same line as else (use 2 lines)') + + # In the same way, a do/while should never be on one line + if match(r'\s*do [^\s{]', line): + error(line_number, 'whitespace/newline', 4, + 'do/while clauses should not be on a single line') + + # Braces shouldn't be followed by a ; unless they're defining a struct + # or initializing an array. + # We can't tell in general, but we can for some common cases. + previous_line_number = line_number + while True: + (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number) + if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'): + line = previous_line + line + else: + break + if (search(r'{.*}\s*;', line) + and line.count('{') == line.count('}') + and not search(r'struct|class|enum|\s*=\s*{', line)): + error(line_number, 'readability/braces', 4, + "You don't need a ; after a }") + + +def check_exit_statement_simplifications(clean_lines, line_number, error): + """Looks for else or else-if statements that should be written as an + if statement when the prior if concludes with a return, break, continue or + goto statement. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. 
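To make the rule concrete, here is a small sketch of what this check reports once the module is imported; the input lines and the collecting handler are invented for illustration:

    found = []
    def collect(line_number, category, confidence, message):
        found.append((category, message))

    cpp = CleansedLines(['if (failed)', '    return false;', 'else', '    cleanUp();', ''])
    check_exit_statement_simplifications(cpp, 2, collect)
    # found now holds one 'readability/control_flow' entry suggesting the else be removed.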
+ + else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line) + if not else_match: + return + + else_indentation = else_match.group('else_indentation') + inner_indentation = else_indentation + ' ' * 4 + + previous_lines = clean_lines.elided[:line_number] + previous_lines.reverse() + line_offset = 0 + encountered_exit_statement = False + + for current_line in previous_lines: + line_offset -= 1 + + # Skip not only empty lines but also those with preprocessor directives + # and goto labels. + if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line): + continue + + # Skip lines with closing braces on the original indentation level. + # Even though the styleguide says they should be on the same line as + # the "else if" statement, we also want to check for instances where + # the current code does not comply with the coding style. Thus, ignore + # these lines and proceed to the line before that. + if current_line == else_indentation + '}': + continue + + current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line); + current_indentation = current_indentation_match.group('indentation') + remaining_line = current_indentation_match.group('remaining_line') + + # As we're going up the lines, the first real statement to encounter + # has to be an exit statement (return, break, continue or goto) - + # otherwise, this check doesn't apply. + if not encountered_exit_statement: + # We only want to find exit statements if they are on exactly + # the same level of indentation as expected from the code inside + # the block. If the indentation doesn't strictly match then we + # might have a nested if or something, which must be ignored. + if current_indentation != inner_indentation: + break + if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line): + encountered_exit_statement = True + continue + break + + # When code execution reaches this point, we've found an exit statement + # as last statement of the previous block. Now we only need to make + # sure that the block belongs to an "if", then we can throw an error. + + # Skip lines with opening braces on the original indentation level, + # similar to the closing braces check above. ("if (condition)\n{") + if current_line == else_indentation + '{': + continue + + # Skip everything that's further indented than our "else" or "else if". + if current_indentation.startswith(else_indentation) and current_indentation != else_indentation: + continue + + # So we've got a line with same (or less) indentation. Is it an "if"? + # If yes: throw an error. If no: don't throw an error. + # Whatever the outcome, this is the end of our loop. + if match(r'if\s*\(', remaining_line): + if else_match.start('else') != -1: + error(line_number + line_offset, 'readability/control_flow', 4, + 'An else statement can be removed when the prior "if" ' + 'concludes with a return, break, continue or goto statement.') + else: + error(line_number + line_offset, 'readability/control_flow', 4, + 'An else if statement should be written as an if statement ' + 'when the prior "if" concludes with a return, break, ' + 'continue or goto statement.') + break + + +def replaceable_check(operator, macro, line): + """Determine whether a basic CHECK can be replaced with a more specific one. + + For example suggest using CHECK_EQ instead of CHECK(a == b) and + similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. 
+ + Args: + operator: The C++ operator used in the CHECK. + macro: The CHECK or EXPECT macro being called. + line: The current source line. + + Returns: + True if the CHECK can be replaced with a more specific one. + """ + + # This matches decimal and hex integers, strings, and chars (in that order). + match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')' + + # Expression to match two sides of the operator with something that + # looks like a literal, since CHECK(x == iterator) won't compile. + # This means we can't catch all the cases where a more specific + # CHECK is possible, but it's less annoying than dealing with + # extraneous warnings. + match_this = (r'\s*' + macro + r'\((\s*' + + match_constant + r'\s*' + operator + r'[^<>].*|' + r'.*[^<>]' + operator + r'\s*' + match_constant + + r'\s*\))') + + # Don't complain about CHECK(x == NULL) or similar because + # CHECK_EQ(x, NULL) won't compile (requires a cast). + # Also, don't complain about more complex boolean expressions + # involving && or || such as CHECK(a == b || c == d). + return match(match_this, line) and not search(r'NULL|&&|\|\|', line) + + +def check_check(clean_lines, line_number, error): + """Checks the use of CHECK and EXPECT macros. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + # Decide the set of replacement macros that should be suggested + raw_lines = clean_lines.raw_lines + current_macro = '' + for macro in _CHECK_MACROS: + if raw_lines[line_number].find(macro) >= 0: + current_macro = macro + break + if not current_macro: + # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' + return + + line = clean_lines.elided[line_number] # get rid of comments and strings + + # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc. + for operator in ['==', '!=', '>=', '>', '<=', '<']: + if replaceable_check(operator, current_macro, line): + error(line_number, 'readability/check', 2, + 'Consider using %s instead of %s(a %s b)' % ( + _CHECK_REPLACEMENT[current_macro][operator], + current_macro, operator)) + break + + +def check_for_comparisons_to_zero(clean_lines, line_number, error): + # Get the line without comments and strings. + line = clean_lines.elided[line_number] + + # Include NULL here so that users don't have to convert NULL to 0 first and then get this error. + if search(r'[=!]=\s*(NULL|0|true|false)\W', line) or search(r'\W(NULL|0|true|false)\s*[=!]=', line): + error(line_number, 'readability/comparison_to_zero', 5, + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.') + + +def check_for_null(clean_lines, line_number, file_state, error): + # This check doesn't apply to C or Objective-C implementation files. + if file_state.is_c_or_objective_c(): + return + + line = clean_lines.elided[line_number] + + # Don't warn about NULL usage in g_*(). See Bug 32858 and 39372. + if search(r'\bg(_[a-z]+)+\b', line): + return + + # Don't warn about NULL usage in gst_*_many(). See Bug 39740 + if search(r'\bgst_\w+_many\b', line): + return + + # Don't warn about NULL usage in g_str{join,concat}(). See Bug 34834 + if search(r'\bg_str(join|concat)\b', line): + return + + # Don't warn about NULL usage in gdk_pixbuf_save_to_*{join,concat}(). See Bug 43090. 
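The comparison-to-zero rule defined above is easiest to see on a one-line sketch (invented input, assumes the module has been imported):

    hits = []
    cpp = CleansedLines(['if (count == 0)', ''])
    check_for_comparisons_to_zero(cpp, 0,
        lambda line, category, confidence, message: hits.append(category))
    # hits -> ['readability/comparison_to_zero']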
+ if search(r'\bgdk_pixbuf_save_to\w+\b', line): + return + + if search(r'\bNULL\b', line): + error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.') + return + + line = clean_lines.raw_lines[line_number] + # See if NULL occurs in any comments in the line. If the search for NULL using the raw line + # matches, then do the check with strings collapsed to avoid giving errors for + # NULLs occurring in strings. + if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)): + error(line_number, 'readability/null', 4, 'Use 0 instead of NULL.') + +def get_line_width(line): + """Determines the width of the line in column positions. + + Args: + line: A string, which may be a Unicode string. + + Returns: + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ + if isinstance(line, unicode): + width = 0 + for c in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(c) in ('W', 'F'): + width += 2 + elif not unicodedata.combining(c): + width += 1 + return width + return len(line) + + +def check_style(clean_lines, line_number, file_extension, class_state, file_state, error): + """Checks rules from the 'C++ style rules' section of cppguide.html. + + Most of these rules are hard to test (naming, comment style), but we + do what we can. In particular we check for 4-space indents, line lengths, + tab usage, spaces inside code, etc. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + class_state: A _ClassState instance which maintains information about + the current stack of nested class declarations being parsed. + file_state: A _FileState instance which maintains information about + the state of things in the file. + error: The function to call with any errors found. + """ + + raw_lines = clean_lines.raw_lines + line = raw_lines[line_number] + + if line.find('\t') != -1: + error(line_number, 'whitespace/tab', 1, + 'Tab found; better to use spaces') + + # One or three blank spaces at the beginning of the line is weird; it's + # hard to reconcile that with 4-space indents. + # NOTE: here are the conditions rob pike used for his tests. Mine aren't + # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces + # if(RLENGTH > 20) complain = 0; + # if(match($0, " +(error|private|public|protected):")) complain = 0; + # if(match(prev, "&& *$")) complain = 0; + # if(match(prev, "\\|\\| *$")) complain = 0; + # if(match(prev, "[\",=><] *$")) complain = 0; + # if(match($0, " <<")) complain = 0; + # if(match(prev, " +for \\(")) complain = 0; + # if(prevodd && match(prevprev, " +for \\(")) complain = 0; + initial_spaces = 0 + cleansed_line = clean_lines.elided[line_number] + while initial_spaces < len(line) and line[initial_spaces] == ' ': + initial_spaces += 1 + if line and line[-1].isspace(): + error(line_number, 'whitespace/end_of_line', 4, + 'Line ends in whitespace. Consider deleting these extra spaces.') + # There are certain situations we allow one space, notably for labels + elif ((initial_spaces >= 1 and initial_spaces <= 3) + and not match(r'\s*\w+\s*:\s*$', cleansed_line)): + error(line_number, 'whitespace/indent', 3, + 'Weird number of spaces at line-start. ' + 'Are you using a 4-space indent?') + # Labels should always be indented at least one space. 
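For get_line_width above, plain ASCII strings fall back to len() while wide characters count as two columns (a doctest-style sketch with invented inputs):

    get_line_width('int a;')         # -> 6
    get_line_width(u'\u4f60\u597d')  # -> 4, two fullwidth CJK characters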
+ elif not initial_spaces and line[:2] != '//': + label_match = match(r'(?P<label>[^:]+):\s*$', line) + + if label_match: + label = label_match.group('label') + # Only throw errors for stuff that is definitely not a goto label, + # because goto labels can in fact occur at the start of the line. + if label in ['public', 'private', 'protected'] or label.find(' ') != -1: + error(line_number, 'whitespace/labels', 4, + 'Labels should always be indented at least one space. ' + 'If this is a member-initializer list in a constructor, ' + 'the colon should be on the line after the definition header.') + + if (cleansed_line.count(';') > 1 + # for loops are allowed two ;'s (and may run over two lines). + and cleansed_line.find('for') == -1 + and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1 + or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1) + # It's ok to have many commands in a switch case that fits in 1 line + and not ((cleansed_line.find('case ') != -1 + or cleansed_line.find('default:') != -1) + and cleansed_line.find('break;') != -1) + # Also it's ok to have many commands in trivial single-line accessors in class definitions. + and not (match(r'.*\(.*\).*{.*.}', line) + and class_state.classinfo_stack + and line.count('{') == line.count('}')) + and not cleansed_line.startswith('#define ')): + error(line_number, 'whitespace/newline', 4, + 'More than one command on the same line') + + if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'): + error(line_number, 'whitespace/operators', 4, + 'Boolean expressions that span multiple lines should have their ' + 'operators on the left side of the line instead of the right side.') + + # Some more style checks + check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error) + check_using_std(clean_lines, line_number, file_state, error) + check_max_min_macros(clean_lines, line_number, file_state, error) + check_switch_indentation(clean_lines, line_number, error) + check_braces(clean_lines, line_number, error) + check_exit_statement_simplifications(clean_lines, line_number, error) + check_spacing(file_extension, clean_lines, line_number, error) + check_check(clean_lines, line_number, error) + check_for_comparisons_to_zero(clean_lines, line_number, error) + check_for_null(clean_lines, line_number, file_state, error) + + +_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') +_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') +# Matches the first component of a filename delimited by -s and _s. That is: +# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo' +_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') + + +def _drop_common_suffixes(filename): + """Drops common suffixes like _test.cpp or -inl.h from filename. + + For example: + >>> _drop_common_suffixes('foo/foo-inl.h') + 'foo/foo' + >>> _drop_common_suffixes('foo/bar/foo.cpp') + 'foo/bar/foo' + >>> _drop_common_suffixes('foo/foo_internal.h') + 'foo/foo' + >>> _drop_common_suffixes('foo/foo_unusualinternal.h') + 'foo/foo_unusualinternal' + + Args: + filename: The input filename. + + Returns: + The filename with the common suffix removed. 
+ """ + for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp', + 'inl.h', 'impl.h', 'internal.h'): + if (filename.endswith(suffix) and len(filename) > len(suffix) + and filename[-len(suffix) - 1] in ('-', '_')): + return filename[:-len(suffix) - 1] + return os.path.splitext(filename)[0] + + +def _classify_include(filename, include, is_system, include_state): + """Figures out what kind of header 'include' is. + + Args: + filename: The current file cpp_style is running over. + include: The path to a #included file. + is_system: True if the #include used <> rather than "". + include_state: An _IncludeState instance in which the headers are inserted. + + Returns: + One of the _XXX_HEADER constants. + + For example: + >>> _classify_include('foo.cpp', 'config.h', False) + _CONFIG_HEADER + >>> _classify_include('foo.cpp', 'foo.h', False) + _PRIMARY_HEADER + >>> _classify_include('foo.cpp', 'bar.h', False) + _OTHER_HEADER + """ + + # If it is a system header we know it is classified as _OTHER_HEADER. + if is_system: + return _OTHER_HEADER + + # If the include is named config.h then this is WebCore/config.h. + if include == "config.h": + return _CONFIG_HEADER + + # There cannot be primary includes in header files themselves. Only an + # include exactly matches the header filename will be is flagged as + # primary, so that it triggers the "don't include yourself" check. + if filename.endswith('.h') and filename != include: + return _OTHER_HEADER; + + # Qt's moc files do not follow the naming and ordering rules, so they should be skipped + if include.startswith('moc_') and include.endswith('.cpp'): + return _MOC_HEADER + + if include.endswith('.moc'): + return _MOC_HEADER + + # If the target file basename starts with the include we're checking + # then we consider it the primary header. + target_base = FileInfo(filename).base_name() + include_base = FileInfo(include).base_name() + + # If we haven't encountered a primary header, then be lenient in checking. + if not include_state.visited_primary_section() and target_base.find(include_base) != -1: + return _PRIMARY_HEADER + # If we already encountered a primary header, perform a strict comparison. + # In case the two filename bases are the same then the above lenient check + # probably was a false positive. + elif include_state.visited_primary_section() and target_base == include_base: + if include == "ResourceHandleWin.h": + # FIXME: Thus far, we've only seen one example of these, but if we + # start to see more, please consider generalizing this check + # somehow. + return _OTHER_HEADER + return _PRIMARY_HEADER + + return _OTHER_HEADER + + +def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error): + """Check rules that are applicable to #include lines. + + Strings on #include lines are NOT removed from elided line, to make + certain tasks easier. However, to prevent false positives, checks + applicable to #include lines in CheckLanguage must be put here. + + Args: + filename: The name of the current file. + file_extension: The current file extension, without the leading dot. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + include_state: An _IncludeState instance in which the headers are inserted. + error: The function to call with any errors found. + """ + # FIXME: For readability or as a possible optimization, consider + # exiting early here by checking whether the "build/include" + # category should be checked for the given filename. 
This + # may involve having the error handler classes expose a + # should_check() method, in addition to the usual __call__ + # method. + line = clean_lines.lines[line_number] + + matched = _RE_PATTERN_INCLUDE.search(line) + if not matched: + return + + include = matched.group(2) + is_system = (matched.group(1) == '<') + + # Look for any of the stream classes that are part of standard C++. + if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): + error(line_number, 'readability/streams', 3, + 'Streams are highly discouraged.') + + # Look for specific includes to fix. + if include.startswith('wtf/') and not is_system: + error(line_number, 'build/include', 4, + 'wtf includes should be <wtf/file.h> instead of "wtf/file.h".') + + duplicate_header = include in include_state + if duplicate_header: + error(line_number, 'build/include', 4, + '"%s" already included at %s:%s' % + (include, filename, include_state[include])) + else: + include_state[include] = line_number + + header_type = _classify_include(filename, include, is_system, include_state) + include_state.header_types[line_number] = header_type + + # Only proceed if this isn't a duplicate header. + if duplicate_header: + return + + # We want to ensure that headers appear in the right order: + # 1) for implementation files: config.h, primary header, blank line, alphabetically sorted + # 2) for header files: alphabetically sorted + # The include_state object keeps track of the last type seen + # and complains if the header types are out of order or missing. + error_message = include_state.check_next_include_order(header_type, file_extension == "h") + + # Check to make sure we have a blank line after primary header. + if not error_message and header_type == _PRIMARY_HEADER: + next_line = clean_lines.raw_lines[line_number + 1] + if not is_blank_line(next_line): + error(line_number, 'build/include_order', 4, + 'You should add a blank line after implementation file\'s own header.') + + # Check to make sure all headers besides config.h and the primary header are + # alphabetically sorted. Skip Qt's moc files. + if not error_message and header_type == _OTHER_HEADER: + previous_line_number = line_number - 1; + previous_line = clean_lines.lines[previous_line_number] + previous_match = _RE_PATTERN_INCLUDE.search(previous_line) + while (not previous_match and previous_line_number > 0 + and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)): + previous_line_number -= 1; + previous_line = clean_lines.lines[previous_line_number] + previous_match = _RE_PATTERN_INCLUDE.search(previous_line) + if previous_match: + previous_header_type = include_state.header_types[previous_line_number] + if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip(): + error(line_number, 'build/include_order', 4, + 'Alphabetical sorting problem.') + + if error_message: + if file_extension == 'h': + error(line_number, 'build/include_order', 4, + '%s Should be: alphabetically sorted.' % + error_message) + else: + error(line_number, 'build/include_order', 4, + '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' % + error_message) + + +def check_language(filename, clean_lines, line_number, file_extension, include_state, + file_state, error): + """Checks rules from the 'C++ language rules' section of cppguide.html. + + Some of these rules are hard to test (function overloading, using + uint32 inappropriately), but we do the best we can. + + Args: + filename: The name of the current file. 
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists of entirely a comment, no need to
+ # check it.
+ line = clean_lines.elided[line_number]
+ if not line:
+ return
+
+ matched = _RE_PATTERN_INCLUDE.search(line)
+ if matched:
+ check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
+ return
+
+ # FIXME: figure out if they're using default arguments in fn proto.
+
+ # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ matched = search(
+ r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+ if matched:
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts.
+ if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
+ error(line_number, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ matched.group(1))
+
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+ error)
+ # This doesn't catch all cases. Consider (const char * const)"hello".
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ if search(
+ r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+ error(line_number, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
+
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access.
+ matched = match(
+ r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+ line)
+ # Make sure it's not a function.
+ # Function template specialization looks like: "string foo<Type>(...".
+ # Class template definitions look like: "string Foo<Type>::Method(...".
+ if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+ matched.group(3)):
+ error(line_number, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string instead: '
+ '"%schar %s[]".' %
+ (matched.group(1), matched.group(2)))
+
+ # Check that we're not using RTTI outside of testing code.
+ if search(r'\bdynamic_cast<', line):
+ error(line_number, 'runtime/rtti', 5,
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ "hierarchy, use static_cast<> to upcast. 
Google doesn't support " + 'RTTI.') + + if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): + error(line_number, 'runtime/init', 4, + 'You seem to be initializing a member variable with itself.') + + if file_extension == 'h': + # FIXME: check that 1-arg constructors are explicit. + # How to tell it's a constructor? + # (handled in check_for_non_standard_constructs for now) + pass + + # Check if people are using the verboten C basic types. The only exception + # we regularly allow is "unsigned short port" for port. + if search(r'\bshort port\b', line): + if not search(r'\bunsigned short port\b', line): + error(line_number, 'runtime/int', 4, + 'Use "unsigned short" for ports, not "short"') + + # When snprintf is used, the second argument shouldn't be a literal. + matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) + if matched: + error(line_number, 'runtime/printf', 3, + 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' + 'to snprintf.' % (matched.group(1), matched.group(2))) + + # Check if some verboten C functions are being used. + if search(r'\bsprintf\b', line): + error(line_number, 'runtime/printf', 5, + 'Never use sprintf. Use snprintf instead.') + matched = search(r'\b(strcpy|strcat)\b', line) + if matched: + error(line_number, 'runtime/printf', 4, + 'Almost always, snprintf is better than %s' % matched.group(1)) + + if search(r'\bsscanf\b', line): + error(line_number, 'runtime/printf', 1, + 'sscanf can be ok, but is slow and can overflow buffers.') + + # Check for suspicious usage of "if" like + # } if (a == b) { + if search(r'\}\s*if\s*\(', line): + error(line_number, 'readability/braces', 4, + 'Did you mean "else if"? If not, start a new line for "if".') + + # Check for potential format string bugs like printf(foo). + # We constrain the pattern not to pick things like DocidForPrintf(foo). + # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) + matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I) + if matched: + error(line_number, 'runtime/printf', 4, + 'Potential format string bug. Do %s("%%s", %s) instead.' + % (matched.group(1), matched.group(2))) + + # Check for potential memset bugs like memset(buf, sizeof(buf), 0). + matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) + if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", matched.group(2)): + error(line_number, 'runtime/memset', 4, + 'Did you mean "memset(%s, 0, %s)"?' + % (matched.group(1), matched.group(2))) + + # Detect variable-length arrays. + matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) + if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and + matched.group(3).find(']') == -1): + # Split the size using space and arithmetic operators as delimiters. + # If any of the resulting tokens are not compile time constants then + # report the error. 
+ tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', matched.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if search(r'sizeof\(.+\)', tok):
+ continue
+ if search(r'arraysize\(\w+\)', tok):
+ continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok:
+ continue
+ if match(r'\d+', tok):
+ continue
+ if match(r'0[xX][0-9a-fA-F]+', tok):
+ continue
+ if match(r'k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
+ continue
+ # A catch all for tricky sizeof cases, including 'sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
+ # requires skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(line_number, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size.")
+
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h'
+ and search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(line_number, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
+ check_identifier_name_in_declaration(filename, line_number, line, file_state, error)
+
+
+def check_identifier_name_in_declaration(filename, line_number, line, file_state, error):
+ """Checks if identifier names contain any underscores.
+
+ As identifiers in libraries we are using have a bunch of
+ underscores, we only warn about the declarations of identifiers
+ and don't check use of identifiers.
+
+ Args:
+ filename: The name of the current file.
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+ # We don't check a return statement.
+ if match(r'\s*(return|delete)\b', line):
+ return
+
+ # Basically, a declaration is a type name followed by whitespaces
+ # followed by an identifier. The type name can be complicated
+ # due to type adjectives and templates. We remove them first to
+ # simplify the process to find declarations of identifiers.
+
+ # Convert "long long", "long double", and "long long int" to
+ # simple types, but don't remove simple "long".
+ line = sub(r'long (long )?(?=long|double|int)', '', line)
+ # Convert unsigned/signed types to simple types, too.
+ line = sub(r'(unsigned|signed) (?=char|short|int|long)', '', line)
+ line = sub(r'\b(inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
+
+ # Remove "new" and "new (expr)" to simplify, too.
+ line = sub(r'new\s*(\([^)]*\))?', '', line)
+
+ # Remove all template parameters by removing matching < and >.
+ # Loop until no templates are removed to remove nested templates. 
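For instance, the template-stripping loop below peels one template layer per pass (invented input, calling the module's subn helper the same way the loop does):

    subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', 'Vector<RefPtr<Node> > nodes;')
    # -> ('Vector<RefPtr > nodes;', 1); a second pass then yields ('Vector nodes;', 1)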
+ while True: + line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line) + if not number_of_replacements: + break + + # Declarations of local variables can be in condition expressions + # of control flow statements (e.g., "if (RenderObject* p = o->parent())"). + # We remove the keywords and the first parenthesis. + # + # Declarations in "while", "if", and "switch" are different from + # other declarations in two aspects: + # + # - There can be only one declaration between the parentheses. + # (i.e., you cannot write "if (int i = 0, j = 1) {}") + # - The variable must be initialized. + # (i.e., you cannot write "if (int i) {}") + # + # and we will need different treatments for them. + line = sub(r'^\s*for\s*\(', '', line) + line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line) + + # Detect variable and functions. + type_regexp = r'\w([\w]|\s*[*&]\s*|::)+' + identifier_regexp = r'(?P<identifier>[\w:]+)' + maybe_bitfield_regexp = r'(:\s*\d+\s*)?' + character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)' + declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + maybe_bitfield_regexp + character_after_identifier_regexp + declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp + is_function_arguments = False + number_of_identifiers = 0 + while True: + # If we are seeing the first identifier or arguments of a + # function, there should be a type name before an identifier. + if not number_of_identifiers or is_function_arguments: + declaration_regexp = declaration_with_type_regexp + else: + declaration_regexp = declaration_without_type_regexp + + matched = match(declaration_regexp, line) + if not matched: + return + identifier = matched.group('identifier') + character_after_identifier = matched.group('character_after_identifier') + + # If we removed a non-for-control statement, the character after + # the identifier should be '='. With this rule, we can avoid + # warning for cases like "if (val & INT_MAX) {". + if control_statement and character_after_identifier != '=': + return + + is_function_arguments = is_function_arguments or character_after_identifier == '(' + + # Remove "m_" and "s_" to allow them. + modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier) + if not file_state.is_objective_c() and modified_identifier.find('_') >= 0: + # Various exceptions to the rule: JavaScript op codes functions, const_iterator. + if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('op_') >= 0) + and not modified_identifier.startswith('tst_') + and not modified_identifier.startswith('webkit_dom_object_') + and not modified_identifier.startswith('NPN_') + and not modified_identifier.startswith('NPP_') + and not modified_identifier.startswith('NP_') + and not modified_identifier.startswith('qt_') + and not modified_identifier.startswith('cairo_') + and not modified_identifier.find('::qt_') >= 0 + and not modified_identifier == "const_iterator" + and not modified_identifier == "vm_throw"): + error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.") + + # Check for variables named 'l', these are too easy to confuse with '1' in some fonts + if modified_identifier == 'l': + error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use the single letter 'l' as an identifier name.") + + # There can be only one declaration in non-for-control statements. 
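A sketch of the naming check in action; the file name, input line, and handler are invented, and it assumes a _FileState built for a .cpp file reports the file as C++ rather than Objective-C:

    warnings = []
    state = _FileState(CleansedLines(['']), 'cpp')
    check_identifier_name_in_declaration('Foo.cpp', 0, 'int my_count = 0;', state,
        lambda line, category, confidence, message: warnings.append(message))
    # warnings now holds the 'readability/naming' complaint about my_count.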
+ if control_statement: + return + # We should continue checking if this is a function + # declaration because we need to check its arguments. + # Also, we need to check multiple declarations. + if character_after_identifier != '(' and character_after_identifier != ',': + return + + number_of_identifiers += 1 + line = line[matched.end():] + +def check_c_style_cast(line_number, line, raw_line, cast_type, pattern, + error): + """Checks for a C-style cast by looking for the pattern. + + This also handles sizeof(type) warnings, due to similarity of content. + + Args: + line_number: The number of the line to check. + line: The line of code to check. + raw_line: The raw line of code to check, with comments. + cast_type: The string for the C++ cast to recommend. This is either + reinterpret_cast or static_cast, depending. + pattern: The regular expression used to find C-style casts. + error: The function to call with any errors found. + """ + matched = search(pattern, line) + if not matched: + return + + # e.g., sizeof(int) + sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1]) + if sizeof_match: + error(line_number, 'runtime/sizeof', 1, + 'Using sizeof(type). Use sizeof(varname) instead if possible') + return + + remainder = line[matched.end(0):] + + # The close paren is for function pointers as arguments to a function. + # eg, void foo(void (*bar)(int)); + # The semicolon check is a more basic function check; also possibly a + # function pointer typedef. + # eg, void foo(int); or void foo(int) const; + # The equals check is for function pointer assignment. + # eg, void *(*foo)(int) = ... + # + # Right now, this will only catch cases where there's a single argument, and + # it's unnamed. It should probably be expanded to check for multiple + # arguments with some unnamed. + function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder) + if function_match: + if (not function_match.group(3) + or function_match.group(3) == ';' + or raw_line.find('/*') < 0): + error(line_number, 'readability/function', 3, + 'All parameters should be named in a function') + return + + # At this point, all that should be left is actual casts. + error(line_number, 'readability/casting', 4, + 'Using C-style cast. Use %s<%s>(...) instead' % + (cast_type, matched.group(1))) + + +_HEADERS_CONTAINING_TEMPLATES = ( + ('<deque>', ('deque',)), + ('<functional>', ('unary_function', 'binary_function', + 'plus', 'minus', 'multiplies', 'divides', 'modulus', + 'negate', + 'equal_to', 'not_equal_to', 'greater', 'less', + 'greater_equal', 'less_equal', + 'logical_and', 'logical_or', 'logical_not', + 'unary_negate', 'not1', 'binary_negate', 'not2', + 'bind1st', 'bind2nd', + 'pointer_to_unary_function', + 'pointer_to_binary_function', + 'ptr_fun', + 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', + 'mem_fun_ref_t', + 'const_mem_fun_t', 'const_mem_fun1_t', + 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', + 'mem_fun_ref', + )), + ('<limits>', ('numeric_limits',)), + ('<list>', ('list',)), + ('<map>', ('map', 'multimap',)), + ('<memory>', ('allocator',)), + ('<queue>', ('queue', 'priority_queue',)), + ('<set>', ('set', 'multiset',)), + ('<stack>', ('stack',)), + ('<string>', ('char_traits', 'basic_string',)), + ('<utility>', ('pair',)), + ('<vector>', ('vector',)), + + # gcc extensions. 
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
+ # We can trust with reasonable confidence that map gives us pair<>, too.
+ 'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
+}
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
+
+
+def files_belong_to_same_module(filename_cpp, filename_h):
+ """Check if these two filenames belong to the same module.
+
+ The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cpp contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cpp', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives. This should be sufficiently rare in practice.
+
+ Args:
+ filename_cpp: is the path for the .cpp file
+ filename_h: is the path for the header file
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cpp and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
+ if not filename_cpp.endswith('.cpp'):
+ return (False, '')
+ filename_cpp = filename_cpp[:-len('.cpp')]
+ if filename_cpp.endswith('_unittest'):
+ filename_cpp = filename_cpp[:-len('_unittest')]
+ elif filename_cpp.endswith('_test'):
+ filename_cpp = filename_cpp[:-len('_test')]
+ filename_cpp = filename_cpp.replace('/public/', '/')
+ filename_cpp = filename_cpp.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cpp.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cpp[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
+
+def update_include_state(filename, include_state, io=codecs):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+ io: The io factory to use to read the file. 
Provided for testability.
+
+ Returns:
+ True if a header was successfully added. False otherwise.
+ """
+ io = _unit_test_config.get(INCLUDE_IO_INJECTION_KEY, codecs)
+ header_file = None
+ try:
+ header_file = io.open(filename, 'r', 'utf8', 'replace')
+ except IOError:
+ return False
+ line_number = 0
+ for line in header_file:
+ line_number += 1
+ clean_line = cleanse_comments(line)
+ matched = _RE_PATTERN_INCLUDE.search(clean_line)
+ if matched:
+ include = matched.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, line_number))
+ return True
+
+
+def check_for_include_what_you_use(filename, clean_lines, include_state, error):
+ """Reports missing stl includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the stl containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+ less<> in a .h file, only one (the latter in the file) of these will be
+ reported as a reason to include the <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ """
+ required = {} # A map of header name to line_number and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for line_number in xrange(clean_lines.num_lines()):
+ line = clean_lines.elided[line_number]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ if _RE_PATTERN_STRING.search(line):
+ required['<string>'] = (line_number, 'string')
+
+ for pattern, template, header in _re_pattern_algorithm_header:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+ # The following function is just a speed up, no semantics are changed.
+ if not '<' in line: # Reduces the cpu time usage by skipping lines.
+ continue
+
+ for pattern, template, header in _re_pattern_templates:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+ # The policy is that if you #include something in foo.h you don't need to
+ # include it again in foo.cpp. Here, we will look at possible includes.
+ # Let's copy the include_state so it is only messed up within this function.
+ include_state = include_state.copy()
+
+ # Did we find the header for this file (if any) and successfully load it?
+ header_found = False
+
+ # Use the absolute path so that matching works properly.
+ abs_filename = os.path.abspath(filename)
+
+ # For Emacs's flymake.
+ # If cpp_style is invoked from Emacs's flymake, a temporary file is generated
+ # by flymake and that file name might end with '_flymake.cpp'. In that case,
+ # restore original file name here so that the corresponding header file can be
+ # found.
+ # e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
+ # instead of 'foo_flymake.h'
+ abs_filename = re.sub(r'_flymake\.cpp$', '.cpp', abs_filename)
+
+ # include_state is modified during iteration, so we iterate over a copy of
+ # the keys. 
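The files_belong_to_same_module helper called in the loop below pairs an implementation file with its header and also returns the prefix needed to open that header; echoing the example paths from its own docstring:

    files_belong_to_same_module('/absolute/path/to/base/sysinfo.cpp', 'base/sysinfo.h')
    # -> (True, '/absolute/path/to/')
    files_belong_to_same_module('foo/foo_unittest.cpp', 'foo/foo.h')
    # -> (True, '')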
+    # include_state is modified during iteration, so we iterate over a copy of
+    # the keys.
+    for header in include_state.keys():  #NOLINT
+        (same_module, common_path) = files_belong_to_same_module(abs_filename, header)
+        fullpath = common_path + header
+        if same_module and update_include_state(fullpath, include_state):
+            header_found = True
+
+    # If we can't find the header file for a .cpp, assume it's because we don't
+    # know where to look. In that case we'll give up as we're not sure they
+    # didn't include it in the .h file.
+    # FIXME: Do a better job of finding .h files so we are confident that
+    # not having the .h file means there isn't one.
+    if filename.endswith('.cpp') and not header_found:
+        return
+
+    # All the lines have been processed, report the errors found.
+    for required_header_unstripped in required:
+        template = required[required_header_unstripped][1]
+        if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
+            headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
+            if [True for header in headers if header in include_state]:
+                continue
+        if required_header_unstripped.strip('<>"') not in include_state:
+            error(required[required_header_unstripped][0],
+                  'build/include_what_you_use', 4,
+                  'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+def process_line(filename, file_extension,
+                 clean_lines, line, include_state, function_state,
+                 class_state, file_state, error):
+    """Processes a single line in the file.
+
+    Args:
+      filename: Filename of the file that is being processed.
+      file_extension: The extension (dot not included) of the file.
+      clean_lines: An array of strings, each representing a line of the file,
+                   with comments stripped.
+      line: Number of the line being processed.
+      include_state: An _IncludeState instance in which the headers are inserted.
+      function_state: A _FunctionState instance which counts function lines, etc.
+      class_state: A _ClassState instance which maintains information about
+                   the current stack of nested class declarations being parsed.
+      file_state: A _FileState instance which maintains information about
+                  the state of things in the file.
+      error: A callable to which errors are reported, which takes arguments:
+             line number, error level, and message
+
+    """
+    raw_lines = clean_lines.raw_lines
+    detect_functions(clean_lines, line, function_state, error)
+    check_for_function_lengths(clean_lines, line, function_state, error)
+    if search(r'\bNOLINT\b', raw_lines[line]):  # ignore nolint lines
+        return
+    check_pass_ptr_usage(clean_lines, line, function_state, error)
+    check_for_multiline_comments_and_strings(clean_lines, line, error)
+    check_style(clean_lines, line, file_extension, class_state, file_state, error)
+    check_language(filename, clean_lines, line, file_extension, include_state,
+                   file_state, error)
+    check_for_non_standard_constructs(clean_lines, line, class_state, error)
+    check_posix_threading(clean_lines, line, error)
+    check_invalid_increment(clean_lines, line, error)
+
+
+def _process_lines(filename, file_extension, lines, error, min_confidence):
+    """Performs lint checks and reports any errors to the given error function.
+
+    Args:
+      filename: Filename of the file that is being processed.
+      file_extension: The extension (dot not included) of the file.
+      lines: An array of strings, each representing a line of the file, with the
+             last element being empty if the file is terminated with a newline.
+ error: A callable to which errors are reported, which takes 4 arguments: + """ + lines = (['// marker so line numbers and indices both start at 1'] + lines + + ['// marker so line numbers end in a known way']) + + include_state = _IncludeState() + function_state = _FunctionState(min_confidence) + class_state = _ClassState() + + check_for_copyright(lines, error) + + if file_extension == 'h': + check_for_header_guard(filename, lines, error) + + remove_multi_line_comments(lines, error) + clean_lines = CleansedLines(lines) + file_state = _FileState(clean_lines, file_extension) + for line in xrange(clean_lines.num_lines()): + process_line(filename, file_extension, clean_lines, line, + include_state, function_state, class_state, file_state, error) + class_state.check_finished(error) + + check_for_include_what_you_use(filename, clean_lines, include_state, error) + + # We check here rather than inside process_line so that we see raw + # lines rather than "cleaned" lines. + check_for_unicode_replacement_characters(lines, error) + + check_for_new_line_at_eof(lines, error) + + +class CppChecker(object): + + """Processes C++ lines for checking style.""" + + # This list is used to-- + # + # (1) generate an explicit list of all possible categories, + # (2) unit test that all checked categories have valid names, and + # (3) unit test that all categories are getting unit tested. + # + categories = set([ + 'build/class', + 'build/deprecated', + 'build/endif_comment', + 'build/forward_decl', + 'build/header_guard', + 'build/include', + 'build/include_order', + 'build/include_what_you_use', + 'build/namespaces', + 'build/printf_format', + 'build/storage_class', + 'build/using_std', + 'legal/copyright', + 'readability/braces', + 'readability/casting', + 'readability/check', + 'readability/comparison_to_zero', + 'readability/constructors', + 'readability/control_flow', + 'readability/fn_size', + 'readability/function', + 'readability/multiline_comment', + 'readability/multiline_string', + 'readability/naming', + 'readability/null', + 'readability/pass_ptr', + 'readability/streams', + 'readability/todo', + 'readability/utf8', + 'runtime/arrays', + 'runtime/casting', + 'runtime/explicit', + 'runtime/init', + 'runtime/int', + 'runtime/invalid_increment', + 'runtime/max_min_macros', + 'runtime/memset', + 'runtime/printf', + 'runtime/printf_format', + 'runtime/references', + 'runtime/rtti', + 'runtime/sizeof', + 'runtime/string', + 'runtime/threadsafe_fn', + 'runtime/virtual', + 'whitespace/blank_line', + 'whitespace/braces', + 'whitespace/comma', + 'whitespace/comments', + 'whitespace/declaration', + 'whitespace/end_of_line', + 'whitespace/ending_newline', + 'whitespace/indent', + 'whitespace/labels', + 'whitespace/line_length', + 'whitespace/newline', + 'whitespace/operators', + 'whitespace/parens', + 'whitespace/semicolon', + 'whitespace/tab', + 'whitespace/todo', + ]) + + def __init__(self, file_path, file_extension, handle_style_error, + min_confidence): + """Create a CppChecker instance. + + Args: + file_extension: A string that is the file extension, without + the leading dot. + + """ + self.file_extension = file_extension + self.file_path = file_path + self.handle_style_error = handle_style_error + self.min_confidence = min_confidence + + # Useful for unit testing. 
+ def __eq__(self, other): + """Return whether this CppChecker instance is equal to another.""" + if self.file_extension != other.file_extension: + return False + if self.file_path != other.file_path: + return False + if self.handle_style_error != other.handle_style_error: + return False + if self.min_confidence != other.min_confidence: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce __ne__() from __eq__(). + return not self.__eq__(other) + + def check(self, lines): + _process_lines(self.file_path, self.file_extension, lines, + self.handle_style_error, self.min_confidence) + + +# FIXME: Remove this function (requires refactoring unit tests). +def process_file_data(filename, file_extension, lines, error, min_confidence, unit_test_config): + global _unit_test_config + _unit_test_config = unit_test_config + checker = CppChecker(filename, file_extension, error, min_confidence) + checker.check(lines) + _unit_test_config = {} diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py new file mode 100644 index 0000000..70df1ea --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py @@ -0,0 +1,3998 @@ +#!/usr/bin/python +# -*- coding: utf-8; -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for cpp_style.py.""" + +# FIXME: Add a good test that tests UpdateIncludeState. + +import codecs +import os +import random +import re +import unittest +import cpp as cpp_style +from cpp import CppChecker +from ..filter import FilterConfiguration + +# This class works as an error collector and replaces cpp_style.Error +# function for the unit tests. We also verify each category we see +# is in STYLE_CATEGORIES, to help keep that list up to date. 
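Before the collector class itself, a minimal hedged sketch of the interface it has to satisfy: process_file_data() above is a thin wrapper that builds a CppChecker and calls check(), invoking the error callable with a line number, category, confidence and message. The sample input and its expected warning are borrowed from the cast tests further down; a real run also reports file-level warnings such as the missing copyright header.

    import cpp as cpp_style  # same import style this test file uses

    collected = []
    def record_error(line_number, category, confidence, message):
        # Flatten to the 'message [category] [confidence]' form the tests assert on.
        collected.append('%s [%s] [%d]' % (message, category, confidence))

    cpp_style.process_file_data('foo.cpp', 'cpp', ['int a = (int)1.0;'],
                                record_error, 1, {})
    # collected now contains, among other entries:
    # 'Using C-style cast. Use static_cast<int>(...) instead [readability/casting] [4]'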
+class ErrorCollector: + _all_style_categories = CppChecker.categories + # This is a list including all categories seen in any unit test. + _seen_style_categories = {} + + def __init__(self, assert_fn, filter=None): + """assert_fn: a function to call when we notice a problem. + filter: filters the errors that we are concerned about.""" + self._assert_fn = assert_fn + self._errors = [] + if not filter: + filter = FilterConfiguration() + self._filter = filter + + def __call__(self, unused_linenum, category, confidence, message): + self._assert_fn(category in self._all_style_categories, + 'Message "%s" has category "%s",' + ' which is not in STYLE_CATEGORIES' % (message, category)) + if self._filter.should_check(category, ""): + self._seen_style_categories[category] = 1 + self._errors.append('%s [%s] [%d]' % (message, category, confidence)) + + def results(self): + if len(self._errors) < 2: + return ''.join(self._errors) # Most tests expect to have a string. + else: + return self._errors # Let's give a list if there is more than one. + + def result_list(self): + return self._errors + + def verify_all_categories_are_seen(self): + """Fails if there's a category in _all_style_categories - _seen_style_categories. + + This should only be called after all tests are run, so + _seen_style_categories has had a chance to fully populate. Since + this isn't called from within the normal unittest framework, we + can't use the normal unittest assert macros. Instead we just exit + when we see an error. Good thing this test is always run last! + """ + for category in self._all_style_categories: + if category not in self._seen_style_categories: + import sys + sys.exit('FATAL ERROR: There are no tests for category "%s"' % category) + + +# This class is a lame mock of codecs. We do not verify filename, mode, or +# encoding, but for the current use case it is not needed. +class MockIo: + def __init__(self, mock_file): + self.mock_file = mock_file + + def open(self, unused_filename, unused_mode, unused_encoding, _): # NOLINT + # (lint doesn't like open as a method name) + return self.mock_file + + +class CppFunctionsTest(unittest.TestCase): + + """Supports testing functions that do not need CppStyleTestBase.""" + + def test_is_c_or_objective_c(self): + clean_lines = cpp_style.CleansedLines(['']) + clean_objc_lines = cpp_style.CleansedLines(['#import "header.h"']) + self.assertTrue(cpp_style._FileState(clean_lines, 'c').is_c_or_objective_c()) + self.assertTrue(cpp_style._FileState(clean_lines, 'm').is_c_or_objective_c()) + self.assertFalse(cpp_style._FileState(clean_lines, 'cpp').is_c_or_objective_c()) + self.assertFalse(cpp_style._FileState(clean_lines, 'cc').is_c_or_objective_c()) + self.assertFalse(cpp_style._FileState(clean_lines, 'h').is_c_or_objective_c()) + self.assertTrue(cpp_style._FileState(clean_objc_lines, 'h').is_c_or_objective_c()) + + +class CppStyleTestBase(unittest.TestCase): + """Provides some useful helper functions for cpp_style tests. + + Attributes: + min_confidence: An integer that is the current minimum confidence + level for the tests. + + """ + + # FIXME: Refactor the unit tests so the confidence level is passed + # explicitly, just like it is in the real code. + min_confidence = 1; + + # Helper function to avoid needing to explicitly pass confidence + # in all the unit test calls to cpp_style.process_file_data(). 
+ def process_file_data(self, filename, file_extension, lines, error, unit_test_config={}): + """Call cpp_style.process_file_data() with the min_confidence.""" + return cpp_style.process_file_data(filename, file_extension, lines, + error, self.min_confidence, unit_test_config) + + def perform_lint(self, code, filename, basic_error_rules, unit_test_config={}): + error_collector = ErrorCollector(self.assert_, FilterConfiguration(basic_error_rules)) + lines = code.split('\n') + extension = filename.split('.')[1] + self.process_file_data(filename, extension, lines, error_collector, unit_test_config) + return error_collector.results() + + # Perform lint on single line of input and return the error message. + def perform_single_line_lint(self, code, filename): + basic_error_rules = ('-build/header_guard', + '-legal/copyright', + '-readability/fn_size', + '-whitespace/ending_newline') + return self.perform_lint(code, filename, basic_error_rules) + + # Perform lint over multiple lines and return the error message. + def perform_multi_line_lint(self, code, file_extension): + basic_error_rules = ('-build/header_guard', + '-legal/copyright', + '-multi_line_filter', + '-whitespace/ending_newline') + return self.perform_lint(code, 'test.' + file_extension, basic_error_rules) + + # Only keep some errors related to includes, namespaces and rtti. + def perform_language_rules_check(self, filename, code): + basic_error_rules = ('-', + '+build/include', + '+build/include_order', + '+build/namespaces', + '+runtime/rtti') + return self.perform_lint(code, filename, basic_error_rules) + + # Only keep function length errors. + def perform_function_lengths_check(self, code): + basic_error_rules = ('-', + '+readability/fn_size') + return self.perform_lint(code, 'test.cpp', basic_error_rules) + + # Only keep pass ptr errors. + def perform_pass_ptr_check(self, code): + basic_error_rules = ('-', + '+readability/pass_ptr') + return self.perform_lint(code, 'test.cpp', basic_error_rules) + + # Only include what you use errors. + def perform_include_what_you_use(self, code, filename='foo.h', io=codecs): + basic_error_rules = ('-', + '+build/include_what_you_use') + unit_test_config = {cpp_style.INCLUDE_IO_INJECTION_KEY: io} + return self.perform_lint(code, filename, basic_error_rules, unit_test_config) + + # Perform lint and compare the error message with "expected_message". 
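The basic_error_rules tuples above follow the convention of the FilterConfiguration imported at the top of this file: a bare '-' entry filters out every category, and '+category' entries opt individual checks back in. A small sketch of that behaviour, assuming the webkitpy package is importable and reusing the rule string from perform_include_what_you_use above:

    from webkitpy.style.filter import FilterConfiguration

    config = FilterConfiguration(('-', '+build/include_what_you_use'))
    config.should_check('build/include_what_you_use', '')  # True: explicitly re-enabled
    config.should_check('whitespace/tab', '')              # False: suppressed by '-'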
+ def assert_lint(self, code, expected_message, file_name='foo.cpp'): + self.assertEquals(expected_message, self.perform_single_line_lint(code, file_name)) + + def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'): + messages = self.perform_single_line_lint(code, file_name) + for message in messages: + if re.search(expected_message_re, message): + return + + self.assertEquals(expected_message_re, messages) + + def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'): + file_extension = file_name[file_name.rfind('.') + 1:] + self.assertEquals(expected_message, self.perform_multi_line_lint(code, file_extension)) + + def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'): + file_extension = file_name[file_name.rfind('.') + 1:] + message = self.perform_multi_line_lint(code, file_extension) + if not re.search(expected_message_re, message): + self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"') + + def assert_language_rules_check(self, file_name, code, expected_message): + self.assertEquals(expected_message, + self.perform_language_rules_check(file_name, code)) + + def assert_include_what_you_use(self, code, expected_message): + self.assertEquals(expected_message, + self.perform_include_what_you_use(code)) + + def assert_blank_lines_check(self, lines, start_errors, end_errors): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', lines, error_collector) + self.assertEquals( + start_errors, + error_collector.results().count( + 'Blank line at the start of a code block. Is this needed?' + ' [whitespace/blank_line] [2]')) + self.assertEquals( + end_errors, + error_collector.results().count( + 'Blank line at the end of a code block. Is this needed?' + ' [whitespace/blank_line] [3]')) + + +class FunctionDetectionTest(CppStyleTestBase): + def perform_function_detection(self, lines, function_information): + clean_lines = cpp_style.CleansedLines(lines) + function_state = cpp_style._FunctionState(5) + error_collector = ErrorCollector(self.assert_) + cpp_style.detect_functions(clean_lines, 0, function_state, error_collector) + if not function_information: + self.assertEquals(function_state.in_a_function, False) + return + self.assertEquals(function_state.in_a_function, True) + self.assertEquals(function_state.current_function, function_information['name'] + '()') + self.assertEquals(function_state.body_start_line_number, function_information['body_start_line_number']) + self.assertEquals(function_state.ending_line_number, function_information['ending_line_number']) + self.assertEquals(function_state.is_declaration, function_information['is_declaration']) + + def test_basic_function_detection(self): + self.perform_function_detection( + ['void theTestFunctionName(int) {', + '}'], + {'name': 'theTestFunctionName', + 'body_start_line_number': 0, + 'ending_line_number': 1, + 'is_declaration': False}) + + def test_function_declaration_detection(self): + self.perform_function_detection( + ['void aFunctionName(int);'], + {'name': 'aFunctionName', + 'body_start_line_number': 0, + 'ending_line_number': 0, + 'is_declaration': True}) + + def test_non_functions(self): + # This case exposed an error because the open brace was in quotes. + self.perform_function_detection( + ['asm(', + ' "stmdb sp!, {r1-r3}" "\n"', + ');'], + # This isn't a function but it looks like one to our simple + # algorithm and that is ok. 
+ {'name': 'asm', + 'body_start_line_number': 2, + 'ending_line_number': 2, + 'is_declaration': True}) + + # Simple test case with something that is not a function. + self.perform_function_detection(['class Stuff;'], None) + +class CppStyleTest(CppStyleTestBase): + + # Test get line width. + def test_get_line_width(self): + self.assertEquals(0, cpp_style.get_line_width('')) + self.assertEquals(10, cpp_style.get_line_width(u'x' * 10)) + self.assertEquals(16, cpp_style.get_line_width(u'都|道|府|県|支庁')) + + def test_find_next_multi_line_comment_start(self): + self.assertEquals(1, cpp_style.find_next_multi_line_comment_start([''], 0)) + + lines = ['a', 'b', '/* c'] + self.assertEquals(2, cpp_style.find_next_multi_line_comment_start(lines, 0)) + + lines = ['char a[] = "/*";'] # not recognized as comment. + self.assertEquals(1, cpp_style.find_next_multi_line_comment_start(lines, 0)) + + def test_find_next_multi_line_comment_end(self): + self.assertEquals(1, cpp_style.find_next_multi_line_comment_end([''], 0)) + lines = ['a', 'b', ' c */'] + self.assertEquals(2, cpp_style.find_next_multi_line_comment_end(lines, 0)) + + def test_remove_multi_line_comments_from_range(self): + lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b'] + cpp_style.remove_multi_line_comments_from_range(lines, 1, 4) + self.assertEquals(['a', '// dummy', '// dummy', '// dummy', 'b'], lines) + + def test_spaces_at_end_of_line(self): + self.assert_lint( + '// Hello there ', + 'Line ends in whitespace. Consider deleting these extra spaces.' + ' [whitespace/end_of_line] [4]') + + # Test C-style cast cases. + def test_cstyle_cast(self): + self.assert_lint( + 'int a = (int)1.0;', + 'Using C-style cast. Use static_cast<int>(...) instead' + ' [readability/casting] [4]') + self.assert_lint( + 'int *a = (int *)DEFINED_VALUE;', + 'Using C-style cast. Use reinterpret_cast<int *>(...) instead' + ' [readability/casting] [4]', 'foo.c') + self.assert_lint( + 'uint16 a = (uint16)1.0;', + 'Using C-style cast. Use static_cast<uint16>(...) instead' + ' [readability/casting] [4]') + self.assert_lint( + 'int32 a = (int32)1.0;', + 'Using C-style cast. Use static_cast<int32>(...) instead' + ' [readability/casting] [4]') + self.assert_lint( + 'uint64 a = (uint64)1.0;', + 'Using C-style cast. Use static_cast<uint64>(...) instead' + ' [readability/casting] [4]') + + # Test taking address of casts (runtime/casting) + def test_runtime_casting(self): + self.assert_lint( + 'int* x = &static_cast<int*>(foo);', + 'Are you taking an address of a cast? ' + 'This is dangerous: could be a temp var. ' + 'Take the address before doing the cast, rather than after' + ' [runtime/casting] [4]') + + self.assert_lint( + 'int* x = &dynamic_cast<int *>(foo);', + ['Are you taking an address of a cast? ' + 'This is dangerous: could be a temp var. ' + 'Take the address before doing the cast, rather than after' + ' [runtime/casting] [4]', + 'Do not use dynamic_cast<>. If you need to cast within a class ' + 'hierarchy, use static_cast<> to upcast. Google doesn\'t support ' + 'RTTI. [runtime/rtti] [5]']) + + self.assert_lint( + 'int* x = &reinterpret_cast<int *>(foo);', + 'Are you taking an address of a cast? ' + 'This is dangerous: could be a temp var. ' + 'Take the address before doing the cast, rather than after' + ' [runtime/casting] [4]') + + # It's OK to cast an address. 
+ self.assert_lint( + 'int* x = reinterpret_cast<int *>(&foo);', + '') + + def test_runtime_selfinit(self): + self.assert_lint( + 'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }', + 'You seem to be initializing a member variable with itself.' + ' [runtime/init] [4]') + self.assert_lint( + 'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }', + '') + self.assert_lint( + 'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }', + '') + + def test_runtime_rtti(self): + statement = 'int* x = dynamic_cast<int*>(&foo);' + error_message = ( + 'Do not use dynamic_cast<>. If you need to cast within a class ' + 'hierarchy, use static_cast<> to upcast. Google doesn\'t support ' + 'RTTI. [runtime/rtti] [5]') + # dynamic_cast is disallowed in most files. + self.assert_language_rules_check('foo.cpp', statement, error_message) + self.assert_language_rules_check('foo.h', statement, error_message) + + # We cannot test this functionality because of difference of + # function definitions. Anyway, we may never enable this. + # + # # Test for unnamed arguments in a method. + # def test_check_for_unnamed_params(self): + # message = ('All parameters should be named in a function' + # ' [readability/function] [3]') + # self.assert_lint('virtual void A(int*) const;', message) + # self.assert_lint('virtual void B(void (*fn)(int*));', message) + # self.assert_lint('virtual void C(int*);', message) + # self.assert_lint('void *(*f)(void *) = x;', message) + # self.assert_lint('void Method(char*) {', message) + # self.assert_lint('void Method(char*);', message) + # self.assert_lint('void Method(char* /*x*/);', message) + # self.assert_lint('typedef void (*Method)(int32);', message) + # self.assert_lint('static void operator delete[](void*) throw();', message) + # + # self.assert_lint('virtual void D(int* p);', '') + # self.assert_lint('void operator delete(void* x) throw();', '') + # self.assert_lint('void Method(char* x)\n{', '') + # self.assert_lint('void Method(char* /*x*/)\n{', '') + # self.assert_lint('void Method(char* x);', '') + # self.assert_lint('typedef void (*Method)(int32 x);', '') + # self.assert_lint('static void operator delete[](void* x) throw();', '') + # self.assert_lint('static void operator delete[](void* /*x*/) throw();', '') + # + # # This one should technically warn, but doesn't because the function + # # pointer is confusing. + # self.assert_lint('virtual void E(void (*fn)(int* p));', '') + + # Test deprecated casts such as int(d) + def test_deprecated_cast(self): + self.assert_lint( + 'int a = int(2.2);', + 'Using deprecated casting style. ' + 'Use static_cast<int>(...) instead' + ' [readability/casting] [4]') + # Checks for false positives... + self.assert_lint( + 'int a = int(); // Constructor, o.k.', + '') + self.assert_lint( + 'X::X() : a(int()) {} // default Constructor, o.k.', + '') + self.assert_lint( + 'operator bool(); // Conversion operator, o.k.', + '') + + # The second parameter to a gMock method definition is a function signature + # that often looks like a bad cast but should not picked up by lint. + def test_mock_method(self): + self.assert_lint( + 'MOCK_METHOD0(method, int());', + '') + self.assert_lint( + 'MOCK_CONST_METHOD1(method, float(string));', + '') + self.assert_lint( + 'MOCK_CONST_METHOD2_T(method, double(float, float));', + '') + + # Test sizeof(type) cases. + def test_sizeof_type(self): + self.assert_lint( + 'sizeof(int);', + 'Using sizeof(type). Use sizeof(varname) instead if possible' + ' [runtime/sizeof] [1]') + self.assert_lint( + 'sizeof(int *);', + 'Using sizeof(type). 
Use sizeof(varname) instead if possible' + ' [runtime/sizeof] [1]') + + # Test typedef cases. There was a bug that cpp_style misidentified + # typedef for pointer to function as C-style cast and produced + # false-positive error messages. + def test_typedef_for_pointer_to_function(self): + self.assert_lint( + 'typedef void (*Func)(int x);', + '') + self.assert_lint( + 'typedef void (*Func)(int *x);', + '') + self.assert_lint( + 'typedef void Func(int x);', + '') + self.assert_lint( + 'typedef void Func(int *x);', + '') + + def test_include_what_you_use_no_implementation_files(self): + code = 'std::vector<int> foo;' + self.assertEquals('Add #include <vector> for vector<>' + ' [build/include_what_you_use] [4]', + self.perform_include_what_you_use(code, 'foo.h')) + self.assertEquals('', + self.perform_include_what_you_use(code, 'foo.cpp')) + + def test_include_what_you_use(self): + self.assert_include_what_you_use( + '''#include <vector> + std::vector<int> foo; + ''', + '') + self.assert_include_what_you_use( + '''#include <map> + std::pair<int,int> foo; + ''', + '') + self.assert_include_what_you_use( + '''#include <multimap> + std::pair<int,int> foo; + ''', + '') + self.assert_include_what_you_use( + '''#include <hash_map> + std::pair<int,int> foo; + ''', + '') + self.assert_include_what_you_use( + '''#include <utility> + std::pair<int,int> foo; + ''', + '') + self.assert_include_what_you_use( + '''#include <vector> + DECLARE_string(foobar); + ''', + '') + self.assert_include_what_you_use( + '''#include <vector> + DEFINE_string(foobar, "", ""); + ''', + '') + self.assert_include_what_you_use( + '''#include <vector> + std::pair<int,int> foo; + ''', + 'Add #include <utility> for pair<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + std::vector<int> foo; + ''', + 'Add #include <vector> for vector<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include <vector> + std::set<int> foo; + ''', + 'Add #include <set> for set<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + hash_map<int, int> foobar; + ''', + 'Add #include <hash_map> for hash_map<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + bool foobar = std::less<int>(0,1); + ''', + 'Add #include <functional> for less<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + bool foobar = min<int>(0,1); + ''', + 'Add #include <algorithm> for min [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + 'void a(const string &foobar);', + 'Add #include <string> for string [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + bool foobar = swap(0,1); + ''', + 'Add #include <algorithm> for swap [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + bool foobar = transform(a.begin(), a.end(), b.start(), Foo); + ''', + 'Add #include <algorithm> for transform ' + '[build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include "base/foobar.h" + bool foobar = min_element(a.begin(), a.end()); + ''', + 'Add #include <algorithm> for min_element ' + '[build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''foo->swap(0,1); + foo.swap(0,1); + ''', + '') + self.assert_include_what_you_use( + '''#include <string> + void a(const 
std::multimap<int,string> &foobar); + ''', + 'Add #include <map> for multimap<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include <queue> + void a(const std::priority_queue<int> &foobar); + ''', + '') + self.assert_include_what_you_use( + '''#include "base/basictypes.h" + #include "base/port.h" + #include <assert.h> + #include <string> + #include <vector> + vector<string> hajoa;''', '') + self.assert_include_what_you_use( + '''#include <string> + int i = numeric_limits<int>::max() + ''', + 'Add #include <limits> for numeric_limits<>' + ' [build/include_what_you_use] [4]') + self.assert_include_what_you_use( + '''#include <limits> + int i = numeric_limits<int>::max() + ''', + '') + + # Test the UpdateIncludeState code path. + mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"'] + message = self.perform_include_what_you_use( + '#include "config.h"\n' + '#include "blah/a.h"\n', + filename='blah/a.cpp', + io=MockIo(mock_header_contents)) + self.assertEquals(message, '') + + mock_header_contents = ['#include <set>'] + message = self.perform_include_what_you_use( + '''#include "config.h" + #include "blah/a.h" + + std::set<int> foo;''', + filename='blah/a.cpp', + io=MockIo(mock_header_contents)) + self.assertEquals(message, '') + + # If there's just a .cpp and the header can't be found then it's ok. + message = self.perform_include_what_you_use( + '''#include "config.h" + #include "blah/a.h" + + std::set<int> foo;''', + filename='blah/a.cpp') + self.assertEquals(message, '') + + # Make sure we find the headers with relative paths. + mock_header_contents = [''] + message = self.perform_include_what_you_use( + '''#include "config.h" + #include "%s/a.h" + + std::set<int> foo;''' % os.path.basename(os.getcwd()), + filename='a.cpp', + io=MockIo(mock_header_contents)) + self.assertEquals(message, 'Add #include <set> for set<> ' + '[build/include_what_you_use] [4]') + + def test_files_belong_to_same_module(self): + f = cpp_style.files_belong_to_same_module + self.assertEquals((True, ''), f('a.cpp', 'a.h')) + self.assertEquals((True, ''), f('base/google.cpp', 'base/google.h')) + self.assertEquals((True, ''), f('base/google_test.cpp', 'base/google.h')) + self.assertEquals((True, ''), + f('base/google_unittest.cpp', 'base/google.h')) + self.assertEquals((True, ''), + f('base/internal/google_unittest.cpp', + 'base/public/google.h')) + self.assertEquals((True, 'xxx/yyy/'), + f('xxx/yyy/base/internal/google_unittest.cpp', + 'base/public/google.h')) + self.assertEquals((True, 'xxx/yyy/'), + f('xxx/yyy/base/google_unittest.cpp', + 'base/public/google.h')) + self.assertEquals((True, ''), + f('base/google_unittest.cpp', 'base/google-inl.h')) + self.assertEquals((True, '/home/build/google3/'), + f('/home/build/google3/base/google.cpp', 'base/google.h')) + + self.assertEquals((False, ''), + f('/home/build/google3/base/google.cpp', 'basu/google.h')) + self.assertEquals((False, ''), f('a.cpp', 'b.h')) + + def test_cleanse_line(self): + self.assertEquals('int foo = 0; ', + cpp_style.cleanse_comments('int foo = 0; // danger!')) + self.assertEquals('int o = 0;', + cpp_style.cleanse_comments('int /* foo */ o = 0;')) + self.assertEquals('foo(int a, int b);', + cpp_style.cleanse_comments('foo(int a /* abc */, int b);')) + self.assertEqual('f(a, b);', + cpp_style.cleanse_comments('f(a, /* name */ b);')) + self.assertEqual('f(a, b);', + cpp_style.cleanse_comments('f(a /* name */, b);')) + self.assertEqual('f(a, b);', + cpp_style.cleanse_comments('f(a, /* name 
*/b);')) + + def test_multi_line_comments(self): + # missing explicit is bad + self.assert_multi_line_lint( + r'''int a = 0; + /* multi-liner + class Foo { + Foo(int f); // should cause a lint warning in code + } + */ ''', + '') + self.assert_multi_line_lint( + r'''/* int a = 0; multi-liner + static const int b = 0;''', + ['Could not find end of multi-line comment' + ' [readability/multiline_comment] [5]', + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. Consider replacing these with ' + '//-style comments, with #if 0...#endif, or with more clearly ' + 'structured multi-line comments. [readability/multiline_comment] [5]']) + self.assert_multi_line_lint(r''' /* multi-line comment''', + ['Could not find end of multi-line comment' + ' [readability/multiline_comment] [5]', + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. Consider replacing these with ' + '//-style comments, with #if 0...#endif, or with more clearly ' + 'structured multi-line comments. [readability/multiline_comment] [5]']) + self.assert_multi_line_lint(r''' // /* comment, but not multi-line''', '') + + def test_multiline_strings(self): + multiline_string_error_message = ( + 'Multi-line string ("...") found. This lint script doesn\'t ' + 'do well with such strings, and may give bogus warnings. They\'re ' + 'ugly and unnecessary, and you should use concatenation instead".' + ' [readability/multiline_string] [5]') + + file_path = 'mydir/foo.cpp' + + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'cpp', + ['const char* str = "This is a\\', + ' multiline string.";'], + error_collector) + self.assertEquals( + 2, # One per line. + error_collector.result_list().count(multiline_string_error_message)) + + # Test non-explicit single-argument constructors + def test_explicit_single_argument_constructors(self): + # missing explicit is bad + self.assert_multi_line_lint( + '''class Foo { + Foo(int f); + };''', + 'Single-argument constructors should be marked explicit.' + ' [runtime/explicit] [5]') + # missing explicit is bad, even with whitespace + self.assert_multi_line_lint( + '''class Foo { + Foo (int f); + };''', + ['Extra space before ( in function call [whitespace/parens] [4]', + 'Single-argument constructors should be marked explicit.' + ' [runtime/explicit] [5]']) + # missing explicit, with distracting comment, is still bad + self.assert_multi_line_lint( + '''class Foo { + Foo(int f); // simpler than Foo(blargh, blarg) + };''', + 'Single-argument constructors should be marked explicit.' + ' [runtime/explicit] [5]') + # missing explicit, with qualified classname + self.assert_multi_line_lint( + '''class Qualifier::AnotherOne::Foo { + Foo(int f); + };''', + 'Single-argument constructors should be marked explicit.' + ' [runtime/explicit] [5]') + # structs are caught as well. + self.assert_multi_line_lint( + '''struct Foo { + Foo(int f); + };''', + 'Single-argument constructors should be marked explicit.' + ' [runtime/explicit] [5]') + # Templatized classes are caught as well. + self.assert_multi_line_lint( + '''template<typename T> class Foo { + Foo(int f); + };''', + 'Single-argument constructors should be marked explicit.' 
+ ' [runtime/explicit] [5]') + # proper style is okay + self.assert_multi_line_lint( + '''class Foo { + explicit Foo(int f); + };''', + '') + # two argument constructor is okay + self.assert_multi_line_lint( + '''class Foo { + Foo(int f, int b); + };''', + '') + # two argument constructor, across two lines, is okay + self.assert_multi_line_lint( + '''class Foo { + Foo(int f, + int b); + };''', + '') + # non-constructor (but similar name), is okay + self.assert_multi_line_lint( + '''class Foo { + aFoo(int f); + };''', + '') + # constructor with void argument is okay + self.assert_multi_line_lint( + '''class Foo { + Foo(void); + };''', + '') + # single argument method is okay + self.assert_multi_line_lint( + '''class Foo { + Bar(int b); + };''', + '') + # comments should be ignored + self.assert_multi_line_lint( + '''class Foo { + // Foo(int f); + };''', + '') + # single argument function following class definition is okay + # (okay, it's not actually valid, but we don't want a false positive) + self.assert_multi_line_lint( + '''class Foo { + Foo(int f, int b); + }; + Foo(int f);''', + '') + # single argument function is okay + self.assert_multi_line_lint( + '''static Foo(int f);''', + '') + # single argument copy constructor is okay. + self.assert_multi_line_lint( + '''class Foo { + Foo(const Foo&); + };''', + '') + self.assert_multi_line_lint( + '''class Foo { + Foo(Foo&); + };''', + '') + + def test_slash_star_comment_on_single_line(self): + self.assert_multi_line_lint( + '''/* static */ Foo(int f);''', + '') + self.assert_multi_line_lint( + '''/*/ static */ Foo(int f);''', + '') + self.assert_multi_line_lint( + '''/*/ static Foo(int f);''', + 'Could not find end of multi-line comment' + ' [readability/multiline_comment] [5]') + self.assert_multi_line_lint( + ''' /*/ static Foo(int f);''', + 'Could not find end of multi-line comment' + ' [readability/multiline_comment] [5]') + self.assert_multi_line_lint( + ''' /**/ static Foo(int f);''', + '') + + # Test suspicious usage of "if" like this: + # if (a == b) { + # DoSomething(); + # } if (a == c) { // Should be "else if". + # DoSomething(); // This gets called twice if a == b && a == c. + # } + def test_suspicious_usage_of_if(self): + self.assert_lint( + ' if (a == b) {', + '') + self.assert_lint( + ' } if (a == b) {', + 'Did you mean "else if"? If not, start a new line for "if".' + ' [readability/braces] [4]') + + # Test suspicious usage of memset. Specifically, a 0 + # as the final argument is almost certainly an error. + def test_suspicious_usage_of_memset(self): + # Normal use is okay. + self.assert_lint( + ' memset(buf, 0, sizeof(buf))', + '') + + # A 0 as the final argument is almost certainly an error. + self.assert_lint( + ' memset(buf, sizeof(buf), 0)', + 'Did you mean "memset(buf, 0, sizeof(buf))"?' + ' [runtime/memset] [4]') + self.assert_lint( + ' memset(buf, xsize * ysize, 0)', + 'Did you mean "memset(buf, 0, xsize * ysize)"?' + ' [runtime/memset] [4]') + + # There is legitimate test code that uses this form. + # This is okay since the second argument is a literal. 
+ self.assert_lint( + " memset(buf, 'y', 0)", + '') + self.assert_lint( + ' memset(buf, 4, 0)', + '') + self.assert_lint( + ' memset(buf, -1, 0)', + '') + self.assert_lint( + ' memset(buf, 0xF1, 0)', + '') + self.assert_lint( + ' memset(buf, 0xcd, 0)', + '') + + def test_check_posix_threading(self): + self.assert_lint('sctime_r()', '') + self.assert_lint('strtok_r()', '') + self.assert_lint(' strtok_r(foo, ba, r)', '') + self.assert_lint('brand()', '') + self.assert_lint('_rand()', '') + self.assert_lint('.rand()', '') + self.assert_lint('>rand()', '') + self.assert_lint('rand()', + 'Consider using rand_r(...) instead of rand(...)' + ' for improved thread safety.' + ' [runtime/threadsafe_fn] [2]') + self.assert_lint('strtok()', + 'Consider using strtok_r(...) ' + 'instead of strtok(...)' + ' for improved thread safety.' + ' [runtime/threadsafe_fn] [2]') + + # Test potential format string bugs like printf(foo). + def test_format_strings(self): + self.assert_lint('printf("foo")', '') + self.assert_lint('printf("foo: %s", foo)', '') + self.assert_lint('DocidForPrintf(docid)', '') # Should not trigger. + self.assert_lint( + 'printf(foo)', + 'Potential format string bug. Do printf("%s", foo) instead.' + ' [runtime/printf] [4]') + self.assert_lint( + 'printf(foo.c_str())', + 'Potential format string bug. ' + 'Do printf("%s", foo.c_str()) instead.' + ' [runtime/printf] [4]') + self.assert_lint( + 'printf(foo->c_str())', + 'Potential format string bug. ' + 'Do printf("%s", foo->c_str()) instead.' + ' [runtime/printf] [4]') + self.assert_lint( + 'StringPrintf(foo)', + 'Potential format string bug. Do StringPrintf("%s", foo) instead.' + '' + ' [runtime/printf] [4]') + + # Variable-length arrays are not permitted. + def test_variable_length_array_detection(self): + errmsg = ('Do not use variable-length arrays. Use an appropriately named ' + "('k' followed by CamelCase) compile-time constant for the size." 
+ ' [runtime/arrays] [1]') + + self.assert_lint('int a[any_old_variable];', errmsg) + self.assert_lint('int doublesize[some_var * 2];', errmsg) + self.assert_lint('int a[afunction()];', errmsg) + self.assert_lint('int a[function(kMaxFooBars)];', errmsg) + self.assert_lint('bool aList[items_->size()];', errmsg) + self.assert_lint('namespace::Type buffer[len+1];', errmsg) + + self.assert_lint('int a[64];', '') + self.assert_lint('int a[0xFF];', '') + self.assert_lint('int first[256], second[256];', '') + self.assert_lint('int arrayName[kCompileTimeConstant];', '') + self.assert_lint('char buf[somenamespace::kBufSize];', '') + self.assert_lint('int arrayName[ALL_CAPS];', '') + self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '') + self.assert_lint('int a[kMaxStrLen + 1];', '') + self.assert_lint('int a[sizeof(foo)];', '') + self.assert_lint('int a[sizeof(*foo)];', '') + self.assert_lint('int a[sizeof foo];', '') + self.assert_lint('int a[sizeof(struct Foo)];', '') + self.assert_lint('int a[128 - sizeof(const bar)];', '') + self.assert_lint('int a[(sizeof(foo) * 4)];', '') + self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around / [whitespace/operators] [3]') + self.assert_lint('delete a[some_var];', '') + self.assert_lint('return a[some_var];', '') + + # Brace usage + def test_braces(self): + # Braces shouldn't be followed by a ; unless they're defining a struct + # or initializing an array + self.assert_lint('int a[3] = { 1, 2, 3 };', '') + self.assert_lint( + '''const int foo[] = + {1, 2, 3 };''', + '') + # For single line, unmatched '}' with a ';' is ignored (not enough context) + self.assert_multi_line_lint( + '''int a[3] = { 1, + 2, + 3 };''', + '') + self.assert_multi_line_lint( + '''int a[2][3] = { { 1, 2 }, + { 3, 4 } };''', + '') + self.assert_multi_line_lint( + '''int a[2][3] = + { { 1, 2 }, + { 3, 4 } };''', + '') + + # CHECK/EXPECT_TRUE/EXPECT_FALSE replacements + def test_check_check(self): + self.assert_lint('CHECK(x == 42)', + 'Consider using CHECK_EQ instead of CHECK(a == b)' + ' [readability/check] [2]') + self.assert_lint('CHECK(x != 42)', + 'Consider using CHECK_NE instead of CHECK(a != b)' + ' [readability/check] [2]') + self.assert_lint('CHECK(x >= 42)', + 'Consider using CHECK_GE instead of CHECK(a >= b)' + ' [readability/check] [2]') + self.assert_lint('CHECK(x > 42)', + 'Consider using CHECK_GT instead of CHECK(a > b)' + ' [readability/check] [2]') + self.assert_lint('CHECK(x <= 42)', + 'Consider using CHECK_LE instead of CHECK(a <= b)' + ' [readability/check] [2]') + self.assert_lint('CHECK(x < 42)', + 'Consider using CHECK_LT instead of CHECK(a < b)' + ' [readability/check] [2]') + + self.assert_lint('DCHECK(x == 42)', + 'Consider using DCHECK_EQ instead of DCHECK(a == b)' + ' [readability/check] [2]') + self.assert_lint('DCHECK(x != 42)', + 'Consider using DCHECK_NE instead of DCHECK(a != b)' + ' [readability/check] [2]') + self.assert_lint('DCHECK(x >= 42)', + 'Consider using DCHECK_GE instead of DCHECK(a >= b)' + ' [readability/check] [2]') + self.assert_lint('DCHECK(x > 42)', + 'Consider using DCHECK_GT instead of DCHECK(a > b)' + ' [readability/check] [2]') + self.assert_lint('DCHECK(x <= 42)', + 'Consider using DCHECK_LE instead of DCHECK(a <= b)' + ' [readability/check] [2]') + self.assert_lint('DCHECK(x < 42)', + 'Consider using DCHECK_LT instead of DCHECK(a < b)' + ' [readability/check] [2]') + + self.assert_lint( + 'EXPECT_TRUE("42" == x)', + 'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)' + ' 
[readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE("42" != x)', + 'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE(+42 >= x)', + 'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE_M(-42 > x)', + 'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE_M(42U <= x)', + 'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE_M(42L < x)', + 'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)' + ' [readability/check] [2]') + + self.assert_lint( + 'EXPECT_FALSE(x == 42)', + 'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_FALSE(x != 42)', + 'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_FALSE(x >= 42)', + 'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)' + ' [readability/check] [2]') + self.assert_lint( + 'ASSERT_FALSE(x > 42)', + 'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)' + ' [readability/check] [2]') + self.assert_lint( + 'ASSERT_FALSE(x <= 42)', + 'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)' + ' [readability/check] [2]') + self.assert_lint( + 'ASSERT_FALSE_M(x < 42)', + 'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)' + ' [readability/check] [2]') + + self.assert_lint('CHECK(some_iterator == obj.end())', '') + self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '') + self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '') + + self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '') + self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '') + + self.assert_lint('CHECK(x<42)', + ['Missing spaces around <' + ' [whitespace/operators] [3]', + 'Consider using CHECK_LT instead of CHECK(a < b)' + ' [readability/check] [2]']) + self.assert_lint('CHECK(x>42)', + 'Consider using CHECK_GT instead of CHECK(a > b)' + ' [readability/check] [2]') + + self.assert_lint( + ' EXPECT_TRUE(42 < x) // Random comment.', + 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)' + ' [readability/check] [2]') + self.assert_lint( + 'EXPECT_TRUE( 42 < x )', + ['Extra space after ( in function call' + ' [whitespace/parens] [4]', + 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)' + ' [readability/check] [2]']) + self.assert_lint( + 'CHECK("foo" == "foo")', + 'Consider using CHECK_EQ instead of CHECK(a == b)' + ' [readability/check] [2]') + + self.assert_lint('CHECK_EQ("foo", "foo")', '') + + def test_brace_at_begin_of_line(self): + self.assert_lint('{', + 'This { should be at the end of the previous line' + ' [whitespace/braces] [4]') + self.assert_multi_line_lint( + '#endif\n' + '{\n' + '}\n', + '') + self.assert_multi_line_lint( + 'if (condition) {', + '') + self.assert_multi_line_lint( + ' MACRO1(macroArg) {', + '') + self.assert_multi_line_lint( + 'ACCESSOR_GETTER(MessageEventPorts) {', + 'Place brace on its own line for function definitions. [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'int foo() {', + 'Place brace on its own line for function definitions. [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'int foo() const {', + 'Place brace on its own line for function definitions. 
[whitespace/braces] [4]') + self.assert_multi_line_lint( + 'int foo() const\n' + '{\n' + '}\n', + '') + self.assert_multi_line_lint( + 'if (condition\n' + ' && condition2\n' + ' && condition3) {\n' + '}\n', + '') + + def test_mismatching_spaces_in_parens(self): + self.assert_lint('if (foo ) {', 'Extra space before ) in if' + ' [whitespace/parens] [5]') + self.assert_lint('switch ( foo) {', 'Extra space after ( in switch' + ' [whitespace/parens] [5]') + self.assert_lint('for (foo; ba; bar ) {', 'Extra space before ) in for' + ' [whitespace/parens] [5]') + self.assert_lint('for ((foo); (ba); (bar) ) {', 'Extra space before ) in for' + ' [whitespace/parens] [5]') + self.assert_lint('for (; foo; bar) {', '') + self.assert_lint('for (; (foo); (bar)) {', '') + self.assert_lint('for ( ; foo; bar) {', '') + self.assert_lint('for ( ; (foo); (bar)) {', '') + self.assert_lint('for ( ; foo; bar ) {', 'Extra space before ) in for' + ' [whitespace/parens] [5]') + self.assert_lint('for ( ; (foo); (bar) ) {', 'Extra space before ) in for' + ' [whitespace/parens] [5]') + self.assert_lint('for (foo; bar; ) {', '') + self.assert_lint('for ((foo); (bar); ) {', '') + self.assert_lint('foreach (foo, foos ) {', 'Extra space before ) in foreach' + ' [whitespace/parens] [5]') + self.assert_lint('foreach ( foo, foos) {', 'Extra space after ( in foreach' + ' [whitespace/parens] [5]') + self.assert_lint('while ( foo) {', 'Extra space after ( in while' + ' [whitespace/parens] [5]') + + def test_spacing_for_fncall(self): + self.assert_lint('if (foo) {', '') + self.assert_lint('for (foo;bar;baz) {', '') + self.assert_lint('foreach (foo, foos) {', '') + self.assert_lint('while (foo) {', '') + self.assert_lint('switch (foo) {', '') + self.assert_lint('new (RenderArena()) RenderInline(document())', '') + self.assert_lint('foo( bar)', 'Extra space after ( in function call' + ' [whitespace/parens] [4]') + self.assert_lint('foobar( \\', '') + self.assert_lint('foobar( \\', '') + self.assert_lint('( a + b)', 'Extra space after (' + ' [whitespace/parens] [2]') + self.assert_lint('((a+b))', '') + self.assert_lint('foo (foo)', 'Extra space before ( in function call' + ' [whitespace/parens] [4]') + self.assert_lint('typedef foo (*foo)(foo)', '') + self.assert_lint('typedef foo (*foo12bar_)(foo)', '') + self.assert_lint('typedef foo (Foo::*bar)(foo)', '') + self.assert_lint('foo (Foo::*bar)(', + 'Extra space before ( in function call' + ' [whitespace/parens] [4]') + self.assert_lint('typedef foo (Foo::*bar)(', '') + self.assert_lint('(foo)(bar)', '') + self.assert_lint('Foo (*foo)(bar)', '') + self.assert_lint('Foo (*foo)(Bar bar,', '') + self.assert_lint('char (*p)[sizeof(foo)] = &foo', '') + self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '') + self.assert_lint('const char32 (*table[])[6];', '') + + def test_spacing_before_braces(self): + self.assert_lint('if (foo){', 'Missing space before {' + ' [whitespace/braces] [5]') + self.assert_lint('for{', 'Missing space before {' + ' [whitespace/braces] [5]') + self.assert_lint('for {', '') + self.assert_lint('EXPECT_DEBUG_DEATH({', '') + + def test_spacing_around_else(self): + self.assert_lint('}else {', 'Missing space before else' + ' [whitespace/braces] [5]') + self.assert_lint('} else{', 'Missing space before {' + ' [whitespace/braces] [5]') + self.assert_lint('} else {', '') + self.assert_lint('} else if', '') + + def test_spacing_for_binary_ops(self): + self.assert_lint('if (foo<=bar) {', 'Missing spaces around <=' + ' [whitespace/operators] [3]') + self.assert_lint('if 
(foo<bar) {', 'Missing spaces around <' + ' [whitespace/operators] [3]') + self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <' + ' [whitespace/operators] [3]') + self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <' + ' [whitespace/operators] [3]') + self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <' + ' [whitespace/operators] [3]') + self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '') + self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<=' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo> t -= b;', '') + self.assert_lint('a<Foo> t += b;', '') + self.assert_lint('a<Foo*> t *= b;', '') + self.assert_lint('a<Foo*> t /= b;', '') + self.assert_lint('a<Foo*> t |= b;', '') + self.assert_lint('a<Foo*> t &= b;', '') + self.assert_lint('a<Foo*> t <<= b;', '') + self.assert_lint('a<Foo*> t >>= b;', '') + self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /' + ' [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t <<= b/c; //Test', [ + 'Should have a space between // and comment ' + '[whitespace/comments] [4]', 'Missing' + ' spaces around / [whitespace/operators] [3]']) + self.assert_lint('a<Foo*> t <<= b||c; //Test', ['One space before end' + ' of line comments [whitespace/comments] [5]', + 'Should have a space between // and comment ' + '[whitespace/comments] [4]', + 'Missing spaces around || [whitespace/operators] [3]']) + self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around' + ' && [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around' + ' && [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around' + ' && [whitespace/operators] [3]') + self.assert_lint('a<Foo*> t <<= b && *c; // Test', '') + self.assert_lint('a<Foo*> t <<= b && &c; // Test', '') + self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line ' + '/*...*/-style comment found. Lint may give bogus ' + 'warnings. Consider replacing these with //-style' + ' comments, with #if 0...#endif, or with more clearly' + ' structured multi-line comments. 
[readability/multiline_comment] [5]') + self.assert_lint('a<Foo&> t <<= &b | &c;', '') + self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '') + self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '') + self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]') + self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]') + self.assert_lint('if (a = b == 1)', '') + self.assert_lint('a = 1 << 20', '') + self.assert_multi_line_lint('#include <sys/io.h>\n', '') + self.assert_multi_line_lint('#import <foo/bar.h>\n', '') + + def test_operator_methods(self): + self.assert_lint('String operator+(const String&, const String&);', '') + self.assert_lint('bool operator==(const String&, const String&);', '') + self.assert_lint('String& operator-=(const String&, const String&);', '') + self.assert_lint('String& operator+=(const String&, const String&);', '') + self.assert_lint('String& operator*=(const String&, const String&);', '') + self.assert_lint('String& operator%=(const String&, const String&);', '') + self.assert_lint('String& operator&=(const String&, const String&);', '') + self.assert_lint('String& operator<<=(const String&, const String&);', '') + self.assert_lint('String& operator>>=(const String&, const String&);', '') + self.assert_lint('String& operator|=(const String&, const String&);', '') + self.assert_lint('String& operator^=(const String&, const String&);', '') + + def test_spacing_before_last_semicolon(self): + self.assert_lint('call_function() ;', + 'Extra space before last semicolon. If this should be an ' + 'empty statement, use { } instead.' + ' [whitespace/semicolon] [5]') + self.assert_lint('while (true) ;', + 'Extra space before last semicolon. If this should be an ' + 'empty statement, use { } instead.' + ' [whitespace/semicolon] [5]') + self.assert_lint('default:;', + 'Semicolon defining empty statement. Use { } instead.' + ' [whitespace/semicolon] [5]') + self.assert_lint(' ;', + 'Line contains only semicolon. If this should be an empty ' + 'statement, use { } instead.' + ' [whitespace/semicolon] [5]') + self.assert_lint('for (int i = 0; ;', '') + + # Static or global STL strings. + def test_static_or_global_stlstrings(self): + self.assert_lint('string foo;', + 'For a static/global string constant, use a C style ' + 'string instead: "char foo[]".' + ' [runtime/string] [4]') + self.assert_lint('string kFoo = "hello"; // English', + 'For a static/global string constant, use a C style ' + 'string instead: "char kFoo[]".' + ' [runtime/string] [4]') + self.assert_lint('static string foo;', + 'For a static/global string constant, use a C style ' + 'string instead: "static char foo[]".' + ' [runtime/string] [4]') + self.assert_lint('static const string foo;', + 'For a static/global string constant, use a C style ' + 'string instead: "static const char foo[]".' + ' [runtime/string] [4]') + self.assert_lint('string Foo::bar;', + 'For a static/global string constant, use a C style ' + 'string instead: "char Foo::bar[]".' + ' [runtime/string] [4]') + # Rare case. + self.assert_lint('string foo("foobar");', + 'For a static/global string constant, use a C style ' + 'string instead: "char foo[]".' + ' [runtime/string] [4]') + # Should not catch local or member variables. + self.assert_lint(' string foo', '') + # Should not catch functions. 
+ self.assert_lint('string EmptyString() { return ""; }', '') + self.assert_lint('string EmptyString () { return ""; }', '') + self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n' + ' VeryLongNameType veryLongNameVariable) {}', '') + self.assert_lint('template<>\n' + 'string FunctionTemplateSpecialization<SomeType>(\n' + ' int x) { return ""; }', '') + self.assert_lint('template<>\n' + 'string FunctionTemplateSpecialization<vector<A::B>* >(\n' + ' int x) { return ""; }', '') + + # should not catch methods of template classes. + self.assert_lint('string Class<Type>::Method() const\n' + '{\n' + ' return "";\n' + '}\n', '') + self.assert_lint('string Class<Type>::Method(\n' + ' int arg) const\n' + '{\n' + ' return "";\n' + '}\n', '') + + def test_no_spaces_in_function_calls(self): + self.assert_lint('TellStory(1, 3);', + '') + self.assert_lint('TellStory(1, 3 );', + 'Extra space before )' + ' [whitespace/parens] [2]') + self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);', + '') + self.assert_multi_line_lint('#endif\n );', + '') + + def test_two_spaces_between_code_and_comments(self): + self.assert_lint('} // namespace foo', + '') + self.assert_lint('}// namespace foo', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + self.assert_lint('printf("foo"); // Outside quotes.', + '') + self.assert_lint('int i = 0; // Having one space is fine.','') + self.assert_lint('int i = 0; // Having two spaces is bad.', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + self.assert_lint('int i = 0; // Having three spaces is bad.', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + self.assert_lint('// Top level comment', '') + self.assert_lint(' // Line starts with four spaces.', '') + self.assert_lint('foo();\n' + '{ // A scope is opening.', '') + self.assert_lint(' foo();\n' + ' { // An indented scope is opening.', '') + self.assert_lint('if (foo) { // not a pure scope', + '') + self.assert_lint('printf("// In quotes.")', '') + self.assert_lint('printf("\\"%s // In quotes.")', '') + self.assert_lint('printf("%s", "// In quotes.")', '') + + def test_space_after_comment_marker(self): + self.assert_lint('//', '') + self.assert_lint('//x', 'Should have a space between // and comment' + ' [whitespace/comments] [4]') + self.assert_lint('// x', '') + self.assert_lint('//----', '') + self.assert_lint('//====', '') + self.assert_lint('//////', '') + self.assert_lint('////// x', '') + self.assert_lint('/// x', '') + self.assert_lint('////x', 'Should have a space between // and comment' + ' [whitespace/comments] [4]') + + def test_newline_at_eof(self): + def do_test(self, data, is_missing_eof): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', data.split('\n'), + error_collector) + # The warning appears only once. + self.assertEquals( + int(is_missing_eof), + error_collector.results().count( + 'Could not find a newline character at the end of the file.' + ' [whitespace/ending_newline] [5]')) + + do_test(self, '// Newline\n// at EOF\n', False) + do_test(self, '// No newline\n// at EOF', True) + + def test_invalid_utf8(self): + def do_test(self, raw_bytes, has_invalid_utf8): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', + unicode(raw_bytes, 'utf8', 'replace').split('\n'), + error_collector) + # The warning appears only once. 
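# The idea this test exercises, as a standalone sketch (the helper name is
# illustrative; Python 2 style to match the surrounding code): decoding with
# errors='replace' turns every invalid byte sequence into U+FFFD, so the
# presence of the replacement character signals bad UTF-8 -- or, as the last
# case below notes, a literal replacement character in the input.
def contains_invalid_utf8(raw_bytes):
    return u'\ufffd' in unicode(raw_bytes, 'utf8', 'replace')

# contains_invalid_utf8('\xe9\x8e\xbd\n')   -> False  (well-formed sequence)
# contains_invalid_utf8('\xe9x\x8e\xbd\n')  -> True   (broken sequence)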
+ self.assertEquals( + int(has_invalid_utf8), + error_collector.results().count( + 'Line contains invalid UTF-8' + ' (or Unicode replacement character).' + ' [readability/utf8] [5]')) + + do_test(self, 'Hello world\n', False) + do_test(self, '\xe9\x8e\xbd\n', False) + do_test(self, '\xe9x\x8e\xbd\n', True) + # This is the encoding of the replacement character itself (which + # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')). + do_test(self, '\xef\xbf\xbd\n', True) + + def test_is_blank_line(self): + self.assert_(cpp_style.is_blank_line('')) + self.assert_(cpp_style.is_blank_line(' ')) + self.assert_(cpp_style.is_blank_line(' \t\r\n')) + self.assert_(not cpp_style.is_blank_line('int a;')) + self.assert_(not cpp_style.is_blank_line('{')) + + def test_blank_lines_check(self): + self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1) + self.assert_blank_lines_check([' if (foo) {\n', '\n', ' }\n'], 1, 1) + self.assert_blank_lines_check( + ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0) + self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0) + self.assert_blank_lines_check(['\n', ' if (foo) { return 0; }\n', '\n'], 0, 0) + + def test_allow_blank_line_before_closing_namespace(self): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', + ['namespace {', '', '} // namespace'], + error_collector) + self.assertEquals(0, error_collector.results().count( + 'Blank line at the end of a code block. Is this needed?' + ' [whitespace/blank_line] [3]')) + + def test_allow_blank_line_before_if_else_chain(self): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', + ['if (hoge) {', + '', # No warning + '} else if (piyo) {', + '', # No warning + '} else if (piyopiyo) {', + ' hoge = true;', # No warning + '} else {', + '', # Warning on this line + '}'], + error_collector) + self.assertEquals(1, error_collector.results().count( + 'Blank line at the end of a code block. Is this needed?' 
+ ' [whitespace/blank_line] [3]')) + + def test_else_on_same_line_as_closing_braces(self): + error_collector = ErrorCollector(self.assert_) + self.process_file_data('foo.cpp', 'cpp', + ['if (hoge) {', + '', + '}', + ' else {' # Warning on this line + '', + '}'], + error_collector) + self.assertEquals(1, error_collector.results().count( + 'An else should appear on the same line as the preceding }' + ' [whitespace/newline] [4]')) + + def test_else_clause_not_on_same_line_as_else(self): + self.assert_lint(' else DoSomethingElse();', + 'Else clause should never be on same line as else ' + '(use 2 lines) [whitespace/newline] [4]') + self.assert_lint(' else ifDoSomethingElse();', + 'Else clause should never be on same line as else ' + '(use 2 lines) [whitespace/newline] [4]') + self.assert_lint(' else if (blah) {', '') + self.assert_lint(' variable_ends_in_else = true;', '') + + def test_comma(self): + self.assert_lint('a = f(1,2);', + 'Missing space after , [whitespace/comma] [3]') + self.assert_lint('int tmp=a,a=b,b=tmp;', + ['Missing spaces around = [whitespace/operators] [4]', + 'Missing space after , [whitespace/comma] [3]']) + self.assert_lint('f(a, /* name */ b);', '') + self.assert_lint('f(a, /* name */b);', '') + + def test_declaration(self): + self.assert_lint('int a;', '') + self.assert_lint('int a;', 'Extra space between int and a [whitespace/declaration] [3]') + self.assert_lint('int* a;', 'Extra space between int* and a [whitespace/declaration] [3]') + self.assert_lint('else if { }', '') + self.assert_lint('else if { }', 'Extra space between else and if [whitespace/declaration] [3]') + + def test_pointer_reference_marker_location(self): + self.assert_lint('int* b;', '', 'foo.cpp') + self.assert_lint('int *b;', + 'Declaration has space between type name and * in int *b [whitespace/declaration] [3]', + 'foo.cpp') + self.assert_lint('return *b;', '', 'foo.cpp') + self.assert_lint('delete *b;', '', 'foo.cpp') + self.assert_lint('int *b;', '', 'foo.c') + self.assert_lint('int* b;', + 'Declaration has space between * and variable name in int* b [whitespace/declaration] [3]', + 'foo.c') + self.assert_lint('int& b;', '', 'foo.cpp') + self.assert_lint('int &b;', + 'Declaration has space between type name and & in int &b [whitespace/declaration] [3]', + 'foo.cpp') + self.assert_lint('return &b;', '', 'foo.cpp') + + def test_indent(self): + self.assert_lint('static int noindent;', '') + self.assert_lint(' int fourSpaceIndent;', '') + self.assert_lint(' int oneSpaceIndent;', + 'Weird number of spaces at line-start. ' + 'Are you using a 4-space indent? [whitespace/indent] [3]') + self.assert_lint(' int threeSpaceIndent;', + 'Weird number of spaces at line-start. ' + 'Are you using a 4-space indent? [whitespace/indent] [3]') + self.assert_lint(' char* oneSpaceIndent = "public:";', + 'Weird number of spaces at line-start. ' + 'Are you using a 4-space indent? [whitespace/indent] [3]') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + + def test_label(self): + self.assert_lint('public:', + 'Labels should always be indented at least one space. ' + 'If this is a member-initializer list in a constructor, ' + 'the colon should be on the line after the definition ' + 'header. 
[whitespace/labels] [4]') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + self.assert_lint(' public:', '') + + def test_not_alabel(self): + self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '') + + def test_tab(self): + self.assert_lint('\tint a;', + 'Tab found; better to use spaces [whitespace/tab] [1]') + self.assert_lint('int a = 5;\t// set a to 5', + 'Tab found; better to use spaces [whitespace/tab] [1]') + + def test_unnamed_namespaces_in_headers(self): + self.assert_language_rules_check( + 'foo.h', 'namespace {', + 'Do not use unnamed namespaces in header files. See' + ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' + ' for more information. [build/namespaces] [4]') + # namespace registration macros are OK. + self.assert_language_rules_check('foo.h', 'namespace { \\', '') + # named namespaces are OK. + self.assert_language_rules_check('foo.h', 'namespace foo {', '') + self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '') + self.assert_language_rules_check('foo.cpp', 'namespace {', '') + self.assert_language_rules_check('foo.cpp', 'namespace foo {', '') + + def test_build_class(self): + # Test that the linter can parse to the end of class definitions, + # and that it will report when it can't. + # Use multi-line linter because it performs the ClassState check. + self.assert_multi_line_lint( + 'class Foo {', + 'Failed to find complete declaration of class Foo' + ' [build/class] [5]') + # Don't warn on forward declarations of various types. + self.assert_multi_line_lint( + 'class Foo;', + '') + self.assert_multi_line_lint( + '''struct Foo* + foo = NewFoo();''', + '') + # Here is an example where the linter gets confused, even though + # the code doesn't violate the style guide. + self.assert_multi_line_lint( + '''class Foo + #ifdef DERIVE_FROM_GOO + : public Goo { + #else + : public Hoo { + #endif + };''', + 'Failed to find complete declaration of class Foo' + ' [build/class] [5]') + + def test_build_end_comment(self): + # The crosstool compiler we currently use will fail to compile the + # code in this test, so we might consider removing the lint check. + self.assert_lint('#endif Not a comment', + 'Uncommented text after #endif is non-standard.' + ' Use a comment.' + ' [build/endif_comment] [5]') + + def test_build_forward_decl(self): + # The crosstool compiler we currently use will fail to compile the + # code in this test, so we might consider removing the lint check. + self.assert_lint('class Foo::Goo;', + 'Inner-style forward declarations are invalid.' + ' Remove this line.' + ' [build/forward_decl] [5]') + + def test_build_header_guard(self): + file_path = 'mydir/Foo.h' + + # We can't rely on our internal stuff to get a sane path on the open source + # side of things, so just parse out the suggested header guard. This + # doesn't allow us to test the suggested header guard, but it does let us + # test all the other header tests. + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', [], error_collector) + expected_guard = '' + matcher = re.compile( + 'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ') + for error in error_collector.result_list(): + matches = matcher.match(error) + if matches: + expected_guard = matches.group(1) + break + + # Make sure we extracted something for our header guard. 
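# The matcher above can be exercised on its own; the sample message below is
# made up for illustration, but it has the shape the regex expects, so
# group(1) recovers the suggested guard name.
import re

matcher = re.compile(
    r'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ')
sample = ('No #ifndef header guard found, suggested CPP variable is: Foo_h '
          ' [build/header_guard] [5]')
# matcher.match(sample).group(1) == 'Foo_h'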
+ self.assertNotEqual(expected_guard, '') + + # Wrong guard + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', + ['#ifndef FOO_H', '#define FOO_H'], error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + '#ifndef header guard has wrong style, please use: %s' + ' [build/header_guard] [5]' % expected_guard), + error_collector.result_list()) + + # No define + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard], error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + 'No #ifndef header guard found, suggested CPP variable is: %s' + ' [build/header_guard] [5]' % expected_guard), + error_collector.result_list()) + + # Mismatched define + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard, + '#define FOO_H'], + error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + 'No #ifndef header guard found, suggested CPP variable is: %s' + ' [build/header_guard] [5]' % expected_guard), + error_collector.result_list()) + + # No header guard errors + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', + ['#ifndef %s' % expected_guard, + '#define %s' % expected_guard, + '#endif // %s' % expected_guard], + error_collector) + for line in error_collector.result_list(): + if line.find('build/header_guard') != -1: + self.fail('Unexpected error: %s' % line) + + # Completely incorrect header guard + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'h', + ['#ifndef FOO', + '#define FOO', + '#endif // FOO'], + error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + '#ifndef header guard has wrong style, please use: %s' + ' [build/header_guard] [5]' % expected_guard), + error_collector.result_list()) + + # Special case for flymake + error_collector = ErrorCollector(self.assert_) + self.process_file_data('mydir/Foo_flymake.h', 'h', + ['#ifndef %s' % expected_guard, + '#define %s' % expected_guard, + '#endif // %s' % expected_guard], + error_collector) + for line in error_collector.result_list(): + if line.find('build/header_guard') != -1: + self.fail('Unexpected error: %s' % line) + + error_collector = ErrorCollector(self.assert_) + self.process_file_data('mydir/Foo_flymake.h', 'h', [], error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + 'No #ifndef header guard found, suggested CPP variable is: %s' + ' [build/header_guard] [5]' % expected_guard), + error_collector.result_list()) + + # Allow the WTF_ prefix for files in that directory. + header_guard_filter = FilterConfiguration(('-', '+build/header_guard')) + error_collector = ErrorCollector(self.assert_, header_guard_filter) + self.process_file_data('JavaScriptCore/wtf/TestName.h', 'h', + ['#ifndef WTF_TestName_h', '#define WTF_TestName_h'], + error_collector) + self.assertEquals(0, len(error_collector.result_list()), + error_collector.result_list()) + + # Also allow the non WTF_ prefix for files in that directory. + error_collector = ErrorCollector(self.assert_, header_guard_filter) + self.process_file_data('JavaScriptCore/wtf/TestName.h', 'h', + ['#ifndef TestName_h', '#define TestName_h'], + error_collector) + self.assertEquals(0, len(error_collector.result_list()), + error_collector.result_list()) + + # Verify that we suggest the WTF prefix version. 
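# A rough sketch of what the guard cases above boil down to (hypothetical
# helper, not cpp_style's implementation): compare the first #ifndef/#define
# pair in the header against the expected guard name.
def check_guard(lines, expected_guard):
    ifndef = define = None
    for line in lines:
        if ifndef is None and line.startswith('#ifndef '):
            ifndef = line.split(None, 1)[1]
        elif define is None and line.startswith('#define '):
            define = line.split(None, 1)[1]
    if ifndef is None or ifndef != define:
        return 'No #ifndef header guard found, suggested CPP variable is: %s' % expected_guard
    if ifndef != expected_guard:
        return '#ifndef header guard has wrong style, please use: %s' % expected_guard
    return ''

# check_guard(['#ifndef FOO_H', '#define FOO_H'], 'Foo_h')  -> wrong-style message
# check_guard(['#ifndef Foo_h', '#define Foo_h'], 'Foo_h')  -> ''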
+ error_collector = ErrorCollector(self.assert_, header_guard_filter) + self.process_file_data('JavaScriptCore/wtf/TestName.h', 'h', + ['#ifndef BAD_TestName_h', '#define BAD_TestName_h'], + error_collector) + self.assertEquals( + 1, + error_collector.result_list().count( + '#ifndef header guard has wrong style, please use: WTF_TestName_h' + ' [build/header_guard] [5]'), + error_collector.result_list()) + + def test_build_printf_format(self): + self.assert_lint( + r'printf("\%%d", value);', + '%, [, (, and { are undefined character escapes. Unescape them.' + ' [build/printf_format] [3]') + + self.assert_lint( + r'snprintf(buffer, sizeof(buffer), "\[%d", value);', + '%, [, (, and { are undefined character escapes. Unescape them.' + ' [build/printf_format] [3]') + + self.assert_lint( + r'fprintf(file, "\(%d", value);', + '%, [, (, and { are undefined character escapes. Unescape them.' + ' [build/printf_format] [3]') + + self.assert_lint( + r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);', + '%, [, (, and { are undefined character escapes. Unescape them.' + ' [build/printf_format] [3]') + + # Don't warn if double-slash precedes the symbol + self.assert_lint(r'printf("\\%%%d", value);', + '') + + def test_runtime_printf_format(self): + self.assert_lint( + r'fprintf(file, "%q", value);', + '%q in format strings is deprecated. Use %ll instead.' + ' [runtime/printf_format] [3]') + + self.assert_lint( + r'aprintf(file, "The number is %12q", value);', + '%q in format strings is deprecated. Use %ll instead.' + ' [runtime/printf_format] [3]') + + self.assert_lint( + r'printf(file, "The number is" "%-12q", value);', + '%q in format strings is deprecated. Use %ll instead.' + ' [runtime/printf_format] [3]') + + self.assert_lint( + r'printf(file, "The number is" "%+12q", value);', + '%q in format strings is deprecated. Use %ll instead.' + ' [runtime/printf_format] [3]') + + self.assert_lint( + r'printf(file, "The number is" "% 12q", value);', + '%q in format strings is deprecated. Use %ll instead.' + ' [runtime/printf_format] [3]') + + self.assert_lint( + r'snprintf(file, "Never mix %d and %1$d parmaeters!", value);', + '%N$ formats are unconventional. Try rewriting to avoid them.' + ' [runtime/printf_format] [2]') + + def assert_lintLogCodeOnError(self, code, expected_message): + # Special assert_lint which logs the input code on error. + result = self.perform_single_line_lint(code, 'foo.cpp') + if result != expected_message: + self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"' + % (code, result, expected_message)) + + def test_build_storage_class(self): + qualifiers = [None, 'const', 'volatile'] + signs = [None, 'signed', 'unsigned'] + types = ['void', 'char', 'int', 'float', 'double', + 'schar', 'int8', 'uint8', 'int16', 'uint16', + 'int32', 'uint32', 'int64', 'uint64'] + storage_classes = ['auto', 'extern', 'register', 'static', 'typedef'] + + build_storage_class_error_message = ( + 'Storage class (static, extern, typedef, etc) should be first.' + ' [build/storage_class] [5]') + + # Some explicit cases. Legal in C++, deprecated in C99. 
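# A minimal restatement of the rule under test (helper name and tokenisation
# are assumptions): when a declaration mentions a storage class keyword, that
# keyword should be its first token.  The explicit cases below and the
# randomised declarations further down both probe exactly this.
def storage_class_is_first(declaration):
    storage_classes = ('auto', 'extern', 'register', 'static', 'typedef')
    tokens = declaration.rstrip(';').split()
    present = [token for token in tokens if token in storage_classes]
    return not present or tokens[0] in storage_classes

# storage_class_is_first('static const int foo = 5')  -> True
# storage_class_is_first('const int static foo = 5')  -> False (flagged below)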
+ self.assert_lint('const int static foo = 5;', + build_storage_class_error_message) + + self.assert_lint('char static foo;', + build_storage_class_error_message) + + self.assert_lint('double const static foo = 2.0;', + build_storage_class_error_message) + + self.assert_lint('uint64 typedef unsignedLongLong;', + build_storage_class_error_message) + + self.assert_lint('int register foo = 0;', + build_storage_class_error_message) + + # Since there are a very large number of possibilities, randomly + # construct declarations. + # Make sure that the declaration is logged if there's an error. + # Seed generator with an integer for absolute reproducibility. + random.seed(25) + for unused_i in range(10): + # Build up random list of non-storage-class declaration specs. + other_decl_specs = [random.choice(qualifiers), random.choice(signs), + random.choice(types)] + # remove None + other_decl_specs = filter(lambda x: x is not None, other_decl_specs) + + # shuffle + random.shuffle(other_decl_specs) + + # insert storage class after the first + storage_class = random.choice(storage_classes) + insertion_point = random.randint(1, len(other_decl_specs)) + decl_specs = (other_decl_specs[0:insertion_point] + + [storage_class] + + other_decl_specs[insertion_point:]) + + self.assert_lintLogCodeOnError( + ' '.join(decl_specs) + ';', + build_storage_class_error_message) + + # but no error if storage class is first + self.assert_lintLogCodeOnError( + storage_class + ' ' + ' '.join(other_decl_specs), + '') + + def test_legal_copyright(self): + legal_copyright_message = ( + 'No copyright message found. ' + 'You should have a line: "Copyright [year] <Copyright Owner>"' + ' [legal/copyright] [5]') + + copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.' + + file_path = 'mydir/googleclient/foo.cpp' + + # There should be a copyright message in the first 10 lines + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'cpp', [], error_collector) + self.assertEquals( + 1, + error_collector.result_list().count(legal_copyright_message)) + + error_collector = ErrorCollector(self.assert_) + self.process_file_data( + file_path, 'cpp', + ['' for unused_i in range(10)] + [copyright_line], + error_collector) + self.assertEquals( + 1, + error_collector.result_list().count(legal_copyright_message)) + + # Test that warning isn't issued if Copyright line appears early enough. + error_collector = ErrorCollector(self.assert_) + self.process_file_data(file_path, 'cpp', [copyright_line], error_collector) + for message in error_collector.result_list(): + if message.find('legal/copyright') != -1: + self.fail('Unexpected error: %s' % message) + + error_collector = ErrorCollector(self.assert_) + self.process_file_data( + file_path, 'cpp', + ['' for unused_i in range(9)] + [copyright_line], + error_collector) + for message in error_collector.result_list(): + if message.find('legal/copyright') != -1: + self.fail('Unexpected error: %s' % message) + + def test_invalid_increment(self): + self.assert_lint('*count++;', + 'Changing pointer instead of value (or unused value of ' + 'operator*). 
[runtime/invalid_increment] [5]') + + +class CleansedLinesTest(unittest.TestCase): + def test_init(self): + lines = ['Line 1', + 'Line 2', + 'Line 3 // Comment test', + 'Line 4 "foo"'] + + clean_lines = cpp_style.CleansedLines(lines) + self.assertEquals(lines, clean_lines.raw_lines) + self.assertEquals(4, clean_lines.num_lines()) + + self.assertEquals(['Line 1', + 'Line 2', + 'Line 3 ', + 'Line 4 "foo"'], + clean_lines.lines) + + self.assertEquals(['Line 1', + 'Line 2', + 'Line 3 ', + 'Line 4 ""'], + clean_lines.elided) + + def test_init_empty(self): + clean_lines = cpp_style.CleansedLines([]) + self.assertEquals([], clean_lines.raw_lines) + self.assertEquals(0, clean_lines.num_lines()) + + def test_collapse_strings(self): + collapse = cpp_style.CleansedLines.collapse_strings + self.assertEquals('""', collapse('""')) # "" (empty) + self.assertEquals('"""', collapse('"""')) # """ (bad) + self.assertEquals('""', collapse('"xyz"')) # "xyz" (string) + self.assertEquals('""', collapse('"\\\""')) # "\"" (string) + self.assertEquals('""', collapse('"\'"')) # "'" (string) + self.assertEquals('"\"', collapse('"\"')) # "\" (bad) + self.assertEquals('""', collapse('"\\\\"')) # "\\" (string) + self.assertEquals('"', collapse('"\\\\\\"')) # "\\\" (bad) + self.assertEquals('""', collapse('"\\\\\\\\"')) # "\\\\" (string) + + self.assertEquals('\'\'', collapse('\'\'')) # '' (empty) + self.assertEquals('\'\'', collapse('\'a\'')) # 'a' (char) + self.assertEquals('\'\'', collapse('\'\\\'\'')) # '\'' (char) + self.assertEquals('\'', collapse('\'\\\'')) # '\' (bad) + self.assertEquals('', collapse('\\012')) # '\012' (char) + self.assertEquals('', collapse('\\xfF0')) # '\xfF0' (char) + self.assertEquals('', collapse('\\n')) # '\n' (char) + self.assertEquals('\#', collapse('\\#')) # '\#' (bad) + + self.assertEquals('StringReplace(body, "", "");', + collapse('StringReplace(body, "\\\\", "\\\\\\\\");')) + self.assertEquals('\'\' ""', + collapse('\'"\' "foo"')) + + +class OrderOfIncludesTest(CppStyleTestBase): + def setUp(self): + self.include_state = cpp_style._IncludeState() + + # Cheat os.path.abspath called in FileInfo class. + self.os_path_abspath_orig = os.path.abspath + os.path.abspath = lambda value: value + + def tearDown(self): + os.path.abspath = self.os_path_abspath_orig + + def test_try_drop_common_suffixes(self): + self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h')) + self.assertEqual('foo/bar/foo', + cpp_style._drop_common_suffixes('foo/bar/foo_inl.h')) + self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp')) + self.assertEqual('foo/foo_unusualinternal', + cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h')) + self.assertEqual('', + cpp_style._drop_common_suffixes('_test.cpp')) + self.assertEqual('test', + cpp_style._drop_common_suffixes('test.cpp')) + + +class OrderOfIncludesTest(CppStyleTestBase): + def setUp(self): + self.include_state = cpp_style._IncludeState() + + # Cheat os.path.abspath called in FileInfo class. 
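# The two lines that follow save os.path.abspath and replace it with an
# identity function, apparently so FileInfo sees the short test paths
# unchanged; tearDown restores the original.  The same save/patch/restore
# pattern as a self-contained context manager (illustrative only):
import os

class IdentityAbspath(object):
    def __enter__(self):
        self._original = os.path.abspath
        os.path.abspath = lambda value: value
        return self

    def __exit__(self, *exc_info):
        os.path.abspath = self._original
        return False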
+ self.os_path_abspath_orig = os.path.abspath + os.path.abspath = lambda value: value + + def tearDown(self): + os.path.abspath = self.os_path_abspath_orig + + def test_check_next_include_order__no_config(self): + self.assertEqual('Header file should not contain WebCore config.h.', + self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True)) + + def test_check_next_include_order__no_self(self): + self.assertEqual('Header file should not contain itself.', + self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True)) + # Test actual code to make sure that header types are correctly assigned. + self.assert_language_rules_check('Foo.h', + '#include "Foo.h"\n', + 'Header file should not contain itself. Should be: alphabetically sorted.' + ' [build/include_order] [4]') + self.assert_language_rules_check('FooBar.h', + '#include "Foo.h"\n', + '') + + def test_check_next_include_order__likely_then_config(self): + self.assertEqual('Found header this file implements before WebCore config.h.', + self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False)) + self.assertEqual('Found WebCore config.h after a header this file implements.', + self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) + + def test_check_next_include_order__other_then_config(self): + self.assertEqual('Found other header before WebCore config.h.', + self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False)) + self.assertEqual('Found WebCore config.h after other header.', + self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) + + def test_check_next_include_order__config_then_other_then_likely(self): + self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) + self.assertEqual('Found other header before a header this file implements.', + self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False)) + self.assertEqual('Found header this file implements after other header.', + self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False)) + + def test_check_alphabetical_include_order(self): + self.assert_language_rules_check('foo.h', + '#include "a.h"\n' + '#include "c.h"\n' + '#include "b.h"\n', + 'Alphabetical sorting problem. [build/include_order] [4]') + + self.assert_language_rules_check('foo.h', + '#include "a.h"\n' + '#include "b.h"\n' + '#include "c.h"\n', + '') + + self.assert_language_rules_check('foo.h', + '#include <assert.h>\n' + '#include "bar.h"\n', + 'Alphabetical sorting problem. [build/include_order] [4]') + + self.assert_language_rules_check('foo.h', + '#include "bar.h"\n' + '#include <assert.h>\n', + '') + + def test_check_line_break_after_own_header(self): + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '#include "bar.h"\n', + 'You should add a blank line after implementation file\'s own header. 
[build/include_order] [4]') + + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#include "bar.h"\n', + '') + + def test_check_preprocessor_in_include_section(self): + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#ifdef BAZ\n' + '#include "baz.h"\n' + '#else\n' + '#include "foobar.h"\n' + '#endif"\n' + '#include "bar.h"\n', # No flag because previous is in preprocessor section + '') + + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#ifdef BAZ\n' + '#include "baz.h"\n' + '#endif"\n' + '#include "bar.h"\n' + '#include "a.h"\n', # Should still flag this. + 'Alphabetical sorting problem. [build/include_order] [4]') + + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#ifdef BAZ\n' + '#include "baz.h"\n' + '#include "bar.h"\n' #Should still flag this + '#endif"\n', + 'Alphabetical sorting problem. [build/include_order] [4]') + + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#ifdef BAZ\n' + '#include "baz.h"\n' + '#endif"\n' + '#ifdef FOOBAR\n' + '#include "foobar.h"\n' + '#endif"\n' + '#include "bar.h"\n' + '#include "a.h"\n', # Should still flag this. + 'Alphabetical sorting problem. [build/include_order] [4]') + + # Check that after an already included error, the sorting rules still work. + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#include "foo.h"\n' + '#include "g.h"\n', + '"foo.h" already included at foo.cpp:2 [build/include] [4]') + + def test_check_wtf_includes(self): + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#include <wtf/Assertions.h>\n', + '') + self.assert_language_rules_check('foo.cpp', + '#include "config.h"\n' + '#include "foo.h"\n' + '\n' + '#include "wtf/Assertions.h"\n', + 'wtf includes should be <wtf/file.h> instead of "wtf/file.h".' + ' [build/include] [4]') + + def test_classify_include(self): + classify_include = cpp_style._classify_include + include_state = cpp_style._IncludeState() + self.assertEqual(cpp_style._CONFIG_HEADER, + classify_include('foo/foo.cpp', + 'config.h', + False, include_state)) + self.assertEqual(cpp_style._PRIMARY_HEADER, + classify_include('foo/internal/foo.cpp', + 'foo/public/foo.h', + False, include_state)) + self.assertEqual(cpp_style._PRIMARY_HEADER, + classify_include('foo/internal/foo.cpp', + 'foo/other/public/foo.h', + False, include_state)) + self.assertEqual(cpp_style._OTHER_HEADER, + classify_include('foo/internal/foo.cpp', + 'foo/other/public/foop.h', + False, include_state)) + self.assertEqual(cpp_style._OTHER_HEADER, + classify_include('foo/foo.cpp', + 'string', + True, include_state)) + self.assertEqual(cpp_style._PRIMARY_HEADER, + classify_include('fooCustom.cpp', + 'foo.h', + False, include_state)) + self.assertEqual(cpp_style._PRIMARY_HEADER, + classify_include('PrefixFooCustom.cpp', + 'Foo.h', + False, include_state)) + self.assertEqual(cpp_style._MOC_HEADER, + classify_include('foo.cpp', + 'foo.moc', + False, include_state)) + self.assertEqual(cpp_style._MOC_HEADER, + classify_include('foo.cpp', + 'moc_foo.cpp', + False, include_state)) + # Tricky example where both includes might be classified as primary. 
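# A deliberately simplified sketch of the classification idea (the suffix
# list and helper name are assumptions, not _classify_include's real logic):
# an include looks "primary" when, after stripping platform/Custom-style
# decorations, its base name matches the base name of the file being checked
# -- which is how two different includes can both look primary, as in the
# ResourceHandleWin case below.
import os

def is_primary_header(checked_file, include):
    def base(path):
        name = os.path.splitext(os.path.basename(path))[0]
        for suffix in ('Custom', 'Win', 'Wince'):   # assumed decoration list
            if name.endswith(suffix):
                name = name[:-len(suffix)]
        return name
    return base(checked_file) == base(include) or base(checked_file).endswith(base(include))

# is_primary_header('fooCustom.cpp', 'foo.h')                         -> True
# is_primary_header('ResourceHandleWin.cpp', 'ResourceHandle.h')      -> True
# is_primary_header('ResourceHandleWin.cpp', 'ResourceHandleWin.h')   -> True (both match)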
+ self.assert_language_rules_check('ScrollbarThemeWince.cpp', + '#include "config.h"\n' + '#include "ScrollbarThemeWince.h"\n' + '\n' + '#include "Scrollbar.h"\n', + '') + self.assert_language_rules_check('ScrollbarThemeWince.cpp', + '#include "config.h"\n' + '#include "Scrollbar.h"\n' + '\n' + '#include "ScrollbarThemeWince.h"\n', + 'Found header this file implements after a header this file implements.' + ' Should be: config.h, primary header, blank line, and then alphabetically sorted.' + ' [build/include_order] [4]') + self.assert_language_rules_check('ResourceHandleWin.cpp', + '#include "config.h"\n' + '#include "ResourceHandle.h"\n' + '\n' + '#include "ResourceHandleWin.h"\n', + '') + + def test_try_drop_common_suffixes(self): + self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h')) + self.assertEqual('foo/bar/foo', + cpp_style._drop_common_suffixes('foo/bar/foo_inl.h')) + self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp')) + self.assertEqual('foo/foo_unusualinternal', + cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h')) + self.assertEqual('', + cpp_style._drop_common_suffixes('_test.cpp')) + self.assertEqual('test', + cpp_style._drop_common_suffixes('test.cpp')) + self.assertEqual('test', + cpp_style._drop_common_suffixes('test.cpp')) + +class CheckForFunctionLengthsTest(CppStyleTestBase): + def setUp(self): + # Reducing these thresholds for the tests speeds up tests significantly. + self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER + self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER + + cpp_style._FunctionState._NORMAL_TRIGGER = 10 + cpp_style._FunctionState._TEST_TRIGGER = 25 + + def tearDown(self): + cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger + cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger + + # FIXME: Eliminate the need for this function. + def set_min_confidence(self, min_confidence): + """Set new test confidence and return old test confidence.""" + old_min_confidence = self.min_confidence + self.min_confidence = min_confidence + return old_min_confidence + + def assert_function_lengths_check(self, code, expected_message): + """Check warnings for long function bodies are as expected. + + Args: + code: C++ source code expected to generate a warning message. + expected_message: Message expected to be generated by the C++ code. + """ + self.assertEquals(expected_message, + self.perform_function_lengths_check(code)) + + def trigger_lines(self, error_level): + """Return number of lines needed to trigger a function length warning. + + Args: + error_level: --v setting for cpp_style. + + Returns: + Number of lines needed to trigger a function length warning. + """ + return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level + + def trigger_test_lines(self, error_level): + """Return number of lines needed to trigger a test function length warning. + + Args: + error_level: --v setting for cpp_style. + + Returns: + Number of lines needed to trigger a test function length warning. + """ + return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level + + def assert_function_length_check_definition(self, lines, error_level): + """Generate long function definition and check warnings are as expected. + + Args: + lines: Number of lines to generate. + error_level: --v setting for cpp_style. 
+ """ + trigger_level = self.trigger_lines(self.min_confidence) + self.assert_function_lengths_check( + 'void test(int x)' + self.function_body(lines), + ('Small and focused functions are preferred: ' + 'test() has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]' + % (lines, trigger_level, error_level))) + + def assert_function_length_check_definition_ok(self, lines): + """Generate shorter function definition and check no warning is produced. + + Args: + lines: Number of lines to generate. + """ + self.assert_function_lengths_check( + 'void test(int x)' + self.function_body(lines), + '') + + def assert_function_length_check_at_error_level(self, error_level): + """Generate and check function at the trigger level for --v setting. + + Args: + error_level: --v setting for cpp_style. + """ + self.assert_function_length_check_definition(self.trigger_lines(error_level), + error_level) + + def assert_function_length_check_below_error_level(self, error_level): + """Generate and check function just below the trigger level for --v setting. + + Args: + error_level: --v setting for cpp_style. + """ + self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1, + error_level - 1) + + def assert_function_length_check_above_error_level(self, error_level): + """Generate and check function just above the trigger level for --v setting. + + Args: + error_level: --v setting for cpp_style. + """ + self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1, + error_level) + + def function_body(self, number_of_lines): + return ' {\n' + ' this_is_just_a_test();\n' * number_of_lines + '}' + + def function_body_with_blank_lines(self, number_of_lines): + return ' {\n' + ' this_is_just_a_test();\n\n' * number_of_lines + '}' + + def function_body_with_no_lints(self, number_of_lines): + return ' {\n' + ' this_is_just_a_test(); // NOLINT\n' * number_of_lines + '}' + + # Test line length checks. 
+ def test_function_length_check_declaration(self): + self.assert_function_lengths_check( + 'void test();', # Not a function definition + '') + + def test_function_length_check_declaration_with_block_following(self): + self.assert_function_lengths_check( + ('void test();\n' + + self.function_body(66)), # Not a function definition + '') + + def test_function_length_check_class_definition(self): + self.assert_function_lengths_check( # Not a function definition + 'class Test' + self.function_body(66) + ';', + '') + + def test_function_length_check_trivial(self): + self.assert_function_lengths_check( + 'void test() {}', # Not counted + '') + + def test_function_length_check_empty(self): + self.assert_function_lengths_check( + 'void test() {\n}', + '') + + def test_function_length_check_definition_below_severity0(self): + old_min_confidence = self.set_min_confidence(0) + self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1) + self.set_min_confidence(old_min_confidence) + + def test_function_length_check_definition_at_severity0(self): + old_min_confidence = self.set_min_confidence(0) + self.assert_function_length_check_definition_ok(self.trigger_lines(0)) + self.set_min_confidence(old_min_confidence) + + def test_function_length_check_definition_above_severity0(self): + old_min_confidence = self.set_min_confidence(0) + self.assert_function_length_check_above_error_level(0) + self.set_min_confidence(old_min_confidence) + + def test_function_length_check_definition_below_severity1v0(self): + old_min_confidence = self.set_min_confidence(0) + self.assert_function_length_check_below_error_level(1) + self.set_min_confidence(old_min_confidence) + + def test_function_length_check_definition_at_severity1v0(self): + old_min_confidence = self.set_min_confidence(0) + self.assert_function_length_check_at_error_level(1) + self.set_min_confidence(old_min_confidence) + + def test_function_length_check_definition_below_severity1(self): + self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1) + + def test_function_length_check_definition_at_severity1(self): + self.assert_function_length_check_definition_ok(self.trigger_lines(1)) + + def test_function_length_check_definition_above_severity1(self): + self.assert_function_length_check_above_error_level(1) + + def test_function_length_check_definition_severity1_plus_indented(self): + error_level = 1 + error_lines = self.trigger_lines(error_level) + 1 + trigger_level = self.trigger_lines(self.min_confidence) + indent_spaces = ' ' + self.assert_function_lengths_check( + re.sub(r'(?m)^(.)', indent_spaces + r'\1', + 'void test_indent(int x)\n' + self.function_body(error_lines)), + ('Small and focused functions are preferred: ' + 'test_indent() has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_plus_blanks(self): + error_level = 1 + error_lines = self.trigger_lines(error_level) + 1 + trigger_level = self.trigger_lines(self.min_confidence) + self.assert_function_lengths_check( + 'void test_blanks(int x)' + self.function_body(error_lines), + ('Small and focused functions are preferred: ' + 'test_blanks() has %d non-comment lines ' + '(error triggered by exceeding %d lines).' 
+ ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_complex_definition_severity1(self): + error_level = 1 + error_lines = self.trigger_lines(error_level) + 1 + trigger_level = self.trigger_lines(self.min_confidence) + self.assert_function_lengths_check( + ('my_namespace::my_other_namespace::MyVeryLongTypeName<Type1, bool func(const Element*)>*\n' + 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >(int arg1, char* arg2)' + + self.function_body(error_lines)), + ('Small and focused functions are preferred: ' + 'my_namespace::my_other_namespace<Type3, Type4>::~MyFunction<Type5<Type6, Type7> >()' + ' has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_for_test(self): + error_level = 1 + error_lines = self.trigger_test_lines(error_level) + 1 + trigger_level = self.trigger_test_lines(self.min_confidence) + self.assert_function_lengths_check( + 'TEST_F(Test, Mutator)' + self.function_body(error_lines), + ('Small and focused functions are preferred: ' + 'TEST_F(Test, Mutator) has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_for_split_line_test(self): + error_level = 1 + error_lines = self.trigger_test_lines(error_level) + 1 + trigger_level = self.trigger_test_lines(self.min_confidence) + self.assert_function_lengths_check( + ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n' + ' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces + + self.function_body(error_lines)), + ('Small and focused functions are preferred: ' + 'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, ' # 1 space + 'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self): + error_level = 1 + error_lines = self.trigger_test_lines(error_level) + 1 + trigger_level = self.trigger_test_lines(self.min_confidence) + self.assert_function_lengths_check( + ('TEST_F(' + + self.function_body(error_lines)), + ('Small and focused functions are preferred: ' + 'TEST_F has %d non-comment lines ' + '(error triggered by exceeding %d lines).' + ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_with_embedded_no_lints(self): + error_level = 1 + error_lines = self.trigger_lines(error_level) + 1 + trigger_level = self.trigger_lines(self.min_confidence) + self.assert_function_lengths_check( + 'void test(int x)' + self.function_body_with_no_lints(error_lines), + ('Small and focused functions are preferred: ' + 'test() has %d non-comment lines ' + '(error triggered by exceeding %d lines).' 
+ ' [readability/fn_size] [%d]') + % (error_lines, trigger_level, error_level)) + + def test_function_length_check_definition_severity1_with_no_lint(self): + self.assert_function_lengths_check( + ('void test(int x)' + self.function_body(self.trigger_lines(1)) + + ' // NOLINT -- long function'), + '') + + def test_function_length_check_definition_below_severity2(self): + self.assert_function_length_check_below_error_level(2) + + def test_function_length_check_definition_severity2(self): + self.assert_function_length_check_at_error_level(2) + + def test_function_length_check_definition_above_severity2(self): + self.assert_function_length_check_above_error_level(2) + + def test_function_length_check_definition_below_severity3(self): + self.assert_function_length_check_below_error_level(3) + + def test_function_length_check_definition_severity3(self): + self.assert_function_length_check_at_error_level(3) + + def test_function_length_check_definition_above_severity3(self): + self.assert_function_length_check_above_error_level(3) + + def test_function_length_check_definition_below_severity4(self): + self.assert_function_length_check_below_error_level(4) + + def test_function_length_check_definition_severity4(self): + self.assert_function_length_check_at_error_level(4) + + def test_function_length_check_definition_above_severity4(self): + self.assert_function_length_check_above_error_level(4) + + def test_function_length_check_definition_below_severity5(self): + self.assert_function_length_check_below_error_level(5) + + def test_function_length_check_definition_at_severity5(self): + self.assert_function_length_check_at_error_level(5) + + def test_function_length_check_definition_above_severity5(self): + self.assert_function_length_check_above_error_level(5) + + def test_function_length_check_definition_huge_lines(self): + # 5 is the limit + self.assert_function_length_check_definition(self.trigger_lines(10), 5) + + def test_function_length_not_determinable(self): + # Macro invocation without terminating semicolon. + self.assert_function_lengths_check( + 'MACRO(arg)', + '') + + # Macro with underscores + self.assert_function_lengths_check( + 'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)', + '') + + self.assert_function_lengths_check( + 'NonMacro(arg)', + 'Lint failed to find start of function body.' 
+ ' [readability/fn_size] [5]') + + +class NoNonVirtualDestructorsTest(CppStyleTestBase): + + def test_no_error(self): + self.assert_multi_line_lint( + '''class Foo { + virtual ~Foo(); + virtual void foo(); + };''', + '') + + self.assert_multi_line_lint( + '''class Foo { + virtual inline ~Foo(); + virtual void foo(); + };''', + '') + + self.assert_multi_line_lint( + '''class Foo { + inline virtual ~Foo(); + virtual void foo(); + };''', + '') + + self.assert_multi_line_lint( + '''class Foo::Goo { + virtual ~Goo(); + virtual void goo(); + };''', + '') + self.assert_multi_line_lint( + 'class Foo { void foo(); };', + 'More than one command on the same line [whitespace/newline] [4]') + self.assert_multi_line_lint( + 'class MyClass {\n' + ' int getIntValue() { ASSERT(m_ptr); return *m_ptr; }\n' + '};\n', + '') + self.assert_multi_line_lint( + 'class MyClass {\n' + ' int getIntValue()\n' + ' {\n' + ' ASSERT(m_ptr); return *m_ptr;\n' + ' }\n' + '};\n', + 'More than one command on the same line [whitespace/newline] [4]') + + self.assert_multi_line_lint( + '''class Qualified::Goo : public Foo { + virtual void goo(); + };''', + '') + + self.assert_multi_line_lint( + # Line-ending : + '''class Goo : + public Foo { + virtual void goo(); + };''', + 'Labels should always be indented at least one space. If this is a ' + 'member-initializer list in a constructor, the colon should be on the ' + 'line after the definition header. [whitespace/labels] [4]') + + def test_no_destructor_when_virtual_needed(self): + self.assert_multi_line_lint_re( + '''class Foo { + virtual void foo(); + };''', + 'The class Foo probably needs a virtual destructor') + + def test_destructor_non_virtual_when_virtual_needed(self): + self.assert_multi_line_lint_re( + '''class Foo { + ~Foo(); + virtual void foo(); + };''', + 'The class Foo probably needs a virtual destructor') + + def test_no_warn_when_derived(self): + self.assert_multi_line_lint( + '''class Foo : public Goo { + virtual void foo(); + };''', + '') + + def test_internal_braces(self): + self.assert_multi_line_lint_re( + '''class Foo { + enum Goo { + GOO + }; + virtual void foo(); + };''', + 'The class Foo probably needs a virtual destructor') + + def test_inner_class_needs_virtual_destructor(self): + self.assert_multi_line_lint_re( + '''class Foo { + class Goo { + virtual void goo(); + }; + };''', + 'The class Goo probably needs a virtual destructor') + + def test_outer_class_needs_virtual_destructor(self): + self.assert_multi_line_lint_re( + '''class Foo { + class Goo { + }; + virtual void foo(); + };''', + 'The class Foo probably needs a virtual destructor') + + def test_qualified_class_needs_virtual_destructor(self): + self.assert_multi_line_lint_re( + '''class Qualified::Foo { + virtual void foo(); + };''', + 'The class Qualified::Foo probably needs a virtual destructor') + + def test_multi_line_declaration_no_error(self): + self.assert_multi_line_lint_re( + '''class Foo + : public Goo { + virtual void foo(); + };''', + '') + + def test_multi_line_declaration_with_error(self): + self.assert_multi_line_lint( + '''class Foo + { + virtual void foo(); + };''', + ['This { should be at the end of the previous line ' + '[whitespace/braces] [4]', + 'The class Foo probably needs a virtual destructor due to having ' + 'virtual method(s), one declared at line 3. 
[runtime/virtual] [4]']) + + +class PassPtrTest(CppStyleTestBase): + # For http://webkit.org/coding/RefPtr.html + + def assert_pass_ptr_check(self, code, expected_message): + """Check warnings for Pass*Ptr are as expected. + + Args: + code: C++ source code expected to generate a warning message. + expected_message: Message expected to be generated by the C++ code. + """ + self.assertEquals(expected_message, + self.perform_pass_ptr_check(code)) + + def test_pass_ref_ptr_in_function(self): + # Local variables should never be PassRefPtr. + self.assert_pass_ptr_check( + 'int myFunction()\n' + '{\n' + ' PassRefPtr<Type1> variable = variable2;\n' + '}', + 'Local variables should never be PassRefPtr (see ' + 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]') + + def test_pass_own_ptr_in_function(self): + # Local variables should never be PassRefPtr. + self.assert_pass_ptr_check( + 'int myFunction()\n' + '{\n' + ' PassOwnPtr<Type1> variable = variable2;\n' + '}', + 'Local variables should never be PassOwnPtr (see ' + 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]') + + def test_pass_other_type_ptr_in_function(self): + # Local variables should never be PassRefPtr. + self.assert_pass_ptr_check( + 'int myFunction()\n' + '{\n' + ' PassOtherTypePtr<Type1> variable;\n' + '}', + 'Local variables should never be PassOtherTypePtr (see ' + 'http://webkit.org/coding/RefPtr.html). [readability/pass_ptr] [5]') + + def test_pass_ref_ptr_return_value(self): + self.assert_pass_ptr_check( + 'PassRefPtr<Type1>\n' + 'myFunction(int)\n' + '{\n' + '}', + '') + self.assert_pass_ptr_check( + 'PassRefPtr<Type1> myFunction(int)\n' + '{\n' + '}', + '') + self.assert_pass_ptr_check( + 'PassRefPtr<Type1> myFunction();\n', + '') + + def test_pass_ref_ptr_parameter_value(self): + self.assert_pass_ptr_check( + 'int myFunction(PassRefPtr<Type1>)\n' + '{\n' + '}', + '') + + def test_ref_ptr_member_variable(self): + self.assert_pass_ptr_check( + 'class Foo {' + ' RefPtr<Type1> m_other;\n' + '};\n', + '') + + +class WebKitStyleTest(CppStyleTestBase): + + # for http://webkit.org/coding/coding-style.html + def test_indentation(self): + # 1. Use spaces, not tabs. Tabs should only appear in files that + # require them for semantic meaning, like Makefiles. + self.assert_multi_line_lint( + 'class Foo {\n' + ' int goo;\n' + '};', + '') + self.assert_multi_line_lint( + 'class Foo {\n' + '\tint goo;\n' + '};', + 'Tab found; better to use spaces [whitespace/tab] [1]') + + # 2. The indent size is 4 spaces. + self.assert_multi_line_lint( + 'class Foo {\n' + ' int goo;\n' + '};', + '') + self.assert_multi_line_lint( + 'class Foo {\n' + ' int goo;\n' + '};', + 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]') + # FIXME: No tests for 8-spaces. + + # 3. In a header, code inside a namespace should not be indented. + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + 'class Document {\n' + ' int myVariable;\n' + '};\n' + '}', + '', + 'foo.h') + self.assert_multi_line_lint( + 'namespace OuterNamespace {\n' + ' namespace InnerNamespace {\n' + ' class Document {\n' + '};\n' + '};\n' + '}', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.h') + self.assert_multi_line_lint( + 'namespace OuterNamespace {\n' + ' class Document {\n' + ' namespace InnerNamespace {\n' + '};\n' + '};\n' + '}', + 'Code inside a namespace should not be indented. 
[whitespace/indent] [4]', + 'foo.h') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + '#if 0\n' + ' class Document {\n' + '};\n' + '#endif\n' + '}', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.h') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + 'class Document {\n' + '};\n' + '}', + '', + 'foo.h') + + # 4. In an implementation file (files with the extension .cpp, .c + # or .mm), code inside a namespace should not be indented. + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + 'Document::Foo()\n' + ' : foo(bar)\n' + ' , boo(far)\n' + '{\n' + ' stuff();\n' + '}', + '', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace OuterNamespace {\n' + 'namespace InnerNamespace {\n' + 'Document::Foo() { }\n' + ' void* p;\n' + '}\n' + '}\n', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace OuterNamespace {\n' + 'namespace InnerNamespace {\n' + 'Document::Foo() { }\n' + '}\n' + ' void* p;\n' + '}\n', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + ' const char* foo = "start:;"\n' + ' "dfsfsfs";\n' + '}\n', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + 'const char* foo(void* a = ";", // ;\n' + ' void* b);\n' + ' void* p;\n' + '}\n', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + 'const char* foo[] = {\n' + ' "void* b);", // ;\n' + ' "asfdf",\n' + ' }\n' + ' void* p;\n' + '}\n', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n\n' + 'const char* foo[] = {\n' + ' "void* b);", // }\n' + ' "asfdf",\n' + ' }\n' + '}\n', + '', + 'foo.cpp') + self.assert_multi_line_lint( + ' namespace WebCore {\n\n' + ' void Document::Foo()\n' + ' {\n' + 'start: // infinite loops are fun!\n' + ' goto start;\n' + ' }', + 'namespace should never be indented. [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + ' Document::Foo() { }\n' + '}', + 'Code inside a namespace should not be indented.' + ' [whitespace/indent] [4]', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + '#define abc(x) x; \\\n' + ' x\n' + '}', + '', + 'foo.cpp') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + '#define abc(x) x; \\\n' + ' x\n' + ' void* x;' + '}', + 'Code inside a namespace should not be indented. [whitespace/indent] [4]', + 'foo.cpp') + + # 5. A case label should line up with its switch statement. The + # case statement is indented. 
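# A small sketch of the alignment rule (hypothetical helper, not cpp_style's
# implementation): in the switch statements tested below, "case"/"default"
# labels sit in the same column as their "switch", while the statements under
# them are indented one level.
import re

def misaligned_case_labels(lines):
    switch_indent = None
    misaligned = []
    for number, line in enumerate(lines, 1):
        indent = re.match(r'\s*', line).group(0)
        if re.match(r'\s*switch \(', line):
            switch_indent = indent
        elif switch_indent is not None and re.match(r'\s*(case\b|default\s*:)', line):
            if indent != switch_indent:
                misaligned.append(number)
    return misaligned

# misaligned_case_labels(['switch (c) {', 'case 1:', '    break;', '}'])      -> []
# misaligned_case_labels(['switch (c) {', '    case 1:', '    break;', '}'])  -> [2]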
+ self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' case barCondition:\n' + ' i++;\n' + ' break;\n' + ' default:\n' + ' i--;\n' + ' }\n', + '') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' switch (otherCondition) {\n' + ' default:\n' + ' return;\n' + ' }\n' + ' default:\n' + ' i--;\n' + ' }\n', + '') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition: break;\n' + ' default: return;\n' + ' }\n', + '') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' case barCondition:\n' + ' i++;\n' + ' break;\n' + ' default:\n' + ' i--;\n' + ' }\n', + 'A case label should not be indented, but line up with its switch statement.' + ' [whitespace/indent] [4]') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' break;\n' + ' default:\n' + ' i--;\n' + ' }\n', + 'A case label should not be indented, but line up with its switch statement.' + ' [whitespace/indent] [4]') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' case barCondition:\n' + ' switch (otherCondition) {\n' + ' default:\n' + ' return;\n' + ' }\n' + ' default:\n' + ' i--;\n' + ' }\n', + 'A case label should not be indented, but line up with its switch statement.' + ' [whitespace/indent] [4]') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' case barCondition:\n' + ' i++;\n' + ' break;\n\n' + ' default:\n' + ' i--;\n' + ' }\n', + 'Non-label code inside switch statements should be indented.' + ' [whitespace/indent] [4]') + self.assert_multi_line_lint( + ' switch (condition) {\n' + ' case fooCondition:\n' + ' case barCondition:\n' + ' switch (otherCondition) {\n' + ' default:\n' + ' return;\n' + ' }\n' + ' default:\n' + ' i--;\n' + ' }\n', + 'Non-label code inside switch statements should be indented.' + ' [whitespace/indent] [4]') + + # 6. Boolean expressions at the same nesting level that span + # multiple lines should have their operators on the left side of + # the line instead of the right side. + self.assert_multi_line_lint( + ' return attr->name() == srcAttr\n' + ' || attr->name() == lowsrcAttr;\n', + '') + self.assert_multi_line_lint( + ' return attr->name() == srcAttr ||\n' + ' attr->name() == lowsrcAttr;\n', + 'Boolean expressions that span multiple lines should have their ' + 'operators on the left side of the line instead of the right side.' + ' [whitespace/operators] [4]') + + def test_spacing(self): + # 1. Do not place spaces around unary operators. + self.assert_multi_line_lint( + 'i++;', + '') + self.assert_multi_line_lint( + 'i ++;', + 'Extra space for operator ++; [whitespace/operators] [4]') + + # 2. Do place spaces around binary and ternary operators. + self.assert_multi_line_lint( + 'y = m * x + b;', + '') + self.assert_multi_line_lint( + 'f(a, b);', + '') + self.assert_multi_line_lint( + 'c = a | b;', + '') + self.assert_multi_line_lint( + 'return condition ? 1 : 0;', + '') + self.assert_multi_line_lint( + 'y=m*x+b;', + 'Missing spaces around = [whitespace/operators] [4]') + self.assert_multi_line_lint( + 'f(a,b);', + 'Missing space after , [whitespace/comma] [3]') + self.assert_multi_line_lint( + 'c = a|b;', + 'Missing spaces around | [whitespace/operators] [3]') + # FIXME: We cannot catch this lint error. + # self.assert_multi_line_lint( + # 'return condition ? 1:0;', + # '') + + # 3. Place spaces between control statements and their parentheses. 
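# The rule exercised below, reduced to a one-line sketch (the regex is an
# assumption, not cpp_style's): flag a control keyword glued to its opening
# parenthesis, without touching ordinary function calls.
import re

CONTROL_WITHOUT_SPACE = re.compile(r'\b(if|for|while|switch)\(')

# CONTROL_WITHOUT_SPACE.search('if(condition)')    -> match
# CONTROL_WITHOUT_SPACE.search('if (condition)')   -> None
# CONTROL_WITHOUT_SPACE.search('doIt(condition)')  -> None (function call)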
+ self.assert_multi_line_lint( + ' if (condition)\n' + ' doIt();\n', + '') + self.assert_multi_line_lint( + ' if(condition)\n' + ' doIt();\n', + 'Missing space before ( in if( [whitespace/parens] [5]') + + # 4. Do not place spaces between a function and its parentheses, + # or between a parenthesis and its content. + self.assert_multi_line_lint( + 'f(a, b);', + '') + self.assert_multi_line_lint( + 'f (a, b);', + 'Extra space before ( in function call [whitespace/parens] [4]') + self.assert_multi_line_lint( + 'f( a, b );', + ['Extra space after ( in function call [whitespace/parens] [4]', + 'Extra space before ) [whitespace/parens] [2]']) + + def test_line_breaking(self): + # 1. Each statement should get its own line. + self.assert_multi_line_lint( + ' x++;\n' + ' y++;\n' + ' if (condition);\n' + ' doIt();\n', + '') + self.assert_multi_line_lint( + ' if (condition) \\\n' + ' doIt();\n', + '') + self.assert_multi_line_lint( + ' x++; y++;', + 'More than one command on the same line [whitespace/newline] [4]') + self.assert_multi_line_lint( + ' if (condition) doIt();\n', + 'More than one command on the same line in if [whitespace/parens] [4]') + # Ensure that having a # in the line doesn't hide the error. + self.assert_multi_line_lint( + ' x++; char a[] = "#";', + 'More than one command on the same line [whitespace/newline] [4]') + # Ignore preprocessor if's. + self.assert_multi_line_lint( + ' #if (condition) || (condition2)\n', + '') + + # 2. An else statement should go on the same line as a preceding + # close brace if one is present, else it should line up with the + # if statement. + self.assert_multi_line_lint( + 'if (condition) {\n' + ' doSomething();\n' + ' doSomethingAgain();\n' + '} else {\n' + ' doSomethingElse();\n' + ' doSomethingElseAgain();\n' + '}\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' doSomething();\n' + 'else\n' + ' doSomethingElse();\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' doSomething();\n' + 'else {\n' + ' doSomethingElse();\n' + ' doSomethingElseAgain();\n' + '}\n', + '') + self.assert_multi_line_lint( + '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', + '') + self.assert_multi_line_lint( + '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n', + 'Extra space after ( in if [whitespace/parens] [5]') + # FIXME: currently we only check first conditional, so we cannot detect errors in next ones. 
+ # self.assert_multi_line_lint( + # '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n', + # 'Mismatching spaces inside () in if [whitespace/parens] [5]') + self.assert_multi_line_lint( + 'if (condition) {\n' + ' doSomething();\n' + ' doSomethingAgain();\n' + '}\n' + 'else {\n' + ' doSomethingElse();\n' + ' doSomethingElseAgain();\n' + '}\n', + 'An else should appear on the same line as the preceding } [whitespace/newline] [4]') + self.assert_multi_line_lint( + 'if (condition) doSomething(); else doSomethingElse();\n', + ['More than one command on the same line [whitespace/newline] [4]', + 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]', + 'More than one command on the same line in if [whitespace/parens] [4]']) + self.assert_multi_line_lint( + 'if (condition) doSomething(); else {\n' + ' doSomethingElse();\n' + '}\n', + ['More than one command on the same line in if [whitespace/parens] [4]', + 'One line control clauses should not use braces. [whitespace/braces] [4]']) + self.assert_multi_line_lint( + 'void func()\n' + '{\n' + ' while (condition) { }\n' + ' return 0;\n' + '}\n', + '') + self.assert_multi_line_lint( + 'void func()\n' + '{\n' + ' for (i = 0; i < 42; i++) { foobar(); }\n' + ' return 0;\n' + '}\n', + 'More than one command on the same line in for [whitespace/parens] [4]') + + # 3. An else if statement should be written as an if statement + # when the prior if concludes with a return statement. + self.assert_multi_line_lint( + 'if (motivated) {\n' + ' if (liquid)\n' + ' return money;\n' + '} else if (tired)\n' + ' break;\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' doSomething();\n' + 'else if (otherCondition)\n' + ' doSomethingElse();\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' doSomething();\n' + 'else\n' + ' doSomethingElse();\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' returnValue = foo;\n' + 'else if (otherCondition)\n' + ' returnValue = bar;\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' returnValue = foo;\n' + 'else\n' + ' returnValue = bar;\n', + '') + self.assert_multi_line_lint( + 'if (condition)\n' + ' doSomething();\n' + 'else if (liquid)\n' + ' return money;\n' + 'else if (broke)\n' + ' return favor;\n' + 'else\n' + ' sleep(28800);\n', + '') + self.assert_multi_line_lint( + 'if (liquid) {\n' + ' prepare();\n' + ' return money;\n' + '} else if (greedy) {\n' + ' keep();\n' + ' return nothing;\n' + '}\n', + 'An else if statement should be written as an if statement when the ' + 'prior "if" concludes with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]') + self.assert_multi_line_lint( + ' if (stupid) {\n' + 'infiniteLoop:\n' + ' goto infiniteLoop;\n' + ' } else if (evil)\n' + ' goto hell;\n', + 'An else if statement should be written as an if statement when the ' + 'prior "if" concludes with a return, break, continue or goto statement.' 
+ ' [readability/control_flow] [4]') + self.assert_multi_line_lint( + 'if (liquid)\n' + '{\n' + ' prepare();\n' + ' return money;\n' + '}\n' + 'else if (greedy)\n' + ' keep();\n', + ['This { should be at the end of the previous line [whitespace/braces] [4]', + 'An else should appear on the same line as the preceding } [whitespace/newline] [4]', + 'An else if statement should be written as an if statement when the ' + 'prior "if" concludes with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]']) + self.assert_multi_line_lint( + 'if (gone)\n' + ' return;\n' + 'else if (here)\n' + ' go();\n', + 'An else if statement should be written as an if statement when the ' + 'prior "if" concludes with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]') + self.assert_multi_line_lint( + 'if (gone)\n' + ' return;\n' + 'else\n' + ' go();\n', + 'An else statement can be removed when the prior "if" concludes ' + 'with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]') + self.assert_multi_line_lint( + 'if (motivated) {\n' + ' prepare();\n' + ' continue;\n' + '} else {\n' + ' cleanUp();\n' + ' break;\n' + '}\n', + 'An else statement can be removed when the prior "if" concludes ' + 'with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]') + self.assert_multi_line_lint( + 'if (tired)\n' + ' break;\n' + 'else {\n' + ' prepare();\n' + ' continue;\n' + '}\n', + 'An else statement can be removed when the prior "if" concludes ' + 'with a return, break, continue or goto statement.' + ' [readability/control_flow] [4]') + + def test_braces(self): + # 1. Function definitions: place each brace on its own line. + self.assert_multi_line_lint( + 'int main()\n' + '{\n' + ' doSomething();\n' + '}\n', + '') + self.assert_multi_line_lint( + 'int main() {\n' + ' doSomething();\n' + '}\n', + 'Place brace on its own line for function definitions. [whitespace/braces] [4]') + + # 2. Other braces: place the open brace on the line preceding the + # code block; place the close brace on its own line. 
+ self.assert_multi_line_lint( + 'class MyClass {\n' + ' int foo;\n' + '};\n', + '') + self.assert_multi_line_lint( + 'namespace WebCore {\n' + 'int foo;\n' + '};\n', + '') + self.assert_multi_line_lint( + 'for (int i = 0; i < 10; i++) {\n' + ' DoSomething();\n' + '};\n', + '') + self.assert_multi_line_lint( + 'class MyClass\n' + '{\n' + ' int foo;\n' + '};\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'if (condition)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'for (int i = 0; i < 10; i++)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'while (true)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'foreach (Foo* foo, foos)\n' + '{\n' + ' int bar;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'switch (type)\n' + '{\n' + 'case foo: return;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'if (condition)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'for (int i = 0; i < 10; i++)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'while (true)\n' + '{\n' + ' int foo;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'switch (type)\n' + '{\n' + 'case foo: return;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + self.assert_multi_line_lint( + 'else if (type)\n' + '{\n' + 'case foo: return;\n' + '}\n', + 'This { should be at the end of the previous line [whitespace/braces] [4]') + + # 3. One-line control clauses should not use braces unless + # comments are included or a single statement spans multiple + # lines. + self.assert_multi_line_lint( + 'if (true) {\n' + ' int foo;\n' + '}\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'for (; foo; bar) {\n' + ' int foo;\n' + '}\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'foreach (foo, foos) {\n' + ' int bar;\n' + '}\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'while (true) {\n' + ' int foo;\n' + '}\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'if (true)\n' + ' int foo;\n' + 'else {\n' + ' int foo;\n' + '}\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'if (true) {\n' + ' int foo;\n' + '} else\n' + ' int foo;\n', + 'One line control clauses should not use braces. [whitespace/braces] [4]') + + self.assert_multi_line_lint( + 'if (true) {\n' + ' // Some comment\n' + ' int foo;\n' + '}\n', + '') + + self.assert_multi_line_lint( + 'if (true) {\n' + ' myFunction(reallyLongParam1, reallyLongParam2,\n' + ' reallyLongParam3);\n' + '}\n', + '') + + # 4. 
Control clauses without a body should use empty braces. + self.assert_multi_line_lint( + 'for ( ; current; current = current->next) { }\n', + '') + self.assert_multi_line_lint( + 'for ( ; current;\n' + ' current = current->next) {}\n', + '') + self.assert_multi_line_lint( + 'for ( ; current; current = current->next);\n', + 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]') + self.assert_multi_line_lint( + 'while (true);\n', + 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]') + self.assert_multi_line_lint( + '} while (true);\n', + '') + + def test_null_false_zero(self): + # 1. In C++, the null pointer value should be written as 0. In C, + # it should be written as NULL. In Objective-C and Objective-C++, + # follow the guideline for C or C++, respectively, but use nil to + # represent a null Objective-C object. + self.assert_lint( + 'functionCall(NULL)', + 'Use 0 instead of NULL.' + ' [readability/null] [5]', + 'foo.cpp') + self.assert_lint( + "// Don't use NULL in comments since it isn't in code.", + 'Use 0 instead of NULL.' + ' [readability/null] [4]', + 'foo.cpp') + self.assert_lint( + '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.', + 'Use 0 instead of NULL.' + ' [readability/null] [4]', + 'foo.cpp') + self.assert_lint( + '"A string containing NULL is ok"', + '', + 'foo.cpp') + self.assert_lint( + 'if (aboutNULL)', + '', + 'foo.cpp') + self.assert_lint( + 'myVariable = NULLify', + '', + 'foo.cpp') + # Make sure that the NULL check does not apply to C and Objective-C files. + self.assert_lint( + 'functionCall(NULL)', + '', + 'foo.c') + self.assert_lint( + 'functionCall(NULL)', + '', + 'foo.m') + + # Make sure that the NULL check does not apply to g_object_{set,get} and + # g_str{join,concat} + self.assert_lint( + 'g_object_get(foo, "prop", &bar, NULL);', + '') + self.assert_lint( + 'g_object_set(foo, "prop", bar, NULL);', + '') + self.assert_lint( + 'g_build_filename(foo, bar, NULL);', + '') + self.assert_lint( + 'gst_bin_add_many(foo, bar, boo, NULL);', + '') + self.assert_lint( + 'gst_bin_remove_many(foo, bar, boo, NULL);', + '') + self.assert_lint( + 'gst_element_link_many(foo, bar, boo, NULL);', + '') + self.assert_lint( + 'gst_element_unlink_many(foo, bar, boo, NULL);', + '') + self.assert_lint( + 'gchar* result = g_strconcat("part1", "part2", "part3", NULL);', + '') + self.assert_lint( + 'gchar* result = g_strconcat("part1", NULL);', + '') + self.assert_lint( + 'gchar* result = g_strjoin(",", "part1", "part2", "part3", NULL);', + '') + self.assert_lint( + 'gchar* result = g_strjoin(",", "part1", NULL);', + '') + self.assert_lint( + 'gchar* result = gdk_pixbuf_save_to_callback(pixbuf, function, data, type, error, NULL);', + '') + self.assert_lint( + 'gchar* result = gdk_pixbuf_save_to_buffer(pixbuf, function, data, type, error, NULL);', + '') + self.assert_lint( + 'gchar* result = gdk_pixbuf_save_to_stream(pixbuf, function, data, type, error, NULL);', + '') + + # 2. C++ and C bool values should be written as true and + # false. Objective-C BOOL values should be written as YES and NO. + # FIXME: Implement this. + + # 3. Tests for true/false, null/non-null, and zero/non-zero should + # all be done without equality comparisons. + self.assert_lint( + 'if (count == 0)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' 
+ ' [readability/comparison_to_zero] [5]') + self.assert_lint_one_of_many_errors_re( + 'if (string != NULL)', + r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.') + self.assert_lint( + 'if (condition == true)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' + ' [readability/comparison_to_zero] [5]') + self.assert_lint( + 'if (myVariable != /* Why would anyone put a comment here? */ false)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' + ' [readability/comparison_to_zero] [5]') + + self.assert_lint( + 'if (0 /* This comment also looks odd to me. */ != aLongerVariableName)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' + ' [readability/comparison_to_zero] [5]') + self.assert_lint_one_of_many_errors_re( + 'if (NULL == thisMayBeNull)', + r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.') + self.assert_lint( + 'if (true != anotherCondition)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' + ' [readability/comparison_to_zero] [5]') + self.assert_lint( + 'if (false == myBoolValue)', + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.' + ' [readability/comparison_to_zero] [5]') + + self.assert_lint( + 'if (fontType == trueType)', + '') + self.assert_lint( + 'if (othertrue == fontType)', + '') + + def test_using_std(self): + self.assert_lint( + 'using std::min;', + "Use 'using namespace std;' instead of 'using std::min;'." + " [build/using_std] [4]", + 'foo.cpp') + + def test_max_macro(self): + self.assert_lint( + 'int i = MAX(0, 1);', + '', + 'foo.c') + + self.assert_lint( + 'int i = MAX(0, 1);', + 'Use std::max() or std::max<type>() instead of the MAX() macro.' + ' [runtime/max_min_macros] [4]', + 'foo.cpp') + + self.assert_lint( + 'inline int foo() { return MAX(0, 1); }', + 'Use std::max() or std::max<type>() instead of the MAX() macro.' + ' [runtime/max_min_macros] [4]', + 'foo.h') + + def test_min_macro(self): + self.assert_lint( + 'int i = MIN(0, 1);', + '', + 'foo.c') + + self.assert_lint( + 'int i = MIN(0, 1);', + 'Use std::min() or std::min<type>() instead of the MIN() macro.' + ' [runtime/max_min_macros] [4]', + 'foo.cpp') + + self.assert_lint( + 'inline int foo() { return MIN(0, 1); }', + 'Use std::min() or std::min<type>() instead of the MIN() macro.' + ' [runtime/max_min_macros] [4]', + 'foo.h') + + def test_names(self): + name_underscore_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming] [4]" + name_tooshort_error_message = " is incorrectly named. Don't use the single letter 'l' as an identifier name. [readability/naming] [4]" + + # Basic cases from WebKit style guide. 
+ self.assert_lint('struct Data;', '') + self.assert_lint('size_t bufferSize;', '') + self.assert_lint('class HTMLDocument;', '') + self.assert_lint('String mimeType();', '') + self.assert_lint('size_t buffer_size;', + 'buffer_size' + name_underscore_error_message) + self.assert_lint('short m_length;', '') + self.assert_lint('short _length;', + '_length' + name_underscore_error_message) + self.assert_lint('short length_;', + 'length_' + name_underscore_error_message) + self.assert_lint('unsigned _length;', + '_length' + name_underscore_error_message) + self.assert_lint('unsigned int _length;', + '_length' + name_underscore_error_message) + self.assert_lint('unsigned long long _length;', + '_length' + name_underscore_error_message) + + # Allow underscores in Objective C files. + self.assert_lint('unsigned long long _length;', + '', + 'foo.m') + self.assert_lint('unsigned long long _length;', + '', + 'foo.mm') + self.assert_lint('#import "header_file.h"\n' + 'unsigned long long _length;', + '', + 'foo.h') + self.assert_lint('unsigned long long _length;\n' + '@interface WebFullscreenWindow;', + '', + 'foo.h') + self.assert_lint('unsigned long long _length;\n' + '@implementation WebFullscreenWindow;', + '', + 'foo.h') + self.assert_lint('unsigned long long _length;\n' + '@class WebWindowFadeAnimation;', + '', + 'foo.h') + + # Variable name 'l' is easy to confuse with '1' + self.assert_lint('int l;', 'l' + name_tooshort_error_message) + self.assert_lint('size_t l;', 'l' + name_tooshort_error_message) + self.assert_lint('long long l;', 'l' + name_tooshort_error_message) + + # Pointers, references, functions, templates, and adjectives. + self.assert_lint('char* under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('const int UNDER_SCORE;', + 'UNDER_SCORE' + name_underscore_error_message) + self.assert_lint('static inline const char const& const under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('WebCore::RenderObject* under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('int func_name();', + 'func_name' + name_underscore_error_message) + self.assert_lint('RefPtr<RenderObject*> under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('WTF::Vector<WTF::RefPtr<const RenderObject* const> > under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('int under_score[];', + 'under_score' + name_underscore_error_message) + self.assert_lint('struct dirent* under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('long under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('long long under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('long double under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('long long int under_score;', + 'under_score' + name_underscore_error_message) + + # Declarations in control statement. 
+ self.assert_lint('if (int under_score = 42) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('else if (int under_score = 42) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('for (int under_score = 42; cond; i++) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('while (foo & under_score = bar) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('for (foo * under_score = p; cond; i++) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('for (foo * under_score; cond; i++) {', + 'under_score' + name_underscore_error_message) + self.assert_lint('while (foo & value_in_thirdparty_library) {', '') + self.assert_lint('while (foo * value_in_thirdparty_library) {', '') + self.assert_lint('if (mli && S_OK == mli->foo()) {', '') + + # More member variables and functions. + self.assert_lint('int SomeClass::s_validName', '') + self.assert_lint('int m_under_score;', + 'm_under_score' + name_underscore_error_message) + self.assert_lint('int SomeClass::s_under_score = 0;', + 'SomeClass::s_under_score' + name_underscore_error_message) + self.assert_lint('int SomeClass::under_score = 0;', + 'SomeClass::under_score' + name_underscore_error_message) + + # Other statements. + self.assert_lint('return INT_MAX;', '') + self.assert_lint('return_t under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('goto under_score;', + 'under_score' + name_underscore_error_message) + self.assert_lint('delete static_cast<Foo*>(p);', '') + + # Multiple variables in one line. + self.assert_lint('void myFunction(int variable1, int another_variable);', + 'another_variable' + name_underscore_error_message) + self.assert_lint('int variable1, another_variable;', + 'another_variable' + name_underscore_error_message) + self.assert_lint('int first_variable, secondVariable;', + 'first_variable' + name_underscore_error_message) + self.assert_lint('void my_function(int variable_1, int variable_2);', + ['my_function' + name_underscore_error_message, + 'variable_1' + name_underscore_error_message, + 'variable_2' + name_underscore_error_message]) + self.assert_lint('for (int variable_1, variable_2;;) {', + ['variable_1' + name_underscore_error_message, + 'variable_2' + name_underscore_error_message]) + + # There is an exception for op code functions but only in the JavaScriptCore directory. + self.assert_lint('void this_op_code(int var1, int var2)', '', 'JavaScriptCore/foo.cpp') + self.assert_lint('void op_code(int var1, int var2)', '', 'JavaScriptCore/foo.cpp') + self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_underscore_error_message) + + # GObject requires certain magical names in class declarations. + self.assert_lint('void webkit_dom_object_init();', '') + self.assert_lint('void webkit_dom_object_class_init();', '') + + # There is an exception for some unit tests that begin with "tst_". + self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '') + + # The Qt API uses names that begin with "qt_". + self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '') + self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '') + + # Cairo forward-declarations should not be a failure. 
+ self.assert_lint('typedef struct _cairo cairo_t;', '') + self.assert_lint('typedef struct _cairo_surface cairo_surface_t;', '') + self.assert_lint('typedef struct _cairo_scaled_font cairo_scaled_font_t;', '') + + # NPAPI functions that start with NPN_, NPP_ or NP_ are allowed. + self.assert_lint('void NPN_Status(NPP, const char*)', '') + self.assert_lint('NPError NPP_SetWindow(NPP instance, NPWindow *window)', '') + self.assert_lint('NPObject* NP_Allocate(NPP, NPClass*)', '') + + # const_iterator is allowed as well. + self.assert_lint('typedef VectorType::const_iterator const_iterator;', '') + + # vm_throw is allowed as well. + self.assert_lint('int vm_throw;', '') + + # Bitfields. + self.assert_lint('unsigned _fillRule : 1;', + '_fillRule' + name_underscore_error_message) + + # new operators in initialization. + self.assert_lint('OwnPtr<uint32_t> variable(new uint32_t);', '') + self.assert_lint('OwnPtr<uint32_t> variable(new (expr) uint32_t);', '') + self.assert_lint('OwnPtr<uint32_t> under_score(new uint32_t);', + 'under_score' + name_underscore_error_message) + + + def test_comments(self): + # A comment at the beginning of a line is ok. + self.assert_lint('// comment', '') + self.assert_lint(' // comment', '') + + self.assert_lint('} // namespace WebCore', + 'One space before end of line comments' + ' [whitespace/comments] [5]') + + def test_other(self): + # FIXME: Implement this. + pass + + +class CppCheckerTest(unittest.TestCase): + + """Tests CppChecker class.""" + + def mock_handle_style_error(self): + pass + + def _checker(self): + return CppChecker("foo", "h", self.mock_handle_style_error, 3) + + def test_init(self): + """Test __init__ constructor.""" + checker = self._checker() + self.assertEquals(checker.file_extension, "h") + self.assertEquals(checker.file_path, "foo") + self.assertEquals(checker.handle_style_error, self.mock_handle_style_error) + self.assertEquals(checker.min_confidence, 3) + + def test_eq(self): + """Test __eq__ equality function.""" + checker1 = self._checker() + checker2 = self._checker() + + # == calls __eq__. + self.assertTrue(checker1 == checker2) + + def mock_handle_style_error2(self): + pass + + # Verify that a difference in any argument cause equality to fail. + checker = CppChecker("foo", "h", self.mock_handle_style_error, 3) + self.assertFalse(checker == CppChecker("bar", "h", self.mock_handle_style_error, 3)) + self.assertFalse(checker == CppChecker("foo", "c", self.mock_handle_style_error, 3)) + self.assertFalse(checker == CppChecker("foo", "h", mock_handle_style_error2, 3)) + self.assertFalse(checker == CppChecker("foo", "h", self.mock_handle_style_error, 4)) + + def test_ne(self): + """Test __ne__ inequality function.""" + checker1 = self._checker() + checker2 = self._checker() + + # != calls __ne__. + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + self.assertFalse(checker1 != checker2) + + +def tearDown(): + """A global check to make sure all error-categories have been tested. + + The main tearDown() routine is the only code we can guarantee will be + run after all other tests have been executed. 
+ """ + try: + if _run_verifyallcategoriesseen: + ErrorCollector(None).verify_all_categories_are_seen() + except NameError: + # If nobody set the global _run_verifyallcategoriesseen, then + # we assume we shouldn't run the test + pass + +if __name__ == '__main__': + import sys + # We don't want to run the verify_all_categories_are_seen() test unless + # we're running the full test suite: if we only run one test, + # obviously we're not going to see all the error categories. So we + # only run verify_all_categories_are_seen() when no commandline flags + # are passed in. + global _run_verifyallcategoriesseen + _run_verifyallcategoriesseen = (len(sys.argv) == 1) + + unittest.main() diff --git a/Tools/Scripts/webkitpy/style/checkers/python.py b/Tools/Scripts/webkitpy/style/checkers/python.py new file mode 100644 index 0000000..70d4450 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/python.py @@ -0,0 +1,56 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports checking WebKit style in Python files.""" + +from ...style_references import pep8 + + +class PythonChecker(object): + + """Processes text lines for checking style.""" + + def __init__(self, file_path, handle_style_error): + self._file_path = file_path + self._handle_style_error = handle_style_error + + def check(self, lines): + # Initialize pep8.options, which is necessary for + # Checker.check_all() to execute. + pep8.process_options(arglist=[self._file_path]) + + checker = pep8.Checker(self._file_path) + + def _pep8_handle_error(line_number, offset, text, check): + # FIXME: Incorporate the character offset into the error output. + # This will require updating the error handler __call__ + # signature to include an optional "offset" parameter. 
+ pep8_code = text[:4] + pep8_message = text[5:] + + category = "pep8/" + pep8_code + + self._handle_style_error(line_number, category, 5, pep8_message) + + checker.report_error = _pep8_handle_error + + errors = checker.check_all() diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py new file mode 100644 index 0000000..e003eb8 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py @@ -0,0 +1,62 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for python.py.""" + +import os +import unittest + +from python import PythonChecker + + +class PythonCheckerTest(unittest.TestCase): + + """Tests the PythonChecker class.""" + + def test_init(self): + """Test __init__() method.""" + def _mock_handle_style_error(self): + pass + + checker = PythonChecker("foo.txt", _mock_handle_style_error) + self.assertEquals(checker._file_path, "foo.txt") + self.assertEquals(checker._handle_style_error, + _mock_handle_style_error) + + def test_check(self): + """Test check() method.""" + errors = [] + + def _mock_handle_style_error(line_number, category, confidence, + message): + error = (line_number, category, confidence, message) + errors.append(error) + + current_dir = os.path.dirname(__file__) + file_path = os.path.join(current_dir, "python_unittest_input.py") + + checker = PythonChecker(file_path, _mock_handle_style_error) + checker.check(lines=[]) + + self.assertEquals(len(errors), 1) + self.assertEquals(errors[0], + (2, "pep8/W291", 5, "trailing whitespace")) diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py new file mode 100644 index 0000000..9f1d118 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest_input.py @@ -0,0 +1,2 @@ +# This file is sample input for python_unittest.py and includes a single +# error which is an extra space at the end of this line. 
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py new file mode 100644 index 0000000..c86b32c --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py @@ -0,0 +1,120 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Checks WebKit style for test_expectations files.""" + +import logging +import os +import re +import sys + +from common import TabChecker +from webkitpy.style_references import port +from webkitpy.style_references import test_expectations + +_log = logging.getLogger("webkitpy.style.checkers.test_expectations") + + +class ChromiumOptions(object): + """A mock object for creating chromium port object. + + port.get() requires an options object which has 'chromium' attribute to create + chromium port object for each platform. This class mocks such object. + """ + def __init__(self): + self.chromium = True + + +class TestExpectationsChecker(object): + """Processes test_expectations.txt lines for validating the syntax.""" + + categories = set(['test/expectations']) + + def __init__(self, file_path, handle_style_error): + self._file_path = file_path + self._handle_style_error = handle_style_error + self._tab_checker = TabChecker(file_path, handle_style_error) + self._output_regex = re.compile('Line:(?P<line>\d+)\s*(?P<message>.+)') + # Determining the port of this expectations. + try: + port_name = self._file_path.split(os.sep)[-2] + if port_name == "chromium": + options = ChromiumOptions() + self._port_obj = port.get(port_name=None, options=options) + else: + self._port_obj = port.get(port_name=port_name) + except: + # Using 'test' port when we couldn't determine the port for this + # expectations. + _log.warn("Could not determine the port for %s. " + "Using 'test' port, but platform-specific expectations " + "will fail the check." 
% self._file_path) + self._port_obj = port.get('test') + self._port_to_check = self._port_obj.test_platform_name() + # Suppress error messages of test_expectations module since they will be + # reported later. + log = logging.getLogger("webkitpy.layout_tests.layout_package." + "test_expectations") + log.setLevel(logging.CRITICAL) + + def _handle_error_message(self, lineno, message, confidence): + pass + + def check_test_expectations(self, expectations_str, tests=None, overrides=None): + err = None + expectations = None + try: + expectations = test_expectations.TestExpectationsFile( + port=self._port_obj, expectations=expectations_str, full_test_list=tests, + test_platform_name=self._port_to_check, is_debug_mode=False, + is_lint_mode=True, overrides=overrides) + except test_expectations.ParseError, error: + err = error + + if err: + level = 2 + if err.fatal: + level = 5 + for error in err.errors: + matched = self._output_regex.match(error) + if matched: + lineno, message = matched.group('line', 'message') + self._handle_style_error(int(lineno), 'test/expectations', level, message) + + + def check_tabs(self, lines): + self._tab_checker.check(lines) + + def check(self, lines): + overrides = self._port_obj.test_expectations_overrides() + expectations = '\n'.join(lines) + self.check_test_expectations(expectations_str=expectations, + tests=None, + overrides=overrides) + # Warn tabs in lines as well + self.check_tabs(lines) diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py new file mode 100644 index 0000000..9817c5d --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py @@ -0,0 +1,173 @@ +#!/usr/bin/python +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for test_expectations.py.""" + +import os +import sys +import unittest + +# We need following workaround hack to run this unit tests in stand-alone. 
+try: + d = os.path.dirname(__file__) +except NameError: + d = os.path.dirname(sys.argv[0]) +sys.path.append(os.path.abspath(os.path.join(d, '../../../'))) + +from test_expectations import TestExpectationsChecker +from webkitpy.style_references import port +from webkitpy.style_references import test_expectations as test_expectations_style + + +class ErrorCollector(object): + """An error handler class for unit tests.""" + + def __init__(self): + self._errors = [] + + def __call__(self, lineno, category, confidence, message): + self._errors.append('%s [%s] [%d]' % (message, category, confidence)) + + def get_errors(self): + return ''.join(self._errors) + + def reset_errors(self): + self._errors = [] + + +class TestExpectationsTestCase(unittest.TestCase): + """TestCase for test_expectations.py""" + + def setUp(self): + self._error_collector = ErrorCollector() + port_obj = port.get('test') + self._test_file = os.path.join(port_obj.layout_tests_dir(), 'passes/text.html') + + def process_expectations(self, expectations, overrides=None): + self._checker = TestExpectationsChecker() + + def assert_lines_lint(self, lines, expected): + self._error_collector.reset_errors() + checker = TestExpectationsChecker('test/test_expectations.txt', + self._error_collector) + checker.check_test_expectations(expectations_str='\n'.join(lines), + tests=[self._test_file], + overrides=None) + checker.check_tabs(lines) + self.assertEqual(expected, self._error_collector.get_errors()) + + def test_valid_expectations(self): + self.assert_lines_lint( + ["passes/text.html = PASS"], + "") + self.assert_lines_lint( + ["passes/text.html = FAIL PASS"], + "") + self.assert_lines_lint( + ["passes/text.html = CRASH TIMEOUT FAIL PASS"], + "") + self.assert_lines_lint( + ["BUGCR1234 MAC : passes/text.html = PASS FAIL"], + "") + self.assert_lines_lint( + ["SKIP BUGCR1234 : passes/text.html = TIMEOUT PASS"], + "") + self.assert_lines_lint( + ["BUGCR1234 DEBUG : passes/text.html = TIMEOUT PASS"], + "") + self.assert_lines_lint( + ["BUGCR1234 DEBUG SKIP : passes/text.html = TIMEOUT PASS"], + "") + self.assert_lines_lint( + ["BUGCR1234 MAC DEBUG SKIP : passes/text.html = TIMEOUT PASS"], + "") + self.assert_lines_lint( + ["BUGCR1234 DEBUG MAC : passes/text.html = TIMEOUT PASS"], + "") + self.assert_lines_lint( + ["SLOW BUGCR1234 : passes/text.html = PASS"], + "") + self.assert_lines_lint( + ["WONTFIX SKIP : passes/text.html = TIMEOUT"], + "") + + def test_modifier_errors(self): + self.assert_lines_lint( + ["BUG1234 : passes/text.html = FAIL"], + 'Bug must be either BUGCR, BUGWK, or BUGV8_ for test: bug1234 passes/text.html [test/expectations] [5]') + + def test_valid_modifiers(self): + self.assert_lines_lint( + ["INVALID-MODIFIER : passes/text.html = PASS"], + "Invalid modifier for test: invalid-modifier " + "passes/text.html [test/expectations] [5]") + self.assert_lines_lint( + ["SKIP : passes/text.html = PASS"], + "Test lacks BUG modifier. " + "passes/text.html [test/expectations] [2]") + + def test_expectation_errors(self): + self.assert_lines_lint( + ["missing expectations"], + "Missing expectations. ['missing expectations'] [test/expectations] [5]") + self.assert_lines_lint( + ["SLOW : passes/text.html = TIMEOUT"], + "A test can not be both slow and timeout. " + "If it times out indefinitely, then it should be just timeout. " + "passes/text.html [test/expectations] [5]") + self.assert_lines_lint( + ["does/not/exist.html = FAIL"], + "Path does not exist. 
does/not/exist.html [test/expectations] [2]") + + def test_parse_expectations(self): + self.assert_lines_lint( + ["passes/text.html = PASS"], + "") + self.assert_lines_lint( + ["passes/text.html = UNSUPPORTED"], + "Unsupported expectation: unsupported " + "passes/text.html [test/expectations] [5]") + self.assert_lines_lint( + ["passes/text.html = PASS UNSUPPORTED"], + "Unsupported expectation: unsupported " + "passes/text.html [test/expectations] [5]") + + def test_already_seen_test(self): + self.assert_lines_lint( + ["passes/text.html = PASS", + "passes/text.html = TIMEOUT"], + "Duplicate expectation. %s [test/expectations] [5]" % self._test_file) + + def test_tab(self): + self.assert_lines_lint( + ["\tpasses/text.html = PASS"], + "Line contains tab character. [whitespace/tab] [5]") + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/style/checkers/text.py b/Tools/Scripts/webkitpy/style/checkers/text.py new file mode 100644 index 0000000..1147658 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/text.py @@ -0,0 +1,51 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Checks WebKit style for text files.""" + +from common import TabChecker + +class TextChecker(object): + + """Processes text lines for checking style.""" + + def __init__(self, file_path, handle_style_error): + self.file_path = file_path + self.handle_style_error = handle_style_error + self._tab_checker = TabChecker(file_path, handle_style_error) + + def check(self, lines): + self._tab_checker.check(lines) + + +# FIXME: Remove this function (requires refactoring unit tests). 
+def process_file_data(filename, lines, error): + checker = TextChecker(filename, error) + checker.check(lines) + diff --git a/Tools/Scripts/webkitpy/style/checkers/text_unittest.py b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py new file mode 100644 index 0000000..ced49a9 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py @@ -0,0 +1,94 @@ +#!/usr/bin/python +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for text_style.py.""" + +import unittest + +import text as text_style +from text import TextChecker + +class TextStyleTestCase(unittest.TestCase): + """TestCase for text_style.py""" + + def assertNoError(self, lines): + """Asserts that the specified lines has no errors.""" + self.had_error = False + + def error_for_test(line_number, category, confidence, message): + """Records if an error occurs.""" + self.had_error = True + + text_style.process_file_data('', lines, error_for_test) + self.assert_(not self.had_error, '%s should not have any errors.' % lines) + + def assertError(self, lines, expected_line_number): + """Asserts that the specified lines has an error.""" + self.had_error = False + + def error_for_test(line_number, category, confidence, message): + """Checks if the expected error occurs.""" + self.assertEquals(expected_line_number, line_number) + self.assertEquals('whitespace/tab', category) + self.had_error = True + + text_style.process_file_data('', lines, error_for_test) + self.assert_(self.had_error, '%s should have an error [whitespace/tab].' 
% lines) + + + def test_no_error(self): + """Tests for no error cases.""" + self.assertNoError(['']) + self.assertNoError(['abc def', 'ggg']) + + + def test_error(self): + """Tests for error cases.""" + self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1) + self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>', + '', + '\tReviewed by NOBODY.'], 3) + + +class TextCheckerTest(unittest.TestCase): + + """Tests TextChecker class.""" + + def mock_handle_style_error(self): + pass + + def test_init(self): + """Test __init__ constructor.""" + checker = TextChecker("foo.txt", self.mock_handle_style_error) + self.assertEquals(checker.file_path, "foo.txt") + self.assertEquals(checker.handle_style_error, self.mock_handle_style_error) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/style/checkers/xml.py b/Tools/Scripts/webkitpy/style/checkers/xml.py new file mode 100644 index 0000000..2f7c0ce --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/xml.py @@ -0,0 +1,45 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Checks WebKit style for XML files.""" + +from __future__ import absolute_import + +from xml.parsers import expat + + +class XMLChecker(object): + """Processes XML lines for checking style.""" + + def __init__(self, file_path, handle_style_error): + self.file_path = file_path + self.handle_style_error = handle_style_error + + def check(self, lines): + parser = expat.ParserCreate() + try: + for line in lines: + parser.Parse(line) + parser.Parse('\n') + parser.Parse('', True) + except expat.ExpatError, error: + self.handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code)) diff --git a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py new file mode 100644 index 0000000..3825660 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for xml.py.""" + +import unittest + +import xml + + +class XMLCheckerTest(unittest.TestCase): + """Tests XMLChecker class.""" + + def assert_no_error(self, xml_data): + def handle_style_error(line_number, category, confidence, message): + self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message)) + checker = xml.XMLChecker('foo.xml', handle_style_error) + checker.check(xml_data.split('\n')) + + def assert_error(self, expected_line_number, expected_category, xml_data): + def handle_style_error(line_number, category, confidence, message): + self.had_error = True + self.assertEquals(expected_line_number, line_number) + self.assertEquals(expected_category, category) + checker = xml.XMLChecker('foo.xml', handle_style_error) + checker.check(xml_data.split('\n')) + self.assertTrue(self.had_error) + + def mock_handle_style_error(self): + pass + + def test_conflict_marker(self): + self.assert_error(1, 'xml/syntax', '<<<<<<< HEAD\n<foo>\n</foo>\n') + + def test_extra_closing_tag(self): + self.assert_error(3, 'xml/syntax', '<foo>\n</foo>\n</foo>\n') + + def test_init(self): + checker = xml.XMLChecker('foo.xml', self.mock_handle_style_error) + self.assertEquals(checker.file_path, 'foo.xml') + self.assertEquals(checker.handle_style_error, self.mock_handle_style_error) + + def test_missing_closing_tag(self): + self.assert_error(3, 'xml/syntax', '<foo>\n<bar>\n</foo>\n') + + def test_no_error(self): + checker = xml.XMLChecker('foo.xml', self.assert_no_error) + checker.check(['<foo>', '</foo>']) + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/style/error_handlers.py b/Tools/Scripts/webkitpy/style/error_handlers.py new file mode 100644 index 0000000..0bede24 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/error_handlers.py @@ -0,0 +1,159 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines style error handler classes. + +A style error handler is a function to call when a style error is +found. Style error handlers can also have state. A class that represents +a style error handler should implement the following methods. + +Methods: + + __call__(self, line_number, category, confidence, message): + + Handle the occurrence of a style error. + + Check whether the error is reportable. If so, increment the total + error count and report the details. Note that error reporting can + be suppressed after reaching a certain number of reports. + + Args: + line_number: The integer line number of the line containing the error. + category: The name of the category of the error, for example + "whitespace/newline". + confidence: An integer between 1 and 5 inclusive that represents the + application's level of confidence in the error. The value + 5 means that we are certain of the problem, and the + value 1 means that it could be a legitimate construct. + message: The error message to report. + +""" + + +import sys + + +class DefaultStyleErrorHandler(object): + + """The default style error handler.""" + + def __init__(self, file_path, configuration, increment_error_count, + line_numbers=None): + """Create a default style error handler. + + Args: + file_path: The path to the file containing the error. This + is used for reporting to the user. + configuration: A StyleProcessorConfiguration instance. + increment_error_count: A function that takes no arguments and + increments the total count of reportable + errors. + line_numbers: An array of line numbers of the lines for which + style errors should be reported, or None if errors + for all lines should be reported. When it is not + None, this array normally contains the line numbers + corresponding to the modified lines of a patch. + + """ + if line_numbers is not None: + line_numbers = set(line_numbers) + + self._file_path = file_path + self._configuration = configuration + self._increment_error_count = increment_error_count + self._line_numbers = line_numbers + + # A string to integer dictionary cache of the number of reportable + # errors per category passed to this instance. + self._category_totals = {} + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this instance is equal to another.""" + if self._configuration != other._configuration: + return False + if self._file_path != other._file_path: + return False + if self._increment_error_count != other._increment_error_count: + return False + if self._line_numbers != other._line_numbers: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce __ne__ from __eq__. 
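+        # Delegating to __eq__() here keeps "!=" consistent with "==";
+        # without an explicit __ne__, Python 2 would fall back to the
+        # default object comparison for "!=".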
+ return not self.__eq__(other) + + def _add_reportable_error(self, category): + """Increment the error count and return the new category total.""" + self._increment_error_count() # Increment the total. + + # Increment the category total. + if not category in self._category_totals: + self._category_totals[category] = 1 + else: + self._category_totals[category] += 1 + + return self._category_totals[category] + + def _max_reports(self, category): + """Return the maximum number of errors to report.""" + if not category in self._configuration.max_reports_per_category: + return None + return self._configuration.max_reports_per_category[category] + + def __call__(self, line_number, category, confidence, message): + """Handle the occurrence of a style error. + + See the docstring of this module for more information. + + """ + if (self._line_numbers is not None and + line_number not in self._line_numbers): + # Then the error occurred in a line that was not modified, so + # the error is not reportable. + return + + if not self._configuration.is_reportable(category=category, + confidence_in_error=confidence, + file_path=self._file_path): + return + + category_total = self._add_reportable_error(category) + + max_reports = self._max_reports(category) + + if (max_reports is not None) and (category_total > max_reports): + # Then suppress displaying the error. + return + + self._configuration.write_style_error(category=category, + confidence_in_error=confidence, + file_path=self._file_path, + line_number=line_number, + message=message) + + if category_total == max_reports: + self._configuration.stderr_write("Suppressing further [%s] reports " + "for this file.\n" % category) diff --git a/Tools/Scripts/webkitpy/style/error_handlers_unittest.py b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py new file mode 100644 index 0000000..23619cc --- /dev/null +++ b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py @@ -0,0 +1,187 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Unit tests for error_handlers.py.""" + + +import unittest + +from checker import StyleProcessorConfiguration +from error_handlers import DefaultStyleErrorHandler +from filter import FilterConfiguration + + +class DefaultStyleErrorHandlerTest(unittest.TestCase): + + """Tests the DefaultStyleErrorHandler class.""" + + def setUp(self): + self._error_messages = [] + self._error_count = 0 + + _category = "whitespace/tab" + """The category name for the tests in this class.""" + + _file_path = "foo.h" + """The file path for the tests in this class.""" + + def _mock_increment_error_count(self): + self._error_count += 1 + + def _mock_stderr_write(self, message): + self._error_messages.append(message) + + def _style_checker_configuration(self): + """Return a StyleProcessorConfiguration instance for testing.""" + base_rules = ["-whitespace", "+whitespace/tab"] + filter_configuration = FilterConfiguration(base_rules=base_rules) + + return StyleProcessorConfiguration( + filter_configuration=filter_configuration, + max_reports_per_category={"whitespace/tab": 2}, + min_confidence=3, + output_format="vs7", + stderr_write=self._mock_stderr_write) + + def _error_handler(self, configuration, line_numbers=None): + return DefaultStyleErrorHandler(configuration=configuration, + file_path=self._file_path, + increment_error_count=self._mock_increment_error_count, + line_numbers=line_numbers) + + def _check_initialized(self): + """Check that count and error messages are initialized.""" + self.assertEquals(0, self._error_count) + self.assertEquals(0, len(self._error_messages)) + + def _call_error_handler(self, handle_error, confidence, line_number=100): + """Call the given error handler with a test error.""" + handle_error(line_number=line_number, + category=self._category, + confidence=confidence, + message="message") + + def test_eq__true_return_value(self): + """Test the __eq__() method for the return value of True.""" + handler1 = self._error_handler(configuration=None) + handler2 = self._error_handler(configuration=None) + + self.assertTrue(handler1.__eq__(handler2)) + + def test_eq__false_return_value(self): + """Test the __eq__() method for the return value of False.""" + def make_handler(configuration=self._style_checker_configuration(), + file_path='foo.txt', increment_error_count=lambda: True, + line_numbers=[100]): + return DefaultStyleErrorHandler(configuration=configuration, + file_path=file_path, + increment_error_count=increment_error_count, + line_numbers=line_numbers) + + handler = make_handler() + + # Establish a baseline for our comparisons below. + self.assertTrue(handler.__eq__(make_handler())) + + # Verify that a difference in any argument causes equality to fail. + self.assertFalse(handler.__eq__(make_handler(configuration=None))) + self.assertFalse(handler.__eq__(make_handler(file_path='bar.txt'))) + self.assertFalse(handler.__eq__(make_handler(increment_error_count=None))) + self.assertFalse(handler.__eq__(make_handler(line_numbers=[50]))) + + def test_ne(self): + """Test the __ne__() method.""" + # By default, __ne__ always returns true on different objects. + # Thus, check just the distinguishing case to verify that the + # code defines __ne__. 
+ handler1 = self._error_handler(configuration=None) + handler2 = self._error_handler(configuration=None) + + self.assertFalse(handler1.__ne__(handler2)) + + def test_non_reportable_error(self): + """Test __call__() with a non-reportable error.""" + self._check_initialized() + configuration = self._style_checker_configuration() + + confidence = 1 + # Confirm the error is not reportable. + self.assertFalse(configuration.is_reportable(self._category, + confidence, + self._file_path)) + error_handler = self._error_handler(configuration) + self._call_error_handler(error_handler, confidence) + + self.assertEquals(0, self._error_count) + self.assertEquals([], self._error_messages) + + # Also serves as a reportable error test. + def test_max_reports_per_category(self): + """Test error report suppression in __call__() method.""" + self._check_initialized() + configuration = self._style_checker_configuration() + error_handler = self._error_handler(configuration) + + confidence = 5 + + # First call: usual reporting. + self._call_error_handler(error_handler, confidence) + self.assertEquals(1, self._error_count) + self.assertEquals(1, len(self._error_messages)) + self.assertEquals(self._error_messages, + ["foo.h(100): message [whitespace/tab] [5]\n"]) + + # Second call: suppression message reported. + self._call_error_handler(error_handler, confidence) + # The "Suppressing further..." message counts as an additional + # message (but not as an addition to the error count). + self.assertEquals(2, self._error_count) + self.assertEquals(3, len(self._error_messages)) + self.assertEquals(self._error_messages[-2], + "foo.h(100): message [whitespace/tab] [5]\n") + self.assertEquals(self._error_messages[-1], + "Suppressing further [whitespace/tab] reports " + "for this file.\n") + + # Third call: no report. + self._call_error_handler(error_handler, confidence) + self.assertEquals(3, self._error_count) + self.assertEquals(3, len(self._error_messages)) + + def test_line_numbers(self): + """Test the line_numbers parameter.""" + self._check_initialized() + configuration = self._style_checker_configuration() + error_handler = self._error_handler(configuration, + line_numbers=[50]) + confidence = 5 + + # Error on non-modified line: no error. + self._call_error_handler(error_handler, confidence, line_number=60) + self.assertEquals(0, self._error_count) + self.assertEquals([], self._error_messages) + + # Error on modified line: error. + self._call_error_handler(error_handler, confidence, line_number=50) + self.assertEquals(1, self._error_count) + self.assertEquals(self._error_messages, + ["foo.h(50): message [whitespace/tab] [5]\n"]) diff --git a/Tools/Scripts/webkitpy/style/filereader.py b/Tools/Scripts/webkitpy/style/filereader.py new file mode 100644 index 0000000..1a24cb5 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/filereader.py @@ -0,0 +1,162 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) 2010 ProFUSION embedded systems +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports reading and processing text files.""" + +import codecs +import logging +import os +import sys + + +_log = logging.getLogger(__name__) + + +class TextFileReader(object): + + """Supports reading and processing text files. + + Attributes: + file_count: The total number of files passed to this instance + for processing, including non-text files and files + that should be skipped. + delete_only_file_count: The total number of files that are not + processed this instance actually because + the files don't have any modified lines + but should be treated as processed. + + """ + + def __init__(self, processor): + """Create an instance. + + Arguments: + processor: A ProcessorBase instance. + + """ + self._processor = processor + self.file_count = 0 + self.delete_only_file_count = 0 + + def _read_lines(self, file_path): + """Read the file at a path, and return its lines. + + Raises: + IOError: If the file does not exist or cannot be read. + + """ + # Support the UNIX convention of using "-" for stdin. + if file_path == '-': + file = codecs.StreamReaderWriter(sys.stdin, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace') + else: + # We do not open the file with universal newline support + # (codecs does not support it anyway), so the resulting + # lines contain trailing "\r" characters if we are reading + # a file with CRLF endings. + file = codecs.open(file_path, 'r', 'utf8', 'replace') + + try: + contents = file.read() + finally: + file.close() + + lines = contents.split('\n') + return lines + + def process_file(self, file_path, **kwargs): + """Process the given file by calling the processor's process() method. + + Args: + file_path: The path of the file to process. + **kwargs: Any additional keyword parameters that should be passed + to the processor's process() method. The process() + method should support these keyword arguments. + + Raises: + SystemExit: If no file at file_path exists. + + """ + self.file_count += 1 + + if not os.path.exists(file_path) and file_path != "-": + _log.error("File does not exist: '%s'" % file_path) + sys.exit(1) + + if not self._processor.should_process(file_path): + _log.debug("Skipping file: '%s'" % file_path) + return + _log.debug("Processing file: '%s'" % file_path) + + try: + lines = self._read_lines(file_path) + except IOError, err: + message = ("Could not read file. 
Skipping: '%s'\n %s" + % (file_path, err)) + _log.warn(message) + return + + self._processor.process(lines, file_path, **kwargs) + + def _process_directory(self, directory): + """Process all files in the given directory, recursively. + + Args: + directory: A directory path. + + """ + for dir_path, dir_names, file_names in os.walk(directory): + for file_name in file_names: + file_path = os.path.join(dir_path, file_name) + self.process_file(file_path) + + def process_paths(self, paths): + """Process the given file and directory paths. + + Args: + paths: A list of file and directory paths. + + """ + for path in paths: + if os.path.isdir(path): + self._process_directory(directory=path) + else: + self.process_file(path) + + def count_delete_only_file(self): + """Count up files that contains only deleted lines. + + Files which has no modified or newly-added lines don't need + to check style, but should be treated as checked. For that + purpose, we just count up the number of such files. + """ + self.delete_only_file_count += 1 diff --git a/Tools/Scripts/webkitpy/style/filereader_unittest.py b/Tools/Scripts/webkitpy/style/filereader_unittest.py new file mode 100644 index 0000000..6328337 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/filereader_unittest.py @@ -0,0 +1,166 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains unit tests for filereader.py.""" + +from __future__ import with_statement + +import codecs +import os +import shutil +import tempfile +import unittest + +from webkitpy.common.system.logtesting import LoggingTestCase +from webkitpy.style.checker import ProcessorBase +from webkitpy.style.filereader import TextFileReader + + +class TextFileReaderTest(LoggingTestCase): + + class MockProcessor(ProcessorBase): + + """A processor for test purposes. + + This processor simply records the parameters passed to its process() + method for later checking by the unittest test methods. 
+ + """ + + def __init__(self): + self.processed = [] + """The parameters passed for all calls to the process() method.""" + + def should_process(self, file_path): + return not file_path.endswith('should_not_process.txt') + + def process(self, lines, file_path, test_kwarg=None): + self.processed.append((lines, file_path, test_kwarg)) + + def setUp(self): + LoggingTestCase.setUp(self) + processor = TextFileReaderTest.MockProcessor() + + temp_dir = tempfile.mkdtemp() + + self._file_reader = TextFileReader(processor) + self._processor = processor + self._temp_dir = temp_dir + + def tearDown(self): + LoggingTestCase.tearDown(self) + shutil.rmtree(self._temp_dir) + + def _create_file(self, rel_path, text, encoding="utf-8"): + """Create a file with given text and return the path to the file.""" + # FIXME: There are better/more secure APIs for creatin tmp file paths. + file_path = os.path.join(self._temp_dir, rel_path) + with codecs.open(file_path, "w", encoding) as file: + file.write(text) + return file_path + + def _passed_to_processor(self): + """Return the parameters passed to MockProcessor.process().""" + return self._processor.processed + + def _assert_file_reader(self, passed_to_processor, file_count): + """Assert the state of the file reader.""" + self.assertEquals(passed_to_processor, self._passed_to_processor()) + self.assertEquals(file_count, self._file_reader.file_count) + + def test_process_file__does_not_exist(self): + try: + self._file_reader.process_file('does_not_exist.txt') + except SystemExit, err: + self.assertEquals(str(err), '1') + else: + self.fail('No Exception raised.') + self._assert_file_reader([], 1) + self.assertLog(["ERROR: File does not exist: 'does_not_exist.txt'\n"]) + + def test_process_file__is_dir(self): + temp_dir = os.path.join(self._temp_dir, 'test_dir') + os.mkdir(temp_dir) + + self._file_reader.process_file(temp_dir) + + # Because the log message below contains exception text, it is + # possible that the text varies across platforms. For this reason, + # we check only the portion of the log message that we control, + # namely the text at the beginning. + log_messages = self.logMessages() + # We remove the message we are looking at to prevent the tearDown() + # from raising an exception when it asserts that no log messages + # remain. + message = log_messages.pop() + + self.assertTrue(message.startswith('WARNING: Could not read file. 
' + "Skipping: '%s'\n " % temp_dir)) + + self._assert_file_reader([], 1) + + def test_process_file__should_not_process(self): + file_path = self._create_file('should_not_process.txt', 'contents') + + self._file_reader.process_file(file_path) + self._assert_file_reader([], 1) + + def test_process_file__multiple_lines(self): + file_path = self._create_file('foo.txt', 'line one\r\nline two\n') + + self._file_reader.process_file(file_path) + processed = [(['line one\r', 'line two', ''], file_path, None)] + self._assert_file_reader(processed, 1) + + def test_process_file__file_stdin(self): + file_path = self._create_file('-', 'file contents') + + self._file_reader.process_file(file_path=file_path, test_kwarg='foo') + processed = [(['file contents'], file_path, 'foo')] + self._assert_file_reader(processed, 1) + + def test_process_file__with_kwarg(self): + file_path = self._create_file('foo.txt', 'file contents') + + self._file_reader.process_file(file_path=file_path, test_kwarg='foo') + processed = [(['file contents'], file_path, 'foo')] + self._assert_file_reader(processed, 1) + + def test_process_paths(self): + # We test a list of paths that contains both a file and a directory. + dir = os.path.join(self._temp_dir, 'foo_dir') + os.mkdir(dir) + + file_path1 = self._create_file('file1.txt', 'foo') + + rel_path = os.path.join('foo_dir', 'file2.txt') + file_path2 = self._create_file(rel_path, 'bar') + + self._file_reader.process_paths([dir, file_path1]) + processed = [(['bar'], file_path2, None), + (['foo'], file_path1, None)] + self._assert_file_reader(processed, 2) + + def test_count_delete_only_file(self): + self._file_reader.count_delete_only_file() + delete_only_file_count = self._file_reader.delete_only_file_count + self.assertEquals(delete_only_file_count, 1) diff --git a/Tools/Scripts/webkitpy/style/filter.py b/Tools/Scripts/webkitpy/style/filter.py new file mode 100644 index 0000000..608a9e6 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/filter.py @@ -0,0 +1,278 @@ +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Contains filter-related code.""" + + +def validate_filter_rules(filter_rules, all_categories): + """Validate the given filter rules, and raise a ValueError if not valid. 
+ + Args: + filter_rules: A list of boolean filter rules, for example-- + ["-whitespace", "+whitespace/braces"] + all_categories: A list of all available category names, for example-- + ["whitespace/tabs", "whitespace/braces"] + + Raises: + ValueError: An error occurs if a filter rule does not begin + with "+" or "-" or if a filter rule does not match + the beginning of some category name in the list + of all available categories. + + """ + for rule in filter_rules: + if not (rule.startswith('+') or rule.startswith('-')): + raise ValueError('Invalid filter rule "%s": every rule ' + "must start with + or -." % rule) + + for category in all_categories: + if category.startswith(rule[1:]): + break + else: + raise ValueError('Suspected incorrect filter rule "%s": ' + "the rule does not match the beginning " + "of any category name." % rule) + + +class _CategoryFilter(object): + + """Filters whether to check style categories.""" + + def __init__(self, filter_rules=None): + """Create a category filter. + + Args: + filter_rules: A list of strings that are filter rules, which + are strings beginning with the plus or minus + symbol (+/-). The list should include any + default filter rules at the beginning. + Defaults to the empty list. + + Raises: + ValueError: Invalid filter rule if a rule does not start with + plus ("+") or minus ("-"). + + """ + if filter_rules is None: + filter_rules = [] + + self._filter_rules = filter_rules + self._should_check_category = {} # Cached dictionary of category to True/False + + def __str__(self): + return ",".join(self._filter_rules) + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this CategoryFilter instance is equal to another.""" + return self._filter_rules == other._filter_rules + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce from __eq__(). + return not (self == other) + + def should_check(self, category): + """Return whether the category should be checked. + + The rules for determining whether a category should be checked + are as follows. By default all categories should be checked. + Then apply the filter rules in order from first to last, with + later flags taking precedence. + + A filter rule applies to a category if the string after the + leading plus/minus (+/-) matches the beginning of the category + name. A plus (+) means the category should be checked, while a + minus (-) means the category should not be checked. + + """ + if category in self._should_check_category: + return self._should_check_category[category] + + should_check = True # All categories checked by default. + for rule in self._filter_rules: + if not category.startswith(rule[1:]): + continue + should_check = rule.startswith('+') + self._should_check_category[category] = should_check # Update cache. + return should_check + + +class FilterConfiguration(object): + + """Supports filtering with path-specific and user-specified rules.""" + + def __init__(self, base_rules=None, path_specific=None, user_rules=None): + """Create a FilterConfiguration instance. + + Args: + base_rules: The starting list of filter rules to use for + processing. The default is the empty list, which + by itself would mean that all categories should be + checked. + + path_specific: A list of (sub_paths, path_rules) pairs + that stores the path-specific filter rules for + appending to the base rules. + The "sub_paths" value is a list of path + substrings. 
If a file path contains one of the + substrings, then the corresponding path rules + are appended. The first substring match takes + precedence, i.e. only the first match triggers + an append. + The "path_rules" value is a list of filter + rules that can be appended to the base rules. + + user_rules: A list of filter rules that is always appended + to the base rules and any path rules. In other + words, the user rules take precedence over the + everything. In practice, the user rules are + provided by the user from the command line. + + """ + if base_rules is None: + base_rules = [] + if path_specific is None: + path_specific = [] + if user_rules is None: + user_rules = [] + + self._base_rules = base_rules + self._path_specific = path_specific + self._path_specific_lower = None + """The backing store for self._get_path_specific_lower().""" + + self._user_rules = user_rules + + self._path_rules_to_filter = {} + """Cached dictionary of path rules to CategoryFilter instance.""" + + # The same CategoryFilter instance can be shared across + # multiple keys in this dictionary. This allows us to take + # greater advantage of the caching done by + # CategoryFilter.should_check(). + self._path_to_filter = {} + """Cached dictionary of file path to CategoryFilter instance.""" + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this FilterConfiguration is equal to another.""" + if self._base_rules != other._base_rules: + return False + if self._path_specific != other._path_specific: + return False + if self._user_rules != other._user_rules: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce this from __eq__(). + return not self.__eq__(other) + + # We use the prefix "_get" since the name "_path_specific_lower" + # is already taken up by the data attribute backing store. + def _get_path_specific_lower(self): + """Return a copy of self._path_specific with the paths lower-cased.""" + if self._path_specific_lower is None: + self._path_specific_lower = [] + for (sub_paths, path_rules) in self._path_specific: + sub_paths = map(str.lower, sub_paths) + self._path_specific_lower.append((sub_paths, path_rules)) + return self._path_specific_lower + + def _path_rules_from_path(self, path): + """Determine the path-specific rules to use, and return as a tuple. + + This method returns a tuple rather than a list so the return + value can be passed to _filter_from_path_rules() without change. + + """ + path = path.lower() + for (sub_paths, path_rules) in self._get_path_specific_lower(): + for sub_path in sub_paths: + if path.find(sub_path) > -1: + return tuple(path_rules) + return () # Default to the empty tuple. + + def _filter_from_path_rules(self, path_rules): + """Return the CategoryFilter associated to the given path rules. + + Args: + path_rules: A tuple of path rules. We require a tuple rather + than a list so the value can be used as a dictionary + key in self._path_rules_to_filter. + + """ + # We reuse the same CategoryFilter where possible to take + # advantage of the caching they do. 
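+        # Because of this cache, files that share the same path-specific
+        # rules also share one _CategoryFilter instance and its
+        # should_check() cache.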
+ if path_rules not in self._path_rules_to_filter: + rules = list(self._base_rules) # Make a copy + rules.extend(path_rules) + rules.extend(self._user_rules) + self._path_rules_to_filter[path_rules] = _CategoryFilter(rules) + + return self._path_rules_to_filter[path_rules] + + def _filter_from_path(self, path): + """Return the CategoryFilter associated to a path.""" + if path not in self._path_to_filter: + path_rules = self._path_rules_from_path(path) + filter = self._filter_from_path_rules(path_rules) + self._path_to_filter[path] = filter + + return self._path_to_filter[path] + + def should_check(self, category, path): + """Return whether the given category should be checked. + + This method determines whether a category should be checked + by checking the category name against the filter rules for + the given path. + + For a given path, the filter rules are the combination of + the base rules, the path-specific rules, and the user-provided + rules -- in that order. As we will describe below, later rules + in the list take precedence. The path-specific rules are the + rules corresponding to the first element of the "path_specific" + parameter that contains a string case-insensitively matching + some substring of the path. If there is no such element, + there are no path-specific rules for that path. + + Given a list of filter rules, the logic for determining whether + a category should be checked is as follows. By default all + categories should be checked. Then apply the filter rules in + order from first to last, with later flags taking precedence. + + A filter rule applies to a category if the string after the + leading plus/minus (+/-) matches the beginning of the category + name. A plus (+) means the category should be checked, while a + minus (-) means the category should not be checked. + + Args: + category: The category name. + path: The path of the file being checked. + + """ + return self._filter_from_path(path).should_check(category) + diff --git a/Tools/Scripts/webkitpy/style/filter_unittest.py b/Tools/Scripts/webkitpy/style/filter_unittest.py new file mode 100644 index 0000000..7b8a5402 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/filter_unittest.py @@ -0,0 +1,256 @@ +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for filter.py.""" + +import unittest + +from filter import _CategoryFilter as CategoryFilter +from filter import validate_filter_rules +from filter import FilterConfiguration + +# On Testing __eq__() and __ne__(): +# +# In the tests below, we deliberately do not use assertEquals() or +# assertNotEquals() to test __eq__() or __ne__(). We do this to be +# very explicit about what we are testing, especially in the case +# of assertNotEquals(). +# +# Part of the reason is that it is not immediately clear what +# expression the unittest module uses to assert "not equals" -- the +# negation of __eq__() or __ne__(), which are not necessarily +# equivalent expresions in Python. For example, from Python's "Data +# Model" documentation-- +# +# "There are no implied relationships among the comparison +# operators. The truth of x==y does not imply that x!=y is +# false. Accordingly, when defining __eq__(), one should +# also define __ne__() so that the operators will behave as +# expected." +# +# (from http://docs.python.org/reference/datamodel.html#object.__ne__ ) + +class ValidateFilterRulesTest(unittest.TestCase): + + """Tests validate_filter_rules() function.""" + + def test_validate_filter_rules(self): + all_categories = ["tabs", "whitespace", "build/include"] + + bad_rules = [ + "tabs", + "*tabs", + " tabs", + " +tabs", + "+whitespace/newline", + "+xxx", + ] + + good_rules = [ + "+tabs", + "-tabs", + "+build" + ] + + for rule in bad_rules: + self.assertRaises(ValueError, validate_filter_rules, + [rule], all_categories) + + for rule in good_rules: + # This works: no error. + validate_filter_rules([rule], all_categories) + + +class CategoryFilterTest(unittest.TestCase): + + """Tests CategoryFilter class.""" + + def test_init(self): + """Test __init__ method.""" + # Test that the attributes are getting set correctly. + filter = CategoryFilter(["+"]) + self.assertEquals(["+"], filter._filter_rules) + + def test_init_default_arguments(self): + """Test __init__ method default arguments.""" + filter = CategoryFilter() + self.assertEquals([], filter._filter_rules) + + def test_str(self): + """Test __str__ "to string" operator.""" + filter = CategoryFilter(["+a", "-b"]) + self.assertEquals(str(filter), "+a,-b") + + def test_eq(self): + """Test __eq__ equality function.""" + filter1 = CategoryFilter(["+a", "+b"]) + filter2 = CategoryFilter(["+a", "+b"]) + filter3 = CategoryFilter(["+b", "+a"]) + + # See the notes at the top of this module about testing + # __eq__() and __ne__(). + self.assertTrue(filter1.__eq__(filter2)) + self.assertFalse(filter1.__eq__(filter3)) + + def test_ne(self): + """Test __ne__ inequality function.""" + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + # + # Also, see the notes at the top of this module about testing + # __eq__() and __ne__(). 
+ self.assertFalse(CategoryFilter().__ne__(CategoryFilter())) + + def test_should_check(self): + """Test should_check() method.""" + filter = CategoryFilter() + self.assertTrue(filter.should_check("everything")) + # Check a second time to exercise cache. + self.assertTrue(filter.should_check("everything")) + + filter = CategoryFilter(["-"]) + self.assertFalse(filter.should_check("anything")) + # Check a second time to exercise cache. + self.assertFalse(filter.should_check("anything")) + + filter = CategoryFilter(["-", "+ab"]) + self.assertTrue(filter.should_check("abc")) + self.assertFalse(filter.should_check("a")) + + filter = CategoryFilter(["+", "-ab"]) + self.assertFalse(filter.should_check("abc")) + self.assertTrue(filter.should_check("a")) + + +class FilterConfigurationTest(unittest.TestCase): + + """Tests FilterConfiguration class.""" + + def _config(self, base_rules, path_specific, user_rules): + """Return a FilterConfiguration instance.""" + return FilterConfiguration(base_rules=base_rules, + path_specific=path_specific, + user_rules=user_rules) + + def test_init(self): + """Test __init__ method.""" + # Test that the attributes are getting set correctly. + # We use parameter values that are different from the defaults. + base_rules = ["-"] + path_specific = [(["path"], ["+a"])] + user_rules = ["+"] + + config = self._config(base_rules, path_specific, user_rules) + + self.assertEquals(base_rules, config._base_rules) + self.assertEquals(path_specific, config._path_specific) + self.assertEquals(user_rules, config._user_rules) + + def test_default_arguments(self): + # Test that the attributes are getting set correctly to the defaults. + config = FilterConfiguration() + + self.assertEquals([], config._base_rules) + self.assertEquals([], config._path_specific) + self.assertEquals([], config._user_rules) + + def test_eq(self): + """Test __eq__ method.""" + # See the notes at the top of this module about testing + # __eq__() and __ne__(). + self.assertTrue(FilterConfiguration().__eq__(FilterConfiguration())) + + # Verify that a difference in any argument causes equality to fail. + config = FilterConfiguration() + + # These parameter values are different from the defaults. + base_rules = ["-"] + path_specific = [(["path"], ["+a"])] + user_rules = ["+"] + + self.assertFalse(config.__eq__(FilterConfiguration( + base_rules=base_rules))) + self.assertFalse(config.__eq__(FilterConfiguration( + path_specific=path_specific))) + self.assertFalse(config.__eq__(FilterConfiguration( + user_rules=user_rules))) + + def test_ne(self): + """Test __ne__ method.""" + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + # + # Also, see the notes at the top of this module about testing + # __eq__() and __ne__(). 
+ self.assertFalse(FilterConfiguration().__ne__(FilterConfiguration())) + + def test_base_rules(self): + """Test effect of base_rules on should_check().""" + base_rules = ["-b"] + path_specific = [] + user_rules = [] + + config = self._config(base_rules, path_specific, user_rules) + + self.assertTrue(config.should_check("a", "path")) + self.assertFalse(config.should_check("b", "path")) + + def test_path_specific(self): + """Test effect of path_rules_specifier on should_check().""" + base_rules = ["-"] + path_specific = [(["path1"], ["+b"]), + (["path2"], ["+c"])] + user_rules = [] + + config = self._config(base_rules, path_specific, user_rules) + + self.assertFalse(config.should_check("c", "path1")) + self.assertTrue(config.should_check("c", "path2")) + # Test that first match takes precedence. + self.assertFalse(config.should_check("c", "path2/path1")) + + def test_path_with_different_case(self): + """Test a path that differs only in case.""" + base_rules = ["-"] + path_specific = [(["Foo/"], ["+whitespace"])] + user_rules = [] + + config = self._config(base_rules, path_specific, user_rules) + + self.assertFalse(config.should_check("whitespace", "Fooo/bar.txt")) + self.assertTrue(config.should_check("whitespace", "Foo/bar.txt")) + # Test different case. + self.assertTrue(config.should_check("whitespace", "FOO/bar.txt")) + + def test_user_rules(self): + """Test effect of user_rules on should_check().""" + base_rules = ["-"] + path_specific = [] + user_rules = ["+b"] + + config = self._config(base_rules, path_specific, user_rules) + + self.assertFalse(config.should_check("a", "path")) + self.assertTrue(config.should_check("b", "path")) + diff --git a/Tools/Scripts/webkitpy/style/main.py b/Tools/Scripts/webkitpy/style/main.py new file mode 100644 index 0000000..83c0323 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/main.py @@ -0,0 +1,130 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging +import os +import sys + +from webkitpy.common.system.ospath import relpath as _relpath + + +_log = logging.getLogger(__name__) + + +def change_directory(checkout_root, paths, mock_os=None): + """Change the working directory to the WebKit checkout root, if possible. 
+ + If every path in the paths parameter is below the checkout root (or if + the paths parameter is empty or None), this method changes the current + working directory to the checkout root and converts the paths parameter + as described below. + This allows the paths being checked to be displayed relative to the + checkout root, and for path-specific style checks to work as expected. + Path-specific checks include whether files should be skipped, whether + custom style rules should apply to certain files, etc. + If the checkout root is None or the empty string, this method returns + the paths parameter unchanged. + + Returns: + paths: A copy of the paths parameter -- possibly converted, as follows. + If this method changed the current working directory to the + checkout root, then the list is the paths parameter converted to + normalized paths relative to the checkout root. Otherwise, the + paths are not converted. + + Args: + paths: A list of paths to the files that should be checked for style. + This argument can be None or the empty list if a git commit + or all changes under the checkout root should be checked. + checkout_root: The path to the root of the WebKit checkout, or None or + the empty string if no checkout could be detected. + mock_os: A replacement module for unit testing. Defaults to os. + + """ + os_module = os if mock_os is None else mock_os + + if paths is not None: + paths = list(paths) + + if not checkout_root: + if not paths: + raise Exception("The paths parameter must be non-empty if " + "there is no checkout root.") + + # FIXME: Consider trying to detect the checkout root for each file + # being checked rather than only trying to detect the checkout + # root for the current working directory. This would allow + # files to be checked correctly even if the script is being + # run from outside any WebKit checkout. + # + # Moreover, try to find the "source root" for each file + # using path-based heuristics rather than using only the + # presence of a WebKit checkout. For example, we could + # examine parent directories until a directory is found + # containing JavaScriptCore, WebCore, WebKit, Websites, + # and Tools. + # Then log an INFO message saying that a source root not + # in a WebKit checkout was found. This will allow us to check + # the style of non-scm copies of the source tree (e.g. + # nightlies). + _log.warn("WebKit checkout root not found:\n" + " Path-dependent style checks may not work correctly.\n" + " See the help documentation for more info.") + + return paths + + if paths: + # Then try converting all of the paths to paths relative to + # the checkout root. + rel_paths = [] + for path in paths: + rel_path = _relpath(path, checkout_root) + if rel_path is None: + # Then the path is not below the checkout root. Since all + # paths should be interpreted relative to the same root, + # do not interpret any of the paths as relative to the + # checkout root. Interpret all of them relative to the + # current working directory, and do not change the current + # working directory. + _log.warn( +"""Path-dependent style checks may not work correctly: + + One of the given paths is outside the WebKit checkout of the current + working directory: + + Path: %s + Checkout root: %s + + Pass only files below the checkout root to ensure correct results. + See the help documentation for more info. +""" + % (path, checkout_root)) + + return paths + rel_paths.append(rel_path) + # If we got here, the conversion was successful. 
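+        # Illustrative example: with a checkout root of "/WebKit", an input
+        # path such as "/WebKit/Tools/foo.txt" ends up in rel_paths as
+        # "Tools/foo.txt".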
+ paths = rel_paths + + _log.debug("Changing to checkout root: " + checkout_root) + os_module.chdir(checkout_root) + + return paths diff --git a/Tools/Scripts/webkitpy/style/main_unittest.py b/Tools/Scripts/webkitpy/style/main_unittest.py new file mode 100644 index 0000000..fe448f5 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/main_unittest.py @@ -0,0 +1,124 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for main.py.""" + +import os +import unittest + +from main import change_directory +from webkitpy.style_references import LogTesting + + +class ChangeDirectoryTest(unittest.TestCase): + + """Tests change_directory().""" + + _original_directory = "/original" + _checkout_root = "/WebKit" + + class _MockOs(object): + + """A mock os module for unit testing.""" + + def __init__(self, test_case): + self._test_case = test_case + self._current_directory = \ + ChangeDirectoryTest._original_directory + + def chdir(self, current_directory): + self._current_directory = current_directory + + def assertCurrentDirectory(self, expected_directory): + self._test_case.assertEquals(expected_directory, + self._current_directory) + + def setUp(self): + self._log = LogTesting.setUp(self) + self._mock_os = self._MockOs(self) + + def tearDown(self): + self._log.tearDown() + + # This method is a convenient wrapper for change_working_directory() that + # passes the mock_os for this unit testing class. 
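+    # Note: the wrapped function is change_directory(), imported from
+    # main at the top of this file.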
+ def _change_directory(self, paths, checkout_root): + return change_directory(paths=paths, + checkout_root=checkout_root, + mock_os=self._mock_os) + + def _assert_result(self, actual_return_value, expected_return_value, + expected_log_messages, expected_current_directory): + self.assertEquals(actual_return_value, expected_return_value) + self._log.assertMessages(expected_log_messages) + self._mock_os.assertCurrentDirectory(expected_current_directory) + + def test_checkout_root_none_paths_none(self): + self.assertRaises(Exception, self._change_directory, + checkout_root=None, paths=None) + self._log.assertMessages([]) + self._mock_os.assertCurrentDirectory(self._original_directory) + + def test_checkout_root_none(self): + paths = self._change_directory(checkout_root=None, + paths=["path1"]) + log_messages = [ +"""WARNING: WebKit checkout root not found: + Path-dependent style checks may not work correctly. + See the help documentation for more info. +"""] + self._assert_result(paths, ["path1"], log_messages, + self._original_directory) + + def test_paths_none(self): + paths = self._change_directory(checkout_root=self._checkout_root, + paths=None) + self._assert_result(paths, None, [], self._checkout_root) + + def test_paths_convertible(self): + paths=["/WebKit/foo1.txt", + "/WebKit/foo2.txt"] + paths = self._change_directory(checkout_root=self._checkout_root, + paths=paths) + self._assert_result(paths, ["foo1.txt", "foo2.txt"], [], + self._checkout_root) + + def test_with_scm_paths_unconvertible(self): + paths=["/WebKit/foo1.txt", + "/outside/foo2.txt"] + paths = self._change_directory(checkout_root=self._checkout_root, + paths=paths) + log_messages = [ +"""WARNING: Path-dependent style checks may not work correctly: + + One of the given paths is outside the WebKit checkout of the current + working directory: + + Path: /outside/foo2.txt + Checkout root: /WebKit + + Pass only files below the checkout root to ensure correct results. + See the help documentation for more info. + +"""] + self._assert_result(paths, paths, log_messages, + self._original_directory) diff --git a/Tools/Scripts/webkitpy/style/optparser.py b/Tools/Scripts/webkitpy/style/optparser.py new file mode 100644 index 0000000..f4e9923 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/optparser.py @@ -0,0 +1,457 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Supports the parsing of command-line options for check-webkit-style.""" + +import logging +from optparse import OptionParser +import os.path +import sys + +from filter import validate_filter_rules +# This module should not import anything from checker.py. + +_log = logging.getLogger(__name__) + +_USAGE = """usage: %prog [--help] [options] [path1] [path2] ... + +Overview: + Check coding style according to WebKit style guidelines: + + http://webkit.org/coding/coding-style.html + + Path arguments can be files and directories. If neither a git commit nor + paths are passed, then all changes in your source control working directory + are checked. + +Style errors: + This script assigns to every style error a confidence score from 1-5 and + a category name. A confidence score of 5 means the error is certainly + a problem, and 1 means it could be fine. + + Category names appear in error messages in brackets, for example + [whitespace/indent]. See the options section below for an option that + displays all available categories and which are reported by default. + +Filters: + Use filters to configure what errors to report. Filters are specified using + a comma-separated list of boolean filter rules. The script reports errors + in a category if the category passes the filter, as described below. + + All categories start out passing. Boolean filter rules are then evaluated + from left to right, with later rules taking precedence. For example, the + rule "+foo" passes any category that starts with "foo", and "-foo" fails + any such category. The filter input "-whitespace,+whitespace/braces" fails + the category "whitespace/tab" and passes "whitespace/braces". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=-whitespace,-runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + +Paths: + Certain style-checking behavior depends on the paths relative to + the WebKit source root of the files being checked. For example, + certain types of errors may be handled differently for files in + WebKit/gtk/webkit/ (e.g. by suppressing "readability/naming" errors + for files in this directory). + + Consequently, if the path relative to the source root cannot be + determined for a file being checked, then style checking may not + work correctly for that file. This can occur, for example, if no + WebKit checkout can be found, or if the source root can be detected, + but one of the files being checked lies outside the source tree. + + If a WebKit checkout can be detected and all files being checked + are in the source tree, then all paths will automatically be + converted to paths relative to the source root prior to checking. + This is also useful for display purposes. + + Currently, this command can detect the source root only if the + command is run from within a WebKit checkout (i.e. if the current + working directory is below the root of a checkout). In particular, + it is not recommended to run this script from a directory outside + a checkout. 
+ + Running this script from a top-level WebKit source directory and + checking only files in the source tree will ensure that all style + checking behaves correctly -- whether or not a checkout can be + detected. This is because all file paths will already be relative + to the source root and so will not need to be converted.""" + +_EPILOG = ("This script can miss errors and does not substitute for " + "code review.") + + +# This class should not have knowledge of the flag key names. +class DefaultCommandOptionValues(object): + + """Stores the default check-webkit-style command-line options. + + Attributes: + output_format: A string that is the default output format. + min_confidence: An integer that is the default minimum confidence level. + + """ + + def __init__(self, min_confidence, output_format): + self.min_confidence = min_confidence + self.output_format = output_format + + +# This class should not have knowledge of the flag key names. +class CommandOptionValues(object): + + """Stores the option values passed by the user via the command line. + + Attributes: + is_verbose: A boolean value of whether verbose logging is enabled. + + filter_rules: The list of filter rules provided by the user. + These rules are appended to the base rules and + path-specific rules and so take precedence over + the base filter rules, etc. + + git_commit: A string representing the git commit to check. + The default is None. + + min_confidence: An integer between 1 and 5 inclusive that is the + minimum confidence level of style errors to report. + The default is 1, which reports all errors. + + output_format: A string that is the output format. The supported + output formats are "emacs" which emacs can parse + and "vs7" which Microsoft Visual Studio 7 can parse. + + """ + def __init__(self, + filter_rules=None, + git_commit=None, + diff_files=None, + is_verbose=False, + min_confidence=1, + output_format="emacs"): + if filter_rules is None: + filter_rules = [] + + if (min_confidence < 1) or (min_confidence > 5): + raise ValueError('Invalid "min_confidence" parameter: value ' + "must be an integer between 1 and 5 inclusive. " + 'Value given: "%s".' % min_confidence) + + if output_format not in ("emacs", "vs7"): + raise ValueError('Invalid "output_format" parameter: ' + 'value must be "emacs" or "vs7". ' + 'Value given: "%s".' % output_format) + + self.filter_rules = filter_rules + self.git_commit = git_commit + self.diff_files = diff_files + self.is_verbose = is_verbose + self.min_confidence = min_confidence + self.output_format = output_format + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this instance is equal to another.""" + if self.filter_rules != other.filter_rules: + return False + if self.git_commit != other.git_commit: + return False + if self.diff_files != other.diff_files: + return False + if self.is_verbose != other.is_verbose: + return False + if self.min_confidence != other.min_confidence: + return False + if self.output_format != other.output_format: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce this from __eq__(). 
+ return not self.__eq__(other) + + +class ArgumentPrinter(object): + + """Supports the printing of check-webkit-style command arguments.""" + + def _flag_pair_to_string(self, flag_key, flag_value): + return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value } + + def to_flag_string(self, options): + """Return a flag string of the given CommandOptionValues instance. + + This method orders the flag values alphabetically by the flag key. + + Args: + options: A CommandOptionValues instance. + + """ + flags = {} + flags['min-confidence'] = options.min_confidence + flags['output'] = options.output_format + # Only include the filter flag if user-provided rules are present. + filter_rules = options.filter_rules + if filter_rules: + flags['filter'] = ",".join(filter_rules) + if options.git_commit: + flags['git-commit'] = options.git_commit + if options.diff_files: + flags['diff_files'] = options.diff_files + + flag_string = '' + # Alphabetizing lets us unit test this method. + for key in sorted(flags.keys()): + flag_string += self._flag_pair_to_string(key, flags[key]) + ' ' + + return flag_string.strip() + + +class ArgumentParser(object): + + # FIXME: Move the documentation of the attributes to the __init__ + # docstring after making the attributes internal. + """Supports the parsing of check-webkit-style command arguments. + + Attributes: + create_usage: A function that accepts a DefaultCommandOptionValues + instance and returns a string of usage instructions. + Defaults to the function that generates the usage + string for check-webkit-style. + default_options: A DefaultCommandOptionValues instance that provides + the default values for options not explicitly + provided by the user. + stderr_write: A function that takes a string as a parameter and + serves as stderr.write. Defaults to sys.stderr.write. + This parameter should be specified only for unit tests. + + """ + + def __init__(self, + all_categories, + default_options, + base_filter_rules=None, + mock_stderr=None, + usage=None): + """Create an ArgumentParser instance. + + Args: + all_categories: The set of all available style categories. + default_options: See the corresponding attribute in the class + docstring. + Keyword Args: + base_filter_rules: The list of filter rules at the beginning of + the list of rules used to check style. This + list has the least precedence when checking + style and precedes any user-provided rules. + The class uses this parameter only for display + purposes to the user. Defaults to the empty list. + create_usage: See the documentation of the corresponding + attribute in the class docstring. + stderr_write: See the documentation of the corresponding + attribute in the class docstring. + + """ + if base_filter_rules is None: + base_filter_rules = [] + stderr = sys.stderr if mock_stderr is None else mock_stderr + if usage is None: + usage = _USAGE + + self._all_categories = all_categories + self._base_filter_rules = base_filter_rules + + # FIXME: Rename these to reflect that they are internal. + self.default_options = default_options + self.stderr_write = stderr.write + + self._parser = self._create_option_parser(stderr=stderr, + usage=usage, + default_min_confidence=self.default_options.min_confidence, + default_output_format=self.default_options.output_format) + + def _create_option_parser(self, stderr, usage, + default_min_confidence, default_output_format): + # Since the epilog string is short, it is not necessary to replace + # the epilog string with a mock epilog string when testing. 
+ # For this reason, we use _EPILOG directly rather than passing it
+ # as an argument like we do for the usage string.
+ parser = OptionParser(usage=usage, epilog=_EPILOG)
+
+ filter_help = ('set a filter to control what categories of style '
+ 'errors to report. Specify a filter using a comma-'
+ 'delimited list of boolean filter rules, for example '
+ '"--filter -whitespace,+whitespace/braces". To display '
+ 'all categories and which are enabled by default, pass '
+ """no value (e.g. '-f ""' or '--filter=').""")
+ parser.add_option("-f", "--filter-rules", metavar="RULES",
+ dest="filter_value", help=filter_help)
+
+ git_commit_help = ("check all changes in the given commit. "
+ "Use 'commit_id..' to check all changes after commit_id")
+ parser.add_option("-g", "--git-diff", "--git-commit",
+ metavar="COMMIT", dest="git_commit", help=git_commit_help,)
+
+ diff_files_help = "diff the files passed on the command line rather than checking the style of every line"
+ parser.add_option("--diff-files", action="store_true", dest="diff_files", default=False, help=diff_files_help)
+
+ min_confidence_help = ("set the minimum confidence of style errors "
+ "to report. Can be an integer 1-5, with 1 "
+ "displaying all errors. Defaults to %default.")
+ parser.add_option("-m", "--min-confidence", metavar="INT",
+ type="int", dest="min_confidence",
+ default=default_min_confidence,
+ help=min_confidence_help)
+
+ output_format_help = ('set the output format, which can be "emacs" '
+ 'or "vs7" (for Visual Studio). '
+ 'Defaults to "%default".')
+ parser.add_option("-o", "--output-format", metavar="FORMAT",
+ choices=["emacs", "vs7"],
+ dest="output_format", default=default_output_format,
+ help=output_format_help)
+
+ verbose_help = "enable verbose logging."
+ parser.add_option("-v", "--verbose", dest="is_verbose", default=False,
+ action="store_true", help=verbose_help)
+
+ # Override OptionParser's error() method so that option help will
+ # also display when an error occurs. Normally, just the usage
+ # string displays and not option help.
+ parser.error = self._parse_error
+
+ # Override OptionParser's print_help() method so that help output
+ # does not render to the screen while running unit tests.
+ print_help = parser.print_help
+ parser.print_help = lambda: print_help(file=stderr)
+
+ return parser
+
+ def _parse_error(self, error_message):
+ """Print the help string and an error message, and exit."""
+ # The method format_help() includes both the usage string and
+ # the flag options.
+ help = self._parser.format_help()
+ # Separate help from the error message with a single blank line.
+ self.stderr_write(help + "\n")
+ if error_message:
+ _log.error(error_message)
+
+ # Since we are using this method to replace/override the Python
+ # module optparse's OptionParser.error() method, we match its
+ # behavior and exit with status code 2.
+ #
+ # As additional background, Python documentation says--
+ #
+ # "Unix programs generally use 2 for command line syntax errors
+ # and 1 for all other kind of errors."
+ # + # (from http://docs.python.org/library/sys.html#sys.exit ) + sys.exit(2) + + def _exit_with_categories(self): + """Exit and print the style categories and default filter rules.""" + self.stderr_write('\nAll categories:\n') + for category in sorted(self._all_categories): + self.stderr_write(' ' + category + '\n') + + self.stderr_write('\nDefault filter rules**:\n') + for filter_rule in sorted(self._base_filter_rules): + self.stderr_write(' ' + filter_rule + '\n') + self.stderr_write('\n**The command always evaluates the above rules, ' + 'and before any --filter flag.\n\n') + + sys.exit(0) + + def _parse_filter_flag(self, flag_value): + """Parse the --filter flag, and return a list of filter rules. + + Args: + flag_value: A string of comma-separated filter rules, for + example "-whitespace,+whitespace/indent". + + """ + filters = [] + for uncleaned_filter in flag_value.split(','): + filter = uncleaned_filter.strip() + if not filter: + continue + filters.append(filter) + return filters + + def parse(self, args): + """Parse the command line arguments to check-webkit-style. + + Args: + args: A list of command-line arguments as returned by sys.argv[1:]. + + Returns: + A tuple of (paths, options) + + paths: The list of paths to check. + options: A CommandOptionValues instance. + + """ + (options, paths) = self._parser.parse_args(args=args) + + filter_value = options.filter_value + git_commit = options.git_commit + diff_files = options.diff_files + is_verbose = options.is_verbose + min_confidence = options.min_confidence + output_format = options.output_format + + if filter_value is not None and not filter_value: + # Then the user explicitly passed no filter, for + # example "-f ''" or "--filter=". + self._exit_with_categories() + + # Validate user-provided values. + + min_confidence = int(min_confidence) + if (min_confidence < 1) or (min_confidence > 5): + self._parse_error('option --min-confidence: invalid integer: ' + '%s: value must be between 1 and 5' + % min_confidence) + + if filter_value: + filter_rules = self._parse_filter_flag(filter_value) + else: + filter_rules = [] + + try: + validate_filter_rules(filter_rules, self._all_categories) + except ValueError, err: + self._parse_error(err) + + options = CommandOptionValues(filter_rules=filter_rules, + git_commit=git_commit, + diff_files=diff_files, + is_verbose=is_verbose, + min_confidence=min_confidence, + output_format=output_format) + + return (paths, options) + diff --git a/Tools/Scripts/webkitpy/style/optparser_unittest.py b/Tools/Scripts/webkitpy/style/optparser_unittest.py new file mode 100644 index 0000000..a6b64da --- /dev/null +++ b/Tools/Scripts/webkitpy/style/optparser_unittest.py @@ -0,0 +1,258 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for parser.py.""" + +import unittest + +from webkitpy.common.system.logtesting import LoggingTestCase +from webkitpy.style.optparser import ArgumentParser +from webkitpy.style.optparser import ArgumentPrinter +from webkitpy.style.optparser import CommandOptionValues as ProcessorOptions +from webkitpy.style.optparser import DefaultCommandOptionValues + + +class ArgumentPrinterTest(unittest.TestCase): + + """Tests the ArgumentPrinter class.""" + + _printer = ArgumentPrinter() + + def _create_options(self, + output_format='emacs', + min_confidence=3, + filter_rules=[], + git_commit=None): + return ProcessorOptions(filter_rules=filter_rules, + git_commit=git_commit, + min_confidence=min_confidence, + output_format=output_format) + + def test_to_flag_string(self): + options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git') + self.assertEquals('--filter=+foo,-bar --git-commit=git ' + '--min-confidence=5 --output=vs7', + self._printer.to_flag_string(options)) + + # This is to check that --filter and --git-commit do not + # show up when not user-specified. + options = self._create_options() + self.assertEquals('--min-confidence=3 --output=emacs', + self._printer.to_flag_string(options)) + + +class ArgumentParserTest(LoggingTestCase): + + """Test the ArgumentParser class.""" + + class _MockStdErr(object): + + def write(self, message): + # We do not want the usage string or style categories + # to print during unit tests, so print nothing. + return + + def _parse(self, args): + """Call a test parser.parse().""" + parser = self._create_parser() + return parser.parse(args) + + def _create_defaults(self): + """Return a DefaultCommandOptionValues instance for testing.""" + base_filter_rules = ["-", "+whitespace"] + return DefaultCommandOptionValues(min_confidence=3, + output_format="vs7") + + def _create_parser(self): + """Return an ArgumentParser instance for testing.""" + default_options = self._create_defaults() + + all_categories = ["build" ,"whitespace"] + + mock_stderr = self._MockStdErr() + + return ArgumentParser(all_categories=all_categories, + base_filter_rules=[], + default_options=default_options, + mock_stderr=mock_stderr, + usage="test usage") + + def test_parse_documentation(self): + parse = self._parse + + # FIXME: Test both the printing of the usage string and the + # filter categories help. + + # Request the usage string. + self.assertRaises(SystemExit, parse, ['--help']) + # Request default filter rules and available style categories. + self.assertRaises(SystemExit, parse, ['--filter=']) + + def test_parse_bad_values(self): + parse = self._parse + + # Pass an unsupported argument. 
+ self.assertRaises(SystemExit, parse, ['--bad']) + self.assertLog(['ERROR: no such option: --bad\n']) + + self.assertRaises(SystemExit, parse, ['--min-confidence=bad']) + self.assertLog(['ERROR: option --min-confidence: ' + "invalid integer value: 'bad'\n"]) + self.assertRaises(SystemExit, parse, ['--min-confidence=0']) + self.assertLog(['ERROR: option --min-confidence: invalid integer: 0: ' + 'value must be between 1 and 5\n']) + self.assertRaises(SystemExit, parse, ['--min-confidence=6']) + self.assertLog(['ERROR: option --min-confidence: invalid integer: 6: ' + 'value must be between 1 and 5\n']) + parse(['--min-confidence=1']) # works + parse(['--min-confidence=5']) # works + + self.assertRaises(SystemExit, parse, ['--output=bad']) + self.assertLog(['ERROR: option --output-format: invalid choice: ' + "'bad' (choose from 'emacs', 'vs7')\n"]) + parse(['--output=vs7']) # works + + # Pass a filter rule not beginning with + or -. + self.assertRaises(SystemExit, parse, ['--filter=build']) + self.assertLog(['ERROR: Invalid filter rule "build": ' + 'every rule must start with + or -.\n']) + parse(['--filter=+build']) # works + + def test_parse_default_arguments(self): + parse = self._parse + + (files, options) = parse([]) + + self.assertEquals(files, []) + + self.assertEquals(options.filter_rules, []) + self.assertEquals(options.git_commit, None) + self.assertEquals(options.diff_files, False) + self.assertEquals(options.is_verbose, False) + self.assertEquals(options.min_confidence, 3) + self.assertEquals(options.output_format, 'vs7') + + def test_parse_explicit_arguments(self): + parse = self._parse + + # Pass non-default explicit values. + (files, options) = parse(['--min-confidence=4']) + self.assertEquals(options.min_confidence, 4) + (files, options) = parse(['--output=emacs']) + self.assertEquals(options.output_format, 'emacs') + (files, options) = parse(['-g', 'commit']) + self.assertEquals(options.git_commit, 'commit') + (files, options) = parse(['--git-commit=commit']) + self.assertEquals(options.git_commit, 'commit') + (files, options) = parse(['--git-diff=commit']) + self.assertEquals(options.git_commit, 'commit') + (files, options) = parse(['--verbose']) + self.assertEquals(options.is_verbose, True) + (files, options) = parse(['--diff-files', 'file.txt']) + self.assertEquals(options.diff_files, True) + + # Pass user_rules. + (files, options) = parse(['--filter=+build,-whitespace']) + self.assertEquals(options.filter_rules, + ["+build", "-whitespace"]) + + # Pass spurious white space in user rules. + (files, options) = parse(['--filter=+build, -whitespace']) + self.assertEquals(options.filter_rules, + ["+build", "-whitespace"]) + + def test_parse_files(self): + parse = self._parse + + (files, options) = parse(['foo.cpp']) + self.assertEquals(files, ['foo.cpp']) + + # Pass multiple files. + (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp']) + self.assertEquals(files, ['foo.cpp', 'bar.cpp']) + + +class CommandOptionValuesTest(unittest.TestCase): + + """Tests CommandOptionValues class.""" + + def test_init(self): + """Test __init__ constructor.""" + # Check default parameters. + options = ProcessorOptions() + self.assertEquals(options.filter_rules, []) + self.assertEquals(options.git_commit, None) + self.assertEquals(options.is_verbose, False) + self.assertEquals(options.min_confidence, 1) + self.assertEquals(options.output_format, "emacs") + + # Check argument validation. 
+ self.assertRaises(ValueError, ProcessorOptions, output_format="bad") + ProcessorOptions(output_format="emacs") # No ValueError: works + ProcessorOptions(output_format="vs7") # works + self.assertRaises(ValueError, ProcessorOptions, min_confidence=0) + self.assertRaises(ValueError, ProcessorOptions, min_confidence=6) + ProcessorOptions(min_confidence=1) # works + ProcessorOptions(min_confidence=5) # works + + # Check attributes. + options = ProcessorOptions(filter_rules=["+"], + git_commit="commit", + is_verbose=True, + min_confidence=3, + output_format="vs7") + self.assertEquals(options.filter_rules, ["+"]) + self.assertEquals(options.git_commit, "commit") + self.assertEquals(options.is_verbose, True) + self.assertEquals(options.min_confidence, 3) + self.assertEquals(options.output_format, "vs7") + + def test_eq(self): + """Test __eq__ equality function.""" + self.assertTrue(ProcessorOptions().__eq__(ProcessorOptions())) + + # Also verify that a difference in any argument causes equality to fail. + + # Explicitly create a ProcessorOptions instance with all default + # values. We do this to be sure we are assuming the right default + # values in our self.assertFalse() calls below. + options = ProcessorOptions(filter_rules=[], + git_commit=None, + is_verbose=False, + min_confidence=1, + output_format="emacs") + # Verify that we created options correctly. + self.assertTrue(options.__eq__(ProcessorOptions())) + + self.assertFalse(options.__eq__(ProcessorOptions(filter_rules=["+"]))) + self.assertFalse(options.__eq__(ProcessorOptions(git_commit="commit"))) + self.assertFalse(options.__eq__(ProcessorOptions(is_verbose=True))) + self.assertFalse(options.__eq__(ProcessorOptions(min_confidence=2))) + self.assertFalse(options.__eq__(ProcessorOptions(output_format="vs7"))) + + def test_ne(self): + """Test __ne__ inequality function.""" + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. + self.assertFalse(ProcessorOptions().__ne__(ProcessorOptions())) + diff --git a/Tools/Scripts/webkitpy/style/patchreader.py b/Tools/Scripts/webkitpy/style/patchreader.py new file mode 100644 index 0000000..f44839d --- /dev/null +++ b/Tools/Scripts/webkitpy/style/patchreader.py @@ -0,0 +1,66 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# Copyright (C) 2010 ProFUSION embedded systems +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import logging + +from webkitpy.common.checkout.diff_parser import DiffParser + + +_log = logging.getLogger("webkitpy.style.patchreader") + + +class PatchReader(object): + """Supports checking style in patches.""" + + def __init__(self, text_file_reader): + """Create a PatchReader instance. + + Args: + text_file_reader: A TextFileReader instance. + + """ + self._text_file_reader = text_file_reader + + def check(self, patch_string): + """Check style in the given patch.""" + patch_files = DiffParser(patch_string.splitlines()).files + + for path, diff_file in patch_files.iteritems(): + line_numbers = diff_file.added_or_modified_line_numbers() + _log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path)) + + if not line_numbers: + # Don't check files which contain only deleted lines + # as they can never add style errors. However, mark them as + # processed so that we count up number of such files. + self._text_file_reader.count_delete_only_file() + continue + + self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers) diff --git a/Tools/Scripts/webkitpy/style/patchreader_unittest.py b/Tools/Scripts/webkitpy/style/patchreader_unittest.py new file mode 100644 index 0000000..b121082 --- /dev/null +++ b/Tools/Scripts/webkitpy/style/patchreader_unittest.py @@ -0,0 +1,92 @@ +#!/usr/bin/python +# +# Copyright (C) 2010 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.style.patchreader import PatchReader + + +class PatchReaderTest(unittest.TestCase): + + """Test the PatchReader class.""" + + class MockTextFileReader(object): + + def __init__(self): + self.passed_to_process_file = [] + """A list of (file_path, line_numbers) pairs.""" + self.delete_only_file_count = 0 + """A number of times count_delete_only_file() called""" + + def process_file(self, file_path, line_numbers): + self.passed_to_process_file.append((file_path, line_numbers)) + + def count_delete_only_file(self): + self.delete_only_file_count += 1 + + def setUp(self): + file_reader = self.MockTextFileReader() + self._file_reader = file_reader + self._patch_checker = PatchReader(file_reader) + + def _call_check_patch(self, patch_string): + self._patch_checker.check(patch_string) + + def _assert_checked(self, passed_to_process_file, delete_only_file_count): + self.assertEquals(self._file_reader.passed_to_process_file, + passed_to_process_file) + self.assertEquals(self._file_reader.delete_only_file_count, + delete_only_file_count) + + def test_check_patch(self): + # The modified line_numbers array for this patch is: [2]. + self._call_check_patch("""diff --git a/__init__.py b/__init__.py +index ef65bee..e3db70e 100644 +--- a/__init__.py ++++ b/__init__.py +@@ -1,1 +1,2 @@ + # Required for Python to search this directory for module files ++# New line +""") + self._assert_checked([("__init__.py", [2])], 0) + + def test_check_patch_with_deletion(self): + self._call_check_patch("""Index: __init__.py +=================================================================== +--- __init__.py (revision 3593) ++++ __init__.py (working copy) +@@ -1 +0,0 @@ +-foobar +""") + # _mock_check_file should not be called for the deletion patch. + self._assert_checked([], 1) diff --git a/Tools/Scripts/webkitpy/style_references.py b/Tools/Scripts/webkitpy/style_references.py new file mode 100644 index 0000000..a21e931 --- /dev/null +++ b/Tools/Scripts/webkitpy/style_references.py @@ -0,0 +1,74 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""References to non-style modules used by the style package.""" + +# This module is a simple facade to the functionality used by the +# style package that comes from WebKit modules outside the style +# package. +# +# With this module, the only intra-package references (i.e. +# references to webkitpy modules outside the style folder) that +# the style package needs to make are relative references to +# this module. For example-- +# +# > from .. style_references import parse_patch +# +# Similarly, people maintaining non-style code are not beholden +# to the contents of the style package when refactoring or +# otherwise changing non-style code. They only have to be aware +# of this module. + +import os + +from webkitpy.common.checkout.diff_parser import DiffParser +from webkitpy.common.system.logtesting import LogTesting +from webkitpy.common.system.logtesting import TestLogStream +from webkitpy.common.system.logutils import configure_logging +from webkitpy.common.checkout.scm import detect_scm_system +from webkitpy.layout_tests import port +from webkitpy.layout_tests.layout_package import test_expectations +from webkitpy.thirdparty.autoinstalled import pep8 + + +def detect_checkout(): + """Return a WebKitCheckout instance, or None if it cannot be found.""" + cwd = os.path.abspath(os.curdir) + scm = detect_scm_system(cwd) + + return None if scm is None else WebKitCheckout(scm) + + +class WebKitCheckout(object): + + """Simple facade to the SCM class for use by style package.""" + + def __init__(self, scm): + self._scm = scm + + def root_path(self): + """Return the checkout root as an absolute path.""" + return self._scm.checkout_root + + def create_patch(self, git_commit, changed_files=None): + # FIXME: SCM.create_patch should understand how to handle None. + return self._scm.create_patch(git_commit, changed_files=changed_files or []) diff --git a/Tools/Scripts/webkitpy/test/__init__.py b/Tools/Scripts/webkitpy/test/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/test/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/test/cat.py b/Tools/Scripts/webkitpy/test/cat.py new file mode 100644 index 0000000..ae1e143 --- /dev/null +++ b/Tools/Scripts/webkitpy/test/cat.py @@ -0,0 +1,42 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os.path +import sys + +# Add WebKitTools/Scripts to the path to ensure we can find webkitpy. +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) + +from webkitpy.common.system import fileutils + + +def command_arguments(*args): + return ['python', __file__] + list(args) + + +def main(): + fileutils.make_stdout_binary() + sys.stdout.write(sys.stdin.read()) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/Tools/Scripts/webkitpy/test/cat_unittest.py b/Tools/Scripts/webkitpy/test/cat_unittest.py new file mode 100644 index 0000000..4ed1f67 --- /dev/null +++ b/Tools/Scripts/webkitpy/test/cat_unittest.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
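For illustration only (not part of the patch): the cat.py helper above simply copies stdin to stdout in binary mode so that subprocess input handling can be exercised end to end. A minimal sketch of driving it through webkitpy's Executive, mirroring the unit test that follows and assuming webkitpy is importable:

# Hypothetical driver snippet; cat.command_arguments() builds the
# ['python', <path to cat.py>] argument list used to spawn the helper.
from webkitpy.common.system import executive
from webkitpy.test import cat

output = executive.Executive().run_command(cat.command_arguments(), input='foo bar baz\n')
assert output == 'foo bar baz\n'  # cat.py echoes its stdin back unchanged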
+ +import StringIO +import os.path +import sys +import unittest + +from webkitpy.common.system import executive, outputcapture +from webkitpy.test import cat + + +class CatTest(outputcapture.OutputCaptureTestCaseBase): + def assert_cat(self, input): + saved_stdin = sys.stdin + sys.stdin = StringIO.StringIO(input) + cat.main() + self.assertStdout(input) + sys.stdin = saved_stdin + + def test_basic(self): + self.assert_cat('foo bar baz\n') + + def test_no_newline(self): + self.assert_cat('foo bar baz') + + def test_unicode(self): + self.assert_cat(u'WebKit \u2661 Tor Arne Vestb\u00F8!') + + def test_as_command(self): + input = 'foo bar baz\n' + output = executive.Executive().run_command(cat.command_arguments(), input=input) + self.assertEqual(input, output) diff --git a/Tools/Scripts/webkitpy/test/echo.py b/Tools/Scripts/webkitpy/test/echo.py new file mode 100644 index 0000000..f7468f7 --- /dev/null +++ b/Tools/Scripts/webkitpy/test/echo.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os.path +import sys + +# Add WebKitTools/Scripts to the path to ensure we can find webkitpy. +sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) + +from webkitpy.common.system import fileutils + + +def command_arguments(*args): + return ['python', __file__] + list(args) + + +def main(args=None): + if args is None: + args = sys.argv[1:] + + fileutils.make_stdout_binary() + + print_newline = True + if len(args) and args[0] == '-n': + print_newline = False + del args[0] + sys.stdout.write(' '.join(args)) + if print_newline: + sys.stdout.write('\n') + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/Tools/Scripts/webkitpy/test/echo_unittest.py b/Tools/Scripts/webkitpy/test/echo_unittest.py new file mode 100644 index 0000000..bc13b5e --- /dev/null +++ b/Tools/Scripts/webkitpy/test/echo_unittest.py @@ -0,0 +1,64 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os.path +import sys +import unittest + +from webkitpy.common.system import executive, outputcapture +from webkitpy.test import echo + + +class EchoTest(outputcapture.OutputCaptureTestCaseBase): + def test_basic(self): + echo.main(['foo', 'bar', 'baz']) + self.assertStdout('foo bar baz\n') + + def test_no_newline(self): + echo.main(['-n', 'foo', 'bar', 'baz']) + self.assertStdout('foo bar baz') + + def test_unicode(self): + echo.main([u'WebKit \u2661', 'Tor Arne', u'Vestb\u00F8!']) + self.assertStdout(u'WebKit \u2661 Tor Arne Vestb\u00F8!\n') + + def test_argument_order(self): + echo.main(['foo', '-n', 'bar']) + self.assertStdout('foo -n bar\n') + + def test_empty_arguments(self): + old_argv = sys.argv + sys.argv = ['echo.py', 'foo', 'bar', 'baz'] + echo.main([]) + self.assertStdout('\n') + sys.argv = old_argv + + def test_no_arguments(self): + old_argv = sys.argv + sys.argv = ['echo.py', 'foo', 'bar', 'baz'] + echo.main() + self.assertStdout('foo bar baz\n') + sys.argv = old_argv + + def test_as_command(self): + output = executive.Executive().run_command(echo.command_arguments('foo', 'bar', 'baz')) + self.assertEqual(output, 'foo bar baz\n') diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py new file mode 100644 index 0000000..1038d82 --- /dev/null +++ b/Tools/Scripts/webkitpy/test/main.py @@ -0,0 +1,140 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Contains the entry method for test-webkitpy."""
+
+import logging
+import os
+import sys
+import unittest
+
+import webkitpy
+
+
+_log = logging.getLogger(__name__)
+
+
+class Tester(object):
+
+ """Discovers and runs webkitpy unit tests."""
+
+ def _find_unittest_files(self, webkitpy_dir):
+ """Return a list of paths to all unit-test files."""
+ unittest_paths = [] # Return value.
+
+ for dir_path, dir_names, file_names in os.walk(webkitpy_dir):
+ for file_name in file_names:
+ if not file_name.endswith("_unittest.py"):
+ continue
+ unittest_path = os.path.join(dir_path, file_name)
+ unittest_paths.append(unittest_path)
+
+ return unittest_paths
+
+ def _modules_from_paths(self, package_root, paths):
+ """Return a list of fully-qualified module names given paths."""
+ package_path = os.path.abspath(package_root)
+ root_package_name = os.path.split(package_path)[1] # Equals "webkitpy".
+
+ prefix_length = len(package_path)
+
+ modules = []
+ for path in paths:
+ path = os.path.abspath(path)
+ # This gives us, for example: /common/config/ports_unittest.py
+ rel_path = path[prefix_length:]
+ # This gives us, for example: /common/config/ports_unittest
+ rel_path = os.path.splitext(rel_path)[0]
+
+ parts = []
+ while True:
+ (rel_path, tail) = os.path.split(rel_path)
+ if not tail:
+ break
+ parts.insert(0, tail)
+ # We now have, for example: common.config.ports_unittest
+ # FIXME: This is all a hack around the fact that we always prefix webkitpy includes with "webkitpy."
+ parts.insert(0, root_package_name) # Put "webkitpy" at the beginning.
+ module = ".".join(parts)
+ modules.append(module)
+
+ return modules
+
+ def run_tests(self, sys_argv, external_package_paths=None):
+ """Run the unit tests in all *_unittest.py modules in webkitpy.
+
+ This method excludes "webkitpy.common.checkout.scm_unittest" unless
+ the --all option is the second element of sys_argv.
+
+ Args:
+ sys_argv: A reference to sys.argv.
+
+ """
+ if external_package_paths is None:
+ external_package_paths = []
+ else:
+ # FIXME: We should consider moving webkitpy off of using "webkitpy." to prefix
+ # all includes. If we did that, then this would use path instead of dirname(path).
+ # QueueStatusServer.__init__ has a sys.path import hack due to this code.
+ sys.path.extend(set(os.path.dirname(path) for path in external_package_paths))
+
+ if len(sys_argv) > 1 and not sys_argv[-1].startswith("-"):
+ # Then explicit modules or test names were provided, which
+ # the unittest module is equipped to handle.
+ unittest.main(argv=sys_argv, module=None)
+ # No need to return since unittest.main() exits.
+
+ # Otherwise, auto-detect all unit tests.
+
+ # FIXME: This should be combined with the external_package_paths code above.
+ webkitpy_dir = os.path.dirname(webkitpy.__file__)
+
+ modules = []
+ for path in [webkitpy_dir] + external_package_paths:
+ modules.extend(self._modules_from_paths(path, self._find_unittest_files(path)))
+ modules.sort()
+
+ # This is a sanity check to ensure that the unit-test discovery
+ # methods are working.
+ if len(modules) < 1:
+ raise Exception("No unit-test modules found.")
+
+ for module in modules:
+ _log.debug("Found: %s" % module)
+
+ # FIXME: This is a hack, but I'm tired of commenting out the test.
+ # See https://bugs.webkit.org/show_bug.cgi?id=31818
+ if len(sys_argv) > 1 and sys_argv[1] == "--all":
+ sys_argv.remove("--all")
+ else:
+ excluded_module = "webkitpy.common.checkout.scm_unittest"
+ _log.info("Excluding: %s (use --all to include)" % excluded_module)
+ modules.remove(excluded_module)
+
+ sys_argv.extend(modules)
+
+ # We pass None for the module because we do not want the unittest
+ # module to resolve module names relative to a given module.
+ # (This would require importing all of the unittest modules from
+ # this module.) See the loadTestsFromName() method of the
+ # unittest.TestLoader class for more details on this parameter.
+ unittest.main(argv=sys_argv, module=None)
diff --git a/Tools/Scripts/webkitpy/test/skip.py b/Tools/Scripts/webkitpy/test/skip.py
new file mode 100644
index 0000000..8587d56
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/skip.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+_log = logging.getLogger(__name__)
+
+
+def skip_if(klass, condition, message=None, logger=None):
+ """Makes all test_* methods in a given class no-ops if the given condition
+ is True.
Backported from Python 3.1+'s unittest.skipIf decorator.""" + if not logger: + logger = _log + if not condition: + return klass + for name in dir(klass): + attr = getattr(klass, name) + if not callable(attr): + continue + if not name.startswith('test_'): + continue + setattr(klass, name, _skipped_method(attr, message, logger)) + klass._printed_skipped_message = False + return klass + + +def _skipped_method(method, message, logger): + def _skip(*args): + if method.im_class._printed_skipped_message: + return + method.im_class._printed_skipped_message = True + logger.info('Skipping %s.%s: %s' % (method.__module__, method.im_class.__name__, message)) + return _skip diff --git a/Tools/Scripts/webkitpy/test/skip_unittest.py b/Tools/Scripts/webkitpy/test/skip_unittest.py new file mode 100644 index 0000000..f61a1bb --- /dev/null +++ b/Tools/Scripts/webkitpy/test/skip_unittest.py @@ -0,0 +1,77 @@ +# Copyright (C) 2010 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
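For illustration only (not part of the patch): skip_if() above turns a class's test_* methods into no-ops when the condition is true and logs a single "Skipping ..." message for the class. A minimal usage sketch with a made-up test class and condition, assuming webkitpy is importable:

# Hypothetical example of conditionally disabling a test class.
import sys
import unittest

from webkitpy.test.skip import skip_if


class ExampleTest(unittest.TestCase):
    def test_posix_only_behavior(self):
        self.assertTrue(True)

# Replace the test_* methods with no-ops when running on Windows.
ExampleTest = skip_if(ExampleTest, sys.platform == 'win32', 'not supported on Windows')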
+
+import StringIO
+import logging
+import unittest
+
+from webkitpy.test.skip import skip_if
+
+
+class SkipTest(unittest.TestCase):
+ def setUp(self):
+ self.logger = logging.getLogger(__name__)
+
+ self.old_level = self.logger.level
+ self.logger.setLevel(logging.INFO)
+
+ self.old_propagate = self.logger.propagate
+ self.logger.propagate = False
+
+ self.log_stream = StringIO.StringIO()
+ self.handler = logging.StreamHandler(self.log_stream)
+ self.logger.addHandler(self.handler)
+
+ self.foo_was_called = False
+
+ def tearDown(self):
+ self.logger.removeHandler(self.handler)
+ self.logger.propagate = self.old_propagate
+ self.logger.setLevel(self.old_level)
+
+ def create_fixture_class(self):
+ class TestSkipFixture(object):
+ def __init__(self, callback):
+ self.callback = callback
+
+ def test_foo(self):
+ self.callback()
+
+ return TestSkipFixture
+
+ def foo_callback(self):
+ self.foo_was_called = True
+
+ def test_skip_if_false(self):
+ klass = skip_if(self.create_fixture_class(), False, 'Should not see this message.', logger=self.logger)
+ klass(self.foo_callback).test_foo()
+ self.assertEqual(self.log_stream.getvalue(), '')
+ self.assertTrue(self.foo_was_called)
+
+ def test_skip_if_true(self):
+ klass = skip_if(self.create_fixture_class(), True, 'Should see this message.', logger=self.logger)
+ klass(self.foo_callback).test_foo()
+ self.assertEqual(self.log_stream.getvalue(), 'Skipping webkitpy.test.skip_unittest.TestSkipFixture: Should see this message.\n')
+ self.assertFalse(self.foo_was_called)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py b/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
new file mode 100644
index 0000000..34204e7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/thirdparty/BeautifulSoup.py
@@ -0,0 +1,2000 @@
+"""Beautiful Soup
+Elixir and Tonic
+"The Screen-Scraper's Friend"
+http://www.crummy.com/software/BeautifulSoup/
+
+Beautiful Soup parses a (possibly invalid) XML or HTML document into a
+tree representation. It provides methods and Pythonic idioms that make
+it easy to navigate, search, and modify the tree.
+
+A well-formed XML/HTML document yields a well-formed data
+structure. An ill-formed XML/HTML document yields a correspondingly
+ill-formed data structure. If your document is only locally
+well-formed, you can use this library to find and process the
+well-formed part of it.
+
+Beautiful Soup works with Python 2.2 and up. It has no external
+dependencies, but you'll have more success at converting data to UTF-8
+if you also install these three packages:
+
+* chardet, for auto-detecting character encodings
+ http://chardet.feedparser.org/
+* cjkcodecs and iconv_codec, which add more encodings to the ones supported
+ by stock Python.
+ http://cjkpython.i18n.org/
+
+Beautiful Soup defines classes for two main parsing strategies:
+
+ * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
+ language that kind of looks like XML.
+
+ * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
+ or invalid. This class has web browser-like heuristics for
+ obtaining a sensible parse tree in the face of common HTML errors.
+
+Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
+the encoding of an HTML or XML document, and converting it to
+Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
+ +For more than you ever wanted to know about Beautiful Soup, see the +documentation: +http://www.crummy.com/software/BeautifulSoup/documentation.html + +Here, have some legalese: + +Copyright (c) 2004-2009, Leonard Richardson + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the the Beautiful Soup Consortium and All + Night Kosher Bakery nor the names of its contributors may be + used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT. + +""" +from __future__ import generators + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "3.1.0.1" +__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson" +__license__ = "New-style BSD" + +import codecs +import markupbase +import types +import re +from HTMLParser import HTMLParser, HTMLParseError +try: + from htmlentitydefs import name2codepoint +except ImportError: + name2codepoint = {} +try: + set +except NameError: + from sets import Set as set + +#These hacks make Beautiful Soup able to parse XML with namespaces +markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match + +DEFAULT_OUTPUT_ENCODING = "utf-8" + +# First, the classes that represent markup elements. + +def sob(unicode, encoding): + """Returns either the given Unicode string or its encoding.""" + if encoding is None: + return unicode + else: + return unicode.encode(encoding) + +class PageElement: + """Contains the navigational information for some part of the page + (either a tag or a piece of text)""" + + def setup(self, parent=None, previous=None): + """Sets up the initial relations between this element and + other elements.""" + self.parent = parent + self.previous = previous + self.next = None + self.previousSibling = None + self.nextSibling = None + if self.parent and self.parent.contents: + self.previousSibling = self.parent.contents[-1] + self.previousSibling.nextSibling = self + + def replaceWith(self, replaceWith): + oldParent = self.parent + myIndex = self.parent.contents.index(self) + if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent: + # We're replacing this element with one of its siblings. 
+ index = self.parent.contents.index(replaceWith) + if index and index < myIndex: + # Furthermore, it comes before this element. That + # means that when we extract it, the index of this + # element will change. + myIndex = myIndex - 1 + self.extract() + oldParent.insert(myIndex, replaceWith) + + def extract(self): + """Destructively rips this element out of the tree.""" + if self.parent: + try: + self.parent.contents.remove(self) + except ValueError: + pass + + #Find the two elements that would be next to each other if + #this element (and any children) hadn't been parsed. Connect + #the two. + lastChild = self._lastRecursiveChild() + nextElement = lastChild.next + + if self.previous: + self.previous.next = nextElement + if nextElement: + nextElement.previous = self.previous + self.previous = None + lastChild.next = None + + self.parent = None + if self.previousSibling: + self.previousSibling.nextSibling = self.nextSibling + if self.nextSibling: + self.nextSibling.previousSibling = self.previousSibling + self.previousSibling = self.nextSibling = None + return self + + def _lastRecursiveChild(self): + "Finds the last element beneath this object to be parsed." + lastChild = self + while hasattr(lastChild, 'contents') and lastChild.contents: + lastChild = lastChild.contents[-1] + return lastChild + + def insert(self, position, newChild): + if (isinstance(newChild, basestring) + or isinstance(newChild, unicode)) \ + and not isinstance(newChild, NavigableString): + newChild = NavigableString(newChild) + + position = min(position, len(self.contents)) + if hasattr(newChild, 'parent') and newChild.parent != None: + # We're 'inserting' an element that's already one + # of this object's children. + if newChild.parent == self: + index = self.find(newChild) + if index and index < position: + # Furthermore we're moving it further down the + # list of this object's children. That means that + # when we extract this element, our target index + # will jump down one. + position = position - 1 + newChild.extract() + + newChild.parent = self + previousChild = None + if position == 0: + newChild.previousSibling = None + newChild.previous = self + else: + previousChild = self.contents[position-1] + newChild.previousSibling = previousChild + newChild.previousSibling.nextSibling = newChild + newChild.previous = previousChild._lastRecursiveChild() + if newChild.previous: + newChild.previous.next = newChild + + newChildsLastElement = newChild._lastRecursiveChild() + + if position >= len(self.contents): + newChild.nextSibling = None + + parent = self + parentsNextSibling = None + while not parentsNextSibling: + parentsNextSibling = parent.nextSibling + parent = parent.parent + if not parent: # This is the last element in the document. 
+ break + if parentsNextSibling: + newChildsLastElement.next = parentsNextSibling + else: + newChildsLastElement.next = None + else: + nextChild = self.contents[position] + newChild.nextSibling = nextChild + if newChild.nextSibling: + newChild.nextSibling.previousSibling = newChild + newChildsLastElement.next = nextChild + + if newChildsLastElement.next: + newChildsLastElement.next.previous = newChildsLastElement + self.contents.insert(position, newChild) + + def append(self, tag): + """Appends the given tag to the contents of this tag.""" + self.insert(len(self.contents), tag) + + def findNext(self, name=None, attrs={}, text=None, **kwargs): + """Returns the first item that matches the given criteria and + appears after this Tag in the document.""" + return self._findOne(self.findAllNext, name, attrs, text, **kwargs) + + def findAllNext(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns all items that match the given criteria and appear + after this Tag in the document.""" + return self._findAll(name, attrs, text, limit, self.nextGenerator, + **kwargs) + + def findNextSibling(self, name=None, attrs={}, text=None, **kwargs): + """Returns the closest sibling to this Tag that matches the + given criteria and appears after this Tag in the document.""" + return self._findOne(self.findNextSiblings, name, attrs, text, + **kwargs) + + def findNextSiblings(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns the siblings of this Tag that match the given + criteria and appear after this Tag in the document.""" + return self._findAll(name, attrs, text, limit, + self.nextSiblingGenerator, **kwargs) + fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x + + def findPrevious(self, name=None, attrs={}, text=None, **kwargs): + """Returns the first item that matches the given criteria and + appears before this Tag in the document.""" + return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs) + + def findAllPrevious(self, name=None, attrs={}, text=None, limit=None, + **kwargs): + """Returns all items that match the given criteria and appear + before this Tag in the document.""" + return self._findAll(name, attrs, text, limit, self.previousGenerator, + **kwargs) + fetchPrevious = findAllPrevious # Compatibility with pre-3.x + + def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs): + """Returns the closest sibling to this Tag that matches the + given criteria and appears before this Tag in the document.""" + return self._findOne(self.findPreviousSiblings, name, attrs, text, + **kwargs) + + def findPreviousSiblings(self, name=None, attrs={}, text=None, + limit=None, **kwargs): + """Returns the siblings of this Tag that match the given + criteria and appear before this Tag in the document.""" + return self._findAll(name, attrs, text, limit, + self.previousSiblingGenerator, **kwargs) + fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x + + def findParent(self, name=None, attrs={}, **kwargs): + """Returns the closest parent of this Tag that matches the given + criteria.""" + # NOTE: We can't use _findOne because findParents takes a different + # set of arguments. 
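+        # For example, starting from a <td> Tag, findParent('table') walks the
+        # parentGenerator() chain and returns the nearest enclosing <table>
+        # Tag, or None if no ancestor matches.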
+ r = None + l = self.findParents(name, attrs, 1) + if l: + r = l[0] + return r + + def findParents(self, name=None, attrs={}, limit=None, **kwargs): + """Returns the parents of this Tag that match the given + criteria.""" + + return self._findAll(name, attrs, None, limit, self.parentGenerator, + **kwargs) + fetchParents = findParents # Compatibility with pre-3.x + + #These methods do the real heavy lifting. + + def _findOne(self, method, name, attrs, text, **kwargs): + r = None + l = method(name, attrs, text, 1, **kwargs) + if l: + r = l[0] + return r + + def _findAll(self, name, attrs, text, limit, generator, **kwargs): + "Iterates over a generator looking for things that match." + + if isinstance(name, SoupStrainer): + strainer = name + else: + # Build a SoupStrainer + strainer = SoupStrainer(name, attrs, text, **kwargs) + results = ResultSet(strainer) + g = generator() + while True: + try: + i = g.next() + except StopIteration: + break + if i: + found = strainer.search(i) + if found: + results.append(found) + if limit and len(results) >= limit: + break + return results + + #These Generators can be used to navigate starting from both + #NavigableStrings and Tags. + def nextGenerator(self): + i = self + while i: + i = i.next + yield i + + def nextSiblingGenerator(self): + i = self + while i: + i = i.nextSibling + yield i + + def previousGenerator(self): + i = self + while i: + i = i.previous + yield i + + def previousSiblingGenerator(self): + i = self + while i: + i = i.previousSibling + yield i + + def parentGenerator(self): + i = self + while i: + i = i.parent + yield i + + # Utility methods + def substituteEncoding(self, str, encoding=None): + encoding = encoding or "utf-8" + return str.replace("%SOUP-ENCODING%", encoding) + + def toEncoding(self, s, encoding=None): + """Encodes an object to a string in some encoding, or to Unicode. + .""" + if isinstance(s, unicode): + if encoding: + s = s.encode(encoding) + elif isinstance(s, str): + if encoding: + s = s.encode(encoding) + else: + s = unicode(s) + else: + if encoding: + s = self.toEncoding(str(s), encoding) + else: + s = unicode(s) + return s + +class NavigableString(unicode, PageElement): + + def __new__(cls, value): + """Create a new NavigableString. + + When unpickling a NavigableString, this method is called with + the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be + passed in to the superclass's __new__ or the superclass won't know + how to handle non-ASCII characters. + """ + if isinstance(value, unicode): + return unicode.__new__(cls, value) + return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING) + + def __getnewargs__(self): + return (unicode(self),) + + def __getattr__(self, attr): + """text.string gives you text. 
This is for backwards + compatibility for Navigable*String, but for CData* it lets you + get the string without the CData wrapper.""" + if attr == 'string': + return self + else: + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr) + + def encode(self, encoding=DEFAULT_OUTPUT_ENCODING): + return self.decode().encode(encoding) + + def decodeGivenEventualEncoding(self, eventualEncoding): + return self + +class CData(NavigableString): + + def decodeGivenEventualEncoding(self, eventualEncoding): + return u'<![CDATA[' + self + u']]>' + +class ProcessingInstruction(NavigableString): + + def decodeGivenEventualEncoding(self, eventualEncoding): + output = self + if u'%SOUP-ENCODING%' in output: + output = self.substituteEncoding(output, eventualEncoding) + return u'<?' + output + u'?>' + +class Comment(NavigableString): + def decodeGivenEventualEncoding(self, eventualEncoding): + return u'<!--' + self + u'-->' + +class Declaration(NavigableString): + def decodeGivenEventualEncoding(self, eventualEncoding): + return u'<!' + self + u'>' + +class Tag(PageElement): + + """Represents a found HTML tag with its attributes and contents.""" + + def _invert(h): + "Cheap function to invert a hash." + i = {} + for k,v in h.items(): + i[v] = k + return i + + XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'", + "quot" : '"', + "amp" : "&", + "lt" : "<", + "gt" : ">" } + + XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS) + + def _convertEntities(self, match): + """Used in a call to re.sub to replace HTML, XML, and numeric + entities with the appropriate Unicode characters. If HTML + entities are being converted, any unrecognized entities are + escaped.""" + x = match.group(1) + if self.convertHTMLEntities and x in name2codepoint: + return unichr(name2codepoint[x]) + elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS: + if self.convertXMLEntities: + return self.XML_ENTITIES_TO_SPECIAL_CHARS[x] + else: + return u'&%s;' % x + elif len(x) > 0 and x[0] == '#': + # Handle numeric entities + if len(x) > 1 and x[1] == 'x': + return unichr(int(x[2:], 16)) + else: + return unichr(int(x[1:])) + + elif self.escapeUnrecognizedEntities: + return u'&%s;' % x + else: + return u'&%s;' % x + + def __init__(self, parser, name, attrs=None, parent=None, + previous=None): + "Basic constructor." + + # We don't actually store the parser object: that lets extracted + # chunks be garbage-collected + self.parserClass = parser.__class__ + self.isSelfClosing = parser.isSelfClosingTag(name) + self.name = name + if attrs == None: + attrs = [] + self.attrs = attrs + self.contents = [] + self.setup(parent, previous) + self.hidden = False + self.containsSubstitutions = False + self.convertHTMLEntities = parser.convertHTMLEntities + self.convertXMLEntities = parser.convertXMLEntities + self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities + + def convert(kval): + "Converts HTML, XML and numeric entities in the attribute value." 
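+            # For example, with HTML entity conversion enabled, the attribute
+            # pair ("alt", "Tom &amp; Jerry&#33;") comes back as
+            # ("alt", u"Tom & Jerry!").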
+ k, val = kval + if val is None: + return kval + return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);", + self._convertEntities, val)) + self.attrs = map(convert, self.attrs) + + def get(self, key, default=None): + """Returns the value of the 'key' attribute for the tag, or + the value given for 'default' if it doesn't have that + attribute.""" + return self._getAttrMap().get(key, default) + + def has_key(self, key): + return self._getAttrMap().has_key(key) + + def __getitem__(self, key): + """tag[key] returns the value of the 'key' attribute for the tag, + and throws an exception if it's not there.""" + return self._getAttrMap()[key] + + def __iter__(self): + "Iterating over a tag iterates over its contents." + return iter(self.contents) + + def __len__(self): + "The length of a tag is the length of its list of contents." + return len(self.contents) + + def __contains__(self, x): + return x in self.contents + + def __nonzero__(self): + "A tag is non-None even if it has no contents." + return True + + def __setitem__(self, key, value): + """Setting tag[key] sets the value of the 'key' attribute for the + tag.""" + self._getAttrMap() + self.attrMap[key] = value + found = False + for i in range(0, len(self.attrs)): + if self.attrs[i][0] == key: + self.attrs[i] = (key, value) + found = True + if not found: + self.attrs.append((key, value)) + self._getAttrMap()[key] = value + + def __delitem__(self, key): + "Deleting tag[key] deletes all 'key' attributes for the tag." + for item in self.attrs: + if item[0] == key: + self.attrs.remove(item) + #We don't break because bad HTML can define the same + #attribute multiple times. + self._getAttrMap() + if self.attrMap.has_key(key): + del self.attrMap[key] + + def __call__(self, *args, **kwargs): + """Calling a tag like a function is the same as calling its + findAll() method. Eg. tag('a') returns a list of all the A tags + found within this tag.""" + return apply(self.findAll, args, kwargs) + + def __getattr__(self, tag): + #print "Getattr %s.%s" % (self.__class__, tag) + if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3: + return self.find(tag[:-3]) + elif tag.find('__') != 0: + return self.find(tag) + raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag) + + def __eq__(self, other): + """Returns true iff this tag has the same name, the same attributes, + and the same contents (recursively) as the given tag. + + NOTE: right now this will return false if two tags have the + same attributes in a different order. 
Should this be fixed?""" + if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other): + return False + for i in range(0, len(self.contents)): + if self.contents[i] != other.contents[i]: + return False + return True + + def __ne__(self, other): + """Returns true iff this tag is not identical to the other tag, + as defined in __eq__.""" + return not self == other + + def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING): + """Renders this tag as a string.""" + return self.decode(eventualEncoding=encoding) + + BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" + + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" + + ")") + + def _sub_entity(self, x): + """Used with a regular expression to substitute the + appropriate XML entity for an XML special character.""" + return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";" + + def __unicode__(self): + return self.decode() + + def __str__(self): + return self.encode() + + def encode(self, encoding=DEFAULT_OUTPUT_ENCODING, + prettyPrint=False, indentLevel=0): + return self.decode(prettyPrint, indentLevel, encoding).encode(encoding) + + def decode(self, prettyPrint=False, indentLevel=0, + eventualEncoding=DEFAULT_OUTPUT_ENCODING): + """Returns a string or Unicode representation of this tag and + its contents. To get Unicode, pass None for encoding.""" + + attrs = [] + if self.attrs: + for key, val in self.attrs: + fmt = '%s="%s"' + if isString(val): + if (self.containsSubstitutions + and eventualEncoding is not None + and '%SOUP-ENCODING%' in val): + val = self.substituteEncoding(val, eventualEncoding) + + # The attribute value either: + # + # * Contains no embedded double quotes or single quotes. + # No problem: we enclose it in double quotes. + # * Contains embedded single quotes. No problem: + # double quotes work here too. + # * Contains embedded double quotes. No problem: + # we enclose it in single quotes. + # * Embeds both single _and_ double quotes. This + # can't happen naturally, but it can happen if + # you modify an attribute value after parsing + # the document. Now we have a bit of a + # problem. We solve it by enclosing the + # attribute in single quotes, and escaping any + # embedded single quotes to XML entities. + if '"' in val: + fmt = "%s='%s'" + if "'" in val: + # TODO: replace with apos when + # appropriate. + val = val.replace("'", "&squot;") + + # Now we're okay w/r/t quotes. But the attribute + # value might also contain angle brackets, or + # ampersands that aren't part of entities. We need + # to escape those to XML entities too. + val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val) + if val is None: + # Handle boolean attributes. 
+ decoded = key + else: + decoded = fmt % (key, val) + attrs.append(decoded) + close = '' + closeTag = '' + if self.isSelfClosing: + close = ' /' + else: + closeTag = '</%s>' % self.name + + indentTag, indentContents = 0, 0 + if prettyPrint: + indentTag = indentLevel + space = (' ' * (indentTag-1)) + indentContents = indentTag + 1 + contents = self.decodeContents(prettyPrint, indentContents, + eventualEncoding) + if self.hidden: + s = contents + else: + s = [] + attributeString = '' + if attrs: + attributeString = ' ' + ' '.join(attrs) + if prettyPrint: + s.append(space) + s.append('<%s%s%s>' % (self.name, attributeString, close)) + if prettyPrint: + s.append("\n") + s.append(contents) + if prettyPrint and contents and contents[-1] != "\n": + s.append("\n") + if prettyPrint and closeTag: + s.append(space) + s.append(closeTag) + if prettyPrint and closeTag and self.nextSibling: + s.append("\n") + s = ''.join(s) + return s + + def decompose(self): + """Recursively destroys the contents of this tree.""" + contents = [i for i in self.contents] + for i in contents: + if isinstance(i, Tag): + i.decompose() + else: + i.extract() + self.extract() + + def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING): + return self.encode(encoding, True) + + def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING, + prettyPrint=False, indentLevel=0): + return self.decodeContents(prettyPrint, indentLevel).encode(encoding) + + def decodeContents(self, prettyPrint=False, indentLevel=0, + eventualEncoding=DEFAULT_OUTPUT_ENCODING): + """Renders the contents of this tag as a string in the given + encoding. If encoding is None, returns a Unicode string..""" + s=[] + for c in self: + text = None + if isinstance(c, NavigableString): + text = c.decodeGivenEventualEncoding(eventualEncoding) + elif isinstance(c, Tag): + s.append(c.decode(prettyPrint, indentLevel, eventualEncoding)) + if text and prettyPrint: + text = text.strip() + if text: + if prettyPrint: + s.append(" " * (indentLevel-1)) + s.append(text) + if prettyPrint: + s.append("\n") + return ''.join(s) + + #Soup methods + + def find(self, name=None, attrs={}, recursive=True, text=None, + **kwargs): + """Return only the first child of this Tag matching the given + criteria.""" + r = None + l = self.findAll(name, attrs, recursive, text, 1, **kwargs) + if l: + r = l[0] + return r + findChild = find + + def findAll(self, name=None, attrs={}, recursive=True, text=None, + limit=None, **kwargs): + """Extracts a list of Tag objects that match the given + criteria. You can specify the name of the Tag and any + attributes you want the Tag to have. + + The value of a key-value pair in the 'attrs' map can be a + string, a list of strings, a regular expression object, or a + callable that takes a string and returns whether or not the + string matches for some custom definition of 'matches'. The + same is true of the tag name.""" + generator = self.recursiveChildGenerator + if not recursive: + generator = self.childGenerator + return self._findAll(name, attrs, text, limit, generator, **kwargs) + findChildren = findAll + + # Pre-3.x compatibility methods. Will go away in 4.0. + first = find + fetch = findAll + + def fetchText(self, text=None, recursive=True, limit=None): + return self.findAll(text=text, recursive=recursive, limit=limit) + + def firstText(self, text=None, recursive=True): + return self.find(text=text, recursive=recursive) + + # 3.x compatibility methods. Will go away in 4.0. 
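+
+    # A short illustrative sketch of the find()/findAll() interface documented
+    # above (the markup is made up):
+    #
+    #   soup = BeautifulSoup('<p class="x">One</p><p>Two</p>')
+    #   soup.findAll('p')                       # both <p> Tags
+    #   soup.find('p', {'class': 'x'})          # just the first <p>
+    #   soup.findAll(text=re.compile('Two'))    # [u'Two']
+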
+ def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING, + prettyPrint=False, indentLevel=0): + if encoding is None: + return self.decodeContents(prettyPrint, indentLevel, encoding) + else: + return self.encodeContents(encoding, prettyPrint, indentLevel) + + + #Private methods + + def _getAttrMap(self): + """Initializes a map representation of this tag's attributes, + if not already initialized.""" + if not getattr(self, 'attrMap'): + self.attrMap = {} + for (key, value) in self.attrs: + self.attrMap[key] = value + return self.attrMap + + #Generator methods + def recursiveChildGenerator(self): + if not len(self.contents): + raise StopIteration + stopNode = self._lastRecursiveChild().next + current = self.contents[0] + while current is not stopNode: + yield current + current = current.next + + def childGenerator(self): + if not len(self.contents): + raise StopIteration + current = self.contents[0] + while current: + yield current + current = current.nextSibling + raise StopIteration + +# Next, a couple classes to represent queries and their results. +class SoupStrainer: + """Encapsulates a number of ways of matching a markup element (tag or + text).""" + + def __init__(self, name=None, attrs={}, text=None, **kwargs): + self.name = name + if isString(attrs): + kwargs['class'] = attrs + attrs = None + if kwargs: + if attrs: + attrs = attrs.copy() + attrs.update(kwargs) + else: + attrs = kwargs + self.attrs = attrs + self.text = text + + def __str__(self): + if self.text: + return self.text + else: + return "%s|%s" % (self.name, self.attrs) + + def searchTag(self, markupName=None, markupAttrs={}): + found = None + markup = None + if isinstance(markupName, Tag): + markup = markupName + markupAttrs = markup + callFunctionWithTagData = callable(self.name) \ + and not isinstance(markupName, Tag) + + if (not self.name) \ + or callFunctionWithTagData \ + or (markup and self._matches(markup, self.name)) \ + or (not markup and self._matches(markupName, self.name)): + if callFunctionWithTagData: + match = self.name(markupName, markupAttrs) + else: + match = True + markupAttrMap = None + for attr, matchAgainst in self.attrs.items(): + if not markupAttrMap: + if hasattr(markupAttrs, 'get'): + markupAttrMap = markupAttrs + else: + markupAttrMap = {} + for k,v in markupAttrs: + markupAttrMap[k] = v + attrValue = markupAttrMap.get(attr) + if not self._matches(attrValue, matchAgainst): + match = False + break + if match: + if markup: + found = markup + else: + found = markupName + return found + + def search(self, markup): + #print 'looking for %s in %s' % (self, markup) + found = None + # If given a list of items, scan it for a text element that + # matches. + if isList(markup) and not isinstance(markup, Tag): + for element in markup: + if isinstance(element, NavigableString) \ + and self.search(element): + found = element + break + # If it's a Tag, make sure its name or attributes match. + # Don't bother with Tags if we're searching for text. + elif isinstance(markup, Tag): + if not self.text: + found = self.searchTag(markup) + # If it's text, make sure the text matches. 
+ elif isinstance(markup, NavigableString) or \ + isString(markup): + if self._matches(markup, self.text): + found = markup + else: + raise Exception, "I don't know how to match against a %s" \ + % markup.__class__ + return found + + def _matches(self, markup, matchAgainst): + #print "Matching %s against %s" % (markup, matchAgainst) + result = False + if matchAgainst == True and type(matchAgainst) == types.BooleanType: + result = markup != None + elif callable(matchAgainst): + result = matchAgainst(markup) + else: + #Custom match methods take the tag as an argument, but all + #other ways of matching match the tag name as a string. + if isinstance(markup, Tag): + markup = markup.name + if markup is not None and not isString(markup): + markup = unicode(markup) + #Now we know that chunk is either a string, or None. + if hasattr(matchAgainst, 'match'): + # It's a regexp object. + result = markup and matchAgainst.search(markup) + elif (isList(matchAgainst) + and (markup is not None or not isString(matchAgainst))): + result = markup in matchAgainst + elif hasattr(matchAgainst, 'items'): + result = markup.has_key(matchAgainst) + elif matchAgainst and isString(markup): + if isinstance(markup, unicode): + matchAgainst = unicode(matchAgainst) + else: + matchAgainst = str(matchAgainst) + + if not result: + result = matchAgainst == markup + return result + +class ResultSet(list): + """A ResultSet is just a list that keeps track of the SoupStrainer + that created it.""" + def __init__(self, source): + list.__init__([]) + self.source = source + +# Now, some helper functions. + +def isList(l): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is listlike.""" + return ((hasattr(l, '__iter__') and not isString(l)) + or (type(l) in (types.ListType, types.TupleType))) + +def isString(s): + """Convenience method that works with all 2.x versions of Python + to determine whether or not something is stringlike.""" + try: + return isinstance(s, unicode) or isinstance(s, basestring) + except NameError: + return isinstance(s, str) + +def buildTagMap(default, *args): + """Turns a list of maps, lists, or scalars into a single map. + Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and + NESTING_RESET_TAGS maps out of lists and partial maps.""" + built = {} + for portion in args: + if hasattr(portion, 'items'): + #It's a map. Merge it. + for k,v in portion.items(): + built[k] = v + elif isList(portion) and not isString(portion): + #It's a list. Map each item to the default. + for k in portion: + built[k] = default + else: + #It's a scalar. Map it to the default. + built[portion] = default + return built + +# Now, the parser classes. + +class HTMLParserBuilder(HTMLParser): + + def __init__(self, soup): + HTMLParser.__init__(self) + self.soup = soup + + # We inherit feed() and reset(). 
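+    # Every parser event handled below is forwarded to that soup object; for
+    # example, parsing '<p>Hi</p>' results in soup.unknown_starttag('p', []),
+    # soup.handle_data('Hi'), and soup.unknown_endtag('p'). The soup, not this
+    # builder, owns the resulting tree.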
+ + def handle_starttag(self, name, attrs): + if name == 'meta': + self.soup.extractCharsetFromMeta(attrs) + else: + self.soup.unknown_starttag(name, attrs) + + def handle_endtag(self, name): + self.soup.unknown_endtag(name) + + def handle_data(self, content): + self.soup.handle_data(content) + + def _toStringSubclass(self, text, subclass): + """Adds a certain piece of text to the tree as a NavigableString + subclass.""" + self.soup.endData() + self.handle_data(text) + self.soup.endData(subclass) + + def handle_pi(self, text): + """Handle a processing instruction as a ProcessingInstruction + object, possibly one with a %SOUP-ENCODING% slot into which an + encoding will be plugged later.""" + if text[:3] == "xml": + text = u"xml version='1.0' encoding='%SOUP-ENCODING%'" + self._toStringSubclass(text, ProcessingInstruction) + + def handle_comment(self, text): + "Handle comments as Comment objects." + self._toStringSubclass(text, Comment) + + def handle_charref(self, ref): + "Handle character references as data." + if self.soup.convertEntities: + data = unichr(int(ref)) + else: + data = '&#%s;' % ref + self.handle_data(data) + + def handle_entityref(self, ref): + """Handle entity references as data, possibly converting known + HTML and/or XML entity references to the corresponding Unicode + characters.""" + data = None + if self.soup.convertHTMLEntities: + try: + data = unichr(name2codepoint[ref]) + except KeyError: + pass + + if not data and self.soup.convertXMLEntities: + data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref) + + if not data and self.soup.convertHTMLEntities and \ + not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref): + # TODO: We've got a problem here. We're told this is + # an entity reference, but it's not an XML entity + # reference or an HTML entity reference. Nonetheless, + # the logical thing to do is to pass it through as an + # unrecognized entity reference. + # + # Except: when the input is "&carol;" this function + # will be called with input "carol". When the input is + # "AT&T", this function will be called with input + # "T". We have no way of knowing whether a semicolon + # was present originally, so we don't know whether + # this is an unknown entity or just a misplaced + # ampersand. + # + # The more common case is a misplaced ampersand, so I + # escape the ampersand and omit the trailing semicolon. + data = "&%s" % ref + if not data: + # This case is different from the one above, because we + # haven't already gone through a supposedly comprehensive + # mapping of entities to Unicode characters. We might not + # have gone through any mapping at all. So the chances are + # very high that this is a real entity, and not a + # misplaced ampersand. + data = "&%s;" % ref + self.handle_data(data) + + def handle_decl(self, data): + "Handle DOCTYPEs and the like as Declaration objects." + self._toStringSubclass(data, Declaration) + + def parse_declaration(self, i): + """Treat a bogus SGML declaration as raw data. Treat a CDATA + declaration as a CData object.""" + j = None + if self.rawdata[i:i+9] == '<![CDATA[': + k = self.rawdata.find(']]>', i) + if k == -1: + k = len(self.rawdata) + data = self.rawdata[i+9:k] + j = k+3 + self._toStringSubclass(data, CData) + else: + try: + j = HTMLParser.parse_declaration(self, i) + except HTMLParseError: + toHandle = self.rawdata[i:] + self.handle_data(toHandle) + j = i + len(toHandle) + return j + + +class BeautifulStoneSoup(Tag): + + """This class contains the basic parser and search code. 
It defines + a parser that knows nothing about tag behavior except for the + following: + + You can't close a tag without closing all the tags it encloses. + That is, "<foo><bar></foo>" actually means + "<foo><bar></bar></foo>". + + [Another possible explanation is "<foo><bar /></foo>", but since + this class defines no SELF_CLOSING_TAGS, it will never use that + explanation.] + + This class is useful for parsing XML or made-up markup languages, + or when BeautifulSoup makes an assumption counter to what you were + expecting.""" + + SELF_CLOSING_TAGS = {} + NESTABLE_TAGS = {} + RESET_NESTING_TAGS = {} + QUOTE_TAGS = {} + PRESERVE_WHITESPACE_TAGS = [] + + MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'), + lambda x: x.group(1) + ' />'), + (re.compile('<!\s+([^<>]*)>'), + lambda x: '<!' + x.group(1) + '>') + ] + + ROOT_TAG_NAME = u'[document]' + + HTML_ENTITIES = "html" + XML_ENTITIES = "xml" + XHTML_ENTITIES = "xhtml" + # TODO: This only exists for backwards-compatibility + ALL_ENTITIES = XHTML_ENTITIES + + # Used when determining whether a text node is all whitespace and + # can be replaced with a single space. A text node that contains + # fancy Unicode spaces (usually non-breaking) should be left + # alone. + STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, } + + def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None, + markupMassage=True, smartQuotesTo=XML_ENTITIES, + convertEntities=None, selfClosingTags=None, isHTML=False, + builder=HTMLParserBuilder): + """The Soup object is initialized as the 'root tag', and the + provided markup (which can be a string or a file-like object) + is fed into the underlying parser. + + HTMLParser will process most bad HTML, and the BeautifulSoup + class has some tricks for dealing with some HTML that kills + HTMLParser, but Beautiful Soup can nonetheless choke or lose data + if your data uses self-closing tags or declarations + incorrectly. + + By default, Beautiful Soup uses regexes to sanitize input, + avoiding the vast majority of these problems. If the problems + don't apply to you, pass in False for markupMassage, and + you'll get better performance. + + The default parser massage techniques fix the two most common + instances of invalid HTML that choke HTMLParser: + + <br/> (No space between name of closing tag and tag close) + <! --Comment--> (Extraneous whitespace in declaration) + + You can pass in a custom list of (RE object, replace method) + tuples to get Beautiful Soup to scrub your input the way you + want.""" + + self.parseOnlyThese = parseOnlyThese + self.fromEncoding = fromEncoding + self.smartQuotesTo = smartQuotesTo + self.convertEntities = convertEntities + # Set the rules for how we'll deal with the entities we + # encounter + if self.convertEntities: + # It doesn't make sense to convert encoded characters to + # entities even while you're converting entities to Unicode. + # Just convert it all to Unicode. 
+ self.smartQuotesTo = None + if convertEntities == self.HTML_ENTITIES: + self.convertXMLEntities = False + self.convertHTMLEntities = True + self.escapeUnrecognizedEntities = True + elif convertEntities == self.XHTML_ENTITIES: + self.convertXMLEntities = True + self.convertHTMLEntities = True + self.escapeUnrecognizedEntities = False + elif convertEntities == self.XML_ENTITIES: + self.convertXMLEntities = True + self.convertHTMLEntities = False + self.escapeUnrecognizedEntities = False + else: + self.convertXMLEntities = False + self.convertHTMLEntities = False + self.escapeUnrecognizedEntities = False + + self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags) + self.builder = builder(self) + self.reset() + + if hasattr(markup, 'read'): # It's a file-type object. + markup = markup.read() + self.markup = markup + self.markupMassage = markupMassage + try: + self._feed(isHTML=isHTML) + except StopParsing: + pass + self.markup = None # The markup can now be GCed. + self.builder = None # So can the builder. + + def _feed(self, inDocumentEncoding=None, isHTML=False): + # Convert the document to Unicode. + markup = self.markup + if isinstance(markup, unicode): + if not hasattr(self, 'originalEncoding'): + self.originalEncoding = None + else: + dammit = UnicodeDammit\ + (markup, [self.fromEncoding, inDocumentEncoding], + smartQuotesTo=self.smartQuotesTo, isHTML=isHTML) + markup = dammit.unicode + self.originalEncoding = dammit.originalEncoding + self.declaredHTMLEncoding = dammit.declaredHTMLEncoding + if markup: + if self.markupMassage: + if not isList(self.markupMassage): + self.markupMassage = self.MARKUP_MASSAGE + for fix, m in self.markupMassage: + markup = fix.sub(m, markup) + # TODO: We get rid of markupMassage so that the + # soup object can be deepcopied later on. Some + # Python installations can't copy regexes. If anyone + # was relying on the existence of markupMassage, this + # might cause problems. + del(self.markupMassage) + self.builder.reset() + + self.builder.feed(markup) + # Close out any unfinished strings and close all the open tags. 
+ self.endData() + while self.currentTag.name != self.ROOT_TAG_NAME: + self.popTag() + + def isSelfClosingTag(self, name): + """Returns true iff the given string is the name of a + self-closing tag according to this parser.""" + return self.SELF_CLOSING_TAGS.has_key(name) \ + or self.instanceSelfClosingTags.has_key(name) + + def reset(self): + Tag.__init__(self, self, self.ROOT_TAG_NAME) + self.hidden = 1 + self.builder.reset() + self.currentData = [] + self.currentTag = None + self.tagStack = [] + self.quoteStack = [] + self.pushTag(self) + + def popTag(self): + tag = self.tagStack.pop() + # Tags with just one string-owning child get the child as a + # 'string' property, so that soup.tag.string is shorthand for + # soup.tag.contents[0] + if len(self.currentTag.contents) == 1 and \ + isinstance(self.currentTag.contents[0], NavigableString): + self.currentTag.string = self.currentTag.contents[0] + + #print "Pop", tag.name + if self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag): + #print "Push", tag.name + if self.currentTag: + self.currentTag.contents.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + + def endData(self, containerClass=NavigableString): + if self.currentData: + currentData = u''.join(self.currentData) + if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and + not set([tag.name for tag in self.tagStack]).intersection( + self.PRESERVE_WHITESPACE_TAGS)): + if '\n' in currentData: + currentData = '\n' + else: + currentData = ' ' + self.currentData = [] + if self.parseOnlyThese and len(self.tagStack) <= 1 and \ + (not self.parseOnlyThese.text or \ + not self.parseOnlyThese.search(currentData)): + return + o = containerClass(currentData) + o.setup(self.currentTag, self.previous) + if self.previous: + self.previous.next = o + self.previous = o + self.currentTag.contents.append(o) + + + def _popToTag(self, name, inclusivePop=True): + """Pops the tag stack up to and including the most recent + instance of the given tag. If inclusivePop is false, pops the tag + stack up to but *not* including the most recent instqance of + the given tag.""" + #print "Popping to %s" % name + if name == self.ROOT_TAG_NAME: + return + + numPops = 0 + mostRecentTag = None + for i in range(len(self.tagStack)-1, 0, -1): + if name == self.tagStack[i].name: + numPops = len(self.tagStack)-i + break + if not inclusivePop: + numPops = numPops - 1 + + for i in range(0, numPops): + mostRecentTag = self.popTag() + return mostRecentTag + + def _smartPop(self, name): + + """We need to pop up to the previous tag of this type, unless + one of this tag's nesting reset triggers comes between this + tag and the previous tag of this type, OR unless this tag is a + generic nesting trigger and another generic nesting trigger + comes between this tag and the previous tag of this type. + + Examples: + <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'. + <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'. + <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'. + + <li><ul><li> *<li>* should pop to 'ul', not the first 'li'. 
+ <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr' + <td><tr><td> *<td>* should pop to 'tr', not the first 'td' + """ + + nestingResetTriggers = self.NESTABLE_TAGS.get(name) + isNestable = nestingResetTriggers != None + isResetNesting = self.RESET_NESTING_TAGS.has_key(name) + popTo = None + inclusive = True + for i in range(len(self.tagStack)-1, 0, -1): + p = self.tagStack[i] + if (not p or p.name == name) and not isNestable: + #Non-nestable tags get popped to the top or to their + #last occurance. + popTo = name + break + if (nestingResetTriggers != None + and p.name in nestingResetTriggers) \ + or (nestingResetTriggers == None and isResetNesting + and self.RESET_NESTING_TAGS.has_key(p.name)): + + #If we encounter one of the nesting reset triggers + #peculiar to this tag, or we encounter another tag + #that causes nesting to reset, pop up to but not + #including that tag. + popTo = p.name + inclusive = False + break + p = p.parent + if popTo: + self._popToTag(popTo, inclusive) + + def unknown_starttag(self, name, attrs, selfClosing=0): + #print "Start tag %s: %s" % (name, attrs) + if self.quoteStack: + #This is not a real tag. + #print "<%s> is not real!" % name + attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs)) + self.handle_data('<%s%s>' % (name, attrs)) + return + self.endData() + + if not self.isSelfClosingTag(name) and not selfClosing: + self._smartPop(name) + + if self.parseOnlyThese and len(self.tagStack) <= 1 \ + and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)): + return + + tag = Tag(self, name, attrs, self.currentTag, self.previous) + if self.previous: + self.previous.next = tag + self.previous = tag + self.pushTag(tag) + if selfClosing or self.isSelfClosingTag(name): + self.popTag() + if name in self.QUOTE_TAGS: + #print "Beginning quote (%s)" % name + self.quoteStack.append(name) + self.literal = 1 + return tag + + def unknown_endtag(self, name): + #print "End tag %s" % name + if self.quoteStack and self.quoteStack[-1] != name: + #This is not a real end tag. + #print "</%s> is not real!" % name + self.handle_data('</%s>' % name) + return + self.endData() + self._popToTag(name) + if self.quoteStack and self.quoteStack[-1] == name: + self.quoteStack.pop() + self.literal = (len(self.quoteStack) > 0) + + def handle_data(self, data): + self.currentData.append(data) + + def extractCharsetFromMeta(self, attrs): + self.unknown_starttag('meta', attrs) + + +class BeautifulSoup(BeautifulStoneSoup): + + """This parser knows the following facts about HTML: + + * Some tags have no closing tag and should be interpreted as being + closed as soon as they are encountered. + + * The text inside some tags (ie. 'script') may contain tags which + are not really part of the document and which should be parsed + as text, not tags. If you want to parse the text as tags, you can + always fetch it and parse it explicitly. + + * Tag nesting rules: + + Most tags can't be nested at all. For instance, the occurance of + a <p> tag should implicitly close the previous <p> tag. + + <p>Para1<p>Para2 + should be transformed into: + <p>Para1</p><p>Para2 + + Some tags can be nested arbitrarily. For instance, the occurance + of a <blockquote> tag should _not_ implicitly close the previous + <blockquote> tag. + + Alice said: <blockquote>Bob said: <blockquote>Blah + should NOT be transformed into: + Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah + + Some tags can be nested, but the nesting is reset by the + interposition of other tags. 
For instance, a <tr> tag should + implicitly close the previous <tr> tag within the same <table>, + but not close a <tr> tag in another table. + + <table><tr>Blah<tr>Blah + should be transformed into: + <table><tr>Blah</tr><tr>Blah + but, + <tr>Blah<table><tr>Blah + should NOT be transformed into + <tr>Blah<table></tr><tr>Blah + + Differing assumptions about tag nesting rules are a major source + of problems with the BeautifulSoup class. If BeautifulSoup is not + treating as nestable a tag your page author treats as nestable, + try ICantBelieveItsBeautifulSoup, MinimalSoup, or + BeautifulStoneSoup before writing your own subclass.""" + + def __init__(self, *args, **kwargs): + if not kwargs.has_key('smartQuotesTo'): + kwargs['smartQuotesTo'] = self.HTML_ENTITIES + kwargs['isHTML'] = True + BeautifulStoneSoup.__init__(self, *args, **kwargs) + + SELF_CLOSING_TAGS = buildTagMap(None, + ['br' , 'hr', 'input', 'img', 'meta', + 'spacer', 'link', 'frame', 'base']) + + PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea']) + + QUOTE_TAGS = {'script' : None, 'textarea' : None} + + #According to the HTML standard, each of these inline tags can + #contain another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup', + 'center'] + + #According to the HTML standard, these block tags can contain + #another tag of the same type. Furthermore, it's common + #to actually use these tags this way. + NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del'] + + #Lists can contain other lists, but there are restrictions. + NESTABLE_LIST_TAGS = { 'ol' : [], + 'ul' : [], + 'li' : ['ul', 'ol'], + 'dl' : [], + 'dd' : ['dl'], + 'dt' : ['dl'] } + + #Tables can contain other tables, but there are restrictions. + NESTABLE_TABLE_TAGS = {'table' : [], + 'tr' : ['table', 'tbody', 'tfoot', 'thead'], + 'td' : ['tr'], + 'th' : ['tr'], + 'thead' : ['table'], + 'tbody' : ['table'], + 'tfoot' : ['table'], + } + + NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre'] + + #If one of these tags is encountered, all tags up to the next tag of + #this type are popped. + RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript', + NON_NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, + NESTABLE_TABLE_TAGS) + + NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS, + NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS) + + # Used to detect the charset in a META tag; see start_meta + CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) + + def extractCharsetFromMeta(self, attrs): + """Beautiful Soup can detect a charset included in a META tag, + try to convert the document to that charset, and re-parse the + document from the beginning.""" + httpEquiv = None + contentType = None + contentTypeIndex = None + tagNeedsEncodingSubstitution = False + + for i in range(0, len(attrs)): + key, value = attrs[i] + key = key.lower() + if key == 'http-equiv': + httpEquiv = value + elif key == 'content': + contentType = value + contentTypeIndex = i + + if httpEquiv and contentType: # It's an interesting meta tag. + match = self.CHARSET_RE.search(contentType) + if match: + if (self.declaredHTMLEncoding is not None or + self.originalEncoding == self.fromEncoding): + # An HTML encoding was sniffed while converting + # the document to Unicode, or an HTML encoding was + # sniffed during a previous pass through the + # document, or an encoding was specified + # explicitly and it worked. Rewrite the meta tag. 
+ def rewrite(match): + return match.group(1) + "%SOUP-ENCODING%" + newAttr = self.CHARSET_RE.sub(rewrite, contentType) + attrs[contentTypeIndex] = (attrs[contentTypeIndex][0], + newAttr) + tagNeedsEncodingSubstitution = True + else: + # This is our first pass through the document. + # Go through it again with the encoding information. + newCharset = match.group(3) + if newCharset and newCharset != self.originalEncoding: + self.declaredHTMLEncoding = newCharset + self._feed(self.declaredHTMLEncoding) + raise StopParsing + pass + tag = self.unknown_starttag("meta", attrs) + if tag and tagNeedsEncodingSubstitution: + tag.containsSubstitutions = True + + +class StopParsing(Exception): + pass + +class ICantBelieveItsBeautifulSoup(BeautifulSoup): + + """The BeautifulSoup class is oriented towards skipping over + common HTML errors like unclosed tags. However, sometimes it makes + errors of its own. For instance, consider this fragment: + + <b>Foo<b>Bar</b></b> + + This is perfectly valid (if bizarre) HTML. However, the + BeautifulSoup class will implicitly close the first b tag when it + encounters the second 'b'. It will think the author wrote + "<b>Foo<b>Bar", and didn't close the first 'b' tag, because + there's no real-world reason to bold something that's already + bold. When it encounters '</b></b>' it will close two more 'b' + tags, for a grand total of three tags closed instead of two. This + can throw off the rest of your document structure. The same is + true of a number of other tags, listed below. + + It's much more common for someone to forget to close a 'b' tag + than to actually use nested 'b' tags, and the BeautifulSoup class + handles the common case. This class handles the not-co-common + case: where you can't believe someone wrote what they did, but + it's valid HTML and BeautifulSoup screwed up by assuming it + wouldn't be.""" + + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \ + ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong', + 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b', + 'big'] + + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript'] + + NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS, + I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS) + +class MinimalSoup(BeautifulSoup): + """The MinimalSoup class is for parsing HTML that contains + pathologically bad markup. It makes no assumptions about tag + nesting, but it does know which tags are self-closing, that + <script> tags contain Javascript and should not be parsed, that + META tags may contain encoding information, and so on. + + This also makes it better for subclassing than BeautifulStoneSoup + or BeautifulSoup.""" + + RESET_NESTING_TAGS = buildTagMap('noscript') + NESTABLE_TAGS = {} + +class BeautifulSOAP(BeautifulStoneSoup): + """This class will push a tag with only a single string child into + the tag's parent as an attribute. The attribute's name is the tag + name, and the value is the string child. An example should give + the flavor of the change: + + <foo><bar>baz</bar></foo> + => + <foo bar="baz"><bar>baz</bar></foo> + + You can then access fooTag['bar'] instead of fooTag.barTag.string. + + This is, of course, useful for scraping structures that tend to + use subelements instead of attributes, such as SOAP messages. Note + that it modifies its input, so don't print the modified version + out. + + I'm not sure how many people really want to use this class; let me + know if you do. 
Mainly I like the name.""" + + def popTag(self): + if len(self.tagStack) > 1: + tag = self.tagStack[-1] + parent = self.tagStack[-2] + parent._getAttrMap() + if (isinstance(tag, Tag) and len(tag.contents) == 1 and + isinstance(tag.contents[0], NavigableString) and + not parent.attrMap.has_key(tag.name)): + parent[tag.name] = tag.contents[0] + BeautifulStoneSoup.popTag(self) + +#Enterprise class names! It has come to our attention that some people +#think the names of the Beautiful Soup parser classes are too silly +#and "unprofessional" for use in enterprise screen-scraping. We feel +#your pain! For such-minded folk, the Beautiful Soup Consortium And +#All-Night Kosher Bakery recommends renaming this file to +#"RobustParser.py" (or, in cases of extreme enterprisiness, +#"RobustParserBeanInterface.class") and using the following +#enterprise-friendly class aliases: +class RobustXMLParser(BeautifulStoneSoup): + pass +class RobustHTMLParser(BeautifulSoup): + pass +class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup): + pass +class RobustInsanelyWackAssHTMLParser(MinimalSoup): + pass +class SimplifyingSOAPParser(BeautifulSOAP): + pass + +###################################################### +# +# Bonus library: Unicode, Dammit +# +# This class forces XML data into a standard format (usually to UTF-8 +# or Unicode). It is heavily based on code from Mark Pilgrim's +# Universal Feed Parser. It does not rewrite the XML or HTML to +# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi +# (XML) and BeautifulSoup.start_meta (HTML). + +# Autodetects character encodings. +# Download from http://chardet.feedparser.org/ +try: + import chardet +# import chardet.constants +# chardet.constants._debug = 1 +except ImportError: + chardet = None + +# cjkcodecs and iconv_codec make Python know about more character encodings. +# Both are available from http://cjkpython.i18n.org/ +# They're built in if you use Python 2.4. +try: + import cjkcodecs.aliases +except ImportError: + pass +try: + import iconv_codec +except ImportError: + pass + +class UnicodeDammit: + """A class for detecting the encoding of a *ML document and + converting it to a Unicode string. If the source encoding is + windows-1252, can replace MS smart quotes with their HTML or XML + equivalents.""" + + # This dictionary maps commonly seen values for "charset" in HTML + # meta tags to the corresponding Python codec names. It only covers + # values that aren't in Python's aliases and can't be determined + # by the heuristics in find_codec. 
+ CHARSET_ALIASES = { "macintosh" : "mac-roman", + "x-sjis" : "shift-jis" } + + def __init__(self, markup, overrideEncodings=[], + smartQuotesTo='xml', isHTML=False): + self.declaredHTMLEncoding = None + self.markup, documentEncoding, sniffedEncoding = \ + self._detectEncoding(markup, isHTML) + self.smartQuotesTo = smartQuotesTo + self.triedEncodings = [] + if markup == '' or isinstance(markup, unicode): + self.originalEncoding = None + self.unicode = unicode(markup) + return + + u = None + for proposedEncoding in overrideEncodings: + u = self._convertFrom(proposedEncoding) + if u: break + if not u: + for proposedEncoding in (documentEncoding, sniffedEncoding): + u = self._convertFrom(proposedEncoding) + if u: break + + # If no luck and we have auto-detection library, try that: + if not u and chardet and not isinstance(self.markup, unicode): + u = self._convertFrom(chardet.detect(self.markup)['encoding']) + + # As a last resort, try utf-8 and windows-1252: + if not u: + for proposed_encoding in ("utf-8", "windows-1252"): + u = self._convertFrom(proposed_encoding) + if u: break + + self.unicode = u + if not u: self.originalEncoding = None + + def _subMSChar(self, match): + """Changes a MS smart quote character to an XML or HTML + entity.""" + orig = match.group(1) + sub = self.MS_CHARS.get(orig) + if type(sub) == types.TupleType: + if self.smartQuotesTo == 'xml': + sub = '&#x'.encode() + sub[1].encode() + ';'.encode() + else: + sub = '&'.encode() + sub[0].encode() + ';'.encode() + else: + sub = sub.encode() + return sub + + def _convertFrom(self, proposed): + proposed = self.find_codec(proposed) + if not proposed or proposed in self.triedEncodings: + return None + self.triedEncodings.append(proposed) + markup = self.markup + + # Convert smart quotes to HTML if coming from an encoding + # that might have them. + if self.smartQuotesTo and proposed.lower() in("windows-1252", + "iso-8859-1", + "iso-8859-2"): + smart_quotes_re = "([\x80-\x9f])" + smart_quotes_compiled = re.compile(smart_quotes_re) + markup = smart_quotes_compiled.sub(self._subMSChar, markup) + + try: + # print "Trying to convert document to %s" % proposed + u = self._toUnicode(markup, proposed) + self.markup = u + self.originalEncoding = proposed + except Exception, e: + # print "That didn't work!" + # print e + return None + #print "Correct encoding: %s" % proposed + return self.markup + + def _toUnicode(self, data, encoding): + '''Given a string and its encoding, decodes the string into Unicode. 
+ %encoding is a string recognized by encodings.aliases''' + + # strip Byte Order Mark (if present) + if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ + and (data[2:4] != '\x00\x00'): + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ + and (data[2:4] != '\x00\x00'): + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == '\xef\xbb\xbf': + encoding = 'utf-8' + data = data[3:] + elif data[:4] == '\x00\x00\xfe\xff': + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == '\xff\xfe\x00\x00': + encoding = 'utf-32le' + data = data[4:] + newdata = unicode(data, encoding) + return newdata + + def _detectEncoding(self, xml_data, isHTML=False): + """Given a document, tries to detect its XML encoding.""" + xml_encoding = sniffed_xml_encoding = None + try: + if xml_data[:4] == '\x4c\x6f\xa7\x94': + # EBCDIC + xml_data = self._ebcdic_to_ascii(xml_data) + elif xml_data[:4] == '\x00\x3c\x00\x3f': + # UTF-16BE + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \ + and (xml_data[2:4] != '\x00\x00'): + # UTF-16BE with BOM + sniffed_xml_encoding = 'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x3f\x00': + # UTF-16LE + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \ + (xml_data[2:4] != '\x00\x00'): + # UTF-16LE with BOM + sniffed_xml_encoding = 'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\x00\x3c': + # UTF-32BE + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\x3c\x00\x00\x00': + # UTF-32LE + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == '\x00\x00\xfe\xff': + # UTF-32BE with BOM + sniffed_xml_encoding = 'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == '\xff\xfe\x00\x00': + # UTF-32LE with BOM + sniffed_xml_encoding = 'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == '\xef\xbb\xbf': + # UTF-8 with BOM + sniffed_xml_encoding = 'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + sniffed_xml_encoding = 'ascii' + pass + except: + xml_encoding_match = None + xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode() + xml_encoding_match = re.compile(xml_encoding_re).match(xml_data) + if not xml_encoding_match and isHTML: + meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode() + regexp = re.compile(meta_re, re.I) + xml_encoding_match = regexp.search(xml_data) + if xml_encoding_match is not None: + xml_encoding = xml_encoding_match.groups()[0].decode( + 'ascii').lower() + if isHTML: + self.declaredHTMLEncoding = xml_encoding + if sniffed_xml_encoding and \ + (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', + 'iso-10646-ucs-4', 'ucs-4', 'csucs4', + 'utf-16', 'utf-32', 'utf_16', 'utf_32', + 'utf16', 'u16')): + xml_encoding = sniffed_xml_encoding + return xml_data, xml_encoding, sniffed_xml_encoding + + + def find_codec(self, charset): + return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ + or (charset and self._codec(charset.replace("-", ""))) \ + or (charset and self._codec(charset.replace("-", "_"))) \ + or charset + + def _codec(self, 
charset): + if not charset: return charset + codec = None + try: + codecs.lookup(charset) + codec = charset + except (LookupError, ValueError): + pass + return codec + + EBCDIC_TO_ASCII_MAP = None + def _ebcdic_to_ascii(self, s): + c = self.__class__ + if not c.EBCDIC_TO_ASCII_MAP: + emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, + 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, + 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, + 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, + 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, + 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, + 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, + 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, + 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, + 201,202,106,107,108,109,110,111,112,113,114,203,204,205, + 206,207,208,209,126,115,116,117,118,119,120,121,122,210, + 211,212,213,214,215,216,217,218,219,220,221,222,223,224, + 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, + 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, + 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, + 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, + 250,251,252,253,254,255) + import string + c.EBCDIC_TO_ASCII_MAP = string.maketrans( \ + ''.join(map(chr, range(256))), ''.join(map(chr, emap))) + return s.translate(c.EBCDIC_TO_ASCII_MAP) + + MS_CHARS = { '\x80' : ('euro', '20AC'), + '\x81' : ' ', + '\x82' : ('sbquo', '201A'), + '\x83' : ('fnof', '192'), + '\x84' : ('bdquo', '201E'), + '\x85' : ('hellip', '2026'), + '\x86' : ('dagger', '2020'), + '\x87' : ('Dagger', '2021'), + '\x88' : ('circ', '2C6'), + '\x89' : ('permil', '2030'), + '\x8A' : ('Scaron', '160'), + '\x8B' : ('lsaquo', '2039'), + '\x8C' : ('OElig', '152'), + '\x8D' : '?', + '\x8E' : ('#x17D', '17D'), + '\x8F' : '?', + '\x90' : '?', + '\x91' : ('lsquo', '2018'), + '\x92' : ('rsquo', '2019'), + '\x93' : ('ldquo', '201C'), + '\x94' : ('rdquo', '201D'), + '\x95' : ('bull', '2022'), + '\x96' : ('ndash', '2013'), + '\x97' : ('mdash', '2014'), + '\x98' : ('tilde', '2DC'), + '\x99' : ('trade', '2122'), + '\x9a' : ('scaron', '161'), + '\x9b' : ('rsaquo', '203A'), + '\x9c' : ('oelig', '153'), + '\x9d' : '?', + '\x9e' : ('#x17E', '17E'), + '\x9f' : ('Yuml', ''),} + +####################################################################### + + +#By default, act as an HTML pretty-printer. +if __name__ == '__main__': + import sys + soup = BeautifulSoup(sys.stdin) + print soup.prettify() diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py new file mode 100644 index 0000000..c2249c2 --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py @@ -0,0 +1,98 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR +# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This module is required for Python to treat this directory as a package. + +"""Autoinstalls third-party code required by WebKit.""" + +from __future__ import with_statement + +import codecs +import os + +from webkitpy.common.system.autoinstall import AutoInstaller + +# Putting the autoinstall code into webkitpy/thirdparty/__init__.py +# ensures that no autoinstalling occurs until a caller imports from +# webkitpy.thirdparty. This is useful if the caller wants to configure +# logging prior to executing autoinstall code. + +# FIXME: Ideally, a package should be autoinstalled only if the caller +# attempts to import from that individual package. This would +# make autoinstalling lazier than it is currently. This can +# perhaps be done using Python's import hooks as the original +# autoinstall implementation did. + +# FIXME: If any of these servers is offline, webkit-patch breaks (and maybe +# other scripts do, too). See <http://webkit.org/b/42080>. + +# We put auto-installed third-party modules in this directory-- +# +# webkitpy/thirdparty/autoinstalled +thirdparty_dir = os.path.dirname(__file__) +autoinstalled_dir = os.path.join(thirdparty_dir, "autoinstalled") + +# We need to download ClientForm since the mechanize package that we download +# below requires it. The mechanize package uses ClientForm, for example, +# in _html.py. Since mechanize imports ClientForm in the following way, +# +# > import sgmllib, ClientForm +# +# the search path needs to include ClientForm. We put ClientForm in +# its own directory so that we can include it in the search path without +# including other modules as a side effect. +clientform_dir = os.path.join(autoinstalled_dir, "clientform") +installer = AutoInstaller(append_to_search_path=True, + target_dir=clientform_dir) +installer.install(url="http://pypi.python.org/packages/source/C/ClientForm/ClientForm-0.2.10.zip", + url_subpath="ClientForm.py") + +# The remaining packages do not need to be in the search path, so we create +# a new AutoInstaller instance that does not append to the search path. +installer = AutoInstaller(target_dir=autoinstalled_dir) + +installer.install(url="http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip", + url_subpath="mechanize") +installer.install(url="http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b", + url_subpath="pep8-0.5.0/pep8.py") +installer.install(url="http://www.adambarth.com/webkit/eliza", + target_name="eliza.py") + +# Since irclib and ircbot are two top-level packages, we need to import +# them separately. We group them into an irc package for better +# organization purposes. 
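Once the install() calls in this file have run, callers reach the downloaded code through the autoinstalled package. A minimal import sketch, assuming the target directories set up in this file (the exact spellings are illustrative, not quoted from the tree):

    # Illustrative only; package paths assumed from the target_dir values in this file.
    from webkitpy.thirdparty.autoinstalled import mechanize
    from webkitpy.thirdparty.autoinstalled.irc import ircbot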
+irc_dir = os.path.join(autoinstalled_dir, "irc") +installer = AutoInstaller(target_dir=irc_dir) +installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip", url_subpath="irclib.py") +installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip", url_subpath="ircbot.py") + +pywebsocket_dir = os.path.join(autoinstalled_dir, "pywebsocket") +installer = AutoInstaller(target_dir=pywebsocket_dir) +installer.install(url="http://pywebsocket.googlecode.com/files/mod_pywebsocket-0.5.2.tar.gz", + url_subpath="pywebsocket-0.5.2/src/mod_pywebsocket") + +readme_path = os.path.join(autoinstalled_dir, "README") +if not os.path.exists(readme_path): + with codecs.open(readme_path, "w", "ascii") as file: + file.write("This directory is auto-generated by WebKit and is " + "safe to delete.\nIt contains needed third-party Python " + "packages automatically downloaded from the web.") diff --git a/Tools/Scripts/webkitpy/thirdparty/mock.py b/Tools/Scripts/webkitpy/thirdparty/mock.py new file mode 100644 index 0000000..015c19e --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/mock.py @@ -0,0 +1,309 @@ +# mock.py +# Test tools for mocking and patching. +# Copyright (C) 2007-2009 Michael Foord +# E-mail: fuzzyman AT voidspace DOT org DOT uk + +# mock 0.6.0 +# http://www.voidspace.org.uk/python/mock/ + +# Released subject to the BSD License +# Please see http://www.voidspace.org.uk/python/license.shtml + +# 2009-11-25: Licence downloaded from above URL. +# BEGIN DOWNLOADED LICENSE +# +# Copyright (c) 2003-2009, Michael Foord +# All rights reserved. +# E-mail : fuzzyman AT voidspace DOT org DOT uk +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# +# * Neither the name of Michael Foord nor the name of Voidspace +# may be used to endorse or promote products derived from this +# software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# END DOWNLOADED LICENSE + +# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml +# Comments, suggestions and bug reports welcome. 
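Before the implementation, a rough usage sketch of the API this file provides (the names and values below are illustrative assumptions, not code from WebKit): Mock records how it was called and what to return, while patch temporarily replaces an attribute for the duration of a test function.

    # Minimal sketch relying only on Mock and patch as defined below.
    import os
    from webkitpy.thirdparty.mock import Mock, patch

    mock = Mock(return_value=42)
    assert mock(1, key='value') == 42          # returns the configured return_value
    mock.assert_called_with(1, key='value')    # call arguments were recorded
    assert mock.call_count == 1

    @patch('os.getcwd')                        # os.getcwd is swapped for a Mock,
    def test_cwd(mock_getcwd):                 # which is passed into the test
        mock_getcwd.return_value = '/fake'
        assert os.getcwd() == '/fake'

    test_cwd()                                 # the original os.getcwd is restored afterwards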
+ + +__all__ = ( + 'Mock', + 'patch', + 'patch_object', + 'sentinel', + 'DEFAULT' +) + +__version__ = '0.6.0' + +class SentinelObject(object): + def __init__(self, name): + self.name = name + + def __repr__(self): + return '<SentinelObject "%s">' % self.name + + +class Sentinel(object): + def __init__(self): + self._sentinels = {} + + def __getattr__(self, name): + return self._sentinels.setdefault(name, SentinelObject(name)) + + +sentinel = Sentinel() + +DEFAULT = sentinel.DEFAULT + +class OldStyleClass: + pass +ClassType = type(OldStyleClass) + +def _is_magic(name): + return '__%s__' % name[2:-2] == name + +def _copy(value): + if type(value) in (dict, list, tuple, set): + return type(value)(value) + return value + + +class Mock(object): + + def __init__(self, spec=None, side_effect=None, return_value=DEFAULT, + name=None, parent=None, wraps=None): + self._parent = parent + self._name = name + if spec is not None and not isinstance(spec, list): + spec = [member for member in dir(spec) if not _is_magic(member)] + + self._methods = spec + self._children = {} + self._return_value = return_value + self.side_effect = side_effect + self._wraps = wraps + + self.reset_mock() + + + def reset_mock(self): + self.called = False + self.call_args = None + self.call_count = 0 + self.call_args_list = [] + self.method_calls = [] + for child in self._children.itervalues(): + child.reset_mock() + if isinstance(self._return_value, Mock): + self._return_value.reset_mock() + + + def __get_return_value(self): + if self._return_value is DEFAULT: + self._return_value = Mock() + return self._return_value + + def __set_return_value(self, value): + self._return_value = value + + return_value = property(__get_return_value, __set_return_value) + + + def __call__(self, *args, **kwargs): + self.called = True + self.call_count += 1 + self.call_args = (args, kwargs) + self.call_args_list.append((args, kwargs)) + + parent = self._parent + name = self._name + while parent is not None: + parent.method_calls.append((name, args, kwargs)) + if parent._parent is None: + break + name = parent._name + '.' 
+ name + parent = parent._parent + + ret_val = DEFAULT + if self.side_effect is not None: + if (isinstance(self.side_effect, Exception) or + isinstance(self.side_effect, (type, ClassType)) and + issubclass(self.side_effect, Exception)): + raise self.side_effect + + ret_val = self.side_effect(*args, **kwargs) + if ret_val is DEFAULT: + ret_val = self.return_value + + if self._wraps is not None and self._return_value is DEFAULT: + return self._wraps(*args, **kwargs) + if ret_val is DEFAULT: + ret_val = self.return_value + return ret_val + + + def __getattr__(self, name): + if self._methods is not None: + if name not in self._methods: + raise AttributeError("Mock object has no attribute '%s'" % name) + elif _is_magic(name): + raise AttributeError(name) + + if name not in self._children: + wraps = None + if self._wraps is not None: + wraps = getattr(self._wraps, name) + self._children[name] = Mock(parent=self, name=name, wraps=wraps) + + return self._children[name] + + + def assert_called_with(self, *args, **kwargs): + assert self.call_args == (args, kwargs), 'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args) + + +def _dot_lookup(thing, comp, import_path): + try: + return getattr(thing, comp) + except AttributeError: + __import__(import_path) + return getattr(thing, comp) + + +def _importer(target): + components = target.split('.') + import_path = components.pop(0) + thing = __import__(import_path) + + for comp in components: + import_path += ".%s" % comp + thing = _dot_lookup(thing, comp, import_path) + return thing + + +class _patch(object): + def __init__(self, target, attribute, new, spec, create): + self.target = target + self.attribute = attribute + self.new = new + self.spec = spec + self.create = create + self.has_local = False + + + def __call__(self, func): + if hasattr(func, 'patchings'): + func.patchings.append(self) + return func + + def patched(*args, **keywargs): + # don't use a with here (backwards compatability with 2.5) + extra_args = [] + for patching in patched.patchings: + arg = patching.__enter__() + if patching.new is DEFAULT: + extra_args.append(arg) + args += tuple(extra_args) + try: + return func(*args, **keywargs) + finally: + for patching in getattr(patched, 'patchings', []): + patching.__exit__() + + patched.patchings = [self] + patched.__name__ = func.__name__ + patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno", + func.func_code.co_firstlineno) + return patched + + + def get_original(self): + target = self.target + name = self.attribute + create = self.create + + original = DEFAULT + if _has_local_attr(target, name): + try: + original = target.__dict__[name] + except AttributeError: + # for instances of classes with slots, they have no __dict__ + original = getattr(target, name) + elif not create and not hasattr(target, name): + raise AttributeError("%s does not have the attribute %r" % (target, name)) + return original + + + def __enter__(self): + new, spec, = self.new, self.spec + original = self.get_original() + if new is DEFAULT: + # XXXX what if original is DEFAULT - shouldn't use it as a spec + inherit = False + if spec == True: + # set spec to the object we are replacing + spec = original + if isinstance(spec, (type, ClassType)): + inherit = True + new = Mock(spec=spec) + if inherit: + new.return_value = Mock(spec=spec) + self.temp_original = original + setattr(self.target, self.attribute, new) + return new + + + def __exit__(self, *_): + if self.temp_original is not DEFAULT: + setattr(self.target, self.attribute, 
self.temp_original) + else: + delattr(self.target, self.attribute) + del self.temp_original + + +def patch_object(target, attribute, new=DEFAULT, spec=None, create=False): + return _patch(target, attribute, new, spec, create) + + +def patch(target, new=DEFAULT, spec=None, create=False): + try: + target, attribute = target.rsplit('.', 1) + except (TypeError, ValueError): + raise TypeError("Need a valid target to patch. You supplied: %r" % (target,)) + target = _importer(target) + return _patch(target, attribute, new, spec, create) + + + +def _has_local_attr(obj, name): + try: + return name in vars(obj) + except TypeError: + # objects without a __dict__ + return hasattr(obj, name) diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/LICENSE.txt b/Tools/Scripts/webkitpy/thirdparty/simplejson/LICENSE.txt new file mode 100644 index 0000000..ad95f29 --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2006 Bob Ippolito + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/README.txt b/Tools/Scripts/webkitpy/thirdparty/simplejson/README.txt new file mode 100644 index 0000000..7f726ce --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/README.txt @@ -0,0 +1,11 @@ +URL: http://undefined.org/python/#simplejson +Version: 1.7.3 +License: MIT +License File: LICENSE.txt + +Description: +simplejson is a JSON encoder and decoder for Python. + + +Local Modifications: +Removed unit tests from current distribution. diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/__init__.py b/Tools/Scripts/webkitpy/thirdparty/simplejson/__init__.py new file mode 100644 index 0000000..38d6229 --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/__init__.py @@ -0,0 +1,287 @@ +r""" +A simple, fast, extensible JSON encoder and decoder + +JSON (JavaScript Object Notation) <http://json.org> is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. + +simplejson exposes an API familiar to uses of the standard library +marshal and pickle modules. 
+ +Encoding basic Python object hierarchies:: + + >>> import simplejson + >>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print simplejson.dumps("\"foo\bar") + "\"foo\bar" + >>> print simplejson.dumps(u'\u1234') + "\u1234" + >>> print simplejson.dumps('\\') + "\\" + >>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) + {"a": 0, "b": 0, "c": 0} + >>> from StringIO import StringIO + >>> io = StringIO() + >>> simplejson.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import simplejson + >>> simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing:: + + >>> import simplejson + >>> print simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4) + { + "4": 5, + "6": 7 + } + +Decoding JSON:: + + >>> import simplejson + >>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') + [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] + >>> simplejson.loads('"\\"foo\\bar"') + u'"foo\x08ar' + >>> from StringIO import StringIO + >>> io = StringIO('["streaming API"]') + >>> simplejson.load(io) + [u'streaming API'] + +Specializing JSON object decoding:: + + >>> import simplejson + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... object_hook=as_complex) + (1+2j) + +Extending JSONEncoder:: + + >>> import simplejson + >>> class ComplexEncoder(simplejson.JSONEncoder): + ... def default(self, obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... return simplejson.JSONEncoder.default(self, obj) + ... + >>> dumps(2 + 1j, cls=ComplexEncoder) + '[2.0, 1.0]' + >>> ComplexEncoder().encode(2 + 1j) + '[2.0, 1.0]' + >>> list(ComplexEncoder().iterencode(2 + 1j)) + ['[', '2.0', ', ', '1.0', ']'] + + +Note that the JSON produced by this module's default settings +is a subset of YAML, so it may be used as a serializer for that as well. +""" +__version__ = '1.7.3' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONEncoder', +] + +from decoder import JSONDecoder +from encoder import JSONEncoder + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8' +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', **kw): + """ + Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is ``False``, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If ``check_circular`` is ``False``, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). 
+ + If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) + in strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a non-negative integer, then JSON array elements and object + members will be pretty-printed with that indent level. An indent level + of 0 will only insert newlines. ``None`` is the most compact representation. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. + """ + # cached encoder + if (skipkeys is False and ensure_ascii is True and + check_circular is True and allow_nan is True and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and not kw): + iterable = _default_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', **kw): + """ + Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is ``False``, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is ``False``, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in + strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a non-negative integer, then JSON array elements and + object members will be pretty-printed with that indent level. An indent + level of 0 will only insert newlines. ``None`` is the most compact + representation. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. 
+ """ + # cached encoder + if (skipkeys is False and ensure_ascii is True and + check_circular is True and allow_nan is True and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, + **kw).encode(obj) + +_default_decoder = JSONDecoder(encoding=None, object_hook=None) + +def load(fp, encoding=None, cls=None, object_hook=None, **kw): + """ + Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. + + If the contents of ``fp`` is encoded with an ASCII based encoding other + than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must + be specified. Encodings that are not ASCII based (such as UCS-2) are + not allowed, and should be wrapped with + ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode`` + object and passed to ``loads()`` + + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + """ + return loads(fp.read(), + encoding=encoding, cls=cls, object_hook=object_hook, **kw) + +def loads(s, encoding=None, cls=None, object_hook=None, **kw): + """ + Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON + document) to a Python object. + + If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding + other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name + must be specified. Encodings that are not ASCII based (such as UCS-2) + are not allowed and should be decoded to ``unicode`` first. + + ``object_hook`` is an optional function that will be called with the + result of any object literal decode (a ``dict``). The return value of + ``object_hook`` will be used instead of the ``dict``. This feature + can be used to implement custom decoders (e.g. JSON-RPC class hinting). + + To use a custom ``JSONDecoder`` subclass, specify it with the ``cls`` + kwarg. + """ + if cls is None and encoding is None and object_hook is None and not kw: + return _default_decoder.decode(s) + if cls is None: + cls = JSONDecoder + if object_hook is not None: + kw['object_hook'] = object_hook + return cls(encoding=encoding, **kw).decode(s) + +def read(s): + """ + json-py API compatibility hook. Use loads(s) instead. + """ + import warnings + warnings.warn("simplejson.loads(s) should be used instead of read(s)", + DeprecationWarning) + return loads(s) + +def write(obj): + """ + json-py API compatibility hook. Use dumps(s) instead. 
+ """ + import warnings + warnings.warn("simplejson.dumps(s) should be used instead of write(s)", + DeprecationWarning) + return dumps(obj) + + diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/_speedups.c b/Tools/Scripts/webkitpy/thirdparty/simplejson/_speedups.c new file mode 100644 index 0000000..8f290bb --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/_speedups.c @@ -0,0 +1,215 @@ +#include "Python.h" +#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) +typedef int Py_ssize_t; +#define PY_SSIZE_T_MAX INT_MAX +#define PY_SSIZE_T_MIN INT_MIN +#endif + +static Py_ssize_t +ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars); +static PyObject * +ascii_escape_unicode(PyObject *pystr); +static PyObject * +ascii_escape_str(PyObject *pystr); +static PyObject * +py_encode_basestring_ascii(PyObject* self __attribute__((__unused__)), PyObject *pystr); +void init_speedups(void); + +#define S_CHAR(c) (c >= ' ' && c <= '~' && c != '\\' && c != '/' && c != '"') + +#define MIN_EXPANSION 6 +#ifdef Py_UNICODE_WIDE +#define MAX_EXPANSION (2 * MIN_EXPANSION) +#else +#define MAX_EXPANSION MIN_EXPANSION +#endif + +static Py_ssize_t +ascii_escape_char(Py_UNICODE c, char *output, Py_ssize_t chars) { + Py_UNICODE x; + output[chars++] = '\\'; + switch (c) { + case '/': output[chars++] = (char)c; break; + case '\\': output[chars++] = (char)c; break; + case '"': output[chars++] = (char)c; break; + case '\b': output[chars++] = 'b'; break; + case '\f': output[chars++] = 'f'; break; + case '\n': output[chars++] = 'n'; break; + case '\r': output[chars++] = 'r'; break; + case '\t': output[chars++] = 't'; break; + default: +#ifdef Py_UNICODE_WIDE + if (c >= 0x10000) { + /* UTF-16 surrogate pair */ + Py_UNICODE v = c - 0x10000; + c = 0xd800 | ((v >> 10) & 0x3ff); + output[chars++] = 'u'; + x = (c & 0xf000) >> 12; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x0f00) >> 8; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x00f0) >> 4; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x000f); + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + c = 0xdc00 | (v & 0x3ff); + output[chars++] = '\\'; + } +#endif + output[chars++] = 'u'; + x = (c & 0xf000) >> 12; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x0f00) >> 8; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x00f0) >> 4; + output[chars++] = (x < 10) ? '0' + x : 'a' + (x - 10); + x = (c & 0x000f); + output[chars++] = (x < 10) ? 
'0' + x : 'a' + (x - 10); + } + return chars; +} + +static PyObject * +ascii_escape_unicode(PyObject *pystr) { + Py_ssize_t i; + Py_ssize_t input_chars; + Py_ssize_t output_size; + Py_ssize_t chars; + PyObject *rval; + char *output; + Py_UNICODE *input_unicode; + + input_chars = PyUnicode_GET_SIZE(pystr); + input_unicode = PyUnicode_AS_UNICODE(pystr); + /* One char input can be up to 6 chars output, estimate 4 of these */ + output_size = 2 + (MIN_EXPANSION * 4) + input_chars; + rval = PyString_FromStringAndSize(NULL, output_size); + if (rval == NULL) { + return NULL; + } + output = PyString_AS_STRING(rval); + chars = 0; + output[chars++] = '"'; + for (i = 0; i < input_chars; i++) { + Py_UNICODE c = input_unicode[i]; + if (S_CHAR(c)) { + output[chars++] = (char)c; + } else { + chars = ascii_escape_char(c, output, chars); + } + if (output_size - chars < (1 + MAX_EXPANSION)) { + /* There's more than four, so let's resize by a lot */ + output_size *= 2; + /* This is an upper bound */ + if (output_size > 2 + (input_chars * MAX_EXPANSION)) { + output_size = 2 + (input_chars * MAX_EXPANSION); + } + if (_PyString_Resize(&rval, output_size) == -1) { + return NULL; + } + output = PyString_AS_STRING(rval); + } + } + output[chars++] = '"'; + if (_PyString_Resize(&rval, chars) == -1) { + return NULL; + } + return rval; +} + +static PyObject * +ascii_escape_str(PyObject *pystr) { + Py_ssize_t i; + Py_ssize_t input_chars; + Py_ssize_t output_size; + Py_ssize_t chars; + PyObject *rval; + char *output; + char *input_str; + + input_chars = PyString_GET_SIZE(pystr); + input_str = PyString_AS_STRING(pystr); + /* One char input can be up to 6 chars output, estimate 4 of these */ + output_size = 2 + (MIN_EXPANSION * 4) + input_chars; + rval = PyString_FromStringAndSize(NULL, output_size); + if (rval == NULL) { + return NULL; + } + output = PyString_AS_STRING(rval); + chars = 0; + output[chars++] = '"'; + for (i = 0; i < input_chars; i++) { + Py_UNICODE c = (Py_UNICODE)input_str[i]; + if (S_CHAR(c)) { + output[chars++] = (char)c; + } else if (c > 0x7F) { + /* We hit a non-ASCII character, bail to unicode mode */ + PyObject *uni; + Py_DECREF(rval); + uni = PyUnicode_DecodeUTF8(input_str, input_chars, "strict"); + if (uni == NULL) { + return NULL; + } + rval = ascii_escape_unicode(uni); + Py_DECREF(uni); + return rval; + } else { + chars = ascii_escape_char(c, output, chars); + } + /* An ASCII char can't possibly expand to a surrogate! */ + if (output_size - chars < (1 + MIN_EXPANSION)) { + /* There's more than four, so let's resize by a lot */ + output_size *= 2; + if (output_size > 2 + (input_chars * MIN_EXPANSION)) { + output_size = 2 + (input_chars * MIN_EXPANSION); + } + if (_PyString_Resize(&rval, output_size) == -1) { + return NULL; + } + output = PyString_AS_STRING(rval); + } + } + output[chars++] = '"'; + if (_PyString_Resize(&rval, chars) == -1) { + return NULL; + } + return rval; +} + +PyDoc_STRVAR(pydoc_encode_basestring_ascii, + "encode_basestring_ascii(basestring) -> str\n" + "\n" + "..." 
+); + +static PyObject * +py_encode_basestring_ascii(PyObject* self __attribute__((__unused__)), PyObject *pystr) { + /* METH_O */ + if (PyString_Check(pystr)) { + return ascii_escape_str(pystr); + } else if (PyUnicode_Check(pystr)) { + return ascii_escape_unicode(pystr); + } + PyErr_SetString(PyExc_TypeError, "first argument must be a string"); + return NULL; +} + +#define DEFN(n, k) \ + { \ + #n, \ + (PyCFunction)py_ ##n, \ + k, \ + pydoc_ ##n \ + } +static PyMethodDef speedups_methods[] = { + DEFN(encode_basestring_ascii, METH_O), + {} +}; +#undef DEFN + +void +init_speedups(void) +{ + PyObject *m; + m = Py_InitModule4("_speedups", speedups_methods, NULL, NULL, PYTHON_API_VERSION); +} diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/decoder.py b/Tools/Scripts/webkitpy/thirdparty/simplejson/decoder.py new file mode 100644 index 0000000..63f70cb --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/decoder.py @@ -0,0 +1,273 @@ +""" +Implementation of JSONDecoder +""" +import re + +from scanner import Scanner, pattern + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + +def _floatconstants(): + import struct + import sys + _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + +def errmsg(msg, doc, pos, end=None): + lineno, colno = linecol(doc, pos) + if end is None: + return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + return '%s: line %d column %d - line %d column %d (char %d - %d)' % ( + msg, lineno, colno, endlineno, endcolno, pos, end) + +_CONSTANTS = { + '-Infinity': NegInf, + 'Infinity': PosInf, + 'NaN': NaN, + 'true': True, + 'false': False, + 'null': None, +} + +def JSONConstant(match, context, c=_CONSTANTS): + return c[match.group(0)], None +pattern('(-?Infinity|NaN|true|false|null)')(JSONConstant) + +def JSONNumber(match, context): + match = JSONNumber.regex.match(match.string, *match.span()) + integer, frac, exp = match.groups() + if frac or exp: + res = float(integer + (frac or '') + (exp or '')) + else: + res = int(integer) + return res, None +pattern(r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?')(JSONNumber) + +STRINGCHUNK = re.compile(r'(.*?)(["\\])', FLAGS) +BACKSLASH = { + '"': u'"', '\\': u'\\', '/': u'/', + 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', +} + +DEFAULT_ENCODING = "utf-8" + +def scanstring(s, end, encoding=None, _b=BACKSLASH, _m=STRINGCHUNK.match): + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + while 1: + chunk = _m(s, end) + if chunk is None: + raise ValueError( + errmsg("Unterminated string starting at", s, begin)) + end = chunk.end() + content, terminator = chunk.groups() + if content: + if not isinstance(content, unicode): + content = unicode(content, encoding) + _append(content) + if terminator == '"': + break + try: + esc = s[end] + except IndexError: + raise ValueError( + errmsg("Unterminated string starting at", s, begin)) + if esc != 'u': + try: + m = _b[esc] + except KeyError: + raise ValueError( + errmsg("Invalid \\escape: %r" % (esc,), s, end)) + end += 1 + else: + esc = s[end + 1:end + 5] + try: + m = unichr(int(esc, 16)) + if len(esc) 
!= 4 or not esc.isalnum(): + raise ValueError + except ValueError: + raise ValueError(errmsg("Invalid \\uXXXX escape", s, end)) + end += 5 + _append(m) + return u''.join(chunks), end + +def JSONString(match, context): + encoding = getattr(context, 'encoding', None) + return scanstring(match.string, match.end(), encoding) +pattern(r'"')(JSONString) + +WHITESPACE = re.compile(r'\s*', FLAGS) + +def JSONObject(match, context, _w=WHITESPACE.match): + pairs = {} + s = match.string + end = _w(s, match.end()).end() + nextchar = s[end:end + 1] + # trivial empty object + if nextchar == '}': + return pairs, end + 1 + if nextchar != '"': + raise ValueError(errmsg("Expecting property name", s, end)) + end += 1 + encoding = getattr(context, 'encoding', None) + iterscan = JSONScanner.iterscan + while True: + key, end = scanstring(s, end, encoding) + end = _w(s, end).end() + if s[end:end + 1] != ':': + raise ValueError(errmsg("Expecting : delimiter", s, end)) + end = _w(s, end + 1).end() + try: + value, end = iterscan(s, idx=end, context=context).next() + except StopIteration: + raise ValueError(errmsg("Expecting object", s, end)) + pairs[key] = value + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == '}': + break + if nextchar != ',': + raise ValueError(errmsg("Expecting , delimiter", s, end - 1)) + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar != '"': + raise ValueError(errmsg("Expecting property name", s, end - 1)) + object_hook = getattr(context, 'object_hook', None) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end +pattern(r'{')(JSONObject) + +def JSONArray(match, context, _w=WHITESPACE.match): + values = [] + s = match.string + end = _w(s, match.end()).end() + # look-ahead for trivial empty array + nextchar = s[end:end + 1] + if nextchar == ']': + return values, end + 1 + iterscan = JSONScanner.iterscan + while True: + try: + value, end = iterscan(s, idx=end, context=context).next() + except StopIteration: + raise ValueError(errmsg("Expecting object", s, end)) + values.append(value) + end = _w(s, end).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == ']': + break + if nextchar != ',': + raise ValueError(errmsg("Expecting , delimiter", s, end)) + end = _w(s, end).end() + return values, end +pattern(r'\[')(JSONArray) + +ANYTHING = [ + JSONObject, + JSONArray, + JSONString, + JSONConstant, + JSONNumber, +] + +JSONScanner = Scanner(ANYTHING) + +class JSONDecoder(object): + """ + Simple JSON <http://json.org> decoder + + Performs the following translations in decoding: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as + their corresponding ``float`` values, which is outside the JSON spec. 
+ """ + + _scanner = Scanner(ANYTHING) + __all__ = ['__init__', 'decode', 'raw_decode'] + + def __init__(self, encoding=None, object_hook=None): + """ + ``encoding`` determines the encoding used to interpret any ``str`` + objects decoded by this instance (utf-8 by default). It has no + effect when decoding ``unicode`` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as ``unicode``. + + ``object_hook``, if specified, will be called with the result + of every JSON object decoded and its return value will be used in + place of the given ``dict``. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + """ + self.encoding = encoding + self.object_hook = object_hook + + def decode(self, s, _w=WHITESPACE.match): + """ + Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + """ + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + end = _w(s, end).end() + if end != len(s): + raise ValueError(errmsg("Extra data", s, end, len(s))) + return obj + + def raw_decode(self, s, **kw): + """ + Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning + with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. + """ + kw.setdefault('context', self) + try: + obj, end = self._scanner.iterscan(s, **kw).next() + except StopIteration: + raise ValueError("No JSON object could be decoded") + return obj, end + +__all__ = ['JSONDecoder'] diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/encoder.py b/Tools/Scripts/webkitpy/thirdparty/simplejson/encoder.py new file mode 100644 index 0000000..d29919a --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/encoder.py @@ -0,0 +1,371 @@ +""" +Implementation of JSONEncoder +""" +import re +try: + from simplejson import _speedups +except ImportError: + _speedups = None + +ESCAPE = re.compile(r'[\x00-\x19\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"/]|[^\ -~])') +ESCAPE_DCT = { + # escape all forward slashes to prevent </script> attack + '/': '\\/', + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) + +# assume this produces an infinity on all machines (probably not guaranteed) +INFINITY = float('1e66666') + +def floatstr(o, allow_nan=True): + # Check for specials. Note that this type of test is processor- and/or + # platform-specific, so do tests which don't depend on the internals. 
+ + if o != o: + text = 'NaN' + elif o == INFINITY: + text = 'Infinity' + elif o == -INFINITY: + text = '-Infinity' + else: + return repr(o) + + if not allow_nan: + raise ValueError("Out of range float values are not JSON compliant: %r" + % (o,)) + + return text + + +def encode_basestring(s): + """ + Return a JSON representation of a Python string + """ + def replace(match): + return ESCAPE_DCT[match.group(0)] + return '"' + ESCAPE.sub(replace, s) + '"' + +def encode_basestring_ascii(s): + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + +try: + encode_basestring_ascii = _speedups.encode_basestring_ascii + _need_utf8 = True +except AttributeError: + _need_utf8 = False + +class JSONEncoder(object): + """ + Extensible JSON <http://json.org> encoder for Python data structures. + + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + """ + __all__ = ['__init__', 'default', 'encode', 'iterencode'] + item_separator = ', ' + key_separator = ': ' + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8'): + """ + Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is False, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is True, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. + + If check_circular is True, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If allow_nan is True, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is True, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a non-negative integer, then JSON array + elements and object members will be pretty-printed with that + indent level. An indent level of 0 will only insert newlines. 
+ None is the most compact representation. + + If specified, separators should be a (item_separator, key_separator) + tuple. The default is (', ', ': '). To get the most compact JSON + representation you should specify (',', ':') to eliminate whitespace. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.indent = indent + self.current_indent_level = 0 + if separators is not None: + self.item_separator, self.key_separator = separators + self.encoding = encoding + + def _newline_indent(self): + return '\n' + (' ' * (self.indent * self.current_indent_level)) + + def _iterencode_list(self, lst, markers=None): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + yield '[' + if self.indent is not None: + self.current_indent_level += 1 + newline_indent = self._newline_indent() + separator = self.item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + separator = self.item_separator + first = True + for value in lst: + if first: + first = False + else: + yield separator + for chunk in self._iterencode(value, markers): + yield chunk + if newline_indent is not None: + self.current_indent_level -= 1 + yield self._newline_indent() + yield ']' + if markers is not None: + del markers[markerid] + + def _iterencode_dict(self, dct, markers=None): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + key_separator = self.key_separator + if self.indent is not None: + self.current_indent_level += 1 + newline_indent = self._newline_indent() + item_separator = self.item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + item_separator = self.item_separator + first = True + if self.ensure_ascii: + encoder = encode_basestring_ascii + else: + encoder = encode_basestring + allow_nan = self.allow_nan + if self.sort_keys: + keys = dct.keys() + keys.sort() + items = [(k, dct[k]) for k in keys] + else: + items = dct.iteritems() + _encoding = self.encoding + _do_decode = (_encoding is not None + and not (_need_utf8 and _encoding == 'utf-8')) + for key, value in items: + if isinstance(key, str): + if _do_decode: + key = key.decode(_encoding) + elif isinstance(key, basestring): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. 
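            # Concretely: a float key such as 1.5 is emitted below as the JSON
            # string "1.5", and plain int/long keys are converted with str().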
+ elif isinstance(key, float): + key = floatstr(key, allow_nan) + elif isinstance(key, (int, long)): + key = str(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif self.skipkeys: + continue + else: + raise TypeError("key %r is not a string" % (key,)) + if first: + first = False + else: + yield item_separator + yield encoder(key) + yield key_separator + for chunk in self._iterencode(value, markers): + yield chunk + if newline_indent is not None: + self.current_indent_level -= 1 + yield self._newline_indent() + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(self, o, markers=None): + if isinstance(o, basestring): + if self.ensure_ascii: + encoder = encode_basestring_ascii + else: + encoder = encode_basestring + _encoding = self.encoding + if (_encoding is not None and isinstance(o, str) + and not (_need_utf8 and _encoding == 'utf-8')): + o = o.decode(_encoding) + yield encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, (int, long)): + yield str(o) + elif isinstance(o, float): + yield floatstr(o, self.allow_nan) + elif isinstance(o, (list, tuple)): + for chunk in self._iterencode_list(o, markers): + yield chunk + elif isinstance(o, dict): + for chunk in self._iterencode_dict(o, markers): + yield chunk + else: + if markers is not None: + markerid = id(o) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = o + for chunk in self._iterencode_default(o, markers): + yield chunk + if markers is not None: + del markers[markerid] + + def _iterencode_default(self, o, markers=None): + newobj = self.default(o) + return self._iterencode(newobj, markers) + + def default(self, o): + """ + Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + """ + raise TypeError("%r is not JSON serializable" % (o,)) + + def encode(self, o): + """ + Return a JSON string representation of a Python data structure. + + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo":["bar", "baz"]}' + """ + # This is for extremely simple cases and benchmarks... + if isinstance(o, basestring): + if isinstance(o, str): + _encoding = self.encoding + if (_encoding is not None + and not (_encoding == 'utf-8' and _need_utf8)): + o = o.decode(_encoding) + return encode_basestring_ascii(o) + # This doesn't pass the iterator directly to ''.join() because it + # sucks at reporting exceptions. It's going to do this internally + # anyway because it uses PySequence_Fast or similar. + chunks = list(self.iterencode(o)) + return ''.join(chunks) + + def iterencode(self, o): + """ + Encode the given object and yield each string + representation as available. 
+ + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + """ + if self.check_circular: + markers = {} + else: + markers = None + return self._iterencode(o, markers) + +__all__ = ['JSONEncoder'] diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/jsonfilter.py b/Tools/Scripts/webkitpy/thirdparty/simplejson/jsonfilter.py new file mode 100644 index 0000000..01ca21d --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/jsonfilter.py @@ -0,0 +1,40 @@ +import simplejson +import cgi + +class JSONFilter(object): + def __init__(self, app, mime_type='text/x-json'): + self.app = app + self.mime_type = mime_type + + def __call__(self, environ, start_response): + # Read JSON POST input to jsonfilter.json if matching mime type + response = {'status': '200 OK', 'headers': []} + def json_start_response(status, headers): + response['status'] = status + response['headers'].extend(headers) + environ['jsonfilter.mime_type'] = self.mime_type + if environ.get('REQUEST_METHOD', '') == 'POST': + if environ.get('CONTENT_TYPE', '') == self.mime_type: + args = [_ for _ in [environ.get('CONTENT_LENGTH')] if _] + data = environ['wsgi.input'].read(*map(int, args)) + environ['jsonfilter.json'] = simplejson.loads(data) + res = simplejson.dumps(self.app(environ, json_start_response)) + jsonp = cgi.parse_qs(environ.get('QUERY_STRING', '')).get('jsonp') + if jsonp: + content_type = 'text/javascript' + res = ''.join(jsonp + ['(', res, ')']) + elif 'Opera' in environ.get('HTTP_USER_AGENT', ''): + # Opera has bunk XMLHttpRequest support for most mime types + content_type = 'text/plain' + else: + content_type = self.mime_type + headers = [ + ('Content-type', content_type), + ('Content-length', len(res)), + ] + headers.extend(response['headers']) + start_response(response['status'], headers) + return [res] + +def factory(app, global_conf, **kw): + return JSONFilter(app, **kw) diff --git a/Tools/Scripts/webkitpy/thirdparty/simplejson/scanner.py b/Tools/Scripts/webkitpy/thirdparty/simplejson/scanner.py new file mode 100644 index 0000000..64f4999 --- /dev/null +++ b/Tools/Scripts/webkitpy/thirdparty/simplejson/scanner.py @@ -0,0 +1,63 @@ +""" +Iterator based sre token scanner +""" +import sre_parse, sre_compile, sre_constants +from sre_constants import BRANCH, SUBPATTERN +from re import VERBOSE, MULTILINE, DOTALL +import re + +__all__ = ['Scanner', 'pattern'] + +FLAGS = (VERBOSE | MULTILINE | DOTALL) +class Scanner(object): + def __init__(self, lexicon, flags=FLAGS): + self.actions = [None] + # combine phrases into a compound pattern + s = sre_parse.Pattern() + s.flags = flags + p = [] + for idx, token in enumerate(lexicon): + phrase = token.pattern + try: + subpattern = sre_parse.SubPattern(s, + [(SUBPATTERN, (idx + 1, sre_parse.parse(phrase, flags)))]) + except sre_constants.error: + raise + p.append(subpattern) + self.actions.append(token) + + p = sre_parse.SubPattern(s, [(BRANCH, (None, p))]) + self.scanner = sre_compile.compile(p) + + + def iterscan(self, string, idx=0, context=None): + """ + Yield match, end_idx for each match + """ + match = self.scanner.scanner(string, idx).match + actions = self.actions + lastend = idx + end = len(string) + while True: + m = match() + if m is None: + break + matchbegin, matchend = m.span() + if lastend == matchend: + break + action = actions[m.lastindex] + if action is not None: + rval, next_pos = action(m, context) + if next_pos is not None and next_pos != matchend: + # "fast forward" the scanner + matchend = next_pos + match = 
self.scanner.scanner(string, matchend).match + yield rval, matchend + lastend = matchend + +def pattern(pattern, flags=FLAGS): + def decorator(fn): + fn.pattern = pattern + fn.regex = re.compile(pattern, flags) + return fn + return decorator diff --git a/Tools/Scripts/webkitpy/tool/__init__.py b/Tools/Scripts/webkitpy/tool/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/tool/bot/__init__.py b/Tools/Scripts/webkitpy/tool/bot/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py new file mode 100644 index 0000000..1d82ea8 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py @@ -0,0 +1,220 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
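+
+# CommitQueueTask drives a single patch through the commit-queue pipeline
+# (clean, update, apply, build, test, land), re-validating the patch before
+# landing.  A rough, hypothetical sketch of how a queue might drive it (the
+# delegate, patch and report_failure names are assumed, not defined here):
+#
+#     task = CommitQueueTask(delegate, patch)
+#     try:
+#         landed = task.run()  # False means "retry later"
+#     except ScriptError, e:
+#         report_failure(patch, e)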
+ +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.net.layouttestresults import LayoutTestResults + + +class CommitQueueTaskDelegate(object): + def run_command(self, command): + raise NotImplementedError("subclasses must implement") + + def command_passed(self, message, patch): + raise NotImplementedError("subclasses must implement") + + def command_failed(self, message, script_error, patch): + raise NotImplementedError("subclasses must implement") + + def refetch_patch(self, patch): + raise NotImplementedError("subclasses must implement") + + def layout_test_results(self): + raise NotImplementedError("subclasses must implement") + + def report_flaky_tests(self, patch, flaky_tests): + raise NotImplementedError("subclasses must implement") + + +class CommitQueueTask(object): + def __init__(self, delegate, patch): + self._delegate = delegate + self._patch = patch + self._script_error = None + + def _validate(self): + # Bugs might get closed, or patches might be obsoleted or r-'d while the + # commit-queue is processing. + self._patch = self._delegate.refetch_patch(self._patch) + if self._patch.is_obsolete(): + return False + if self._patch.bug().is_closed(): + return False + if not self._patch.committer(): + return False + # Reviewer is not required. Missing reviewers will be caught during + # the ChangeLog check during landing. + return True + + def _run_command(self, command, success_message, failure_message): + try: + self._delegate.run_command(command) + self._delegate.command_passed(success_message, patch=self._patch) + return True + except ScriptError, e: + self._script_error = e + self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch) + return False + + def _clean(self): + return self._run_command([ + "clean", + ], + "Cleaned working directory", + "Unable to clean working directory") + + def _update(self): + # FIXME: Ideally the status server log message should include which revision we updated to. + return self._run_command([ + "update", + ], + "Updated working directory", + "Unable to update working directory") + + def _apply(self): + return self._run_command([ + "apply-attachment", + "--no-update", + "--non-interactive", + self._patch.id(), + ], + "Applied patch", + "Patch does not apply") + + def _build(self): + return self._run_command([ + "build", + "--no-clean", + "--no-update", + "--build-style=both", + ], + "Built patch", + "Patch does not build") + + def _build_without_patch(self): + return self._run_command([ + "build", + "--force-clean", + "--no-update", + "--build-style=both", + ], + "Able to build without patch", + "Unable to build without patch") + + def _test(self): + return self._run_command([ + "build-and-test", + "--no-clean", + "--no-update", + # Notice that we don't pass --build, which means we won't build! + "--test", + "--non-interactive", + ], + "Passed tests", + "Patch does not pass tests") + + def _build_and_test_without_patch(self): + return self._run_command([ + "build-and-test", + "--force-clean", + "--no-update", + "--build", + "--test", + "--non-interactive", + ], + "Able to pass tests without patch", + "Unable to pass tests without patch (tree is red?)") + + def _failing_tests_from_last_run(self): + results = self._delegate.layout_test_results() + if not results: + return None + return results.failing_tests() + + def _land(self): + # Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should. 
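+ # A failure here is treated as fatal by run(): the recorded ScriptError
+ # is re-raised to the caller rather than retried.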
+ return self._run_command([ + "land-attachment", + "--force-clean", + "--ignore-builders", + "--non-interactive", + "--parent-command=commit-queue", + self._patch.id(), + ], + "Landed patch", + "Unable to land patch") + + def _report_flaky_tests(self, flaky_tests): + self._delegate.report_flaky_tests(self._patch, flaky_tests) + + def _test_patch(self): + if self._patch.is_rollout(): + return True + if self._test(): + return True + + first_failing_tests = self._failing_tests_from_last_run() + if self._test(): + self._report_flaky_tests(first_failing_tests) + return True + + second_failing_tests = self._failing_tests_from_last_run() + if first_failing_tests != second_failing_tests: + # We could report flaky tests here, but since run-webkit-tests + # is run with --exit-after-N-failures=1, we would need to + # be careful not to report constant failures as flaky due to earlier + # flaky test making them not fail (no results) in one of the runs. + # See https://bugs.webkit.org/show_bug.cgi?id=51272 + return False + + if self._build_and_test_without_patch(): + raise self._script_error # The error from the previous ._test() run is real, report it. + return False # Tree must be red, just retry later. + + def run(self): + if not self._validate(): + return False + if not self._clean(): + return False + if not self._update(): + return False + if not self._apply(): + raise self._script_error + if not self._build(): + if not self._build_without_patch(): + return False + raise self._script_error + if not self._test_patch(): + return False + # Make sure the patch is still valid before landing (e.g., make sure + # no one has set commit-queue- since we started working on the patch.) + if not self._validate(): + return False + # FIXME: We should understand why the land failure occured and retry if possible. + if not self._land(): + raise self._script_error + return True diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py new file mode 100644 index 0000000..376f407 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py @@ -0,0 +1,316 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from datetime import datetime +import unittest + +from webkitpy.common.system.deprecated_logging import error, log +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.bot.commitqueuetask import * +from webkitpy.tool.mocktool import MockTool + + +class MockCommitQueue(CommitQueueTaskDelegate): + def __init__(self, error_plan): + self._error_plan = error_plan + + def run_command(self, command): + log("run_webkit_patch: %s" % command) + if self._error_plan: + error = self._error_plan.pop(0) + if error: + raise error + + def command_passed(self, success_message, patch): + log("command_passed: success_message='%s' patch='%s'" % ( + success_message, patch.id())) + + def command_failed(self, failure_message, script_error, patch): + log("command_failed: failure_message='%s' script_error='%s' patch='%s'" % ( + failure_message, script_error, patch.id())) + return 3947 + + def refetch_patch(self, patch): + return patch + + def layout_test_results(self): + return None + + def report_flaky_tests(self, patch, flaky_tests): + log("report_flaky_tests: patch='%s' flaky_tests='%s'" % (patch.id(), flaky_tests)) + + +class CommitQueueTaskTest(unittest.TestCase): + def _run_through_task(self, commit_queue, expected_stderr, expected_exception=None, expect_retry=False): + tool = MockTool(log_executive=True) + patch = tool.bugs.fetch_attachment(197) + task = CommitQueueTask(commit_queue, patch) + success = OutputCapture().assert_outputs(self, task.run, expected_stderr=expected_stderr, expected_exception=expected_exception) + if not expected_exception: + self.assertEqual(success, not expect_retry) + + def test_success_case(self): + commit_queue = MockCommitQueue([]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_passed: success_message='Passed tests' patch='197' +run_webkit_patch: ['land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 197] +command_passed: success_message='Landed patch' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr) + + def test_clean_failure(self): + commit_queue = MockCommitQueue([ + ScriptError("MOCK clean failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, 
expect_retry=True) + + def test_update_failure(self): + commit_queue = MockCommitQueue([ + None, + ScriptError("MOCK update failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, expect_retry=True) + + def test_apply_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + ScriptError("MOCK apply failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, ScriptError) + + def test_build_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + ScriptError("MOCK build failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='197' +run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Able to build without patch' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, ScriptError) + + def test_red_build_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + ScriptError("MOCK build failure"), + ScriptError("MOCK clean build failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='197' +run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both'] +command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, expect_retry=True) + + def test_flaky_test_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + None, + ScriptError("MOCK tests failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' 
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_passed: success_message='Passed tests' patch='197' +report_flaky_tests: patch='197' flaky_tests='None' +run_webkit_patch: ['land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 197] +command_passed: success_message='Landed patch' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr) + + _double_flaky_test_counter = 0 + + def test_double_flaky_test_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + None, + ScriptError("MOCK test failure"), + ScriptError("MOCK test failure again"), + ]) + # The (subtle) point of this test is that report_flaky_tests does not appear + # in the expected_stderr for this run. + # Note also that there is no attempt to run the tests w/o the patch. + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='197' +""" + tool = MockTool(log_executive=True) + patch = tool.bugs.fetch_attachment(197) + task = CommitQueueTask(commit_queue, patch) + self._double_flaky_test_counter = 0 + + def mock_failing_tests_from_last_run(): + CommitQueueTaskTest._double_flaky_test_counter += 1 + if CommitQueueTaskTest._double_flaky_test_counter % 2: + return ['foo.html'] + return ['bar.html'] + + task._failing_tests_from_last_run = mock_failing_tests_from_last_run + success = OutputCapture().assert_outputs(self, task.run, expected_stderr=expected_stderr) + self.assertEqual(success, False) + + def test_test_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + None, + ScriptError("MOCK test failure"), + ScriptError("MOCK test failure again"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch 
does not pass tests' script_error='MOCK test failure' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='197' +run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive'] +command_passed: success_message='Able to pass tests without patch' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, ScriptError) + + def test_red_test_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + None, + ScriptError("MOCK test failure"), + ScriptError("MOCK test failure again"), + ScriptError("MOCK clean test failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='197' +run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive'] +command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='197' +""" + self._run_through_task(commit_queue, expected_stderr, expect_retry=True) + + def test_land_failure(self): + commit_queue = MockCommitQueue([ + None, + None, + None, + None, + None, + ScriptError("MOCK land failure"), + ]) + expected_stderr = """run_webkit_patch: ['clean'] +command_passed: success_message='Cleaned working directory' patch='197' +run_webkit_patch: ['update'] +command_passed: success_message='Updated working directory' patch='197' +run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 197] +command_passed: success_message='Applied patch' patch='197' +run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both'] +command_passed: success_message='Built patch' patch='197' +run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +command_passed: success_message='Passed tests' patch='197' +run_webkit_patch: ['land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 197] +command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='197' +""" + # FIXME: This should really be expect_retry=True for a better user experiance. + self._run_through_task(commit_queue, expected_stderr, ScriptError) diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders.py b/Tools/Scripts/webkitpy/tool/bot/feeders.py new file mode 100644 index 0000000..046c4c1 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/feeders.py @@ -0,0 +1,90 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.config.committervalidator import CommitterValidator +from webkitpy.common.system.deprecated_logging import log +from webkitpy.tool.grammar import pluralize + + +class AbstractFeeder(object): + def __init__(self, tool): + self._tool = tool + + def feed(self): + raise NotImplementedError("subclasses must implement") + + +class CommitQueueFeeder(AbstractFeeder): + queue_name = "commit-queue" + + def __init__(self, tool): + AbstractFeeder.__init__(self, tool) + self.committer_validator = CommitterValidator(self._tool.bugs) + + def _update_work_items(self, item_ids): + # FIXME: This is the last use of update_work_items, the commit-queue + # should move to feeding patches one at a time like the EWS does. + self._tool.status_server.update_work_items(self.queue_name, item_ids) + log("Feeding %s items %s" % (self.queue_name, item_ids)) + + def feed(self): + patches = self._validate_patches() + patches = sorted(patches, self._patch_cmp) + patch_ids = [patch.id() for patch in patches] + self._update_work_items(patch_ids) + + def _patches_for_bug(self, bug_id): + return self._tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True) + + def _validate_patches(self): + # Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers. + bug_ids = self._tool.bugs.queries.fetch_bug_ids_from_commit_queue() + all_patches = sum([self._patches_for_bug(bug_id) for bug_id in bug_ids], []) + return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches) + + def _patch_cmp(self, a, b): + # Sort first by is_rollout, then by attach_date. + # Reversing the order so that is_rollout is first. 
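+ # e.g. [new patch, old patch, new rollout, old rollout] sorts to
+ # [old rollout, new rollout, old patch, new patch]: rollouts first,
+ # oldest attach_date first within each group.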
+ rollout_cmp = cmp(b.is_rollout(), a.is_rollout()) + if rollout_cmp != 0: + return rollout_cmp + return cmp(a.attach_date(), b.attach_date()) + + +class EWSFeeder(AbstractFeeder): + def __init__(self, tool): + self._ids_sent_to_server = set() + AbstractFeeder.__init__(self, tool) + + def feed(self): + ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue()) + new_ids = ids_needing_review.difference(self._ids_sent_to_server) + log("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids))) + for attachment_id in new_ids: # Order doesn't really matter for the EWS. + self._tool.status_server.submit_to_ews(attachment_id) + self._ids_sent_to_server.add(attachment_id) diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py new file mode 100644 index 0000000..580f840 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py @@ -0,0 +1,70 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from datetime import datetime +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.bot.feeders import * +from webkitpy.tool.mocktool import MockTool + + +class FeedersTest(unittest.TestCase): + def test_commit_queue_feeder(self): + feeder = CommitQueueFeeder(MockTool()) + expected_stderr = u"""Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +MOCK setting flag 'commit-queue' to '-' on attachment '128' with comment 'Rejecting attachment 128 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py. + +- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags. 
+ +- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.' +MOCK: update_work_items: commit-queue [106, 197] +Feeding commit-queue items [106, 197] +""" + OutputCapture().assert_outputs(self, feeder.feed, expected_stderr=expected_stderr) + + def _mock_attachment(self, is_rollout, attach_date): + attachment = Mock() + attachment.is_rollout = lambda: is_rollout + attachment.attach_date = lambda: attach_date + return attachment + + def test_patch_cmp(self): + long_ago_date = datetime(1900, 1, 21) + recent_date = datetime(2010, 1, 21) + attachment1 = self._mock_attachment(is_rollout=False, attach_date=recent_date) + attachment2 = self._mock_attachment(is_rollout=False, attach_date=long_ago_date) + attachment3 = self._mock_attachment(is_rollout=True, attach_date=recent_date) + attachment4 = self._mock_attachment(is_rollout=True, attach_date=long_ago_date) + attachments = [attachment1, attachment2, attachment3, attachment4] + expected_sort = [attachment4, attachment3, attachment2, attachment1] + queue = CommitQueueFeeder(MockTool()) + attachments.sort(queue._patch_cmp) + self.assertEqual(attachments, expected_sort) diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py new file mode 100644 index 0000000..01cbf39 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py @@ -0,0 +1,181 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
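+
+# FlakyTestReporter files (or updates) one bug per flaky test, blocking the
+# master flake bug 50856, attaches the failure diff when it can find one, and
+# posts a summary comment on the patch's bug.  A rough, hypothetical sketch of
+# a caller (the tool, patch and test path are assumed, not defined here):
+#
+#     reporter = FlakyTestReporter(tool, "commit-queue")
+#     reporter.report_flaky_tests(["fast/dom/flaky-test.html"], patch)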
+ +import codecs +import logging +import platform +import os.path + +from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults +from webkitpy.common.config import urls +from webkitpy.tool.grammar import plural, pluralize, join_with_separators + +_log = logging.getLogger(__name__) + + +class FlakyTestReporter(object): + def __init__(self, tool, bot_name): + self._tool = tool + self._bot_name = bot_name + + def _author_emails_for_test(self, flaky_test): + test_path = path_for_layout_test(flaky_test) + commit_infos = self._tool.checkout().recent_commit_infos_for_files([test_path]) + # This ignores authors which are not committers because we don't have their bugzilla_email. + return set([commit_info.author().bugzilla_email() for commit_info in commit_infos if commit_info.author()]) + + def _bugzilla_email(self): + # FIXME: This is kinda a funny way to get the bugzilla email, + # we could also just create a Credentials object directly + # but some of the Credentials logic is in bugzilla.py too... + self._tool.bugs.authenticate() + return self._tool.bugs.username + + # FIXME: This should move into common.config + _bot_emails = set([ + "commit-queue@webkit.org", # commit-queue + "eseidel@chromium.org", # old commit-queue + "webkit.review.bot@gmail.com", # style-queue, sheriff-bot, CrLx/Gtk EWS + "buildbot@hotmail.com", # Win EWS + # Mac EWS currently uses eric@webkit.org, but that's not normally a bot + ]) + + def _lookup_bug_for_flaky_test(self, flaky_test): + bugs = self._tool.bugs.queries.fetch_bugs_matching_search(search_string=flaky_test) + if not bugs: + return None + # Match any bugs which are from known bots or the email this bot is using. + allowed_emails = self._bot_emails | set([self._bugzilla_email]) + bugs = filter(lambda bug: bug.reporter_email() in allowed_emails, bugs) + if not bugs: + return None + if len(bugs) > 1: + # FIXME: There are probably heuristics we could use for finding + # the right bug instead of the first, like open vs. closed. + _log.warn("Found %s %s matching '%s' filed by a bot, using the first." % (pluralize('bug', len(bugs)), [bug.id() for bug in bugs], flaky_test)) + return bugs[0] + + def _view_source_url_for_test(self, test_path): + return urls.view_source_url("LayoutTests/%s" % test_path) + + def _create_bug_for_flaky_test(self, flaky_test, author_emails, latest_flake_message): + format_values = { + 'test': flaky_test, + 'authors': join_with_separators(sorted(author_emails)), + 'flake_message': latest_flake_message, + 'test_url': self._view_source_url_for_test(flaky_test), + 'bot_name': self._bot_name, + } + title = "Flaky Test: %(test)s" % format_values + description = """This is an automatically generated bug from the %(bot_name)s. +%(test)s has been flaky on the %(bot_name)s. + +%(test)s was authored by %(authors)s. +%(test_url)s + +%(flake_message)s + +The bots will update this with information from each new failure. + +If you would like to track this test fix with another bug, please close this bug as a duplicate. +""" % format_values + + master_flake_bug = 50856 # MASTER: Flaky tests found by the commit-queue + return self._tool.bugs.create_bug(title, description, + component="Tools / Tests", + cc=",".join(author_emails), + blocked="50856") + + # This is over-engineered, but it makes for pretty bug messages. 
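+ # e.g. [] -> "", ["foo@bar.com"] -> " (author: foo@bar.com)",
+ # ["a@b.com", "b@b.com"] -> " (authors: a@b.com and b@b.com)".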
+ def _optional_author_string(self, author_emails): + if not author_emails: + return "" + heading_string = plural('author') if len(author_emails) > 1 else 'author' + authors_string = join_with_separators(sorted(author_emails)) + return " (%s: %s)" % (heading_string, authors_string) + + def _bot_information(self): + bot_id = self._tool.status_server.bot_id + bot_id_string = "Bot: %s " % (bot_id) if bot_id else "" + return "%sPort: %s Platform: %s" % (bot_id_string, self._tool.port().name(), self._tool.platform.display_name()) + + def _latest_flake_message(self, flaky_test, patch): + flake_message = "The %s just saw %s flake while processing attachment %s on bug %s." % (self._bot_name, flaky_test, patch.id(), patch.bug_id()) + return "%s\n%s" % (flake_message, self._bot_information()) + + def _results_diff_path_for_test(self, flaky_test): + # FIXME: This is a big hack. We should get this path from results.json + # except that old-run-webkit-tests doesn't produce a results.json + # so we just guess at the file path. + results_path = self._tool.port().layout_tests_results_path() + results_directory = os.path.dirname(results_path) + test_path = os.path.join(results_directory, flaky_test) + (test_path_root, _) = os.path.splitext(test_path) + return "%s-diffs.txt" % test_path_root + + def _follow_duplicate_chain(self, bug): + while bug.is_closed() and bug.duplicate_of(): + bug = self._tool.bugs.fetch_bug(bug.duplicate_of()) + return bug + + # Maybe this logic should move into Bugzilla? a reopen=True arg to post_comment? + def _update_bug_for_flaky_test(self, bug, latest_flake_message): + if bug.is_closed(): + self._tool.bugs.reopen_bug(bug.id(), latest_flake_message) + else: + self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message) + + def report_flaky_tests(self, flaky_tests, patch): + message = "The %s encountered the following flaky tests while processing attachment %s:\n\n" % (self._bot_name, patch.id()) + for flaky_test in flaky_tests: + bug = self._lookup_bug_for_flaky_test(flaky_test) + latest_flake_message = self._latest_flake_message(flaky_test, patch) + author_emails = self._author_emails_for_test(flaky_test) + if not bug: + _log.info("Bug does not already exist for %s, creating." % flaky_test) + flake_bug_id = self._create_bug_for_flaky_test(flaky_test, author_emails, latest_flake_message) + else: + bug = self._follow_duplicate_chain(bug) + self._update_bug_for_flaky_test(bug, latest_flake_message) + flake_bug_id = bug.id() + # FIXME: Ideally we'd only make one comment per flake, not two. But that's not possible + # in all cases (e.g. when reopening), so for now we do the attachment in a second step. + results_diff_path = self._results_diff_path_for_test(flaky_test) + # Check to make sure that the path makes sense. + # Since we're not actually getting this path from the results.html + # there is a high probaility it's totally wrong. + if self._tool.filesystem.exists(results_diff_path): + results_diff = self._tool.filesystem.read_binary_file(results_diff_path) + bot_id = self._tool.status_server.bot_id or "bot" + self._tool.bugs.add_attachment_to_bug(flake_bug_id, results_diff, "Failure diff from %s" % bot_id, filename="failure.diff") + else: + _log.error("%s does not exist as expected, not uploading." % results_diff_path) + message += "%s bug %s%s\n" % (flaky_test, flake_bug_id, self._optional_author_string(author_emails)) + + message += "The %s is continuing to process your patch." 
% self._bot_name + self._tool.bugs.post_comment_to_bug(patch.bug_id(), message) diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py new file mode 100644 index 0000000..f72fb28 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py @@ -0,0 +1,145 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.config.committers import Committer +from webkitpy.common.system.filesystem_mock import MockFileSystem +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter +from webkitpy.tool.mocktool import MockTool, MockStatusServer + + +# Creating fake CommitInfos is a pain, so we use a mock one here. +class MockCommitInfo(object): + def __init__(self, author_email): + self._author_email = author_email + + def author(self): + # It's definitely possible to have commits with authors who + # are not in our committers.py list. + if not self._author_email: + return None + return Committer("Mock Committer", self._author_email) + + +class FlakyTestReporterTest(unittest.TestCase): + def _assert_emails_for_test(self, emails): + tool = MockTool() + reporter = FlakyTestReporter(tool, 'dummy-queue') + commit_infos = [MockCommitInfo(email) for email in emails] + tool.checkout().recent_commit_infos_for_files = lambda paths: set(commit_infos) + self.assertEqual(reporter._author_emails_for_test([]), set(emails)) + + def test_author_emails_for_test(self): + self._assert_emails_for_test([]) + self._assert_emails_for_test(["test1@test.com", "test1@test.com"]) + self._assert_emails_for_test(["test1@test.com", "test2@test.com"]) + + def test_create_bug_for_flaky_test(self): + reporter = FlakyTestReporter(MockTool(), 'dummy-queue') + expected_stderr = """MOCK create_bug +bug_title: Flaky Test: foo/bar.html +bug_description: This is an automatically generated bug from the dummy-queue. +foo/bar.html has been flaky on the dummy-queue. 
+ +foo/bar.html was authored by test@test.com. +http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html + +FLAKE_MESSAGE + +The bots will update this with information from each new failure. + +If you would like to track this test fix with another bug, please close this bug as a duplicate. + +component: Tools / Tests +cc: test@test.com +blocked: 50856 +""" + OutputCapture().assert_outputs(self, reporter._create_bug_for_flaky_test, ['foo/bar.html', ['test@test.com'], 'FLAKE_MESSAGE'], expected_stderr=expected_stderr) + + def test_follow_duplicate_chain(self): + tool = MockTool() + reporter = FlakyTestReporter(tool, 'dummy-queue') + bug = tool.bugs.fetch_bug(78) + self.assertEqual(reporter._follow_duplicate_chain(bug).id(), 76) + + def test_bot_information(self): + tool = MockTool() + tool.status_server = MockStatusServer("MockBotId") + reporter = FlakyTestReporter(tool, 'dummy-queue') + self.assertEqual(reporter._bot_information(), "Bot: MockBotId Port: MockPort Platform: MockPlatform 1.0") + + def test_report_flaky_tests_creating_bug(self): + tool = MockTool() + tool.filesystem = MockFileSystem({"/mock/foo/bar-diffs.txt": "mock"}) + tool.status_server = MockStatusServer(bot_id="mock-bot-id") + reporter = FlakyTestReporter(tool, 'dummy-queue') + reporter._lookup_bug_for_flaky_test = lambda bug_id: None + patch = tool.bugs.fetch_attachment(197) + expected_stderr = """MOCK create_bug +bug_title: Flaky Test: foo/bar.html +bug_description: This is an automatically generated bug from the dummy-queue. +foo/bar.html has been flaky on the dummy-queue. + +foo/bar.html was authored by abarth@webkit.org. +http://trac.webkit.org/browser/trunk/LayoutTests/foo/bar.html + +The dummy-queue just saw foo/bar.html flake while processing attachment 197 on bug 42. +Bot: mock-bot-id Port: MockPort Platform: MockPlatform 1.0 + +The bots will update this with information from each new failure. + +If you would like to track this test fix with another bug, please close this bug as a duplicate. + +component: Tools / Tests +cc: abarth@webkit.org +blocked: 50856 +MOCK add_attachment_to_bug: bug_id=78, description=Failure diff from mock-bot-id filename=failure.diff +MOCK bug comment: bug_id=42, cc=None +--- Begin comment --- +The dummy-queue encountered the following flaky tests while processing attachment 197: + +foo/bar.html bug 78 (author: abarth@webkit.org) +The dummy-queue is continuing to process your patch. +--- End comment --- + +""" + OutputCapture().assert_outputs(self, reporter.report_flaky_tests, [['foo/bar.html'], patch], expected_stderr=expected_stderr) + + def test_optional_author_string(self): + reporter = FlakyTestReporter(MockTool(), 'dummy-queue') + self.assertEqual(reporter._optional_author_string([]), "") + self.assertEqual(reporter._optional_author_string(["foo@bar.com"]), " (author: foo@bar.com)") + self.assertEqual(reporter._optional_author_string(["a@b.com", "b@b.com"]), " (authors: a@b.com and b@b.com)") + + def test_results_diff_path_for_test(self): + reporter = FlakyTestReporter(MockTool(), 'dummy-queue') + self.assertEqual(reporter._results_diff_path_for_test("test.html"), "/mock/test-diffs.txt") + + # report_flaky_tests is also tested by queues_unittest diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command.py b/Tools/Scripts/webkitpy/tool/bot/irc_command.py new file mode 100644 index 0000000..0c17c9f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/irc_command.py @@ -0,0 +1,109 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import random +import webkitpy.common.config.irc as config_irc + +from webkitpy.common.config import urls +from webkitpy.tool.bot.queueengine import TerminateQueue +from webkitpy.common.net.bugzilla import parse_bug_id +from webkitpy.common.system.executive import ScriptError + +# FIXME: Merge with Command? +class IRCCommand(object): + def execute(self, nick, args, tool, sheriff): + raise NotImplementedError, "subclasses must implement" + + +class LastGreenRevision(IRCCommand): + def execute(self, nick, args, tool, sheriff): + return "%s: %s" % (nick, + urls.view_revision_url(tool.buildbot.last_green_revision())) + + +class Restart(IRCCommand): + def execute(self, nick, args, tool, sheriff): + tool.irc().post("Restarting...") + raise TerminateQueue() + + +class Rollout(IRCCommand): + def execute(self, nick, args, tool, sheriff): + if len(args) < 2: + tool.irc().post("%s: Usage: SVN_REVISION REASON" % nick) + return + svn_revision = args[0].lstrip("r") + rollout_reason = " ".join(args[1:]) + tool.irc().post("Preparing rollout for r%s..." % svn_revision) + try: + complete_reason = "%s (Requested by %s on %s)." % ( + rollout_reason, nick, config_irc.channel) + bug_id = sheriff.post_rollout_patch(svn_revision, complete_reason) + bug_url = tool.bugs.bug_url_for_bug_id(bug_id) + tool.irc().post("%s: Created rollout: %s" % (nick, bug_url)) + except ScriptError, e: + tool.irc().post("%s: Failed to create rollout patch:" % nick) + tool.irc().post("%s" % e) + bug_id = parse_bug_id(e.output) + if bug_id: + tool.irc().post("Ugg... Might have created %s" % + tool.bugs.bug_url_for_bug_id(bug_id)) + + +class Help(IRCCommand): + def execute(self, nick, args, tool, sheriff): + return "%s: Available commands: %s" % (nick, ", ".join(commands.keys())) + + +class Hi(IRCCommand): + def execute(self, nick, args, tool, sheriff): + quips = tool.bugs.quips() + quips.append('"Only you can prevent forest fires." 
-- Smokey the Bear') + return random.choice(quips) + + +class Eliza(IRCCommand): + therapist = None + + def __init__(self): + if not self.therapist: + import webkitpy.thirdparty.autoinstalled.eliza as eliza + Eliza.therapist = eliza.eliza() + + def execute(self, nick, args, tool, sheriff): + return "%s: %s" % (nick, self.therapist.respond(" ".join(args))) + + +# FIXME: Lame. We should have an auto-registering CommandCenter. +commands = { + "last-green-revision": LastGreenRevision, + "restart": Restart, + "rollout": Rollout, + "help": Help, + "hi": Hi, +} diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py new file mode 100644 index 0000000..7aeb6a0 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py @@ -0,0 +1,38 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.tool.bot.irc_command import * + + +class IRCCommandTest(unittest.TestCase): + def test_eliza(self): + eliza = Eliza() + eliza.execute("tom", "hi", None, None) + eliza.execute("tom", "bye", None, None) diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine.py b/Tools/Scripts/webkitpy/tool/bot/queueengine.py new file mode 100644 index 0000000..8b016e8 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/queueengine.py @@ -0,0 +1,165 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import time +import traceback + +from datetime import datetime, timedelta + +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.system.deprecated_logging import log, OutputTee + + +class TerminateQueue(Exception): + pass + + +class QueueEngineDelegate: + def queue_log_path(self): + raise NotImplementedError, "subclasses must implement" + + def work_item_log_path(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def begin_work_queue(self): + raise NotImplementedError, "subclasses must implement" + + def should_continue_work_queue(self): + raise NotImplementedError, "subclasses must implement" + + def next_work_item(self): + raise NotImplementedError, "subclasses must implement" + + def should_proceed_with_work_item(self, work_item): + # returns (safe_to_proceed, waiting_message, patch) + raise NotImplementedError, "subclasses must implement" + + def process_work_item(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def handle_unexpected_error(self, work_item, message): + raise NotImplementedError, "subclasses must implement" + + +class QueueEngine: + def __init__(self, name, delegate, wakeup_event): + self._name = name + self._delegate = delegate + self._wakeup_event = wakeup_event + self._output_tee = OutputTee() + + log_date_format = "%Y-%m-%d %H:%M:%S" + sleep_duration_text = "2 mins" # This could be generated from seconds_to_sleep + seconds_to_sleep = 120 + handled_error_code = 2 + + # Child processes exit with a special code to the parent queue process can detect the error was handled. + @classmethod + def exit_after_handled_error(cls, error): + log(error) + exit(cls.handled_error_code) + + def run(self): + self._begin_logging() + + self._delegate.begin_work_queue() + while (self._delegate.should_continue_work_queue()): + try: + self._ensure_work_log_closed() + work_item = self._delegate.next_work_item() + if not work_item: + self._sleep("No work item.") + continue + if not self._delegate.should_proceed_with_work_item(work_item): + self._sleep("Not proceeding with work item.") + continue + + # FIXME: Work logs should not depend on bug_id specificaly. + # This looks fixed, no? + self._open_work_log(work_item) + try: + if not self._delegate.process_work_item(work_item): + log("Unable to process work item.") + continue + except ScriptError, e: + # Use a special exit code to indicate that the error was already + # handled in the child process and we should just keep looping. 
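+ # (Child jobs signal this by calling QueueEngine.exit_after_handled_error,
+ # which exits with handled_error_code.)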
+ if e.exit_code == self.handled_error_code: + continue + message = "Unexpected failure when processing patch! Please file a bug against webkit-patch.\n%s" % e.message_with_output() + self._delegate.handle_unexpected_error(work_item, message) + except TerminateQueue, e: + self._stopping("TerminateQueue exception received.") + return 0 + except KeyboardInterrupt, e: + self._stopping("User terminated queue.") + return 1 + except Exception, e: + traceback.print_exc() + # Don't try tell the status bot, in case telling it causes an exception. + self._sleep("Exception while preparing queue") + self._stopping("Delegate terminated queue.") + return 0 + + def _stopping(self, message): + log("\n%s" % message) + self._delegate.stop_work_queue(message) + # Be careful to shut down our OutputTee or the unit tests will be unhappy. + self._ensure_work_log_closed() + self._output_tee.remove_log(self._queue_log) + + def _begin_logging(self): + self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path()) + self._work_log = None + + def _open_work_log(self, work_item): + work_item_log_path = self._delegate.work_item_log_path(work_item) + if not work_item_log_path: + return + self._work_log = self._output_tee.add_log(work_item_log_path) + + def _ensure_work_log_closed(self): + # If we still have a bug log open, close it. + if self._work_log: + self._output_tee.remove_log(self._work_log) + self._work_log = None + + def _now(self): + """Overriden by the unit tests to allow testing _sleep_message""" + return datetime.now() + + def _sleep_message(self, message): + wake_time = self._now() + timedelta(seconds=self.seconds_to_sleep) + return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), self.sleep_duration_text) + + def _sleep(self, message): + log(self._sleep_message(message)) + self._wakeup_event.wait(self.seconds_to_sleep) + self._wakeup_event.clear() diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py new file mode 100644 index 0000000..37d8502 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py @@ -0,0 +1,209 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
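The "Sleeping until ..." line built by _sleep_message above is plain datetime arithmetic over the class constants, which is what lets the unit test below pin _now() to a fixed date and assert an exact string. A quick standalone check of that arithmetic, with the constants copied from the class:

from datetime import datetime, timedelta

log_date_format = "%Y-%m-%d %H:%M:%S"
seconds_to_sleep = 120
sleep_duration_text = "2 mins"

now = datetime(2010, 1, 1)  # the value the unit test injects via _now
wake_time = now + timedelta(seconds=seconds_to_sleep)
print("%s Sleeping until %s (%s)." % (
    "MESSAGE", wake_time.strftime(log_date_format), sleep_duration_text))
# MESSAGE Sleeping until 2010-01-01 00:02:00 (2 mins).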
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import datetime +import os +import shutil +import tempfile +import threading +import unittest + +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate, TerminateQueue + + +class LoggingDelegate(QueueEngineDelegate): + def __init__(self, test): + self._test = test + self._callbacks = [] + self._run_before = False + self.stop_message = None + + expected_callbacks = [ + 'queue_log_path', + 'begin_work_queue', + 'should_continue_work_queue', + 'next_work_item', + 'should_proceed_with_work_item', + 'work_item_log_path', + 'process_work_item', + 'should_continue_work_queue', + 'stop_work_queue', + ] + + def record(self, method_name): + self._callbacks.append(method_name) + + def queue_log_path(self): + self.record("queue_log_path") + return os.path.join(self._test.temp_dir, "queue_log_path") + + def work_item_log_path(self, work_item): + self.record("work_item_log_path") + return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item) + + def begin_work_queue(self): + self.record("begin_work_queue") + + def should_continue_work_queue(self): + self.record("should_continue_work_queue") + if not self._run_before: + self._run_before = True + return True + return False + + def next_work_item(self): + self.record("next_work_item") + return "work_item" + + def should_proceed_with_work_item(self, work_item): + self.record("should_proceed_with_work_item") + self._test.assertEquals(work_item, "work_item") + fake_patch = { 'bug_id' : 42 } + return (True, "waiting_message", fake_patch) + + def process_work_item(self, work_item): + self.record("process_work_item") + self._test.assertEquals(work_item, "work_item") + return True + + def handle_unexpected_error(self, work_item, message): + self.record("handle_unexpected_error") + self._test.assertEquals(work_item, "work_item") + + def stop_work_queue(self, message): + self.record("stop_work_queue") + self.stop_message = message + + +class RaisingDelegate(LoggingDelegate): + def __init__(self, test, exception): + LoggingDelegate.__init__(self, test) + self._exception = exception + + def process_work_item(self, work_item): + self.record("process_work_item") + raise self._exception + + +class NotSafeToProceedDelegate(LoggingDelegate): + def should_proceed_with_work_item(self, work_item): + self.record("should_proceed_with_work_item") + self._test.assertEquals(work_item, "work_item") + return False + + +class FastQueueEngine(QueueEngine): + def __init__(self, delegate): + QueueEngine.__init__(self, "fast-queue", delegate, threading.Event()) + + # No sleep for the wicked. 
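LoggingDelegate above is a recording spy: every callback appends its own name to _callbacks, and the tests below compare that list against expected_callbacks to prove the engine drives its delegate in the documented order. The same technique in isolation, with illustrative names that are not part of this patch:

class RecordingDelegate(object):
    def __init__(self):
        self.calls = []

    def record(self, method_name):
        self.calls.append(method_name)

    def begin_work(self):
        self.record("begin_work")

    def next_item(self):
        self.record("next_item")
        return None


delegate = RecordingDelegate()
delegate.begin_work()
delegate.next_item()
assert delegate.calls == ["begin_work", "next_item"]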
+ seconds_to_sleep = 0 + + def _sleep(self, message): + pass + + +class QueueEngineTest(unittest.TestCase): + def test_trivial(self): + delegate = LoggingDelegate(self) + self._run_engine(delegate) + self.assertEquals(delegate.stop_message, "Delegate terminated queue.") + self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks) + self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path"))) + self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log"))) + + def test_unexpected_error(self): + delegate = RaisingDelegate(self, ScriptError(exit_code=3)) + self._run_engine(delegate) + expected_callbacks = LoggingDelegate.expected_callbacks[:] + work_item_index = expected_callbacks.index('process_work_item') + # The unexpected error should be handled right after process_work_item starts + # but before any other callback. Otherwise callbacks should be normal. + expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error') + self.assertEquals(delegate._callbacks, expected_callbacks) + + def test_handled_error(self): + delegate = RaisingDelegate(self, ScriptError(exit_code=QueueEngine.handled_error_code)) + self._run_engine(delegate) + self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks) + + def _run_engine(self, delegate, engine=None, termination_message=None): + if not engine: + engine = QueueEngine("test-queue", delegate, threading.Event()) + if not termination_message: + termination_message = "Delegate terminated queue." + expected_stderr = "\n%s\n" % termination_message + OutputCapture().assert_outputs(self, engine.run, [], expected_stderr=expected_stderr) + + def _test_terminating_queue(self, exception, termination_message): + work_item_index = LoggingDelegate.expected_callbacks.index('process_work_item') + # The terminating error should be handled right after process_work_item. + # There should be no other callbacks after stop_work_queue. + expected_callbacks = LoggingDelegate.expected_callbacks[:work_item_index + 1] + expected_callbacks.append("stop_work_queue") + + delegate = RaisingDelegate(self, exception) + self._run_engine(delegate, termination_message=termination_message) + + self.assertEquals(delegate._callbacks, expected_callbacks) + self.assertEquals(delegate.stop_message, termination_message) + + def test_terminating_error(self): + self._test_terminating_queue(KeyboardInterrupt(), "User terminated queue.") + self._test_terminating_queue(TerminateQueue(), "TerminateQueue exception received.") + + def test_not_safe_to_proceed(self): + delegate = NotSafeToProceedDelegate(self) + self._run_engine(delegate, engine=FastQueueEngine(delegate)) + expected_callbacks = LoggingDelegate.expected_callbacks[:] + expected_callbacks.remove('work_item_log_path') + expected_callbacks.remove('process_work_item') + self.assertEquals(delegate._callbacks, expected_callbacks) + + def test_now(self): + """Make sure there are no typos in the QueueEngine.now() method.""" + engine = QueueEngine("test", None, None) + self.assertTrue(isinstance(engine._now(), datetime.datetime)) + + def test_sleep_message(self): + engine = QueueEngine("test", None, None) + engine._now = lambda: datetime.datetime(2010, 1, 1) + expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (2 mins)." 
+ self.assertEqual(engine._sleep_message("MESSAGE"), expected_sleep_message) + + def setUp(self): + self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs") + + def tearDown(self): + shutil.rmtree(self.temp_dir) + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff.py b/Tools/Scripts/webkitpy/tool/bot/sheriff.py new file mode 100644 index 0000000..43f3221 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/sheriff.py @@ -0,0 +1,91 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.config import urls +from webkitpy.common.net.bugzilla import parse_bug_id +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.grammar import join_with_separators + + +class Sheriff(object): + def __init__(self, tool, sheriffbot): + self._tool = tool + self._sheriffbot = sheriffbot + + def post_irc_warning(self, commit_info, builders): + irc_nicknames = sorted([party.irc_nickname for + party in commit_info.responsible_parties() + if party.irc_nickname]) + irc_prefix = ": " if irc_nicknames else "" + irc_message = "%s%s%s might have broken %s" % ( + ", ".join(irc_nicknames), + irc_prefix, + urls.view_revision_url(commit_info.revision()), + join_with_separators([builder.name() for builder in builders])) + + self._tool.irc().post(irc_message) + + def post_rollout_patch(self, svn_revision, rollout_reason): + # Ensure that svn_revision is a number (and not an option to + # create-rollout). + try: + svn_revision = int(svn_revision) + except: + raise ScriptError(message="Invalid svn revision number \"%s\"." + % svn_revision) + + if rollout_reason.startswith("-"): + raise ScriptError(message="The rollout reason may not begin " + "with - (\"%s\")." % rollout_reason) + + output = self._sheriffbot.run_webkit_patch([ + "create-rollout", + "--force-clean", + # In principle, we should pass --non-interactive here, but it + # turns out that create-rollout doesn't need it yet. 
We can't + # pass it prophylactically because we reject unrecognized command + # line switches. + "--parent-command=sheriff-bot", + svn_revision, + rollout_reason, + ]) + return parse_bug_id(output) + + def post_blame_comment_on_bug(self, commit_info, builders, tests): + if not commit_info.bug_id(): + return + comment = "%s might have broken %s" % ( + urls.view_revision_url(commit_info.revision()), + join_with_separators([builder.name() for builder in builders])) + if tests: + comment += "\nThe following tests are not passing:\n" + comment += "\n".join(tests) + self._tool.bugs.post_comment_to_bug(commit_info.bug_id(), + comment, + cc=self._sheriffbot.watchers) diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py new file mode 100644 index 0000000..690af1f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py @@ -0,0 +1,90 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import unittest + +from webkitpy.common.net.buildbot import Builder +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.bot.sheriff import Sheriff +from webkitpy.tool.mocktool import MockTool + + +class MockSheriffBot(object): + name = "mock-sheriff-bot" + watchers = [ + "watcher@example.com", + ] + + def run_webkit_patch(self, args): + return "Created bug https://bugs.webkit.org/show_bug.cgi?id=36936\n" + + +class SheriffTest(unittest.TestCase): + def test_post_blame_comment_on_bug(self): + def run(): + sheriff = Sheriff(MockTool(), MockSheriffBot()) + builders = [ + Builder("Foo", None), + Builder("Bar", None), + ] + commit_info = Mock() + commit_info.bug_id = lambda: None + commit_info.revision = lambda: 4321 + # Should do nothing with no bug_id + sheriff.post_blame_comment_on_bug(commit_info, builders, []) + sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"]) + # Should try to post a comment to the bug, but MockTool.bugs does nothing. 
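The checks at the top of Sheriff.post_rollout_patch above (the revision must be a plain number, the reason must not start with "-") are exactly what several of the IRC bot tests later in this patch exercise. The same validation reduced to a standalone sketch, with an illustrative exception class in place of ScriptError:

class RolloutError(Exception):
    pass


def validate_rollout_args(svn_revision, rollout_reason):
    # Reject options such as --component=Tools masquerading as a revision.
    try:
        svn_revision = int(svn_revision)
    except ValueError:
        raise RolloutError('Invalid svn revision number "%s".' % svn_revision)
    if rollout_reason.startswith("-"):
        raise RolloutError('The rollout reason may not begin with - ("%s").'
                           % rollout_reason)
    return svn_revision, rollout_reason


print(validate_rollout_args("21654", "Broke the world"))
# (21654, 'Broke the world')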
+ commit_info.bug_id = lambda: 1234 + sheriff.post_blame_comment_on_bug(commit_info, builders, []) + sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1"]) + sheriff.post_blame_comment_on_bug(commit_info, builders, ["mock-test-1", "mock-test-2"]) + + expected_stderr = u"""MOCK bug comment: bug_id=1234, cc=['watcher@example.com'] +--- Begin comment --- +http://trac.webkit.org/changeset/4321 might have broken Foo and Bar +--- End comment --- + +MOCK bug comment: bug_id=1234, cc=['watcher@example.com'] +--- Begin comment --- +http://trac.webkit.org/changeset/4321 might have broken Foo and Bar +The following tests are not passing: +mock-test-1 +--- End comment --- + +MOCK bug comment: bug_id=1234, cc=['watcher@example.com'] +--- Begin comment --- +http://trac.webkit.org/changeset/4321 might have broken Foo and Bar +The following tests are not passing: +mock-test-1 +mock-test-2 +--- End comment --- + +""" + OutputCapture().assert_outputs(self, run, expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py new file mode 100644 index 0000000..de77222 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py @@ -0,0 +1,83 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
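OutputCapture, used by this test and most others in the patch, asserts on whatever the code under test writes to stdout and stderr. For readers unfamiliar with it, a rough equivalent of the stderr half using the modern standard library; this is not webkitpy's actual implementation:

import io
import sys
from contextlib import redirect_stderr


def assert_stderr(func, expected_stderr):
    captured = io.StringIO()
    with redirect_stderr(captured):
        func()
    assert captured.getvalue() == expected_stderr, captured.getvalue()


assert_stderr(lambda: sys.stderr.write("MOCK: irc.post: hello\n"),
              "MOCK: irc.post: hello\n")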
+ +import webkitpy.tool.bot.irc_command as irc_command + +from webkitpy.common.net.irc.ircbot import IRCBotDelegate +from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue + + +class _IRCThreadTearoff(IRCBotDelegate): + def __init__(self, password, message_queue, wakeup_event): + self._password = password + self._message_queue = message_queue + self._wakeup_event = wakeup_event + + # IRCBotDelegate methods + + def irc_message_received(self, nick, message): + self._message_queue.post([nick, message]) + self._wakeup_event.set() + + def irc_nickname(self): + return "sheriffbot" + + def irc_password(self): + return self._password + + +class SheriffIRCBot(object): + def __init__(self, tool, sheriff): + self._tool = tool + self._sheriff = sheriff + self._message_queue = ThreadedMessageQueue() + + def irc_delegate(self): + return _IRCThreadTearoff(self._tool.irc_password, + self._message_queue, + self._tool.wakeup_event) + + def process_message(self, message): + (nick, request) = message + tokenized_request = request.strip().split(" ") + if not tokenized_request: + return + command = irc_command.commands.get(tokenized_request[0]) + args = tokenized_request[1:] + if not command: + # Give the peoples someone to talk with. + command = irc_command.Eliza + args = tokenized_request + response = command().execute(nick, args, self._tool, self._sheriff) + if response: + self._tool.irc().post(response) + + def process_pending_messages(self): + (messages, is_running) = self._message_queue.take_all() + for message in messages: + self.process_message(message) diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py new file mode 100644 index 0000000..08023bd --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py @@ -0,0 +1,95 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
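SheriffIRCBot.process_message above is a dispatch table with a conversational fallback: the first token of each IRC line selects a command class from irc_command.commands, and anything unrecognized goes to Eliza so the bot always has a reply. The pattern in isolation, with toy command classes that are purely illustrative:

class Hi(object):
    def execute(self, nick, args):
        return "%s: hello!" % nick


class Fallback(object):
    def execute(self, nick, args):
        return "%s: I don't understand %r" % (nick, " ".join(args))


commands = {"hi": Hi}


def process_message(nick, request):
    tokens = request.strip().split(" ")
    command_class = commands.get(tokens[0])
    if command_class:
        return command_class().execute(nick, tokens[1:])
    # Unknown first token: hand the whole line to the fallback.
    return Fallback().execute(nick, tokens)


print(process_message("mock_nick", "hi there"))    # mock_nick: hello!
print(process_message("mock_nick", "rollout 42"))  # handled by Fallback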
+ +import unittest +import random + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.bot.sheriff import Sheriff +from webkitpy.tool.bot.sheriffircbot import SheriffIRCBot +from webkitpy.tool.bot.sheriff_unittest import MockSheriffBot +from webkitpy.tool.mocktool import MockTool + + +def run(message): + tool = MockTool() + tool.ensure_irc_connected(None) + bot = SheriffIRCBot(tool, Sheriff(tool, MockSheriffBot())) + bot._message_queue.post(["mock_nick", message]) + bot.process_pending_messages() + + +class SheriffIRCBotTest(unittest.TestCase): + def test_hi(self): + random.seed(23324) + expected_stderr = 'MOCK: irc.post: "Only you can prevent forest fires." -- Smokey the Bear\n' + OutputCapture().assert_outputs(self, run, args=["hi"], expected_stderr=expected_stderr) + + def test_help(self): + expected_stderr = "MOCK: irc.post: mock_nick: Available commands: rollout, hi, help, restart, last-green-revision\n" + OutputCapture().assert_outputs(self, run, args=["help"], expected_stderr=expected_stderr) + + def test_lgr(self): + expected_stderr = "MOCK: irc.post: mock_nick: http://trac.webkit.org/changeset/9479\n" + OutputCapture().assert_outputs(self, run, args=["last-green-revision"], expected_stderr=expected_stderr) + + def test_rollout(self): + expected_stderr = "MOCK: irc.post: Preparing rollout for r21654...\nMOCK: irc.post: mock_nick: Created rollout: http://example.com/36936\n" + OutputCapture().assert_outputs(self, run, args=["rollout 21654 This patch broke the world"], expected_stderr=expected_stderr) + + def test_rollout_with_r_in_svn_revision(self): + expected_stderr = "MOCK: irc.post: Preparing rollout for r21654...\nMOCK: irc.post: mock_nick: Created rollout: http://example.com/36936\n" + OutputCapture().assert_outputs(self, run, args=["rollout r21654 This patch broke the world"], expected_stderr=expected_stderr) + + def test_rollout_bananas(self): + expected_stderr = "MOCK: irc.post: mock_nick: Usage: SVN_REVISION REASON\n" + OutputCapture().assert_outputs(self, run, args=["rollout bananas"], expected_stderr=expected_stderr) + + def test_rollout_invalidate_revision(self): + expected_stderr = ("MOCK: irc.post: Preparing rollout for r--component=Tools...\n" + "MOCK: irc.post: mock_nick: Failed to create rollout patch:\n" + "MOCK: irc.post: Invalid svn revision number \"--component=Tools\".\n") + OutputCapture().assert_outputs(self, run, + args=["rollout " + "--component=Tools 21654"], + expected_stderr=expected_stderr) + + def test_rollout_invalidate_reason(self): + expected_stderr = ("MOCK: irc.post: Preparing rollout for " + "r21654...\nMOCK: irc.post: mock_nick: Failed to " + "create rollout patch:\nMOCK: irc.post: The rollout" + " reason may not begin with - (\"-bad (Requested " + "by mock_nick on #webkit).\").\n") + OutputCapture().assert_outputs(self, run, + args=["rollout " + "21654 -bad"], + expected_stderr=expected_stderr) + + def test_rollout_no_reason(self): + expected_stderr = "MOCK: irc.post: mock_nick: Usage: SVN_REVISION REASON\n" + OutputCapture().assert_outputs(self, run, args=["rollout 21654"], expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/commands/__init__.py b/Tools/Scripts/webkitpy/tool/commands/__init__.py new file mode 100644 index 0000000..a974b67 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/__init__.py @@ -0,0 +1,14 @@ +# Required for Python to search this directory for module files + +from webkitpy.tool.commands.bugsearch import BugSearch +from webkitpy.tool.commands.bugfortest 
import BugForTest +from webkitpy.tool.commands.download import * +from webkitpy.tool.commands.earlywarningsystem import * +from webkitpy.tool.commands.openbugs import OpenBugs +from webkitpy.tool.commands.prettydiff import PrettyDiff +from webkitpy.tool.commands.queries import * +from webkitpy.tool.commands.queues import * +from webkitpy.tool.commands.rebaseline import Rebaseline +from webkitpy.tool.commands.rebaselineserver import RebaselineServer +from webkitpy.tool.commands.sheriffbot import * +from webkitpy.tool.commands.upload import * diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py new file mode 100644 index 0000000..fd10890 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.system.deprecated_logging import log +from webkitpy.tool.commands.stepsequence import StepSequence +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand + + +class AbstractSequencedCommand(AbstractDeclarativeCommand): + steps = None + def __init__(self): + self._sequence = StepSequence(self.steps) + AbstractDeclarativeCommand.__init__(self, self._sequence.options()) + + def _prepare_state(self, options, args, tool): + return None + + def execute(self, options, args, tool): + try: + state = self._prepare_state(options, args, tool) + except ScriptError, e: + log(e.message_with_output()) + exit(e.exit_code or 2) + + self._sequence.run_and_handle_errors(tool, options, state) diff --git a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py new file mode 100644 index 0000000..36aa6b5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py @@ -0,0 +1,48 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. 
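AbstractSequencedCommand above reduces a command to a declarative list of steps plus an optional _prepare_state hook, and leaves the running and error handling to StepSequence (added elsewhere in this patch). Stripped of the webkitpy plumbing, the shape of the pattern is roughly the following; the step functions and class names are illustrative only:

class SequencedCommand(object):
    steps = []  # subclasses list callables that take a shared state dict

    def prepare_state(self):
        return {}

    def execute(self):
        state = self.prepare_state()
        for step in self.steps:
            step(state)
        return state


def clean(state):
    state["clean"] = True


def build(state):
    assert state.get("clean"), "must clean before building"
    state["built"] = True


class Build(SequencedCommand):
    steps = [clean, build]


print(Build().execute())  # {'clean': True, 'built': True}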
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter + + +# This is mostly a command for testing FlakyTestReporter, however +# it could be easily expanded to auto-create bugs, etc. if another +# command outside of webkitpy wanted to use it. +class BugForTest(AbstractDeclarativeCommand): + name = "bug-for-test" + help_text = "Finds the bugzilla bug for a given test" + + def execute(self, options, args, tool): + reporter = FlakyTestReporter(tool, "webkitpy") + search_string = args[0] + bug = reporter._lookup_bug_for_flaky_test(search_string) + if bug: + bug = reporter._follow_duplicate_chain(bug) + print "%5s %s" % (bug.id(), bug.title()) + else: + print "No bugs found matching '%s'" % search_string diff --git a/Tools/Scripts/webkitpy/tool/commands/bugsearch.py b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py new file mode 100644 index 0000000..5cbc1a0 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py @@ -0,0 +1,42 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand + + +class BugSearch(AbstractDeclarativeCommand): + name = "bug-search" + help_text = "List bugs matching a query" + + def execute(self, options, args, tool): + search_string = args[0] + bugs = tool.bugs.queries.fetch_bugs_matching_quicksearch(search_string) + for bug in bugs: + print "%5s %s" % (bug.id(), bug.title()) + if not bugs: + print "No bugs found matching '%s'" % search_string diff --git a/Tools/Scripts/webkitpy/tool/commands/commandtest.py b/Tools/Scripts/webkitpy/tool/commands/commandtest.py new file mode 100644 index 0000000..c0efa50 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/commandtest.py @@ -0,0 +1,48 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
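BugSearch follows the same report-or-apologize shape as BugForTest above: one "%5s %s" line per matching bug, or a "No bugs found" message. Since the bugzilla client is only available inside the tool, a toy version of just that output logic (FakeBug is invented for the example):

class FakeBug(object):
    def __init__(self, bug_id, title):
        self._id = bug_id
        self._title = title

    def id(self):
        return self._id

    def title(self):
        return self._title


def print_bugs(bugs, search_string):
    for bug in bugs:
        print("%5s %s" % (bug.id(), bug.title()))
    if not bugs:
        print("No bugs found matching '%s'" % search_string)


print_bugs([FakeBug(36936, "Example flaky test bug")], "flaky")
#  36936 Example flaky test bug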
+ +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.mocktool import MockOptions, MockTool + +class CommandsTest(unittest.TestCase): + def assert_execute_outputs(self, command, args, expected_stdout="", expected_stderr="", options=MockOptions(), tool=MockTool()): + options.blocks = None + options.cc = 'MOCK cc' + options.component = 'MOCK component' + options.confirm = True + options.email = 'MOCK email' + options.git_commit = 'MOCK git commit' + options.obsolete_patches = True + options.open_bug = True + options.port = 'MOCK port' + options.quiet = True + options.reviewer = 'MOCK reviewer' + command.bind_to_tool(tool) + OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/index.html b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/index.html new file mode 100644 index 0000000..8bdf7c2 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/index.html @@ -0,0 +1,180 @@ +<!DOCTYPE html> +<!-- + Copyright (c) 2010 Google Inc. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
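assert_execute_outputs above populates a MockOptions object attribute by attribute before binding the command to a MockTool; MockOptions itself lives in webkitpy.tool.mocktool and is not shown in this section of the diff. A plausible minimal stand-in, only to show the shape such an options bag needs:

class FakeOptions(object):
    """Bare attribute bag standing in for optparse-style options."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)


options = FakeOptions(confirm=True, quiet=True)
options.reviewer = "MOCK reviewer"  # attributes can also be added afterwards
print(options.confirm, options.reviewer)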
+--> +<html> +<head> + <title>Layout Test Rebaseline Server</title> + <link rel="stylesheet" href="/main.css" type="text/css"> + <script src="/util.js"></script> + <script src="/loupe.js"></script> + <script src="/main.js"></script> + <script src="/queue.js"></script> +</head> +<body class="loading"> + +<pre id="log" style="display: none"></pre> +<div id="queue" style="display: none"> + Queue: + <select id="queue-select" size="10"></select> + <button id="remove-queue-selection">Remove selection</button> + <button id="rebaseline-queue">Rebaseline queue</button> +</div> + +<div id="header"> + <div id="controls"> + <!-- Add a dummy <select> node so that this lines up with the text on the left --> + <select style="visibility: hidden"></select> + <span id="toggle-log" class="link">Log</span> + <span class="divider">|</span> + <a href="/quitquitquit">Exit</a> + </div> + + <span id="selectors"> + <label> + Failure type: + <select id="failure-type-selector"></select> + </label> + + <label> + Directory: + <select id="directory-selector"></select> + </label> + + <label> + Test: + <select id="test-selector"></select> + </label> + </span> + + <a id="test-link" target="_blank">View test</a> + + <span id="nav-buttons"> + <button id="previous-test">«</button> + <span id="test-index"></span> of <span id="test-count"></span> + <button id="next-test">»</button> + </span> +</div> + +<table id="test-output"> + <thead id="labels"> + <tr> + <th>Expected</th> + <th>Actual</th> + <th>Diff</th> + </tr> + </thead> + <tbody id="image-outputs" style="display: none"> + <tr> + <td colspan="3"><h2>Image</h2></td> + </tr> + <tr> + <td><img id="expected-image"></td> + <td><img id="actual-image"></td> + <td> + <canvas id="diff-canvas" width="800" height="600"></canvas> + <div id="diff-checksum" style="display: none"> + <h3>Checksum mismatch</h3> + Expected: <span id="expected-checksum"></span><br> + Actual: <span id="actual-checksum"></span> + </div> + </td> + </tr> + </tbody> + <tbody id="text-outputs" style="display: none"> + <tr> + <td colspan="3"><h2>Text</h2></td> + </tr> + <tr> + <td><pre id="expected-text" class="text-output"></pre></td> + <td><pre id="actual-text" class="text-output"></pre></td> + <td><div id="diff-text-pretty" class="text-output"></div></td> + </tr> + </tbody> +</table> + +<div id="footer"> + <label>State: <span id="state"></span></label> + <label>Existing baselines: <span id="current-baselines"></span></label> + <label> + Baseline target: + <select id="baseline-target"></select> + </label> + <label> + Move current baselines to: + <select id="baseline-move-to"> + <option value="none">Nowhere (replace)</option> + </select> + </label> + + <!-- Add a dummy <button> node so that this lines up with the text on the right --> + <button style="visibility: hidden; padding-left: 0; padding-right: 0;"></button> + + <div id="action-buttons"> + <span id="toggle-queue" class="link">Queue</span> + <button id="add-to-rebaseline-queue">Add to rebaseline queue</button> + </div> +</div> + +<table id="loupe" style="display: none"> + <tr> + <td colspan="3" id="loupe-info"> + <span id="loupe-close" class="link">Close</span> + <label>Coordinate: <span id="loupe-coordinate"></span></label> + </td> + </tr> + <tr> + <td> + <div class="loupe-container"> + <canvas id="expected-loupe" width="210" height="210"></canvas> + <div class="center-highlight"></div> + </div> + </td> + <td> + <div class="loupe-container"> + <canvas id="actual-loupe" width="210" height="210"></canvas> + <div class="center-highlight"></div> + </div> + 
</td> + <td> + <div class="loupe-container"> + <canvas id="diff-loupe" width="210" height="210"></canvas> + <div class="center-highlight"></div> + </div> + </td> + </tr> + <tr id="loupe-colors"> + <td><label>Exp. color: <span id="expected-loupe-color"></span></label></td> + <td><label>Actual color: <span id="actual-loupe-color"></span></label></td> + <td><label>Diff color: <span id="diff-loupe-color"></span></label></td> + </tr> +</table> + +</body> +</html> diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/loupe.js b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/loupe.js new file mode 100644 index 0000000..41f977a --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/loupe.js @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +var LOUPE_MAGNIFICATION_FACTOR = 10; + +function Loupe() +{ + this._node = $('loupe'); + this._currentCornerX = -1; + this._currentCornerY = -1; + + var self = this; + + function handleOutputClick(event) { self._handleOutputClick(event); } + $('expected-image').addEventListener('click', handleOutputClick); + $('actual-image').addEventListener('click', handleOutputClick); + $('diff-canvas').addEventListener('click', handleOutputClick); + + function handleLoupeClick(event) { self._handleLoupeClick(event); } + $('expected-loupe').addEventListener('click', handleLoupeClick); + $('actual-loupe').addEventListener('click', handleLoupeClick); + $('diff-loupe').addEventListener('click', handleLoupeClick); + + function hide(event) { self.hide(); } + $('loupe-close').addEventListener('click', hide); +} + +Loupe.prototype._handleOutputClick = function(event) +{ + // The -1 compensates for the border around the image/canvas. 
+ this._showFor(event.offsetX - 1, event.offsetY - 1); +}; + +Loupe.prototype._handleLoupeClick = function(event) +{ + var deltaX = Math.floor(event.offsetX/LOUPE_MAGNIFICATION_FACTOR); + var deltaY = Math.floor(event.offsetY/LOUPE_MAGNIFICATION_FACTOR); + + this._showFor( + this._currentCornerX + deltaX, this._currentCornerY + deltaY); +} + +Loupe.prototype.hide = function() +{ + this._node.style.display = 'none'; +}; + +Loupe.prototype._showFor = function(x, y) +{ + this._fillFromImage(x, y, 'expected', $('expected-image')); + this._fillFromImage(x, y, 'actual', $('actual-image')); + this._fillFromCanvas(x, y, 'diff', $('diff-canvas')); + + this._node.style.display = ''; +}; + +Loupe.prototype._fillFromImage = function(x, y, type, sourceImage) +{ + var tempCanvas = document.createElement('canvas'); + tempCanvas.width = sourceImage.width; + tempCanvas.height = sourceImage.height; + var tempContext = tempCanvas.getContext('2d'); + + tempContext.drawImage(sourceImage, 0, 0); + + this._fillFromCanvas(x, y, type, tempCanvas); +}; + +Loupe.prototype._fillFromCanvas = function(x, y, type, canvas) +{ + var context = canvas.getContext('2d'); + var sourceImageData = + context.getImageData(0, 0, canvas.width, canvas.height); + + var targetCanvas = $(type + '-loupe'); + var targetContext = targetCanvas.getContext('2d'); + targetContext.fillStyle = 'rgba(255, 255, 255, 1)'; + targetContext.fillRect(0, 0, targetCanvas.width, targetCanvas.height); + + var sourceXOffset = (targetCanvas.width/LOUPE_MAGNIFICATION_FACTOR - 1)/2; + var sourceYOffset = (targetCanvas.height/LOUPE_MAGNIFICATION_FACTOR - 1)/2; + + function readPixelComponent(x, y, component) { + var offset = (y * sourceImageData.width + x) * 4 + component; + return sourceImageData.data[offset]; + } + + for (var i = -sourceXOffset; i <= sourceXOffset; i++) { + for (var j = -sourceYOffset; j <= sourceYOffset; j++) { + var sourceX = x + i; + var sourceY = y + j; + + var sourceR = readPixelComponent(sourceX, sourceY, 0); + var sourceG = readPixelComponent(sourceX, sourceY, 1); + var sourceB = readPixelComponent(sourceX, sourceY, 2); + var sourceA = readPixelComponent(sourceX, sourceY, 3)/255; + sourceA = Math.round(sourceA * 10)/10; + + var targetX = (i + sourceXOffset) * LOUPE_MAGNIFICATION_FACTOR; + var targetY = (j + sourceYOffset) * LOUPE_MAGNIFICATION_FACTOR; + var colorString = + sourceR + ', ' + sourceG + ', ' + sourceB + ', ' + sourceA; + targetContext.fillStyle = 'rgba(' + colorString + ')'; + targetContext.fillRect( + targetX, targetY, + LOUPE_MAGNIFICATION_FACTOR, LOUPE_MAGNIFICATION_FACTOR); + + if (i == 0 && j == 0) { + $('loupe-coordinate').textContent = sourceX + ', ' + sourceY; + $(type + '-loupe-color').textContent = colorString; + } + } + } + + this._currentCornerX = x - sourceXOffset; + this._currentCornerY = y - sourceYOffset; +}; diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.css b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.css new file mode 100644 index 0000000..76643c5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.css @@ -0,0 +1,309 @@ +/* + * Copyright (c) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +body { + font-size: 12px; + font-family: Helvetica, Arial, sans-serif; + padding: 0; + margin: 0; +} + +.loading { + opacity: 0.5; +} + +div { + margin: 0; +} + +a, .link { + color: #aaf; + text-decoration: underline; + cursor: pointer; +} + +.link.selected { + color: #fff; + font-weight: bold; + text-decoration: none; +} + +#log, +#queue { + padding: .25em 0 0 .25em; + position: absolute; + right: 0; + height: 200px; + overflow: auto; + background: #fff; + -webkit-box-shadow: 1px 1px 5px rgba(0, 0, 0, .5); +} + +#log { + top: 2em; + width: 500px; +} + +#queue { + bottom: 3em; + width: 400px; +} + +#queue-select { + display: block; + width: 390px; +} + +#header, +#footer { + padding: .5em 1em; + background: #333; + color: #fff; + -webkit-box-shadow: 0 1px 5px rgba(0, 0, 0, 0.5); +} + +#header { + margin-bottom: 1em; +} + +#header .divider, +#footer .divider { + opacity: .3; + padding: 0 .5em; +} + +#header label, +#footer label { + padding-right: 1em; + color: #ccc; +} + +#test-link { + margin-right: 1em; +} + +#header label span, +#footer label span { + color: #fff; + font-weight: bold; +} + +#nav-buttons { + white-space: nowrap; +} + +#nav-buttons button { + background: #fff; + border: 0; + border-radius: 10px; +} + +#nav-buttons button:active { + -webkit-box-shadow: 0 0 5px #33f inset; + background: #aaa; +} + +#nav-buttons button[disabled] { + opacity: .5; +} + +#controls { + float: right; +} + +#test-output { + border-spacing: 0; + border-collapse: collapse; + margin: 0 auto; + width: 100%; +} + +#test-output td, +#test-output th { + padding: 0; + vertical-align: top; +} + +#image-outputs img, +#image-outputs canvas, +#image-outputs #diff-checksum { + width: 800px; + height: 600px; + border: solid 1px #ddd; + -webkit-user-select: none; + -webkit-user-drag: none; +} + +#image-outputs img, +#image-outputs canvas { + cursor: crosshair; +} + +#image-outputs img.loading, +#image-outputs canvas.loading { + opacity: .5; +} + +#image-outputs #actual-image { + margin: 0 1em; +} + +#test-output #labels th { + text-align: center; + color: #666; +} + +#text-outputs .text-output { + height: 600px; + width: 800px; + overflow: auto; +} + +#test-output h2 { + border-bottom: solid 1px #ccc; + font-weight: bold; + margin: 0; + background: #eee; +} + +#footer { + position: absolute; + bottom: 0; + 
left: 0; + right: 0; + margin-top: 1em; +} + +#state.needs_rebaseline { + color: yellow; +} + +#state.rebaseline_failed { + color: red; +} + +#state.rebaseline_succeeded { + color: green; +} + +#state.in_queue { + color: gray; +} + +#current-baselines { + font-weight: normal !important; +} + +#current-baselines .platform { + font-weight: bold; +} + +#current-baselines a { + color: #ddf; +} + +#current-baselines .was-used-for-test { + color: #aaf; + font-weight: bold; +} + +#action-buttons { + float: right; +} + +#action-buttons .link { + margin-right: 1em; +} + +#footer button { + padding: 1em; +} + +#loupe { + -webkit-box-shadow: 2px 2px 5px rgba(0, 0, 0, .5); + position: absolute; + width: 634px; + top: 50%; + left: 50%; + margin-left: -151px; + margin-top: -50px; + background: #fff; + border-spacing: 0; + border-collapse: collapse; +} + +#loupe td { + padding: 0; + border: solid 1px #ccc; +} + +#loupe label { + color: #999; + padding-right: 1em; +} + +#loupe span { + color: #000; + font-weight: bold; +} + +#loupe canvas { + cursor: crosshair; +} + +#loupe #loupe-close { + float: right; +} + +#loupe #loupe-info { + background: #eee; + padding: .3em .5em; +} + +#loupe #loupe-colors td { + text-align: center; +} + +#loupe .loupe-container { + position: relative; + width: 210px; + height: 210px; +} + +#loupe .center-highlight { + position: absolute; + width: 10px; + height: 10px; + top: 50%; + left: 50%; + margin-left: -5px; + margin-top: -5px; + outline: solid 1px #999; +} diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.js b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.js new file mode 100644 index 0000000..aeaac04 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/main.js @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +var ALL_DIRECTORY_PATH = '[all]'; + +var STATE_NEEDS_REBASELINE = 'needs_rebaseline'; +var STATE_REBASELINE_FAILED = 'rebaseline_failed'; +var STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded'; +var STATE_IN_QUEUE = 'in_queue'; +var STATE_TO_DISPLAY_STATE = {}; +STATE_TO_DISPLAY_STATE[STATE_NEEDS_REBASELINE] = 'Needs rebaseline'; +STATE_TO_DISPLAY_STATE[STATE_REBASELINE_FAILED] = 'Rebaseline failed'; +STATE_TO_DISPLAY_STATE[STATE_REBASELINE_SUCCEEDED] = 'Rebaseline succeeded'; +STATE_TO_DISPLAY_STATE[STATE_IN_QUEUE] = 'In queue'; + +var results; +var testsByFailureType = {}; +var testsByDirectory = {}; +var selectedTests = []; +var loupe; +var queue; + +function main() +{ + $('failure-type-selector').addEventListener('change', selectFailureType); + $('directory-selector').addEventListener('change', selectDirectory); + $('test-selector').addEventListener('change', selectTest); + $('next-test').addEventListener('click', nextTest); + $('previous-test').addEventListener('click', previousTest); + + $('toggle-log').addEventListener('click', function() { toggle('log'); }); + + loupe = new Loupe(); + queue = new RebaselineQueue(); + + document.addEventListener('keydown', function(event) { + if (event.altKey || event.ctrlKey || event.metaKey || event.shiftKey) { + return; + } + + switch (event.keyIdentifier) { + case 'Left': + event.preventDefault(); + previousTest(); + break; + case 'Right': + event.preventDefault(); + nextTest(); + break; + case 'U+0051': // q + queue.addCurrentTest(); + break; + case 'U+0058': // x + queue.removeCurrentTest(); + break; + case 'U+0052': // r + queue.rebaseline(); + break; + } + }); + + loadText('/platforms.json', function(text) { + var platforms = JSON.parse(text); + platforms.platforms.forEach(function(platform) { + var platformOption = document.createElement('option'); + platformOption.value = platform; + platformOption.textContent = platform; + + var targetOption = platformOption.cloneNode(true); + targetOption.selected = platform == platforms.defaultPlatform; + $('baseline-target').appendChild(targetOption); + $('baseline-move-to').appendChild(platformOption.cloneNode(true)); + }); + }); + + loadText('/results.json', function(text) { + results = JSON.parse(text); + displayResults(); + }); +} + +/** + * Groups test results by failure type. + */ +function displayResults() +{ + var failureTypeSelector = $('failure-type-selector'); + var failureTypes = []; + + for (var testName in results.tests) { + var test = results.tests[testName]; + if (test.actual == 'PASS') { + continue; + } + var failureType = test.actual + ' (expected ' + test.expected + ')'; + if (!(failureType in testsByFailureType)) { + testsByFailureType[failureType] = []; + failureTypes.push(failureType); + } + testsByFailureType[failureType].push(testName); + } + + // Sort by number of failures + failureTypes.sort(function(a, b) { + return testsByFailureType[b].length - testsByFailureType[a].length; + }); + + for (var i = 0, failureType; failureType = failureTypes[i]; i++) { + var failureTypeOption = document.createElement('option'); + failureTypeOption.value = failureType; + failureTypeOption.textContent = failureType + ' - ' + testsByFailureType[failureType].length + ' tests'; + failureTypeSelector.appendChild(failureTypeOption); + } + + selectFailureType(); + + document.body.className = ''; +} + +/** + * For a given failure type, gets all the tests and groups them by directory + * (populating the directory selector with them). 
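displayResults above buckets every non-PASS entry of results.json under a key of the form "ACTUAL (expected EXPECTED)" and sorts the buckets by size before filling the selector. The same bucketing in Python, against a hand-written results dictionary (the JSON layout is inferred from the JavaScript, not documented in this patch):

results = {
    "tests": {
        "fast/css/a.html": {"actual": "IMAGE", "expected": "PASS"},
        "fast/css/b.html": {"actual": "IMAGE", "expected": "PASS"},
        "fast/js/c.html": {"actual": "TEXT", "expected": "PASS"},
        "fast/js/d.html": {"actual": "PASS", "expected": "PASS"},
    }
}

tests_by_failure_type = {}
for test_name, test in results["tests"].items():
    if test["actual"] == "PASS":
        continue
    failure_type = "%s (expected %s)" % (test["actual"], test["expected"])
    tests_by_failure_type.setdefault(failure_type, []).append(test_name)

# Largest buckets first, matching the order of the failure-type selector.
for failure_type in sorted(tests_by_failure_type,
                           key=lambda key: len(tests_by_failure_type[key]),
                           reverse=True):
    print("%s - %d tests" % (failure_type,
                             len(tests_by_failure_type[failure_type])))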
+ */ +function selectFailureType() +{ + var selectedFailureType = getSelectValue('failure-type-selector'); + var tests = testsByFailureType[selectedFailureType]; + + testsByDirectory = {} + var displayDirectoryNamesByDirectory = {}; + var directories = []; + + // Include a special option for all tests + testsByDirectory[ALL_DIRECTORY_PATH] = tests; + displayDirectoryNamesByDirectory[ALL_DIRECTORY_PATH] = 'all'; + directories.push(ALL_DIRECTORY_PATH); + + // Roll up tests by ancestor directories + tests.forEach(function(test) { + var pathPieces = test.split('/'); + var pathDirectories = pathPieces.slice(0, pathPieces.length -1); + var ancestorDirectory = ''; + + pathDirectories.forEach(function(pathDirectory, index) { + ancestorDirectory += pathDirectory + '/'; + if (!(ancestorDirectory in testsByDirectory)) { + testsByDirectory[ancestorDirectory] = []; + var displayDirectoryName = new Array(index * 6).join(' ') + pathDirectory; + displayDirectoryNamesByDirectory[ancestorDirectory] = displayDirectoryName; + directories.push(ancestorDirectory); + } + + testsByDirectory[ancestorDirectory].push(test); + }); + }); + + directories.sort(); + + var directorySelector = $('directory-selector'); + directorySelector.innerHTML = ''; + + directories.forEach(function(directory) { + var directoryOption = document.createElement('option'); + directoryOption.value = directory; + directoryOption.innerHTML = + displayDirectoryNamesByDirectory[directory] + ' - ' + + testsByDirectory[directory].length + ' tests'; + directorySelector.appendChild(directoryOption); + }); + + selectDirectory(); +} + +/** + * For a given failure type and directory and failure type, gets all the tests + * in that directory and populatest the test selector with them. + */ +function selectDirectory() +{ + var previouslySelectedTest = getSelectedTest(); + + var selectedDirectory = getSelectValue('directory-selector'); + selectedTests = testsByDirectory[selectedDirectory]; + selectedTests.sort(); + + var testsByState = {}; + selectedTests.forEach(function(testName) { + var state = results.tests[testName].state; + if (state == STATE_IN_QUEUE) { + state = STATE_NEEDS_REBASELINE; + } + if (!(state in testsByState)) { + testsByState[state] = []; + } + testsByState[state].push(testName); + }); + + var optionIndexByTest = {}; + + var testSelector = $('test-selector'); + testSelector.innerHTML = ''; + + for (var state in testsByState) { + var stateOption = document.createElement('option'); + stateOption.textContent = STATE_TO_DISPLAY_STATE[state]; + stateOption.disabled = true; + testSelector.appendChild(stateOption); + + testsByState[state].forEach(function(testName) { + var testOption = document.createElement('option'); + testOption.value = testName; + var testDisplayName = testName; + if (testName.lastIndexOf(selectedDirectory) == 0) { + testDisplayName = testName.substring(selectedDirectory.length); + } + testOption.innerHTML = ' ' + testDisplayName; + optionIndexByTest[testName] = testSelector.options.length; + testSelector.appendChild(testOption); + }); + } + + if (previouslySelectedTest in optionIndexByTest) { + testSelector.selectedIndex = optionIndexByTest[previouslySelectedTest]; + } else if (STATE_NEEDS_REBASELINE in testsByState) { + testSelector.selectedIndex = + optionIndexByTest[testsByState[STATE_NEEDS_REBASELINE][0]]; + selectTest(); + } else { + testSelector.selectedIndex = 1; + selectTest(); + } + + selectTest(); +} + +function getSelectedTest() +{ + return getSelectValue('test-selector'); +} + +function selectTest() +{ + var 
selectedTest = getSelectedTest(); + + if (results.tests[selectedTest].actual.indexOf('IMAGE') != -1) { + $('image-outputs').style.display = ''; + displayImageResults(selectedTest); + } else { + $('image-outputs').style.display = 'none'; + } + + if (results.tests[selectedTest].actual.indexOf('TEXT') != -1) { + $('text-outputs').style.display = ''; + displayTextResults(selectedTest); + } else { + $('text-outputs').style.display = 'none'; + } + + var currentBaselines = $('current-baselines'); + currentBaselines.textContent = ''; + var baselines = results.tests[selectedTest].baselines; + var testName = selectedTest.split('.').slice(0, -1).join('.'); + getSortedKeys(baselines).forEach(function(platform, i) { + if (i != 0) { + currentBaselines.appendChild(document.createTextNode('; ')); + } + var platformName = document.createElement('span'); + platformName.className = 'platform'; + platformName.textContent = platform; + currentBaselines.appendChild(platformName); + currentBaselines.appendChild(document.createTextNode(' (')); + getSortedKeys(baselines[platform]).forEach(function(extension, j) { + if (j != 0) { + currentBaselines.appendChild(document.createTextNode(', ')); + } + var link = document.createElement('a'); + var baselinePath = ''; + if (platform != 'base') { + baselinePath += 'platform/' + platform + '/'; + } + baselinePath += testName + '-expected' + extension; + link.href = getTracUrl(baselinePath); + if (extension == '.checksum') { + link.textContent = 'chk'; + } else { + link.textContent = extension.substring(1); + } + link.target = '_blank'; + if (baselines[platform][extension]) { + link.className = 'was-used-for-test'; + } + currentBaselines.appendChild(link); + }); + currentBaselines.appendChild(document.createTextNode(')')); + }); + + updateState(); + loupe.hide(); + + prefetchNextImageTest(); +} + +function prefetchNextImageTest() +{ + var testSelector = $('test-selector'); + if (testSelector.selectedIndex == testSelector.options.length - 1) { + return; + } + var nextTest = testSelector.options[testSelector.selectedIndex + 1].value; + if (results.tests[nextTest].actual.indexOf('IMAGE') != -1) { + new Image().src = getTestResultUrl(nextTest, 'expected-image'); + new Image().src = getTestResultUrl(nextTest, 'actual-image'); + } +} + +function updateState() +{ + var testName = getSelectedTest(); + var testIndex = selectedTests.indexOf(testName); + var testCount = selectedTests.length + $('test-index').textContent = testIndex + 1; + $('test-count').textContent = testCount; + + $('next-test').disabled = testIndex == testCount - 1; + $('previous-test').disabled = testIndex == 0; + + $('test-link').href = getTracUrl(testName); + + var state = results.tests[testName].state; + $('state').className = state; + $('state').innerHTML = STATE_TO_DISPLAY_STATE[state]; + + queue.updateState(); +} + +function getTestResultUrl(testName, mode) +{ + return '/test_result?test=' + testName + '&mode=' + mode; +} + +var currentExpectedImageTest; +var currentActualImageTest; + +function displayImageResults(testName) +{ + if (currentExpectedImageTest == currentActualImageTest + && currentExpectedImageTest == testName) { + return; + } + + function displayImageResult(mode, callback) { + var image = $(mode); + image.className = 'loading'; + image.src = getTestResultUrl(testName, mode); + image.onload = function() { + image.className = ''; + callback(); + updateImageDiff(); + }; + } + + displayImageResult( + 'expected-image', + function() { currentExpectedImageTest = testName; }); + displayImageResult( + 
'actual-image', + function() { currentActualImageTest = testName; }); + + $('diff-canvas').className = 'loading'; + $('diff-canvas').style.display = ''; + $('diff-checksum').style.display = 'none'; +} + +/** + * Computes a graphical a diff between the expected and actual images by + * rendering each to a canvas, getting the image data, and comparing the RGBA + * components of each pixel. The output is put into the diff canvas, with + * identical pixels appearing at 12.5% opacity and different pixels being + * highlighted in red. + */ +function updateImageDiff() { + if (currentExpectedImageTest != currentActualImageTest) + return; + + var expectedImage = $('expected-image'); + var actualImage = $('actual-image'); + + function getImageData(image) { + var imageCanvas = document.createElement('canvas'); + imageCanvas.width = image.width; + imageCanvas.height = image.height; + imageCanvasContext = imageCanvas.getContext('2d'); + + imageCanvasContext.fillStyle = 'rgba(255, 255, 255, 1)'; + imageCanvasContext.fillRect( + 0, 0, image.width, image.height); + + imageCanvasContext.drawImage(image, 0, 0); + return imageCanvasContext.getImageData( + 0, 0, image.width, image.height); + } + + var expectedImageData = getImageData(expectedImage); + var actualImageData = getImageData(actualImage); + + var diffCanvas = $('diff-canvas'); + var diffCanvasContext = diffCanvas.getContext('2d'); + var diffImageData = + diffCanvasContext.createImageData(diffCanvas.width, diffCanvas.height); + + // Avoiding property lookups for all these during the per-pixel loop below + // provides a significant performance benefit. + var expectedWidth = expectedImage.width; + var expectedHeight = expectedImage.height; + var expected = expectedImageData.data; + + var actualWidth = actualImage.width; + var actual = actualImageData.data; + + var diffWidth = diffImageData.width; + var diff = diffImageData.data; + + var hadDiff = false; + for (var x = 0; x < expectedWidth; x++) { + for (var y = 0; y < expectedHeight; y++) { + var expectedOffset = (y * expectedWidth + x) * 4; + var actualOffset = (y * actualWidth + x) * 4; + var diffOffset = (y * diffWidth + x) * 4; + if (expected[expectedOffset] != actual[actualOffset] || + expected[expectedOffset + 1] != actual[actualOffset + 1] || + expected[expectedOffset + 2] != actual[actualOffset + 2] || + expected[expectedOffset + 3] != actual[actualOffset + 3]) { + hadDiff = true; + diff[diffOffset] = 255; + diff[diffOffset + 1] = 0; + diff[diffOffset + 2] = 0; + diff[diffOffset + 3] = 255; + } else { + diff[diffOffset] = expected[expectedOffset]; + diff[diffOffset + 1] = expected[expectedOffset + 1]; + diff[diffOffset + 2] = expected[expectedOffset + 2]; + diff[diffOffset + 3] = 32; + } + } + } + + diffCanvasContext.putImageData( + diffImageData, + 0, 0, + 0, 0, + diffImageData.width, diffImageData.height); + diffCanvas.className = ''; + + if (!hadDiff) { + diffCanvas.style.display = 'none'; + $('diff-checksum').style.display = ''; + loadTextResult(currentExpectedImageTest, 'expected-checksum'); + loadTextResult(currentExpectedImageTest, 'actual-checksum'); + } +} + +function loadTextResult(testName, mode, responseIsHtml) +{ + loadText(getTestResultUrl(testName, mode), function(text) { + if (responseIsHtml) { + $(mode).innerHTML = text; + } else { + $(mode).textContent = text; + } + }); +} + +function displayTextResults(testName) +{ + loadTextResult(testName, 'expected-text'); + loadTextResult(testName, 'actual-text'); + loadTextResult(testName, 'diff-text-pretty', true); +} + +function 
nextTest() +{ + var testSelector = $('test-selector'); + var nextTestIndex = testSelector.selectedIndex + 1; + while (true) { + if (nextTestIndex == testSelector.options.length) { + return; + } + if (testSelector.options[nextTestIndex].disabled) { + nextTestIndex++; + } else { + testSelector.selectedIndex = nextTestIndex; + selectTest(); + return; + } + } +} + +function previousTest() +{ + var testSelector = $('test-selector'); + var previousTestIndex = testSelector.selectedIndex - 1; + while (true) { + if (previousTestIndex == -1) { + return; + } + if (testSelector.options[previousTestIndex].disabled) { + previousTestIndex--; + } else { + testSelector.selectedIndex = previousTestIndex; + selectTest(); + return + } + } +} + +window.addEventListener('DOMContentLoaded', main); diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/queue.js b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/queue.js new file mode 100644 index 0000000..338e28f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/queue.js @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +function RebaselineQueue() +{ + this._selectNode = $('queue-select'); + this._rebaselineButtonNode = $('rebaseline-queue'); + this._toggleNode = $('toggle-queue'); + this._removeSelectionButtonNode = $('remove-queue-selection'); + + this._inProgressRebaselineCount = 0; + + var self = this; + $('add-to-rebaseline-queue').addEventListener( + 'click', function() { self.addCurrentTest(); }); + this._selectNode.addEventListener('change', updateState); + this._removeSelectionButtonNode.addEventListener( + 'click', function() { self._removeSelection(); }); + this._rebaselineButtonNode.addEventListener( + 'click', function() { self.rebaseline(); }); + this._toggleNode.addEventListener( + 'click', function() { toggle('queue'); }); +} + +RebaselineQueue.prototype.updateState = function() +{ + var testName = getSelectedTest(); + + var state = results.tests[testName].state; + $('add-to-rebaseline-queue').disabled = state != STATE_NEEDS_REBASELINE; + + var queueLength = this._selectNode.options.length; + if (this._inProgressRebaselineCount > 0) { + this._rebaselineButtonNode.disabled = true; + this._rebaselineButtonNode.textContent = + 'Rebaseline in progress (' + this._inProgressRebaselineCount + + ' tests left)'; + } else if (queueLength == 0) { + this._rebaselineButtonNode.disabled = true; + this._rebaselineButtonNode.textContent = 'Rebaseline queue'; + this._toggleNode.textContent = 'Queue'; + } else { + this._rebaselineButtonNode.disabled = false; + this._rebaselineButtonNode.textContent = + 'Rebaseline queue (' + queueLength + ' tests)'; + this._toggleNode.textContent = 'Queue (' + queueLength + ' tests)'; + } + this._removeSelectionButtonNode.disabled = + this._selectNode.selectedIndex == -1; +}; + +RebaselineQueue.prototype.addCurrentTest = function() +{ + var testName = getSelectedTest(); + var test = results.tests[testName]; + + if (test.state != STATE_NEEDS_REBASELINE) { + log('Cannot add test with state "' + test.state + '" to queue.', + log.WARNING); + return; + } + + var queueOption = document.createElement('option'); + queueOption.value = testName; + queueOption.textContent = testName; + this._selectNode.appendChild(queueOption); + test.state = STATE_IN_QUEUE; + updateState(); +}; + +RebaselineQueue.prototype.removeCurrentTest = function() +{ + this._removeTest(getSelectedTest()); +}; + +RebaselineQueue.prototype._removeSelection = function() +{ + if (this._selectNode.selectedIndex == -1) + return; + + this._removeTest( + this._selectNode.options[this._selectNode.selectedIndex].value); +}; + +RebaselineQueue.prototype._removeTest = function(testName) +{ + var queueOption = this._selectNode.firstChild; + + while (queueOption && queueOption.value != testName) { + queueOption = queueOption.nextSibling; + } + + if (!queueOption) + return; + + this._selectNode.removeChild(queueOption); + var test = results.tests[testName]; + test.state = STATE_NEEDS_REBASELINE; + updateState(); +}; + +RebaselineQueue.prototype.rebaseline = function() +{ + var testNames = []; + for (var queueOption = this._selectNode.firstChild; + queueOption; + queueOption = queueOption.nextSibling) { + testNames.push(queueOption.value); + } + + this._inProgressRebaselineCount = testNames.length; + updateState(); + + testNames.forEach(this._rebaselineTest, this); +}; + +RebaselineQueue.prototype._rebaselineTest = function(testName) +{ + var baselineTarget = getSelectValue('baseline-target'); + var baselineMoveTo = getSelectValue('baseline-move-to'); + + var xhr = new XMLHttpRequest(); + xhr.open('POST', + 
'/rebaseline?test=' + encodeURIComponent(testName) + + '&baseline-target=' + encodeURIComponent(baselineTarget) + + '&baseline-move-to=' + encodeURIComponent(baselineMoveTo)); + + var self = this; + function handleResponse(logType, newState) { + log(xhr.responseText, logType); + self._removeTest(testName); + self._inProgressRebaselineCount--; + results.tests[testName].state = newState; + updateState(); + // If we're done with a set of rebaselines, regenerate the test menu + // (which is grouped by state) since test states have changed. + if (self._inProgressRebaselineCount == 0) { + selectDirectory(); + } + } + + function handleSuccess() { + handleResponse(log.SUCCESS, STATE_REBASELINE_SUCCEEDED); + } + function handleFailure() { + handleResponse(log.ERROR, STATE_REBASELINE_FAILED); + } + + xhr.addEventListener('load', function() { + if (xhr.status < 400) { + handleSuccess(); + } else { + handleFailure(); + } + }); + xhr.addEventListener('error', handleFailure); + + xhr.send(); +}; diff --git a/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/util.js b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/util.js new file mode 100644 index 0000000..5ad7612 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/data/rebaselineserver/util.js @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +var results; +var testsByFailureType = {}; +var testsByDirectory = {}; +var selectedTests = []; + +function $(id) +{ + return document.getElementById(id); +} + +function getSelectValue(id) +{ + var select = $(id); + if (select.selectedIndex == -1) { + return null; + } else { + return select.options[select.selectedIndex].value; + } +} + +function loadText(url, callback) +{ + var xhr = new XMLHttpRequest(); + xhr.open('GET', url); + xhr.addEventListener('load', function() { callback(xhr.responseText); }); + xhr.send(); +} + +function log(text, type) +{ + var node = $('log'); + + if (type) { + var typeNode = document.createElement('span'); + typeNode.textContent = type.text; + typeNode.style.color = type.color; + node.appendChild(typeNode); + } + + node.appendChild(document.createTextNode(text + '\n')); + node.scrollTop = node.scrollHeight; +} + +log.WARNING = {text: 'Warning: ', color: '#aa3'}; +log.SUCCESS = {text: 'Success: ', color: 'green'}; +log.ERROR = {text: 'Error: ', color: 'red'}; + +function toggle(id) +{ + var element = $(id); + var toggler = $('toggle-' + id); + if (element.style.display == 'none') { + element.style.display = ''; + toggler.className = 'link selected'; + } else { + element.style.display = 'none'; + toggler.className = 'link'; + } +} + +function getTracUrl(layoutTestPath) +{ + return 'http://trac.webkit.org/browser/trunk/LayoutTests/' + layoutTestPath; +} + +function getSortedKeys(obj) +{ + var keys = []; + for (var key in obj) { + keys.push(key); + } + keys.sort(); + return keys; +}
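updateImageDiff() in main.js above compares the expected and actual images pixel by pixel: any RGBA mismatch is written into the diff canvas as opaque red, while matching pixels are copied through at alpha 32 (roughly 12.5% opacity). A minimal Python sketch of the same comparison over flat RGBA byte sequences, assuming for simplicity that both images share the expected image's dimensions, as the JavaScript does (hypothetical helper, not part of the patch):

    def diff_rgba(expected, actual, width, height):
        # Returns (diff_pixels, had_diff) for two same-sized flat RGBA buffers.
        diff = bytearray(len(expected))
        had_diff = False
        for y in range(height):
            for x in range(width):
                i = (y * width + x) * 4
                if expected[i:i + 4] != actual[i:i + 4]:
                    had_diff = True
                    diff[i:i + 4] = bytes([255, 0, 0, 255])   # highlight the difference in red
                else:
                    diff[i:i + 3] = expected[i:i + 3]         # keep the pixel colour...
                    diff[i + 3] = 32                          # ...at ~12.5% opacity
        return diff, had_diff

    # Two 1x1 images differing only in the green channel produce one red diff pixel.
    pixels, changed = diff_rgba(bytes([0, 0, 0, 255]), bytes([0, 9, 0, 255]), 1, 1)
    assert changed and bytes(pixels) == bytes([255, 0, 0, 255])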
\ No newline at end of file diff --git a/Tools/Scripts/webkitpy/tool/commands/download.py b/Tools/Scripts/webkitpy/tool/commands/download.py new file mode 100644 index 0000000..020f339 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/download.py @@ -0,0 +1,405 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
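For each queued test, _rebaselineTest() in queue.js above issues a POST to the local rebaseline server carrying the test name, the target platform, and the platform to move the old baseline to, and treats any HTTP status below 400 as success. A rough Python equivalent of that request (a sketch only: the server address and the surrounding error handling are assumptions, and the real client is the JavaScript above):

    from urllib.error import HTTPError
    from urllib.parse import urlencode
    from urllib.request import urlopen

    def rebaseline_test(server_url, test_name, baseline_target, baseline_move_to):
        query = urlencode({
            'test': test_name,
            'baseline-target': baseline_target,
            'baseline-move-to': baseline_move_to,
        })
        try:
            # The JavaScript sends a POST with no body; data=b'' does the same here.
            with urlopen('%s/rebaseline?%s' % (server_url, query), data=b'') as response:
                return True, response.read().decode()
        except HTTPError as error:
            # urlopen raises for status >= 400, mirroring the handleFailure() path.
            return False, error.read().decode()

The handleResponse() callback above then logs the response body and flips the test's state to rebaseline_succeeded or rebaseline_failed, regenerating the test menu once the queue drains.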
+ +import os + +import webkitpy.tool.steps as steps + +from webkitpy.common.checkout.changelog import ChangeLog +from webkitpy.common.config import urls +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand +from webkitpy.tool.commands.stepsequence import StepSequence +from webkitpy.tool.comments import bug_comment_from_commit_text +from webkitpy.tool.grammar import pluralize +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.common.system.deprecated_logging import error, log + + +class Clean(AbstractSequencedCommand): + name = "clean" + help_text = "Clean the working copy" + steps = [ + steps.CleanWorkingDirectory, + ] + + def _prepare_state(self, options, args, tool): + options.force_clean = True + + +class Update(AbstractSequencedCommand): + name = "update" + help_text = "Update working copy (used internally)" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + ] + + +class Build(AbstractSequencedCommand): + name = "build" + help_text = "Update working copy and build" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.Build, + ] + + def _prepare_state(self, options, args, tool): + options.build = True + + +class BuildAndTest(AbstractSequencedCommand): + name = "build-and-test" + help_text = "Update working copy, build, and run the tests" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.Build, + steps.RunTests, + ] + + +class Land(AbstractSequencedCommand): + name = "land" + help_text = "Land the current working directory diff and updates the associated bug if any" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.EnsureBuildersAreGreen, + steps.UpdateChangeLogsWithReviewer, + steps.ValidateReviewer, + steps.Build, + steps.RunTests, + steps.Commit, + steps.CloseBugForLandDiff, + ] + long_help = """land commits the current working copy diff (just as svn or git commit would). +land will NOT build and run the tests before committing, but you can use the --build option for that. +If a bug id is provided, or one can be found in the ChangeLog land will update the bug after committing.""" + + def _prepare_state(self, options, args, tool): + changed_files = self._tool.scm().changed_files(options.git_commit) + return { + "changed_files": changed_files, + "bug_id": (args and args[0]) or tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files), + } + + +class LandCowboy(AbstractSequencedCommand): + name = "land-cowboy" + help_text = "Prepares a ChangeLog and lands the current working directory diff." + steps = [ + steps.PrepareChangeLog, + steps.EditChangeLog, + steps.ConfirmDiff, + steps.Build, + steps.RunTests, + steps.Commit, + ] + + +class AbstractPatchProcessingCommand(AbstractDeclarativeCommand): + # Subclasses must implement the methods below. We don't declare them here + # because we want to be able to implement them with mix-ins. + # + # def _fetch_list_of_patches_to_process(self, options, args, tool): + # def _prepare_to_process(self, options, args, tool): + + @staticmethod + def _collect_patches_by_bug(patches): + bugs_to_patches = {} + for patch in patches: + bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch] + return bugs_to_patches + + def execute(self, options, args, tool): + self._prepare_to_process(options, args, tool) + patches = self._fetch_list_of_patches_to_process(options, args, tool) + + # It's nice to print out total statistics. 
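The commands defined above (Clean, Update, Build, BuildAndTest, Land, LandCowboy) are essentially named, ordered lists of steps; the shared sequencing machinery runs each step in turn against common options and state via run_and_handle_errors. A much-simplified Python sketch of that pattern, with hypothetical step classes standing in for webkitpy.tool.steps (the real StepSequence also threads the tool and options through every step):

    class Step(object):
        def run(self, state):
            raise NotImplementedError

    class CleanWorkingDirectory(Step):
        def run(self, state):
            print('cleaning working directory')

    class Update(Step):
        def run(self, state):
            print('updating working copy')

    class Build(Step):
        def run(self, state):
            print('building WebKit')

    class SequencedCommand(object):
        steps = []   # subclasses list their step classes in execution order

        def execute(self, state=None):
            state = state if state is not None else {}
            for step_class in self.steps:
                step_class().run(state)
            return state

    class BuildCommand(SequencedCommand):
        steps = [CleanWorkingDirectory, Update, Build]

    BuildCommand().execute()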
+ bugs_to_patches = self._collect_patches_by_bug(patches) + log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches)))) + + for patch in patches: + self._process_patch(patch, options, args, tool) + + +class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand): + prepare_steps = None + main_steps = None + + def __init__(self): + options = [] + self._prepare_sequence = StepSequence(self.prepare_steps) + self._main_sequence = StepSequence(self.main_steps) + options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options())) + AbstractPatchProcessingCommand.__init__(self, options) + + def _prepare_to_process(self, options, args, tool): + self._prepare_sequence.run_and_handle_errors(tool, options) + + def _process_patch(self, patch, options, args, tool): + state = { "patch" : patch } + self._main_sequence.run_and_handle_errors(tool, options, state) + + +class ProcessAttachmentsMixin(object): + def _fetch_list_of_patches_to_process(self, options, args, tool): + return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args) + + +class ProcessBugsMixin(object): + def _fetch_list_of_patches_to_process(self, options, args, tool): + all_patches = [] + for bug_id in args: + patches = tool.bugs.fetch_bug(bug_id).reviewed_patches() + log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id)) + all_patches += patches + return all_patches + + +class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin): + name = "check-style" + help_text = "Run check-webkit-style on the specified attachments" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.CheckStyle, + ] + + +class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin): + name = "build-attachment" + help_text = "Apply and build patches from bugzilla" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.Build, + ] + + +class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin): + name = "build-and-test-attachment" + help_text = "Apply, build, and test patches from bugzilla" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.Build, + steps.RunTests, + ] + + +class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand): + prepare_steps = [ + steps.EnsureLocalCommitIfNeeded, + steps.CleanWorkingDirectoryWithLocalCommits, + steps.Update, + ] + main_steps = [ + steps.ApplyPatchWithLocalCommit, + ] + long_help = """Updates the working copy. 
+Downloads and applies the patches, creating local commits if necessary.""" + + +class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin): + name = "apply-attachment" + help_text = "Apply an attachment to the local working directory" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + show_in_main_help = True + + +class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin): + name = "apply-from-bug" + help_text = "Apply reviewed patches from provided bugs to the local working directory" + argument_names = "BUGID [BUGIDS]" + show_in_main_help = True + + +class AbstractPatchLandingCommand(AbstractPatchSequencingCommand): + prepare_steps = [ + steps.EnsureBuildersAreGreen, + ] + main_steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.ApplyPatch, + steps.ValidateReviewer, + steps.Build, + steps.RunTests, + steps.Commit, + steps.ClosePatch, + steps.CloseBug, + ] + long_help = """Checks to make sure builders are green. +Updates the working copy. +Applies the patch. +Builds. +Runs the layout tests. +Commits the patch. +Clears the flags on the patch. +Closes the bug if no patches are marked for review.""" + + +class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin): + name = "land-attachment" + help_text = "Land patches from bugzilla, optionally building and testing them first" + argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]" + show_in_main_help = True + + +class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin): + name = "land-from-bug" + help_text = "Land all patches on the given bugs, optionally building and testing them first" + argument_names = "BUGID [BUGIDS]" + show_in_main_help = True + + +class AbstractRolloutPrepCommand(AbstractSequencedCommand): + argument_names = "REVISION [REVISIONS] REASON" + + def _commit_info(self, revision): + commit_info = self._tool.checkout().commit_info_for_revision(revision) + if commit_info and commit_info.bug_id(): + # Note: Don't print a bug URL here because it will confuse the + # SheriffBot because the SheriffBot just greps the output + # of create-rollout for bug URLs. It should do better + # parsing instead. + log("Preparing rollout for bug %s." % commit_info.bug_id()) + else: + log("Unable to parse bug number from diff.") + return commit_info + + def _prepare_state(self, options, args, tool): + revision_list = [] + for revision in str(args[0]).split(): + if revision.isdigit(): + revision_list.append(int(revision)) + else: + raise ScriptError(message="Invalid svn revision number: " + revision) + revision_list.sort() + + # We use the earliest revision for the bug info + earliest_revision = revision_list[0] + commit_info = self._commit_info(earliest_revision) + cc_list = sorted([party.bugzilla_email() + for party in commit_info.responsible_parties() + if party.bugzilla_email()]) + return { + "revision": earliest_revision, + "revision_list": revision_list, + "bug_id": commit_info.bug_id(), + # FIXME: We should used the list as the canonical representation. + "bug_cc": ",".join(cc_list), + "reason": args[1], + } + + +class PrepareRollout(AbstractRolloutPrepCommand): + name = "prepare-rollout" + help_text = "Revert the given revision(s) in the working copy and prepare ChangeLogs with revert reason" + long_help = """Updates the working copy. +Applies the inverse diff for the provided revision(s). +Creates an appropriate rollout ChangeLog, including a trac link and bug link. 
+""" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.RevertRevision, + steps.PrepareChangeLogForRevert, + ] + + +class CreateRollout(AbstractRolloutPrepCommand): + name = "create-rollout" + help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch." + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.RevertRevision, + steps.CreateBug, + steps.PrepareChangeLogForRevert, + steps.PostDiffForRevert, + ] + + def _prepare_state(self, options, args, tool): + state = AbstractRolloutPrepCommand._prepare_state(self, options, args, tool) + # Currently, state["bug_id"] points to the bug that caused the + # regression. We want to create a new bug that blocks the old bug + # so we move state["bug_id"] to state["bug_blocked"] and delete the + # old state["bug_id"] so that steps.CreateBug will actually create + # the new bug that we want (and subsequently store its bug id into + # state["bug_id"]) + state["bug_blocked"] = state["bug_id"] + del state["bug_id"] + state["bug_title"] = "REGRESSION(r%s): %s" % (state["revision"], state["reason"]) + state["bug_description"] = "%s broke the build:\n%s" % (urls.view_revision_url(state["revision"]), state["reason"]) + # FIXME: If we had more context here, we could link to other open bugs + # that mention the test that regressed. + if options.parent_command == "sheriff-bot": + state["bug_description"] += """ + +This is an automatic bug report generated by the sheriff-bot. If this bug +report was created because of a flaky test, please file a bug for the flaky +test (if we don't already have one on file) and dup this bug against that bug +so that we can track how often these flaky tests case pain. + +"Only you can prevent forest fires." -- Smokey the Bear +""" + return state + + +class Rollout(AbstractRolloutPrepCommand): + name = "rollout" + show_in_main_help = True + help_text = "Revert the given revision(s) in the working copy and optionally commit the revert and re-open the original bug" + long_help = """Updates the working copy. +Applies the inverse diff for the provided revision. +Creates an appropriate rollout ChangeLog, including a trac link and bug link. +Opens the generated ChangeLogs in $EDITOR. +Shows the prepared diff for confirmation. +Commits the revert and updates the bug (including re-opening the bug if necessary).""" + steps = [ + steps.CleanWorkingDirectory, + steps.Update, + steps.RevertRevision, + steps.PrepareChangeLogForRevert, + steps.EditChangeLog, + steps.ConfirmDiff, + steps.Build, + steps.Commit, + steps.ReopenBugAfterRollout, + ] diff --git a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py new file mode 100644 index 0000000..3748a8f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py @@ -0,0 +1,206 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.commandtest import CommandsTest +from webkitpy.tool.commands.download import * +from webkitpy.tool.mocktool import MockCheckout, MockOptions, MockTool + + +class AbstractRolloutPrepCommandTest(unittest.TestCase): + def test_commit_info(self): + command = AbstractRolloutPrepCommand() + tool = MockTool() + command.bind_to_tool(tool) + output = OutputCapture() + + expected_stderr = "Preparing rollout for bug 42.\n" + commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr) + self.assertTrue(commit_info) + + mock_commit_info = Mock() + mock_commit_info.bug_id = lambda: None + tool._checkout.commit_info_for_revision = lambda revision: mock_commit_info + expected_stderr = "Unable to parse bug number from diff.\n" + commit_info = output.assert_outputs(self, command._commit_info, [1234], expected_stderr=expected_stderr) + self.assertEqual(commit_info, mock_commit_info) + + def test_prepare_state(self): + command = AbstractRolloutPrepCommand() + mock_commit_info = MockCheckout().commit_info_for_revision(123) + command._commit_info = lambda revision: mock_commit_info + + state = command._prepare_state(None, ["124 123 125", "Reason"], None) + self.assertEqual(123, state["revision"]) + self.assertEqual([123, 124, 125], state["revision_list"]) + + self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 r122 123", "Reason"], tool=None) + self.assertRaises(ScriptError, command._prepare_state, options=None, args=["125 foo 123", "Reason"], tool=None) + + +class DownloadCommandsTest(CommandsTest): + def _default_options(self): + options = MockOptions() + options.build = True + options.build_style = True + options.check_builders = True + options.check_style = True + options.clean = True + options.close_bug = True + options.force_clean = False + options.force_patch = True + options.non_interactive = False + options.parent_command = 'MOCK parent command' + options.quiet = False + options.test = True + options.update = True + return options + + def test_build(self): + expected_stderr = "Updating working directory\nBuilding WebKit\n" + self.assert_execute_outputs(Build(), [], options=self._default_options(), expected_stderr=expected_stderr) + + def test_build_and_test(self): + expected_stderr = "Updating working directory\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n" + 
self.assert_execute_outputs(BuildAndTest(), [], options=self._default_options(), expected_stderr=expected_stderr) + + def test_apply_attachment(self): + options = self._default_options() + options.update = True + options.local_commit = True + expected_stderr = "Updating working directory\nProcessing 1 patch from 1 bug.\nProcessing patch 197 from bug 42.\n" + self.assert_execute_outputs(ApplyAttachment(), [197], options=options, expected_stderr=expected_stderr) + + def test_apply_patches(self): + options = self._default_options() + options.update = True + options.local_commit = True + expected_stderr = "Updating working directory\n2 reviewed patches found on bug 42.\nProcessing 2 patches from 1 bug.\nProcessing patch 197 from bug 42.\nProcessing patch 128 from bug 42.\n" + self.assert_execute_outputs(ApplyFromBug(), [42], options=options, expected_stderr=expected_stderr) + + def test_land_diff(self): + expected_stderr = "Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 42\n" + mock_tool = MockTool() + mock_tool.scm().create_patch = Mock() + mock_tool.checkout().modified_changelogs = Mock(return_value=[]) + self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool) + # Make sure we're not calling expensive calls too often. + self.assertEqual(mock_tool.scm().create_patch.call_count, 0) + self.assertEqual(mock_tool.checkout().modified_changelogs.call_count, 1) + + def test_land_red_builders(self): + expected_stderr = '\nWARNING: Builders ["Builder2"] are red, please watch your commit carefully.\nSee http://dummy_buildbot_host/console?category=core\n\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\nUpdating bug 42\n' + mock_tool = MockTool() + mock_tool.buildbot.light_tree_on_fire() + self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool) + + def test_check_style(self): + expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nRunning check-webkit-style\n" + self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_build_attachment(self): + expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nBuilding WebKit\n" + self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_land_attachment(self): + # FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags. + expected_stderr = """Processing 1 patch from 1 bug. +Updating working directory +Processing patch 197 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Committed r49824: <http://trac.webkit.org/changeset/49824> +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. 
+""" + self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr) + + def test_land_patches(self): + # FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags. + expected_stderr = """2 reviewed patches found on bug 42. +Processing 2 patches from 1 bug. +Updating working directory +Processing patch 197 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Committed r49824: <http://trac.webkit.org/changeset/49824> +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. +Updating working directory +Processing patch 128 from bug 42. +Building WebKit +Running Python unit tests +Running Perl unit tests +Running JavaScriptCore tests +Running run-webkit-tests +Committed r49824: <http://trac.webkit.org/changeset/49824> +Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug. +""" + self.assert_execute_outputs(LandFromBug(), [42], options=self._default_options(), expected_stderr=expected_stderr) + + def test_prepare_rollout(self): + expected_stderr = "Preparing rollout for bug 42.\nUpdating working directory\nRunning prepare-ChangeLog\n" + self.assert_execute_outputs(PrepareRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr) + + def test_create_rollout(self): + expected_stderr = """Preparing rollout for bug 42. +Updating working directory +MOCK create_bug +bug_title: REGRESSION(r852): Reason +bug_description: http://trac.webkit.org/changeset/852 broke the build: +Reason +component: MOCK component +cc: MOCK cc +blocked: 42 +Running prepare-ChangeLog +MOCK add_patch_to_bug: bug_id=78, description=ROLLOUT of r852, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False +-- Begin comment -- +Any committer can land this patch automatically by marking it commit-queue+. The commit-queue will build and test the patch before landing to ensure that the rollout will be successful. This process takes approximately 15 minutes. + +If you would like to land the rollout faster, you can use the following command: + + webkit-patch land-attachment ATTACHMENT_ID --ignore-builders + +where ATTACHMENT_ID is the ID of this attachment. +-- End comment -- +""" + self.assert_execute_outputs(CreateRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr) + self.assert_execute_outputs(CreateRollout(), ["855 852 854", "Reason"], options=self._default_options(), expected_stderr=expected_stderr) + + def test_rollout(self): + expected_stderr = "Preparing rollout for bug 42.\nUpdating working directory\nRunning prepare-ChangeLog\nMOCK: user.open_url: file://...\nBuilding WebKit\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\n" + expected_stdout = "Was that diff correct?\n" + self.assert_execute_outputs(Rollout(), [852, "Reason"], options=self._default_options(), expected_stdout=expected_stdout, expected_stderr=expected_stderr) + diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py new file mode 100644 index 0000000..3b53d1a --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py @@ -0,0 +1,182 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.commands.queues import AbstractReviewQueue +from webkitpy.common.config.committers import CommitterList +from webkitpy.common.config.ports import WebKitPort +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.bot.queueengine import QueueEngine + + +class AbstractEarlyWarningSystem(AbstractReviewQueue): + _build_style = "release" + + def __init__(self): + AbstractReviewQueue.__init__(self) + self.port = WebKitPort.port(self.port_name) + + def should_proceed_with_work_item(self, patch): + return True + + def _can_build(self): + try: + self.run_webkit_patch([ + "build", + self.port.flag(), + "--build-style=%s" % self._build_style, + "--force-clean", + "--no-update"]) + return True + except ScriptError, e: + failure_log = self._log_from_script_error_for_upload(e) + self._update_status("Unable to perform a build", results_file=failure_log) + return False + + def _build(self, patch, first_run=False): + try: + args = [ + "build-attachment", + self.port.flag(), + "--build", + "--build-style=%s" % self._build_style, + "--force-clean", + "--quiet", + "--non-interactive", + patch.id()] + if not first_run: + # See commit-queue for an explanation of what we're doing here. + args.append("--no-update") + args.append("--parent-command=%s" % self.name) + self.run_webkit_patch(args) + return True + except ScriptError, e: + if first_run: + return False + raise + + def review_patch(self, patch): + if patch.is_obsolete(): + self._did_error(patch, "%s does not process obsolete patches." % self.name) + return False + + if patch.bug().is_closed(): + self._did_error(patch, "%s does not process patches on closed bugs." 
% self.name) + return False + + if not self._build(patch, first_run=True): + if not self._can_build(): + return False + self._build(patch) + return True + + @classmethod + def handle_script_error(cls, tool, state, script_error): + is_svn_apply = script_error.command_name() == "svn-apply" + status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply) + if is_svn_apply: + QueueEngine.exit_after_handled_error(script_error) + results_link = tool.status_server.results_url_for_status(status_id) + message = "Attachment %s did not build on %s:\nBuild output: %s" % (state["patch"].id(), cls.port_name, results_link) + tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers) + exit(1) + + +class GtkEWS(AbstractEarlyWarningSystem): + name = "gtk-ews" + port_name = "gtk" + watchers = AbstractEarlyWarningSystem.watchers + [ + "gns@gnome.org", + "xan.lopez@gmail.com", + ] + + +class EflEWS(AbstractEarlyWarningSystem): + name = "efl-ews" + port_name = "efl" + watchers = AbstractEarlyWarningSystem.watchers + [ + "leandro@profusion.mobi", + "antognolli@profusion.mobi", + "lucas.demarchi@profusion.mobi", + ] + + +class QtEWS(AbstractEarlyWarningSystem): + name = "qt-ews" + port_name = "qt" + + +class WinEWS(AbstractEarlyWarningSystem): + name = "win-ews" + port_name = "win" + # Use debug, the Apple Win port fails to link Release on 32-bit Windows. + # https://bugs.webkit.org/show_bug.cgi?id=39197 + _build_style = "debug" + + +class AbstractChromiumEWS(AbstractEarlyWarningSystem): + port_name = "chromium" + watchers = AbstractEarlyWarningSystem.watchers + [ + "dglazkov@chromium.org", + ] + + +class ChromiumLinuxEWS(AbstractChromiumEWS): + # FIXME: We should rename this command to cr-linux-ews, but that requires + # a database migration. :( + name = "chromium-ews" + + +class ChromiumWindowsEWS(AbstractChromiumEWS): + name = "cr-win-ews" + + +# For platforms that we can't run inside a VM (like Mac OS X), we require +# patches to be uploaded by committers, who are generally trustworthy folk. :) +class AbstractCommitterOnlyEWS(AbstractEarlyWarningSystem): + def __init__(self, committers=CommitterList()): + AbstractEarlyWarningSystem.__init__(self) + self._committers = committers + + def process_work_item(self, patch): + if not self._committers.committer_by_email(patch.attacher_email()): + self._did_error(patch, "%s cannot process patches from non-committers :(" % self.name) + return False + return AbstractEarlyWarningSystem.process_work_item(self, patch) + + +# FIXME: Inheriting from AbstractCommitterOnlyEWS is kinda a hack, but it +# happens to work because AbstractChromiumEWS and AbstractCommitterOnlyEWS +# provide disjoint sets of functionality, and Python is otherwise smart +# enough to handle the diamond inheritance. +class ChromiumMacEWS(AbstractChromiumEWS, AbstractCommitterOnlyEWS): + name = "cr-mac-ews" + + +class MacEWS(AbstractCommitterOnlyEWS): + name = "mac-ews" + port_name = "mac" diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py new file mode 100644 index 0000000..830e11c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py @@ -0,0 +1,132 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
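ChromiumMacEWS above inherits from both AbstractChromiumEWS and AbstractCommitterOnlyEWS, and the comment notes this works because the two bases add disjoint behaviour on top of AbstractEarlyWarningSystem. A stripped-down sketch of how Python's method resolution order linearizes such a diamond (hypothetical classes, illustration only):

    class Base(object):
        def process_work_item(self):
            return ['base processing']

    class PortSpecificEWS(Base):
        port_name = 'chromium'            # contributes configuration only

    class CommitterOnlyEWS(Base):
        def process_work_item(self):      # adds a check, then defers to the base
            return ['committer check'] + Base.process_work_item(self)

    class MacLikeEWS(PortSpecificEWS, CommitterOnlyEWS):
        pass

    # Lookup order: MacLikeEWS -> PortSpecificEWS -> CommitterOnlyEWS -> Base,
    # so the committer check still runs even though PortSpecificEWS never
    # overrides process_work_item().
    assert MacLikeEWS().process_work_item() == ['committer check', 'base processing']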
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.thirdparty.mock import Mock +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.bot.queueengine import QueueEngine +from webkitpy.tool.commands.earlywarningsystem import * +from webkitpy.tool.commands.queuestest import QueuesTest +from webkitpy.tool.mocktool import MockTool, MockOptions + + +class AbstractEarlyWarningSystemTest(QueuesTest): + def test_can_build(self): + # Needed to define port_name, used in AbstractEarlyWarningSystem.__init__ + class TestEWS(AbstractEarlyWarningSystem): + port_name = "win" # Needs to be a port which port/factory understands. 
+ + queue = TestEWS() + queue.bind_to_tool(MockTool(log_executive=True)) + queue._options = MockOptions(port=None) + expected_stderr = "MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--port=win', '--build-style=release', '--force-clean', '--no-update']\n" + OutputCapture().assert_outputs(self, queue._can_build, [], expected_stderr=expected_stderr) + + def mock_run_webkit_patch(args): + raise ScriptError("MOCK script error") + + queue.run_webkit_patch = mock_run_webkit_patch + expected_stderr = "MOCK: update_status: None Unable to perform a build\n" + OutputCapture().assert_outputs(self, queue._can_build, [], expected_stderr=expected_stderr) + + # FIXME: This belongs on an AbstractReviewQueueTest object in queues_unittest.py + def test_subprocess_handled_error(self): + queue = AbstractReviewQueue() + queue.bind_to_tool(MockTool()) + + def mock_review_patch(patch): + raise ScriptError('MOCK script error', exit_code=QueueEngine.handled_error_code) + + queue.review_patch = mock_review_patch + mock_patch = queue._tool.bugs.fetch_attachment(197) + expected_stderr = "MOCK: release_work_item: None 197\n" + OutputCapture().assert_outputs(self, queue.process_work_item, [mock_patch], expected_stderr=expected_stderr, expected_exception=ScriptError) + + +class EarlyWarningSytemTest(QueuesTest): + def test_failed_builds(self): + ews = ChromiumLinuxEWS() + ews.bind_to_tool(MockTool()) + ews._build = lambda patch, first_run=False: False + ews._can_build = lambda: True + mock_patch = ews._tool.bugs.fetch_attachment(197) + ews.review_patch(mock_patch) + + def _default_expected_stderr(self, ews): + string_replacemnts = { + "name": ews.name, + "port": ews.port_name, + "watchers": ews.watchers, + } + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr(ews.name, ews._tool.scm().checkout_root), + "handle_unexpected_error": "Mock error message\n", + "next_work_item": "", + "process_work_item": "MOCK: update_status: %(name)s Pass\nMOCK: release_work_item: %(name)s 197\n" % string_replacemnts, + "handle_script_error": "MOCK: update_status: %(name)s ScriptError error message\nMOCK bug comment: bug_id=42, cc=%(watchers)s\n--- Begin comment ---\nAttachment 197 did not build on %(port)s:\nBuild output: http://dummy_url\n--- End comment ---\n\n" % string_replacemnts, + } + return expected_stderr + + def _test_ews(self, ews): + ews.bind_to_tool(MockTool()) + expected_exceptions = { + "handle_script_error": SystemExit, + } + self.assert_queue_outputs(ews, expected_stderr=self._default_expected_stderr(ews), expected_exceptions=expected_exceptions) + + def _test_committer_only_ews(self, ews): + ews.bind_to_tool(MockTool()) + expected_stderr = self._default_expected_stderr(ews) + string_replacemnts = {"name": ews.name} + expected_stderr["process_work_item"] = "MOCK: update_status: %(name)s Error: %(name)s cannot process patches from non-committers :(\nMOCK: release_work_item: %(name)s 197\n" % string_replacemnts + expected_exceptions = {"handle_script_error": SystemExit} + self.assert_queue_outputs(ews, expected_stderr=expected_stderr, expected_exceptions=expected_exceptions) + + # FIXME: If all EWSes are going to output the same text, we + # could test them all in one method with a for loop over an array. 
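These tests exercise the retry flow implemented by AbstractEarlyWarningSystem above: _build() swallows a failure on the first attempt, review_patch() then asks _can_build() whether a clean tree still builds, and only if it does is the patch rebuilt, so that a repeat failure is attributed to the patch rather than to a broken tree. A condensed Python sketch of that control flow, with hypothetical callables standing in for the run-webkit-patch invocations:

    class BuildFailed(Exception):
        pass

    def _build(run_build, first_run=False):
        try:
            run_build()
            return True
        except BuildFailed:
            if first_run:
                return False   # swallow the first failure; the tree itself may be broken
            raise              # a repeat failure is reported against the patch

    def review_patch(run_build, tree_builds):
        if not _build(run_build, first_run=True):
            if not tree_builds():
                return False       # a clean tree does not build either; skip this patch
            _build(run_build)      # raises BuildFailed if the patch really breaks the build
        return True

    # A patch that always fails to build, on a tree that builds cleanly, raises on the retry.
    def always_fails():
        raise BuildFailed()
    try:
        review_patch(always_fails, tree_builds=lambda: True)
    except BuildFailed:
        pass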
+ def test_chromium_linux_ews(self): + self._test_ews(ChromiumLinuxEWS()) + + def test_chromium_windows_ews(self): + self._test_ews(ChromiumWindowsEWS()) + + def test_qt_ews(self): + self._test_ews(QtEWS()) + + def test_gtk_ews(self): + self._test_ews(GtkEWS()) + + def test_efl_ews(self): + self._test_ews(EflEWS()) + + def test_mac_ews(self): + self._test_committer_only_ews(MacEWS()) + + def test_chromium_mac_ews(self): + self._test_committer_only_ews(ChromiumMacEWS()) diff --git a/Tools/Scripts/webkitpy/tool/commands/openbugs.py b/Tools/Scripts/webkitpy/tool/commands/openbugs.py new file mode 100644 index 0000000..1b51c9f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/openbugs.py @@ -0,0 +1,63 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import re +import sys + +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.common.system.deprecated_logging import log + + +class OpenBugs(AbstractDeclarativeCommand): + name = "open-bugs" + help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser" + + bug_number_regexp = re.compile(r"\b\d{4,6}\b") + + def _open_bugs(self, bug_ids): + for bug_id in bug_ids: + bug_url = self._tool.bugs.bug_url_for_bug_id(bug_id) + self._tool.user.open_url(bug_url) + + # _find_bugs_in_string mostly exists for easy unit testing. + def _find_bugs_in_string(self, string): + return self.bug_number_regexp.findall(string) + + def _find_bugs_in_iterable(self, iterable): + return sum([self._find_bugs_in_string(string) for string in iterable], []) + + def execute(self, options, args, tool): + if args: + bug_ids = self._find_bugs_in_iterable(args) + else: + # This won't open bugs until stdin is closed but could be made to easily. That would just make unit testing slightly harder. + bug_ids = self._find_bugs_in_iterable(sys.stdin) + + log("%s bugs found in input." 
% len(bug_ids)) + + self._open_bugs(bug_ids) diff --git a/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py b/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py new file mode 100644 index 0000000..40a6e1b --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/openbugs_unittest.py @@ -0,0 +1,50 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.commands.commandtest import CommandsTest +from webkitpy.tool.commands.openbugs import OpenBugs + +class OpenBugsTest(CommandsTest): + + find_bugs_in_string_expectations = [ + ["123", []], + ["1234", ["1234"]], + ["12345", ["12345"]], + ["123456", ["123456"]], + ["1234567", []], + [" 123456 234567", ["123456", "234567"]], + ] + + def test_find_bugs_in_string(self): + openbugs = OpenBugs() + for expectation in self.find_bugs_in_string_expectations: + self.assertEquals(openbugs._find_bugs_in_string(expectation[0]), expectation[1]) + + def test_args_parsing(self): + expected_stderr = "2 bugs found in input.\nMOCK: user.open_url: http://example.com/12345\nMOCK: user.open_url: http://example.com/23456\n" + self.assert_execute_outputs(OpenBugs(), ["12345\n23456"], expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/commands/prettydiff.py b/Tools/Scripts/webkitpy/tool/commands/prettydiff.py new file mode 100644 index 0000000..e3fc00c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/prettydiff.py @@ -0,0 +1,38 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand +import webkitpy.tool.steps as steps + + +class PrettyDiff(AbstractSequencedCommand): + name = "pretty-diff" + help_text = "Shows the pretty diff in the default browser" + steps = [ + steps.ConfirmDiff, + ] diff --git a/Tools/Scripts/webkitpy/tool/commands/queries.py b/Tools/Scripts/webkitpy/tool/commands/queries.py new file mode 100644 index 0000000..f04f384 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/queries.py @@ -0,0 +1,389 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
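A quick aside before the queries.py commands below: the open-bugs command above recognizes bug numbers with the pattern \b\d{4,6}\b, that is, standalone runs of four to six digits, which is exactly what openbugs_unittest.py asserts. The snippet repeats the pattern locally so it runs on its own; the cases are copied from find_bugs_in_string_expectations.

# Standalone check of the bug_number_regexp used by OpenBugs above.  The
# pattern is copied from openbugs.py and the cases mirror the unit test.
import re

bug_number_regexp = re.compile(r"\b\d{4,6}\b")

assert bug_number_regexp.findall("123") == []                # too short
assert bug_number_regexp.findall("1234") == ["1234"]
assert bug_number_regexp.findall("1234567") == []            # too long
assert bug_number_regexp.findall(" 123456 234567") == ["123456", "234567"]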
+ + +from optparse import make_option + +import webkitpy.tool.steps as steps + +from webkitpy.common.checkout.commitinfo import CommitInfo +from webkitpy.common.config.committers import CommitterList +from webkitpy.common.net.buildbot import BuildBot +from webkitpy.common.net.regressionwindow import RegressionWindow +from webkitpy.common.system.user import User +from webkitpy.tool.grammar import pluralize +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.common.system.deprecated_logging import log +from webkitpy.layout_tests import port + + +class SuggestReviewers(AbstractDeclarativeCommand): + name = "suggest-reviewers" + help_text = "Suggest reviewers for a patch based on recent changes to the modified files." + + def __init__(self): + options = [ + steps.Options.git_commit, + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def execute(self, options, args, tool): + reviewers = tool.checkout().suggested_reviewers(options.git_commit) + print "\n".join([reviewer.full_name for reviewer in reviewers]) + + +class BugsToCommit(AbstractDeclarativeCommand): + name = "bugs-to-commit" + help_text = "List bugs in the commit-queue" + + def execute(self, options, args, tool): + # FIXME: This command is poorly named. It's fetching the commit-queue list here. The name implies it's fetching pending-commit (all r+'d patches). + bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue() + for bug_id in bug_ids: + print "%s" % bug_id + + +class PatchesInCommitQueue(AbstractDeclarativeCommand): + name = "patches-in-commit-queue" + help_text = "List patches in the commit-queue" + + def execute(self, options, args, tool): + patches = tool.bugs.queries.fetch_patches_from_commit_queue() + log("Patches in commit queue:") + for patch in patches: + print patch.url() + + +class PatchesToCommitQueue(AbstractDeclarativeCommand): + name = "patches-to-commit-queue" + help_text = "Patches which should be added to the commit queue" + def __init__(self): + options = [ + make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + @staticmethod + def _needs_commit_queue(patch): + if patch.commit_queue() == "+": # If it's already cq+, ignore the patch. + log("%s already has cq=%s" % (patch.id(), patch.commit_queue())) + return False + + # We only need to worry about patches from contributers who are not yet committers. 
+ committer_record = CommitterList().committer_by_email(patch.attacher_email()) + if committer_record: + log("%s committer = %s" % (patch.id(), committer_record)) + return not committer_record + + def execute(self, options, args, tool): + patches = tool.bugs.queries.fetch_patches_from_pending_commit_list() + patches_needing_cq = filter(self._needs_commit_queue, patches) + if options.bugs: + bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq) + bugs_needing_cq = sorted(set(bugs_needing_cq)) + for bug_id in bugs_needing_cq: + print "%s" % tool.bugs.bug_url_for_bug_id(bug_id) + else: + for patch in patches_needing_cq: + print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit") + + +class PatchesToReview(AbstractDeclarativeCommand): + name = "patches-to-review" + help_text = "List patches that are pending review" + + def execute(self, options, args, tool): + patch_ids = tool.bugs.queries.fetch_attachment_ids_from_review_queue() + log("Patches pending review:") + for patch_id in patch_ids: + print patch_id + + +class LastGreenRevision(AbstractDeclarativeCommand): + name = "last-green-revision" + help_text = "Prints the last known good revision" + + def execute(self, options, args, tool): + print self._tool.buildbot.last_green_revision() + + +class WhatBroke(AbstractDeclarativeCommand): + name = "what-broke" + help_text = "Print failing buildbots (%s) and what revisions broke them" % BuildBot.default_host + + def _print_builder_line(self, builder_name, max_name_width, status_message): + print "%s : %s" % (builder_name.ljust(max_name_width), status_message) + + def _print_blame_information_for_builder(self, builder_status, name_width, avoid_flakey_tests=True): + builder = self._tool.buildbot.builder_with_name(builder_status["name"]) + red_build = builder.build(builder_status["build_number"]) + regression_window = builder.find_regression_window(red_build) + if not regression_window.failing_build(): + self._print_builder_line(builder.name(), name_width, "FAIL (error loading build information)") + return + if not regression_window.build_before_failure(): + self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: sometime before %s?)" % regression_window.failing_build().revision()) + return + + revisions = regression_window.revisions() + first_failure_message = "" + if (regression_window.failing_build() == builder.build(builder_status["build_number"])): + first_failure_message = " FIRST FAILURE, possibly a flaky test" + self._print_builder_line(builder.name(), name_width, "FAIL (blame-list: %s%s)" % (revisions, first_failure_message)) + for revision in revisions: + commit_info = self._tool.checkout().commit_info_for_revision(revision) + if commit_info: + print commit_info.blame_string(self._tool.bugs) + else: + print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision + + def execute(self, options, args, tool): + builder_statuses = tool.buildbot.builder_statuses() + longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses))) + failing_builders = 0 + for builder_status in builder_statuses: + # If the builder is green, print OK, exit. + if builder_status["is_green"]: + continue + self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name) + failing_builders += 1 + if failing_builders: + print "%s of %s are failing" % (failing_builders, pluralize("builder", len(builder_statuses))) + else: + print "All builders are passing!" 
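what-broke builds its blame list from a regression window: the last build that still passed and the first build that failed bound the suspect revisions. The class below is a simplified, self-contained stand-in for that idea; webkitpy's real RegressionWindow works on Build objects rather than raw revision numbers.

# Simplified stand-in for the regression-window bookkeeping used by
# what-broke (and failure-reason below): every revision after the last good
# build, up to and including the first failing build, is a suspect.
class SimpleRegressionWindow(object):
    def __init__(self, last_good_revision, first_failing_revision):
        self._last_good = last_good_revision
        self._first_failing = first_failing_revision

    def revisions(self):
        return list(range(self._last_good + 1, self._first_failing + 1))


window = SimpleRegressionWindow(74520, 74523)
assert window.revisions() == [74521, 74522, 74523]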
+ + +class ResultsFor(AbstractDeclarativeCommand): + name = "results-for" + help_text = "Print a list of failures for the passed revision from bots on %s" % BuildBot.default_host + argument_names = "REVISION" + + def _print_layout_test_results(self, results): + if not results: + print " No results." + return + for title, files in results.parsed_results().items(): + print " %s" % title + for filename in files: + print " %s" % filename + + def execute(self, options, args, tool): + builders = self._tool.buildbot.builders() + for builder in builders: + print "%s:" % builder.name() + build = builder.build_for_revision(args[0], allow_failed_lookups=True) + self._print_layout_test_results(build.layout_test_results()) + + +class FailureReason(AbstractDeclarativeCommand): + name = "failure-reason" + help_text = "Lists revisions where individual test failures started at %s" % BuildBot.default_host + + def _blame_line_for_revision(self, revision): + try: + commit_info = self._tool.checkout().commit_info_for_revision(revision) + except Exception, e: + return "FAILED to fetch CommitInfo for r%s, exception: %s" % (revision, e) + if not commit_info: + return "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision + return commit_info.blame_string(self._tool.bugs) + + def _print_blame_information_for_transition(self, regression_window, failing_tests): + red_build = regression_window.failing_build() + print "SUCCESS: Build %s (r%s) was the first to show failures: %s" % (red_build._number, red_build.revision(), failing_tests) + print "Suspect revisions:" + for revision in regression_window.revisions(): + print self._blame_line_for_revision(revision) + + def _explain_failures_for_builder(self, builder, start_revision): + print "Examining failures for \"%s\", starting at r%s" % (builder.name(), start_revision) + revision_to_test = start_revision + build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True) + layout_test_results = build.layout_test_results() + if not layout_test_results: + # FIXME: This could be made more user friendly. + print "Failed to load layout test results; can't continue. (start revision = r%s)" % start_revision + return 1 + + results_to_explain = set(layout_test_results.failing_tests()) + last_build_with_results = build + print "Starting at %s" % revision_to_test + while results_to_explain: + revision_to_test -= 1 + new_build = builder.build_for_revision(revision_to_test, allow_failed_lookups=True) + if not new_build: + print "No build for %s" % revision_to_test + continue + build = new_build + latest_results = build.layout_test_results() + if not latest_results: + print "No results build %s (r%s)" % (build._number, build.revision()) + continue + failures = set(latest_results.failing_tests()) + if len(failures) >= 20: + # FIXME: We may need to move this logic into the LayoutTestResults class. + # The buildbot stops runs after 20 failures so we don't have full results to work with here. + print "Too many failures in build %s (r%s), ignoring." 
% (build._number, build.revision()) + continue + fixed_results = results_to_explain - failures + if not fixed_results: + print "No change in build %s (r%s), %s unexplained failures (%s in this build)" % (build._number, build.revision(), len(results_to_explain), len(failures)) + last_build_with_results = build + continue + regression_window = RegressionWindow(build, last_build_with_results) + self._print_blame_information_for_transition(regression_window, fixed_results) + last_build_with_results = build + results_to_explain -= fixed_results + if results_to_explain: + print "Failed to explain failures: %s" % results_to_explain + return 1 + print "Explained all results for %s" % builder.name() + return 0 + + def _builder_to_explain(self): + builder_statuses = self._tool.buildbot.builder_statuses() + red_statuses = [status for status in builder_statuses if not status["is_green"]] + print "%s failing" % (pluralize("builder", len(red_statuses))) + builder_choices = [status["name"] for status in red_statuses] + # We could offer an "All" choice here. + chosen_name = User.prompt_with_list("Which builder to diagnose:", builder_choices) + # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object. + for status in red_statuses: + if status["name"] == chosen_name: + return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"]) + + def execute(self, options, args, tool): + (builder, latest_revision) = self._builder_to_explain() + start_revision = self._tool.user.prompt("Revision to walk backwards from? [%s] " % latest_revision) or latest_revision + if not start_revision: + print "Revision required." + return 1 + return self._explain_failures_for_builder(builder, start_revision=int(start_revision)) + + +class FindFlakyTests(AbstractDeclarativeCommand): + name = "find-flaky-tests" + help_text = "Lists tests that often fail for a single build at %s" % BuildBot.default_host + + def _find_failures(self, builder, revision): + build = builder.build_for_revision(revision, allow_failed_lookups=True) + if not build: + print "No build for %s" % revision + return (None, None) + results = build.layout_test_results() + if not results: + print "No results build %s (r%s)" % (build._number, build.revision()) + return (None, None) + failures = set(results.failing_tests()) + if len(failures) >= 20: + # FIXME: We may need to move this logic into the LayoutTestResults class. + # The buildbot stops runs after 20 failures so we don't have full results to work with here. + print "Too many failures in build %s (r%s), ignoring." % (build._number, build.revision()) + return (None, None) + return (build, failures) + + def _increment_statistics(self, flaky_tests, flaky_test_statistics): + for test in flaky_tests: + count = flaky_test_statistics.get(test, 0) + flaky_test_statistics[test] = count + 1 + + def _print_statistics(self, statistics): + print "=== Results ===" + print "Occurances Test name" + for value, key in sorted([(value, key) for key, value in statistics.items()]): + print "%10d %s" % (value, key) + + def _walk_backwards_from(self, builder, start_revision, limit): + flaky_test_statistics = {} + all_previous_failures = set([]) + one_time_previous_failures = set([]) + previous_build = None + for i in range(limit): + revision = start_revision - i + print "Analyzing %s ... " % revision, + (build, failures) = self._find_failures(builder, revision) + if failures == None: + # Notice that we don't loop on the empty set! 
+ continue + print "has %s failures" % len(failures) + flaky_tests = one_time_previous_failures - failures + if flaky_tests: + print "Flaky tests: %s %s" % (sorted(flaky_tests), + previous_build.results_url()) + self._increment_statistics(flaky_tests, flaky_test_statistics) + one_time_previous_failures = failures - all_previous_failures + all_previous_failures = failures + previous_build = build + self._print_statistics(flaky_test_statistics) + + def _builder_to_analyze(self): + statuses = self._tool.buildbot.builder_statuses() + choices = [status["name"] for status in statuses] + chosen_name = User.prompt_with_list("Which builder to analyze:", choices) + for status in statuses: + if status["name"] == chosen_name: + return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"]) + + def execute(self, options, args, tool): + (builder, latest_revision) = self._builder_to_analyze() + limit = self._tool.user.prompt("How many revisions to look through? [10000] ") or 10000 + return self._walk_backwards_from(builder, latest_revision, limit=int(limit)) + + +class TreeStatus(AbstractDeclarativeCommand): + name = "tree-status" + help_text = "Print the status of the %s buildbots" % BuildBot.default_host + long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder +and displayes the status of each builder.""" + + def execute(self, options, args, tool): + for builder in tool.buildbot.builder_statuses(): + status_string = "ok" if builder["is_green"] else "FAIL" + print "%s : %s" % (status_string.ljust(4), builder["name"]) + + +class SkippedPorts(AbstractDeclarativeCommand): + name = "skipped-ports" + help_text = "Print the list of ports skipping the given layout test(s)" + long_help = """Scans the the Skipped file of each port and figure +out what ports are skipping the test(s). Categories are taken in account too.""" + argument_names = "TEST_NAME" + + def execute(self, options, args, tool): + results = dict([(test_name, []) for test_name in args]) + for port_name, port_object in tool.port_factory.get_all().iteritems(): + for test_name in args: + if port_object.skips_layout_test(test_name): + results[test_name].append(port_name) + + for test_name, ports in results.iteritems(): + if ports: + print "Ports skipping test %r: %s" % (test_name, ', '.join(ports)) + else: + print "Test %r is not skipped by any port." % test_name diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py new file mode 100644 index 0000000..05a4a5c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py @@ -0,0 +1,90 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.bugzilla import Bugzilla +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.commandtest import CommandsTest +from webkitpy.tool.commands.queries import * +from webkitpy.tool.mocktool import MockTool + + +class QueryCommandsTest(CommandsTest): + def test_bugs_to_commit(self): + expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\n" + self.assert_execute_outputs(BugsToCommit(), None, "42\n77\n", expected_stderr) + + def test_patches_in_commit_queue(self): + expected_stdout = "http://example.com/197\nhttp://example.com/103\n" + expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\nPatches in commit queue:\n" + self.assert_execute_outputs(PatchesInCommitQueue(), None, expected_stdout, expected_stderr) + + def test_patches_to_commit_queue(self): + expected_stdout = "http://example.com/104&action=edit\n" + expected_stderr = "197 already has cq=+\n128 already has cq=+\n105 committer = \"Eric Seidel\" <eric@webkit.org>\n" + options = Mock() + options.bugs = False + self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options) + + expected_stdout = "http://example.com/77\n" + options.bugs = True + self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options) + + def test_patches_to_review(self): + expected_stdout = "103\n" + expected_stderr = "Patches pending review:\n" + self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr) + + def test_tree_status(self): + expected_stdout = "ok : Builder1\nok : Builder2\n" + self.assert_execute_outputs(TreeStatus(), None, expected_stdout) + + def test_skipped_ports(self): + expected_stdout = "Ports skipping test 'media/foo/bar.html': test_port1, test_port2\n" + self.assert_execute_outputs(SkippedPorts(), ("media/foo/bar.html",), expected_stdout) + + expected_stdout = "Ports skipping test 'foo': test_port1\n" + self.assert_execute_outputs(SkippedPorts(), ("foo",), expected_stdout) + + expected_stdout = "Test 'media' is not skipped by any port.\n" + self.assert_execute_outputs(SkippedPorts(), ("media",), expected_stdout) + + +class FailureReasonTest(unittest.TestCase): + def test_blame_line_for_revision(self): + tool = MockTool() + command = FailureReason() + command.bind_to_tool(tool) + # This is an artificial example, mostly to test the CommitInfo lookup failure case. 
+ self.assertEquals(command._blame_line_for_revision(None), "FAILED to fetch CommitInfo for rNone, likely missing ChangeLog") + + def raising_mock(self): + raise Exception("MESSAGE") + tool.checkout().commit_info_for_revision = raising_mock + self.assertEquals(command._blame_line_for_revision(None), "FAILED to fetch CommitInfo for rNone, exception: MESSAGE") diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py new file mode 100644 index 0000000..e15555f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/queues.py @@ -0,0 +1,406 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
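One more note on the find-flaky-tests walk above before queues.py proper: a test is flagged as flaky when it failed in exactly one build and passed in the next newer build, which falls out of three set operations per iteration. The snippet reuses that arithmetic on invented failure sets; the test names and ordering are made up for illustration, and builds are listed newest first to match the backwards walk.

# The set arithmetic from FindFlakyTests._walk_backwards_from above, applied
# to invented per-build failure sets, newest build first.
failures_per_build = [
    set(["fast/js/a.html"]),                     # newest build: b.html passes
    set(["fast/js/a.html", "editing/b.html"]),   # b.html fails only here
    set(["fast/js/a.html"]),                     # older build: b.html passed
]

flaky = set()
all_previous_failures = set()
one_time_previous_failures = set()
for failures in failures_per_build:
    flaky |= one_time_previous_failures - failures
    one_time_previous_failures = failures - all_previous_failures
    all_previous_failures = failures

assert flaky == set(["editing/b.html"])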
+ +from __future__ import with_statement + +import codecs +import time +import traceback +import os + +from datetime import datetime +from optparse import make_option +from StringIO import StringIO + +from webkitpy.common.config.committervalidator import CommitterValidator +from webkitpy.common.net.bugzilla import Attachment +from webkitpy.common.net.layouttestresults import LayoutTestResults +from webkitpy.common.net.statusserver import StatusServer +from webkitpy.common.system.deprecated_logging import error, log +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.bot.commitqueuetask import CommitQueueTask, CommitQueueTaskDelegate +from webkitpy.tool.bot.feeders import CommitQueueFeeder, EWSFeeder +from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate +from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter +from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler +from webkitpy.tool.multicommandtool import Command, TryAgain + + +class AbstractQueue(Command, QueueEngineDelegate): + watchers = [ + ] + + _pass_status = "Pass" + _fail_status = "Fail" + _retry_status = "Retry" + _error_status = "Error" + + def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations + options_list = (options or []) + [ + make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"), + make_option("--exit-after-iteration", action="store", type="int", dest="iterations", default=None, help="Stop running the queue after iterating this number of times."), + ] + Command.__init__(self, "Run the %s" % self.name, options=options_list) + self._iteration_count = 0 + + def _cc_watchers(self, bug_id): + try: + self._tool.bugs.add_cc_to_bug(bug_id, self.watchers) + except Exception, e: + traceback.print_exc() + log("Failed to CC watchers.") + + def run_webkit_patch(self, args): + webkit_patch_args = [self._tool.path()] + # FIXME: This is a hack, we should have a more general way to pass global options. + # FIXME: We must always pass global options and their value in one argument + # because our global option code looks for the first argument which does + # not begin with "-" and assumes that is the command name. + webkit_patch_args += ["--status-host=%s" % self._tool.status_server.host] + if self._tool.status_server.bot_id: + webkit_patch_args += ["--bot-id=%s" % self._tool.status_server.bot_id] + if self._options.port: + webkit_patch_args += ["--port=%s" % self._options.port] + webkit_patch_args.extend(args) + # FIXME: There is probably no reason to use run_and_throw_if_fail anymore. + # run_and_throw_if_fail was invented to support tee'd output + # (where we write both to a log file and to the console at once), + # but the queues don't need live-progress, a dump-of-output at the + # end should be sufficient. 
+ return self._tool.executive.run_and_throw_if_fail(webkit_patch_args) + + def _log_directory(self): + return "%s-logs" % self.name + + # QueueEngineDelegate methods + + def queue_log_path(self): + return os.path.join(self._log_directory(), "%s.log" % self.name) + + def work_item_log_path(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def begin_work_queue(self): + log("CAUTION: %s will discard all local changes in \"%s\"" % (self.name, self._tool.scm().checkout_root)) + if self._options.confirm: + response = self._tool.user.prompt("Are you sure? Type \"yes\" to continue: ") + if (response != "yes"): + error("User declined.") + log("Running WebKit %s." % self.name) + self._tool.status_server.update_status(self.name, "Starting Queue") + + def stop_work_queue(self, reason): + self._tool.status_server.update_status(self.name, "Stopping Queue, reason: %s" % reason) + + def should_continue_work_queue(self): + self._iteration_count += 1 + return not self._options.iterations or self._iteration_count <= self._options.iterations + + def next_work_item(self): + raise NotImplementedError, "subclasses must implement" + + def should_proceed_with_work_item(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def process_work_item(self, work_item): + raise NotImplementedError, "subclasses must implement" + + def handle_unexpected_error(self, work_item, message): + raise NotImplementedError, "subclasses must implement" + + # Command methods + + def execute(self, options, args, tool, engine=QueueEngine): + self._options = options # FIXME: This code is wrong. Command.options is a list, this assumes an Options element! + self._tool = tool # FIXME: This code is wrong too! Command.bind_to_tool handles this! + return engine(self.name, self, self._tool.wakeup_event).run() + + @classmethod + def _log_from_script_error_for_upload(cls, script_error, output_limit=None): + # We have seen request timeouts with app engine due to large + # log uploads. Trying only the last 512k. + if not output_limit: + output_limit = 512 * 1024 # 512k + output = script_error.message_with_output(output_limit=output_limit) + # We pre-encode the string to a byte array before passing it + # to status_server, because ClientForm (part of mechanize) + # wants a file-like object with pre-encoded data. + return StringIO(output.encode("utf-8")) + + @classmethod + def _update_status_for_script_error(cls, tool, state, script_error, is_error=False): + message = str(script_error) + if is_error: + message = "Error: %s" % message + failure_log = cls._log_from_script_error_for_upload(script_error) + return tool.status_server.update_status(cls.name, message, state["patch"], failure_log) + + +class FeederQueue(AbstractQueue): + name = "feeder-queue" + + _sleep_duration = 30 # seconds + + # AbstractPatchQueue methods + + def begin_work_queue(self): + AbstractQueue.begin_work_queue(self) + self.feeders = [ + CommitQueueFeeder(self._tool), + EWSFeeder(self._tool), + ] + + def next_work_item(self): + # This really show inherit from some more basic class that doesn't + # understand work items, but the base class in the heirarchy currently + # understands work items. 
+ return "synthetic-work-item" + + def should_proceed_with_work_item(self, work_item): + return True + + def process_work_item(self, work_item): + for feeder in self.feeders: + feeder.feed() + time.sleep(self._sleep_duration) + return True + + def work_item_log_path(self, work_item): + return None + + def handle_unexpected_error(self, work_item, message): + log(message) + + +class AbstractPatchQueue(AbstractQueue): + def _update_status(self, message, patch=None, results_file=None): + return self._tool.status_server.update_status(self.name, message, patch, results_file) + + def _next_patch(self): + patch_id = self._tool.status_server.next_work_item(self.name) + if not patch_id: + return None + patch = self._tool.bugs.fetch_attachment(patch_id) + if not patch: + # FIXME: Using a fake patch because release_work_item has the wrong API. + # We also don't really need to release the lock (although that's fine), + # mostly we just need to remove this bogus patch from our queue. + # If for some reason bugzilla is just down, then it will be re-fed later. + patch = Attachment({'id': patch_id}, None) + self._release_work_item(patch) + return None + return patch + + def _release_work_item(self, patch): + self._tool.status_server.release_work_item(self.name, patch) + + def _did_pass(self, patch): + self._update_status(self._pass_status, patch) + self._release_work_item(patch) + + def _did_fail(self, patch): + self._update_status(self._fail_status, patch) + self._release_work_item(patch) + + def _did_retry(self, patch): + self._update_status(self._retry_status, patch) + self._release_work_item(patch) + + def _did_error(self, patch, reason): + message = "%s: %s" % (self._error_status, reason) + self._update_status(message, patch) + self._release_work_item(patch) + + def work_item_log_path(self, patch): + return os.path.join(self._log_directory(), "%s.log" % patch.bug_id()) + + +class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate): + name = "commit-queue" + + # AbstractPatchQueue methods + + def begin_work_queue(self): + AbstractPatchQueue.begin_work_queue(self) + self.committer_validator = CommitterValidator(self._tool.bugs) + + def next_work_item(self): + return self._next_patch() + + def should_proceed_with_work_item(self, patch): + patch_text = "rollout patch" if patch.is_rollout() else "patch" + self._update_status("Processing %s" % patch_text, patch) + return True + + def process_work_item(self, patch): + self._cc_watchers(patch.bug_id()) + task = CommitQueueTask(self, patch) + try: + if task.run(): + self._did_pass(patch) + return True + self._did_retry(patch) + except ScriptError, e: + validator = CommitterValidator(self._tool.bugs) + validator.reject_patch_from_commit_queue(patch.id(), self._error_message_for_bug(task.failure_status_id, e)) + self._did_fail(patch) + + def _error_message_for_bug(self, status_id, script_error): + if not script_error.output: + return script_error.message_with_output() + results_link = self._tool.status_server.results_url_for_status(status_id) + return "%s\nFull output: %s" % (script_error.message_with_output(), results_link) + + def handle_unexpected_error(self, patch, message): + self.committer_validator.reject_patch_from_commit_queue(patch.id(), message) + + # CommitQueueTaskDelegate methods + + def run_command(self, command): + self.run_webkit_patch(command) + + def command_passed(self, message, patch): + self._update_status(message, patch=patch) + + def command_failed(self, message, script_error, patch): + failure_log = 
self._log_from_script_error_for_upload(script_error) + return self._update_status(message, patch=patch, results_file=failure_log) + + # FIXME: This exists for mocking, but should instead be mocked via + # tool.filesystem.read_text_file. They have different error handling at the moment. + def _read_file_contents(self, path): + try: + with codecs.open(path, "r", "utf-8") as open_file: + return open_file.read() + except OSError, e: # File does not exist or can't be read. + return None + + # FIXME: This may belong on the Port object. + def layout_test_results(self): + results_path = self._tool.port().layout_tests_results_path() + results_html = self._read_file_contents(results_path) + if not results_html: + return None + return LayoutTestResults.results_from_string(results_html) + + def refetch_patch(self, patch): + return self._tool.bugs.fetch_attachment(patch.id()) + + def report_flaky_tests(self, patch, flaky_tests): + reporter = FlakyTestReporter(self._tool, self.name) + reporter.report_flaky_tests(flaky_tests, patch) + + # StepSequenceErrorHandler methods + + def handle_script_error(cls, tool, state, script_error): + # Hitting this error handler should be pretty rare. It does occur, + # however, when a patch no longer applies to top-of-tree in the final + # land step. + log(script_error.message_with_output()) + + @classmethod + def handle_checkout_needs_update(cls, tool, state, options, error): + message = "Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests." + tool.status_server.update_status(cls.name, message, state["patch"]) + # The only time when we find out that out checkout needs update is + # when we were ready to actually pull the trigger and land the patch. + # Rather than spinning in the master process, we retry without + # building or testing, which is much faster. + options.build = False + options.test = False + options.update = True + raise TryAgain() + + +class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler): + """This is the base-class for the EWS queues and the style-queue.""" + def __init__(self, options=None): + AbstractPatchQueue.__init__(self, options) + + def review_patch(self, patch): + raise NotImplementedError("subclasses must implement") + + # AbstractPatchQueue methods + + def begin_work_queue(self): + AbstractPatchQueue.begin_work_queue(self) + + def next_work_item(self): + return self._next_patch() + + def should_proceed_with_work_item(self, patch): + raise NotImplementedError("subclasses must implement") + + def process_work_item(self, patch): + try: + if not self.review_patch(patch): + return False + self._did_pass(patch) + return True + except ScriptError, e: + if e.exit_code != QueueEngine.handled_error_code: + self._did_fail(patch) + else: + # The subprocess handled the error, but won't have released the patch, so we do. + # FIXME: We need to simplify the rules by which _release_work_item is called. 
+ self._release_work_item(patch) + raise e + + def handle_unexpected_error(self, patch, message): + log(message) + + # StepSequenceErrorHandler methods + + @classmethod + def handle_script_error(cls, tool, state, script_error): + log(script_error.message_with_output()) + + +class StyleQueue(AbstractReviewQueue): + name = "style-queue" + def __init__(self): + AbstractReviewQueue.__init__(self) + + def should_proceed_with_work_item(self, patch): + self._update_status("Checking style", patch) + return True + + def review_patch(self, patch): + self.run_webkit_patch(["check-style", "--force-clean", "--non-interactive", "--parent-command=style-queue", patch.id()]) + return True + + @classmethod + def handle_script_error(cls, tool, state, script_error): + is_svn_apply = script_error.command_name() == "svn-apply" + status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply) + if is_svn_apply: + QueueEngine.exit_after_handled_error(script_error) + message = "Attachment %s did not pass %s:\n\n%s\n\nIf any of these errors are false positives, please file a bug against check-webkit-style." % (state["patch"].id(), cls.name, script_error.message_with_output(output_limit=3*1024)) + tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers) + exit(1) diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py new file mode 100644 index 0000000..d793213 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py @@ -0,0 +1,380 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
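The tests that follow exercise, among other things, AbstractQueue._log_from_script_error_for_upload from queues.py above, which trims failure logs to their last 512k and pre-encodes them to UTF-8 in a StringIO before upload. Below is a stripped-down, Python 2 sketch of just that truncate-then-encode step; ScriptError's own message_with_output formatting and the status-server call are left out.

# Stripped-down sketch of the truncate-then-encode step performed by
# AbstractQueue._log_from_script_error_for_upload above (Python 2, like the
# module itself).  Only the tail of the output is kept, and the result is a
# file-like object holding pre-encoded UTF-8 bytes.
from StringIO import StringIO


def log_for_upload(message, output, output_limit=512 * 1024):
    if len(output) > output_limit:
        output = output[-output_limit:]
    return StringIO((u"%s\n%s" % (message, output)).encode("utf-8"))


log_file = log_for_upload(u"test", u"WebKit \u2661 Tor Arne Vestb\u00F8!", output_limit=10)
assert isinstance(log_file.read(), str)  # already bytes, ready for upload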
+ +import os + +from webkitpy.common.checkout.scm import CheckoutNeedsUpdate +from webkitpy.common.net.bugzilla import Attachment +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.commandtest import CommandsTest +from webkitpy.tool.commands.queues import * +from webkitpy.tool.commands.queuestest import QueuesTest +from webkitpy.tool.commands.stepsequence import StepSequence +from webkitpy.tool.mocktool import MockTool, MockSCM, MockStatusServer + + +class TestQueue(AbstractPatchQueue): + name = "test-queue" + + +class TestReviewQueue(AbstractReviewQueue): + name = "test-review-queue" + + +class TestFeederQueue(FeederQueue): + _sleep_duration = 0 + + +class AbstractQueueTest(CommandsTest): + def test_log_directory(self): + self.assertEquals(TestQueue()._log_directory(), "test-queue-logs") + + def _assert_run_webkit_patch(self, run_args, port=None): + queue = TestQueue() + tool = MockTool() + tool.status_server.bot_id = "gort" + tool.executive = Mock() + queue.bind_to_tool(tool) + queue._options = Mock() + queue._options.port = port + + queue.run_webkit_patch(run_args) + expected_run_args = ["echo", "--status-host=example.com", "--bot-id=gort"] + if port: + expected_run_args.append("--port=%s" % port) + expected_run_args.extend(run_args) + tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args) + + def test_run_webkit_patch(self): + self._assert_run_webkit_patch([1]) + self._assert_run_webkit_patch(["one", 2]) + self._assert_run_webkit_patch([1], port="mockport") + + def test_iteration_count(self): + queue = TestQueue() + queue._options = Mock() + queue._options.iterations = 3 + self.assertTrue(queue.should_continue_work_queue()) + self.assertTrue(queue.should_continue_work_queue()) + self.assertTrue(queue.should_continue_work_queue()) + self.assertFalse(queue.should_continue_work_queue()) + + def test_no_iteration_count(self): + queue = TestQueue() + queue._options = Mock() + self.assertTrue(queue.should_continue_work_queue()) + self.assertTrue(queue.should_continue_work_queue()) + self.assertTrue(queue.should_continue_work_queue()) + self.assertTrue(queue.should_continue_work_queue()) + + def _assert_log_message(self, script_error, log_message): + failure_log = AbstractQueue._log_from_script_error_for_upload(script_error, output_limit=10) + self.assertTrue(failure_log.read(), log_message) + + def test_log_from_script_error_for_upload(self): + self._assert_log_message(ScriptError("test"), "test") + # In python 2.5 unicode(Exception) is busted. See: + # http://bugs.python.org/issue2517 + # With no good workaround, we just ignore these tests. + if not hasattr(Exception, "__unicode__"): + return + + unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!" 
+ utf8_tor = unicode_tor.encode("utf-8") + self._assert_log_message(ScriptError(unicode_tor), utf8_tor) + script_error = ScriptError(unicode_tor, output=unicode_tor) + expected_output = "%s\nLast %s characters of output:\n%s" % (utf8_tor, 10, utf8_tor[-10:]) + self._assert_log_message(script_error, expected_output) + + +class FeederQueueTest(QueuesTest): + def test_feeder_queue(self): + queue = TestFeederQueue() + tool = MockTool(log_executive=True) + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("feeder-queue", MockSCM.fake_checkout_root), + "should_proceed_with_work_item": "", + "next_work_item": "", + "process_work_item": """Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com) +MOCK setting flag 'commit-queue' to '-' on attachment '128' with comment 'Rejecting attachment 128 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py. + +- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags. + +- If you have committer rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your committer rights.' +MOCK: update_work_items: commit-queue [106, 197] +Feeding commit-queue items [106, 197] +Feeding EWS (1 r? patch, 1 new) +MOCK: submit_to_ews: 103 +""", + "handle_unexpected_error": "Mock error message\n", + } + self.assert_queue_outputs(queue, tool=tool, expected_stderr=expected_stderr) + + +class AbstractPatchQueueTest(CommandsTest): + def test_next_patch(self): + queue = AbstractPatchQueue() + tool = MockTool() + queue.bind_to_tool(tool) + queue._options = Mock() + queue._options.port = None + self.assertEquals(queue._next_patch(), None) + tool.status_server = MockStatusServer(work_items=[2, 197]) + expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n" # A mock-only message to prevent us from making mistakes. + expected_stderr = "MOCK: release_work_item: None 2\n" + patch_id = OutputCapture().assert_outputs(self, queue._next_patch, [], expected_stdout=expected_stdout, expected_stderr=expected_stderr) + self.assertEquals(patch_id, None) # 2 is an invalid patch id + self.assertEquals(queue._next_patch().id(), 197) + + +class NeedsUpdateSequence(StepSequence): + def _run(self, tool, options, state): + raise CheckoutNeedsUpdate([], 1, "", None) + + +class AlwaysCommitQueueTool(object): + def __init__(self): + self.status_server = MockStatusServer() + + def command_by_name(self, name): + return CommitQueue + + +class SecondThoughtsCommitQueue(CommitQueue): + def __init__(self): + self._reject_patch = False + CommitQueue.__init__(self) + + def run_command(self, command): + # We want to reject the patch after the first validation, + # so wait to reject it until after some other command has run. 
+ self._reject_patch = True + return CommitQueue.run_command(self, command) + + def refetch_patch(self, patch): + if not self._reject_patch: + return self._tool.bugs.fetch_attachment(patch.id()) + + attachment_dictionary = { + "id": patch.id(), + "bug_id": patch.bug_id(), + "name": "Rejected", + "is_obsolete": True, + "is_patch": False, + "review": "-", + "reviewer_email": "foo@bar.com", + "commit-queue": "-", + "committer_email": "foo@bar.com", + "attacher_email": "Contributer1", + } + return Attachment(attachment_dictionary, None) + + +class CommitQueueTest(QueuesTest): + def test_commit_queue(self): + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root), + "should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n", + "next_work_item": "", + "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory +MOCK: update_status: commit-queue Updated working directory +MOCK: update_status: commit-queue Applied patch +MOCK: update_status: commit-queue Built patch +MOCK: update_status: commit-queue Passed tests +MOCK: update_status: commit-queue Landed patch +MOCK: update_status: commit-queue Pass +MOCK: release_work_item: commit-queue 197 +""", + "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n", + "handle_script_error": "ScriptError error message\n", + } + self.assert_queue_outputs(CommitQueue(), expected_stderr=expected_stderr) + + def test_commit_queue_failure(self): + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root), + "should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n", + "next_work_item": "", + "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory +MOCK: update_status: commit-queue Updated working directory +MOCK: update_status: commit-queue Patch does not apply +MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'MOCK script error' +MOCK: update_status: commit-queue Fail +MOCK: release_work_item: commit-queue 197 +""", + "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n", + "handle_script_error": "ScriptError error message\n", + } + queue = CommitQueue() + + def mock_run_webkit_patch(command): + if command == ['clean'] or command == ['update']: + # We want cleaning to succeed so we can error out on a step + # that causes the commit-queue to reject the patch. 
+ return + raise ScriptError('MOCK script error') + + queue.run_webkit_patch = mock_run_webkit_patch + self.assert_queue_outputs(queue, expected_stderr=expected_stderr) + + def test_rollout(self): + tool = MockTool(log_executive=True) + tool.buildbot.light_tree_on_fire() + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root), + "should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing patch\n", + "next_work_item": "", + "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'] +MOCK: update_status: commit-queue Cleaned working directory +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'] +MOCK: update_status: commit-queue Updated working directory +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 197] +MOCK: update_status: commit-queue Applied patch +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--no-clean', '--no-update', '--build-style=both'] +MOCK: update_status: commit-queue Built patch +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive'] +MOCK: update_status: commit-queue Passed tests +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 197] +MOCK: update_status: commit-queue Landed patch +MOCK: update_status: commit-queue Pass +MOCK: release_work_item: commit-queue 197 +""", + "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '197' with comment 'Rejecting attachment 197 from commit-queue.' and additional comment 'Mock error message'\n", + "handle_script_error": "ScriptError error message\n", + } + self.assert_queue_outputs(CommitQueue(), tool=tool, expected_stderr=expected_stderr) + + def test_rollout_lands(self): + tool = MockTool(log_executive=True) + tool.buildbot.light_tree_on_fire() + rollout_patch = tool.bugs.fetch_attachment(106) # _patch6, a rollout patch. 
+ assert(rollout_patch.is_rollout()) + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root), + "should_proceed_with_work_item": "MOCK: update_status: commit-queue Processing rollout patch\n", + "next_work_item": "", + "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'] +MOCK: update_status: commit-queue Cleaned working directory +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'] +MOCK: update_status: commit-queue Updated working directory +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 106] +MOCK: update_status: commit-queue Applied patch +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--no-clean', '--no-update', '--build-style=both'] +MOCK: update_status: commit-queue Built patch +MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--ignore-builders', '--non-interactive', '--parent-command=commit-queue', 106] +MOCK: update_status: commit-queue Landed patch +MOCK: update_status: commit-queue Pass +MOCK: release_work_item: commit-queue 106 +""", + "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '106' with comment 'Rejecting attachment 106 from commit-queue.' and additional comment 'Mock error message'\n", + "handle_script_error": "ScriptError error message\n", + } + self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_stderr=expected_stderr) + + def test_auto_retry(self): + queue = CommitQueue() + options = Mock() + options.parent_command = "commit-queue" + tool = AlwaysCommitQueueTool() + sequence = NeedsUpdateSequence(None) + + expected_stderr = "Commit failed because the checkout is out of date. Please update and try again.\nMOCK: update_status: commit-queue Tests passed, but commit failed (checkout out of date). Updating, then landing without building or re-running tests.\n" + state = {'patch': None} + OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_stderr=expected_stderr) + + self.assertEquals(options.update, True) + self.assertEquals(options.build, False) + self.assertEquals(options.test, False) + + def test_manual_reject_during_processing(self): + queue = SecondThoughtsCommitQueue() + queue.bind_to_tool(MockTool()) + queue._options = Mock() + queue._options.port = None + expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory +MOCK: update_status: commit-queue Updated working directory +MOCK: update_status: commit-queue Applied patch +MOCK: update_status: commit-queue Built patch +MOCK: update_status: commit-queue Passed tests +MOCK: update_status: commit-queue Retry +MOCK: release_work_item: commit-queue 197 +""" + OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_stderr=expected_stderr) + + def test_report_flaky_tests(self): + queue = CommitQueue() + queue.bind_to_tool(MockTool()) + expected_stderr = """MOCK bug comment: bug_id=76, cc=None +--- Begin comment --- +The commit-queue just saw foo/bar.html flake while processing attachment 197 on bug 42. +Port: MockPort Platform: MockPlatform 1.0 +--- End comment --- + +MOCK bug comment: bug_id=76, cc=None +--- Begin comment --- +The commit-queue just saw bar/baz.html flake while processing attachment 197 on bug 42. 
+Port: MockPort Platform: MockPlatform 1.0 +--- End comment --- + +MOCK bug comment: bug_id=42, cc=None +--- Begin comment --- +The commit-queue encountered the following flaky tests while processing attachment 197: + +foo/bar.html bug 76 (author: abarth@webkit.org) +bar/baz.html bug 76 (author: abarth@webkit.org) +The commit-queue is continuing to process your patch. +--- End comment --- + +""" + OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, ["foo/bar.html", "bar/baz.html"]], expected_stderr=expected_stderr) + + def test_layout_test_results(self): + queue = CommitQueue() + queue.bind_to_tool(MockTool()) + queue._read_file_contents = lambda path: None + self.assertEquals(queue.layout_test_results(), None) + queue._read_file_contents = lambda path: "" + self.assertEquals(queue.layout_test_results(), None) + + +class StyleQueueTest(QueuesTest): + def test_style_queue(self): + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("style-queue", MockSCM.fake_checkout_root), + "next_work_item": "", + "should_proceed_with_work_item": "MOCK: update_status: style-queue Checking style\n", + "process_work_item": "MOCK: update_status: style-queue Pass\nMOCK: release_work_item: style-queue 197\n", + "handle_unexpected_error": "Mock error message\n", + "handle_script_error": "MOCK: update_status: style-queue ScriptError error message\nMOCK bug comment: bug_id=42, cc=[]\n--- Begin comment ---\nAttachment 197 did not pass style-queue:\n\nScriptError error message\n\nIf any of these errors are false positives, please file a bug against check-webkit-style.\n--- End comment ---\n\n", + } + expected_exceptions = { + "handle_script_error": SystemExit, + } + self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr, expected_exceptions=expected_exceptions) diff --git a/Tools/Scripts/webkitpy/tool/commands/queuestest.py b/Tools/Scripts/webkitpy/tool/commands/queuestest.py new file mode 100644 index 0000000..6455617 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/queuestest.py @@ -0,0 +1,95 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.net.bugzilla import Attachment +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.common.system.executive import ScriptError +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler +from webkitpy.tool.mocktool import MockTool + + +class MockQueueEngine(object): + def __init__(self, name, queue, wakeup_event): + pass + + def run(self): + pass + + +class QueuesTest(unittest.TestCase): + # This is _patch1 in mocktool.py + mock_work_item = MockTool().bugs.fetch_attachment(197) + + def assert_outputs(self, func, func_name, args, expected_stdout, expected_stderr, expected_exceptions): + exception = None + if expected_exceptions and func_name in expected_exceptions: + exception = expected_exceptions[func_name] + + OutputCapture().assert_outputs(self, + func, + args=args, + expected_stdout=expected_stdout.get(func_name, ""), + expected_stderr=expected_stderr.get(func_name, ""), + expected_exception=exception) + + def _default_begin_work_queue_stderr(self, name, checkout_dir): + string_replacements = {"name": name, 'checkout_dir': checkout_dir} + return "CAUTION: %(name)s will discard all local changes in \"%(checkout_dir)s\"\nRunning WebKit %(name)s.\nMOCK: update_status: %(name)s Starting Queue\n" % string_replacements + + def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None): + if not tool: + tool = MockTool() + if not expected_stdout: + expected_stdout = {} + if not expected_stderr: + expected_stderr = {} + if not args: + args = [] + if not options: + options = Mock() + options.port = None + if not work_item: + work_item = self.mock_work_item + tool.user.prompt = lambda message: "yes" + + queue.execute(options, args, tool, engine=MockQueueEngine) + + self.assert_outputs(queue.queue_log_path, "queue_log_path", [], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.work_item_log_path, "work_item_log_path", [work_item], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.begin_work_queue, "begin_work_queue", [], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.should_continue_work_queue, "should_continue_work_queue", [], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.next_work_item, "next_work_item", [], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.should_proceed_with_work_item, "should_proceed_with_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.process_work_item, "process_work_item", [work_item], expected_stdout, expected_stderr, expected_exceptions) + self.assert_outputs(queue.handle_unexpected_error, "handle_unexpected_error", [work_item, "Mock error message"], expected_stdout, expected_stderr, 
expected_exceptions) + # Should we have a different function for testing StepSequenceErrorHandlers? + if isinstance(queue, StepSequenceErrorHandler): + self.assert_outputs(queue.handle_script_error, "handle_script_error", [tool, {"patch": self.mock_work_item}, ScriptError(message="ScriptError error message", script_args="MockErrorCommand")], expected_stdout, expected_stderr, expected_exceptions) diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py new file mode 100644 index 0000000..8c4b997 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py @@ -0,0 +1,112 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os.path +import re +import shutil +import urllib + +from webkitpy.common.net.buildbot import BuildBot +from webkitpy.common.net.layouttestresults import LayoutTestResults +from webkitpy.common.system.user import User +from webkitpy.layout_tests.port import factory +from webkitpy.tool.grammar import pluralize +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand + + +# FIXME: I'm not sure where this logic should go in the end. +# For now it's here, until we have a second need for it. 
+class BuilderToPort(object): + _builder_name_to_port_name = { + r"SnowLeopard": "mac-snowleopard", + r"Leopard": "mac-leopard", + r"Tiger": "mac-tiger", + r"Windows": "win", + r"GTK": "gtk", + r"Qt": "qt", + r"Chromium Mac": "chromium-mac", + r"Chromium Linux": "chromium-linux", + r"Chromium Win": "chromium-win", + } + + def _port_name_for_builder_name(self, builder_name): + for regexp, port_name in self._builder_name_to_port_name.items(): + if re.match(regexp, builder_name): + return port_name + + def port_for_builder(self, builder_name): + port_name = self._port_name_for_builder_name(builder_name) + assert(port_name) # Need to update _builder_name_to_port_name + port = factory.get(port_name) + assert(port) # Need to update _builder_name_to_port_name + return port + + +class Rebaseline(AbstractDeclarativeCommand): + name = "rebaseline" + help_text = "Replaces local expected.txt files with new results from build bots" + + # FIXME: This should share more code with FailureReason._builder_to_explain + def _builder_to_pull_from(self): + builder_statuses = self._tool.buildbot.builder_statuses() + red_statuses = [status for status in builder_statuses if not status["is_green"]] + print "%s failing" % (pluralize("builder", len(red_statuses))) + builder_choices = [status["name"] for status in red_statuses] + chosen_name = self._tool.user.prompt_with_list("Which builder to pull results from:", builder_choices) + # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object. + for status in red_statuses: + if status["name"] == chosen_name: + return (self._tool.buildbot.builder_with_name(chosen_name), status["build_number"]) + + def _replace_expectation_with_remote_result(self, local_file, remote_file): + (downloaded_file, headers) = urllib.urlretrieve(remote_file) + shutil.move(downloaded_file, local_file) + + def _tests_to_update(self, build): + failing_tests = build.layout_test_results().results_matching_keys([LayoutTestResults.fail_key]) + return self._tool.user.prompt_with_list("Which test(s) to rebaseline:", failing_tests, can_choose_multiple=True) + + def _results_url_for_test(self, build, test): + test_base = os.path.splitext(test)[0] + actual_path = test_base + "-actual.txt" + return build.results_url() + "/" + actual_path + + def execute(self, options, args, tool): + builder, build_number = self._builder_to_pull_from() + build = builder.build(build_number) + port = BuilderToPort().port_for_builder(builder.name()) + + for test in self._tests_to_update(build): + results_url = self._results_url_for_test(build, test) + # Port operates with absolute paths. + absolute_path = os.path.join(port.layout_tests_dir(), test) + expected_file = port.expected_filename(absolute_path, ".txt") + print test + self._replace_expectation_with_remote_result(expected_file, results_url) + + # FIXME: We should handle new results too. diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py new file mode 100644 index 0000000..d6582a7 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py @@ -0,0 +1,38 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.tool.commands.rebaseline import BuilderToPort + + +class BuilderToPortTest(unittest.TestCase): + def test_port_for_builder(self): + converter = BuilderToPort() + port = converter.port_for_builder("Leopard Intel Debug (Tests)") + self.assertEqual(port.name(), "mac-leopard") diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py new file mode 100644 index 0000000..56780b5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver.py @@ -0,0 +1,457 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
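An aside on the BuilderToPortTest just above: the lookup it exercises relies on re.match anchoring at the start of the builder name, so "Leopard Intel Debug (Tests)" is caught by the r"Leopard" entry while a name beginning with "SnowLeopard" is not. A minimal standalone sketch of that prefix-style lookup, with an abbreviated table and helper names chosen here purely for illustration (they are not the patch's own identifiers):

import re

# Abbreviated builder-prefix -> port-name table, illustration only.
_BUILDER_PREFIX_TO_PORT = {
    r"SnowLeopard": "mac-snowleopard",
    r"Leopard": "mac-leopard",
    r"GTK": "gtk",
}

def port_name_for_builder(builder_name):
    # re.match only succeeds when the pattern matches at the beginning of
    # the string, which is what makes prefix patterns like r"Leopard" work.
    for pattern, port_name in _BUILDER_PREFIX_TO_PORT.items():
        if re.match(pattern, builder_name):
            return port_name
    return None

assert port_name_for_builder("Leopard Intel Debug (Tests)") == "mac-leopard"
assert port_name_for_builder("SnowLeopard Intel Release") == "mac-snowleopard"

The real BuilderToPort.port_for_builder additionally asserts that a port name was found and hands it to the port factory.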
+ +"""Starts a local HTTP server which displays layout test failures (given a test +results directory), provides comparisons of expected and actual results (both +images and text) and allows one-click rebaselining of tests.""" +from __future__ import with_statement + +import codecs +import datetime +import fnmatch +import mimetypes +import os +import os.path +import shutil +import threading +import time +import urlparse +import BaseHTTPServer + +from optparse import make_option +from wsgiref.handlers import format_date_time + +from webkitpy.common import system +from webkitpy.layout_tests.port import factory +from webkitpy.layout_tests.port.webkit import WebKitPort +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.thirdparty import simplejson + +STATE_NEEDS_REBASELINE = 'needs_rebaseline' +STATE_REBASELINE_FAILED = 'rebaseline_failed' +STATE_REBASELINE_SUCCEEDED = 'rebaseline_succeeded' + +class RebaselineHTTPServer(BaseHTTPServer.HTTPServer): + def __init__(self, httpd_port, test_config, results_json, platforms_json): + BaseHTTPServer.HTTPServer.__init__(self, ("", httpd_port), RebaselineHTTPRequestHandler) + self.test_config = test_config + self.results_json = results_json + self.platforms_json = platforms_json + + +class RebaselineHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + STATIC_FILE_NAMES = frozenset([ + "index.html", + "loupe.js", + "main.js", + "main.css", + "queue.js", + "util.js", + ]) + + STATIC_FILE_DIRECTORY = os.path.join( + os.path.dirname(__file__), "data", "rebaselineserver") + + def do_GET(self): + self._handle_request() + + def do_POST(self): + self._handle_request() + + def _handle_request(self): + # Parse input. + if "?" in self.path: + path, query_string = self.path.split("?", 1) + self.query = urlparse.parse_qs(query_string) + else: + path = self.path + self.query = {} + function_or_file_name = path[1:] or "index.html" + + # See if a static file matches. + if function_or_file_name in RebaselineHTTPRequestHandler.STATIC_FILE_NAMES: + self._serve_static_file(function_or_file_name) + return + + # See if a class method matches. 
+ function_name = function_or_file_name.replace(".", "_") + if not hasattr(self, function_name): + self.send_error(404, "Unknown function %s" % function_name) + return + if function_name[0] == "_": + self.send_error( + 401, "Not allowed to invoke private or protected methods") + return + function = getattr(self, function_name) + function() + + def _serve_static_file(self, static_path): + self._serve_file(os.path.join( + RebaselineHTTPRequestHandler.STATIC_FILE_DIRECTORY, static_path)) + + def rebaseline(self): + test = self.query['test'][0] + baseline_target = self.query['baseline-target'][0] + baseline_move_to = self.query['baseline-move-to'][0] + test_json = self.server.results_json['tests'][test] + + if test_json['state'] != STATE_NEEDS_REBASELINE: + self.send_error(400, "Test %s is in unexpected state: %s" % + (test, test_json["state"])) + return + + log = [] + success = _rebaseline_test( + test, + baseline_target, + baseline_move_to, + self.server.test_config, + log=lambda l: log.append(l)) + + if success: + test_json['state'] = STATE_REBASELINE_SUCCEEDED + self.send_response(200) + else: + test_json['state'] = STATE_REBASELINE_FAILED + self.send_response(500) + + self.send_header('Content-type', 'text/plain') + self.end_headers() + self.wfile.write('\n'.join(log)) + + def quitquitquit(self): + self.send_response(200) + self.send_header("Content-type", "text/plain") + self.end_headers() + self.wfile.write("Quit.\n") + + # Shutdown has to happen on another thread from the server's thread, + # otherwise there's a deadlock + threading.Thread(target=lambda: self.server.shutdown()).start() + + def test_result(self): + test_name, _ = os.path.splitext(self.query['test'][0]) + mode = self.query['mode'][0] + if mode == 'expected-image': + file_name = test_name + '-expected.png' + elif mode == 'actual-image': + file_name = test_name + '-actual.png' + if mode == 'expected-checksum': + file_name = test_name + '-expected.checksum' + elif mode == 'actual-checksum': + file_name = test_name + '-actual.checksum' + elif mode == 'diff-image': + file_name = test_name + '-diff.png' + if mode == 'expected-text': + file_name = test_name + '-expected.txt' + elif mode == 'actual-text': + file_name = test_name + '-actual.txt' + elif mode == 'diff-text': + file_name = test_name + '-diff.txt' + elif mode == 'diff-text-pretty': + file_name = test_name + '-pretty-diff.html' + + file_path = os.path.join(self.server.test_config.results_directory, file_name) + + # Let results be cached for 60 seconds, so that they can be pre-fetched + # by the UI + self._serve_file(file_path, cacheable_seconds=60) + + def results_json(self): + self._serve_json(self.server.results_json) + + def platforms_json(self): + self._serve_json(self.server.platforms_json) + + def _serve_json(self, json): + self.send_response(200) + self.send_header('Content-type', 'application/json') + self.end_headers() + simplejson.dump(json, self.wfile) + + def _serve_file(self, file_path, cacheable_seconds=0): + if not os.path.exists(file_path): + self.send_error(404, "File not found") + return + with codecs.open(file_path, "rb") as static_file: + self.send_response(200) + self.send_header("Content-Length", os.path.getsize(file_path)) + mime_type, encoding = mimetypes.guess_type(file_path) + if mime_type: + self.send_header("Content-type", mime_type) + + if cacheable_seconds: + expires_time = (datetime.datetime.now() + + datetime.timedelta(0, cacheable_seconds)) + expires_formatted = format_date_time( + time.mktime(expires_time.timetuple())) + 
self.send_header("Expires", expires_formatted) + self.end_headers() + + shutil.copyfileobj(static_file, self.wfile) + + +class TestConfig(object): + def __init__(self, test_port, layout_tests_directory, results_directory, platforms, filesystem, scm): + self.test_port = test_port + self.layout_tests_directory = layout_tests_directory + self.results_directory = results_directory + self.platforms = platforms + self.filesystem = filesystem + self.scm = scm + + +def _get_actual_result_files(test_file, test_config): + test_name, _ = os.path.splitext(test_file) + test_directory = os.path.dirname(test_file) + + test_results_directory = test_config.filesystem.join( + test_config.results_directory, test_directory) + actual_pattern = os.path.basename(test_name) + '-actual.*' + actual_files = [] + for filename in test_config.filesystem.listdir(test_results_directory): + if fnmatch.fnmatch(filename, actual_pattern): + actual_files.append(filename) + actual_files.sort() + return tuple(actual_files) + + +def _rebaseline_test(test_file, baseline_target, baseline_move_to, test_config, log): + test_name, _ = os.path.splitext(test_file) + test_directory = os.path.dirname(test_name) + + log('Rebaselining %s...' % test_name) + + actual_result_files = _get_actual_result_files(test_file, test_config) + filesystem = test_config.filesystem + scm = test_config.scm + layout_tests_directory = test_config.layout_tests_directory + results_directory = test_config.results_directory + target_expectations_directory = filesystem.join( + layout_tests_directory, 'platform', baseline_target, test_directory) + test_results_directory = test_config.filesystem.join( + test_config.results_directory, test_directory) + + # If requested, move current baselines out + current_baselines = _get_test_baselines(test_file, test_config) + if baseline_target in current_baselines and baseline_move_to != 'none': + log(' Moving current %s baselines to %s' % + (baseline_target, baseline_move_to)) + + # See which ones we need to move (only those that are about to be + # updated), and make sure we're not clobbering any files in the + # destination. + current_extensions = set(current_baselines[baseline_target].keys()) + actual_result_extensions = [ + os.path.splitext(f)[1] for f in actual_result_files] + extensions_to_move = current_extensions.intersection( + actual_result_extensions) + + if extensions_to_move.intersection( + current_baselines.get(baseline_move_to, {}).keys()): + log(' Already had baselines in %s, could not move existing ' + '%s ones' % (baseline_move_to, baseline_target)) + return False + + # Do the actual move. 
+ if extensions_to_move: + if not _move_test_baselines( + test_file, + list(extensions_to_move), + baseline_target, + baseline_move_to, + test_config, + log): + return False + else: + log(' No current baselines to move') + + log(' Updating baselines for %s' % baseline_target) + filesystem.maybe_make_directory(target_expectations_directory) + for source_file in actual_result_files: + source_path = filesystem.join(test_results_directory, source_file) + destination_file = source_file.replace('-actual', '-expected') + destination_path = filesystem.join( + target_expectations_directory, destination_file) + filesystem.copyfile(source_path, destination_path) + exit_code = scm.add(destination_path, return_exit_code=True) + if exit_code: + log(' Could not update %s in SCM, exit code %d' % + (destination_file, exit_code)) + return False + else: + log(' Updated %s' % destination_file) + + return True + + +def _move_test_baselines(test_file, extensions_to_move, source_platform, destination_platform, test_config, log): + test_file_name = os.path.splitext(os.path.basename(test_file))[0] + test_directory = os.path.dirname(test_file) + filesystem = test_config.filesystem + + # Want predictable output order for unit tests. + extensions_to_move.sort() + + source_directory = os.path.join( + test_config.layout_tests_directory, + 'platform', + source_platform, + test_directory) + destination_directory = os.path.join( + test_config.layout_tests_directory, + 'platform', + destination_platform, + test_directory) + filesystem.maybe_make_directory(destination_directory) + + for extension in extensions_to_move: + file_name = test_file_name + '-expected' + extension + source_path = filesystem.join(source_directory, file_name) + destination_path = filesystem.join(destination_directory, file_name) + filesystem.copyfile(source_path, destination_path) + exit_code = test_config.scm.add(destination_path, return_exit_code=True) + if exit_code: + log(' Could not update %s in SCM, exit code %d' % + (file_name, exit_code)) + return False + else: + log(' Moved %s' % file_name) + + return True + +def _get_test_baselines(test_file, test_config): + class AllPlatformsPort(WebKitPort): + def __init__(self): + WebKitPort.__init__(self, filesystem=test_config.filesystem) + self._platforms_by_directory = dict( + [(self._webkit_baseline_path(p), p) for p in test_config.platforms]) + + def baseline_search_path(self): + return self._platforms_by_directory.keys() + + def platform_from_directory(self, directory): + return self._platforms_by_directory[directory] + + test_path = test_config.filesystem.join( + test_config.layout_tests_directory, test_file) + + all_platforms_port = AllPlatformsPort() + + all_test_baselines = {} + for baseline_extension in ('.txt', '.checksum', '.png'): + test_baselines = test_config.test_port.expected_baselines( + test_path, baseline_extension) + baselines = all_platforms_port.expected_baselines( + test_path, baseline_extension, all_baselines=True) + for platform_directory, expected_filename in baselines: + if not platform_directory: + continue + if platform_directory == test_config.layout_tests_directory: + platform = 'base' + else: + platform = all_platforms_port.platform_from_directory( + platform_directory) + platform_baselines = all_test_baselines.setdefault(platform, {}) + was_used_for_test = ( + platform_directory, expected_filename) in test_baselines + platform_baselines[baseline_extension] = was_used_for_test + + return all_test_baselines + + +class RebaselineServer(AbstractDeclarativeCommand): + name 
= "rebaseline-server" + help_text = __doc__ + argument_names = "/path/to/results/directory" + + def __init__(self): + options = [ + make_option("--httpd-port", action="store", type="int", default=8127, help="Port to use for the the rebaseline HTTP server"), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def execute(self, options, args, tool): + results_directory = args[0] + filesystem = system.filesystem.FileSystem() + scm = self._tool.scm() + + if options.dry_run: + + def no_op_copyfile(src, dest): + pass + + def no_op_add(path, return_exit_code=False): + if return_exit_code: + return 0 + + filesystem.copyfile = no_op_copyfile + scm.add = no_op_add + + print 'Parsing unexpected_results.json...' + results_json_path = filesystem.join( + results_directory, 'unexpected_results.json') + with codecs.open(results_json_path, "r") as results_json_file: + results_json_file = file(results_json_path) + results_json = simplejson.load(results_json_file) + + port = factory.get() + layout_tests_directory = port.layout_tests_dir() + platforms = filesystem.listdir( + filesystem.join(layout_tests_directory, 'platform')) + test_config = TestConfig( + port, + layout_tests_directory, + results_directory, + platforms, + filesystem, + scm) + + print 'Gathering current baselines...' + for test_file, test_json in results_json['tests'].items(): + test_json['state'] = STATE_NEEDS_REBASELINE + test_path = filesystem.join(layout_tests_directory, test_file) + test_json['baselines'] = _get_test_baselines(test_file, test_config) + + server_url = "http://localhost:%d/" % options.httpd_port + print "Starting server at %s" % server_url + print ("Use the 'Exit' link in the UI, %squitquitquit " + "or Ctrl-C to stop") % server_url + + threading.Timer( + .1, lambda: self._tool.user.open_url(server_url)).start() + + httpd = RebaselineHTTPServer( + httpd_port=options.httpd_port, + test_config=test_config, + results_json=results_json, + platforms_json={ + 'platforms': platforms, + 'defaultPlatform': port.name(), + }) + httpd.serve_forever() diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaselineserver_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver_unittest.py new file mode 100644 index 0000000..f4371f4 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/rebaselineserver_unittest.py @@ -0,0 +1,304 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.system import filesystem_mock +from webkitpy.layout_tests.port import base +from webkitpy.layout_tests.port.webkit import WebKitPort +from webkitpy.tool.commands import rebaselineserver +from webkitpy.tool.mocktool import MockSCM + + +class RebaselineTestTest(unittest.TestCase): + def test_text_rebaseline_update(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + 'platform/mac/fast/text-expected.txt', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='none', + expected_success=True, + expected_log=[ + 'Rebaselining fast/text...', + ' Updating baselines for mac', + ' Updated text-expected.txt', + ]) + + def test_text_rebaseline_new(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='none', + expected_success=True, + expected_log=[ + 'Rebaselining fast/text...', + ' Updating baselines for mac', + ' Updated text-expected.txt', + ]) + + def test_text_rebaseline_move_no_op_1(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + 'platform/win/fast/text-expected.txt', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='mac-leopard', + expected_success=True, + expected_log=[ + 'Rebaselining fast/text...', + ' Updating baselines for mac', + ' Updated text-expected.txt', + ]) + + def test_text_rebaseline_move_no_op_2(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + 'platform/mac/fast/text-expected.checksum', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='mac-leopard', + expected_success=True, + expected_log=[ + 'Rebaselining fast/text...', + ' Moving current mac baselines to mac-leopard', + ' No current baselines to move', + ' Updating baselines for mac', + ' Updated text-expected.txt', + ]) + + def test_text_rebaseline_move(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + 'platform/mac/fast/text-expected.txt', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='mac-leopard', + expected_success=True, + expected_log=[ + 'Rebaselining fast/text...', + ' Moving current mac baselines to mac-leopard', + ' Moved text-expected.txt', + ' Updating baselines for mac', + ' Updated text-expected.txt', + ]) + + def test_text_rebaseline_move_only_images(self): + self._assertRebaseline( + test_files=( + 'fast/image-expected.txt', + 'platform/mac/fast/image-expected.txt', + 'platform/mac/fast/image-expected.png', + 'platform/mac/fast/image-expected.checksum', + ), + results_files=( + 'fast/image-actual.png', + 'fast/image-actual.checksum', + ), + test_name='fast/image.html', + 
baseline_target='mac', + baseline_move_to='mac-leopard', + expected_success=True, + expected_log=[ + 'Rebaselining fast/image...', + ' Moving current mac baselines to mac-leopard', + ' Moved image-expected.checksum', + ' Moved image-expected.png', + ' Updating baselines for mac', + ' Updated image-expected.checksum', + ' Updated image-expected.png', + ]) + + def test_text_rebaseline_move_already_exist(self): + self._assertRebaseline( + test_files=( + 'fast/text-expected.txt', + 'platform/mac-leopard/fast/text-expected.txt', + 'platform/mac/fast/text-expected.txt', + ), + results_files=( + 'fast/text-actual.txt', + ), + test_name='fast/text.html', + baseline_target='mac', + baseline_move_to='mac-leopard', + expected_success=False, + expected_log=[ + 'Rebaselining fast/text...', + ' Moving current mac baselines to mac-leopard', + ' Already had baselines in mac-leopard, could not move existing mac ones', + ]) + + def test_image_rebaseline(self): + self._assertRebaseline( + test_files=( + 'fast/image-expected.txt', + 'platform/mac/fast/image-expected.png', + 'platform/mac/fast/image-expected.checksum', + ), + results_files=( + 'fast/image-actual.png', + 'fast/image-actual.checksum', + ), + test_name='fast/image.html', + baseline_target='mac', + baseline_move_to='none', + expected_success=True, + expected_log=[ + 'Rebaselining fast/image...', + ' Updating baselines for mac', + ' Updated image-expected.checksum', + ' Updated image-expected.png', + ]) + + def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log): + log = [] + test_config = get_test_config(test_files, results_files) + success = rebaselineserver._rebaseline_test( + test_name, + baseline_target, + baseline_move_to, + test_config, + log=lambda l: log.append(l)) + self.assertEqual(expected_log, log) + self.assertEqual(expected_success, success) + + +class GetActualResultFilesTest(unittest.TestCase): + def test(self): + test_config = get_test_config(result_files=( + 'fast/text-actual.txt', + 'fast2/text-actual.txt', + 'fast/text2-actual.txt', + 'fast/text-notactual.txt', + )) + self.assertEqual( + ('text-actual.txt',), + rebaselineserver._get_actual_result_files( + 'fast/text.html', test_config)) + + +class GetBaselinesTest(unittest.TestCase): + def test_no_baselines(self): + self._assertBaselines( + test_files=(), + test_name='fast/missing.html', + expected_baselines={}) + + def test_text_baselines(self): + self._assertBaselines( + test_files=( + 'fast/text-expected.txt', + 'platform/mac/fast/text-expected.txt', + ), + test_name='fast/text.html', + expected_baselines={ + 'mac': {'.txt': True}, + 'base': {'.txt': False}, + }) + + def test_image_and_text_baselines(self): + self._assertBaselines( + test_files=( + 'fast/image-expected.txt', + 'platform/mac/fast/image-expected.png', + 'platform/mac/fast/image-expected.checksum', + 'platform/win/fast/image-expected.png', + 'platform/win/fast/image-expected.checksum', + ), + test_name='fast/image.html', + expected_baselines={ + 'base': {'.txt': True}, + 'mac': {'.checksum': True, '.png': True}, + 'win': {'.checksum': False, '.png': False}, + }) + + def test_extra_baselines(self): + self._assertBaselines( + test_files=( + 'fast/text-expected.txt', + 'platform/nosuchplatform/fast/text-expected.txt', + ), + test_name='fast/text.html', + expected_baselines={'base': {'.txt': True}}) + + def _assertBaselines(self, test_files, test_name, expected_baselines): + actual_baselines = rebaselineserver._get_test_baselines( + 
test_name, get_test_config(test_files)) + self.assertEqual(expected_baselines, actual_baselines) + + +def get_test_config(test_files=[], result_files=[]): + layout_tests_directory = base.Port().layout_tests_dir() + results_directory = '/WebKitBuild/Debug/layout-test-results' + mock_filesystem = filesystem_mock.MockFileSystem() + for file in test_files: + file_path = mock_filesystem.join(layout_tests_directory, file) + mock_filesystem.files[file_path] = '' + for file in result_files: + file_path = mock_filesystem.join(results_directory, file) + mock_filesystem.files[file_path] = '' + + class TestMacPort(WebKitPort): + def __init__(self): + WebKitPort.__init__(self, filesystem=mock_filesystem) + self._name = 'mac' + + return rebaselineserver.TestConfig( + TestMacPort(), + layout_tests_directory, + results_directory, + ('mac', 'mac-leopard', 'win', 'linux'), + mock_filesystem, + MockSCM()) diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py new file mode 100644 index 0000000..145f485 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py @@ -0,0 +1,106 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
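A side note on the get_test_config helper above: it builds an entirely in-memory fixture by seeding a MockFileSystem with empty files at the layout-test and results paths the rebaseline code will read. A condensed sketch of that seeding pattern, using a hypothetical stand-in (FakeFileSystem and populate are illustrative names, not webkitpy API):

import posixpath

class FakeFileSystem(object):
    # Minimal stand-in for webkitpy's MockFileSystem: a path -> contents map.
    def __init__(self):
        self.files = {}

    def join(self, *parts):
        return posixpath.join(*parts)

def populate(filesystem, root, relative_paths):
    # Each listed test or result file becomes an empty entry in the fake tree.
    for relative_path in relative_paths:
        filesystem.files[filesystem.join(root, relative_path)] = ''

fs = FakeFileSystem()
populate(fs, '/LayoutTests', ['fast/text-expected.txt', 'platform/mac/fast/text-expected.txt'])
assert '/LayoutTests/platform/mac/fast/text-expected.txt' in fs.files

Because nothing touches the real disk, the rebaseline tests above can make their assertions about baseline handling without a real checkout or results directory.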
+ +import os + +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.config.ports import WebKitPort +from webkitpy.tool.bot.sheriff import Sheriff +from webkitpy.tool.bot.sheriffircbot import SheriffIRCBot +from webkitpy.tool.commands.queues import AbstractQueue +from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler + + +class SheriffBot(AbstractQueue, StepSequenceErrorHandler): + name = "sheriff-bot" + watchers = AbstractQueue.watchers + [ + "abarth@webkit.org", + "eric@webkit.org", + ] + + def _update(self): + self.run_webkit_patch(["update", "--force-clean", "--quiet"]) + + # AbstractQueue methods + + def begin_work_queue(self): + AbstractQueue.begin_work_queue(self) + self._sheriff = Sheriff(self._tool, self) + self._irc_bot = SheriffIRCBot(self._tool, self._sheriff) + self._tool.ensure_irc_connected(self._irc_bot.irc_delegate()) + + def work_item_log_path(self, failure_map): + return None + + def _is_old_failure(self, revision): + return self._tool.status_server.svn_revision(revision) + + def next_work_item(self): + self._irc_bot.process_pending_messages() + self._update() + + # FIXME: We need to figure out how to provoke_flaky_builders. + + failure_map = self._tool.buildbot.failure_map() + failure_map.filter_out_old_failures(self._is_old_failure) + if failure_map.is_empty(): + return None + return failure_map + + def should_proceed_with_work_item(self, failure_map): + # Currently, we don't have any reasons not to proceed with work items. + return True + + def process_work_item(self, failure_map): + failing_revisions = failure_map.failing_revisions() + for revision in failing_revisions: + builders = failure_map.builders_failing_for(revision) + tests = failure_map.tests_failing_for(revision) + try: + commit_info = self._tool.checkout().commit_info_for_revision(revision) + if not commit_info: + print "FAILED to fetch CommitInfo for r%s, likely missing ChangeLog" % revision + continue + self._sheriff.post_irc_warning(commit_info, builders) + self._sheriff.post_blame_comment_on_bug(commit_info, builders, tests) + + finally: + for builder in builders: + self._tool.status_server.update_svn_revision(revision, builder.name()) + return True + + def handle_unexpected_error(self, failure_map, message): + log(message) + + # StepSequenceErrorHandler methods + + @classmethod + def handle_script_error(cls, tool, state, script_error): + # Ideally we would post some information to IRC about what went wrong + # here, but we don't have the IRC password in the child process. + pass diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py new file mode 100644 index 0000000..4db463e --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py @@ -0,0 +1,57 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.tool.commands.queuestest import QueuesTest +from webkitpy.tool.commands.sheriffbot import SheriffBot +from webkitpy.tool.mocktool import * + + +class SheriffBotTest(QueuesTest): + builder1 = MockBuilder("Builder1") + builder2 = MockBuilder("Builder2") + + def test_sheriff_bot(self): + tool = MockTool() + mock_work_item = MockFailureMap(tool.buildbot) + expected_stderr = { + "begin_work_queue": self._default_begin_work_queue_stderr("sheriff-bot", tool.scm().checkout_root), + "next_work_item": "", + "process_work_item": """MOCK: irc.post: abarth, darin, eseidel: http://trac.webkit.org/changeset/29837 might have broken Builder1 +MOCK bug comment: bug_id=42, cc=['abarth@webkit.org', 'eric@webkit.org'] +--- Begin comment --- +http://trac.webkit.org/changeset/29837 might have broken Builder1 +The following tests are not passing: +mock-test-1 +--- End comment --- + +""", + "handle_unexpected_error": "Mock error message\n" + } + self.assert_queue_outputs(SheriffBot(), work_item=mock_work_item, expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/commands/stepsequence.py b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py new file mode 100644 index 0000000..be2ed4c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/stepsequence.py @@ -0,0 +1,83 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import webkitpy.tool.steps as steps + +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.checkout.scm import CheckoutNeedsUpdate +from webkitpy.tool.bot.queueengine import QueueEngine +from webkitpy.common.system.deprecated_logging import log + + +class StepSequenceErrorHandler(): + @classmethod + def handle_script_error(cls, tool, patch, script_error): + raise NotImplementedError, "subclasses must implement" + + @classmethod + def handle_checkout_needs_update(cls, tool, state, options, error): + raise NotImplementedError, "subclasses must implement" + + +class StepSequence(object): + def __init__(self, steps): + self._steps = steps or [] + + def options(self): + collected_options = [ + steps.Options.parent_command, + steps.Options.quiet, + ] + for step in self._steps: + collected_options = collected_options + step.options() + # Remove duplicates. + collected_options = sorted(set(collected_options)) + return collected_options + + def _run(self, tool, options, state): + for step in self._steps: + step(tool, options).run(state) + + def run_and_handle_errors(self, tool, options, state=None): + if not state: + state = {} + try: + self._run(tool, options, state) + except CheckoutNeedsUpdate, e: + log("Commit failed because the checkout is out of date. Please update and try again.") + if options.parent_command: + command = tool.command_by_name(options.parent_command) + command.handle_checkout_needs_update(tool, state, options, e) + QueueEngine.exit_after_handled_error(e) + except ScriptError, e: + if not options.quiet: + log(e.message_with_output()) + if options.parent_command: + command = tool.command_by_name(options.parent_command) + command.handle_script_error(tool, state, e) + QueueEngine.exit_after_handled_error(e) diff --git a/Tools/Scripts/webkitpy/tool/commands/upload.py b/Tools/Scripts/webkitpy/tool/commands/upload.py new file mode 100644 index 0000000..e12c8e2 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/upload.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python +# Copyright (c) 2009, 2010 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import re +import sys + +from optparse import make_option + +import webkitpy.tool.steps as steps + +from webkitpy.common.config.committers import CommitterList +from webkitpy.common.net.bugzilla import parse_bug_id +from webkitpy.common.system.user import User +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand +from webkitpy.tool.grammar import pluralize, join_with_separators +from webkitpy.tool.comments import bug_comment_from_svn_revision +from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand +from webkitpy.common.system.deprecated_logging import error, log + + +class CommitMessageForCurrentDiff(AbstractDeclarativeCommand): + name = "commit-message" + help_text = "Print a commit message suitable for the uncommitted changes" + + def __init__(self): + options = [ + steps.Options.git_commit, + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def execute(self, options, args, tool): + # This command is a useful test to make sure commit_message_for_this_commit + # always returns the right value regardless of the current working directory. + print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message() + + +class CleanPendingCommit(AbstractDeclarativeCommand): + name = "clean-pending-commit" + help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list." + + # NOTE: This was designed to be generic, but right now we're only processing patches from the pending-commit list, so only r+ matters. + def _flags_to_clear_on_patch(self, patch): + if not patch.is_obsolete(): + return None + what_was_cleared = [] + if patch.review() == "+": + if patch.reviewer(): + what_was_cleared.append("%s's review+" % patch.reviewer().full_name) + else: + what_was_cleared.append("review+") + return join_with_separators(what_was_cleared) + + def execute(self, options, args, tool): + committers = CommitterList() + for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list(): + bug = self._tool.bugs.fetch_bug(bug_id) + patches = bug.patches(include_obsolete=True) + for patch in patches: + flags_to_clear = self._flags_to_clear_on_patch(patch) + if not flags_to_clear: + continue + message = "Cleared %s from obsolete attachment %s so that this bug does not appear in http://webkit.org/pending-commit." % (flags_to_clear, patch.id()) + self._tool.bugs.obsolete_attachment(patch.id(), message) + + +# FIXME: This should be share more logic with AssignToCommitter and CleanPendingCommit +class CleanReviewQueue(AbstractDeclarativeCommand): + name = "clean-review-queue" + help_text = "Clear r? on obsolete patches so they do not appear in the pending-commit list." 
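+    # The list being cleared here is http://webkit.org/pending-review
+    # (see queue_url in execute() below).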
+
+    def execute(self, options, args, tool):
+        queue_url = "http://webkit.org/pending-review"
+        # We do this inefficient dance to be more like webkit.org/pending-review
+        # bugs.queries.fetch_bug_ids_from_review_queue() doesn't return
+        # closed bugs, but folks using /pending-review will see them. :(
+        for patch_id in tool.bugs.queries.fetch_attachment_ids_from_review_queue():
+            patch = self._tool.bugs.fetch_attachment(patch_id)
+            if not patch.review() == "?":
+                continue
+            attachment_obsolete_modifier = ""
+            bug_closed_explanation = ""
+            if patch.is_obsolete():
+                attachment_obsolete_modifier = "obsolete "
+            elif patch.bug().is_closed():
+                bug_closed_explanation = " If you would like this patch reviewed, please attach it to a new bug (or re-open this bug before marking it for review again)."
+            else:
+                # The patch is neither obsolete nor on a closed bug; move on to the next patch.
+                continue
+            message = "Cleared review? from %sattachment %s so that this bug does not appear in %s.%s" % (attachment_obsolete_modifier, patch.id(), queue_url, bug_closed_explanation)
+            self._tool.bugs.obsolete_attachment(patch.id(), message)
+
+
+class AssignToCommitter(AbstractDeclarativeCommand):
+    name = "assign-to-committer"
+    help_text = "Assign bug to whoever attached the most recent r+'d patch"
+
+    def _patches_have_commiters(self, reviewed_patches):
+        for patch in reviewed_patches:
+            if not patch.committer():
+                return False
+        return True
+
+    def _assign_bug_to_last_patch_attacher(self, bug_id):
+        committers = CommitterList()
+        bug = self._tool.bugs.fetch_bug(bug_id)
+        if not bug.is_unassigned():
+            assigned_to_email = bug.assigned_to_email()
+            log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
+            return
+
+        reviewed_patches = bug.reviewed_patches()
+        if not reviewed_patches:
+            log("Bug %s has no non-obsolete patches, ignoring." % bug_id)
+            return
+
+        # We only need to do anything with this bug if one of the r+'d patches does not have a valid committer (cq+ set).
+        if self._patches_have_commiters(reviewed_patches):
+            log("All reviewed patches on bug %s already have commit-queue+, ignoring." % bug_id)
+            return
+
+        latest_patch = reviewed_patches[-1]
+        attacher_email = latest_patch.attacher_email()
+        committer = committers.committer_by_email(attacher_email)
+        if not committer:
+            log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
+            return
+
+        reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
+        self._tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
+
+    def execute(self, options, args, tool):
+        for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
+            self._assign_bug_to_last_patch_attacher(bug_id)
+
+
+class ObsoleteAttachments(AbstractSequencedCommand):
+    name = "obsolete-attachments"
+    help_text = "Mark all attachments on a bug as obsolete"
+    argument_names = "BUGID"
+    steps = [
+        steps.ObsoletePatches,
+    ]
+
+    def _prepare_state(self, options, args, tool):
+        return { "bug_id" : args[0] }
+
+
+class AbstractPatchUploadingCommand(AbstractSequencedCommand):
+    def _bug_id(self, options, args, tool, state):
+        # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
+ bug_id = args and args[0] + if not bug_id: + changed_files = self._tool.scm().changed_files(options.git_commit) + state["changed_files"] = changed_files + bug_id = tool.checkout().bug_id_for_this_commit(options.git_commit, changed_files) + return bug_id + + def _prepare_state(self, options, args, tool): + state = {} + state["bug_id"] = self._bug_id(options, args, tool, state) + if not state["bug_id"]: + error("No bug id passed and no bug url found in ChangeLogs.") + return state + + +class Post(AbstractPatchUploadingCommand): + name = "post" + help_text = "Attach the current working directory diff to a bug as a patch file" + argument_names = "[BUGID]" + steps = [ + steps.CheckStyle, + steps.ConfirmDiff, + steps.ObsoletePatches, + steps.SuggestReviewers, + steps.PostDiff, + ] + + +class LandSafely(AbstractPatchUploadingCommand): + name = "land-safely" + help_text = "Land the current diff via the commit-queue" + argument_names = "[BUGID]" + long_help = """land-safely updates the ChangeLog with the reviewer listed + in bugs.webkit.org for BUGID (or the bug ID detected from the ChangeLog). + The command then uploads the current diff to the bug and marks it for + commit by the commit-queue.""" + show_in_main_help = True + steps = [ + steps.UpdateChangeLogsWithReviewer, + steps.ObsoletePatches, + steps.PostDiffForCommit, + ] + + +class Prepare(AbstractSequencedCommand): + name = "prepare" + help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs" + argument_names = "[BUGID]" + steps = [ + steps.PromptForBugOrTitle, + steps.CreateBug, + steps.PrepareChangeLog, + ] + + def _prepare_state(self, options, args, tool): + bug_id = args and args[0] + return { "bug_id" : bug_id } + + +class Upload(AbstractPatchUploadingCommand): + name = "upload" + help_text = "Automates the process of uploading a patch for review" + argument_names = "[BUGID]" + show_in_main_help = True + steps = [ + steps.CheckStyle, + steps.PromptForBugOrTitle, + steps.CreateBug, + steps.PrepareChangeLog, + steps.EditChangeLog, + steps.ConfirmDiff, + steps.ObsoletePatches, + steps.SuggestReviewers, + steps.PostDiff, + ] + long_help = """upload uploads the current diff to bugs.webkit.org. + If no bug id is provided, upload will create a bug. + If the current diff does not have a ChangeLog, upload + will prepare a ChangeLog. 
Once a patch is read, upload + will open the ChangeLogs for editing using the command in the + EDITOR environment variable and will display the diff using the + command in the PAGER environment variable.""" + + def _prepare_state(self, options, args, tool): + state = {} + state["bug_id"] = self._bug_id(options, args, tool, state) + return state + + +class EditChangeLogs(AbstractSequencedCommand): + name = "edit-changelogs" + help_text = "Opens modified ChangeLogs in $EDITOR" + show_in_main_help = True + steps = [ + steps.EditChangeLog, + ] + + +class PostCommits(AbstractDeclarativeCommand): + name = "post-commits" + help_text = "Attach a range of local commits to bugs as patch files" + argument_names = "COMMITISH" + + def __init__(self): + options = [ + make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), + make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."), + make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"), + steps.Options.obsolete_patches, + steps.Options.review, + steps.Options.request_commit, + ] + AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True) + + def _comment_text_for_commit(self, options, commit_message, tool, commit_id): + comment_text = None + if (options.add_log_as_comment): + comment_text = commit_message.body(lstrip=True) + comment_text += "---\n" + comment_text += tool.scm().files_changed_summary_for_commit(commit_id) + return comment_text + + def execute(self, options, args, tool): + commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) + if len(commit_ids) > 10: # We could lower this limit, 10 is too many for one bug as-is. + error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids)))) + + have_obsoleted_patches = set() + for commit_id in commit_ids: + commit_message = tool.scm().commit_message_for_local_commit(commit_id) + + # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs). + bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch(git_commit=commit_id)) + if not bug_id: + log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id) + continue + + if options.obsolete_patches and bug_id not in have_obsoleted_patches: + state = { "bug_id": bug_id } + steps.ObsoletePatches(tool, options).run(state) + have_obsoleted_patches.add(bug_id) + + diff = tool.scm().create_patch(git_commit=commit_id) + description = options.description or commit_message.description(lstrip=True, strip_url=True) + comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id) + tool.bugs.add_patch_to_bug(bug_id, diff, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + +# FIXME: This command needs to be brought into the modern age with steps and CommitInfo. 
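A minimal, self-contained sketch of the bug-id fallback order that PostCommits.execute() above relies on: an explicit --bug-id wins, then a bug URL in the commit message, then one in the full diff. The underscore-prefixed helpers are illustrative stand-ins only; in particular _parse_bug_id approximates webkitpy's parse_bug_id and is not the real implementation.

    import re

    def _parse_bug_id(text):
        # Illustrative stand-in for webkitpy.common.net.bugzilla.parse_bug_id:
        # pull a bug number out of a bugs.webkit.org URL embedded in free text.
        match = re.search(r"bugs\.webkit\.org/show_bug\.cgi\?id=(\d+)", text)
        return int(match.group(1)) if match else None

    def _bug_id_for_commit(option_bug_id, commit_message, diff_text):
        # Same fallback order as PostCommits.execute() above: --bug-id first,
        # then a bug URL in the commit message, then one in the diff.
        return option_bug_id or _parse_bug_id(commit_message) or _parse_bug_id(diff_text)

    assert _bug_id_for_commit(None, "Fix crash\nhttps://bugs.webkit.org/show_bug.cgi?id=42\n", "") == 42
    assert _bug_id_for_commit(7, "no bug url in this message", "") == 7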
+class MarkBugFixed(AbstractDeclarativeCommand): + name = "mark-bug-fixed" + help_text = "Mark the specified bug as fixed" + argument_names = "[SVN_REVISION]" + def __init__(self): + options = [ + make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."), + make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."), + make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."), + make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + # FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here. + def _fetch_commit_log(self, tool, svn_revision): + if not svn_revision: + return tool.scm().last_svn_commit_log() + return tool.scm().svn_commit_log(svn_revision) + + def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision): + commit_log = self._fetch_commit_log(tool, svn_revision) + + if not bug_id: + bug_id = parse_bug_id(commit_log) + + if not svn_revision: + match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE) + if match: + svn_revision = match.group('svn_revision') + + if not bug_id or not svn_revision: + not_found = [] + if not bug_id: + not_found.append("bug id") + if not svn_revision: + not_found.append("svn revision") + error("Could not find %s on command-line or in %s." + % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit")) + + return (bug_id, svn_revision) + + def execute(self, options, args, tool): + bug_id = options.bug_id + + svn_revision = args and args[0] + if svn_revision: + if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE): + svn_revision = svn_revision[1:] + if not re.match("^[0-9]+$", svn_revision): + error("Invalid svn revision: '%s'" % svn_revision) + + needs_prompt = False + if not bug_id or not svn_revision: + needs_prompt = True + (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision) + + log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"])) + log("Revision: %s" % svn_revision) + + if options.open_bug: + tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id)) + + if needs_prompt: + if not tool.user.confirm("Is this correct?"): + exit(1) + + bug_comment = bug_comment_from_svn_revision(svn_revision) + if options.comment: + bug_comment = "%s\n\n%s" % (options.comment, bug_comment) + + if options.update_only: + log("Adding comment to Bug %s." % bug_id) + tool.bugs.post_comment_to_bug(bug_id, bug_comment) + else: + log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id) + tool.bugs.close_bug_as_fixed(bug_id, bug_comment) + + +# FIXME: Requires unit test. Blocking issue: too complex for now. 
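A small sketch of the SVN revision normalization that MarkBugFixed.execute() above performs on its optional SVN_REVISION argument: accept "r9876" or "9876", strip the leading "r", and reject anything else. The real command reports problems through webkit-patch's error() helper; a ValueError is used here only to keep the sketch self-contained.

    import re

    def _normalize_svn_revision(revision_arg):
        # Mirrors the argument handling in MarkBugFixed.execute() above.
        if re.match("^r[0-9]+$", revision_arg, re.IGNORECASE):
            revision_arg = revision_arg[1:]
        if not re.match("^[0-9]+$", revision_arg):
            raise ValueError("Invalid svn revision: '%s'" % revision_arg)
        return revision_arg

    assert _normalize_svn_revision("r9876") == "9876"
    assert _normalize_svn_revision("9876") == "9876"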
+class CreateBug(AbstractDeclarativeCommand): + name = "create-bug" + help_text = "Create a bug from local changes or local commits" + argument_names = "[COMMITISH]" + + def __init__(self): + options = [ + steps.Options.cc, + steps.Options.component, + make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."), + make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."), + make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."), + ] + AbstractDeclarativeCommand.__init__(self, options=options) + + def create_bug_from_commit(self, options, args, tool): + commit_ids = tool.scm().commit_ids_from_commitish_arguments(args) + if len(commit_ids) > 3: + error("Are you sure you want to create one bug with %s patches?" % len(commit_ids)) + + commit_id = commit_ids[0] + + bug_title = "" + comment_text = "" + if options.prompt: + (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() + else: + commit_message = tool.scm().commit_message_for_local_commit(commit_id) + bug_title = commit_message.description(lstrip=True, strip_url=True) + comment_text = commit_message.body(lstrip=True) + comment_text += "---\n" + comment_text += tool.scm().files_changed_summary_for_commit(commit_id) + + diff = tool.scm().create_patch(git_commit=commit_id) + bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + if bug_id and len(commit_ids) > 1: + options.bug_id = bug_id + options.obsolete_patches = False + # FIXME: We should pass through --no-comment switch as well. + PostCommits.execute(self, options, commit_ids[1:], tool) + + def create_bug_from_patch(self, options, args, tool): + bug_title = "" + comment_text = "" + if options.prompt: + (bug_title, comment_text) = self.prompt_for_bug_title_and_comment() + else: + commit_message = tool.checkout().commit_message_for_this_commit(options.git_commit) + bug_title = commit_message.description(lstrip=True, strip_url=True) + comment_text = commit_message.body(lstrip=True) + + diff = tool.scm().create_patch(options.git_commit) + bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit) + + def prompt_for_bug_title_and_comment(self): + bug_title = User.prompt("Bug title: ") + print "Bug comment (hit ^D on blank line to end):" + lines = sys.stdin.readlines() + try: + sys.stdin.seek(0, os.SEEK_END) + except IOError: + # Cygwin raises an Illegal Seek (errno 29) exception when the above + # seek() call is made. Ignoring it seems to cause no harm. + # FIXME: Figure out a way to get avoid the exception in the first + # place. 
+ pass + comment_text = "".join(lines) + return (bug_title, comment_text) + + def execute(self, options, args, tool): + if len(args): + if (not tool.scm().supports_local_commits()): + error("Extra arguments not supported; patch is taken from working directory.") + self.create_bug_from_commit(options, args, tool) + else: + self.create_bug_from_patch(options, args, tool) diff --git a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py new file mode 100644 index 0000000..a347b00 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py @@ -0,0 +1,122 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.commands.commandtest import CommandsTest +from webkitpy.tool.commands.upload import * +from webkitpy.tool.mocktool import MockOptions, MockTool + +class UploadCommandsTest(CommandsTest): + def test_commit_message_for_current_diff(self): + tool = MockTool() + expected_stdout = "This is a fake commit message that is at least 50 characters.\n" + self.assert_execute_outputs(CommitMessageForCurrentDiff(), [], expected_stdout=expected_stdout, tool=tool) + + def test_clean_pending_commit(self): + self.assert_execute_outputs(CleanPendingCommit(), []) + + def test_assign_to_committer(self): + tool = MockTool() + expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\nBug 77 is already assigned to foo@foo.com (None).\nBug 76 has no non-obsolete patches, ignoring.\n" + self.assert_execute_outputs(AssignToCommitter(), [], expected_stderr=expected_stderr, tool=tool) + tool.bugs.reassign_bug.assert_called_with(42, "eric@webkit.org", "Attachment 128 was posted by a committer and has review+, assigning to Eric Seidel for commit.") + + def test_obsolete_attachments(self): + expected_stderr = "Obsoleting 2 old patches on bug 42\n" + self.assert_execute_outputs(ObsoleteAttachments(), [42], expected_stderr=expected_stderr) + + def test_post(self): + options = MockOptions() + options.cc = None + options.check_style = True + options.comment = None + options.description = "MOCK description" + options.request_commit = False + options.review = True + options.suggest_reviewers = False + expected_stderr = """Running check-webkit-style +MOCK: user.open_url: file://... +Obsoleting 2 old patches on bug 42 +MOCK add_patch_to_bug: bug_id=42, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False +MOCK: user.open_url: http://example.com/42 +""" + expected_stdout = "Was that diff correct?\n" + self.assert_execute_outputs(Post(), [42], options=options, expected_stdout=expected_stdout, expected_stderr=expected_stderr) + + def test_land_safely(self): + expected_stderr = "Obsoleting 2 old patches on bug 42\nMOCK add_patch_to_bug: bug_id=42, description=Patch for landing, mark_for_review=False, mark_for_commit_queue=False, mark_for_landing=True\n" + self.assert_execute_outputs(LandSafely(), [42], expected_stderr=expected_stderr) + + def test_prepare_diff_with_arg(self): + self.assert_execute_outputs(Prepare(), [42]) + + def test_prepare(self): + expected_stderr = "MOCK create_bug\nbug_title: Mock user response\nbug_description: Mock user response\ncomponent: MOCK component\ncc: MOCK cc\n" + self.assert_execute_outputs(Prepare(), [], expected_stderr=expected_stderr) + + def test_upload(self): + options = MockOptions() + options.cc = None + options.check_style = True + options.comment = None + options.description = "MOCK description" + options.request_commit = False + options.review = True + options.suggest_reviewers = False + expected_stderr = """Running check-webkit-style +MOCK: user.open_url: file://... 
+Obsoleting 2 old patches on bug 42 +MOCK add_patch_to_bug: bug_id=42, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False +MOCK: user.open_url: http://example.com/42 +""" + expected_stdout = "Was that diff correct?\n" + self.assert_execute_outputs(Upload(), [42], options=options, expected_stdout=expected_stdout, expected_stderr=expected_stderr) + + def test_mark_bug_fixed(self): + tool = MockTool() + tool._scm.last_svn_commit_log = lambda: "r9876 |" + options = Mock() + options.bug_id = 42 + options.comment = "MOCK comment" + expected_stderr = """Bug: <http://example.com/42> Bug with two r+'d and cq+'d patches, one of which has an invalid commit-queue setter. +Revision: 9876 +MOCK: user.open_url: http://example.com/42 +Adding comment to Bug 42. +MOCK bug comment: bug_id=42, cc=None +--- Begin comment --- +MOCK comment + +Committed r9876: <http://trac.webkit.org/changeset/9876> +--- End comment --- + +""" + expected_stdout = "Is this correct?\n" + self.assert_execute_outputs(MarkBugFixed(), [], expected_stdout=expected_stdout, expected_stderr=expected_stderr, tool=tool, options=options) + + def test_edit_changelog(self): + self.assert_execute_outputs(EditChangeLogs(), []) diff --git a/Tools/Scripts/webkitpy/tool/comments.py b/Tools/Scripts/webkitpy/tool/comments.py new file mode 100755 index 0000000..771953e --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/comments.py @@ -0,0 +1,42 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# A tool for automating dealing with bugzilla, posting patches, committing +# patches, etc. 
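The comments module that follows builds the "Committed rNNNN: <...>" string used in bug comments. A stand-alone sketch of that formatting; _view_revision_url is an assumed stand-in for webkitpy.common.config.urls.view_revision_url, with its Trac URL shape inferred from the mock output in the test above ("Committed r9876: <http://trac.webkit.org/changeset/9876>").

    def _view_revision_url(svn_revision):
        # Assumed shape of urls.view_revision_url, based on the mock output above.
        return "http://trac.webkit.org/changeset/%s" % svn_revision

    def _bug_comment_from_svn_revision(svn_revision):
        # Mirrors comments.bug_comment_from_svn_revision() defined just below.
        return "Committed r%s: <%s>" % (svn_revision, _view_revision_url(svn_revision))

    assert _bug_comment_from_svn_revision(9876) == "Committed r9876: <http://trac.webkit.org/changeset/9876>"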
+ +from webkitpy.common.config import urls + + +def bug_comment_from_svn_revision(svn_revision): + return "Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision)) + + +def bug_comment_from_commit_text(scm, commit_text): + svn_revision = scm.svn_revision_from_commit_text(commit_text) + return bug_comment_from_svn_revision(svn_revision) diff --git a/Tools/Scripts/webkitpy/tool/grammar.py b/Tools/Scripts/webkitpy/tool/grammar.py new file mode 100644 index 0000000..8db9826 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/grammar.py @@ -0,0 +1,54 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import re + + +def plural(noun): + # This is a dumb plural() implementation that is just enough for our uses. + if re.search("h$", noun): + return noun + "es" + else: + return noun + "s" + + +def pluralize(noun, count): + if count != 1: + noun = plural(noun) + return "%d %s" % (count, noun) + + +def join_with_separators(list_of_strings, separator=', ', only_two_separator=" and ", last_separator=', and '): + if not list_of_strings: + return "" + if len(list_of_strings) == 1: + return list_of_strings[0] + if len(list_of_strings) == 2: + return only_two_separator.join(list_of_strings) + return "%s%s%s" % (separator.join(list_of_strings[:-1]), last_separator, list_of_strings[-1]) diff --git a/Tools/Scripts/webkitpy/tool/grammar_unittest.py b/Tools/Scripts/webkitpy/tool/grammar_unittest.py new file mode 100644 index 0000000..cab71db --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/grammar_unittest.py @@ -0,0 +1,41 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.tool.grammar import join_with_separators + +class GrammarTest(unittest.TestCase): + + def test_join_with_separators(self): + self.assertEqual(join_with_separators(["one"]), "one") + self.assertEqual(join_with_separators(["one", "two"]), "one and two") + self.assertEqual(join_with_separators(["one", "two", "three"]), "one, two, and three") + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/tool/main.py b/Tools/Scripts/webkitpy/tool/main.py new file mode 100755 index 0000000..0006e87 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/main.py @@ -0,0 +1,141 @@ +# Copyright (c) 2010 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# A tool for automating dealing with bugzilla, posting patches, committing patches, etc. 
+ +from optparse import make_option +import os +import threading + +from webkitpy.common.checkout.api import Checkout +from webkitpy.common.checkout.scm import default_scm +from webkitpy.common.config.ports import WebKitPort +from webkitpy.common.net.bugzilla import Bugzilla +from webkitpy.common.net.buildbot import BuildBot +from webkitpy.common.net.irc.ircproxy import IRCProxy +from webkitpy.common.net.statusserver import StatusServer +from webkitpy.common.system.executive import Executive +from webkitpy.common.system.filesystem import FileSystem +from webkitpy.common.system.platforminfo import PlatformInfo +from webkitpy.common.system.user import User +from webkitpy.layout_tests import port +from webkitpy.tool.multicommandtool import MultiCommandTool +import webkitpy.tool.commands as commands + + +class WebKitPatch(MultiCommandTool): + global_options = [ + make_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="enable all logging"), + make_option("--dry-run", action="store_true", dest="dry_run", default=False, help="do not touch remote servers"), + make_option("--status-host", action="store", dest="status_host", type="string", help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."), + make_option("--bot-id", action="store", dest="bot_id", type="string", help="Identifier for this bot (if multiple bots are running for a queue)"), + make_option("--irc-password", action="store", dest="irc_password", type="string", help="Password to use when communicating via IRC."), + make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...)."), + ] + + def __init__(self, path): + MultiCommandTool.__init__(self) + + self._path = path + self.wakeup_event = threading.Event() + # FIXME: All of these shared objects should move off onto a + # separate "Tool" object. WebKitPatch should inherit from + # "Tool" and all these objects should use getters/setters instead of + # manual getter functions (e.g. scm()). + self.bugs = Bugzilla() + self.buildbot = BuildBot() + self.executive = Executive() + self._irc = None + self.filesystem = FileSystem() + self._port = None + self.user = User() + self._scm = None + self._checkout = None + self.status_server = StatusServer() + self.port_factory = port.factory + self.platform = PlatformInfo() + + def scm(self): + # Lazily initialize SCM to not error-out before command line parsing (or when running non-scm commands). + if not self._scm: + self._scm = default_scm() + return self._scm + + def checkout(self): + if not self._checkout: + self._checkout = Checkout(self.scm()) + return self._checkout + + def port(self): + return self._port + + def ensure_irc_connected(self, irc_delegate): + if not self._irc: + self._irc = IRCProxy(irc_delegate) + + def irc(self): + # We don't automatically construct IRCProxy here because constructing + # IRCProxy actually connects to IRC. We want clients to explicitly + # connect to IRC. + return self._irc + + def path(self): + return self._path + + def command_completed(self): + if self._irc: + self._irc.disconnect() + + def should_show_in_main_help(self, command): + if not command.show_in_main_help: + return False + if command.requires_local_commits: + return self.scm().supports_local_commits() + return True + + # FIXME: This may be unnecessary since we pass global options to all commands during execute() as well. 
+ def handle_global_options(self, options): + self._options = options + if options.dry_run: + self.scm().dryrun = True + self.bugs.dryrun = True + if options.status_host: + self.status_server.set_host(options.status_host) + if options.bot_id: + self.status_server.set_bot_id(options.bot_id) + if options.irc_password: + self.irc_password = options.irc_password + # If options.port is None, we'll get the default port for this platform. + self._port = WebKitPort.port(options.port) + + def should_execute_command(self, command): + if command.requires_local_commits and not self.scm().supports_local_commits(): + failure_reason = "%s requires local commits using %s in %s." % (command.name, self.scm().display_name(), self.scm().checkout_root) + return (False, failure_reason) + return (True, None) diff --git a/Tools/Scripts/webkitpy/tool/mocktool.py b/Tools/Scripts/webkitpy/tool/mocktool.py new file mode 100644 index 0000000..30a4bc3 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/mocktool.py @@ -0,0 +1,735 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import threading + +from webkitpy.common.config.committers import CommitterList, Reviewer +from webkitpy.common.checkout.commitinfo import CommitInfo +from webkitpy.common.checkout.scm import CommitMessage +from webkitpy.common.net.bugzilla import Bug, Attachment +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.system.filesystem_mock import MockFileSystem +from webkitpy.thirdparty.mock import Mock + + +def _id_to_object_dictionary(*objects): + dictionary = {} + for thing in objects: + dictionary[thing["id"]] = thing + return dictionary + +# Testing + +# FIXME: The ids should be 1, 2, 3 instead of crazy numbers. 
+ + +_patch1 = { + "id": 197, + "bug_id": 42, + "url": "http://example.com/197", + "name": "Patch1", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "commit-queue": "+", + "committer_email": "foo@bar.com", + "attacher_email": "Contributer1", +} + + +_patch2 = { + "id": 128, + "bug_id": 42, + "url": "http://example.com/128", + "name": "Patch2", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "commit-queue": "+", + "committer_email": "non-committer@example.com", + "attacher_email": "eric@webkit.org", +} + + +_patch3 = { + "id": 103, + "bug_id": 75, + "url": "http://example.com/103", + "name": "Patch3", + "is_obsolete": False, + "is_patch": True, + "review": "?", + "attacher_email": "eric@webkit.org", +} + + +_patch4 = { + "id": 104, + "bug_id": 77, + "url": "http://example.com/103", + "name": "Patch3", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "commit-queue": "?", + "reviewer_email": "foo@bar.com", + "attacher_email": "Contributer2", +} + + +_patch5 = { + "id": 105, + "bug_id": 77, + "url": "http://example.com/103", + "name": "Patch5", + "is_obsolete": False, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +_patch6 = { # Valid committer, but no reviewer. + "id": 106, + "bug_id": 77, + "url": "http://example.com/103", + "name": "ROLLOUT of r3489", + "is_obsolete": False, + "is_patch": True, + "commit-queue": "+", + "committer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +_patch7 = { # Valid review, patch is marked obsolete. + "id": 107, + "bug_id": 76, + "url": "http://example.com/103", + "name": "Patch7", + "is_obsolete": True, + "is_patch": True, + "review": "+", + "reviewer_email": "foo@bar.com", + "attacher_email": "eric@webkit.org", +} + + +# This matches one of Bug.unassigned_emails +_unassigned_email = "webkit-unassigned@lists.webkit.org" +# This is needed for the FlakyTestReporter to believe the bug +# was filed by one of the webkitpy bots. +_commit_queue_email = "commit-queue@webkit.org" + + +# FIXME: The ids should be 1, 2, 3 instead of crazy numbers. 
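A quick usage check of the _id_to_object_dictionary() helper defined earlier in this file, which keys the mock caches by each fixture dictionary's "id" field. The fixtures here are reduced stand-ins rather than the full _patch1/_patch2 dictionaries above.

    def _id_to_object_dictionary(*objects):
        # Same helper as defined above: index each fixture dictionary by "id".
        dictionary = {}
        for thing in objects:
            dictionary[thing["id"]] = thing
        return dictionary

    _patch_a = {"id": 197, "name": "Patch1"}
    _patch_b = {"id": 128, "name": "Patch2"}

    cache = _id_to_object_dictionary(_patch_a, _patch_b)
    assert sorted(cache.keys()) == [128, 197]
    assert cache[197]["name"] == "Patch1"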
+ + +_bug1 = { + "id": 42, + "title": "Bug with two r+'d and cq+'d patches, one of which has an " + "invalid commit-queue setter.", + "reporter_email": "foo@foo.com", + "assigned_to_email": _unassigned_email, + "attachments": [_patch1, _patch2], + "bug_status": "UNCONFIRMED", +} + + +_bug2 = { + "id": 75, + "title": "Bug with a patch needing review.", + "reporter_email": "foo@foo.com", + "assigned_to_email": "foo@foo.com", + "attachments": [_patch3], + "bug_status": "ASSIGNED", +} + + +_bug3 = { + "id": 76, + "title": "The third bug", + "reporter_email": "foo@foo.com", + "assigned_to_email": _unassigned_email, + "attachments": [_patch7], + "bug_status": "NEW", +} + + +_bug4 = { + "id": 77, + "title": "The fourth bug", + "reporter_email": "foo@foo.com", + "assigned_to_email": "foo@foo.com", + "attachments": [_patch4, _patch5, _patch6], + "bug_status": "REOPENED", +} + + +_bug5 = { + "id": 78, + "title": "The fifth bug", + "reporter_email": _commit_queue_email, + "assigned_to_email": "foo@foo.com", + "attachments": [], + "bug_status": "RESOLVED", + "dup_id": 76, +} + + +# FIXME: This should not inherit from Mock +class MockBugzillaQueries(Mock): + + def __init__(self, bugzilla): + Mock.__init__(self) + self._bugzilla = bugzilla + + def _all_bugs(self): + return map(lambda bug_dictionary: Bug(bug_dictionary, self._bugzilla), + self._bugzilla.bug_cache.values()) + + def fetch_bug_ids_from_commit_queue(self): + bugs_with_commit_queued_patches = filter( + lambda bug: bug.commit_queued_patches(), + self._all_bugs()) + return map(lambda bug: bug.id(), bugs_with_commit_queued_patches) + + def fetch_attachment_ids_from_review_queue(self): + unreviewed_patches = sum([bug.unreviewed_patches() + for bug in self._all_bugs()], []) + return map(lambda patch: patch.id(), unreviewed_patches) + + def fetch_patches_from_commit_queue(self): + return sum([bug.commit_queued_patches() + for bug in self._all_bugs()], []) + + def fetch_bug_ids_from_pending_commit_list(self): + bugs_with_reviewed_patches = filter(lambda bug: bug.reviewed_patches(), + self._all_bugs()) + bug_ids = map(lambda bug: bug.id(), bugs_with_reviewed_patches) + # NOTE: This manual hack here is to allow testing logging in + # test_assign_to_committer the real pending-commit query on bugzilla + # will return bugs with patches which have r+, but are also obsolete. + return bug_ids + [76] + + def fetch_patches_from_pending_commit_list(self): + return sum([bug.reviewed_patches() for bug in self._all_bugs()], []) + + def fetch_bugs_matching_search(self, search_string, author_email=None): + return [self._bugzilla.fetch_bug(78), self._bugzilla.fetch_bug(77)] + +_mock_reviewer = Reviewer("Foo Bar", "foo@bar.com") + + +# FIXME: Bugzilla is the wrong Mock-point. Once we have a BugzillaNetwork +# class we should mock that instead. +# Most of this class is just copy/paste from Bugzilla. 
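The pending-commit query above filters bugs down to those with reviewed patches and then maps to bug ids. A simplified sketch of that filter-then-map shape over plain dictionaries; the real code goes through the Bug wrapper and also appends the hard-coded id 76 used for test logging, which is omitted here.

    # Reduced stand-ins: each "bug" is just an id plus its reviewed patches.
    _fake_bugs = [
        {"id": 42, "reviewed_patches": ["patch"]},
        {"id": 75, "reviewed_patches": []},
        {"id": 77, "reviewed_patches": ["patch", "patch"]},
    ]

    def _pending_commit_bug_ids(bugs):
        # Same shape as fetch_bug_ids_from_pending_commit_list() above:
        # keep bugs with at least one reviewed patch, then map to ids.
        bugs_with_reviewed_patches = filter(lambda bug: bug["reviewed_patches"], bugs)
        return map(lambda bug: bug["id"], bugs_with_reviewed_patches)

    assert list(_pending_commit_bug_ids(_fake_bugs)) == [42, 77]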
+# FIXME: This should not inherit from Mock +class MockBugzilla(Mock): + + bug_server_url = "http://example.com" + + bug_cache = _id_to_object_dictionary(_bug1, _bug2, _bug3, _bug4, _bug5) + + attachment_cache = _id_to_object_dictionary(_patch1, + _patch2, + _patch3, + _patch4, + _patch5, + _patch6, + _patch7) + + def __init__(self): + Mock.__init__(self) + self.queries = MockBugzillaQueries(self) + self.committers = CommitterList(reviewers=[_mock_reviewer]) + self._override_patch = None + + def create_bug(self, + bug_title, + bug_description, + component=None, + diff=None, + patch_description=None, + cc=None, + blocked=None, + mark_for_review=False, + mark_for_commit_queue=False): + log("MOCK create_bug") + log("bug_title: %s" % bug_title) + log("bug_description: %s" % bug_description) + if component: + log("component: %s" % component) + if cc: + log("cc: %s" % cc) + if blocked: + log("blocked: %s" % blocked) + return 78 + + def quips(self): + return ["Good artists copy. Great artists steal. - Pablo Picasso"] + + def fetch_bug(self, bug_id): + return Bug(self.bug_cache.get(bug_id), self) + + def set_override_patch(self, patch): + self._override_patch = patch + + def fetch_attachment(self, attachment_id): + if self._override_patch: + return self._override_patch + + attachment_dictionary = self.attachment_cache.get(attachment_id) + if not attachment_dictionary: + print "MOCK: fetch_attachment: %s is not a known attachment id" % attachment_id + return None + bug = self.fetch_bug(attachment_dictionary["bug_id"]) + for attachment in bug.attachments(include_obsolete=True): + if attachment.id() == int(attachment_id): + return attachment + + def bug_url_for_bug_id(self, bug_id): + return "%s/%s" % (self.bug_server_url, bug_id) + + def fetch_bug_dictionary(self, bug_id): + return self.bug_cache.get(bug_id) + + def attachment_url_for_id(self, attachment_id, action="view"): + action_param = "" + if action and action != "view": + action_param = "&action=%s" % action + return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param) + + def set_flag_on_attachment(self, + attachment_id, + flag_name, + flag_value, + comment_text=None, + additional_comment_text=None): + log("MOCK setting flag '%s' to '%s' on attachment '%s' with comment '%s' and additional comment '%s'" % ( + flag_name, flag_value, attachment_id, comment_text, additional_comment_text)) + + def post_comment_to_bug(self, bug_id, comment_text, cc=None): + log("MOCK bug comment: bug_id=%s, cc=%s\n--- Begin comment ---\n%s\n--- End comment ---\n" % ( + bug_id, cc, comment_text)) + + def add_attachment_to_bug(self, + bug_id, + file_or_string, + description, + filename=None, + comment_text=None): + log("MOCK add_attachment_to_bug: bug_id=%s, description=%s filename=%s" % (bug_id, description, filename)) + if comment_text: + log("-- Begin comment --") + log(comment_text) + log("-- End comment --") + + def add_patch_to_bug(self, + bug_id, + diff, + description, + comment_text=None, + mark_for_review=False, + mark_for_commit_queue=False, + mark_for_landing=False): + log("MOCK add_patch_to_bug: bug_id=%s, description=%s, mark_for_review=%s, mark_for_commit_queue=%s, mark_for_landing=%s" % + (bug_id, description, mark_for_review, mark_for_commit_queue, mark_for_landing)) + if comment_text: + log("-- Begin comment --") + log(comment_text) + log("-- End comment --") + + +class MockBuilder(object): + def __init__(self, name): + self._name = name + + def name(self): + return self._name + + def results_url(self): + return 
"http://example.com/builders/%s/results/" % self.name() + + def force_build(self, username, comments): + log("MOCK: force_build: name=%s, username=%s, comments=%s" % ( + self._name, username, comments)) + + +class MockFailureMap(object): + def __init__(self, buildbot): + self._buildbot = buildbot + + def is_empty(self): + return False + + def filter_out_old_failures(self, is_old_revision): + pass + + def failing_revisions(self): + return [29837] + + def builders_failing_for(self, revision): + return [self._buildbot.builder_with_name("Builder1")] + + def tests_failing_for(self, revision): + return ["mock-test-1"] + + +class MockBuildBot(object): + buildbot_host = "dummy_buildbot_host" + def __init__(self): + self._mock_builder1_status = { + "name": "Builder1", + "is_green": True, + "activity": "building", + } + self._mock_builder2_status = { + "name": "Builder2", + "is_green": True, + "activity": "idle", + } + + def builder_with_name(self, name): + return MockBuilder(name) + + def builder_statuses(self): + return [ + self._mock_builder1_status, + self._mock_builder2_status, + ] + + def red_core_builders_names(self): + if not self._mock_builder2_status["is_green"]: + return [self._mock_builder2_status["name"]] + return [] + + def red_core_builders(self): + if not self._mock_builder2_status["is_green"]: + return [self._mock_builder2_status] + return [] + + def idle_red_core_builders(self): + if not self._mock_builder2_status["is_green"]: + return [self._mock_builder2_status] + return [] + + def last_green_revision(self): + return 9479 + + def light_tree_on_fire(self): + self._mock_builder2_status["is_green"] = False + + def failure_map(self): + return MockFailureMap(self) + + +# FIXME: This should not inherit from Mock +class MockSCM(Mock): + + fake_checkout_root = os.path.realpath("/tmp") # realpath is needed to allow for Mac OS X's /private/tmp + + def __init__(self): + Mock.__init__(self) + # FIXME: We should probably use real checkout-root detection logic here. + # os.getcwd() can't work here because other parts of the code assume that "checkout_root" + # will actually be the root. Since getcwd() is wrong, use a globally fake root for now. + self.checkout_root = self.fake_checkout_root + + def changed_files(self, git_commit=None): + return ["MockFile1"] + + def create_patch(self, git_commit, changed_files=None): + return "Patch1" + + def commit_ids_from_commitish_arguments(self, args): + return ["Commitish1", "Commitish2"] + + def commit_message_for_local_commit(self, commit_id): + if commit_id == "Commitish1": + return CommitMessage("CommitMessage1\n" \ + "https://bugs.example.org/show_bug.cgi?id=42\n") + if commit_id == "Commitish2": + return CommitMessage("CommitMessage2\n" \ + "https://bugs.example.org/show_bug.cgi?id=75\n") + raise Exception("Bogus commit_id in commit_message_for_local_commit.") + + def diff_for_revision(self, revision): + return "DiffForRevision%s\n" \ + "http://bugs.webkit.org/show_bug.cgi?id=12345" % revision + + def svn_revision_from_commit_text(self, commit_text): + return "49824" + + def add(self, destination_path, return_exit_code=False): + if return_exit_code: + return 0 + + +class MockCheckout(object): + + _committer_list = CommitterList() + + def commit_info_for_revision(self, svn_revision): + # The real Checkout would probably throw an exception, but this is the only way tests have to get None back at the moment. 
+ if not svn_revision: + return None + return CommitInfo(svn_revision, "eric@webkit.org", { + "bug_id": 42, + "author_name": "Adam Barth", + "author_email": "abarth@webkit.org", + "author": self._committer_list.committer_by_email("abarth@webkit.org"), + "reviewer_text": "Darin Adler", + "reviewer": self._committer_list.committer_by_name("Darin Adler"), + }) + + def bug_id_for_revision(self, svn_revision): + return 12345 + + def recent_commit_infos_for_files(self, paths): + return [self.commit_info_for_revision(32)] + + def modified_changelogs(self, git_commit, changed_files=None): + # Ideally we'd return something more interesting here. The problem is + # that LandDiff will try to actually read the patch from disk! + return [] + + def commit_message_for_this_commit(self, git_commit, changed_files=None): + commit_message = Mock() + commit_message.message = lambda:"This is a fake commit message that is at least 50 characters." + return commit_message + + def apply_patch(self, patch, force=False): + pass + + def apply_reverse_diffs(self, revision): + pass + + def suggested_reviewers(self, git_commit, changed_files=None): + return [_mock_reviewer] + + +class MockUser(object): + + @staticmethod + def prompt(message, repeat=1, raw_input=raw_input): + return "Mock user response" + + def edit(self, files): + pass + + def edit_changelog(self, files): + pass + + def page(self, message): + pass + + def confirm(self, message=None, default='y'): + print message + return default == 'y' + + def can_open_url(self): + return True + + def open_url(self, url): + if url.startswith("file://"): + log("MOCK: user.open_url: file://...") + return + log("MOCK: user.open_url: %s" % url) + + +class MockIRC(object): + + def post(self, message): + log("MOCK: irc.post: %s" % message) + + def disconnect(self): + log("MOCK: irc.disconnect") + + +class MockStatusServer(object): + + def __init__(self, bot_id=None, work_items=None): + self.host = "example.com" + self.bot_id = bot_id + self._work_items = work_items or [] + + def patch_status(self, queue_name, patch_id): + return None + + def svn_revision(self, svn_revision): + return None + + def next_work_item(self, queue_name): + if not self._work_items: + return None + return self._work_items.pop(0) + + def release_work_item(self, queue_name, patch): + log("MOCK: release_work_item: %s %s" % (queue_name, patch.id())) + + def update_work_items(self, queue_name, work_items): + self._work_items = work_items + log("MOCK: update_work_items: %s %s" % (queue_name, work_items)) + + def submit_to_ews(self, patch_id): + log("MOCK: submit_to_ews: %s" % (patch_id)) + + def update_status(self, queue_name, status, patch=None, results_file=None): + log("MOCK: update_status: %s %s" % (queue_name, status)) + return 187 + + def update_svn_revision(self, svn_revision, broken_bot): + return 191 + + def results_url_for_status(self, status_id): + return "http://dummy_url" + + +# FIXME: This should not inherit from Mock +# FIXME: Unify with common.system.executive_mock.MockExecutive. 
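MockStatusServer above hands queued work items out one at a time and returns None when the queue is empty. A self-contained stub reproducing just that behavior, useful for seeing how queue-driven tests consume it; _WorkQueueStub is an illustrative name, not a webkitpy class.

    class _WorkQueueStub(object):
        # Mirrors MockStatusServer's work-item handling above: items are handed
        # out in FIFO order and None signals an empty queue.
        def __init__(self, work_items=None):
            self._work_items = work_items or []

        def next_work_item(self, queue_name):
            if not self._work_items:
                return None
            return self._work_items.pop(0)

    queue = _WorkQueueStub(work_items=[103, 197])
    assert queue.next_work_item("commit-queue") == 103
    assert queue.next_work_item("commit-queue") == 197
    assert queue.next_work_item("commit-queue") is None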
+class MockExecutive(Mock): + def __init__(self, should_log): + self._should_log = should_log + + def run_and_throw_if_fail(self, args, quiet=False): + if self._should_log: + log("MOCK run_and_throw_if_fail: %s" % args) + return "MOCK output of child process" + + def run_command(self, + args, + cwd=None, + input=None, + error_handler=None, + return_exit_code=False, + return_stderr=True, + decode_output=False): + if self._should_log: + log("MOCK run_command: %s" % args) + return "MOCK output of child process" + + +class MockOptions(object): + """Mock implementation of optparse.Values.""" + + def __init__(self, **kwargs): + # The caller can set option values using keyword arguments. We don't + # set any values by default because we don't know how this + # object will be used. Generally speaking unit tests should + # subclass this or provider wrapper functions that set a common + # set of options. + for key, value in kwargs.items(): + self.__dict__[key] = value + + +class MockPort(Mock): + def name(self): + return "MockPort" + + def layout_tests_results_path(self): + return "/mock/results.html" + +class MockTestPort1(object): + + def skips_layout_test(self, test_name): + return test_name in ["media/foo/bar.html", "foo"] + + +class MockTestPort2(object): + + def skips_layout_test(self, test_name): + return test_name == "media/foo/bar.html" + + +class MockPortFactory(object): + + def get_all(self, options=None): + return {"test_port1": MockTestPort1(), "test_port2": MockTestPort2()} + + +class MockPlatformInfo(object): + def display_name(self): + return "MockPlatform 1.0" + + +class MockTool(object): + + def __init__(self, log_executive=False): + self.wakeup_event = threading.Event() + self.bugs = MockBugzilla() + self.buildbot = MockBuildBot() + self.executive = MockExecutive(should_log=log_executive) + self.filesystem = MockFileSystem() + self._irc = None + self.user = MockUser() + self._scm = MockSCM() + self._checkout = MockCheckout() + self.status_server = MockStatusServer() + self.irc_password = "MOCK irc password" + self.port_factory = MockPortFactory() + self.platform = MockPlatformInfo() + + def scm(self): + return self._scm + + def checkout(self): + return self._checkout + + def ensure_irc_connected(self, delegate): + if not self._irc: + self._irc = MockIRC() + + def irc(self): + return self._irc + + def path(self): + return "echo" + + def port(self): + return MockPort() + + +class MockBrowser(object): + params = {} + + def open(self, url): + pass + + def select_form(self, name): + pass + + def __setitem__(self, key, value): + self.params[key] = value + + def submit(self): + return Mock(file) diff --git a/Tools/Scripts/webkitpy/tool/mocktool_unittest.py b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py new file mode 100644 index 0000000..cceaa2e --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py @@ -0,0 +1,59 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from mocktool import MockOptions + + +class MockOptionsTest(unittest.TestCase): + # MockOptions() should implement the same semantics that + # optparse.Values does. + + def test_get__set(self): + # Test that we can still set options after we construct the + # object. + options = MockOptions() + options.foo = 'bar' + self.assertEqual(options.foo, 'bar') + + def test_get__unset(self): + # Test that unset options raise an exception (regular Mock + # objects return an object and hence are different from + # optparse.Values()). + options = MockOptions() + self.assertRaises(AttributeError, lambda: options.foo) + + def test_kwarg__set(self): + # Test that keyword arguments work in the constructor. + options = MockOptions(foo='bar') + self.assertEqual(options.foo, 'bar') + + +if __name__ == '__main__': + unittest.main() diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool.py b/Tools/Scripts/webkitpy/tool/multicommandtool.py new file mode 100644 index 0000000..4848ae5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/multicommandtool.py @@ -0,0 +1,314 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# Copyright (c) 2009 Apple Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# MultiCommandTool provides a framework for writing svn-like/git-like tools +# which are called with the following format: +# tool-name [global options] command-name [command options] + +import sys + +from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option + +from webkitpy.tool.grammar import pluralize +from webkitpy.common.system.deprecated_logging import log + + +class TryAgain(Exception): + pass + + +class Command(object): + name = None + show_in_main_help = False + def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False): + self.help_text = help_text + self.long_help = long_help + self.argument_names = argument_names + self.required_arguments = self._parse_required_arguments(argument_names) + self.options = options + self.requires_local_commits = requires_local_commits + self._tool = None + # option_parser can be overriden by the tool using set_option_parser + # This default parser will be used for standalone_help printing. + self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options) + + # This design is slightly awkward, but we need the + # the tool to be able to create and modify the option_parser + # before it knows what Command to run. + def set_option_parser(self, option_parser): + self.option_parser = option_parser + self._add_options_to_parser() + + def _add_options_to_parser(self): + options = self.options or [] + for option in options: + self.option_parser.add_option(option) + + # The tool calls bind_to_tool on each Command after adding it to its list. + def bind_to_tool(self, tool): + # Command instances can only be bound to one tool at a time. + if self._tool and tool != self._tool: + raise Exception("Command already bound to tool!") + self._tool = tool + + @staticmethod + def _parse_required_arguments(argument_names): + required_args = [] + if not argument_names: + return required_args + split_args = argument_names.split(" ") + for argument in split_args: + if argument[0] == '[': + # For now our parser is rather dumb. Do some minimal validation that + # we haven't confused it. + if argument[-1] != ']': + raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument)) + else: + required_args.append(argument) + return required_args + + def name_with_arguments(self): + usage_string = self.name + if self.options: + usage_string += " [options]" + if self.argument_names: + usage_string += " " + self.argument_names + return usage_string + + def parse_args(self, args): + return self.option_parser.parse_args(args) + + def check_arguments_and_execute(self, options, args, tool=None): + if len(args) < len(self.required_arguments): + log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." 
% ( + pluralize("argument", len(self.required_arguments)), + pluralize("argument", len(args)), + "'%s'" % " ".join(args), + " ".join(self.required_arguments), + tool.name(), + self.name)) + return 1 + return self.execute(options, args, tool) or 0 + + def standalone_help(self): + help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n" + if self.long_help: + help_text += "%s\n\n" % self.long_help + help_text += self.option_parser.format_option_help(IndentedHelpFormatter()) + return help_text + + def execute(self, options, args, tool): + raise NotImplementedError, "subclasses must implement" + + # main() exists so that Commands can be turned into stand-alone scripts. + # Other parts of the code will likely require modification to work stand-alone. + def main(self, args=sys.argv): + (options, args) = self.parse_args(args) + # Some commands might require a dummy tool + return self.check_arguments_and_execute(options, args) + + +# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables. +class AbstractDeclarativeCommand(Command): + help_text = None + argument_names = None + long_help = None + def __init__(self, options=None, **kwargs): + Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs) + + +class HelpPrintingOptionParser(OptionParser): + def __init__(self, epilog_method=None, *args, **kwargs): + self.epilog_method = epilog_method + OptionParser.__init__(self, *args, **kwargs) + + def error(self, msg): + self.print_usage(sys.stderr) + error_message = "%s: error: %s\n" % (self.get_prog_name(), msg) + # This method is overriden to add this one line to the output: + error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name() + self.exit(1, error_message) + + # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog + # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive). + def format_epilog(self, epilog): + if self.epilog_method: + return "\n%s\n" % self.epilog_method() + return "" + + +class HelpCommand(AbstractDeclarativeCommand): + name = "help" + help_text = "Display information about this program or its subcommands" + argument_names = "[COMMAND]" + + def __init__(self): + options = [ + make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"), + ] + AbstractDeclarativeCommand.__init__(self, options) + self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser. + + def _help_epilog(self): + # Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users? 
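+        # The branches below pick which commands appear: all of them when
+        # --all-commands is set, otherwise only those whose show_in_main_help
+        # flag is true. They are then sorted by name and each name is
+        # left-justified to the longest name so the per-command help texts
+        # line up in a column.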
+ if self.show_all_commands: + epilog = "All %prog commands:\n" + relevant_commands = self._tool.commands[:] + else: + epilog = "Common %prog commands:\n" + relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands) + longest_name_length = max(map(lambda command: len(command.name), relevant_commands)) + relevant_commands.sort(lambda a, b: cmp(a.name, b.name)) + command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands) + epilog += "%s\n" % "".join(command_help_texts) + epilog += "See '%prog help --all-commands' to list all commands.\n" + epilog += "See '%prog help COMMAND' for more information on a specific command.\n" + return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name(). + + # FIXME: This is a hack so that we don't show --all-commands as a global option: + def _remove_help_options(self): + for option in self.options: + self.option_parser.remove_option(option.get_opt_string()) + + def execute(self, options, args, tool): + if args: + command = self._tool.command_by_name(args[0]) + if command: + print command.standalone_help() + return 0 + + self.show_all_commands = options.show_all_commands + self._remove_help_options() + self.option_parser.print_help() + return 0 + + +class MultiCommandTool(object): + global_options = None + + def __init__(self, name=None, commands=None): + self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name. + # Allow the unit tests to disable command auto-discovery. + self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name] + self.help_command = self.command_by_name(HelpCommand.name) + # Require a help command, even if the manual test list doesn't include one. + if not self.help_command: + self.help_command = HelpCommand() + self.commands.append(self.help_command) + for command in self.commands: + command.bind_to_tool(self) + + @classmethod + def _add_all_subclasses(cls, class_to_crawl, seen_classes): + for subclass in class_to_crawl.__subclasses__(): + if subclass not in seen_classes: + seen_classes.add(subclass) + cls._add_all_subclasses(subclass, seen_classes) + + @classmethod + def _find_all_commands(cls): + commands = set() + cls._add_all_subclasses(Command, commands) + return sorted(commands) + + def name(self): + return self._name + + def _create_option_parser(self): + usage = "Usage: %prog [options] COMMAND [ARGS]" + return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage) + + @staticmethod + def _split_command_name_from_args(args): + # Assume the first argument which doesn't start with "-" is the command name. 
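+        # For example, ["--global-option", "command", "--option", "arg"] splits
+        # into ("command", ["--global-option", "--option", "arg"]); if no bare
+        # argument is found, the for/else branch returns (None, args[:]) unchanged.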
+ command_index = 0 + for arg in args: + if arg[0] != "-": + break + command_index += 1 + else: + return (None, args[:]) + + command = args[command_index] + return (command, args[:command_index] + args[command_index + 1:]) + + def command_by_name(self, command_name): + for command in self.commands: + if command_name == command.name: + return command + return None + + def path(self): + raise NotImplementedError, "subclasses must implement" + + def command_completed(self): + pass + + def should_show_in_main_help(self, command): + return command.show_in_main_help + + def should_execute_command(self, command): + return True + + def _add_global_options(self, option_parser): + global_options = self.global_options or [] + for option in global_options: + option_parser.add_option(option) + + def handle_global_options(self, options): + pass + + def main(self, argv=sys.argv): + (command_name, args) = self._split_command_name_from_args(argv[1:]) + + option_parser = self._create_option_parser() + self._add_global_options(option_parser) + + command = self.command_by_name(command_name) or self.help_command + if not command: + option_parser.error("%s is not a recognized command" % command_name) + + command.set_option_parser(option_parser) + (options, args) = command.parse_args(args) + self.handle_global_options(options) + + (should_execute, failure_reason) = self.should_execute_command(command) + if not should_execute: + log(failure_reason) + return 0 # FIXME: Should this really be 0? + + while True: + try: + result = command.check_arguments_and_execute(options, args, self) + break + except TryAgain, e: + pass + + self.command_completed() + return result diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py new file mode 100644 index 0000000..c19095c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py @@ -0,0 +1,177 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
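+# The tests below exercise the pattern a MultiCommandTool client follows:
+# subclass Command with a name and an execute() method, subclass
+# MultiCommandTool to override should_execute_command() (the base class main()
+# expects a (should_execute, failure_reason) tuple), and dispatch via main().
+# A rough sketch of that wiring, using hypothetical names for illustration:
+#
+#   class HelloCommand(Command):
+#       name = "hello"
+#       show_in_main_help = True
+#       def __init__(self):
+#           Command.__init__(self, "Print a greeting")
+#       def execute(self, options, args, tool):
+#           print "Hello from %s" % tool.name()
+#
+#   class HelloTool(MultiCommandTool):
+#       def __init__(self):
+#           MultiCommandTool.__init__(self, name="hello-tool", commands=[HelloCommand()])
+#       def should_execute_command(self, command):
+#           return (True, None)
+#
+#   sys.exit(HelloTool().main(["hello-tool", "hello"]))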
+ +import sys +import unittest + +from optparse import make_option + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain + + +class TrivialCommand(Command): + name = "trivial" + show_in_main_help = True + def __init__(self, **kwargs): + Command.__init__(self, "help text", **kwargs) + + def execute(self, options, args, tool): + pass + + +class UncommonCommand(TrivialCommand): + name = "uncommon" + show_in_main_help = False + + +class LikesToRetry(Command): + name = "likes-to-retry" + show_in_main_help = True + + def __init__(self, **kwargs): + Command.__init__(self, "help text", **kwargs) + self.execute_count = 0 + + def execute(self, options, args, tool): + self.execute_count += 1 + if self.execute_count < 2: + raise TryAgain() + + +class CommandTest(unittest.TestCase): + def test_name_with_arguments(self): + command_with_args = TrivialCommand(argument_names="ARG1 ARG2") + self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2") + + command_with_args = TrivialCommand(options=[make_option("--my_option")]) + self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]") + + def test_parse_required_arguments(self): + self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"]) + self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), []) + self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"]) + # Note: We might make our arg parsing smarter in the future and allow this type of arguments string. + self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]") + + def test_required_arguments(self): + two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]") + expected_missing_args_error = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n" + exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_stderr=expected_missing_args_error) + self.assertEqual(exit_code, 1) + + +class TrivialTool(MultiCommandTool): + def __init__(self, commands=None): + MultiCommandTool.__init__(self, name="trivial-tool", commands=commands) + + def path(self): + return __file__ + + def should_execute_command(self, command): + return (True, None) + + +class MultiCommandToolTest(unittest.TestCase): + def _assert_split(self, args, expected_split): + self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split) + + def test_split_args(self): + # MultiCommandToolTest._split_command_name_from_args returns: (command, args) + full_args = ["--global-option", "command", "--option", "arg"] + full_args_expected = ("command", ["--global-option", "--option", "arg"]) + self._assert_split(full_args, full_args_expected) + + full_args = [] + full_args_expected = (None, []) + self._assert_split(full_args, full_args_expected) + + full_args = ["command", "arg"] + full_args_expected = ("command", ["arg"]) + self._assert_split(full_args, full_args_expected) + + def test_command_by_name(self): + # This also tests Command auto-discovery. 
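+        # Auto-discovery: TrivialTool() passes commands=None, so MultiCommandTool
+        # crawls Command.__subclasses__() recursively and instantiates every
+        # subclass that defines a name, which is how "trivial" becomes reachable
+        # here without being registered explicitly.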
+ tool = TrivialTool() + self.assertEqual(tool.command_by_name("trivial").name, "trivial") + self.assertEqual(tool.command_by_name("bar"), None) + + def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0): + exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr) + self.assertEqual(exit_code, expected_exit_code) + + def test_retry(self): + likes_to_retry = LikesToRetry() + tool = TrivialTool(commands=[likes_to_retry]) + tool.main(["tool", "likes-to-retry"]) + self.assertEqual(likes_to_retry.execute_count, 2) + + def test_global_help(self): + tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()]) + expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS] + +Options: + -h, --help show this help message and exit + +Common trivial-tool commands: + trivial help text + +See 'trivial-tool help --all-commands' to list all commands. +See 'trivial-tool help COMMAND' for more information on a specific command. + +""" + self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help) + self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help) + expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS] + +Options: + -h, --help show this help message and exit + +All trivial-tool commands: + help Display information about this program or its subcommands + trivial help text + uncommon help text + +See 'trivial-tool help --all-commands' to list all commands. +See 'trivial-tool help COMMAND' for more information on a specific command. + +""" + self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help) + # Test that arguments can be passed before commands as well + self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help) + + + def test_command_help(self): + command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP") + tool = TrivialTool(commands=[command_with_options]) + expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n" + self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help) + + +if __name__ == "__main__": + unittest.main() diff --git a/Tools/Scripts/webkitpy/tool/steps/__init__.py b/Tools/Scripts/webkitpy/tool/steps/__init__.py new file mode 100644 index 0000000..64d9d05 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/__init__.py @@ -0,0 +1,59 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# FIXME: Is this the right way to do this? +from webkitpy.tool.steps.applypatch import ApplyPatch +from webkitpy.tool.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit +from webkitpy.tool.steps.build import Build +from webkitpy.tool.steps.checkstyle import CheckStyle +from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory +from webkitpy.tool.steps.cleanworkingdirectorywithlocalcommits import CleanWorkingDirectoryWithLocalCommits +from webkitpy.tool.steps.closebug import CloseBug +from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff +from webkitpy.tool.steps.closepatch import ClosePatch +from webkitpy.tool.steps.commit import Commit +from webkitpy.tool.steps.confirmdiff import ConfirmDiff +from webkitpy.tool.steps.createbug import CreateBug +from webkitpy.tool.steps.editchangelog import EditChangeLog +from webkitpy.tool.steps.ensurebuildersaregreen import EnsureBuildersAreGreen +from webkitpy.tool.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded +from webkitpy.tool.steps.obsoletepatches import ObsoletePatches +from webkitpy.tool.steps.options import Options +from webkitpy.tool.steps.postdiff import PostDiff +from webkitpy.tool.steps.postdiffforcommit import PostDiffForCommit +from webkitpy.tool.steps.postdiffforrevert import PostDiffForRevert +from webkitpy.tool.steps.preparechangelogforrevert import PrepareChangeLogForRevert +from webkitpy.tool.steps.preparechangelog import PrepareChangeLog +from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle +from webkitpy.tool.steps.reopenbugafterrollout import ReopenBugAfterRollout +from webkitpy.tool.steps.revertrevision import RevertRevision +from webkitpy.tool.steps.runtests import RunTests +from webkitpy.tool.steps.suggestreviewers import SuggestReviewers +from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer +from webkitpy.tool.steps.update import Update +from webkitpy.tool.steps.validatereviewer import ValidateReviewer diff --git a/Tools/Scripts/webkitpy/tool/steps/abstractstep.py b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py new file mode 100644 index 0000000..5525ea0 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py @@ -0,0 +1,79 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.config.ports import WebKitPort +from webkitpy.tool.steps.options import Options + + +class AbstractStep(object): + def __init__(self, tool, options): + self._tool = tool + self._options = options + + # FIXME: This should use tool.port() + def _run_script(self, script_name, args=None, quiet=False, port=WebKitPort): + log("Running %s" % script_name) + command = [port.script_path(script_name)] + if args: + command.extend(args) + self._tool.executive.run_and_throw_if_fail(command, quiet) + + def _changed_files(self, state): + return self.cached_lookup(state, "changed_files") + + _well_known_keys = { + "bug_title": lambda self, state: self._tool.bugs.fetch_bug(state["bug_id"]).title(), + "changed_files": lambda self, state: self._tool.scm().changed_files(self._options.git_commit), + "diff": lambda self, state: self._tool.scm().create_patch(self._options.git_commit, changed_files=self._changed_files(state)), + "changelogs": lambda self, state: self._tool.checkout().modified_changelogs(self._options.git_commit, changed_files=self._changed_files(state)), + } + + def cached_lookup(self, state, key, promise=None): + if state.get(key): + return state[key] + if not promise: + promise = self._well_known_keys.get(key) + state[key] = promise(self, state) + return state[key] + + def did_modify_checkout(self, state): + state["diff"] = None + state["changelogs"] = None + state["changed_files"] = None + + @classmethod + def options(cls): + return [ + # We need this option here because cached_lookup uses it. :( + Options.git_commit, + ] + + def run(self, state): + raise NotImplementedError, "subclasses must implement" diff --git a/Tools/Scripts/webkitpy/tool/steps/applypatch.py b/Tools/Scripts/webkitpy/tool/steps/applypatch.py new file mode 100644 index 0000000..327ac09 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/applypatch.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + +class ApplyPatch(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.non_interactive, + Options.force_patch, + ] + + def run(self, state): + log("Processing patch %s from bug %s." % (state["patch"].id(), state["patch"].bug_id())) + self._tool.checkout().apply_patch(state["patch"], force=self._options.non_interactive or self._options.force_patch) diff --git a/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py b/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py new file mode 100644 index 0000000..3dcd8d9 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/applypatchwithlocalcommit.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.applypatch import ApplyPatch +from webkitpy.tool.steps.options import Options + +class ApplyPatchWithLocalCommit(ApplyPatch): + @classmethod + def options(cls): + return ApplyPatch.options() + [ + Options.local_commit, + ] + + def run(self, state): + ApplyPatch.run(self, state) + if self._options.local_commit: + commit_message = self._tool.checkout().commit_message_for_this_commit(git_commit=None) + self._tool.scm().commit_locally_with_message(commit_message.message() or state["patch"].name()) diff --git a/Tools/Scripts/webkitpy/tool/steps/build.py b/Tools/Scripts/webkitpy/tool/steps/build.py new file mode 100644 index 0000000..0990b8b --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/build.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
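+# Build delegates to the port's build_webkit_command() via
+# executive.run_and_throw_if_fail(); with build_style "both" it builds the
+# debug and release configurations back to back, and it returns immediately
+# when options.build is false. A rough sketch of driving the step with the
+# mocks from webkitpy.tool.mocktool (illustration only; assumes MockTool's
+# port and executive stand in adequately):
+#
+#   tool = MockTool()
+#   options = MockOptions(build=True, quiet=False, build_style="both")
+#   Build(tool, options).run({})  # runs the build command twice: debug, then release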
+ +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + + +class Build(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.build, + Options.quiet, + Options.build_style, + ] + + def build(self, build_style): + self._tool.executive.run_and_throw_if_fail(self._tool.port().build_webkit_command(build_style=build_style), self._options.quiet) + + def run(self, state): + if not self._options.build: + return + log("Building WebKit") + if self._options.build_style == "both": + self.build("debug") + self.build("release") + else: + self.build(self._options.build_style) diff --git a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py new file mode 100644 index 0000000..af66c50 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py @@ -0,0 +1,66 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import error + +class CheckStyle(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.non_interactive, + Options.check_style, + Options.git_commit, + ] + + def run(self, state): + if not self._options.check_style: + return + os.chdir(self._tool.scm().checkout_root) + + args = [] + if self._options.git_commit: + args.append("--git-commit") + args.append(self._options.git_commit) + + args.append("--diff-files") + args.extend(self._changed_files(state)) + + try: + self._run_script("check-webkit-style", args) + except ScriptError, e: + if self._options.non_interactive: + # We need to re-raise the exception here to have the + # style-queue do the right thing. 
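+                # (In interactive use the error is swallowed instead: the prompt
+                # below lets the user continue past style failures, and answering
+                # "no" exits with status 1 rather than propagating the ScriptError.)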
+ raise e + if not self._tool.user.confirm("Are you sure you want to continue?"): + exit(1) diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py new file mode 100644 index 0000000..9c16242 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options + + +class CleanWorkingDirectory(AbstractStep): + def __init__(self, tool, options, allow_local_commits=False): + AbstractStep.__init__(self, tool, options) + self._allow_local_commits = allow_local_commits + + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.force_clean, + Options.clean, + ] + + def run(self, state): + if not self._options.clean: + return + # FIXME: This chdir should not be necessary. + os.chdir(self._tool.scm().checkout_root) + if not self._allow_local_commits: + self._tool.scm().ensure_no_local_commits(self._options.force_clean) + self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean) diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py new file mode 100644 index 0000000..36a3d2b --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py @@ -0,0 +1,49 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.thirdparty.mock import Mock +from webkitpy.tool.mocktool import MockOptions, MockTool +from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory + + +class CleanWorkingDirectoryTest(unittest.TestCase): + def test_run(self): + tool = MockTool() + step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False)) + step.run({}) + self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 1) + self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 1) + + def test_no_clean(self): + tool = MockTool() + step = CleanWorkingDirectory(tool, MockOptions(clean=False)) + step.run({}) + self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 0) + self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 0) diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py new file mode 100644 index 0000000..f06f94e --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py @@ -0,0 +1,34 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory + +class CleanWorkingDirectoryWithLocalCommits(CleanWorkingDirectory): + def __init__(self, tool, options): + # FIXME: This a bit of a hack. Consider doing this more cleanly. + CleanWorkingDirectory.__init__(self, tool, options, allow_local_commits=True) diff --git a/Tools/Scripts/webkitpy/tool/steps/closebug.py b/Tools/Scripts/webkitpy/tool/steps/closebug.py new file mode 100644 index 0000000..e77bc24 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/closebug.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + + +class CloseBug(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.close_bug, + ] + + def run(self, state): + if not self._options.close_bug: + return + # Check to make sure there are no r? or r+ patches on the bug before closing. + # Assume that r- patches are just previous patches someone forgot to obsolete. + patches = self._tool.bugs.fetch_bug(state["patch"].bug_id()).patches() + for patch in patches: + if patch.review() == "?" or patch.review() == "+": + log("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." 
% (patch.bug_id(), patch.id(), patch.review())) + return + self._tool.bugs.close_bug_as_fixed(state["patch"].bug_id(), "All reviewed patches have been landed. Closing bug.") diff --git a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py new file mode 100644 index 0000000..e5a68db --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff.py @@ -0,0 +1,58 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.comments import bug_comment_from_commit_text +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + + +class CloseBugForLandDiff(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.close_bug, + ] + + def run(self, state): + comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + bug_id = state.get("bug_id") + if not bug_id and state.get("patch"): + bug_id = state.get("patch").bug_id() + + if bug_id: + log("Updating bug %s" % bug_id) + if self._options.close_bug: + self._tool.bugs.close_bug_as_fixed(bug_id, comment_text) + else: + # FIXME: We should a smart way to figure out if the patch is attached + # to the bug, and if so obsolete it. + self._tool.bugs.post_comment_to_bug(bug_id, comment_text) + else: + log(comment_text) + log("No bug id provided.") diff --git a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py new file mode 100644 index 0000000..0a56564 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py @@ -0,0 +1,40 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.mocktool import MockOptions, MockTool +from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff + +class CloseBugForLandDiffTest(unittest.TestCase): + def test_empty_state(self): + capture = OutputCapture() + step = CloseBugForLandDiff(MockTool(), MockOptions()) + expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n" + capture.assert_outputs(self, step.run, [{"commit_text" : "Mock commit text"}], expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/steps/closepatch.py b/Tools/Scripts/webkitpy/tool/steps/closepatch.py new file mode 100644 index 0000000..ff94df8 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/closepatch.py @@ -0,0 +1,36 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.comments import bug_comment_from_commit_text +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class ClosePatch(AbstractStep): + def run(self, state): + comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + self._tool.bugs.clear_attachment_flags(state["patch"].id(), comment_text) diff --git a/Tools/Scripts/webkitpy/tool/steps/commit.py b/Tools/Scripts/webkitpy/tool/steps/commit.py new file mode 100644 index 0000000..5aa6b51 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/commit.py @@ -0,0 +1,81 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.checkout.scm import AuthenticationError, AmbiguousCommitError +from webkitpy.common.config import urls +from webkitpy.common.system.deprecated_logging import log +from webkitpy.common.system.executive import ScriptError +from webkitpy.common.system.user import User +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options + + +class Commit(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.git_commit, + ] + + def _commit_warning(self, error): + working_directory_message = "" if error.working_directory_is_clean else " and working copy changes" + return ('There are %s local commits%s. Everything will be committed as a single commit. ' + 'To avoid this prompt, set "git config webkit-patch.commit-should-always-squash true".' 
% ( + error.num_local_commits, working_directory_message)) + + def run(self, state): + self._commit_message = self._tool.checkout().commit_message_for_this_commit(self._options.git_commit).message() + if len(self._commit_message) < 50: + raise Exception("Attempted to commit with a commit message shorter than 50 characters. Either your patch is missing a ChangeLog or webkit-patch may have a bug.") + + self._state = state + + username = None + force_squash = False + + num_tries = 0 + while num_tries < 3: + num_tries += 1 + + try: + scm = self._tool.scm() + commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, force_squash=force_squash) + svn_revision = scm.svn_revision_from_commit_text(commit_text) + log("Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision))) + self._state["commit_text"] = commit_text + break; + except AmbiguousCommitError, e: + if self._tool.user.confirm(self._commit_warning(e)): + force_squash = True + else: + # This will correctly interrupt the rest of the commit process. + raise ScriptError(message="Did not commit") + except AuthenticationError, e: + username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5) + if not username: + raise ScriptError("You need to specify the username on %s to perform the commit as." % self.svn_server_host) diff --git a/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py b/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py new file mode 100644 index 0000000..7e8e348 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/confirmdiff.py @@ -0,0 +1,77 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
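+# ConfirmDiff pulls the pending diff out of the step state via
+# cached_lookup(state, "diff"), tries to render it with PrettyPatch in the
+# user's browser, falls back to paging the raw diff when that is not possible,
+# and calls exit(1) if the user says the diff is not correct.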
+ +import urllib + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.prettypatch import PrettyPatch +from webkitpy.common.system import logutils +from webkitpy.common.system.executive import ScriptError + + +_log = logutils.get_logger(__file__) + + +class ConfirmDiff(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.confirm, + ] + + def _show_pretty_diff(self, diff): + if not self._tool.user.can_open_url(): + return None + + try: + pretty_patch = PrettyPatch(self._tool.executive, + self._tool.scm().checkout_root) + pretty_diff_file = pretty_patch.pretty_diff_file(diff) + url = "file://%s" % urllib.quote(pretty_diff_file.name) + self._tool.user.open_url(url) + # We return the pretty_diff_file here because we need to keep the + # file alive until the user has had a chance to confirm the diff. + return pretty_diff_file + except ScriptError, e: + _log.warning("PrettyPatch failed. :(") + except OSError, e: + _log.warning("PrettyPatch unavailable.") + + def run(self, state): + if not self._options.confirm: + return + diff = self.cached_lookup(state, "diff") + pretty_diff_file = self._show_pretty_diff(diff) + if not pretty_diff_file: + self._tool.user.page(diff) + diff_correct = self._tool.user.confirm("Was that diff correct?") + if pretty_diff_file: + pretty_diff_file.close() + if not diff_correct: + exit(1) diff --git a/Tools/Scripts/webkitpy/tool/steps/createbug.py b/Tools/Scripts/webkitpy/tool/steps/createbug.py new file mode 100644 index 0000000..0ab6f68 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/createbug.py @@ -0,0 +1,52 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options + + +class CreateBug(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.cc, + Options.component, + Options.blocks, + ] + + def run(self, state): + # No need to create a bug if we already have one. + if state.get("bug_id"): + return + cc = self._options.cc + if not cc: + cc = state.get("bug_cc") + blocks = self._options.blocks + if not blocks: + blocks = state.get("bug_blocked") + state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], blocked=blocks, component=self._options.component, cc=cc) diff --git a/Tools/Scripts/webkitpy/tool/steps/editchangelog.py b/Tools/Scripts/webkitpy/tool/steps/editchangelog.py new file mode 100644 index 0000000..4d9646f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/editchangelog.py @@ -0,0 +1,38 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class EditChangeLog(AbstractStep): + def run(self, state): + os.chdir(self._tool.scm().checkout_root) + self._tool.user.edit_changelog(self.cached_lookup(state, "changelogs")) + self.did_modify_checkout(state) diff --git a/Tools/Scripts/webkitpy/tool/steps/ensurebuildersaregreen.py b/Tools/Scripts/webkitpy/tool/steps/ensurebuildersaregreen.py new file mode 100644 index 0000000..a4fc174 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/ensurebuildersaregreen.py @@ -0,0 +1,48 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log, error + + +class EnsureBuildersAreGreen(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.check_builders, + ] + + def run(self, state): + if not self._options.check_builders: + return + red_builders_names = self._tool.buildbot.red_core_builders_names() + if not red_builders_names: + return + red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names. + log("\nWARNING: Builders [%s] are red, please watch your commit carefully.\nSee http://%s/console?category=core\n" % (", ".join(red_builders_names), self._tool.buildbot.buildbot_host)) diff --git a/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py b/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py new file mode 100644 index 0000000..d0cda46 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/ensurelocalcommitifneeded.py @@ -0,0 +1,43 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import error + + +class EnsureLocalCommitIfNeeded(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.local_commit, + ] + + def run(self, state): + if self._options.local_commit and not self._tool.scm().supports_local_commits(): + error("--local-commit passed, but %s does not support local commits" % self._tool.scm().display_name()) diff --git a/Tools/Scripts/webkitpy/tool/steps/metastep.py b/Tools/Scripts/webkitpy/tool/steps/metastep.py new file mode 100644 index 0000000..7cbd1c5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/metastep.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep + + +# FIXME: Unify with StepSequence? I'm not sure yet which is the better design.
+class MetaStep(AbstractStep): + substeps = [] # Override in subclasses + def __init__(self, tool, options): + AbstractStep.__init__(self, tool, options) + self._step_instances = [] + for step_class in self.substeps: + self._step_instances.append(step_class(tool, options)) + + @staticmethod + def _collect_options_from_steps(steps): + collected_options = [] + for step in steps: + collected_options = collected_options + step.options() + return collected_options + + @classmethod + def options(cls): + return cls._collect_options_from_steps(cls.substeps) + + def run(self, state): + for step in self._step_instances: + step.run(state) diff --git a/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py b/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py new file mode 100644 index 0000000..de508c6 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.grammar import pluralize +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + + +class ObsoletePatches(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.obsolete_patches, + ] + + def run(self, state): + if not self._options.obsolete_patches: + return + bug_id = state["bug_id"] + patches = self._tool.bugs.fetch_bug(bug_id).patches() + if not patches: + return + log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id)) + for patch in patches: + self._tool.bugs.obsolete_attachment(patch.id()) diff --git a/Tools/Scripts/webkitpy/tool/steps/options.py b/Tools/Scripts/webkitpy/tool/steps/options.py new file mode 100644 index 0000000..5b8baf0 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/options.py @@ -0,0 +1,59 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from optparse import make_option + +class Options(object): + blocks = make_option("--blocks", action="store", type="string", dest="blocks", default=None, help="Bug number which the created bug blocks.") + build = make_option("--build", action="store_true", dest="build", default=False, help="Build and run run-webkit-tests before committing.") + build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.") + cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.") + check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing.") + check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.") + clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches") + close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.") + comment = make_option("--comment", action="store", type="string", dest="comment", help="Comment to post to bug.") + component = make_option("--component", action="store", type="string", dest="component", help="Component for the new bug.") + confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.") + description = make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: \"patch\")") + email = make_option("--email", action="store", type="string", dest="email", help="Email address to use in ChangeLogs.") + force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean 
working directory before applying patches (removes local changes and commits)") + force_patch = make_option("--force-patch", action="store_true", dest="force_patch", default=False, help="Forcefully applies the patch, continuing past errors.") + git_commit = make_option("-g", "--git-commit", action="store", dest="git_commit", help="Operate on a local commit. If a range, the commits are squashed into one. HEAD.. operates on working copy changes only.") + local_commit = make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch") + non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user, fail as fast as possible.") + obsolete_patches = make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one.") + open_bug = make_option("--open-bug", action="store_true", dest="open_bug", default=False, help="Opens the associated bug in a browser.") + parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.") + quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.") + request_commit = make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review.") + review = make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review.") + reviewer = make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER.") + suggest_reviewers = make_option("--suggest-reviewers", action="store_true", default=False, help="Offer to CC appropriate reviewers.") + test = make_option("--test", action="store_true", dest="test", default=False, help="Run run-webkit-tests before committing.") + update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.") diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiff.py b/Tools/Scripts/webkitpy/tool/steps/postdiff.py new file mode 100644 index 0000000..c40b6ff --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/postdiff.py @@ -0,0 +1,50 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options + + +class PostDiff(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.description, + Options.comment, + Options.review, + Options.request_commit, + Options.open_bug, + ] + + def run(self, state): + diff = self.cached_lookup(state, "diff") + description = self._options.description or "Patch" + comment_text = self._options.comment + self._tool.bugs.add_patch_to_bug(state["bug_id"], diff, description, comment_text=comment_text, mark_for_review=self._options.review, mark_for_commit_queue=self._options.request_commit) + if self._options.open_bug: + self._tool.user.open_url(self._tool.bugs.bug_url_for_bug_id(state["bug_id"])) diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py b/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py new file mode 100644 index 0000000..13bc00c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/postdiffforcommit.py @@ -0,0 +1,39 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
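The Options class defined earlier in this diff is just a namespace of optparse make_option objects; each step advertises the subset it needs through its options() classmethod, and the enclosing command is expected to merge those lists into a single parser. A rough sketch of that aggregation under those assumptions; the collect_options helper and the two toy steps below are hypothetical, not part of webkitpy.

    from optparse import OptionParser, make_option

    class QuietStep(object):
        @classmethod
        def options(cls):
            return [make_option("--quiet", action="store_true", dest="quiet", default=False)]

    class UpdateStep(object):
        @classmethod
        def options(cls):
            return [make_option("--no-update", action="store_false", dest="update", default=True)]

    def collect_options(step_classes):
        # Hypothetical helper: gather every step's options, skipping duplicates by dest.
        seen = set()
        collected = []
        for step_class in step_classes:
            for option in step_class.options():
                if option.dest in seen:
                    continue
                seen.add(option.dest)
                collected.append(option)
        return collected

    parser = OptionParser(option_list=collect_options([QuietStep, UpdateStep]))
    options, args = parser.parse_args(["--quiet"])
    # options.quiet is now True; options.update keeps its default of True.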
+ +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class PostDiffForCommit(AbstractStep): + def run(self, state): + self._tool.bugs.add_patch_to_bug( + state["bug_id"], + self.cached_lookup(state, "diff"), + "Patch for landing", + mark_for_review=False, + mark_for_landing=True) diff --git a/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py b/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py new file mode 100644 index 0000000..bfa631f --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/postdiffforrevert.py @@ -0,0 +1,49 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.common.net.bugzilla import Attachment +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class PostDiffForRevert(AbstractStep): + def run(self, state): + comment_text = "Any committer can land this patch automatically by \ +marking it commit-queue+. The commit-queue will build and test \ +the patch before landing to ensure that the rollout will be \ +successful. This process takes approximately 15 minutes.\n\n\ +If you would like to land the rollout faster, you can use the \ +following command:\n\n\ + webkit-patch land-attachment ATTACHMENT_ID --ignore-builders\n\n\ +where ATTACHMENT_ID is the ID of this attachment." + self._tool.bugs.add_patch_to_bug( + state["bug_id"], + self.cached_lookup(state, "diff"), + "%s%s" % (Attachment.rollout_preamble, state["revision"]), + comment_text=comment_text, + mark_for_review=False, + mark_for_commit_queue=True) diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py new file mode 100644 index 0000000..099dfe3 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py @@ -0,0 +1,77 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.common.checkout.changelog import ChangeLog +from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import error + + +class PrepareChangeLog(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.quiet, + Options.email, + Options.git_commit, + ] + + def _ensure_bug_url(self, state): + if not state.get("bug_id"): + return + bug_id = state.get("bug_id") + changelogs = self.cached_lookup(state, "changelogs") + for changelog_path in changelogs: + changelog = ChangeLog(changelog_path) + if not changelog.latest_entry().bug_id(): + changelog.set_short_description_and_bug_url( + self.cached_lookup(state, "bug_title"), + self._tool.bugs.bug_url_for_bug_id(bug_id)) + + def run(self, state): + if self.cached_lookup(state, "changelogs"): + self._ensure_bug_url(state) + return + os.chdir(self._tool.scm().checkout_root) + args = [self._tool.port().script_path("prepare-ChangeLog")] + if state.get("bug_id"): + args.append("--bug=%s" % state["bug_id"]) + if self._options.email: + args.append("--email=%s" % self._options.email) + + if self._tool.scm().supports_local_commits(): + args.append("--merge-base=%s" % self._tool.scm().merge_base(self._options.git_commit)) + + try: + self._tool.executive.run_and_throw_if_fail(args, self._options.quiet) + except ScriptError, e: + error("Unable to prepare ChangeLogs.") + self.did_modify_checkout(state) diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py new file mode 100644 index 0000000..eceffdf --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py @@ -0,0 +1,54 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os +import unittest + +from webkitpy.common.checkout.changelog_unittest import ChangeLogTest +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.tool.mocktool import MockOptions, MockTool +from webkitpy.tool.steps.preparechangelog import PrepareChangeLog + + +class PrepareChangeLogTest(ChangeLogTest): + def test_ensure_bug_url(self): + capture = OutputCapture() + step = PrepareChangeLog(MockTool(), MockOptions()) + changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog) + changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8")) + state = { + "bug_title": "Example title", + "bug_id": 1234, + "changelogs": [changelog_path], + } + capture.assert_outputs(self, step.run, [state]) + actual_contents = self._read_file_contents(changelog_path, "utf-8") + expected_message = "Example title\n http://example.com/1234" + expected_contents = changelog_contents.replace("Need a short description and bug URL (OOPS!)", expected_message) + os.remove(changelog_path) + self.assertEquals(actual_contents, expected_contents) diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py new file mode 100644 index 0000000..1e47a6a --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py @@ -0,0 +1,44 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +from webkitpy.common.checkout.changelog import ChangeLog +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class PrepareChangeLogForRevert(AbstractStep): + def run(self, state): + # This could move to prepare-ChangeLog by adding a --revert= option. + self._run_script("prepare-ChangeLog") + changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None) + bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None + for changelog_path in changelog_paths: + # FIXME: Seems we should prepare the message outside of changelogs.py and then just pass in + # text that we want to use to replace the reviewed by line. + ChangeLog(changelog_path).update_for_revert(state["revision_list"], state["reason"], bug_url) diff --git a/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py b/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py new file mode 100644 index 0000000..31c913c --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py @@ -0,0 +1,45 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class PromptForBugOrTitle(AbstractStep): + def run(self, state): + # No need to prompt if we already have the bug_id. + if state.get("bug_id"): + return + user_response = self._tool.user.prompt("Please enter a bug number or a title for a new bug:\n") + # If the user responds with a number, we assume it's a bug number. + # Otherwise we assume it's a bug subject. + try: + state["bug_id"] = int(user_response) + except (ValueError, TypeError): + state["bug_title"] = user_response + # FIXME: This is kind of a lame description. + state["bug_description"] = user_response diff --git a/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py b/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py new file mode 100644 index 0000000..f369ca9 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/reopenbugafterrollout.py @@ -0,0 +1,44 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.comments import bug_comment_from_commit_text +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.common.system.deprecated_logging import log + + +class ReopenBugAfterRollout(AbstractStep): + def run(self, state): + commit_comment = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"]) + comment_text = "Reverted r%s for reason:\n\n%s\n\n%s" % (state["revision"], state["reason"], commit_comment) + + bug_id = state["bug_id"] + if not bug_id: + log(comment_text) + log("No bugs were updated.") + return + self._tool.bugs.reopen_bug(bug_id, comment_text) diff --git a/Tools/Scripts/webkitpy/tool/steps/revertrevision.py b/Tools/Scripts/webkitpy/tool/steps/revertrevision.py new file mode 100644 index 0000000..8016be5 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/revertrevision.py @@ -0,0 +1,35 @@ +# Copyright (C) 2010 Google Inc. All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep + + +class RevertRevision(AbstractStep): + def run(self, state): + self._tool.checkout().apply_reverse_diffs(state["revision_list"]) + self.did_modify_checkout(state) diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py new file mode 100644 index 0000000..282e381 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py @@ -0,0 +1,81 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
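PromptForBugOrTitle, a little further up in this diff, classifies the user's response by attempting int(): a number is treated as an existing bug id, while anything else becomes the title (and, for now, the description) of a new bug. A tiny standalone sketch of that check, using a hypothetical classify_response helper; note that ValueError and TypeError must be caught as a tuple for both to be handled.

    def classify_response(user_response):
        # Hypothetical helper mirroring the int-vs-title check in PromptForBugOrTitle.
        state = {}
        try:
            state["bug_id"] = int(user_response)
        except (ValueError, TypeError):
            state["bug_title"] = user_response
            state["bug_description"] = user_response
        return state

    print(classify_response("12345"))         # {'bug_id': 12345}
    print(classify_response("Crash in Foo"))  # {'bug_title': 'Crash in Foo', 'bug_description': 'Crash in Foo'}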
+ +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options +from webkitpy.common.system.deprecated_logging import log + +class RunTests(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.test, + Options.non_interactive, + Options.quiet, + ] + + def run(self, state): + if not self._options.test: + return + + # Run the scripting unit tests first because they're quickest. + log("Running Python unit tests") + self._tool.executive.run_and_throw_if_fail(self._tool.port().run_python_unittests_command()) + log("Running Perl unit tests") + self._tool.executive.run_and_throw_if_fail(self._tool.port().run_perl_unittests_command()) + + javascriptcore_tests_command = self._tool.port().run_javascriptcore_tests_command() + if javascriptcore_tests_command: + log("Running JavaScriptCore tests") + self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True) + + log("Running run-webkit-tests") + args = self._tool.port().run_webkit_tests_command() + if self._options.non_interactive: + args.append("--no-new-test-results") + args.append("--no-launch-safari") + args.append("--exit-after-n-failures=1") + args.append("--wait-for-httpd") + # FIXME: Hack to work around https://bugs.webkit.org/show_bug.cgi?id=38912 + # when running the commit-queue on a mac leopard machine since compositing + # does not work reliably on Leopard due to various graphics driver/system bugs. + if self._tool.port().name() == "Mac" and self._tool.port().is_leopard(): + tests_to_ignore = [] + tests_to_ignore.append("compositing") + + # media tests are also broken on mac leopard due to + # a separate CoreVideo bug which causes random crashes/hangs + # https://bugs.webkit.org/show_bug.cgi?id=38912 + tests_to_ignore.append("media") + + args.extend(["--ignore-tests", ",".join(tests_to_ignore)]) + + if self._options.quiet: + args.append("--quiet") + self._tool.executive.run_and_throw_if_fail(args) + diff --git a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py new file mode 100644 index 0000000..783ae29 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py @@ -0,0 +1,92 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.common.system.outputcapture import OutputCapture +from webkitpy.common.config.ports import WebKitPort +from webkitpy.tool.mocktool import MockOptions, MockTool +from webkitpy.tool.steps.update import Update +from webkitpy.tool.steps.runtests import RunTests +from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle + + +class StepsTest(unittest.TestCase): + def _step_options(self): + options = MockOptions() + options.non_interactive = True + options.port = 'MOCK port' + options.quiet = True + options.test = True + return options + + def _run_step(self, step, tool=None, options=None, state=None): + if not tool: + tool = MockTool() + if not options: + options = self._step_options() + if not state: + state = {} + step(tool, options).run(state) + + def test_update_step(self): + tool = MockTool() + options = self._step_options() + options.update = True + expected_stderr = "Updating working directory\n" + OutputCapture().assert_outputs(self, self._run_step, [Update, tool, options], expected_stderr=expected_stderr) + + def test_prompt_for_bug_or_title_step(self): + tool = MockTool() + tool.user.prompt = lambda message: 42 + self._run_step(PromptForBugOrTitle, tool=tool) + + def test_runtests_leopard_commit_queue_hack_step(self): + expected_stderr = "Running Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n" + OutputCapture().assert_outputs(self, self._run_step, [RunTests], expected_stderr=expected_stderr) + + def test_runtests_leopard_commit_queue_hack_command(self): + mock_options = self._step_options() + step = RunTests(MockTool(log_executive=True), mock_options) + # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment. + mock_port = WebKitPort() + mock_port.name = lambda: "Mac" + mock_port.is_leopard = lambda: True + tool = MockTool(log_executive=True) + tool.port = lambda: mock_port + step = RunTests(tool, mock_options) + expected_stderr = """Running Python unit tests +MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitpy'] +Running Perl unit tests +MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl'] +Running JavaScriptCore tests +MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests'] +Running run-webkit-tests +MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--exit-after-n-failures=1', '--wait-for-httpd', '--ignore-tests', 'compositing,media', '--quiet'] +""" + OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr) diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py new file mode 100644 index 0000000..76bef35 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py @@ -0,0 +1,51 @@ +# Copyright (C) 2010 Google Inc. All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +from webkitpy.tool.steps.abstractstep import AbstractStep +from webkitpy.tool.steps.options import Options + + +class SuggestReviewers(AbstractStep): + @classmethod + def options(cls): + return AbstractStep.options() + [ + Options.git_commit, + Options.suggest_reviewers, + ] + + def run(self, state): + if not self._options.suggest_reviewers: + return + + reviewers = self._tool.checkout().suggested_reviewers(self._options.git_commit, self._changed_files(state)) + print "The following reviewers have recently modified files in your patch:" + print "\n".join([reviewer.full_name for reviewer in reviewers]) + if not self._tool.user.confirm("Would you like to CC them?"): + return + reviewer_emails = [reviewer.bugzilla_email() for reviewer in reviewers] + self._tool.bugs.add_cc_to_bug(state['bug_id'], reviewer_emails) diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py new file mode 100644 index 0000000..0c86535 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py @@ -0,0 +1,45 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
+
+
+class SuggestReviewersTest(unittest.TestCase):
+    def test_disabled(self):
+        step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=False))
+        OutputCapture().assert_outputs(self, step.run, [{}])
+
+    def test_basic(self):
+        capture = OutputCapture()
+        step = SuggestReviewers(MockTool(), MockOptions(suggest_reviewers=True, git_commit=None))
+        expected_stdout = "The following reviewers have recently modified files in your patch:\nFoo Bar\nWould you like to CC them?\n"
+        capture.assert_outputs(self, step.run, [{"bug_id": "123"}], expected_stdout=expected_stdout)
diff --git a/Tools/Scripts/webkitpy/tool/steps/update.py b/Tools/Scripts/webkitpy/tool/steps/update.py
new file mode 100644
index 0000000..cd1d4d8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/update.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log
+
+
+class Update(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.update,
+        ]
+
+    def run(self, state):
+        if not self._options.update:
+            return
+        log("Updating working directory")
+        self._tool.executive.run_and_throw_if_fail(self._tool.port().update_webkit_command(), quiet=True)
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
new file mode 100644
index 0000000..b475378
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
+
+class UpdateChangeLogsWithReviewerTest(unittest.TestCase):
+    def test_guess_reviewer_from_bug(self):
+        capture = OutputCapture()
+        step = UpdateChangeLogsWithReviewer(MockTool(), MockOptions())
+        expected_stderr = "0 reviewed patches on bug 75, cannot infer reviewer.\n"
+        capture.assert_outputs(self, step._guess_reviewer_from_bug, [75], expected_stderr=expected_stderr)
+
+    def test_empty_state(self):
+        capture = OutputCapture()
+        options = MockOptions()
+        options.reviewer = 'MOCK reviewer'
+        options.git_commit = 'MOCK git commit'
+        step = UpdateChangeLogsWithReviewer(MockTool(), options)
+        capture.assert_outputs(self, step.run, [{}])
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py
new file mode 100644
index 0000000..e46b790
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreviewer.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.grammar import pluralize
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import log, error
+
+class UpdateChangeLogsWithReviewer(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.git_commit,
+            Options.reviewer,
+        ]
+
+    def _guess_reviewer_from_bug(self, bug_id):
+        patches = self._tool.bugs.fetch_bug(bug_id).reviewed_patches()
+        if len(patches) != 1:
+            log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id))
+            return None
+        patch = patches[0]
+        log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (patch.reviewer().full_name, patch.id(), bug_id))
+        return patch.reviewer().full_name
+
+    def run(self, state):
+        bug_id = state.get("bug_id")
+        if not bug_id and state.get("patch"):
+            bug_id = state.get("patch").bug_id()
+
+        reviewer = self._options.reviewer
+        if not reviewer:
+            if not bug_id:
+                log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.")
+                return
+            reviewer = self._guess_reviewer_from_bug(bug_id)
+
+        if not reviewer:
+            log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id)
+            return
+
+        os.chdir(self._tool.scm().checkout_root)
+        for changelog_path in self.cached_lookup(state, "changelogs"):
+            ChangeLog(changelog_path).set_reviewer(reviewer)
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py b/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py
new file mode 100644
index 0000000..bdf729e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/validatereviewer.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+
+from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.deprecated_logging import error, log
+
+
+# FIXME: Some of this logic should probably be unified with CommitterValidator?
+class ValidateReviewer(AbstractStep):
+    @classmethod
+    def options(cls):
+        return AbstractStep.options() + [
+            Options.git_commit,
+        ]
+
+    # FIXME: This should probably move onto ChangeLogEntry
+    def _has_valid_reviewer(self, changelog_entry):
+        if changelog_entry.reviewer():
+            return True
+        if re.search("unreviewed", changelog_entry.contents(), re.IGNORECASE):
+            return True
+        if re.search("rubber[ -]stamp", changelog_entry.contents(), re.IGNORECASE):
+            return True
+        return False
+
+    def run(self, state):
+        # FIXME: For now we disable this check when a user is driving the script
+        # this check is too draconian (and too poorly tested) to foist upon users.
+        if not self._options.non_interactive:
+            return
+        # FIXME: We should figure out how to handle the current working
+        # directory issue more globally.
+        os.chdir(self._tool.scm().checkout_root)
+        for changelog_path in self.cached_lookup(state, "changelogs"):
+            changelog_entry = ChangeLog(changelog_path).latest_entry()
+            if self._has_valid_reviewer(changelog_entry):
+                continue
+            reviewer_text = changelog_entry.reviewer_text()
+            if reviewer_text:
+                log("%s found in %s does not appear to be a valid reviewer according to committers.py." % (reviewer_text, changelog_path))
+            error('%s neither lists a valid reviewer nor contains the string "Unreviewed" or "Rubber stamp" (case insensitive).' % changelog_path)
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatereviewer_unittest.py b/Tools/Scripts/webkitpy/tool/steps/validatereviewer_unittest.py
new file mode 100644
index 0000000..d9b856a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/validatereviewer_unittest.py
@@ -0,0 +1,57 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.checkout.changelog import ChangeLogEntry
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.validatereviewer import ValidateReviewer
+
+class ValidateReviewerTest(unittest.TestCase):
+    _boilerplate_entry = '''2009-08-19  Eric Seidel  <eric@webkit.org>
+
+        REVIEW_LINE
+
+        * Scripts/bugzilla-tool:
+'''
+
+    def _test_review_text(self, step, text, expected):
+        contents = self._boilerplate_entry.replace("REVIEW_LINE", text)
+        entry = ChangeLogEntry(contents)
+        self.assertEqual(step._has_valid_reviewer(entry), expected)
+
+    def test_has_valid_reviewer(self):
+        step = ValidateReviewer(MockTool(), MockOptions())
+        self._test_review_text(step, "Reviewed by Eric Seidel.", True)
+        self._test_review_text(step, "Reviewed by Eric Seidel", True) # Not picky about the '.'
+        self._test_review_text(step, "Reviewed by Eric.", False)
+        self._test_review_text(step, "Reviewed by Eric C Seidel.", False)
+        self._test_review_text(step, "Rubber-stamped by Eric.", True)
+        self._test_review_text(step, "Rubber stamped by Eric.", True)
+        self._test_review_text(step, "Unreviewed build fix.", True)
diff --git a/Tools/Scripts/wkstyle b/Tools/Scripts/wkstyle
new file mode 100755
index 0000000..4b3447f
--- /dev/null
+++ b/Tools/Scripts/wkstyle
@@ -0,0 +1,89 @@
+
+# Copyright (C) 2006 Michael Emmel<mike.emmel@gmail.com> All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+#    its contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+cmdcpp="astyle \
+--unpad=paren \
+--style=linux \
+--brackets=linux \
+--indent=spaces=4 \
+--indent-switches \
+--convert-tabs"
+
+cmdh="astyle \
+--unpad=paren \
+--style=linux \
+--brackets=break \
+--indent=spaces=4 \
+--convert-tabs"
+
+#astyle does not support unpadding so we use sed
+for i in $@
+do
+echo $i
+
+ext=`echo $i|awk -F . '{print $NF}'`
+
+cmd=$cmdcpp
+
+if [ $ext == "h" ] ; then
+    cmd=$cmdh
+fi
+
+$cmd $i
+
+#first print the changes we are making
+sed -n -e '
+/( .*/p
+s/( /(/gp
+/*. )/p
+s/ )/)/gp
+#supress printing this
+#/^namespace WebCore/{
+#N
+#s/\n{/ {/p
+#}
+' $i
+
+#do it for real
+sed -e '
+#unpad leading spaces
+s/( /(/g
+#unpad traling spaces
+s/ )/)/g
+#fixup the namspec decl
+/^namespace WebCore/{
+N
+s/\n{/ {/
+}
+#fixup extra tab in constructor initalizer
+/^ \+,/{s/^ //}
+/^ \+:/{s/^ //}
+' $i > $i.sed
+mv $i.sed $i
+done
+
+