| author | Ben Murdoch <benm@google.com> | 2011-06-02 12:07:03 +0100 |
|---|---|---|
| committer | Ben Murdoch <benm@google.com> | 2011-06-10 10:47:21 +0100 |
| commit | 2daae5fd11344eaa88a0d92b0f6d65f8d2255c00 | |
| tree | e4964fbd1cb70599f7718ff03e50ea1dab33890b | |
| parent | 87bdf0060a247bfbe668342b87e0874182e0ffa9 | |
Merge WebKit at r84325: Initial merge by git.
Change-Id: Ic1a909300ecc0a13ddc6b4e784371d2ac6e3d59b
Diffstat (limited to 'Tools/Scripts')
105 files changed, 2997 insertions, 2041 deletions
diff --git a/Tools/Scripts/VCSUtils.pm b/Tools/Scripts/VCSUtils.pm index a05a75d..8353f25 100644 --- a/Tools/Scripts/VCSUtils.pm +++ b/Tools/Scripts/VCSUtils.pm @@ -1,6 +1,6 @@  # Copyright (C) 2007, 2008, 2009 Apple Inc.  All rights reserved.  # Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com) -# Copyright (C) Research In Motion Limited 2010. All rights reserved. +# Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.  #  # Redistribution and use in source and binary forms, with or without  # modification, are permitted provided that the following conditions @@ -68,6 +68,7 @@ BEGIN {          &makeFilePathRelative          &mergeChangeLogs          &normalizePath +        &parseFirstEOL          &parsePatch          &pathRelativeToSVNRepositoryRootForPath          &prepareParsedPatch @@ -97,6 +98,7 @@ my $svnVersion;  # Project time zone for Cupertino, CA, US  my $changeLogTimeZone = "PST8PDT"; +my $chunkRangeRegEx = qr#^\@\@ -(\d+),(\d+) \+\d+,(\d+) \@\@$#; # e.g. @@ -2,6 +2,18 @@  my $gitDiffStartRegEx = qr#^diff --git (\w/)?(.+) (\w/)?([^\r\n]+)#;  my $svnDiffStartRegEx = qr#^Index: ([^\r\n]+)#;  my $svnPropertiesStartRegEx = qr#^Property changes on: ([^\r\n]+)#; # $1 is normally the same as the index path. @@ -449,6 +451,40 @@ sub removeEOL($)      return $line;  } +sub parseFirstEOL($) +{ +    my ($fileHandle) = @_; + +    # Make input record separator the new-line character to simplify regex matching below. +    my $savedInputRecordSeparator = $INPUT_RECORD_SEPARATOR; +    $INPUT_RECORD_SEPARATOR = "\n"; +    my $firstLine  = <$fileHandle>; +    $INPUT_RECORD_SEPARATOR = $savedInputRecordSeparator; + +    return unless defined($firstLine); + +    my $eol; +    if ($firstLine =~ /\r\n/) { +        $eol = "\r\n"; +    } elsif ($firstLine =~ /\r/) { +        $eol = "\r"; +    } elsif ($firstLine =~ /\n/) { +        $eol = "\n"; +    } +    return $eol; +} + +sub firstEOLInFile($) +{ +    my ($file) = @_; +    my $eol; +    if (open(FILE, $file)) { +        $eol = parseFirstEOL(*FILE); +        close(FILE); +    } +    return $eol; +} +  sub svnStatus($)  {      my ($fullPath) = @_; @@ -822,23 +858,30 @@ sub parseDiffHeader($$)  #   $fileHandle: a file handle advanced to the first line of the next  #                header block. Leading junk is okay.  #   $line: the line last read from $fileHandle. +#   $optionsHashRef: a hash reference representing optional options to use +#                    when processing a diff. +#     shouldNotUseIndexPathEOL: whether to use the line endings in the diff instead +#                               instead of the line endings in the target file; the +#                               value of 1 if svnConvertedText should use the line +#                               endings in the diff.  #  # Returns ($diffHashRefs, $lastReadLine):  #   $diffHashRefs: A reference to an array of references to %diffHash hashes.  #                  See the %diffHash documentation above.  #   $lastReadLine: the line last read from $fileHandle -sub parseDiff($$) +sub parseDiff($$;$)  {      # FIXME: Adjust this method so that it dies if the first line does not      #        match the start of a diff.  This will require a change to      #        parsePatch() so that parsePatch() skips over leading junk. 
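The two helpers added above, parseFirstEOL() and firstEOLInFile(), detect whether a file uses Windows (\r\n), classic Mac (\r), or Unix (\n) line endings by inspecting its first line. A minimal self-contained sketch of the same detection, using only core Perl and an in-memory file handle; detectEOL is an illustrative name, not a VCSUtils export:

```perl
#!/usr/bin/perl -w
use strict;
use warnings;

# Illustrative re-implementation of the first-line EOL check; not the VCSUtils code itself.
sub detectEOL
{
    my ($line) = @_;
    return unless defined($line);
    return "\r\n" if $line =~ /\r\n/;
    return "\r" if $line =~ /\r/;
    return "\n" if $line =~ /\n/;
    return;
}

# An in-memory file handle stands in for a real target file on disk.
my $contents = "first line\r\nsecond line\r\n";
open(my $fileHandle, "<", \$contents) or die $!;
my $firstLine = <$fileHandle>;
close($fileHandle);

my $eol = detectEOL($firstLine);
print defined($eol) && $eol eq "\r\n" ? "Windows line endings\n" : "other line endings\n";
```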
-    my ($fileHandle, $line) = @_; +    my ($fileHandle, $line, $optionsHashRef) = @_;      my $headerStartRegEx = $svnDiffStartRegEx; # SVN-style header for the default      my $headerHashRef; # Last header found, as returned by parseDiffHeader().      my $svnPropertiesHashRef; # Last SVN properties diff found, as returned by parseSvnDiffProperties().      my $svnText; +    my $indexPathEOL;      while (defined($line)) {          if (!$headerHashRef && ($line =~ $gitDiffStartRegEx)) {              # Then assume all diffs in the patch are Git-formatted. This @@ -861,6 +904,11 @@ sub parseDiff($$)          }          if ($line !~ $headerStartRegEx) {              # Then we are in the body of the diff. +            if ($indexPathEOL && $line !~ /$chunkRangeRegEx/) { +                # The chunk range is part of the body of the diff, but its line endings should't be +                # modified or patch(1) will complain. So, we only modify non-chunk range lines. +                $line =~ s/\r\n|\r|\n/$indexPathEOL/g; +            }              $svnText .= $line;              $line = <$fileHandle>;              next; @@ -873,6 +921,9 @@ sub parseDiff($$)          }          ($headerHashRef, $line) = parseDiffHeader($fileHandle, $line); +        if (!$optionsHashRef || !$optionsHashRef->{shouldNotUseIndexPathEOL}) { +            $indexPathEOL = firstEOLInFile($headerHashRef->{indexPath}) if !$headerHashRef->{isNew} && !$headerHashRef->{isBinary}; +        }          $svnText .= $headerHashRef->{svnConvertedText};      } @@ -1167,13 +1218,19 @@ sub parseSvnPropertyValue($$)  # Args:  #   $fileHandle: A file handle to the patch file that has not yet been  #                read from. +#   $optionsHashRef: a hash reference representing optional options to use +#                    when processing a diff. +#     shouldNotUseIndexPathEOL: whether to use the line endings in the diff instead +#                               instead of the line endings in the target file; the +#                               value of 1 if svnConvertedText should use the line +#                               endings in the diff.  #  # Returns:  #   @diffHashRefs: an array of diff hash references.  #                  See the %diffHash documentation above. -sub parsePatch($) +sub parsePatch($;$)  { -    my ($fileHandle) = @_; +    my ($fileHandle, $optionsHashRef) = @_;      my $newDiffHashRefs;      my @diffHashRefs; # return value @@ -1182,7 +1239,7 @@ sub parsePatch($)      while (defined($line)) { # Otherwise, at EOF. -        ($newDiffHashRefs, $line) = parseDiff($fileHandle, $line); +        ($newDiffHashRefs, $line) = parseDiff($fileHandle, $line, $optionsHashRef);          push @diffHashRefs, @$newDiffHashRefs;      } @@ -1440,7 +1497,6 @@ sub fixChangeLogPatch($)      $deletedLineCount += $dateStartIndex - $chunkStartIndex;      # Update the initial chunk range. -    my $chunkRangeRegEx = '^\@\@ -(\d+),(\d+) \+\d+,(\d+) \@\@$'; # e.g. @@ -2,6 +2,18 @@      if ($lines[$chunkStartIndex - 1] !~ /$chunkRangeRegEx/) {          # FIXME: Handle errors differently from ChangeLog files that          # are okay but should not be altered. 
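With the changes above, parsePatch() and parseDiff() take an optional hash reference; passing shouldNotUseIndexPathEOL => 1 keeps the line endings found in the patch instead of converting them to the line endings of each target file (the updated parseDiff.pl unit test further down does exactly this). A hedged usage sketch, assuming it is run from a WebKit checkout so that VCSUtils.pm in Tools/Scripts is on @INC; the patch file name is a placeholder:

```perl
#!/usr/bin/perl -w
use strict;
use warnings;

use FindBin;
use lib $FindBin::Bin;   # assumes the script sits next to VCSUtils.pm in Tools/Scripts
use VCSUtils;

# "example.patch" is hypothetical; substitute any real patch file.
open(my $fileHandle, "<", "example.patch") or die "Can't open patch: $!";

# Keep the patch's own line endings rather than adopting those of each target file.
my @diffHashRefs = parsePatch($fileHandle, { shouldNotUseIndexPathEOL => 1 });
close($fileHandle);

print "Parsed ", scalar(@diffHashRefs), " diff(s) from the patch.\n";
```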
That way we can find out diff --git a/Tools/Scripts/build-webkit b/Tools/Scripts/build-webkit index 8ff638d..1b4743e 100755 --- a/Tools/Scripts/build-webkit +++ b/Tools/Scripts/build-webkit @@ -96,7 +96,9 @@ my (      $notificationsSupport,      $offlineWebApplicationSupport,      $orientationEventsSupport, +    $pageVisibilityApiSupport,      $progressTagSupport, +    $quotaSupport,      $registerProtocolHandlerSupport,      $sharedWorkersSupport,      $svgSupport, @@ -225,9 +227,15 @@ my @features = (      { option => "orientation-events", desc => "Toggle Orientation Events support",        define => "ENABLE_ORIENTATION_EVENTS", default => 0, value => \$orientationEventsSupport }, +    { option => "page-visibility-api", desc => "Page Visibility API support", +      define => "ENABLE_PAGE_VISIBILITY_API", default => 0, value => \$pageVisibilityApiSupport }, +      { option => "progress-tag", desc => "Progress Tag support",        define => "ENABLE_PROGRESS_TAG", default => 1, value => \$progressTagSupport }, +    { option => "quota", desc => "Toggle Quota support", +      define => "ENABLE_QUOTA", default => 0, value => \$quotaSupport }, +      { option => "register-protocol-handler", desc => "Register Protocol Handler support",        define => "ENABLE_REGISTER_PROTOCOL_HANDLER", default => 0, value => \$registerProtocolHandlerSupport }, @@ -354,6 +362,7 @@ Usage: $programName [options] [options to pass to build system]    --install-headers=<path>          Set installation path for the headers (Qt only)    --install-libs=<path>             Set installation path for the libraries (Qt only)    --v8                              Use V8 as JavaScript engine (Qt only) +  -2                                build WebKit2 (Qt only)    --prefix=<path>                   Set installation prefix to the given path (Gtk/Efl only)    --makeargs=<arguments>            Optional Makefile flags @@ -414,13 +423,16 @@ sub unlinkZeroFiles()  # Check that all the project directories are there.  my @projects = ("Source/JavaScriptCore", "Source/WebCore", "Source/WebKit"); -my @otherDirs = ("WebKitLibraries"); -for my $dir (@projects, @otherDirs) { +for my $dir (@projects) {      if (! -d $dir) {          die "Error: No $dir directory found. Please do a fresh checkout.\n";      }  } +if (!isQt() && !-d "WebKitLibraries") { +    die "Error: No WebKitLibraries directory found. Please do a fresh checkout.\n"; +} +  # Generate the generate project files from .gyp files  if ($useGYP) {      system("perl", "Tools/Scripts/generate-project-files") == 0 or die "Failed to run generate-project-files"; @@ -500,11 +512,16 @@ if (isGtk()) {      # Copy WebKitSupportLibrary to the correct location in WebKitLibraries so it can be found.      # Will fail if WebKitSupportLibrary.zip is not in source root.      (system("perl Tools/Scripts/update-webkit-support-libs") == 0) or die; +    # Update Cairo Dependancies. +    if (isWinCairo()) { +        (system("perl Tools/Scripts/update-webkit-wincairo-libs") == 0) or die; +    }  } elsif (isQt()) { -    @options = @ARGV;      push @options, "--install-headers=" . $installHeaders if defined($installHeaders);      push @options, "--install-libs=" . $installLibs if defined($installLibs);      push @options, "--makeargs=" . 
$makeArgs if $makeArgs; +    push @options, "--qmakearg=CONFIG+=webkit2" if isWK2(); +    @options = (@ARGV, @options);      foreach (@features) {          push @options, "DEFINES+=$_->{define}=${$_->{value}}" if ${$_->{value}} != $_->{default}; diff --git a/Tools/Scripts/do-webcore-rename b/Tools/Scripts/do-webcore-rename index da08cf7..4829f39 100755 --- a/Tools/Scripts/do-webcore-rename +++ b/Tools/Scripts/do-webcore-rename @@ -97,7 +97,7 @@ sub wanted  my $isDOMTypeRename = 0;  my %renames = (      # Renames go here in the form of: -    "DocLoader" => "CachedResourceLoader", +    "MediaControls" => "MediaControlRootElement",  );  my %renamesContemplatedForTheFuture = ( diff --git a/Tools/Scripts/extract-localizable-strings b/Tools/Scripts/extract-localizable-strings index 116f11f..dae60c2 100755 --- a/Tools/Scripts/extract-localizable-strings +++ b/Tools/Scripts/extract-localizable-strings @@ -107,7 +107,7 @@ for my $dir (@directoriesToSkip) {  my @files = ( split "\n", `find $quotedDirectoriesString \\( -name "*.h" -o -name "*.m" -o -name "*.mm" -o -name "*.c" -o -name "*.cpp" \\)` );  for my $file (sort @files) { -    next if $file =~ /\/\w+LocalizableStrings\w*\.h$/; +    next if $file =~ /\/\w+LocalizableStrings\w*\.h$/ || $file =~ /\/LocalizedStrings\.h$/;      $file =~ s-^./--; @@ -169,7 +169,7 @@ handleString:                          # FIXME: Validate UTF-8 here?                          $UIString = $string;                          $expected = ","; -                    } elsif (($macro =~ /UI_STRING_KEY(_INTERNAL)?$/) and !defined $key) { +                    } elsif (($macro =~ /(WEB_)?UI_STRING_KEY(_INTERNAL)?$/) and !defined $key) {                          # FIXME: Validate UTF-8 here?                          $key = $string;                          $expected = ","; @@ -228,7 +228,7 @@ handleString:                      $sawError = 1;                      $expected = "";                  } -                if ($token =~ /UI_STRING(_KEY)?(_INTERNAL)?$/) { +                if ($token =~ /(WEB_)?UI_STRING(_KEY)?(_INTERNAL)?$/) {                      $expected = "(";                      $macro = $token;                      $UIString = undef; diff --git a/Tools/Scripts/gdb-safari b/Tools/Scripts/gdb-safari index 9776212..0c55bd3 100755 --- a/Tools/Scripts/gdb-safari +++ b/Tools/Scripts/gdb-safari @@ -50,4 +50,4 @@ $ENV{WEBKIT_UNSET_DYLD_FRAMEWORK_PATH} = 'YES';  print "Starting Safari under gdb with DYLD_FRAMEWORK_PATH set to point to built WebKit in $productDir.\n";  my @architectureFlags = ("-arch", architecture()) if !isTiger(); -exec $gdbPath, @architectureFlags, $safariPath or die; +exec $gdbPath, @architectureFlags, "--arg", $safariPath, @ARGV or die; diff --git a/Tools/Scripts/old-run-webkit-tests b/Tools/Scripts/old-run-webkit-tests index fe9bdd0..333777e 100755 --- a/Tools/Scripts/old-run-webkit-tests +++ b/Tools/Scripts/old-run-webkit-tests @@ -105,6 +105,7 @@ sub openWebSocketServerIfNeeded();  sub pathcmp($$);  sub printFailureMessageForTest($$);  sub processIgnoreTests($$); +sub readChecksumFromPng($);  sub readFromDumpToolWithTimer(**);  sub readSkippedFiles($);  sub recordActualResultsAndDiff($$); @@ -125,6 +126,7 @@ sub writeToFile($$);  # Argument handling  my $addPlatformExceptions = 0; +my @additionalPlatformDirectories = ();  my $complexText = 0;  my $exitAfterNFailures = 0;  my $exitAfterNCrashesOrTimeouts = 0; @@ -275,6 +277,8 @@ my $sampleDefault = $runSample ? 
"run" : "do not run";  my $usage = <<EOF;  Usage: $programName [options] [testdir|testpath ...]    --add-platform-exceptions       Put new results for non-platform-specific failing tests into the platform-specific results directory +  --additional-platform-directory path/to/directory +                                  Look in the specified directory before looking in any of the default platform-specific directories    --complex-text                  Use the complex text code path for all text (Mac OS X and Windows only)    -c|--configuration config       Set DumpRenderTree build configuration    -g|--guard-malloc               Enable malloc guard @@ -323,6 +327,7 @@ setConfiguration();  my $getOptionsResult = GetOptions(      'add-platform-exceptions' => \$addPlatformExceptions, +    'additional-platform-directory=s' => \@additionalPlatformDirectories,      'complex-text' => \$complexText,      'exit-after-n-failures=i' => \$exitAfterNFailures,      'exit-after-n-crashes-or-timeouts=i' => \$exitAfterNCrashesOrTimeouts, @@ -426,6 +431,7 @@ my $dumpToolName = $useWebKitTestRunner ? "WebKitTestRunner" : "DumpRenderTree";  if (isAppleWinWebKit()) {      $dumpToolName .= "_debug" if configurationForVisualStudio() eq "Debug_All"; +    $dumpToolName .= "_debug" if configurationForVisualStudio() eq "Debug_Cairo_CFLite";      $dumpToolName .= $Config{_exe};  }  my $dumpTool = File::Spec->catfile($productDir, $dumpToolName); @@ -433,6 +439,7 @@ die "can't find executable $dumpToolName (looked in $productDir)\n" unless -x $d  my $imageDiffTool = "$productDir/ImageDiff";  $imageDiffTool .= "_debug" if isCygwin() && configurationForVisualStudio() eq "Debug_All"; +$imageDiffTool .= "_debug" if isCygwin() && configurationForVisualStudio() eq "Debug_Cairo_CFLite";  die "can't find executable $imageDiffTool (looked in $productDir)\n" if $pixelTests && !-x $imageDiffTool;  checkFrameworks() unless isCygwin(); @@ -556,6 +563,7 @@ if (!$hasAcceleratedCompositing) {          $ignoredFiles{'media/controls-drag-timebar.html'} = 1;          $ignoredFiles{'media/controls-strict.html'} = 1;          $ignoredFiles{'media/controls-styling.html'} = 1; +        $ignoredFiles{'media/controls-without-preload.html'} = 1;          $ignoredFiles{'media/video-controls-rendering.html'} = 1;          $ignoredFiles{'media/video-display-toggle.html'} = 1;          $ignoredFiles{'media/video-no-audio.html'} = 1; @@ -599,6 +607,12 @@ if (!checkWebCoreFeatureSupport("XHTMLMP", 0)) {      $ignoredDirectories{'fast/xhtmlmp'} = 1;  } +if (isAppleMacWebKit() && $platform ne "mac-wk2" && osXVersion()->{minor} >= 6 && architecture() =~ /x86_64/) { +    # This test relies on executing JavaScript during NPP_Destroy, which isn't supported with +    # out-of-process plugins in WebKit1. See <http://webkit.org/b/58077>. 
+    $ignoredFiles{'plugins/npp-set-window-called-during-destruction.html'} = 1; +} +  processIgnoreTests(join(',', @ignoreTests), "ignore-tests") if @ignoreTests;  if (!$ignoreSkipped) {      if (!$skippedOnly || @ARGV == 0) { @@ -679,6 +693,9 @@ for (my $i = 1; $i < $iterations; $i++) {      push(@tests, @originalTests);  } +my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory(); +open my $tests_run_fh, '>', "$absTestResultsDirectory/tests_run.txt" or die $!; +  for my $test (@tests) {      my $newDumpTool = not $isDumpToolOpen;      openDumpTool(); @@ -729,6 +746,8 @@ for my $test (@tests) {      my $startTime = time if $report10Slowest; +    print $tests_run_fh "$testDirectory/$test\n"; +      # Try to read expected hash file for pixel tests      my $suffixExpectedHash = "";      if ($pixelTests && !$resetResults) { @@ -740,6 +759,8 @@ for my $test (@tests) {              # Format expected hash into a suffix string that is appended to the path / URL passed to DRT              $suffixExpectedHash = "'$expectedHash"; +        } elsif (my $expectedHash = readChecksumFromPng(File::Spec->catfile($expectedPixelDir, "$base-$expectedTag.png"))) { +            $suffixExpectedHash = "'$expectedHash";          }      } @@ -1075,6 +1096,8 @@ for my $test (@tests) {      last if stopRunningTestsEarlyIfNeeded();  } +close($tests_run_fh); +  my $totalTestingTime = time - $overallStartTime;  my $waitTime = getWaitTime();  if ($waitTime > 0.1) { @@ -1157,6 +1180,9 @@ print HTML htmlForResultsSection(@{$tests{webProcessCrash}}, "Tests that caused  print HTML htmlForResultsSection(@{$tests{error}}, "Tests that had stderr output", \&linksForErrorTest);  print HTML htmlForResultsSection(@{$tests{new}}, "Tests that had no expected results (probably new)", \&linksForNewTest); +print HTML "<p>httpd access log: <a href=\"access_log.txt\">access_log.txt</a></p>\n"; +print HTML "<p>httpd error log: <a href=\"error_log.txt\">error_log.txt</a></p>\n"; +  print HTML "</body>\n";  print HTML "</html>\n";  close HTML; @@ -1255,12 +1281,6 @@ sub countAndPrintLeaks($$$)          );      } -    if (isDarwin() && !isTiger() && !isLeopard() && !isSnowLeopard()) { -        push @callStacksToExclude, ( -            "CGGradientCreateWithColorComponents", # leak in CoreGraphics, <rdar://problem/7888492> -        ); -    } -      my $leaksTool = sourceDir() . "/Tools/Scripts/run-leaks";      my $excludeString = "--exclude-callstack '" . (join "' --exclude-callstack '", @callStacksToExclude) . "'";      $excludeString .= " --exclude-type '" . (join "' --exclude-type '", @typesToExclude) . "'" if @typesToExclude; @@ -2112,13 +2132,13 @@ sub recordActualResultsAndDiff($$)      mkpath(dirname($actualResultsPath));      writeToFile("$actualResultsPath", $actualResults); -    if (-f $expectedResultPath) { -        copy("$expectedResultPath", "$copiedExpectedResultsPath"); -    } else { -        open EMPTY, ">$copiedExpectedResultsPath"; -        close EMPTY; +    # We don't need diff and pretty diff for tests without expected file. 
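The new readChecksumFromPng() lets old-run-webkit-tests fall back to a checksum embedded in the expected PNG itself: it scans the first 2 KB for a tEXt chunk named "checksum" that holds a 32-digit hex hash. A rough standalone sketch of that lookup; the .png path and the sub name here are hypothetical:

```perl
#!/usr/bin/perl -w
use strict;
use warnings;

# Sketch of the fallback: an expected PNG may carry the pixel-test hash in a
# tEXt chunk named "checksum"; scan the first 2 KB of the file for it.
sub checksumFromPng
{
    my ($path) = @_;
    open(my $fh, "<", $path) or return;
    binmode($fh);
    my $data;
    read($fh, $data, 2048);
    close($fh);
    return $1 if defined($data) && $data =~ /tEXtchecksum\x00([a-fA-F0-9]{32})/;
    return;
}

# Hypothetical path; substitute any -expected.png produced by the pixel tests.
my $hash = checksumFromPng("LayoutTests/platform/mac/example-expected.png");
print defined($hash) ? "embedded checksum: $hash\n" : "no embedded checksum found\n";
```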
+    if ( !-f $expectedResultPath) { +        return;      } +    copy("$expectedResultPath", "$copiedExpectedResultsPath"); +      my $diffOuputBasePath = File::Spec->catfile($testResultsDirectory, $base);      my $diffOutputPath = "$diffOuputBasePath-$diffsTag.txt";      system "diff -u \"$copiedExpectedResultsPath\" \"$actualResultsPath\" > \"$diffOutputPath\""; @@ -2165,6 +2185,8 @@ sub buildPlatformResultHierarchy()          push(@hierarchy, $scoped) if (-d $scoped);      } +    unshift @hierarchy, grep { -d $_ } @additionalPlatformDirectories; +      return @hierarchy;  } @@ -2457,6 +2479,15 @@ sub readSkippedFiles($)      }  } +sub readChecksumFromPng($) +{ +    my ($path) = @_; +    my $data; +    if (open(PNGFILE, $path) && read(PNGFILE, $data, 2048) && $data =~ /tEXtchecksum\0([a-fA-F0-9]{32})/) { +        return $1; +    } +} +  my @testsFound;  sub isUsedInReftest @@ -2668,10 +2699,13 @@ sub setUpWindowsCrashLogSaving()      my $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Debugging Tools for Windows (x86)", "ntsd.exe");      unless (-f $ntsdPath) { -        $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{SYSTEMROOT}), "system32", "ntsd.exe"); +        $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{ProgramW6432}), "Debugging Tools for Windows (x64)", "ntsd.exe");          unless (-f $ntsdPath) { -            print STDERR "Can't find ntsd.exe. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n"; -            return; +            $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{SYSTEMROOT}), "system32", "ntsd.exe"); +            unless (-f $ntsdPath) { +                print STDERR "Can't find ntsd.exe. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n"; +                return; +            }          }      } diff --git a/Tools/Scripts/prepare-ChangeLog b/Tools/Scripts/prepare-ChangeLog index a4b3d6b..e6fef40 100755 --- a/Tools/Scripts/prepare-ChangeLog +++ b/Tools/Scripts/prepare-ChangeLog @@ -140,6 +140,7 @@ if (!$parseOptionsResult || $showHelp) {      print STDERR "  -o|--open       Open ChangeLogs in an editor when done\n";      print STDERR "  --[no-]update   Update ChangeLogs from svn before adding entry (default: update)\n";      print STDERR "  --[no-]write    Write ChangeLogs to disk (otherwise send new entries to stdout) (default: write)\n"; +    print STDERR "  --email=        Specify the email address to be used in the patch\n";      exit 1;  } diff --git a/Tools/Scripts/run-iexploder-tests b/Tools/Scripts/run-iexploder-tests index 97e3630..cb696a2 100755 --- a/Tools/Scripts/run-iexploder-tests +++ b/Tools/Scripts/run-iexploder-tests @@ -114,7 +114,6 @@ sub configureAndOpenHTTPDIfNeeded()  {      return if $isHttpdOpen;      mkdir $iExploderTestDirectory; -    my $httpdPath = getHTTPDPath();      my $webkitDirectory = getcwd();      my $testDirectory = $webkitDirectory . "/LayoutTests";      my $iExploderDirectory = $webkitDirectory . "/Tools/iExploder/iExploder-1.3.2"; diff --git a/Tools/Scripts/update-webkit b/Tools/Scripts/update-webkit index 5c132ae..6a7b9f7 100755 --- a/Tools/Scripts/update-webkit +++ b/Tools/Scripts/update-webkit @@ -2,6 +2,7 @@  # Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.  # Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2011 Brent Fulgham. All rights reserved.  
#  # Redistribution and use in source and binary forms, with or without  # modification, are permitted provided that the following conditions @@ -49,6 +50,8 @@ my $useGYP = 0;  determineIsChromium(); +determineIsWinCairo(); +  chdirWebKit();  my $getOptionsResult = GetOptions( @@ -64,6 +67,7 @@ Usage: @{[ basename($0) ]} [options]    -h|--help   show the help message    -q|--quiet  pass -q to svn update for quiet updates    --gyp       generate project files from gyp after update +  --wincairo  also update dependencies of the WinCairo port  __END__      exit 1;  } @@ -96,6 +100,9 @@ if (-d "../Internal") {      system("perl", "Tools/Scripts/update-webkit-chromium") == 0 or die $!;  } elsif (isAppleWinWebKit()) {      system("perl", "Tools/Scripts/update-webkit-auxiliary-libs") == 0 or die; +    if (isWinCairo()) { +        system("perl", "Tools/Scripts/update-webkit-wincairo-libs") == 0 or die; +    }  }  setupAppleWinEnv() if isAppleWinWebKit(); diff --git a/Tools/Scripts/update-webkit-auxiliary-libs b/Tools/Scripts/update-webkit-auxiliary-libs index 9a6b20f..d301938 100755 --- a/Tools/Scripts/update-webkit-auxiliary-libs +++ b/Tools/Scripts/update-webkit-auxiliary-libs @@ -30,106 +30,11 @@  use strict;  use warnings; - -use File::Find; -use File::Spec; -use File::Temp ();  use FindBin; -use HTTP::Date qw(str2time); -use POSIX; -use lib $FindBin::Bin; -use webkitdirs; - -sub lastModifiedToUnixTime($); - -# Time in seconds that the new zip file must be newer than the old for us to -# consider them to be different. If the difference in modification time is less -# than this threshold, we assume that the files are the same. We need this -# because the zip file is served from a set of mirrors with slightly different -# Last-Modified times. -my $newnessThreshold = 30; -my $sourceDir = sourceDir();  my $file = "WebKitAuxiliaryLibrary";  my $zipFile = "$file.zip";   my $auxiliaryLibsURL = "http://developer.apple.com/opensource/internet/$zipFile"; -my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win"; -my $tmpRelativeDir = File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1); -my $tmpAbsDir = File::Spec->rel2abs($tmpRelativeDir); - -print "Checking Last-Modified date of $zipFile...\n"; - -my $result = system "curl -s -I $auxiliaryLibsURL | grep Last-Modified > \"$tmpAbsDir/$file.headers\""; - -if (WEXITSTATUS($result)) { -    print STDERR "Couldn't check Last-Modified date of new $zipFile.\n"; -    print STDERR "Please ensure that $auxiliaryLibsURL is reachable.\n"; - -    if (! -f "$webkitLibrariesDir/$file.headers") { -        print STDERR "Unable to check Last-Modified date and no version of $file to fall back to.\n"; -        exit 1; -    } - -    print STDERR "Falling back to existing version of $file.\n"; -    exit 0; -} - -if (open NEW, "$tmpAbsDir/$file.headers") { -    my $new = lastModifiedToUnixTime(<NEW>); -    close NEW; - -    if (defined $new && open OLD, "$webkitLibrariesDir/$file.headers") { -        my $old = lastModifiedToUnixTime(<OLD>); -        close OLD; -        if (defined $old && abs($new - $old) < $newnessThreshold) { -            print "Current $file is up to date\n"; -            exit 0; -        } -    } -} - -print "Downloading $zipFile...\n\n"; -$result = system "curl -o \"$tmpAbsDir/$zipFile\" $auxiliaryLibsURL"; -die "Couldn't download $zipFile!" if $result; - -$result = system "unzip", "-q", "-d", $tmpAbsDir, "$tmpAbsDir/$zipFile"; -die "Couldn't unzip $zipFile." 
if $result; - -print "\nInstalling $file...\n"; - -sub wanted -{ -    my $relativeName = File::Spec->abs2rel($File::Find::name, "$tmpAbsDir/$file/win"); -    my $destination = "$webkitLibrariesDir/$relativeName"; - -    if (-d $_) { -        mkdir $destination; -        return; -    } - -    system "cp", $_, $destination; -} - -File::Find::find(\&wanted, "$tmpAbsDir/$file"); - -$result = system "mv", "$tmpAbsDir/$file.headers", $webkitLibrariesDir; -print STDERR "Couldn't move $file.headers to $webkitLibrariesDir" . ".\n" if $result; - -print "The $file has been sucessfully installed in\n $webkitLibrariesDir\n"; -exit; - -sub toUnixPath -{ -    my $path = shift; -    return unless $path; -    chomp($path = `cygpath -u '$path'`); -    return $path; -} - -sub lastModifiedToUnixTime($) -{ -    my ($str) = @_; +my $command = "$FindBin::Bin/update-webkit-dependency"; -    $str =~ /^Last-Modified: (.*)$/ or return; -    return str2time($1); -} +system("perl", $command, $auxiliaryLibsURL, "win") == 0 or die; diff --git a/Tools/Scripts/update-webkit-dependency b/Tools/Scripts/update-webkit-dependency new file mode 100755 index 0000000..1ad4d6d --- /dev/null +++ b/Tools/Scripts/update-webkit-dependency @@ -0,0 +1,157 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2005, 2006, 2007 Apple Computer, Inc.  All rights reserved. +# Copyright (C) 2011 Carl Lobo.  All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1.  Redistributions of source code must retain the above copyright +#     notice, this list of conditions and the following disclaimer.  +# 2.  Redistributions in binary form must reproduce the above copyright +#     notice, this list of conditions and the following disclaimer in the +#     documentation and/or other materials provided with the distribution.  +# 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of +#     its contributors may be used to endorse or promote products derived +#     from this software without specific prior written permission.  +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Updates a development environment to the new WebKitAuxiliaryLibrary + +use strict; +use warnings; + +use File::Find; +use File::Spec; +use File::Temp (); +use FindBin; +use HTTP::Date qw(str2time); +use POSIX; +use lib $FindBin::Bin; +use webkitdirs; + +if ($#ARGV != 1) { +    die <<EOF; +Usage: +        update-webkit-dependancy <URL with the dependancy zip file> <*prefix dir inside zip without filename> + +        * If filename is requirements.zip and the contents of the zipfile are "requirements/x" then prefix = "." 
+        * If filename is xyz.zip and the contents of the zipfile are xyz/abc/x" then prefix = "abc" +        * x is lib or include or bin. +EOF +} + +sub lastModifiedToUnixTime($); +sub getLibraryName($); + +# Time in seconds that the new zip file must be newer than the old for us to +# consider them to be different. If the difference in modification time is less +# than this threshold, we assume that the files are the same. We need this +# because the zip file is served from a set of mirrors with slightly different +# Last-Modified times. +my $newnessThreshold = 30; + +my $libsURL = shift; +my $prefixInZip = shift; +my $sourceDir = sourceDir(); +my $file = getLibraryName($libsURL); +my $zipFile = "$file.zip";  +my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win"; +my $tmpRelativeDir = File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1); +my $tmpAbsDir = File::Spec->rel2abs($tmpRelativeDir); + +print "Checking Last-Modified date of $zipFile...\n"; + +my $result = system "curl -s -I $libsURL | grep Last-Modified > \"$tmpAbsDir/$file.headers\""; + +if (WEXITSTATUS($result)) { +    print STDERR "Couldn't check Last-Modified date of new $zipFile.\n"; +    print STDERR "Please ensure that $libsURL is reachable.\n"; + +    if (! -f "$webkitLibrariesDir/$file.headers") { +        print STDERR "Unable to check Last-Modified date and no version of $file to fall back to.\n"; +        exit 1; +    } + +    print STDERR "Falling back to existing version of $file.\n"; +    exit 0; +} + +if (open NEW, "$tmpAbsDir/$file.headers") { +    my $new = lastModifiedToUnixTime(<NEW>); +    close NEW; + +    if (defined $new && open OLD, "$webkitLibrariesDir/$file.headers") { +        my $old = lastModifiedToUnixTime(<OLD>); +        close OLD; +        if (defined $old && abs($new - $old) < $newnessThreshold) { +            print "Current $file is up to date\n"; +            exit 0; +        } +    } +} + +print "Downloading $zipFile...\n\n"; +$result = system "curl -o \"$tmpAbsDir/$zipFile\" $libsURL"; +die "Couldn't download $zipFile!" if $result; + +$result = system "unzip", "-q", "-d", $tmpAbsDir, "$tmpAbsDir/$zipFile"; +die "Couldn't unzip $zipFile." if $result; + +print "\nInstalling $file...\n"; + +sub wanted +{ +    my $relativeName = File::Spec->abs2rel($File::Find::name, "$tmpAbsDir/$file/$prefixInZip"); +    my $destination = "$webkitLibrariesDir/$relativeName"; + +    if (-d $_) { +        mkdir $destination; +        return; +    } + +    system "cp", $_, $destination; +} + +File::Find::find(\&wanted, "$tmpAbsDir/$file"); + +$result = system "mv", "$tmpAbsDir/$file.headers", $webkitLibrariesDir; +print STDERR "Couldn't move $file.headers to $webkitLibrariesDir" . 
".\n" if $result; + +print "The $file has been sucessfully installed in\n $webkitLibrariesDir\n"; +exit; + +sub toUnixPath +{ +    my $path = shift; +    return unless $path; +    chomp($path = `cygpath -u '$path'`); +    return $path; +} + +sub lastModifiedToUnixTime($) +{ +    my ($str) = @_; + +    $str =~ /^Last-Modified: (.*)$/ or return; +    return str2time($1); +} + +sub getLibraryName($) +{ +    my $url = shift; +    $url =~ m#/([^/]+)\.zip$#; +    return $1; +} + diff --git a/Tools/Scripts/update-webkit-localizable-strings b/Tools/Scripts/update-webkit-localizable-strings index 0a0ada9..ceb25a5 100755 --- a/Tools/Scripts/update-webkit-localizable-strings +++ b/Tools/Scripts/update-webkit-localizable-strings @@ -35,7 +35,7 @@ use lib $FindBin::Bin;  use webkitdirs;  # WebKit and WebKit2 strings go into WebCore's Localizable.strings. -my @directoriesToScan = ("Source/WebCore", "Source/WebKit/mac", "Source/WebKit2", "-Source/WebCore/icu", "-Source/WebKit/mac/icu"); +my @directoriesToScan = ("Source/WebCore", "Source/WebKit/mac", "Source/WebKit/win", "Source/WebKit2", "-Source/WebCore/icu", "-Source/WebKit/mac/icu");  my $fileToUpdate = "Source/WebCore/English.lproj/Localizable.strings";  @ARGV == 0 or die "Usage: " . basename($0) . "\n"; @@ -43,9 +43,3 @@ my $fileToUpdate = "Source/WebCore/English.lproj/Localizable.strings";  chdirWebKit();  system "Tools/Scripts/extract-localizable-strings", "-", $fileToUpdate, @directoriesToScan; - -# FIXME: the following can be removed and "Source/WebKit/win" added above once Windows uses WebCore's Localizable.strings. <rdar://problem/9119405> -my @webKitDirectoriesToScan = ("Source/WebKit/win"); -my $webKitFileToUpdate = "Source/WebKit/English.lproj/Localizable.strings"; - -system "Tools/Scripts/extract-localizable-strings", "-", $webKitFileToUpdate, @webKitDirectoriesToScan; diff --git a/Tools/Scripts/update-webkit-wincairo-libs b/Tools/Scripts/update-webkit-wincairo-libs new file mode 100755 index 0000000..15c7182 --- /dev/null +++ b/Tools/Scripts/update-webkit-wincairo-libs @@ -0,0 +1,40 @@ +#!/usr/bin/perl -w + +# Copyright (C) 2011 Carl Lobo.  All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# 1.  Redistributions of source code must retain the above copyright +#     notice, this list of conditions and the following disclaimer.  +# 2.  Redistributions in binary form must reproduce the above copyright +#     notice, this list of conditions and the following disclaimer in the +#     documentation and/or other materials provided with the distribution.  +# 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of +#     its contributors may be used to endorse or promote products derived +#     from this software without specific prior written permission.  +# +# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Updates a development environment to the new WebKitAuxiliaryLibrary + +use strict; +use warnings; +use FindBin; + +my $file = "WinCairoRequirements"; +my $zipFile = "$file.zip";  +my $winCairoLibsURL = "http://idisk.mac.com/bfulgham-Public/$zipFile"; +my $command = "$FindBin::Bin/update-webkit-dependency"; + +system("perl", $command, $winCairoLibsURL, ".") == 0 or die; diff --git a/Tools/Scripts/webkitdirs.pm b/Tools/Scripts/webkitdirs.pm index d27caba..e7afbea 100644 --- a/Tools/Scripts/webkitdirs.pm +++ b/Tools/Scripts/webkitdirs.pm @@ -67,6 +67,7 @@ my $isSymbian;  my %qtFeatureDefaults;  my $isGtk;  my $isWinCE; +my $isWinCairo;  my $isWx;  my $isEfl;  my @wxArgs; @@ -445,26 +446,24 @@ sub determinePassedConfiguration      return if $searchedForPassedConfiguration;      $searchedForPassedConfiguration = 1; -    my $isWinCairo = checkForArgumentAndRemoveFromARGV("--wincairo"); -      for my $i (0 .. $#ARGV) {          my $opt = $ARGV[$i];          if ($opt =~ /^--debug$/i || $opt =~ /^--devel/i) {              splice(@ARGV, $i, 1);              $passedConfiguration = "Debug"; -            $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); +            $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());              return;          }          if ($opt =~ /^--release$/i || $opt =~ /^--deploy/i) {              splice(@ARGV, $i, 1);              $passedConfiguration = "Release"; -            $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); +            $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());              return;          }          if ($opt =~ /^--profil(e|ing)$/i) {              splice(@ARGV, $i, 1);              $passedConfiguration = "Profiling"; -            $passedConfiguration .= "_Cairo_CFLite" if ($isWinCairo && isCygwin()); +            $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());              return;          }      } @@ -872,6 +871,18 @@ sub determineIsChromium()      $isChromium = checkForArgumentAndRemoveFromARGV("--chromium");  } +sub isWinCairo() +{ +    determineIsWinCairo(); +    return $isWinCairo; +} + +sub determineIsWinCairo() +{ +    return if defined($isWinCairo); +    $isWinCairo = checkForArgumentAndRemoveFromARGV("--wincairo"); +} +  sub isCygwin()  {      return ($^O eq "cygwin") || 0; @@ -1332,7 +1343,7 @@ sub buildWafProject          chomp($wafCommand);      }      if ($shouldClean) { -        return system $wafCommand, "clean", "distclean"; +        return system $wafCommand, "uninstall", "clean", "distclean";      }      return system $wafCommand, 'configure', 'build', 'install', @options; @@ -1645,7 +1656,6 @@ sub buildQMakeProject($@)                  if ($result ne 0) {                      die "Failed while running $qmakebin to generate derived sources for Tools!\n";                  } -                push @subdirs, "MiniBrowser";                  push @subdirs, "WebKitTestRunner";              }          } diff 
--git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl index 9fe077f..e195023 100644 --- a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl @@ -1198,7 +1198,7 @@ foreach my $testCase (@testCaseHashRefs) {      open($fileHandle, "<", \$testCase->{inputText});      my $line = <$fileHandle>; -    my @got = VCSUtils::parseDiff($fileHandle, $line); +    my @got = VCSUtils::parseDiff($fileHandle, $line, {"shouldNotUseIndexPathEOL" => 1});      my $expectedReturn = $testCase->{expectedReturn};      is_deeply(\@got, $expectedReturn, "$testNameStart return value."); diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl new file mode 100644 index 0000000..307f3a7 --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl @@ -0,0 +1,305 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2011 Research In Motion Limited. All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +#  +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +# Lesser General Public License for more details. +#  +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA + +# Unit tests of parseDiff() with mock files; test override of patch EOL with EOL of target file. 
+ +use strict; +use warnings; + +use File::Temp; +use POSIX qw/getcwd/; +use Test::More; +use VCSUtils; + +my $gitDiffHeaderForNewFile = <<EOF; +diff --git a/Makefile b/Makefile +new file mode 100644 +index 0000000..756e864 +--- /dev/null ++++ b/Makefile +@@ -0,0 +1,17 @@ +EOF + +my $gitDiffHeader = <<EOF; +diff --git a/Makefile b/Makefile +index 756e864..04d2ae1 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,3 +1,4 @@ +EOF + +my $svnConvertedGitDiffHeader = <<EOF; +Index: Makefile +index 756e864..04d2ae1 100644 +--- Makefile ++++ Makefile +@@ -1,3 +1,4 @@ +EOF + +my $svnConvertedGitDiffHeaderForNewFile = <<EOF; +Index: Makefile +new file mode 100644 +index 0000000..756e864 +--- Makefile ++++ Makefile +@@ -0,0 +1,17 @@ +EOF + +my $svnDiffHeaderForNewFile = <<EOF; +Index: Makefile +=================================================================== +--- Makefile	(revision 0) ++++ Makefile	(revision 0) +@@ -0,0 +1,17 @@ +EOF + +my $svnDiffHeader = <<EOF; +Index: Makefile +=================================================================== +--- Makefile	(revision 53052) ++++ Makefile	(working copy) +@@ -1,3 +1,4 @@ +EOF + +my $diffBody = <<EOF; ++ + MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools +  + all: +EOF + +my $MakefileContents = <<EOF; +MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools  + +all: +EOF + +my $mockDir = File::Temp->tempdir("parseDiffXXXX", CLEANUP => 1); +writeToFile(File::Spec->catfile($mockDir, "MakefileWithUnixEOL"), $MakefileContents); +writeToFile(File::Spec->catfile($mockDir, "MakefileWithWindowsEOL"), toWindowsLineEndings($MakefileContents)); + +# The array of test cases. +my @testCaseHashRefs = ( +### +# SVN test cases +## +{ +    # New test +    diffName => "SVN: Patch with Unix line endings and IndexPath has Unix line endings", +    inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, # Same as input text +    indexPath => "MakefileWithUnixEOL", +    isSvn => 1, +    sourceRevision => "53052", +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "SVN: Patch with Windows line endings and IndexPath has Unix line endings", +    inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, +    indexPath => "MakefileWithUnixEOL", +    isSvn => 1, +    sourceRevision => "53052", +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "SVN: Patch with Windows line endings and IndexPath has Windows line endings", +    inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), # Same as input text +    indexPath => "MakefileWithWindowsEOL", +    isSvn => 1, +    sourceRevision => "53052", +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "SVN: Patch with Unix line endings and IndexPath has Windows line endings", +    inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . 
$diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), +    indexPath => "MakefileWithWindowsEOL", +    isSvn => 1, +    sourceRevision => "53052", +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "SVN: Patch with Unix line endings and nonexistent IndexPath", +    inputText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, # Same as input text +    indexPath => "NonexistentFile", +    isSvn => 1, +    isNew => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "SVN: Patch with Windows line endings and nonexistent IndexPath", +    inputText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), # Same as input text +    indexPath => "NonexistentFile", +    isSvn => 1, +    isNew => 1, +}], +undef], +    expectedNextLine => undef, +}, +### +# Git test cases +## +{ +    # New test +    diffName => "Git: Patch with Unix line endings and IndexPath has Unix line endings", +    inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, # Same as input text +    indexPath => "MakefileWithUnixEOL", +    isGit => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "Git: Patch with Windows line endings and IndexPath has Unix line endings", +    inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithUnixEOL") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithUnixEOL") . $diffBody, +    indexPath => "MakefileWithUnixEOL", +    isGit => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "Git: Patch with Windows line endings and IndexPath has Windows line endings", +    inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . toWindowsLineEndings($diffBody), # Same as input text +    indexPath => "MakefileWithWindowsEOL", +    isGit => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "Git: Patch with Unix line endings and IndexPath has Windows line endings", +    inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . $diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . 
toWindowsLineEndings($diffBody), +    indexPath => "MakefileWithWindowsEOL", +    isGit => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "Git: Patch with Unix line endings and nonexistent IndexPath", +    inputText => substituteString($gitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . $diffBody, # Same as input text +    indexPath => "NonexistentFile", +    isGit => 1, +    isNew => 1, +}], +undef], +    expectedNextLine => undef, +}, +{ +    # New test +    diffName => "Git: Patch with Windows line endings and nonexistent IndexPath", +    inputText => substituteString($gitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), +    expectedReturn => [ +[{ +    svnConvertedText => substituteString($svnConvertedGitDiffHeaderForNewFile, "Makefile", "NonexistentFile") . toWindowsLineEndings($diffBody), # Same as input text +    indexPath => "NonexistentFile", +    isGit => 1, +    isNew => 1, +}], +undef], +    expectedNextLine => undef, +}, +); + +my $testCasesCount = @testCaseHashRefs; +plan(tests => 2 * $testCasesCount); # Total number of assertions. + +my $savedCWD = getcwd(); +chdir($mockDir) or die; +foreach my $testCase (@testCaseHashRefs) { +    my $testNameStart = "parseDiff(): $testCase->{diffName}: comparing"; + +    my $fileHandle; +    open($fileHandle, "<", \$testCase->{inputText}); +    my $line = <$fileHandle>; + +    my @got = VCSUtils::parseDiff($fileHandle, $line); +    my $expectedReturn = $testCase->{expectedReturn}; + +    is_deeply(\@got, $expectedReturn, "$testNameStart return value."); + +    my $gotNextLine = <$fileHandle>; +    is($gotNextLine, $testCase->{expectedNextLine},  "$testNameStart next read line."); +} +chdir($savedCWD); + +sub substituteString +{ +    my ($string, $searchString, $replacementString) = @_; +    $string =~ s/$searchString/$replacementString/g; +    return $string; +} + +sub writeToFile +{ +    my ($file, $text) = @_; +    open(FILE, ">$file") or die; +    print FILE $text; +    close(FILE); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl new file mode 100644 index 0000000..367ad1d --- /dev/null +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseFirstEOL.pl @@ -0,0 +1,63 @@ +#!/usr/bin/perl -w +# +# Copyright (C) 2011 Research In Motion Limited. All rights reserved. +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +#  +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +# Lesser General Public License for more details. +#  +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA + +# Unit tests of VCSUtils::parseFirstEOL(). 
+ +use strict; +use warnings; + +use Test::Simple tests => 7; +use VCSUtils; + +my $title; + +# New test +$title = "parseFirstEOL: Empty string."; +ok(!defined(firstEOLInString("")), $title); + +# New test +$title = "parseFirstEOL: Line without a line ending character"; +ok(!defined(firstEOLInString("This line doesn't have a line ending character.")), $title); + +# New test +$title = "parseFirstEOL: Line with Windows line ending."; +ok(firstEOLInString("This line ends with a Windows line ending.\r\n") eq "\r\n", $title); + +# New test +$title = "parseFirstEOL: Line with Unix line ending."; +ok(firstEOLInString("This line ends with a Unix line ending.\n") eq "\n", $title); + +# New test +$title = "parseFirstEOL: Line with Mac line ending."; +ok(firstEOLInString("This line ends with a Mac line ending.\r") eq "\r", $title); + +# New test +$title = "parseFirstEOL: Line with Mac line ending followed by line without a line ending."; +ok(firstEOLInString("This line ends with a Mac line ending.\rThis line doesn't have a line ending.") eq "\r", $title); + +# New test +$title = "parseFirstEOL: Line with a mix of line endings."; +ok(firstEOLInString("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "\r\n", $title); + +sub firstEOLInString +{ +    my ($string) = @_; +    my $fileHandle; +    open($fileHandle, "<", \$string); +    return parseFirstEOL($fileHandle); +} diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl index 8bd8e90..6880214 100644 --- a/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl +++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/removeEOL.pl @@ -37,20 +37,20 @@ my $title;  # New test  $title = "removeEOL: Undefined argument."; -ok(removeEOL(undef) eq ""); +ok(removeEOL(undef) eq "", $title);  # New test  $title = "removeEOL: Line with Windows line ending."; -ok(removeEOL("This line ends with a Windows line ending.\r\n") eq "This line ends with a Windows line ending."); +ok(removeEOL("This line ends with a Windows line ending.\r\n") eq "This line ends with a Windows line ending.", $title);  # New test  $title = "removeEOL: Line with Unix line ending."; -ok(removeEOL("This line ends with a Unix line ending.\n") eq "This line ends with a Unix line ending."); +ok(removeEOL("This line ends with a Unix line ending.\n") eq "This line ends with a Unix line ending.", $title);  # New test  $title = "removeEOL: Line with Mac line ending."; -ok(removeEOL("This line ends with a Mac line ending.\r") eq "This line ends with a Mac line ending."); +ok(removeEOL("This line ends with a Mac line ending.\r") eq "This line ends with a Mac line ending.", $title);  # New test  $title = "removeEOL: Line with a mix of line endings."; -ok(removeEOL("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "This line contains a mix of line endings."); +ok(removeEOL("This line contains a mix of line endings.\r\n\r\n\r\r\n\n\n\n") eq "This line contains a mix of line endings.", $title); diff --git a/Tools/Scripts/webkitperl/httpd.pm b/Tools/Scripts/webkitperl/httpd.pm index 5795340..3a40b4e 100644 --- a/Tools/Scripts/webkitperl/httpd.pm +++ b/Tools/Scripts/webkitperl/httpd.pm @@ -63,7 +63,6 @@ $tmpDir = convertMsysPath($tmpDir) if isMsys();  my $httpdLockPrefix = "WebKitHttpd.lock.";  my $myLockFile;  my $exclusiveLockFile = File::Spec->catfile($tmpDir, "WebKit.lock"); -my $httpdPath;  my $httpdPidDir = File::Spec->catfile($tmpDir, "WebKit");  my $httpdPidFile = File::Spec->catfile($httpdPidDir, 
"httpd.pid");  my $httpdPid; @@ -76,6 +75,7 @@ $SIG{'TERM'} = 'handleInterrupt';  sub getHTTPDPath  { +    my $httpdPath;      if (isDebianBased()) {          $httpdPath = "/usr/sbin/apache2";      } elsif (isMsys()) { @@ -130,7 +130,7 @@ sub getHTTPDConfigPathForTestDirectory      my ($testDirectory) = @_;      die "No test directory has been specified." unless ($testDirectory);      my $httpdConfig; -    getHTTPDPath(); +    my $httpdPath = getHTTPDPath();      if (isCygwin()) {          my $windowsConfDirectory = "$testDirectory/http/conf/";          unless (-x "/usr/lib/apache/libphp4.dll") { @@ -173,7 +173,7 @@ sub openHTTPD(@)          unlink $httpdPidFile;      } -    $httpdPath = "/usr/sbin/httpd" unless ($httpdPath); +    my $httpdPath = getHTTPDPath();      open2(">&1", \*HTTPDIN, $httpdPath, @args); diff --git a/Tools/Scripts/webkitpy/common/checkout/scm.py b/Tools/Scripts/webkitpy/common/checkout/scm.py index e436402..3e8d5e5 100644 --- a/Tools/Scripts/webkitpy/common/checkout/scm.py +++ b/Tools/Scripts/webkitpy/common/checkout/scm.py @@ -134,8 +134,9 @@ def commit_error_handler(error):  class AuthenticationError(Exception): -    def __init__(self, server_host): +    def __init__(self, server_host, prompt_for_password=False):          self.server_host = server_host +        self.prompt_for_password = prompt_for_password  class AmbiguousCommitError(Exception): @@ -291,7 +292,7 @@ class SCM:      def revert_files(self, file_paths):          self._subclass_must_implement() -    def commit_with_message(self, message, username=None, git_commit=None, force_squash=False, changed_files=None): +    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):          self._subclass_must_implement()      def svn_commit_log(self, svn_revision): @@ -319,8 +320,19 @@ class SCM:          return [] -class SVN(SCM): -    # FIXME: We should move these values to a WebKit-specific config file. +# A mixin class that represents common functionality for SVN and Git-SVN. +class SVNRepository: +    def has_authorization_for_realm(self, realm, home_directory=os.getenv("HOME")): +        # Assumes find and grep are installed. +        if not os.path.isdir(os.path.join(home_directory, ".subversion")): +            return False +        find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"] +        find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip() +        return find_output and os.path.isfile(os.path.join(home_directory, find_output)) + + +class SVN(SCM, SVNRepository): +    # FIXME: These belong in common.config.urls      svn_server_host = "svn.webkit.org"      svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge" @@ -374,14 +386,6 @@ class SVN(SCM):      def commit_success_regexp():          return "^Committed revision (?P<svn_revision>\d+)\.$" -    def has_authorization_for_realm(self, realm=svn_server_realm, home_directory=os.getenv("HOME")): -        # Assumes find and grep are installed. 
-        if not os.path.isdir(os.path.join(home_directory, ".subversion")): -            return False -        find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"]; -        find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip() -        return find_output and os.path.isfile(os.path.join(home_directory, find_output)) -      @memoized      def svn_version(self):          return self.run(['svn', '--version', '--quiet']) @@ -556,11 +560,11 @@ class SVN(SCM):          # FIXME: This should probably use cwd=self.checkout_root.          self.run(['svn', 'revert'] + file_paths) -    def commit_with_message(self, message, username=None, git_commit=None, force_squash=False, changed_files=None): +    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):          # git-commit and force are not used by SVN.          svn_commit_args = ["svn", "commit"] -        if not username and not self.has_authorization_for_realm(): +        if not username and not self.has_authorization_for_realm(self.svn_server_realm):              raise AuthenticationError(self.svn_server_host)          if username:              svn_commit_args.extend(["--username", username]) @@ -577,8 +581,7 @@ class SVN(SCM):              # Return a string which looks like a commit so that things which parse this output will succeed.              return "Dry run, no commit.\nCommitted revision 0." -        # FIXME: Should this use cwd=self.checkout_root? -        return self.run(svn_commit_args, error_handler=commit_error_handler) +        return self.run(svn_commit_args, cwd=self.checkout_root, error_handler=commit_error_handler)      def svn_commit_log(self, svn_revision):          svn_revision = self.strip_r_from_svn_revision(svn_revision) @@ -599,7 +602,7 @@ class SVN(SCM):  # All git-specific logic should go here. -class Git(SCM): +class Git(SCM, SVNRepository):      def __init__(self, cwd, executive=None):          SCM.__init__(self, cwd, executive)          self._check_git_architecture() @@ -834,7 +837,7 @@ class Git(SCM):              if num_local_commits > 1 or (num_local_commits > 0 and not working_directory_is_clean):                  raise AmbiguousCommitError(num_local_commits, working_directory_is_clean) -    def commit_with_message(self, message, username=None, git_commit=None, force_squash=False, changed_files=None): +    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):          # Username is ignored during Git commits.          working_directory_is_clean = self.working_directory_is_clean() @@ -844,7 +847,7 @@ class Git(SCM):                  if working_directory_is_clean:                      raise ScriptError(message="The working copy is not modified. --git-commit=HEAD.. only commits working copy changes.")                  self.commit_locally_with_message(message) -                return self._commit_on_branch(message, 'HEAD') +                return self._commit_on_branch(message, 'HEAD', username=username, password=password)              # Need working directory changes to be committed so we can checkout the merge branch.              if not working_directory_is_clean: @@ -852,15 +855,15 @@ class Git(SCM):                  # That will modify the working-copy and cause us to hit this error.                  
# The ChangeLog modification could be made to modify the existing local commit.                  raise ScriptError(message="Working copy is modified. Cannot commit individual git_commits.") -            return self._commit_on_branch(message, git_commit) +            return self._commit_on_branch(message, git_commit, username=username, password=password)          if not force_squash:              self._assert_can_squash(working_directory_is_clean)          self.run(['git', 'reset', '--soft', self.remote_merge_base()])          self.commit_locally_with_message(message) -        return self.push_local_commits_to_server() +        return self.push_local_commits_to_server(username=username, password=password) -    def _commit_on_branch(self, message, git_commit): +    def _commit_on_branch(self, message, git_commit, username=None, password=None):          branch_ref = self.run(['git', 'symbolic-ref', 'HEAD']).strip()          branch_name = branch_ref.replace('refs/heads/', '')          commit_ids = self.commit_ids_from_commitish_arguments([git_commit]) @@ -889,7 +892,7 @@ class Git(SCM):                  self.run(['git', 'cherry-pick', '--no-commit', commit])              self.run(['git', 'commit', '-m', message]) -            output = self.push_local_commits_to_server() +            output = self.push_local_commits_to_server(username=username, password=password)          except Exception, e:              log("COMMIT FAILED: " + str(e))              output = "Commit failed." @@ -937,11 +940,15 @@ class Git(SCM):      def commit_locally_with_message(self, message):          self.run(['git', 'commit', '--all', '-F', '-'], input=message) -    def push_local_commits_to_server(self): +    def push_local_commits_to_server(self, username=None, password=None):          dcommit_command = ['git', 'svn', 'dcommit']          if self.dryrun:              dcommit_command.append('--dry-run') -        output = self.run(dcommit_command, error_handler=commit_error_handler) +        if not self.has_authorization_for_realm(SVN.svn_server_realm): +            raise AuthenticationError(SVN.svn_server_host, prompt_for_password=True) +        if username: +            dcommit_command.extend(["--username", username]) +        output = self.run(dcommit_command, error_handler=commit_error_handler, input=password)          # Return a string which looks like a commit so that things which parse this output will succeed.          
if self.dryrun:              output += "\nCommitted r0" diff --git a/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py index 79b354d..ab3f45a 100644 --- a/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py +++ b/Tools/Scripts/webkitpy/common/checkout/scm_unittest.py @@ -650,6 +650,13 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==          commit_text = self.scm.commit_with_message("yet another test commit", username)          self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0') +    def test_commit_in_subdir(self, username=None): +        write_into_file_at_path('test_dir/test_file3', 'more test content') +        os.chdir("test_dir") +        commit_text = self.scm.commit_with_message("another test commit", username) +        os.chdir("..") +        self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6') +      def test_commit_text_parsing(self):          self._shared_test_commit_with_message() @@ -657,7 +664,7 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==          self._shared_test_commit_with_message("dbates@webkit.org")      def test_commit_without_authorization(self): -        self.scm.has_authorization_for_realm = lambda: False +        self.scm.has_authorization_for_realm = lambda realm: False          self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)      def test_has_authorization_for_realm(self): @@ -667,7 +674,7 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==          os.mkdir(svn_config_dir_path)          fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")          write_into_file_at_path(fake_webkit_auth_file, SVN.svn_server_realm) -        self.assertTrue(scm.has_authorization_for_realm(home_directory=fake_home_dir)) +        self.assertTrue(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))          os.remove(fake_webkit_auth_file)          os.rmdir(svn_config_dir_path)          os.rmdir(fake_home_dir) @@ -677,7 +684,7 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==          fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")          svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")          os.mkdir(svn_config_dir_path) -        self.assertFalse(scm.has_authorization_for_realm(home_directory=fake_home_dir)) +        self.assertFalse(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))          os.rmdir(svn_config_dir_path)          os.rmdir(fake_home_dir) diff --git a/Tools/Scripts/webkitpy/common/config/build.py b/Tools/Scripts/webkitpy/common/config/build.py index 42d0721..d25d606 100644 --- a/Tools/Scripts/webkitpy/common/config/build.py +++ b/Tools/Scripts/webkitpy/common/config/build.py @@ -97,10 +97,12 @@ def _should_file_trigger_build(target_platform, file):          (r"(?:^|/)GNUmakefile\.am$", ["gtk"]),          (r"/\w+Chromium\w*\.(?:cpp|h|mm)$", ["chromium"]),          (r"Mac\.(?:cpp|h|mm)$", ["mac"]), +        (r"\.(?:vcproj|vsprops|sln)$", ["win"]),          (r"\.exp(?:\.in)?$", ["mac"]),          (r"\.gypi?", ["chromium"]),          (r"\.order$", ["mac"]),          (r"\.pr[io]$", ["qt"]), +        (r"\.vcproj/", ["win"]),          (r"\.xcconfig$", ["mac"]),          (r"\.xcodeproj/", ["mac"]),      ] diff --git a/Tools/Scripts/webkitpy/common/config/build_unittest.py b/Tools/Scripts/webkitpy/common/config/build_unittest.py index 9144874..6bd71e8 100644 --- 
a/Tools/Scripts/webkitpy/common/config/build_unittest.py +++ b/Tools/Scripts/webkitpy/common/config/build_unittest.py @@ -32,6 +32,7 @@ class ShouldBuildTest(unittest.TestCase):          (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),          (["Websites/bugs.webkit.org/foo"], []),          (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-snowleopard"]), +        (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),          (["Source/JavaScriptGlue/foo", "Source/WebCore/bar"], ["*"]),          (["Source/JavaScriptGlue/foo"], ["mac-leopard", "mac-snowleopard"]),          (["LayoutTests/foo"], ["*"]), diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py index fd9bdbb..50506c8 100644 --- a/Tools/Scripts/webkitpy/common/config/committers.py +++ b/Tools/Scripts/webkitpy/common/config/committers.py @@ -90,7 +90,7 @@ committers_unable_to_review = [      Committer("Brett Wilson", "brettw@chromium.org", "brettx"),      Committer("Cameron McCormack", "cam@webkit.org", "heycam"),      Committer("Carlos Garcia Campos", ["cgarcia@igalia.com", "carlosgc@gnome.org", "carlosgc@webkit.org"], "KaL"), -    Committer("Carol Szabo", "carol.szabo@nokia.com"), +    Committer("Carol Szabo", "carol@webkit.org", "cszabo1"),      Committer("Chang Shu", ["cshu@webkit.org", "Chang.Shu@nokia.com"], "cshu"),      Committer("Chris Evans", "cevans@google.com"),      Committer("Chris Petersen", "cpetersen@apple.com", "cpetersen"), @@ -129,6 +129,7 @@ committers_unable_to_review = [      Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"], "jpetso"),      Committer("Jakub Wieczorek", "jwieczorek@webkit.org", "fawek"),      Committer("James Hawkins", ["jhawkins@chromium.org", "jhawkins@google.com"], "jhawkins"), +    Committer("James Kozianski", ["koz@chromium.org", "koz@google.com"], "koz"),      Committer("James Simonsen", "simonjam@chromium.org", "simonjam"),      Committer("Jay Civelli", "jcivelli@chromium.org", "jcivelli"),      Committer("Jeff Miller", "jeffm@apple.com", "jeffm"), @@ -204,8 +205,9 @@ committers_unable_to_review = [      Committer("Victor Wang", "victorw@chromium.org", "victorw"),      Committer("Vitaly Repeshko", "vitalyr@chromium.org"),      Committer("William Siegrist", "wsiegrist@apple.com", "wms"), +    Committer("W. 
James MacLean", "wjmaclean@chromium.org", "wjmaclean"),      Committer("Xiaomei Ji", "xji@chromium.org", "xji"), -    Committer("Yael Aharon", "yael.aharon@nokia.com"), +    Committer("Yael Aharon", "yael.aharon@nokia.com", "yael"),      Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),      Committer("Yong Li", ["yong.li.webkit@gmail.com", "yong.li@torchmobile.com"], "yong"),      Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"), diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py index 9a5a269..444a4ac 100644 --- a/Tools/Scripts/webkitpy/common/config/ports.py +++ b/Tools/Scripts/webkitpy/common/config/ports.py @@ -30,6 +30,7 @@  import os  import platform +import sys  from webkitpy.common.system.executive import Executive @@ -43,7 +44,13 @@ class WebKitPort(object):      @classmethod      def script_shell_command(cls, script_name): -        return [cls.script_path(script_name)] +        script_path = cls.script_path(script_name) +        # Win32 does not support shebang. We need to detect the interpreter ourself. +        if sys.platform == 'win32': +            interpreter = Executive.interpreter_for_script(script_path) +            if interpreter: +                return [interpreter, script_path] +        return [script_path]      @staticmethod      def port(port_name): @@ -83,6 +90,14 @@ class WebKitPort(object):          return cls.script_shell_command("update-webkit")      @classmethod +    def check_webkit_style_command(cls): +        return cls.script_shell_command("check-webkit-style") + +    @classmethod +    def prepare_changelog_command(cls): +        return cls.script_shell_command("prepare-ChangeLog") + +    @classmethod      def build_webkit_command(cls, build_style=None):          command = cls.script_shell_command("build-webkit")          if build_style == "debug": diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py index 8daf92e..c781dfb 100644 --- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py @@ -46,7 +46,7 @@ from webkitpy.common.config import committers  from webkitpy.common.net.credentials import Credentials  from webkitpy.common.system.user import User  from webkitpy.thirdparty.autoinstalled.mechanize import Browser -from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, SoupStrainer +from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, BeautifulStoneSoup, SoupStrainer  # FIXME: parse_bug_id should not be a free function. @@ -74,7 +74,9 @@ def parse_bug_id_from_changelog(message):      match = re.search("^\s*" + Bugzilla.bug_url_long + "$", message, re.MULTILINE)      if match:          return int(match.group('bug_id')) -    return None +    # We weren't able to find a bug URL in the format used by prepare-ChangeLog. Fall back to the +    # first bug URL found anywhere in the message. +    return parse_bug_id(message)  def timestamp():      return datetime.now().strftime("%Y%m%d%H%M%S") @@ -218,7 +220,8 @@ class Bugzilla(object):          # script.          self.browser.set_handle_robots(False) -    # FIXME: Much of this should go into some sort of config module: +    # FIXME: Much of this should go into some sort of config module, +    # such as common.config.urls.      
bug_server_host = "bugs.webkit.org"      bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)      bug_server_url = "https://%s/" % bug_server_host @@ -270,7 +273,7 @@ class Bugzilla(object):      def _string_contents(self, soup):          # WebKit's bugzilla instance uses UTF-8. -        # BeautifulSoup always returns Unicode strings, however +        # BeautifulStoneSoup always returns Unicode strings, however          # the .string method returns a (unicode) NavigableString.          # NavigableString can confuse other parts of the code, so we          # convert from NavigableString to a real unicode() object using unicode(). @@ -317,7 +320,7 @@ class Bugzilla(object):          return [Bug(self._parse_bug_dictionary_from_xml(unicode(bug_xml)), self) for bug_xml in soup('bug')]      def _parse_bug_dictionary_from_xml(self, page): -        soup = BeautifulSoup(page) +        soup = BeautifulStoneSoup(page, convertEntities=BeautifulStoneSoup.XML_ENTITIES)          bug = {}          bug["id"] = int(soup.find("bug_id").string)          bug["title"] = self._string_contents(soup.find("short_desc")) diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py index 2e75ca9..b996b7c 100644 --- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py +++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py @@ -104,7 +104,7 @@ class BugzillaTest(unittest.TestCase):      <bug>            <bug_id>32585</bug_id>            <creation_ts>2009-12-15 15:17 PST</creation_ts> -          <short_desc>bug to test webkit-patch and commit-queue failures</short_desc> +          <short_desc>bug to test webkit-patch's and commit-queue's failures</short_desc>            <delta_ts>2009-12-27 21:04:50 PST</delta_ts>            <reporter_accessible>1</reporter_accessible>            <cclist_accessible>1</cclist_accessible> @@ -173,7 +173,7 @@ ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==      _expected_example_bug_parsing = {          "id" : 32585, -        "title" : u"bug to test webkit-patch and commit-queue failures", +        "title" : u"bug to test webkit-patch's and commit-queue's failures",          "cc_emails" : ["foo@bar.com", "example@example.com"],          "reporter_email" : "eric@webkit.org",          "assigned_to_email" : "webkit-unassigned@lists.webkit.org", @@ -203,7 +203,7 @@ ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==          ''' -        self.assertEquals(None, parse_bug_id_from_changelog(commit_text)) +        self.assertEquals(56988, parse_bug_id_from_changelog(commit_text))          commit_text = '''  2011-03-23  Ojan Vafai  <ojan@chromium.org> @@ -218,6 +218,25 @@ ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==          self.assertEquals(12345, parse_bug_id_from_changelog(commit_text)) +        commit_text = ''' +2011-03-31  Adam Roben  <aroben@apple.com> + +        Quote the executable path we pass to ::CreateProcessW + +        This will ensure that spaces in the path will be interpreted correctly. + +        Fixes <http://webkit.org/b/57569> Web process sometimes fails to launch when there are +        spaces in its path + +        Reviewed by Steve Falkenburg. + +        * UIProcess/Launcher/win/ProcessLauncherWin.cpp: +        (WebKit::ProcessLauncher::launchProcess): Surround the executable path in quotes. 
+ +        ''' + +        self.assertEquals(57569, parse_bug_id_from_changelog(commit_text)) +      # FIXME: This should move to a central location and be shared by more unit tests.      def _assert_dictionaries_equal(self, actual, expected): diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py index d23a6cc..5fdf184 100644 --- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py +++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py @@ -267,7 +267,7 @@ class Build(object):  class BuildBot(object): -    # FIXME: This should move into some sort of webkit_config.py +    # FIXME: This should move into common.config.urls.      default_host = "build.webkit.org"      def __init__(self, host=default_host): diff --git a/Tools/Scripts/webkitpy/common/net/credentials.py b/Tools/Scripts/webkitpy/common/net/credentials.py index 30480b3..d76405b 100644 --- a/Tools/Scripts/webkitpy/common/net/credentials.py +++ b/Tools/Scripts/webkitpy/common/net/credentials.py @@ -29,7 +29,6 @@  #  # Python module for reading stored web credentials from the OS. -import getpass  import os  import platform  import re @@ -149,7 +148,7 @@ class Credentials(object):          if not username:              username = User.prompt("%s login: " % self.host)          if not password: -            password = getpass.getpass("%s password for %s: " % (self.host, username)) +            password = User.prompt_password("%s password for %s: " % (self.host, username))              self._offer_to_store_credentials_in_keyring(username, password)          return (username, password) diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py index 249ecc9..a0e8ae4 100644 --- a/Tools/Scripts/webkitpy/common/net/layouttestresults.py +++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py @@ -134,6 +134,19 @@ class LayoutTestResults(object):      def __init__(self, test_results):          self._test_results = test_results +        self._failure_limit_count = None + +    # FIXME: run-webkit-tests should store the --exit-after-N-failures value +    # (or some indication of early exit) somewhere in the results.html/results.json +    # file.  Until it does, callers should set the limit to +    # --exit-after-N-failures value used in that run.  Consumers of LayoutTestResults +    # may use that value to know if absence from the failure list means PASS. 
+    # https://bugs.webkit.org/show_bug.cgi?id=58481 +    def set_failure_limit_count(self, limit): +        self._failure_limit_count = limit + +    def failure_limit_count(self): +        return self._failure_limit_count      def test_results(self):          return self._test_results diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py index 01b91b8..d25ad02 100644 --- a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py +++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py @@ -61,6 +61,12 @@ class LayoutTestResultsTest(unittest.TestCase):  </html>  """ +    def test_set_failure_limit_count(self): +        results = LayoutTestResults([]) +        self.assertEquals(results.failure_limit_count(), None) +        results.set_failure_limit_count(10) +        self.assertEquals(results.failure_limit_count(), 10) +      def test_parse_layout_test_results(self):          failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]          testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html' diff --git a/Tools/Scripts/webkitpy/common/net/statusserver.py b/Tools/Scripts/webkitpy/common/net/statusserver.py index abd298a..9622c89 100644 --- a/Tools/Scripts/webkitpy/common/net/statusserver.py +++ b/Tools/Scripts/webkitpy/common/net/statusserver.py @@ -25,6 +25,8 @@  # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# This the client designed to talk to Tools/QueueStatusServer.  from webkitpy.common.net.networktransaction import NetworkTransaction  from webkitpy.common.system.deprecated_logging import log @@ -39,6 +41,7 @@ _log = logging.getLogger("webkitpy.common.net.statusserver")  class StatusServer: +    # FIXME: This should probably move to common.config.urls.      default_host = "queues.webkit.org"      def __init__(self, host=default_host, browser=None, bot_id=None): diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py index 02619db..7d198dd 100644 --- a/Tools/Scripts/webkitpy/common/system/executive.py +++ b/Tools/Scripts/webkitpy/common/system/executive.py @@ -45,6 +45,7 @@ import sys  import time  from webkitpy.common.system.deprecated_logging import tee +from webkitpy.common.system.filesystem import FileSystem  from webkitpy.python24 import versioning @@ -179,6 +180,22 @@ class Executive(object):          # machines.          return 2 +    @staticmethod +    def interpreter_for_script(script_path, fs=FileSystem()): +        lines = fs.read_text_file(script_path).splitlines() +        if not len(lines): +            return None +        first_line = lines[0] +        if not first_line.startswith('#!'): +            return None +        if first_line.find('python') > -1: +            return sys.executable +        if first_line.find('perl') > -1: +            return 'perl' +        if first_line.find('ruby') > -1: +            return 'ruby' +        return None +      def kill_process(self, pid):          """Attempts to kill the given pid.          
Will fail silently if pid does not exist or insufficient permisssions.""" diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py index 1dadc36..9a14d6b 100644 --- a/Tools/Scripts/webkitpy/common/system/executive_unittest.py +++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py @@ -34,6 +34,7 @@ import sys  import unittest  from webkitpy.common.system.executive import Executive, run_command, ScriptError +from webkitpy.common.system.filesystem_mock import MockFileSystem  from webkitpy.test import cat, echo @@ -65,6 +66,33 @@ def never_ending_command():  class ExecutiveTest(unittest.TestCase): +    def assert_interpreter_for_content(self, intepreter, content): +        fs = MockFileSystem() +        file_path = None +        file_interpreter = None + +        tempfile, temp_name = fs.open_binary_tempfile('') +        tempfile.write(content) +        tempfile.close() +        file_interpreter = Executive.interpreter_for_script(temp_name, fs) + +        self.assertEqual(file_interpreter, intepreter) + +    def test_interpreter_for_script(self): +        self.assert_interpreter_for_content(None, '') +        self.assert_interpreter_for_content(None, 'abcd\nefgh\nijklm') +        self.assert_interpreter_for_content(None, '##/usr/bin/perl') +        self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl') +        self.assert_interpreter_for_content('perl', '#!/usr/bin/env perl\nfirst\nsecond') +        self.assert_interpreter_for_content('perl', '#!/usr/bin/perl') +        self.assert_interpreter_for_content('perl', '#!/usr/bin/perl -w') +        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python') +        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/env python\nfirst\nsecond') +        self.assert_interpreter_for_content(sys.executable, '#!/usr/bin/python') +        self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby') +        self.assert_interpreter_for_content('ruby', '#!/usr/bin/env ruby\nfirst\nsecond') +        self.assert_interpreter_for_content('ruby', '#!/usr/bin/ruby') +      def test_run_command_with_bad_command(self):          def run_bad_command():              run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True) diff --git a/Tools/Scripts/webkitpy/common/system/filesystem.py b/Tools/Scripts/webkitpy/common/system/filesystem.py index 1988546..58be03a 100644 --- a/Tools/Scripts/webkitpy/common/system/filesystem.py +++ b/Tools/Scripts/webkitpy/common/system/filesystem.py @@ -61,6 +61,10 @@ class FileSystem(object):          """Wraps os.path.basename()."""          return os.path.basename(path) +    def chdir(self, path): +        """Wraps os.chdir().""" +        return os.chdir(path) +      def copyfile(self, source, destination):          """Copies the contents of the file at the given path to the destination          path.""" @@ -108,6 +112,10 @@ class FileSystem(object):                      files.append(self.join(dirpath, filename))          return files +    def getcwd(self): +        """Wraps os.getcwd().""" +        return os.getcwd() +      def glob(self, path):          """Wraps glob.glob()."""          return glob.glob(path) diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py index a6d158a..3be5854 100644 --- a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py +++ 
b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py @@ -35,7 +35,7 @@ from webkitpy.common.system import ospath  class MockFileSystem(object): -    def __init__(self, files=None): +    def __init__(self, files=None, cwd='/'):          """Initializes a "mock" filesystem that can be used to completely          stub out a filesystem. @@ -48,6 +48,8 @@ class MockFileSystem(object):          self.written_files = {}          self._sep = '/'          self.current_tmpno = 0 +        self.cwd = cwd +        self.dirs = {}      def _get_sep(self):          return self._sep @@ -61,13 +63,19 @@ class MockFileSystem(object):          return path.rsplit(self.sep, 1)      def abspath(self, path): -        if path.endswith(self.sep): -            return path[:-1] -        return path +        if os.path.isabs(path): +            return self.normpath(path) +        return self.abspath(self.join(self.cwd, path))      def basename(self, path):          return self._split(path)[1] +    def chdir(self, path): +        path = self.normpath(path) +        if not self.isdir(path): +            raise OSError(errno.ENOENT, path, os.strerror(errno.ENOENT)) +        self.cwd = path +      def copyfile(self, source, destination):          if not self.exists(source):              self._raise_not_found(source) @@ -117,6 +125,9 @@ class MockFileSystem(object):          return files +    def getcwd(self, path): +        return self.cwd +      def glob(self, path):          # FIXME: This only handles a wildcard '*' at the end of the path.          # Maybe it should handle more? @@ -134,14 +145,18 @@ class MockFileSystem(object):      def isdir(self, path):          if path in self.files:              return False -        if not path.endswith(self.sep): -            path += self.sep +        path = self.normpath(path) +        if path in self.dirs: +            return True          # We need to use a copy of the keys here in order to avoid switching          # to a different thread and potentially modifying the dict in          # mid-iteration.          files = self.files.keys()[:] -        return any(f.startswith(path) for f in files) +        result = any(f.startswith(path) for f in files) +        if result: +            self.dirs[path] = True +        return result      def join(self, *comps):          # FIXME: might want tests for this and/or a better comment about how @@ -204,8 +219,9 @@ class MockFileSystem(object):          return TemporaryDirectory(fs=self, **kwargs)      def maybe_make_directory(self, *path): -        # FIXME: Implement such that subsequent calls to isdir() work? -        pass +        norm_path = self.normpath(self.join(*path)) +        if not self.isdir(norm_path): +            self.dirs[norm_path] = True      def move(self, source, destination):          if self.files[source] is None: @@ -216,7 +232,9 @@ class MockFileSystem(object):          self.written_files[source] = None      def normpath(self, path): -        return path +        # Like join(), relies on os.path functionality but normalizes the +        # path separator to the mock one. 
+        return re.sub(re.escape(os.path.sep), self.sep, os.path.normpath(path))      def open_binary_tempfile(self, suffix=''):          path = self._mktemp(suffix) diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py index 8455d72..8d4f0cb 100644 --- a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py +++ b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py @@ -48,6 +48,23 @@ class FileSystemTest(unittest.TestCase):          self._missing_file = os.path.join(self._this_dir, 'missing_file.py')          self._this_file = os.path.join(self._this_dir, 'filesystem_unittest.py') +    def test_chdir(self): +        fs = FileSystem() +        cwd = fs.getcwd() +        newdir = '/' +        if sys.platform == 'win32': +            newdir = 'c:\\' +        fs.chdir(newdir) +        self.assertEquals(fs.getcwd(), newdir) +        fs.chdir(cwd) + +    def test_chdir__notexists(self): +        fs = FileSystem() +        newdir = '/dirdoesnotexist' +        if sys.platform == 'win32': +            newdir = 'c:\\dirdoesnotexist' +        self.assertRaises(OSError, fs.chdir, newdir) +      def test_exists__true(self):          fs = FileSystem()          self.assertTrue(fs.exists(self._this_file)) @@ -56,6 +73,10 @@ class FileSystemTest(unittest.TestCase):          fs = FileSystem()          self.assertFalse(fs.exists(self._missing_file)) +    def test_getcwd(self): +        fs = FileSystem() +        self.assertTrue(fs.exists(fs.getcwd())) +      def test_isdir__true(self):          fs = FileSystem()          self.assertTrue(fs.isdir(self._this_dir)) diff --git a/Tools/Scripts/webkitpy/common/system/user.py b/Tools/Scripts/webkitpy/common/system/user.py index b79536c..aecb6ec 100644 --- a/Tools/Scripts/webkitpy/common/system/user.py +++ b/Tools/Scripts/webkitpy/common/system/user.py @@ -26,6 +26,7 @@  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import getpass  import logging  import os  import re @@ -65,6 +66,10 @@ class User(object):          return response      @classmethod +    def prompt_password(cls, message, repeat=1): +        return cls.prompt(message, repeat=repeat, raw_input=getpass.getpass) + +    @classmethod      def prompt_with_list(cls, list_title, list_items, can_choose_multiple=False, raw_input=raw_input):          print list_title          i = 0 diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py deleted file mode 100644 index 6d5cda8..0000000 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/dump_render_tree_thread.py +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env python -# Copyright (C) 2010 Google Inc. All rights reserved. -# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -#     * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -#     * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -#     * Neither the name of Google Inc. 
nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""This module implements a shared-memory, thread-based version of the worker -task in new-run-webkit-tests: it receives a list of tests from TestShellThread -and passes them one at a time to SingleTestRunner to execute.""" - -import logging -import Queue -import signal -import sys -import thread -import threading -import time - -from webkitpy.layout_tests.layout_package import worker_mixin - -_log = logging.getLogger("webkitpy.layout_tests.layout_package." -                         "dump_render_tree_thread") - - -class TestShellThread(threading.Thread, worker_mixin.WorkerMixin): -    def __init__(self, port, options, worker_number, worker_name, -                 filename_list_queue, result_queue): -        """Initialize all the local state for this DumpRenderTree thread. - -        Args: -          port: interface to port-specific hooks -          options: command line options argument from optparse -          worker_number: identifier for a particular worker thread. -          worker_name: for logging. -          filename_list_queue: A thread safe Queue class that contains lists -              of tuples of (filename, uri) pairs. -          result_queue: A thread safe Queue class that will contain -              serialized TestResult objects. -        """ -        threading.Thread.__init__(self) -        self._canceled = False -        self._exception_info = None -        self._next_timeout = None -        self._thread_id = None -        self._port = port -        self._options = options -        self._worker_number = worker_number -        self._name = worker_name -        self._filename_list_queue = filename_list_queue -        self._result_queue = result_queue -        self._current_group = None -        self._filename_list = [] -        self._test_group_timing_stats = {} -        self._test_results = [] -        self._num_tests = 0 -        self._start_time = 0 -        self._stop_time = 0 -        self._http_lock_wait_begin = 0 -        self._http_lock_wait_end = 0 - -    def cancel(self): -        """Set a flag telling this thread to quit.""" -        self._canceled = True - -    def clear_next_timeout(self): -        """Mark a flag telling this thread to stop setting timeouts.""" -        self._timeout = 0 - -    def exception_info(self): -        """If run() terminated on an uncaught exception, return it here -        ((type, value, traceback) tuple). -        Returns None if run() terminated normally. 
Meant to be called after -        joining this thread.""" -        return self._exception_info - -    def id(self): -        """Return a thread identifier.""" -        return self._thread_id - -    def next_timeout(self): -        """Return the time the test is supposed to finish by.""" -        if self._next_timeout: -            return self._next_timeout + self._http_lock_wait_time() -        return self._next_timeout - -    def get_test_group_timing_stats(self): -        """Returns a dictionary mapping test group to a tuple of -        (number of tests in that group, time to run the tests)""" -        return self._test_group_timing_stats - -    def get_test_results(self): -        """Return the list of all tests run on this thread. - -        This is used to calculate per-thread statistics. - -        """ -        return self._test_results - -    def get_total_time(self): -        return max(self._stop_time - self._start_time - -                   self._http_lock_wait_time(), 0.0) - -    def get_num_tests(self): -        return self._num_tests - -    def run(self): -        """Delegate main work to a helper method and watch for uncaught -        exceptions.""" - -        self._covered_run() - -    def _covered_run(self): -        # FIXME: this is a separate routine to work around a bug -        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. -        self._thread_id = thread.get_ident() -        self._start_time = time.time() -        self._num_tests = 0 -        try: -            _log.debug('%s starting' % (self.getName())) -            self._run(test_runner=None, result_summary=None) -            _log.debug('%s done (%d tests)' % (self.getName(), -                       self.get_num_tests())) -        except KeyboardInterrupt: -            self._exception_info = sys.exc_info() -            _log.debug("%s interrupted" % self.getName()) -        except: -            # Save the exception for our caller to see. -            self._exception_info = sys.exc_info() -            self._stop_time = time.time() -            _log.error('%s dying, exception raised' % self.getName()) - -        self._stop_time = time.time() - -    def run_in_main_thread(self, test_runner, result_summary): -        """This hook allows us to run the tests from the main thread if -        --num-test-shells==1, instead of having to always run two or more -        threads. This allows us to debug the test harness without having to -        do multi-threaded debugging.""" -        self._run(test_runner, result_summary) - -    def _http_lock_wait_time(self): -        """Return the time what http locking takes.""" -        if self._http_lock_wait_begin == 0: -            return 0 -        if self._http_lock_wait_end == 0: -            return time.time() - self._http_lock_wait_begin -        return self._http_lock_wait_end - self._http_lock_wait_begin - -    def _run(self, test_runner, result_summary): -        """Main work entry point of the thread. Basically we pull urls from the -        filename queue and run the tests until we run out of urls. - -        If test_runner is not None, then we call test_runner.UpdateSummary() -        with the results of each test.""" - -        # Initialize the real state of the WorkerMixin now that we're executing -        # in the child thread. Technically, we could have called this during -        # __init__(), but we wait until now to match Worker.run(). 
-        self.safe_init(self._port) - -        while True: -            if self._canceled: -                _log.debug('Testing cancelled') -                self.cleanup() -                return - -            if len(self._filename_list) is 0: -                if self._current_group is not None: -                    self._test_group_timing_stats[self._current_group] = \ -                        (self._num_tests_in_current_group, -                         time.time() - self._current_group_start_time) - -                try: -                    self._current_group, self._filename_list = \ -                        self._filename_list_queue.get_nowait() -                except Queue.Empty: -                    self.cleanup() -                    return - -                if self._current_group == "tests_to_http_lock": -                    self._http_lock_wait_begin = time.time() -                    self.start_servers_with_lock() -                    self._http_lock_wait_end = time.time() -                elif self._has_http_lock: -                    self.stop_servers_with_lock() - -                self._num_tests_in_current_group = len(self._filename_list) -                self._current_group_start_time = time.time() - -            test_input = self._filename_list.pop(0) - -            # We have a url, run tests. -            self._num_tests += 1 - -            result = self.run_test_with_timeout(test_input, self.timeout(test_input)) - -            self.clean_up_after_test(test_input, result) -            self._test_results.append(result) -            self._result_queue.put(result.dumps()) - -            if test_runner: -                test_runner.update_summary(result_summary) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py index 19b02e8..dbb16c0 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py @@ -50,6 +50,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase                         test_expectations.IMAGE: "I",                         test_expectations.TEXT: "F",                         test_expectations.MISSING: "O", +                       test_expectations.AUDIO: "A",                         test_expectations.IMAGE_PLUS_TEXT: "Z"}      def __init__(self, port, builder_name, build_name, build_number, diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html new file mode 100644 index 0000000..33aa04a --- /dev/null +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results.html @@ -0,0 +1,555 @@ +<!DocType html> +<style> +body { +    margin: 4px; +} + +body > p:first-of-type { +    margin-top: 0; +} + +tr:first-of-type:hover { +    opacity: 0.7 +} + +thead, tbody { +    background-color: #E3E9FF; +} + +td { +    padding: 0 4px; +} + +th:empty, td:empty { +    padding: 0; +} + +th { +    -webkit-user-select: none; +    -moz-user-select: none; +} + +label { +    margin-left: 10px; +} + +.results-row { +    background-color: white; +} + +.results-row iframe { +    width: 800px; +    height: 600px; +} + +#options { +    position: absolute; +    top: 4px; +    right: 4px; +} + +.expand-button { +    background-color: white; +    color: blue; +    width: 11px; +    height: 11px; +    
border: 1px solid blue; +    display: inline-block; +    margin: 0 3px 0 0; +    position: relative; +} + +.expand-button-text { +    position: absolute; +    top: -0.3em; +    left: 1px; +} + +.result-container { +    display: inline-block; +    border: 1px solid gray; +} + +.result-container iframe, .result-container img { +    border: 0; +    border-top: 1px solid lightgray; +    vertical-align: top; +} + +.label { +    padding-left: 3px; +    font-weight: bold; +    font-size: small; +} + +.pixel-zoom-container { +    position: fixed; +    top: 0; +    left: 0; +    width: 100%; +    display: -webkit-box; +} + +.pixel-zoom-container > * { +    display: -webkit-box; +    -webkit-box-flex: 1; +    border: 1px inset lightgray; +    height: 100px; +    overflow: hidden; +    zoom: 300%; +    background-color: white; +} + +.pixel-zoom-container img { +    width: 800px; +    height: 600px; +    vertical-align: top; +} +</style> + +<script> +var g_results; +function ADD_RESULTS(input) +{ +    g_results = input; +} +</script> + +<script src="full_results.json"></script> + +<script> +function stripExtension(test) +{ +    var index = test.lastIndexOf('.'); +    return test.substring(0, index); +} + +function parentOfType(node, selector) +{ +    while (node = node.parentElement) { +        if (node.webkitMatchesSelector(selector)) +            return node; +    } +    return null; +} + +function appendResultIframe(src, parent) +{ +    // FIXME: use audio tags for AUDIO tests? +    var layoutTestsIndex = src.indexOf('LayoutTests'); +    var name; +    if (layoutTestsIndex != -1) { +        var hasTrac = src.indexOf('trac.webkit.org') != -1; +        var prefix = hasTrac ? 'trac.webkit.org/.../' : ''; +        name = prefix + src.substring(layoutTestsIndex + 'LayoutTests/'.length); +    } else { +        var lastDashIndex = src.lastIndexOf('-pretty'); +        if (lastDashIndex == -1) +            lastDashIndex = src.lastIndexOf('-'); +        name = src.substring(lastDashIndex + 1); +    } + +    var tagName = (src.lastIndexOf('.png') == -1) ? 
'iframe' : 'img'; + +    var container = document.createElement('div'); +    container.className = 'result-container'; +    container.innerHTML = '<div class=label>' + name + '</div><' + tagName + ' src="' + src + '?format=txt"></' + tagName + '>'; +    parent.appendChild(container); +} + +function expandExpectations(e) +{ +    var expandLink = e.target; +    if (expandLink.className != 'expand-button-text') +        expandLink = expandLink.querySelector('.expand-button-text'); + +    var isExpand = expandLink.textContent == '+'; +    var row = parentOfType(expandLink, 'tr'); +    var parentTbody = row.parentNode; +    var existingResultsRow = parentTbody.querySelector('.results-row'); +     +    if (!isExpand) { +        expandLink.textContent = '+'; +        existingResultsRow.style.display = 'none'; +        return; +    } +     +    var enDash = '\u2013'; +    expandLink.textContent = enDash; +    if (existingResultsRow) { +        existingResultsRow.style.display = ''; +        return; +    } +     +    var newRow = document.createElement('tr'); +    newRow.className = 'results-row'; +    var newCell = document.createElement('td'); +    newCell.colSpan = row.querySelectorAll('td').length; + +    appendResultIframe(row.querySelector('.test-link').href, newCell); + +    var resultLinks = row.querySelectorAll('.result-link'); +    for (var i = 0; i < resultLinks.length; i++) +        appendResultIframe(resultLinks[i].href, newCell); + +    newRow.appendChild(newCell); +    parentTbody.appendChild(newRow); +} + +function testLink(test) +{ +    var basePath; +    if (g_results.layout_tests_dir && location.toString().indexOf('file://') == 0) +        basePath = g_results.layout_tests_dir + '/'; +    else +        basePath = 'http://trac.webkit.org/browser/trunk/LayoutTests/'; +    return '<span class=expand-button onclick="expandExpectations(event)"><span class=expand-button-text>+</span></span>' + +        '<a class=test-link href="' + basePath + test + '">' + test + '</a>'; +} + +function resultLink(testPrefix, suffix, contents) +{ +    return '<a class=result-link href="' + testPrefix + suffix + '">' + contents + '</a> '; +} + +var g_hasTextFailures = false; +var g_hasImageFailures = false; + +var g_testsWithStderr = []; +var g_newTests = []; +var g_hasHttpTests = false; + +function tableRows() +{ +    var html = ''; +    for (var test in g_results.tests) { +        if (g_results.tests[test].has_stderr) +            g_testsWithStderr.push(test); + +        g_hasHttpTests = g_hasHttpTests || test.indexOf('http/') == 0; + +        var actual = g_results.tests[test].actual; +        if (actual == 'MISSING') { +            // FIXME: make sure that new-run-webkit-tests spits out an -actual.txt file for +            // tests with MISSING results. +            g_newTests.push(test); +            continue; +        } + +        var expected = g_results.tests[test].expected || 'PASS'; +        if (actual == 'PASS' && (!g_results.uses_expectations_file || expected == 'PASS')) +          continue; + +        // FIXME: put unexpected passes in a separate table. 
+ +        var row = '<td>' + testLink(test) + '</td>'; +        var test_prefix = stripExtension(test); + +        row += '<td>'; +        if (actual == 'CRASH') +            row += resultLink(test_prefix, '-stack.txt', 'stack'); +        else if (actual == 'AUDIO') { +            row += resultLink(test_prefix, '-expected.wav', 'expected'); +            row += resultLink(test_prefix, '-actual.wav', 'actual'); +        } else if (actual.indexOf('TEXT') != -1 || actual == 'TIMEOUT') { +            // FIXME: only include timeout actual/expected results here if we actually spit out results for timeout tests. +            g_hasTextFailures = true; +            row += resultLink(test_prefix, '-expected.txt', 'expected') + +                resultLink(test_prefix, '-actual.txt', 'actual') + +                resultLink(test_prefix, '-diff.txt', 'diff'); +       +            if (g_results.has_pretty_patch) +                row += resultLink(test_prefix, '-pretty-diff.html', 'pretty diff'); + +            if (g_results.has_wdiff) +                row += resultLink(test_prefix, '-wdiff.html', 'wdiff'); +        } + +        row += '</td><td>'; + +        if (actual.indexOf('IMAGE') != -1) { +            g_hasImageFailures = true; + +            if (g_results.tests[test].is_mismatch_reftest) { +                row += resultLink(test_prefix, '-expected-mismatch.html', 'ref mismatch html') + +                    resultLink(test_prefix, '-actual.png', 'actual'); +            } else { +                if (g_results.tests[test].is_reftest) +                    row += resultLink(test_prefix, '-expected.html', 'ref html'); + +                row += resultLink(test_prefix, '-expected.png', 'expected') + +                    resultLink(test_prefix, '-actual.png', 'actual') + +                    resultLink(test_prefix, '-diff.png', 'diff'); +            } +        } + +        row += '</td>'; +        row += '<td>' + actual + '</td>'; + +        if (g_results.uses_expectations_file) +          row += '<td>' + expected + '</td>'; + +        var isExpected = actual == 'SKIP'; +        if (!isExpected && g_results.uses_expectations_file) { +            var expectedArray = expected.split(' '); +            if (expectedArray.indexOf(actual) != -1) +                isExpected = true; +            else if (expectedArray.indexOf('FAIL') != -1) +                isExpected = actual == 'IMAGE' || actual == 'TEXT' || actual == 'IMAGE+TEXT'; +        } +        html += '<tbody class="' + (isExpected ? 
'expected' : '') + '"><tr>' + row + '</tr></tbody>'; +    } +    return html; +} + +var html = ''; +if (g_results.uses_expectations_file) +    html += '<div id=options><label><input class="unexpected-results" type=checkbox checked>Only show unexpected results</label></div>'; + +var tableRowsHtml = tableRows(); + +if (tableRowsHtml) { +    html += '<p>Tests where results did not match expected results:</p>' + +        '<table id="results-table"><thead><tr>' + +        '<th>test</th>' + +        '<th id="text-results-header">text results</th>' + +        '<th id="image-results-header">image results</th>' + +        '<th>failure type</th>'; +         +    if (g_results.uses_expectations_file) +        html += '<th>expected failure type</th>'; + +    html += '</tr></thead>' + tableRowsHtml + '</table>'; +} + +function appendTestList(tests, header, tableId, fileSuffix, linkName) +{ +    tests.sort(); + +    html += '<p>' + header + '</p><table id="' + tableId + '">'; +    for (var i = 0; i < tests.length; i++) { +        var test = tests[i]; +        html += '<tbody><tr><td>' + testLink(test) + '</td><td>'; +         +        if (fileSuffix.indexOf('actual') == -1) +            html += resultLink(stripExtension(test), fileSuffix, linkName); +        else { +            var testObject = g_results.tests[test]; +            if (testObject.is_missing_audio) +                html += resultLink(stripExtension(test), '-actual.wav', 'audio result'); +            if (testObject.is_missing_text) +                html += resultLink(stripExtension(test), fileSuffix, linkName); +            if (testObject.is_missing_image) +                html += resultLink(stripExtension(test), '-actual.png', 'png result'); +        }  +         +        html += '</td></tr></tbody>'; +    } +    html += '</table>' +} + +if (g_newTests.length) +    appendTestList(g_newTests, 'Tests that had no expected results (probably new):', 'new-tests-table', '-actual.txt', 'result'); + +if (g_testsWithStderr.length) +    appendTestList(g_testsWithStderr, 'Tests that had stderr output:', 'stderr-table', '-stderr.txt', 'stderr'); + +if (g_hasHttpTests) { +    html += '<p>httpd access log: <a href="access_log.txt">access_log.txt</a></p>' + +        '<p>httpd error log: <a href="error_log.txt">error_log.txt</a></p>'; +} + +document.write(html); + +function toArray(nodeList) +{ +    return Array.prototype.slice.call(nodeList); +} + +function trim(string) +{ +    return string.replace(/^[\s\xa0]+|[\s\xa0]+$/g, ''); +} + +// Just a namespace for code management. 
+var TableSorter = {}; + +TableSorter._forwardArrow = '<svg style="width:10px;height:10px"><polygon points="0,0 10,0 5,10" style="fill:#aaa"></svg>'; + +TableSorter._backwardArrow = '<svg style="width:10px;height:10px"><polygon points="0,10 10,10 5,0" style="fill:#aaa"></svg>'; + +TableSorter._sortedContents = function(header, arrow) +{ +    return arrow + ' ' + trim(header.textContent) + ' ' + arrow; +} + +TableSorter._updateHeaderClassNames = function(newHeader) +{ +    var sortHeader = document.querySelector('.sortHeader'); +    if (sortHeader) { +        if (sortHeader == newHeader) { +            var isAlreadyReversed = sortHeader.classList.contains('reversed'); +            if (isAlreadyReversed) +                sortHeader.classList.remove('reversed'); +            else +                sortHeader.classList.add('reversed'); +        } else { +            sortHeader.textContent = sortHeader.textContent; +            sortHeader.classList.remove('sortHeader'); +            sortHeader.classList.remove('reversed'); +        } +    } + +    newHeader.classList.add('sortHeader'); +} + +TableSorter._textContent = function(tbodyRow, column) +{ +    return tbodyRow.querySelectorAll('td')[column].textContent; +} + +TableSorter._sortRows = function(newHeader, reversed) +{ +    var testsTable = document.getElementById('results-table'); +    var headers = toArray(testsTable.querySelectorAll('th')); +    var sortColumn = headers.indexOf(newHeader); + +    var rows = toArray(testsTable.querySelectorAll('tbody')); + +    rows.sort(function(a, b) { +        // Only need to support lexicographic sort for now. +        var aText = TableSorter._textContent(a, sortColumn); +        var bText = TableSorter._textContent(b, sortColumn); +         +        // Forward sort equal values by test name. +        if (sortColumn && aText == bText) { +            var aTestName = TableSorter._textContent(a, 0); +            var bTestName = TableSorter._textContent(b, 0); +            if (aTestName == bTestName) +                return 0; +            return aTestName < bTestName ? -1 : 1; +        } + +        if (reversed) +            return aText < bText ? 1 : -1; +        else +            return aText < bText ? -1 : 1; +    }); + +    for (var i = 0; i < rows.length; i++) +        testsTable.appendChild(rows[i]); +} + +TableSorter.sortColumn = function(columnNumber) +{ +    var newHeader = document.getElementById('results-table').querySelectorAll('th')[columnNumber]; +    TableSorter._sort(newHeader); +} + +TableSorter.handleClick = function(e) +{ +    var newHeader = e.target; +    if (newHeader.localName != 'th') +        return; +    TableSorter._sort(newHeader); +} + +TableSorter._sort = function(newHeader) +{ +    TableSorter._updateHeaderClassNames(newHeader); +     +    var reversed = newHeader.classList.contains('reversed'); +    var sortArrow = reversed ? 
TableSorter._backwardArrow : TableSorter._forwardArrow; +    newHeader.innerHTML = TableSorter._sortedContents(newHeader, sortArrow); +     +    TableSorter._sortRows(newHeader, reversed); +} + +if (document.getElementById('results-table')) +    document.getElementById('results-table').addEventListener('click', TableSorter.handleClick, false); +TableSorter.sortColumn(0); + +var PixelZoomer = {}; + +PixelZoomer._createContainer = function(e) +{ +    var tbody = parentOfType(e.target, 'tbody'); +    var imageDiffLinks = tbody.querySelector('tr').querySelectorAll('a[href$=".png"]'); +     +    var container = document.createElement('div'); +    container.className = 'pixel-zoom-container'; +     +    var html = ''; +    for (var i = 0; i < imageDiffLinks.length; i++) +        html += '<div class=zoom-image-container><img src="' + imageDiffLinks[i].href + '"></div>'; +     +    container.innerHTML = html; +    document.body.appendChild(container); + +    PixelZoomer._position(e); +} + +PixelZoomer._position = function(e) +{ +    var pageX = e.clientX; +    var pageY = e.clientY; +    var targetLocation = e.target.getBoundingClientRect(); +    var x = pageX - targetLocation.left; +    var y = pageY - targetLocation.top; + +    var zoomContainers = document.querySelectorAll('.pixel-zoom-container > .zoom-image-container'); +    for (var i = 0; i < zoomContainers.length; i++) { +        var container = zoomContainers[i]; +        container.scrollLeft = x - container.offsetWidth / 2; +        container.scrollTop = y - container.offsetHeight / 2; +    } +} + +PixelZoomer.handleMouseMove = function(e) { +    if (PixelZoomer._mouseMoveTimeout) +        clearTimeout(PixelZoomer._mouseMoveTimeout); + +    if (parentOfType(e.target, '.pixel-zoom-container')) +        return; + +    var container = document.querySelector('.pixel-zoom-container'); +    if (!e.target.src || e.target.src.indexOf('.png') == -1) { +        if (container) +            container.parentNode.removeChild(container); +        return; +    } +     +    if (!container) { +        PixelZoomer._mouseMoveTimeout = setTimeout(function() { +            PixelZoomer._createContainer(e); +        }, 200); +        return; +    } +     +    PixelZoomer._position(e); +} + +document.body.addEventListener('mousemove', PixelZoomer.handleMouseMove, false); + + +var unexpectedStyleNode = document.createElement('style'); +document.body.appendChild(unexpectedStyleNode); + +function updateExpectedResults() +{ +    var checkBox = document.querySelector('.unexpected-results'); +    if (!checkBox || checkBox.checked) +        unexpectedStyleNode.innerText = '.expected { display: none; }'; +    else +        unexpectedStyleNode.innerText = ''; +} + +updateExpectedResults(); +if (document.querySelector('.unexpected-results')) +    document.querySelector('.unexpected-results').addEventListener('change', updateExpectedResults, false); + +if (!g_hasTextFailures) +  document.body.getElementById('text-results-header').textContent = ''; +if (!g_hasImageFailures) +  document.body.getElementById('image-results-header').textContent = ''; +</script> diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py index 4886c30..7ead483 100644..100755 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker.py @@ -45,7 +45,9 @@ TestRunner2  --> _InlineManager ---> 
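A note on the sort logic above: TableSorter._sortRows orders the table's tbody rows lexicographically by the clicked column and, when two cells tie, falls back to the test name in column 0 so the grouping stays deterministic even after reversing. A minimal sketch of the same comparison in Python (sort_rows and the cell-text tuples are illustrative stand-ins for the DOM rows, not part of the patch):

    def sort_rows(rows, column, reverse=False):
        # rows: sequences of cell text, with the test name in column 0.
        # Two passes with a stable sort reproduce the tie-break above: rows
        # whose values in the clicked column are equal keep ascending
        # test-name order even when the column sort itself is reversed.
        by_name = sorted(rows, key=lambda row: row[0])
        return sorted(by_name, key=lambda row: row[column], reverse=reverse)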
_InlineWorker <-> Worker  import logging  import optparse +import printing  import Queue +import sys  import thread  import threading  import time @@ -315,9 +317,15 @@ if multiprocessing:              _log.error("%s (pid %d) is wedged on test %s" % (self.name, self.pid, test_name))          def run(self): -            logging.basicConfig() -            port_obj = port.get(self._platform_name, self._options) +            options = self._options +            port_obj = port.get(self._platform_name, options) +            # FIXME: this won't work if the calling process is logging +            # somewhere other than sys.stderr and sys.stdout, but I'm not sure +            # if this will be an issue in practice. +            printer = printing.Printer(port_obj, options, sys.stderr, sys.stdout, +                int(options.child_processes), options.experimental_fully_parallel)              self._client.run(port_obj) +            printer.cleanup()  class _MultiProcessWorkerConnection(_WorkerConnection): diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py index c32f880..6919225 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/manager_worker_broker_unittest.py @@ -42,6 +42,7 @@ from webkitpy.common.system import outputcapture  from webkitpy.layout_tests import port  from webkitpy.layout_tests.layout_package import manager_worker_broker  from webkitpy.layout_tests.layout_package import message_broker2 +from webkitpy.layout_tests.layout_package import printing  # In order to reliably control when child workers are starting and stopping,  # we use a pair of global variables to hold queues used for messaging. Ideally @@ -104,7 +105,10 @@ class _TestWorker(manager_worker_broker.AbstractWorker):  def get_options(worker_model): -    option_list = manager_worker_broker.runtime_options() +    option_list = (manager_worker_broker.runtime_options() + +                   printing.print_options() + +                   [optparse.make_option("--experimental-fully-parallel", default=False), +                    optparse.make_option("--child-processes", default='2')])      parser = optparse.OptionParser(option_list=option_list)      options, args = parser.parse_args(args=['--worker-model', worker_model])      return options diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py deleted file mode 100644 index 66a7aa8..0000000 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -#     * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -#     * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -#     * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -"""Module for handling messages, threads, processes, and concurrency for run-webkit-tests. - -Testing is accomplished by having a manager (TestRunner) gather all of the -tests to be run, and sending messages to a pool of workers (TestShellThreads) -to run each test. Each worker communicates with one driver (usually -DumpRenderTree) to run one test at a time and then compare the output against -what we expected to get. - -This modules provides a message broker that connects the manager to the -workers: it provides a messaging abstraction and message loops, and -handles launching threads and/or processes depending on the -requested configuration. -""" - -import logging -import time - -from webkitpy.common.system import stack_utils - -import dump_render_tree_thread - -_log = logging.getLogger(__name__) - - -def get(port, options): -    """Return an instance of a WorkerMessageBroker.""" -    worker_model = options.worker_model -    if worker_model == 'old-inline': -        return InlineBroker(port, options) -    if worker_model == 'old-threads': -        return MultiThreadedBroker(port, options) -    raise ValueError('unsupported value for --worker-model: %s' % worker_model) - - -class _WorkerState(object): -    def __init__(self, name): -        self.name = name -        self.thread = None - - -class WorkerMessageBroker(object): -    def __init__(self, port, options): -        self._port = port -        self._options = options -        self._num_workers = int(self._options.child_processes) - -        # This maps worker names to their _WorkerState values. -        self._workers = {} - -    def _threads(self): -        return tuple([w.thread for w in self._workers.values()]) - -    def start_workers(self, test_runner): -        """Starts up the pool of workers for running the tests. 
- -        Args: -            test_runner: a handle to the manager/TestRunner object -        """ -        self._test_runner = test_runner -        for worker_number in xrange(self._num_workers): -            worker = _WorkerState('worker-%d' % worker_number) -            worker.thread = self._start_worker(worker_number, worker.name) -            self._workers[worker.name] = worker -        return self._threads() - -    def _start_worker(self, worker_number, worker_name): -        raise NotImplementedError - -    def run_message_loop(self): -        """Loop processing messages until done.""" -        raise NotImplementedError - -    def cancel_workers(self): -        """Cancel/interrupt any workers that are still alive.""" -        pass - -    def cleanup(self): -        """Perform any necessary cleanup on shutdown.""" -        pass - - -class InlineBroker(WorkerMessageBroker): -    def _start_worker(self, worker_number, worker_name): -        # FIXME: Replace with something that isn't a thread. -        thread = dump_render_tree_thread.TestShellThread(self._port, -            self._options, worker_number, worker_name, -            self._test_runner._current_filename_queue, -            self._test_runner._result_queue) -        # Note: Don't start() the thread! If we did, it would actually -        # create another thread and start executing it, and we'd no longer -        # be single-threaded. -        return thread - -    def run_message_loop(self): -        thread = self._threads()[0] -        thread.run_in_main_thread(self._test_runner, -                                  self._test_runner._current_result_summary) -        self._test_runner.update() - - -class MultiThreadedBroker(WorkerMessageBroker): -    def _start_worker(self, worker_number, worker_name): -        thread = dump_render_tree_thread.TestShellThread(self._port, -            self._options, worker_number, worker_name, -            self._test_runner._current_filename_queue, -            self._test_runner._result_queue) -        thread.start() -        return thread - -    def run_message_loop(self): -        threads = self._threads() -        wedged_threads = set() - -        # Loop through all the threads waiting for them to finish. -        some_thread_is_alive = True -        while some_thread_is_alive: -            some_thread_is_alive = False -            t = time.time() -            for thread in threads: -                if thread.isAlive(): -                    if thread in wedged_threads: -                        continue - -                    some_thread_is_alive = True -                    next_timeout = thread.next_timeout() -                    if next_timeout and t > next_timeout: -                        stack_utils.log_thread_state(_log.error, thread.getName(), thread.id(), "is wedged") -                        thread.clear_next_timeout() -                        wedged_threads.add(thread) - -                exception_info = thread.exception_info() -                if exception_info is not None: -                    # Re-raise the thread's exception here to make it -                    # clear that testing was aborted. Otherwise, -                    # the tests that did not run would be assumed -                    # to have passed. 
-                    raise exception_info[0], exception_info[1], exception_info[2] - -            self._test_runner.update() - -            if some_thread_is_alive: -                time.sleep(0.01) - -        if wedged_threads: -            _log.warning("All remaining threads are wedged, bailing out.") - -    def cancel_workers(self): -        threads = self._threads() -        for thread in threads: -            thread.cancel() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py deleted file mode 100644 index f4cb5d2..0000000 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/message_broker_unittest.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2010 Google Inc. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -#     * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -#     * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -#     * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -import logging -import Queue -import sys -import thread -import threading -import time -import unittest - -from webkitpy.common import array_stream -from webkitpy.common.system import outputcapture -from webkitpy.tool import mocktool - -from webkitpy.layout_tests import run_webkit_tests - -import message_broker - - -class TestThread(threading.Thread): -    def __init__(self, started_queue, stopping_queue): -        threading.Thread.__init__(self) -        self._thread_id = None -        self._started_queue = started_queue -        self._stopping_queue = stopping_queue -        self._timeout = False -        self._timeout_queue = Queue.Queue() -        self._exception_info = None - -    def id(self): -        return self._thread_id - -    def getName(self): -        return "worker-0" - -    def run(self): -        self._covered_run() - -    def _covered_run(self): -        # FIXME: this is a separate routine to work around a bug -        # in coverage: see http://bitbucket.org/ned/coveragepy/issue/85. 
-        self._thread_id = thread.get_ident() -        try: -            self._started_queue.put('') -            msg = self._stopping_queue.get() -            if msg == 'KeyboardInterrupt': -                raise KeyboardInterrupt -            elif msg == 'Exception': -                raise ValueError() -            elif msg == 'Timeout': -                self._timeout = True -                self._timeout_queue.get() -        except: -            self._exception_info = sys.exc_info() - -    def exception_info(self): -        return self._exception_info - -    def next_timeout(self): -        if self._timeout: -            return time.time() - 10 -        return time.time() - -    def clear_next_timeout(self): -        self._next_timeout = None - -class TestHandler(logging.Handler): -    def __init__(self, astream): -        logging.Handler.__init__(self) -        self._stream = astream - -    def emit(self, record): -        self._stream.write(self.format(record)) - - -class MultiThreadedBrokerTest(unittest.TestCase): -    class MockTestRunner(object): -        def __init__(self): -            pass - -        def __del__(self): -            pass - -        def update(self): -            pass - -    def run_one_thread(self, msg): -        runner = self.MockTestRunner() -        port = None -        options = mocktool.MockOptions(child_processes='1') -        starting_queue = Queue.Queue() -        stopping_queue = Queue.Queue() -        broker = message_broker.MultiThreadedBroker(port, options) -        broker._test_runner = runner -        child_thread = TestThread(starting_queue, stopping_queue) -        broker._workers['worker-0'] = message_broker._WorkerState('worker-0') -        broker._workers['worker-0'].thread = child_thread -        child_thread.start() -        started_msg = starting_queue.get() -        stopping_queue.put(msg) -        res = broker.run_message_loop() -        if msg == 'Timeout': -            child_thread._timeout_queue.put('done') -        child_thread.join(1.0) -        self.assertFalse(child_thread.isAlive()) -        return res - -    def test_basic(self): -        interrupted = self.run_one_thread('') -        self.assertFalse(interrupted) - -    def test_interrupt(self): -        self.assertRaises(KeyboardInterrupt, self.run_one_thread, 'KeyboardInterrupt') - -    def test_timeout(self): -        # Because the timeout shows up as a wedged thread, this also tests -        # log_wedged_worker(). -        oc = outputcapture.OutputCapture() -        stdout, stderr = oc.capture_output() -        logger = message_broker._log -        astream = array_stream.ArrayStream() -        handler = TestHandler(astream) -        logger.addHandler(handler) -        interrupted = self.run_one_thread('Timeout') -        stdout, stderr = oc.restore_output() -        self.assertFalse(interrupted) -        logger.handlers.remove(handler) -        self.assertTrue('All remaining threads are wedged, bailing out.' 
in astream.get()) - -    def test_exception(self): -        self.assertRaises(ValueError, self.run_one_thread, 'Exception') - - -if __name__ == '__main__': -    unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py index a8c716f..c38cb8f 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/single_test_runner.py @@ -82,7 +82,7 @@ class SingleTestRunner:              # For example, if 'foo.html' has two expectation files, 'foo-expected.html' and              # 'foo-expected.txt', we should warn users. One test file must be used exclusively              # in either layout tests or reftests, but not in both. -            for suffix in ['.txt', '.checksum', '.png']: +            for suffix in ('.txt', '.checksum', '.png', '.wav'):                  expected_filename = self._port.expected_filename(self._filename, suffix)                  if fs.exists(expected_filename):                      _log.error('The reftest (%s) can not have an expectation file (%s).' @@ -91,7 +91,8 @@ class SingleTestRunner:      def _expected_driver_output(self):          return base.DriverOutput(self._port.expected_text(self._filename),                                   self._port.expected_image(self._filename), -                                 self._port.expected_checksum(self._filename)) +                                 self._port.expected_checksum(self._filename), +                                 self._port.expected_audio(self._filename))      def _should_fetch_expected_checksum(self):          return (self._options.pixel_tests and @@ -122,14 +123,14 @@ class SingleTestRunner:          driver_output = self._driver.run_test(self._driver_input())          expected_driver_output = self._expected_driver_output()          test_result = self._compare_output(driver_output, expected_driver_output) -        test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename, +        test_result_writer.write_test_result(self._port, self._filename,                                               driver_output, expected_driver_output, test_result.failures)          return test_result      def _run_rebaseline(self):          driver_output = self._driver.run_test(self._driver_input())          failures = self._handle_error(driver_output) -        test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename, +        test_result_writer.write_test_result(self._port, self._filename,                                               driver_output, None, failures)          # FIXME: It the test crashed or timed out, it might be bettter to avoid          # to write new baselines. @@ -142,6 +143,9 @@ class SingleTestRunner:          # DumpRenderTree may not output utf-8 text (e.g. webarchives).          
self._save_baseline_data(driver_output.text, ".txt",                                   generate_new_baseline=self._options.new_baseline) +        if driver_output.audio: +            self._save_baseline_data(driver_output.audio, '.wav', +                                     generate_new_baseline=self._options.new_baseline)          if self._options.pixel_tests and driver_output.image_hash:              self._save_baseline_data(driver_output.image, ".png",                                       generate_new_baseline=self._options.new_baseline) @@ -190,7 +194,7 @@ class SingleTestRunner:          failures = []          fs = self._port._filesystem          if driver_output.timeout: -            failures.append(test_failures.FailureTimeout(reference_filename)) +            failures.append(test_failures.FailureTimeout(bool(reference_filename)))          if reference_filename:              testname = self._port.relative_test_filename(reference_filename) @@ -198,7 +202,7 @@ class SingleTestRunner:              testname = self._testname          if driver_output.crash: -            failures.append(test_failures.FailureCrash(reference_filename)) +            failures.append(test_failures.FailureCrash(bool(reference_filename)))              _log.debug("%s Stacktrace for %s:\n%s" % (self._worker_name, testname,                                                        driver_output.error))          elif driver_output.error: @@ -216,19 +220,28 @@ class SingleTestRunner:              return TestResult(self._filename, failures, driver_output.test_time)          failures.extend(self._compare_text(driver_output.text, expected_driver_output.text)) +        failures.extend(self._compare_audio(driver_output.audio, expected_driver_output.audio))          if self._options.pixel_tests:              failures.extend(self._compare_image(driver_output, expected_driver_output))          return TestResult(self._filename, failures, driver_output.test_time)      def _compare_text(self, actual_text, expected_text):          failures = [] -        if self._port.compare_text(self._get_normalized_output_text(actual_text), -                                   # Assuming expected_text is already normalized. -                                   expected_text): -            if expected_text == '': -                failures.append(test_failures.FailureMissingResult()) -            else: -                failures.append(test_failures.FailureTextMismatch()) +        if (expected_text and actual_text and +            # Assuming expected_text is already normalized. 
+            self._port.compare_text(self._get_normalized_output_text(actual_text), expected_text)): +            failures.append(test_failures.FailureTextMismatch()) +        elif actual_text and not expected_text: +            failures.append(test_failures.FailureMissingResult()) +        return failures + +    def _compare_audio(self, actual_audio, expected_audio): +        failures = [] +        if (expected_audio and actual_audio and +            self._port.compare_audio(actual_audio, expected_audio)): +            failures.append(test_failures.FailureAudioMismatch()) +        elif actual_audio and not expected_audio: +            failures.append(test_failures.FailureMissingAudio())          return failures      def _get_normalized_output_text(self, output): @@ -259,7 +272,7 @@ class SingleTestRunner:              base.DriverInput(self._reference_filename, self._timeout, driver_output1.image_hash))          test_result = self._compare_output_with_reference(driver_output1, driver_output2) -        test_result_writer.write_test_result(self._port, self._options.results_directory, self._filename, +        test_result_writer.write_test_result(self._port, self._filename,                                               driver_output1, driver_output2, test_result.failures)          return test_result diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py index 132ccc2..a407ecc 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py @@ -41,8 +41,8 @@ _log = logging.getLogger("webkitpy.layout_tests.layout_package."                           "test_expectations")  # Test expectation and modifier constants. -(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX, - SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(15) +(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, SKIP, WONTFIX, + SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)  # Test expectation file update action constants  (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4) @@ -120,7 +120,8 @@ class TestExpectations:                  self._expected_failures.get_test_set(REBASELINE, IMAGE) |                  self._expected_failures.get_test_set(REBASELINE, TEXT) |                  self._expected_failures.get_test_set(REBASELINE, -                                                     IMAGE_PLUS_TEXT)) +                                                     IMAGE_PLUS_TEXT) | +                self._expected_failures.get_test_set(REBASELINE, AUDIO))      def get_options(self, test):          return self._expected_failures.get_options(test) @@ -166,9 +167,8 @@ class TestExpectations:      def has_modifier(self, test, modifier):          return self._expected_failures.has_modifier(test, modifier) -    def remove_platform_from_expectations(self, tests, platform): -        return self._expected_failures.remove_platform_from_expectations( -            tests, platform) +    def remove_rebaselined_tests(self, tests): +        return self._expected_failures.remove_rebaselined_tests(tests)  def strip_comments(line): @@ -245,11 +245,11 @@ class TestExpectationsFile:      Notes:        -A test cannot be both SLOW and TIMEOUT -      -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. 
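The reworked _compare_text() and the new _compare_audio() above share one shape: report a mismatch only when both an actual and an expected result exist and the port says they differ, and report a missing-expectation failure when there is actual output but nothing to compare it against. A rough generic sketch of that shape (compare_result and outputs_differ are illustrative names, not part of the patch):

    def compare_result(actual, expected, outputs_differ, mismatch_failure, missing_failure):
        # outputs_differ(actual, expected) returns True when the outputs do not
        # match, mirroring port.compare_text() / port.compare_audio() above.
        failures = []
        if expected and actual and outputs_differ(actual, expected):
            failures.append(mismatch_failure())
        elif actual and not expected:
            failures.append(missing_failure())
        return failures

For text, the actual output is normalized first via _get_normalized_output_text(); audio is handed to port.compare_audio() as-is.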
FAIL is -       a migratory state that currently means either IMAGE, TEXT, or -       IMAGE+TEXT. Once we have finished migrating the expectations, we will -       change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT -       identifier. +      -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, AUDIO, or FAIL. +       FAIL is a legacy value that currently means either IMAGE, +       TEXT, or IMAGE+TEXT. Once we have finished migrating the expectations, +       we should change FAIL to have the meaning of IMAGE+TEXT and remove the +       IMAGE+TEXT identifier.        -A test can be included twice, but not via the same path.        -If a test is included twice, then the more precise path wins.        -CRASH tests cannot be WONTFIX @@ -260,6 +260,7 @@ class TestExpectationsFile:                      'text': TEXT,                      'image': IMAGE,                      'image+text': IMAGE_PLUS_TEXT, +                    'audio': AUDIO,                      'timeout': TIMEOUT,                      'crash': CRASH,                      'missing': MISSING} @@ -272,6 +273,7 @@ class TestExpectationsFile:                                  IMAGE: ('image mismatch', 'image mismatch'),                                  IMAGE_PLUS_TEXT: ('image and text mismatch',                                                    'image and text mismatch'), +                                AUDIO: ('audio mismatch', 'audio mismatch'),                                  CRASH: ('DumpRenderTree crash',                                          'DumpRenderTree crashes'),                                  TIMEOUT: ('test timed out', 'tests timed out'), @@ -279,7 +281,7 @@ class TestExpectationsFile:                                            'no expected results found')}      EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, -       TEXT, IMAGE, FAIL, SKIP) +       TEXT, IMAGE, AUDIO, FAIL, SKIP)      BUILD_TYPES = ('debug', 'release') @@ -436,75 +438,14 @@ class TestExpectationsFile:      def get_non_fatal_errors(self):          return self._non_fatal_errors -    def remove_platform_from_expectations(self, tests, platform): -        """Returns a copy of the expectations with the tests matching the -        platform removed. - -        If a test is in the test list and has an option that matches the given -        platform, remove the matching platform and save the updated test back -        to the file. If no other platforms remaining after removal, delete the -        test from the file. - -        Args: -          tests: list of tests that need to update.. -          platform: which platform option to remove. - -        Returns: -          the updated string. 
-        """ - -        assert(platform) -        f_orig = self._get_iterable_expectations(self._expectations) -        f_new = [] - -        tests_removed = 0 -        tests_updated = 0 -        lineno = 0 -        for line in f_orig: -            lineno += 1 -            action = self._get_platform_update_action(line, lineno, tests, -                                                      platform) -            assert(action in (NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, -                              ADD_PLATFORMS_EXCEPT_THIS)) -            if action == NO_CHANGE: -                # Save the original line back to the file -                _log.debug('No change to test: %s', line) -                f_new.append(line) -            elif action == REMOVE_TEST: -                tests_removed += 1 -                _log.info('Test removed: %s', line) -            elif action == REMOVE_PLATFORM: -                parts = line.split(':') -                new_options = parts[0].replace(platform.upper() + ' ', '', 1) -                new_line = ('%s:%s' % (new_options, parts[1])) -                f_new.append(new_line) -                tests_updated += 1 -                _log.info('Test updated: ') -                _log.info('  old: %s', line) -                _log.info('  new: %s', new_line) -            elif action == ADD_PLATFORMS_EXCEPT_THIS: -                parts = line.split(':') -                _log.info('Test updated: ') -                _log.info('  old: %s', line) -                for p in self._port.test_platform_names(): -                    p = p.upper() -                    # This is a temp solution for rebaselining tool. -                    # Do not add tags WIN-7 and WIN-VISTA to test expectations -                    # if the original line does not specify the platform -                    # option. -                    # TODO(victorw): Remove WIN-VISTA and WIN-WIN7 once we have -                    # reliable Win 7 and Win Vista buildbots setup. -                    if not p in (platform.upper(), 'WIN-VISTA', 'WIN-WIN7'): -                        new_options = parts[0] + p + ' ' -                        new_line = ('%s:%s' % (new_options, parts[1])) -                        f_new.append(new_line) -                        _log.info('  new: %s', new_line) -                tests_updated += 1 - -        _log.info('Total tests removed: %d', tests_removed) -        _log.info('Total tests updated: %d', tests_updated) - -        return "".join(f_new) +    def remove_rebaselined_tests(self, tests): +        """Returns a copy of the expectations with the tests removed.""" +        lines = [] +        for (lineno, line) in enumerate(self._get_iterable_expectations(self._expectations)): +            test, options, _ = self.parse_expectations_line(line, lineno) +            if not (test and test in tests and 'rebaseline' in options): +                lines.append(line) +        return ''.join(lines)      def parse_expectations_line(self, line, lineno):          """Parses a line from test_expectations.txt and returns a tuple @@ -534,41 +475,6 @@ class TestExpectationsFile:          return (test, options, expectations) -    def _get_platform_update_action(self, line, lineno, tests, platform): -        """Check the platform option and return the action needs to be taken. - -        Args: -          line: current line in test expectations file. -          lineno: current line number of line -          tests: list of tests that need to update.. -          platform: which platform option to remove. 
- -        Returns: -          NO_CHANGE: no change to the line (comments, test not in the list etc) -          REMOVE_TEST: remove the test from file. -          REMOVE_PLATFORM: remove this platform option from the test. -          ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one. -        """ -        test, options, expectations = self.parse_expectations_line(line, -                                                                   lineno) -        if not test or test not in tests: -            return NO_CHANGE - -        has_any_platform = False -        for option in options: -            if option in self._port.test_platform_names(): -                has_any_platform = True -                if not option == platform: -                    return REMOVE_PLATFORM - -        # If there is no platform specified, then it means apply to all -        # platforms. Return the action to add all the platforms except this -        # one. -        if not has_any_platform: -            return ADD_PLATFORMS_EXCEPT_THIS - -        return REMOVE_TEST -      def _add_to_all_expectations(self, test, options, expectations):          # Make all paths unix-style so the dashboard doesn't need to.          test = test.replace('\\', '/') @@ -929,7 +835,7 @@ class ModifierMatcher(object):          'mac-leopard': ['mac', 'leopard'],          'win-xp': ['win', 'xp'],          'win-vista': ['win', 'vista'], -        'win-7': ['win', 'win7'], +        'win-win7': ['win', 'win7'],      }      # We don't include the "none" modifier because it isn't actually legal. diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py index 05d805d..0833079 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_expectations_unittest.py @@ -377,44 +377,23 @@ BUG_TEST WIN-XP : passes/text.html = TEXT  class RebaseliningTest(Base):      """Test rebaselining-specific functionality.""" -    def assertRemove(self, platform, input_expectations, expected_expectations): +    def assertRemove(self, input_expectations, tests, expected_expectations):          self.parse_exp(input_expectations) -        test = self.get_test('failures/expected/text.html') -        actual_expectations = self._exp.remove_platform_from_expectations( -            test, platform) +        actual_expectations = self._exp.remove_rebaselined_tests(tests)          self.assertEqual(expected_expectations, actual_expectations) +    def test_remove(self): +        self.assertRemove('BUGX REBASELINE : failures/expected/text.html = TEXT\n' +                          'BUGY : failures/expected/image.html = IMAGE\n' +                          'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n', +                          ['failures/expected/text.html'], +                          'BUGY : failures/expected/image.html = IMAGE\n' +                          'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n') +      def test_no_get_rebaselining_failures(self):          self.parse_exp(self.get_basic_expectations())          self.assertEqual(len(self._exp.get_rebaselining_failures()), 0) -    def test_get_rebaselining_failures_expand(self): -        self.parse_exp(""" -BUG_TEST REBASELINE : failures/expected/text.html = TEXT -""") -        self.assertEqual(len(self._exp.get_rebaselining_failures()), 1) - -    def test_remove_expand(self): 
-        self.assertRemove('mac', -            'BUGX REBASELINE : failures/expected/text.html = TEXT\n', -            'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n' -            'BUGX REBASELINE WIN-XP : failures/expected/text.html = TEXT\n') - -    def test_remove_mac_win(self): -        self.assertRemove('mac', -            'BUGX REBASELINE MAC WIN : failures/expected/text.html = TEXT\n', -            'BUGX REBASELINE WIN : failures/expected/text.html = TEXT\n') - -    def test_remove_mac_mac(self): -        self.assertRemove('mac', -            'BUGX REBASELINE MAC : failures/expected/text.html = TEXT\n', -            '') - -    def test_remove_nothing(self): -        self.assertRemove('mac', -            '\n\n', -            '\n\n') -  class ModifierTests(unittest.TestCase):      def setUp(self): diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py index 1fad772..41f457c 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py @@ -54,7 +54,8 @@ def determine_result_type(failure_list):          return test_expectations.TIMEOUT      elif (FailureMissingResult in failure_types or            FailureMissingImage in failure_types or -          FailureMissingImageHash in failure_types): +          FailureMissingImageHash in failure_types or +          FailureMissingAudio in failure_types):          return test_expectations.MISSING      else:          is_text_failure = FailureTextMismatch in failure_types @@ -62,12 +63,15 @@ def determine_result_type(failure_list):                              FailureImageHashMismatch in failure_types)          is_reftest_failure = (FailureReftestMismatch in failure_types or                                FailureReftestMismatchDidNotOccur in failure_types) +        is_audio_failure = (FailureAudioMismatch in failure_types)          if is_text_failure and is_image_failure:              return test_expectations.IMAGE_PLUS_TEXT          elif is_text_failure:              return test_expectations.TEXT          elif is_image_failure or is_reftest_failure:              return test_expectations.IMAGE +        elif is_audio_failure: +            return test_expectations.AUDIO          else:              raise ValueError("unclassifiable set of failures: "                               + str(failure_types)) @@ -99,152 +103,56 @@ class TestFailure(object):          """Returns the string/JSON representation of a TestFailure."""          return cPickle.dumps(self) -    def result_html_output(self, filename): -        """Returns an HTML string to be included on the results.html page.""" -        raise NotImplementedError -      def should_kill_dump_render_tree(self):          """Returns True if we should kill DumpRenderTree before the next          test."""          return False -    def relative_output_filename(self, filename, modifier): -        """Returns a relative filename inside the output dir that contains -        modifier. 
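With the audio failure classes added above, determine_result_type() gains two branches: a missing audio expectation makes the overall result MISSING, and an audio mismatch with no text or image failure maps to the new AUDIO expectation. A condensed sketch of that priority order, with strings standing in for the test_expectations constants and the crash/timeout cases omitted:

    def classify(failure_types):
        # Priority order mirrors determine_result_type() above (condensed).
        missing = ('missing_text', 'missing_image', 'missing_image_hash', 'missing_audio')
        if any(kind in failure_types for kind in missing):
            return 'MISSING'
        text = 'text_mismatch' in failure_types
        image = 'image_mismatch' in failure_types or 'reftest_mismatch' in failure_types
        audio = 'audio_mismatch' in failure_types
        if text and image:
            return 'IMAGE+TEXT'
        if text:
            return 'TEXT'
        if image:
            return 'IMAGE'
        if audio:
            return 'AUDIO'
        raise ValueError('unclassifiable set of failures: %s' % (failure_types,))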
- -        For example, if filename is fast\dom\foo.html and modifier is -        "-expected.txt", the return value is fast\dom\foo-expected.txt - -        Args: -          filename: relative filename to test file -          modifier: a string to replace the extension of filename with - -        Return: -          The relative windows path to the output filename -        """ -        # FIXME: technically this breaks if files don't use ".ext" to indicate -        # the extension, but passing in a Filesystem object here is a huge -        # hassle. -        return filename[:filename.rfind('.')] + modifier - - -class ComparisonTestFailure(TestFailure): -    """Base class that produces standard HTML output based on the result of the comparison test. - -    Subclasses may commonly choose to override the ResultHtmlOutput, but still -    use the standard OutputLinks. -    """ - -    # Filename suffixes used by ResultHtmlOutput. -    OUT_FILENAMES = () - -    def output_links(self, filename, out_names): -        """Returns a string holding all applicable output file links. - -        Args: -          filename: the test filename, used to construct the result file names -          out_names: list of filename suffixes for the files. If three or more -              suffixes are in the list, they should be [actual, expected, diff, -              wdiff]. Two suffixes should be [actual, expected], and a -              single item is the [actual] filename suffix. -              If out_names is empty, returns the empty string. -        """ -        # FIXME: Seems like a bad idea to separate the display name data -        # from the path data by hard-coding the display name here -        # and passing in the path information via out_names. -        # -        # FIXME: Also, we don't know for sure that these files exist, -        # and we shouldn't be creating links to files that don't exist -        # (for example, if we don't actually have wdiff output). -        links = [''] -        uris = [self.relative_output_filename(filename, fn) for -                fn in out_names] -        if len(uris) > 1: -            links.append("<a href='%s'>expected</a>" % uris[1]) -        if len(uris) > 0: -            links.append("<a href='%s'>actual</a>" % uris[0]) -        if len(uris) > 2: -            links.append("<a href='%s'>diff</a>" % uris[2]) -        if len(uris) > 3: -            links.append("<a href='%s'>wdiff</a>" % uris[3]) -        if len(uris) > 4: -            links.append("<a href='%s'>pretty diff</a>" % uris[4]) -        return ' '.join(links) - -    def result_html_output(self, filename): -        return self.message() + self.output_links(filename, self.OUT_FILENAMES) -  class FailureTimeout(TestFailure):      """Test timed out.  
We also want to restart DumpRenderTree if this      happens.""" - -    def __init__(self, reference_filename=None): -        self.reference_filename = reference_filename +    def __init__(self, is_reftest=False): +        self.is_reftest = is_reftest      @staticmethod      def message():          return "Test timed out" -    def result_html_output(self, filename): -        if self.reference_filename: -            return "<strong>%s</strong> (occured in <a href=%s>expected html</a>)" % ( -                self.message(), self.reference_filename) -        return "<strong>%s</strong>" % self.message() -      def should_kill_dump_render_tree(self):          return True  class FailureCrash(TestFailure):      """DumpRenderTree crashed.""" - -    def __init__(self, reference_filename=None): -        self.reference_filename = reference_filename +    def __init__(self, is_reftest=False): +        self.is_reftest = is_reftest      @staticmethod      def message():          return "DumpRenderTree crashed" -    def result_html_output(self, filename): -        # FIXME: create a link to the minidump file -        stack = self.relative_output_filename(filename, "-stack.txt") -        if self.reference_filename: -            return "<strong>%s</strong> <a href=%s>stack</a> (occured in <a href=%s>expected html</a>)" % ( -                self.message(), stack, self.reference_filename) -        else: -            return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(), stack) -      def should_kill_dump_render_tree(self):          return True -class FailureMissingResult(ComparisonTestFailure): +class FailureMissingResult(TestFailure):      """Expected result was missing.""" -    OUT_FILENAMES = ("-actual.txt",)      @staticmethod      def message():          return "No expected results found" -    def result_html_output(self, filename): -        return ("<strong>%s</strong>" % self.message() + -                self.output_links(filename, self.OUT_FILENAMES)) - -class FailureTextMismatch(ComparisonTestFailure): +class FailureTextMismatch(TestFailure):      """Text diff output failed.""" -    # Filename suffixes used by ResultHtmlOutput. -    # FIXME: Why don't we use the constants from TestTypeBase here? -    OUT_FILENAMES = ("-actual.txt", "-expected.txt", "-diff.txt", -                     "-wdiff.html", "-pretty-diff.html")      @staticmethod      def message():          return "Text diff mismatch" -class FailureMissingImageHash(ComparisonTestFailure): +class FailureMissingImageHash(TestFailure):      """Actual result hash was missing."""      # Chrome doesn't know to display a .checksum file as text, so don't bother      # putting in a link to the actual result. 
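The constructors above now take a bare is_reftest flag instead of the old reference_filename, because the only remaining consumer needs to know whether the crash or timeout happened while running the reference page. A short sketch of that consumer (write_crash_output is an illustrative name; the equivalent dispatch appears in write_test_result() in test_result_writer.py further down):

    from webkitpy.layout_tests.layout_package.test_failures import FailureCrash

    def write_crash_output(writer, failures, driver_output, expected_driver_output):
        for failure in failures:
            if isinstance(failure, FailureCrash):
                # A crash during the reference run leaves its stack trace in the
                # reference driver's stderr; otherwise use the test driver's.
                error = expected_driver_output.error if failure.is_reftest else driver_output.error
                writer.write_crash_report(error)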
@@ -253,26 +161,17 @@ class FailureMissingImageHash(ComparisonTestFailure):      def message():          return "No expected image hash found" -    def result_html_output(self, filename): -        return "<strong>%s</strong>" % self.message() - -class FailureMissingImage(ComparisonTestFailure): +class FailureMissingImage(TestFailure):      """Actual result image was missing.""" -    OUT_FILENAMES = ("-actual.png",)      @staticmethod      def message():          return "No expected image found" -    def result_html_output(self, filename): -        return ("<strong>%s</strong>" % self.message() + -                self.output_links(filename, self.OUT_FILENAMES)) - -class FailureImageHashMismatch(ComparisonTestFailure): +class FailureImageHashMismatch(TestFailure):      """Image hashes didn't match.""" -    OUT_FILENAMES = ("-actual.png", "-expected.png", "-diff.png")      @staticmethod      def message(): @@ -281,7 +180,7 @@ class FailureImageHashMismatch(ComparisonTestFailure):          return "Image mismatch" -class FailureImageHashIncorrect(ComparisonTestFailure): +class FailureImageHashIncorrect(TestFailure):      """Actual result hash is incorrect."""      # Chrome doesn't know to display a .checksum file as text, so don't bother      # putting in a link to the actual result. @@ -290,45 +189,37 @@ class FailureImageHashIncorrect(ComparisonTestFailure):      def message():          return "Images match, expected image hash incorrect. " -    def result_html_output(self, filename): -        return "<strong>%s</strong>" % self.message() - -class FailureReftestMismatch(ComparisonTestFailure): +class FailureReftestMismatch(TestFailure):      """The result didn't match the reference rendering.""" -    OUT_FILENAMES = ("-expected.html", "-expected.png", "-actual.png", -                     "-diff.png",) -      @staticmethod      def message():          return "Mismatch with reference" -    def output_links(self, filename, out_names): -        links = [''] -        uris = [self.relative_output_filename(filename, output_filename) -                for output_filename in out_names] -        for text, uri in zip(['-expected.html', 'expected', 'actual', 'diff'], uris): -            links.append("<a href='%s'>%s</a>" % (uri, text)) -        return ' '.join(links) - -class FailureReftestMismatchDidNotOccur(ComparisonTestFailure): +class FailureReftestMismatchDidNotOccur(TestFailure):      """Unexpected match between the result and the reference rendering.""" -    OUT_FILENAMES = ("-expected-mismatch.html", "-actual.png",) -      @staticmethod      def message():          return "Mismatch with the reference did not occur" -    def output_links(self, filename, out_names): -        links = [''] -        uris = [self.relative_output_filename(filename, output_filename) -                for output_filename in out_names] -        for text, uri in zip(['-expected-mismatch.html', 'image'], uris): -            links.append("<a href='%s'>%s</a>" % (uri, text)) -        return ' '.join(links) + +class FailureMissingAudio(TestFailure): +    """Actual result image was missing.""" + +    @staticmethod +    def message(): +        return "No expected audio found" + + +class FailureAudioMismatch(TestFailure): +    """Audio files didn't match.""" + +    @staticmethod +    def message(): +        return "Audio mismatch"  # Convenient collection of all failure classes for anything that might diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py 
b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py index c5aa2d6..9b0576e 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_failures_unittest.py @@ -34,9 +34,6 @@ from webkitpy.layout_tests.layout_package.test_failures import *  class Test(unittest.TestCase): -    def assertResultHtml(self, failure_obj): -        self.assertNotEqual(failure_obj.result_html_output('foo'), None) -      def assert_loads(self, cls):          failure_obj = cls()          s = failure_obj.dumps() @@ -49,22 +46,22 @@ class Test(unittest.TestCase):          self.assertFalse(failure_obj != new_failure_obj)      def test_crash(self): -        self.assertResultHtml(FailureCrash()) +        FailureCrash()      def test_hash_incorrect(self): -        self.assertResultHtml(FailureImageHashIncorrect()) +        FailureImageHashIncorrect()      def test_missing(self): -        self.assertResultHtml(FailureMissingResult()) +        FailureMissingResult()      def test_missing_image(self): -        self.assertResultHtml(FailureMissingImage()) +        FailureMissingImage()      def test_missing_image_hash(self): -        self.assertResultHtml(FailureMissingImageHash()) +        FailureMissingImageHash()      def test_timeout(self): -        self.assertResultHtml(FailureTimeout()) +        FailureTimeout()      def test_unknown_failure_type(self):          class UnknownFailure(TestFailure): @@ -73,8 +70,6 @@ class Test(unittest.TestCase):          failure_obj = UnknownFailure()          self.assertRaises(ValueError, determine_result_type, [failure_obj])          self.assertRaises(NotImplementedError, failure_obj.message) -        self.assertRaises(NotImplementedError, failure_obj.result_html_output, -                          "foo.txt")      def test_loads(self):          for c in ALL_FAILURE_CLASSES: @@ -89,12 +84,5 @@ class Test(unittest.TestCase):          crash_set = set([FailureCrash(), "FailureCrash"])          self.assertEqual(len(crash_set), 2) -    def test_relative_output_filename(self): -        # This could be any Failure* object, since we're testing a method -        # on the base class. 
-        failure_obj = FailureTextMismatch() -        actual_filename = failure_obj.relative_output_filename("fast/html/article-element.html", "-actual.txt") -        self.assertEquals(actual_filename, "fast/html/article-element-actual.txt") -  if __name__ == '__main__':      unittest.main() diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py index e209503..07e6389 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_result_writer.py @@ -36,12 +36,16 @@ from webkitpy.layout_tests.layout_package import test_failures  _log = logging.getLogger(__name__) -def write_test_result(port, root_output_dir, filename, driver_output, +def write_test_result(port, filename, driver_output,                        expected_driver_output, failures):      """Write the test result to the result output directory.""" +    root_output_dir = port.results_directory()      checksums_mismatch_but_images_are_same = False      imagehash_mismatch_failure = None      writer = TestResultWriter(port, root_output_dir, filename) +    if driver_output.error: +        writer.write_stderr(driver_output.error) +      for failure in failures:          # FIXME: Instead of this long 'if' block, each failure class might          # have a responsibility for writing a test result. @@ -63,8 +67,11 @@ def write_test_result(port, root_output_dir, filename, driver_output,              if not images_are_different:                  checksums_mismatch_but_images_are_same = True                  imagehash_mismatch_failure = failure +        elif isinstance(failure, (test_failures.FailureAudioMismatch, +                                  test_failures.FailureMissingAudio)): +            writer.write_audio_files(driver_output.audio, expected_driver_output.audio)          elif isinstance(failure, test_failures.FailureCrash): -            if failure.reference_filename: +            if failure.is_reftest:                  writer.write_crash_report(expected_driver_output.error)              else:                  writer.write_crash_report(driver_output.error) @@ -150,6 +157,12 @@ class TestResultWriter(object):          if expected is not None:              fs.write_binary_file(expected_filename, expected) +    def write_stderr(self, error): +        fs = self._port._filesystem +        filename = self.output_filename("-stderr.txt") +        fs.maybe_make_directory(fs.dirname(filename)) +        fs.write_text_file(filename, error) +      def write_crash_report(self, error):          """Write crash information."""          fs = self._port._filesystem @@ -187,6 +200,9 @@ class TestResultWriter(object):          pretty_patch_filename = self.output_filename(self.FILENAME_SUFFIX_PRETTY_PATCH)          fs.write_binary_file(pretty_patch_filename, pretty_patch) +    def write_audio_files(self, actual_audio, expected_audio): +        self.write_output_files('.wav', actual_audio, expected_audio) +      def write_image_files(self, actual_image, expected_image):          self.write_output_files('.png', actual_image, expected_image) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py index 569dd51..8e534b1 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner.py @@ -46,10 +46,8 
@@ import random  import sys  import time -from webkitpy.layout_tests.layout_package import dump_render_tree_thread  from webkitpy.layout_tests.layout_package import json_layout_results_generator  from webkitpy.layout_tests.layout_package import json_results_generator -from webkitpy.layout_tests.layout_package import message_broker  from webkitpy.layout_tests.layout_package import printing  from webkitpy.layout_tests.layout_package import test_expectations  from webkitpy.layout_tests.layout_package import test_failures @@ -145,6 +143,29 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes          tests[test] = {}          tests[test]['expected'] = expected          tests[test]['actual'] = " ".join(actual) +        # FIXME: Set this correctly once https://webkit.org/b/37739 is fixed +        # and only set it if there actually is stderr data. +        tests[test]['has_stderr'] = False + +        failure_types = [type(f) for f in result.failures] +        if test_failures.FailureMissingAudio in failure_types: +            tests[test]['is_missing_audio'] = True + +        if test_failures.FailureReftestMismatch in failure_types: +            tests[test]['is_reftest'] = True + +        for f in result.failures: +            if 'is_reftest' in result.failures: +                tests[test]['is_reftest'] = True + +        if test_failures.FailureReftestMismatchDidNotOccur in failure_types: +            tests[test]['is_mismatch_reftest'] = True + +        if test_failures.FailureMissingResult in failure_types: +            tests[test]['is_missing_text'] = True + +        if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types: +            tests[test]['is_missing_image'] = True          if filename in test_timings_map:              time_seconds = test_timings_map[filename] @@ -154,6 +175,12 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes      results['num_passes'] = num_passes      results['num_flaky'] = num_flaky      results['num_regressions'] = num_regressions +    # FIXME: If non-chromium ports start using an expectations file, +    # we should make this check more robust. +    results['uses_expectations_file'] = port_obj.name().find('chromium') != -1 +    results['layout_tests_dir'] = port_obj.layout_tests_dir() +    results['has_wdiff'] = port_obj.wdiff_available() +    results['has_pretty_patch'] = port_obj.pretty_patch_available()      return results @@ -205,6 +232,7 @@ class TestRunner:          self._test_files_list = None          self._result_queue = Queue.Queue()          self._retrying = False +        self._results_directory = self._port.results_directory()      def collect_tests(self, args, last_unexpected_results):          """Find all the files to test. 
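The per-test flags that summarize_results() now attaches (shown in the hunk above) are exactly what the new results.html at the top of this patch reads: is_missing_audio, is_missing_text and is_missing_image decide which actual-result links appendTestList() emits. A condensed restatement of that annotation step (annotate_test_entry is an illustrative helper, not a function in the patch):

    from webkitpy.layout_tests.layout_package import test_failures

    def annotate_test_entry(entry, failures):
        failure_types = [type(f) for f in failures]
        entry['has_stderr'] = False  # FIXME upstream: https://webkit.org/b/37739
        if test_failures.FailureMissingAudio in failure_types:
            entry['is_missing_audio'] = True
        if test_failures.FailureReftestMismatch in failure_types:
            entry['is_reftest'] = True
        if test_failures.FailureReftestMismatchDidNotOccur in failure_types:
            entry['is_mismatch_reftest'] = True
        if test_failures.FailureMissingResult in failure_types:
            entry['is_missing_text'] = True
        if (test_failures.FailureMissingImage in failure_types or
                test_failures.FailureMissingImageHash in failure_types):
            entry['is_missing_image'] = True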
@@ -355,8 +383,7 @@ class TestRunner:                  self._printer.print_expected(extra_msg)                  tests_run_msg += "\n" + extra_msg                  files.extend(test_files[0:extra]) -            tests_run_filename = self._fs.join(self._options.results_directory, -                                              "tests_run.txt") +            tests_run_filename = self._fs.join(self._results_directory, "tests_run.txt")              self._fs.write_text_file(tests_run_filename, tests_run_msg)              len_skip_chunk = int(len(files) * len(skipped) / @@ -513,8 +540,16 @@ class TestRunner:                  return True          return False -    def _num_workers(self): -        return int(self._options.child_processes) +    def _num_workers(self, num_shards): +        num_workers = min(int(self._options.child_processes), num_shards) +        driver_name = self._port.driver_name() +        if num_workers == 1: +            self._printer.print_config("Running 1 %s over %s" % +                (driver_name, grammar.pluralize('shard', num_shards))) +        else: +            self._printer.print_config("Running %d %ss in parallel over %d shards" % +                (num_workers, driver_name, num_shards)) +        return num_workers      def _run_tests(self, file_list, result_summary):          """Runs the tests in the file_list. @@ -532,54 +567,7 @@ class TestRunner:                in the form {filename:filename, test_run_time:test_run_time}              result_summary: summary object to populate with the results          """ - -        self._printer.print_update('Sharding tests ...') -        num_workers = self._num_workers() -        test_lists = self._shard_tests(file_list, -            num_workers > 1 and not self._options.experimental_fully_parallel) -        filename_queue = Queue.Queue() -        for item in test_lists: -            filename_queue.put(item) - -        self._printer.print_update('Starting %s ...' % -                                   grammar.pluralize('worker', num_workers)) -        self._message_broker = message_broker.get(self._port, self._options) -        broker = self._message_broker -        self._current_filename_queue = filename_queue -        self._current_result_summary = result_summary - -        if not self._options.dry_run: -            threads = broker.start_workers(self) -        else: -            threads = {} - -        self._printer.print_update("Starting testing ...") -        keyboard_interrupted = False -        interrupted = False -        if not self._options.dry_run: -            try: -                broker.run_message_loop() -            except KeyboardInterrupt: -                _log.info("Interrupted, exiting") -                broker.cancel_workers() -                keyboard_interrupted = True -                interrupted = True -            except TestRunInterruptedException, e: -                _log.info(e.reason) -                broker.cancel_workers() -                interrupted = True -            except: -                # Unexpected exception; don't try to clean up workers. 
-                _log.info("Exception raised, exiting") -                raise - -        thread_timings, test_timings, individual_test_timings = \ -            self._collect_timing_info(threads) - -        broker.cleanup() -        self._message_broker = None -        return (interrupted, keyboard_interrupted, thread_timings, test_timings, -                individual_test_timings) +        raise NotImplementedError()      def update(self):          self.update_summary(self._current_result_summary) @@ -629,7 +617,7 @@ class TestRunner:              self._clobber_old_results()          # Create the output directory if it doesn't already exist. -        self._port.maybe_make_directory(self._options.results_directory) +        self._port.maybe_make_directory(self._results_directory)          self._port.setup_test_run() @@ -711,9 +699,9 @@ class TestRunner:          # Write the summary to disk (results.html) and display it if requested.          if not self._options.dry_run: -            wrote_results = self._write_results_html_file(result_summary) -            if self._options.show_results and wrote_results: -                self._show_results_html_file() +            self._copy_results_html_file() +            if self._options.show_results: +                self._show_results_html_file(result_summary)          # Now that we've completed all the processing we can, we re-raise          # a KeyboardInterrupt if necessary so the caller can handle it. @@ -773,13 +761,12 @@ class TestRunner:          # files in the results directory are explicitly used for cross-run          # tracking.          self._printer.print_update("Clobbering old results in %s" % -                                   self._options.results_directory) +                                   self._results_directory)          layout_tests_dir = self._port.layout_tests_dir()          possible_dirs = self._port.test_dirs()          for dirname in possible_dirs:              if self._fs.isdir(self._fs.join(layout_tests_dir, dirname)): -                self._fs.rmtree(self._fs.join(self._options.results_directory, -                                              dirname)) +                self._fs.rmtree(self._fs.join(self._results_directory, dirname))      def _get_failures(self, result_summary, include_crashes):          """Filters a dict of results and returns only the failures. @@ -829,17 +816,17 @@ class TestRunner:            individual_test_timings: list of test times (used by the flakiness              dashboard).          """ -        _log.debug("Writing JSON files in %s." % self._options.results_directory) +        _log.debug("Writing JSON files in %s." % self._results_directory) -        unexpected_json_path = self._fs.join(self._options.results_directory, "unexpected_results.json") +        unexpected_json_path = self._fs.join(self._results_directory, "unexpected_results.json")          json_results_generator.write_json(self._fs, unexpected_results, unexpected_json_path) -        full_results_path = self._fs.join(self._options.results_directory, "full_results.json") +        full_results_path = self._fs.join(self._results_directory, "full_results.json")          json_results_generator.write_json(self._fs, summarized_results, full_results_path)          # Write a json file of the test_expectations.txt file for the layout          # tests dashboard. 
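Throughout these hunks the results location now comes from port.results_directory() rather than from --results-directory read directly off the options object. The accessor itself is added to base.Port later in this diff; a minimal sketch of the lazy resolution it performs, with option handling reduced to a plain attribute (names other than results_directory()/default_results_directory() are illustrative):

    import os

    class PortSketch(object):
        def __init__(self, results_directory_option=None):
            self._results_directory_option = results_directory_option
            self._results_directory = None

        def default_results_directory(self):
            # Real ports build something like .../layout-test-results here.
            return 'layout-test-results'

        def results_directory(self):
            # Resolve once, cache the absolute path, reuse it everywhere.
            if not self._results_directory:
                option_val = self._results_directory_option or self.default_results_directory()
                self._results_directory = os.path.abspath(option_val)
            return self._results_directory

    port = PortSketch()
    unexpected_json_path = os.path.join(port.results_directory(), 'unexpected_results.json')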
-        expectations_path = self._fs.join(self._options.results_directory, "expectations.json") +        expectations_path = self._fs.join(self._results_directory, "expectations.json")          expectations_json = \              self._expectations.get_expectations_json_for_all_platforms()          self._fs.write_text_file(expectations_path, @@ -847,7 +834,7 @@ class TestRunner:          generator = json_layout_results_generator.JSONLayoutResultsGenerator(              self._port, self._options.builder_name, self._options.build_name, -            self._options.build_number, self._options.results_directory, +            self._options.build_number, self._results_directory,              BUILDER_BASE_URL, individual_test_timings,              self._expectations, result_summary, self._test_files_list,              self._options.test_results_server, @@ -865,8 +852,7 @@ class TestRunner:          p = self._printer          p.print_config("Using port '%s'" % self._port.name())          p.print_config("Test configuration: %s" % self._port.test_configuration()) -        p.print_config("Placing test results in %s" % -                       self._options.results_directory) +        p.print_config("Placing test results in %s" % self._results_directory)          if self._options.new_baseline:              p.print_config("Placing new baselines in %s" %                             self._port.baseline_path()) @@ -880,12 +866,6 @@ class TestRunner:                         (self._options.time_out_ms,                          self._options.slow_time_out_ms)) -        if self._num_workers() == 1: -            p.print_config("Running one %s" % self._port.driver_name()) -        else: -            p.print_config("Running %s %ss in parallel" % -                           (self._options.child_processes, -                            self._port.driver_name()))          p.print_config('Command line: ' +                         ' '.join(self._port.driver_cmd_line()))          p.print_config("Worker model: %s" % self._options.worker_model) @@ -1136,67 +1116,25 @@ class TestRunner:                  self._printer.print_actual("  %5d %-24s (%4.1f%%)" %                      (len(results), desc[len(results) != 1], pct)) -    def _results_html(self, test_files, failures, title="Test Failures", override_time=None): -        """ -        test_files = a list of file paths -        failures = dictionary mapping test paths to failure objects -        title = title printed at top of test -        override_time = current time (used by unit tests) -        """ -        page = """<html> -  <head> -    <title>Layout Test Results (%(time)s)</title> -  </head> -  <body> -    <h2>%(title)s (%(time)s)</h2> -        """ % {'title': title, 'time': override_time or time.asctime()} - -        for test_file in sorted(test_files): -            test_name = self._port.relative_test_filename(test_file) -            test_url = self._port.filename_to_uri(test_file) -            page += u"<p><a href='%s'>%s</a><br />\n" % (test_url, test_name) -            test_failures = failures.get(test_file, []) -            for failure in test_failures: -                page += (u"  %s<br/>" % -                         failure.result_html_output(test_name)) -            page += "</p>\n" -        page += "</body></html>\n" -        return page - -    def _write_results_html_file(self, result_summary): -        """Write results.html which is a summary of tests that failed. 
- -        Args: -          result_summary: a summary of the results :) +    def _copy_results_html_file(self): +        base_dir = self._port.path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'layout_tests', 'layout_package') +        results_file = self._fs.join(base_dir, 'json_results.html') +        # FIXME: What should we do if this doesn't exist (e.g., in unit tests)? +        if self._fs.exists(results_file): +            self._fs.copyfile(results_file, self._fs.join(self._results_directory, "results.html")) -        Returns: -          True if any results were written (since expected failures may be -          omitted) -        """ -        # test failures +    def _show_results_html_file(self, result_summary): +        """Shows the results.html page."""          if self._options.full_results_html: -            results_title = "Test Failures"              test_files = result_summary.failures.keys()          else: -            results_title = "Unexpected Test Failures" -            unexpected_failures = self._get_failures(result_summary, -                include_crashes=True) +            unexpected_failures = self._get_failures(result_summary, include_crashes=True)              test_files = unexpected_failures.keys() -        if not len(test_files): -            return False - -        out_filename = self._fs.join(self._options.results_directory, -                                     "results.html") -        with self._fs.open_text_file_for_writing(out_filename) as results_file: -            html = self._results_html(test_files, result_summary.failures, results_title) -            results_file.write(html) -        return True +        if not len(test_files): +            return -    def _show_results_html_file(self): -        """Shows the results.html page.""" -        results_filename = self._fs.join(self._options.results_directory, -                                         "results.html") +        results_filename = self._fs.join(self._results_directory, "results.html")          self._port.show_results_html_file(results_filename) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py index 5a6344c..8c19bfe 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner2.py @@ -117,15 +117,15 @@ class TestRunner2(test_runner.TestRunner):          self._group_stats = {}          self._worker_states = {} -        num_workers = self._num_workers()          keyboard_interrupted = False          interrupted = False          thread_timings = []          self._printer.print_update('Sharding tests ...')          test_lists = self._shard_tests(file_list, -            num_workers > 1 and not self._options.experimental_fully_parallel) -        _log.debug("Using %d shards" % len(test_lists)) +            (int(self._options.child_processes) > 1) and not self._options.experimental_fully_parallel) + +        num_workers = self._num_workers(len(test_lists))          manager_connection = manager_worker_broker.get(self._port, self._options,                                                         self, worker.Worker) diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py index 97f8630..82564d2 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py +++ 
b/Tools/Scripts/webkitpy/layout_tests/layout_package/test_runner_unittest.py @@ -44,27 +44,6 @@ class TestRunnerWrapper(test_runner.TestRunner):  class TestRunnerTest(unittest.TestCase): -    def test_results_html(self): -        mock_port = Mock() -        mock_port._filesystem = filesystem_mock.MockFileSystem() -        mock_port.relative_test_filename = lambda name: name -        mock_port.filename_to_uri = lambda name: name - -        runner = test_runner.TestRunner(port=mock_port, options=Mock(), -            printer=Mock()) -        expected_html = u"""<html> -  <head> -    <title>Layout Test Results (time)</title> -  </head> -  <body> -    <h2>Title (time)</h2> -        <p><a href='test_path'>test_path</a><br /> -</p> -</body></html> -""" -        html = runner._results_html(["test_path"], {}, "Title", override_time="time") -        self.assertEqual(html, expected_html) -      def test_shard_tests(self):          # Test that _shard_tests in test_runner.TestRunner really          # put the http tests first in the queue. diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py index 7876f91..78d7cdb 100644 --- a/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py +++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/worker_mixin.py @@ -55,7 +55,7 @@ class WorkerMixin(object):          self._batch_count = 0          self._batch_size = self._options.batch_size          self._driver = None -        tests_run_filename = self._filesystem.join(self._options.results_directory, +        tests_run_filename = self._filesystem.join(port.results_directory(),                                                     "tests_run%d.txt" % self._worker_number)          self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename) @@ -159,18 +159,18 @@ class WorkerMixin(object):            A TestResult          """          worker = self -        result = None          driver = worker._port.create_driver(worker._worker_number)          driver.start()          class SingleTestThread(threading.Thread):              def run(self): -                result = worker._run_single_test(driver, test_input) +                self.result = worker._run_single_test(driver, test_input)          thread = SingleTestThread()          thread.start()          thread.join(thread_timeout_sec) +        result = getattr(thread, 'result', None)          if thread.isAlive():              # If join() returned with the thread still running, the              # DumpRenderTree is completely hung and there's nothing diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py index dea126f..c2e565e 100644..100755 --- a/Tools/Scripts/webkitpy/layout_tests/port/base.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py @@ -90,18 +90,24 @@ class Port(object):                   config=None,                   **kwargs):          self._name = port_name + +        # These are default values that should be overridden in a subclasses. +        # FIXME: These should really be passed in. +        self._operating_system = 'mac' +        self._version = ''          self._architecture = 'x86' -        self._options = options -        if self._options is None: -            # FIXME: Ideally we'd have a package-wide way to get a -            # well-formed options object that had all of the necessary -            # options defined on it. 
-            self._options = DummyOptions() +        self._graphics_type = 'cpu' + +        # FIXME: Ideally we'd have a package-wide way to get a +        # well-formed options object that had all of the necessary +        # options defined on it. +        self._options = options or DummyOptions() +          self._executive = executive or Executive()          self._user = user or User()          self._filesystem = filesystem or system.filesystem.FileSystem() -        self._config = config or port_config.Config(self._executive, -                                                    self._filesystem) +        self._config = config or port_config.Config(self._executive, self._filesystem) +          self._helper = None          self._http_server = None          self._webkit_base_dir = None @@ -123,16 +129,22 @@ class Port(object):          # http://bugs.python.org/issue3210          self._wdiff_available = True +        # FIXME: prettypatch.py knows this path, why is it copied here?          self._pretty_patch_path = self.path_from_webkit_base("Websites",              "bugs.webkit.org", "PrettyPatch", "prettify.rb") -        # If we're running on a mocked-out filesystem, this file almost -        # certainly won't be available, so it's a good test to keep us -        # from erroring out later. -        self._pretty_patch_available = self._filesystem.exists(self._pretty_patch_path) +        self._pretty_patch_available = None +          if not hasattr(self._options, 'configuration') or self._options.configuration is None:              self._options.configuration = self.default_configuration()          self._test_configuration = None          self._multiprocessing_is_available = (multiprocessing is not None) +        self._results_directory = None + +    def wdiff_available(self): +        return bool(self._wdiff_available) + +    def pretty_patch_available(self): +        return bool(self._pretty_patch_available)      def default_child_processes(self):          """Return the number of DumpRenderTree instances to use for this @@ -171,23 +183,22 @@ class Port(object):          """This routine is used to check whether image_diff binary exists."""          raise NotImplementedError('Port.check_image_diff') -    def check_pretty_patch(self): +    def check_pretty_patch(self, logging=True):          """Checks whether we can use the PrettyPatch ruby script.""" -          # check if Ruby is installed          try:              result = self._executive.run_command(['ruby', '--version'])          except OSError, e:              if e.errno in [errno.ENOENT, errno.EACCES, errno.ECHILD]: -                _log.error("Ruby is not installed; " -                           "can't generate pretty patches.") -                _log.error('') +                if logging: +                    _log.error("Ruby is not installed; can't generate pretty patches.") +                    _log.error('')                  return False          if not self.path_exists(self._pretty_patch_path): -            _log.error('Unable to find %s .' % self._pretty_patch_path) -            _log.error("Can't generate pretty patches.") -            _log.error('') +            if logging: +                _log.error("Unable to find %s; can't generate pretty patches." 
% self._pretty_patch_path) +                _log.error('')              return False          return True @@ -200,6 +211,10 @@ class Port(object):          interface so that it can be overriden for testing purposes."""          return expected_text != actual_text +    def compare_audio(self, expected_audio, actual_audio): +        """Return whether the two audio files are *not* equal.""" +        return expected_audio != actual_audio +      def diff_image(self, expected_contents, actual_contents,                     diff_filename=None, tolerance=0):          """Compare two images and produce a delta image file. @@ -276,7 +291,7 @@ class Port(object):          baseline_filename = testname + '-expected' + suffix -        baseline_search_path = self.baseline_search_path() +        baseline_search_path = self.get_option('additional_platform_directory', []) + self.baseline_search_path()          baselines = []          for platform_dir in baseline_search_path: @@ -345,15 +360,22 @@ class Port(object):              return None          return self._filesystem.read_binary_file(path) +    def expected_audio(self, test): +        path = self.expected_filename(test, '.wav') +        if not self.path_exists(path): +            return None +        return self._filesystem.read_binary_file(path) +      def expected_text(self, test): -        """Returns the text output we expect the test to produce. +        """Returns the text output we expect the test to produce, or None +        if we don't expect there to be any text output.          End-of-line characters are normalized to '\n'."""          # FIXME: DRT output is actually utf-8, but since we don't decode the          # output from DRT (instead treating it as a binary string), we read the          # baselines as a binary string, too.          path = self.expected_filename(test, '.txt')          if not self.path_exists(path): -            return '' +            return None          text = self._filesystem.read_binary_file(path)          return text.replace("\r\n", "\n") @@ -481,16 +503,27 @@ class Port(object):          self._filesystem.maybe_make_directory(*path)      def name(self): -        """Return the name of the port (e.g., 'mac', 'chromium-win-xp'). - -        Note that this is different from the test_platform_name(), which -        may be different (e.g., 'win-xp' instead of 'chromium-win-xp'.""" +        """Return the name of the port (e.g., 'mac', 'chromium-win-xp')."""          return self._name +    def operating_system(self): +        return self._operating_system + +    def version(self): +        """Returns a string indicating the version of a given platform, e.g. +        'leopard' or 'xp'. 
+ +        This is used to help identify the exact port when parsing test +        expectations, determining search paths, and logging information.""" +        return self._version +      def graphics_type(self):          """Returns whether the port uses accelerated graphics ('gpu') or not          ('cpu').""" -        return 'cpu' +        return self._graphics_type + +    def architecture(self): +        return self._architecture      def real_name(self):          """Returns the actual name of the port, not the delegate's.""" @@ -541,8 +574,15 @@ class Port(object):          return self._filesystem.normpath(self._filesystem.join(self.layout_tests_dir(), test_name))      def results_directory(self): -        """Absolute path to the place to store the test results.""" -        raise NotImplementedError('Port.results_directory') +        """Absolute path to the place to store the test results (uses --results-directory).""" +        if not self._results_directory: +            option_val = self.get_option('results_directory') or self.default_results_directory() +            self._results_directory = self._filesystem.abspath(option_val) +        return self._results_directory + +    def default_results_directory(self): +        """Absolute path to the default place to store the test results.""" +        raise NotImplementedError()      def setup_test_run(self):          """Perform port-specific work at the beginning of a test run.""" @@ -578,18 +618,16 @@ class Port(object):          is already running."""          if self.get_option('use_apache'):              self._http_server = apache_http_server.LayoutTestApacheHttpd(self, -                self.get_option('results_directory')) +                self.results_directory())          else: -            self._http_server = http_server.Lighttpd(self, -                self.get_option('results_directory')) +            self._http_server = http_server.Lighttpd(self, self.results_directory())          self._http_server.start()      def start_websocket_server(self):          """Start a websocket server if it is available. Do nothing if          it isn't. This routine is allowed to (and may) fail if a server          is already running.""" -        self._websocket_server = websocket_server.PyWebSocket(self, -            self.get_option('results_directory')) +        self._websocket_server = websocket_server.PyWebSocket(self, self.results_directory())          self._websocket_server.start()      def acquire_http_lock(self): @@ -631,6 +669,14 @@ class Port(object):      def all_test_configurations(self):          return self.test_configuration().all_test_configurations() +    def all_baseline_variants(self): +        """Returns a list of platform names sufficient to cover all the baselines. + +        The list should be sorted so that a later platform  will reuse +        an earlier platform's baselines if they are the same (e.g., +        'snowleopard' should precede 'leopard').""" +        raise NotImplementedError +      def test_expectations(self):          """Returns the test expectations for this port. @@ -647,39 +693,6 @@ class Port(object):          sync up the two repos."""          return None -    def test_platform_name(self): -        """Returns the string that corresponds to the given platform name -        in the test expectations. This may be the same as name(), or it -        may be different. 
For example, chromium returns 'mac' for -        'chromium-mac'.""" -        raise NotImplementedError('Port.test_platform_name') - -    def test_platforms(self): -        """Returns the list of test platform identifiers as used in the -        test_expectations and on dashboards, the rebaselining tool, etc. - -        Note that this is not necessarily the same as the list of ports, -        which must be globally unique (e.g., both 'chromium-mac' and 'mac' -        might return 'mac' as a test_platform name'.""" -        raise NotImplementedError('Port.platforms') - -    def test_platform_name_to_name(self, test_platform_name): -        """Returns the Port platform name that corresponds to the name as -        referenced in the expectations file. E.g., "mac" returns -        "chromium-mac" on the Chromium ports.""" -        raise NotImplementedError('Port.test_platform_name_to_name') - -    def architecture(self): -        return self._architecture - -    def version(self): -        """Returns a string indicating the version of a given platform, e.g. -        'leopard' or 'xp'. - -        This is used to help identify the exact port when parsing test -        expectations, determining search paths, and logging information.""" -        raise NotImplementedError('Port.version') -      def test_repository_paths(self):          """Returns a list of (repository_name, repository_path) tuples          of its depending code base.  By default it returns a list that only @@ -748,6 +761,8 @@ class Port(object):      _pretty_patch_error_html = "Failed to run PrettyPatch, see error log."      def pretty_patch_text(self, diff_path): +        if self._pretty_patch_available is None: +            self._pretty_patch_available = self.check_pretty_patch(logging=False)          if not self._pretty_patch_available:              return self._pretty_patch_error_html          command = ("ruby", "-I", self._filesystem.dirname(self._pretty_patch_path), @@ -875,22 +890,24 @@ class DriverInput(object):  class DriverOutput(object):      """Groups information about a output from driver for easy passing of data.""" -    def __init__(self, text, image, image_hash, -                 crash=False, test_time=None, timeout=False, error=''): +    def __init__(self, text, image, image_hash, audio, +                 crash=False, test_time=0, timeout=False, error=''):          """Initializes a TestOutput object.          
Args:            text: a text output            image: an image output            image_hash: a string containing the checksum of the image +          audio: contents of an audio stream, if any (in WAV format)            crash: a boolean indicating whether the driver crashed on the test -          test_time: a time which the test has taken +          test_time: the time the test took to execute            timeout: a boolean indicating whehter the test timed out            error: any unexpected or additional (or error) text output          """          self.text = text          self.image = image          self.image_hash = image_hash +        self.audio = audio          self.crash = crash          self.test_time = test_time          self.timeout = timeout @@ -956,15 +973,8 @@ class Driver:  class TestConfiguration(object):      def __init__(self, port=None, os=None, version=None, architecture=None,                   build_type=None, graphics_type=None): - -        # FIXME: We can get the O/S and version from test_platform_name() -        # and version() for now, but those should go away and be cleaned up -        # with more generic methods like operation_system() and os_version() -        # or something. -        if port: -            port_version = port.version() -        self.os = os or port.test_platform_name().replace('-' + port_version, '') -        self.version = version or port_version +        self.os = os or port.operating_system() +        self.version = version or port.version()          self.architecture = architecture or port.architecture()          self.build_type = build_type or port._options.configuration.lower()          self.graphics_type = graphics_type or port.graphics_type() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py index ef90484..b4758fc 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py @@ -33,7 +33,7 @@ import unittest  from webkitpy.common.system.executive import Executive, ScriptError  from webkitpy.common.system import executive_mock -from webkitpy.common.system import filesystem +from webkitpy.common.system.filesystem_mock import MockFileSystem  from webkitpy.common.system import outputcapture  from webkitpy.common.system.path import abspath_to_uri  from webkitpy.thirdparty.mock import Mock @@ -86,8 +86,8 @@ class PortTest(unittest.TestCase):      def test_pretty_patch_script_error(self):          # FIXME: This is some ugly white-box test hacking ... 
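With the new operating_system(), version() and architecture() accessors on Port, TestConfiguration above no longer has to strip the version suffix off test_platform_name(). A stripped-down illustration of the "explicit argument wins, otherwise ask the port" pattern (FakePort and its return values are invented for the example):

    class FakePort(object):
        # Invented values; real ports set these fields in their constructors.
        def operating_system(self): return 'linux'
        def version(self): return 'hardy'
        def architecture(self): return 'x86_64'
        def graphics_type(self): return 'cpu'

    class TestConfigurationSketch(object):
        def __init__(self, port, os=None, version=None, architecture=None, graphics_type=None):
            self.os = os or port.operating_system()
            self.version = version or port.version()
            self.architecture = architecture or port.architecture()
            self.graphics_type = graphics_type or port.graphics_type()

    config = TestConfigurationSketch(FakePort())
    print('%s %s %s' % (config.os, config.version, config.architecture))  # linux hardy x86_64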
-        base._pretty_patch_available = True          port = base.Port(executive=executive_mock.MockExecutive2(exception=ScriptError)) +        port._pretty_patch_available = True          self.assertEqual(port.pretty_patch_text("patch.txt"),                           port._pretty_patch_error_html) @@ -232,6 +232,33 @@ class PortTest(unittest.TestCase):          port = base.Port(port_name='foo')          self.assertEqual(port.name(), 'foo') +    def test_additional_platform_directory(self): +        filesystem = MockFileSystem() +        options, args = optparse.OptionParser().parse_args([]) +        port = base.Port(port_name='foo', filesystem=filesystem, options=options) +        port.baseline_search_path = lambda: [] +        layout_test_dir = port.layout_tests_dir() +        test_file = filesystem.join(layout_test_dir, 'fast', 'test.html') + +        # No additional platform directory +        self.assertEqual( +            port.expected_baselines(test_file, '.txt'), +            [(None, 'fast/test-expected.txt')]) + +        # Simple additional platform directory +        options.additional_platform_directory = ['/tmp/local-baselines'] +        filesystem.files = { +            '/tmp/local-baselines/fast/test-expected.txt': 'foo', +        } +        self.assertEqual( +            port.expected_baselines(test_file, '.txt'), +            [('/tmp/local-baselines', 'fast/test-expected.txt')]) + +        # Multiple additional platform directories +        options.additional_platform_directory = ['/foo', '/tmp/local-baselines'] +        self.assertEqual( +            port.expected_baselines(test_file, '.txt'), +            [('/tmp/local-baselines', 'fast/test-expected.txt')])  class VirtualTest(unittest.TestCase):      """Tests that various methods expected to be virtual are.""" @@ -247,13 +274,8 @@ class VirtualTest(unittest.TestCase):          self.assertVirtual(port.create_driver, 0)          self.assertVirtual(port.diff_image, None, None)          self.assertVirtual(port.path_to_test_expectations_file) -        self.assertVirtual(port.test_platform_name) -        self.assertVirtual(port.results_directory) +        self.assertVirtual(port.default_results_directory)          self.assertVirtual(port.test_expectations) -        self.assertVirtual(port.test_platform_name) -        self.assertVirtual(port.test_platforms) -        self.assertVirtual(port.test_platform_name_to_name, None) -        self.assertVirtual(port.version)          self.assertVirtual(port._path_to_apache)          self.assertVirtual(port._path_to_apache_config_file)          self.assertVirtual(port._path_to_driver) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py index baf1893..a4279b4 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py @@ -51,6 +51,12 @@ _log = logging.getLogger("webkitpy.layout_tests.port.chromium")  # FIXME: This function doesn't belong in this package.  
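The test_additional_platform_directory cases above exercise the change to base.Port.expected_baselines() where directories passed via --additional-platform-directory are searched ahead of the port's normal baseline fallback path. A self-contained sketch of that lookup order, with the filesystem reduced to a set of known paths (all paths are illustrative):

    import os

    def find_baseline(test_rel_path, suffix, additional_dirs, platform_dirs, existing_files):
        # Local override directories are consulted first, then the regular search path.
        baseline = os.path.splitext(test_rel_path)[0] + '-expected' + suffix
        for directory in list(additional_dirs) + list(platform_dirs):
            if directory + '/' + baseline in existing_files:
                return (directory, baseline)
        return (None, baseline)

    # Mirrors the unit test: the local directory wins when it contains the file.
    print(find_baseline('fast/test.html', '.txt',
                        ['/tmp/local-baselines'], ['/platform/mac'],
                        set(['/tmp/local-baselines/fast/test-expected.txt'])))
    # -> ('/tmp/local-baselines', 'fast/test-expected.txt')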
class ChromiumPort(base.Port):      """Abstract base class for Chromium implementations of the Port class.""" +    ALL_BASELINE_VARIANTS = [ +        'chromium-mac-snowleopard', 'chromium-mac-leopard', +        'chromium-win-win7', 'chromium-win-vista', 'chromium-win-xp', +        'chromium-linux-x86', 'chromium-linux-x86_64', +        'chromium-gpu-mac-snowleopard', 'chromium-gpu-win-win7', 'chromium-gpu-linux-x86_64', +    ]      def __init__(self, **kwargs):          base.Port.__init__(self, **kwargs) @@ -102,7 +108,7 @@ class ChromiumPort(base.Port):          # It's okay if pretty patch isn't available, but we will at          # least log a message. -        self.check_pretty_patch() +        self._pretty_patch_available = self.check_pretty_patch()          return result @@ -130,7 +136,11 @@ class ChromiumPort(base.Port):      def diff_image(self, expected_contents, actual_contents,                     diff_filename=None): -        executable = self._path_to_image_diff() +        # FIXME: need unit tests for this. +        if not actual_contents and not expected_contents: +            return False +        if not actual_contents or not expected_contents: +            return True          tempdir = self._filesystem.mkdtemp()          expected_filename = self._filesystem.join(str(tempdir), "expected.png") @@ -138,6 +148,7 @@ class ChromiumPort(base.Port):          actual_filename = self._filesystem.join(str(tempdir), "actual.png")          self._filesystem.write_binary_file(actual_filename, actual_contents) +        executable = self._path_to_image_diff()          if diff_filename:              cmd = [executable, '--diff', expected_filename,                     actual_filename, diff_filename] @@ -189,14 +200,14 @@ class ChromiumPort(base.Port):          return self.path_from_webkit_base('LayoutTests', 'platform',              'chromium', 'test_expectations.txt') -    def results_directory(self): +    def default_results_directory(self):          try:              return self.path_from_chromium_base('webkit',                  self.get_option('configuration'), -                self.get_option('results_directory')) +                'layout-test-results')          except AssertionError:              return self._build_path(self.get_option('configuration'), -                                    self.get_option('results_directory')) +                                    'layout-test-results')      def setup_test_run(self):          # Delete the disk cache if any to ensure a clean test run. @@ -230,6 +241,9 @@ class ChromiumPort(base.Port):              # http://bugs.python.org/issue1731717              self._helper.wait() +    def all_baseline_variants(self): +        return self.ALL_BASELINE_VARIANTS +      def test_expectations(self):          """Returns the test expectations for this port. 
@@ -251,7 +265,6 @@ class ChromiumPort(base.Port):      def skipped_layout_tests(self, extra_test_files=None):          expectations_str = self.test_expectations()          overrides_str = self.test_expectations_overrides() -        test_platform_name = self.test_platform_name()          is_debug_mode = False          all_test_files = self.tests([]) @@ -265,15 +278,6 @@ class ChromiumPort(base.Port):          return [self.relative_test_filename(test)                  for test in expectations.get_tests_with_result_type(test_expectations.SKIP)] -    def test_platform_names(self): -        return ('mac', 'win', 'linux', 'win-xp', 'win-vista', 'win-7') - -    def test_platform_name_to_name(self, test_platform_name): -        if test_platform_name in self.test_platform_names(): -            return 'chromium-' + test_platform_name -        raise ValueError('Unsupported test_platform_name: %s' % -                         test_platform_name) -      def test_repository_paths(self):          # Note: for JSON file's backward-compatibility we use 'chrome' rather          # than 'chromium' here. @@ -335,9 +339,9 @@ class ChromiumDriver(base.Driver):          self._port = port          self._worker_number = worker_number          self._image_path = None +        self.KILL_TIMEOUT = 3.0          if self._port.get_option('pixel_tests'): -            self._image_path = self._port._filesystem.join( -                self._port.get_option('results_directory'), +            self._image_path = self._port._filesystem.join(self._port.results_directory(),                  'png_result%s.png' % self._worker_number)      def cmd_line(self): @@ -371,6 +375,8 @@ class ChromiumDriver(base.Driver):              cmd.append('--enable-accelerated-2d-canvas')          if self._port.get_option('enable_hardware_gpu'):              cmd.append('--enable-hardware-gpu') + +        cmd.extend(self._port.get_option('additional_drt_flag', []))          return cmd      def start(self): @@ -426,7 +432,7 @@ class ChromiumDriver(base.Driver):          if png_path and self._port._filesystem.exists(png_path):              return self._port._filesystem.read_binary_file(png_path)          else: -            return '' +            return None      def _output_image_with_retry(self):          # Retry a few more times because open() sometimes fails on Windows, @@ -501,11 +507,16 @@ class ChromiumDriver(base.Driver):              (line, crash) = self._write_command_and_read_line(input=None) +        # FIXME: Add support for audio when we're ready. 
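ChromiumDriver.cmd_line() above now tacks any --additional-drt-flag values onto the end of the DumpRenderTree command. A small sketch of that pattern with a stand-in options object (get_option() falling back to a default mirrors base.Port.get_option(); the flag values themselves are made up):

    class FakeOptions(object):
        additional_drt_flag = ['--enable-made-up-feature']  # invented flags

    def get_option(options, name, default=None):
        return getattr(options, name, default)

    def build_cmd_line(executable, options):
        cmd = [executable]
        cmd.extend(get_option(options, 'additional_drt_flag', []))
        return cmd

    print(build_cmd_line('DumpRenderTree', FakeOptions()))
    # -> ['DumpRenderTree', '--enable-made-up-feature']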
+          run_time = time.time() - start_time          output_image = self._output_image_with_retry() -        assert output_image is not None -        return base.DriverOutput(''.join(output), output_image, actual_checksum, -                                 crash, run_time, timeout, ''.join(error)) +        text = ''.join(output) +        if not text: +            text = None + +        return base.DriverOutput(text, output_image, actual_checksum, audio=None, +            crash=crash, test_time=run_time, timeout=timeout, error=''.join(error))      def stop(self):          if self._proc: @@ -513,21 +524,19 @@ class ChromiumDriver(base.Driver):              self._proc.stdout.close()              if self._proc.stderr:                  self._proc.stderr.close() -            if sys.platform not in ('win32', 'cygwin'): -                # Closing stdin/stdout/stderr hangs sometimes on OS X, -                # (see __init__(), above), and anyway we don't want to hang -                # the harness if DRT is buggy, so we wait a couple -                # seconds to give DRT a chance to clean up, but then -                # force-kill the process if necessary. -                KILL_TIMEOUT = 3.0 -                timeout = time.time() + KILL_TIMEOUT -                # poll() is not threadsafe and can throw OSError due to: -                # http://bugs.python.org/issue1731717 -                while self._proc.poll() is None and time.time() < timeout: -                    time.sleep(0.1) -                # poll() is not threadsafe and can throw OSError due to: -                # http://bugs.python.org/issue1731717 -                if self._proc.poll() is None: -                    _log.warning('stopping test driver timed out, ' -                                 'killing it') -                    self._port._executive.kill_process(self._proc.pid) +            # Closing stdin/stdout/stderr hangs sometimes on OS X, +            # (see __init__(), above), and anyway we don't want to hang +            # the harness if DRT is buggy, so we wait a couple +            # seconds to give DRT a chance to clean up, but then +            # force-kill the process if necessary. +            timeout = time.time() + self.KILL_TIMEOUT +            while self._proc.poll() is None and time.time() < timeout: +                time.sleep(0.1) +            if self._proc.poll() is None: +                _log.warning('stopping test driver timed out, ' +                                'killing it') +                self._port._executive.kill_process(self._proc.pid) +            # FIXME: This is sometime none. What is wrong? assert self._proc.poll() is not None +            if self._proc.poll() is not None: +                self._proc.wait() +            self._proc = None diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py index 167f23e..ffc2cf7 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_gpu.py @@ -60,6 +60,7 @@ def get(platform=None, port_name='chromium-gpu', **kwargs):  # FIXME: These should really be a mixin class.  
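The rewritten stop() above closes the driver's pipes, waits up to KILL_TIMEOUT seconds for DumpRenderTree to exit, force-kills it if it is still running, and finally reaps the process. A generic sketch of that shutdown pattern using subprocess directly (the 3.0 second timeout matches the patch; using proc.kill() instead of the Executive helper is a simplification):

    import time

    KILL_TIMEOUT = 3.0

    def stop_process(proc):
        # Close stdin first so a well-behaved child can notice EOF and exit on its own.
        if proc.stdin:
            proc.stdin.close()
        deadline = time.time() + KILL_TIMEOUT
        while proc.poll() is None and time.time() < deadline:
            time.sleep(0.1)
        if proc.poll() is None:
            proc.kill()       # still running: force-kill the hung child
        if proc.poll() is not None:
            proc.wait()       # reap it so no zombie process is left behind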
def _set_gpu_options(port): +    port._graphics_type = 'gpu'      if port.get_option('accelerated_compositing') is None:          port._options.accelerated_compositing = True      if port.get_option('accelerated_2d_canvas') is None: @@ -84,17 +85,6 @@ def _tests(port, paths):      return test_files.find(port, paths) -def _test_platform_names(self): -    return ('mac', 'win', 'linux') - - -def _test_platform_name_to_name(self, test_platform_name): -    if test_platform_name in self.test_platform_names(): -        return 'chromium-gpu-' + test_platform_name -    raise ValueError('Unsupported test_platform_name: %s' % -                     test_platform_name) - -  class ChromiumGpuLinuxPort(chromium_linux.ChromiumLinuxPort):      def __init__(self, port_name='chromium-gpu-linux', **kwargs):          chromium_linux.ChromiumLinuxPort.__init__(self, port_name=port_name, **kwargs) @@ -112,21 +102,9 @@ class ChromiumGpuLinuxPort(chromium_linux.ChromiumLinuxPort):      def default_child_processes(self):          return 1 -    def graphics_type(self): -        return 'gpu' -      def tests(self, paths):          return _tests(self, paths) -    def test_platform_name(self): -        return 'linux' - -    def test_platform_names(self): -        return _test_platform_names(self) - -    def test_platform_name_to_name(self, name): -        return _test_platform_name_to_name(self, name) -  class ChromiumGpuMacPort(chromium_mac.ChromiumMacPort):      def __init__(self, port_name='chromium-gpu-mac', **kwargs): @@ -144,21 +122,9 @@ class ChromiumGpuMacPort(chromium_mac.ChromiumMacPort):      def default_child_processes(self):          return 1 -    def graphics_type(self): -        return 'gpu' -      def tests(self, paths):          return _tests(self, paths) -    def test_platform_name(self): -        return 'mac' - -    def test_platform_names(self): -        return _test_platform_names(self) - -    def test_platform_name_to_name(self, name): -        return _test_platform_name_to_name(self, name) -  class ChromiumGpuWinPort(chromium_win.ChromiumWinPort):      def __init__(self, port_name='chromium-gpu-win', **kwargs): @@ -176,17 +142,5 @@ class ChromiumGpuWinPort(chromium_win.ChromiumWinPort):      def default_child_processes(self):          return 1 -    def graphics_type(self): -        return 'gpu' -      def tests(self, paths):          return _tests(self, paths) - -    def test_platform_name(self): -        return 'win' - -    def test_platform_names(self): -        return _test_platform_names(self) - -    def test_platform_name_to_name(self, name): -        return _test_platform_name_to_name(self, name) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py index 2cd2435..49c01c4 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py @@ -47,27 +47,29 @@ class ChromiumLinuxPort(chromium.ChromiumPort):          'x86': ['chromium-linux', 'chromium-win', 'chromium', 'win', 'mac'],      } -    def __init__(self, port_name=None, rebaselining=False, **kwargs): +    def __init__(self, port_name=None, **kwargs):          port_name = port_name or 'chromium-linux'          chromium.ChromiumPort.__init__(self, port_name=port_name, **kwargs)          # We re-set the port name once the base object is fully initialized          # in order to be able to find the DRT binary properly. 
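In the chromium_linux.py constructor being modified here, the generic 'chromium-linux' name picks up the detected architecture ('chromium-linux-x86_64'), while an explicitly versioned name has the architecture parsed back out of it. A quick sketch of both directions, with the actual binary probe elided (the architecture value is just an example):

    def qualified_port_name(port_name, detected_architecture):
        # 'chromium-linux' -> 'chromium-linux-x86_64'; versioned names pass through.
        if port_name == 'chromium-linux':
            return port_name + '-' + detected_architecture
        return port_name

    def architecture_from_name(port_name):
        base, arch = port_name.rsplit('-', 1)
        assert base in ('chromium-linux', 'chromium-gpu-linux')
        return arch

    print(qualified_port_name('chromium-linux', 'x86_64'))      # chromium-linux-x86_64
    print(architecture_from_name('chromium-gpu-linux-x86_64'))  # x86_64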
-        if port_name.endswith('-linux') and not rebaselining: +        if port_name.endswith('-linux'):              self._architecture = self._determine_architecture()              # FIXME: this is an ugly hack to avoid renaming the GPU port.              if port_name == 'chromium-linux':                  port_name = port_name + '-' + self._architecture -        elif rebaselining: -            self._architecture = 'x86'          else:              base, arch = port_name.rsplit('-', 1)              assert base in ('chromium-linux', 'chromium-gpu-linux')              self._architecture = arch          assert self._architecture in self.SUPPORTED_ARCHITECTURES          assert port_name in ('chromium-linux', 'chromium-gpu-linux', -                             'chromium-linux-x86', 'chromium-linux-x86_64') +                             'chromium-linux-x86', 'chromium-linux-x86_64', +                             'chromium-gpu-linux-x86_64')          self._name = port_name +        self._operating_system = 'linux' +        # FIXME: add support for 'lucid' +        self._version = 'hardy'      def _determine_architecture(self):          driver_path = self._path_to_driver() @@ -114,14 +116,6 @@ class ChromiumLinuxPort(chromium.ChromiumPort):                         'LinuxBuildInstructions')          return result -    def test_platform_name(self): -        # We use 'linux' instead of 'chromium-linux' in test_expectations.txt. -        return 'linux' - -    def version(self): -        # FIXME: add support for Lucid. -        return 'hardy' -      #      # PROTECTED METHODS      # diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py index 141b587..7f57bdf 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py @@ -43,7 +43,7 @@ _log = logging.getLogger("webkitpy.layout_tests.port.chromium_mac")  class ChromiumMacPort(chromium.ChromiumPort):      """Chromium Mac implementation of the Port class.""" -    SUPPORTED_OS_VERSIONS = ('leopard', 'snowleopard') +    SUPPORTED_OS_VERSIONS = ('leopard', 'snowleopard', 'future')      FALLBACK_PATHS = {          'leopard': [ @@ -60,41 +60,28 @@ class ChromiumMacPort(chromium.ChromiumPort):              'mac-snowleopard',              'mac',          ], -        '': [ +        'future': [              'chromium-mac',              'chromium',              'mac',          ],      } -    def __init__(self, port_name=None, os_version_string=None, rebaselining=False, **kwargs): +    def __init__(self, port_name=None, os_version_string=None, **kwargs):          # We're a little generic here because this code is reused by the          # 'google-chrome' port as well as the 'mock-' and 'dryrun-' ports.          port_name = port_name or 'chromium-mac' - +        chromium.ChromiumPort.__init__(self, port_name=port_name, **kwargs)          if port_name.endswith('-mac'): -            # FIXME: The rebaselining flag is an ugly hack that lets us create an -            # "chromium-mac" port that is not version-specific. It should only be -            # used by rebaseline-chromium-webkit-tests to explicitly put files into -            # the generic directory. In theory we shouldn't need this, because -            # the newest mac port should be using 'chromium-mac' as the baseline -            # directory. 
However, we also don't have stable SL bots :( -            # -            # When we remove this FIXME, we also need to remove '' as a valid -            # fallback key in self.FALLBACK_PATHS. -            if rebaselining: -                self._version = '' -            else: -                self._version = mac.os_version(os_version_string, self.SUPPORTED_OS_VERSIONS) -                port_name = port_name + '-' + self._version +            self._version = mac.os_version(os_version_string, self.SUPPORTED_OS_VERSIONS) +            self._name = port_name + '-' + self._version          else:              self._version = port_name[port_name.index('-mac-') + 5:]              assert self._version in self.SUPPORTED_OS_VERSIONS - -        chromium.ChromiumPort.__init__(self, port_name=port_name, **kwargs) +        self._operating_system = 'mac'      def baseline_path(self): -        if self.version() == 'snowleopard': +        if self.version() in ('snowleopard', 'future'):              # We treat Snow Leopard as the newest version of mac,              # so it gets the base dir.              return self._webkit_baseline_path('chromium-mac') @@ -123,15 +110,6 @@ class ChromiumMacPort(chromium.ChromiumPort):      def driver_name(self):          return "DumpRenderTree" -    def test_platform_name(self): -        # We use 'mac' instead of 'chromium-mac' - -        # FIXME: Get rid of this method after rebaseline_chromium_webkit_tests dies. -        return 'mac' - -    def version(self): -        return self._version -      #      # PROTECTED METHODS      # diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py index 12011c6..4f9c302 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py @@ -53,7 +53,7 @@ class ChromiumMacPortTest(port_testcase.PortTestCase):      def test_versions(self):          port = chromium_mac.ChromiumMacPort() -        self.assertTrue(port.name() in ('chromium-mac-leopard', 'chromium-mac-snowleopard')) +        self.assertTrue(port.name() in ('chromium-mac-leopard', 'chromium-mac-snowleopard', 'chromium-mac-future'))          self.assert_name(None, '10.5.3', 'chromium-mac-leopard')          self.assert_name('chromium-mac', '10.5.3', 'chromium-mac-leopard') @@ -65,18 +65,16 @@ class ChromiumMacPortTest(port_testcase.PortTestCase):          self.assert_name('chromium-mac-snowleopard', '10.5.3', 'chromium-mac-snowleopard')          self.assert_name('chromium-mac-snowleopard', '10.6.3', 'chromium-mac-snowleopard') -        self.assertRaises(KeyError, self.assert_name, None, '10.7.1', 'chromium-mac-leopard') -        self.assertRaises(AssertionError, self.assert_name, None, '10.4.1', 'chromium-mac-leopard') +        self.assert_name(None, '10.7', 'chromium-mac-future') +        self.assert_name(None, '10.7.3', 'chromium-mac-future') +        self.assert_name(None, '10.8', 'chromium-mac-future') +        self.assert_name('chromium-mac', '10.7.3', 'chromium-mac-future') +        self.assert_name('chromium-mac-future', '10.4.3', 'chromium-mac-future') +        self.assert_name('chromium-mac-future', '10.5.3', 'chromium-mac-future') +        self.assert_name('chromium-mac-future', '10.6.3', 'chromium-mac-future') +        self.assert_name('chromium-mac-future', '10.7.3', 'chromium-mac-future') -    def test_generic_rebaselining_port(self): -        port = 
chromium_mac.ChromiumMacPort(rebaselining=True) -        self.assertEquals(port.name(), 'chromium-mac') -        self.assertEquals(port.version(), '') -        self.assertEquals(port.baseline_path(), port._webkit_baseline_path(port.name())) - -        port = chromium_mac.ChromiumMacPort(port_name='chromium-mac-leopard', rebaselining=True) -        self.assertEquals(port.name(), 'chromium-mac-leopard') -        self.assertEquals(port.baseline_path(), port._webkit_baseline_path(port.name())) +        self.assertRaises(AssertionError, self.assert_name, None, '10.4.1', 'should-raise-assertion-so-this-value-does-not-matter')      def test_baseline_path(self):          port = chromium_mac.ChromiumMacPort(port_name='chromium-mac-leopard') diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py index b287875..0260dff 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py @@ -41,8 +41,10 @@ import chromium_linux  import chromium_mac  import chromium_win -class ChromiumDriverTest(unittest.TestCase): +from webkitpy.layout_tests.port import port_testcase + +class ChromiumDriverTest(unittest.TestCase):      def setUp(self):          mock_port = Mock()          mock_port.get_option = lambda option_name: '' @@ -85,8 +87,46 @@ class ChromiumDriverTest(unittest.TestCase):          self.driver._proc.stdout.readline = mock_readline          self._assert_write_command_and_read_line(expected_crash=True) +    def test_stop(self): +        self.pid = None +        self.wait_called = False +        self.driver._proc = Mock() +        self.driver._proc.pid = 1 +        self.driver._proc.stdin = StringIO.StringIO() +        self.driver._proc.stdout = StringIO.StringIO() +        self.driver._proc.stderr = StringIO.StringIO() +        self.driver._proc.poll = lambda: None + +        def fake_wait(): +            self.assertTrue(self.pid is not None) +            self.wait_called = True + +        self.driver._proc.wait = fake_wait + +        class FakeExecutive(object): +            def kill_process(other, pid): +                self.pid = pid +                self.driver._proc.poll = lambda: 2 + +        self.driver._port._executive = FakeExecutive() +        self.driver.KILL_TIMEOUT = 0.01 +        self.driver.stop() +        self.assertTrue(self.wait_called) +        self.assertEquals(self.pid, 1) + + +class ChromiumPortTest(port_testcase.PortTestCase): +    def port_maker(self, platform): +        return chromium.ChromiumPort + +    def test_driver_cmd_line(self): +        # Override this test since ChromiumPort doesn't implement driver_cmd_line(). +        pass + +    def test_baseline_search_path(self): +        # Override this test since ChromiumPort doesn't implement baseline_search_path(). 
+        pass -class ChromiumPortTest(unittest.TestCase):      class TestMacPort(chromium_mac.ChromiumMacPort):          def __init__(self, options):              chromium_mac.ChromiumMacPort.__init__(self, diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py index d0908df..171519e 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py @@ -63,34 +63,21 @@ class ChromiumWinPort(chromium.ChromiumPort):          'xp': ['chromium-win-xp', 'chromium-win-vista', 'chromium-win', 'chromium', 'win', 'mac'],          'vista': ['chromium-win-vista', 'chromium-win', 'chromium', 'win', 'mac'],          'win7': ['chromium-win', 'chromium', 'win', 'mac'], -        '': ['chromium-win', 'chromium', 'win', 'mac'],      } -    def __init__(self, port_name=None, windows_version=None, rebaselining=False, **kwargs): +    def __init__(self, port_name=None, windows_version=None, **kwargs):          # We're a little generic here because this code is reused by the          # 'google-chrome' port as well as the 'mock-' and 'dryrun-' ports.          port_name = port_name or 'chromium-win' - +        chromium.ChromiumPort.__init__(self, port_name=port_name, **kwargs)          if port_name.endswith('-win'): -            # FIXME: The rebaselining flag is an ugly hack that lets us create an -            # "chromium-win" port that is not version-specific. It should only be -            # used by rebaseline-chromium-webkit-tests to explicitly put files into -            # the generic directory. In theory we shouldn't need this, because -            # the newest win port should be using 'chromium-win' as the baseline -            # directory. However, we also don't have stable Win 7 bots :( -            # -            # When we remove this FIXME, we also need to remove '' as a valid -            # fallback key in self.FALLBACK_PATHS. -            if rebaselining: -                self._version = '' -            else: -                self._version = os_version(windows_version) -                port_name = port_name + '-' + self._version +            self._version = os_version(windows_version) +            self._name = port_name + '-' + self._version          else:              self._version = port_name[port_name.index('-win-') + 5:]              assert self._version in self.SUPPORTED_VERSIONS -        chromium.ChromiumPort.__init__(self, port_name=port_name, **kwargs) +        self._operating_system = 'win'      def setup_environ_for_server(self):          env = chromium.ChromiumPort.setup_environ_for_server(self) @@ -129,25 +116,10 @@ class ChromiumWinPort(chromium.ChromiumPort):                         'build-instructions-windows')          return result -    def default_worker_model(self): -        # FIXME: should use base class method instead. See bug 55163. -        return 'old-threads' -      def relative_test_filename(self, filename):          path = filename[len(self.layout_tests_dir()) + 1:]          return path.replace('\\', '/') -    def test_platform_name(self): -        # We return 'win-xp', not 'chromium-win-xp' here, for convenience. - -        # FIXME: Get rid of this method after rebaseline_chromium_webkit_tests dies. 
-        if self.version() == '': -            return 'win' -        return 'win-' + self.version() - -    def version(self): -        return self._version -      #      # PROTECTED ROUTINES      # diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py index 8ea7060..9fb9e2d 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py @@ -58,13 +58,6 @@ class ChromiumWinTest(port_testcase.PortTestCase):      def _mock_path_from_chromium_base(self, *comps):          return self._port._filesystem.join("/chromium/src", *comps) -    def test_default_worker_model(self): -        port = self.make_port() -        if not port: -            return - -        self.assertEqual(port.default_worker_model(), 'old-threads') -      def test_setup_environ_for_server(self):          port = self.make_port()          if not port: @@ -127,16 +120,6 @@ class ChromiumWinTest(port_testcase.PortTestCase):          self.assertRaises(KeyError, self.assert_name, None, (5, 2), 'chromium-win-xp')          self.assertRaises(KeyError, self.assert_name, None, (7, 1), 'chromium-win-xp') -    def test_generic_rebaselining_port(self): -        port = chromium_win.ChromiumWinPort(rebaselining=True) -        self.assertEquals(port.name(), 'chromium-win') -        self.assertEquals(port.version(), '') -        self.assertEquals(port.baseline_path(), port._webkit_baseline_path(port.name())) - -        port = chromium_win.ChromiumWinPort(port_name='chromium-win-xp', rebaselining=True) -        self.assertEquals(port.name(), 'chromium-win-xp') -        self.assertEquals(port.baseline_path(), port._webkit_baseline_path(port.name())) -      def test_baseline_path(self):          port = chromium_win.ChromiumWinPort(port_name='chromium-win-xp')          self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-win-xp')) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py b/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py index 20aa776..ba99636 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/dryrun.py @@ -119,29 +119,25 @@ class DryrunDriver(base.Driver):      def run_test(self, driver_input):          start_time = time.time()          fs = self._port._filesystem -        if fs.exists(self._port.reftest_expected_filename(driver_input.filename)) or \ -            fs.exists(self._port.reftest_expected_mismatch_filename(driver_input.filename)): -            text_output = 'test-text' +        if (fs.exists(self._port.reftest_expected_filename(driver_input.filename)) or +            fs.exists(self._port.reftest_expected_mismatch_filename(driver_input.filename)) or +            driver_input.filename.endswith('-expected.html')): +            text = 'test-text'              image = 'test-image' -            hash = 'test-checksum' -        elif driver_input.filename.endswith('-expected.html'): -            text_output = 'test-text' -            image = 'test-image' -            hash = 'test-checksum' +            checksum = 'test-checksum' +            audio = None          elif driver_input.filename.endswith('-expected-mismatch.html'): -            text_output = 'test-text-mismatch' +            text = 'test-text-mismatch'              image = 'test-image-mismatch' -            hash = 'test-checksum-mismatch' -        elif driver_input.image_hash is not None: -           
 text_output = self._port.expected_text(driver_input.filename) -            image = self._port.expected_image(driver_input.filename) -            hash = self._port.expected_checksum(driver_input.filename) +            checksum = 'test-checksum-mismatch' +            audio = None          else: -            text_output = self._port.expected_text(driver_input.filename) -            image = None -            hash = None -        return base.DriverOutput(text_output, image, hash, False, -                                 time.time() - start_time, False, '') +            text = self._port.expected_text(driver_input.filename) +            image = self._port.expected_image(driver_input.filename) +            checksum = self._port.expected_checksum(driver_input.filename) +            audio = self._port.expected_audio(driver_input.filename) +        return base.DriverOutput(text, image, checksum, audio, crash=False, +            test_time=time.time() - start_time, timeout=False, error='')      def start(self):          pass diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_server.py b/Tools/Scripts/webkitpy/layout_tests/port/http_server.py index 1753aee..5ba767f 100755 --- a/Tools/Scripts/webkitpy/layout_tests/port/http_server.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/http_server.py @@ -214,7 +214,7 @@ class Lighttpd(http_server_base.HttpServerBase):          env = self._port_obj.setup_environ_for_server()          _log.debug('Starting http server, cmd="%s"' % str(start_cmd))          # FIXME: Should use Executive.run_command -        self._process = subprocess.Popen(start_cmd, env=env) +        self._process = subprocess.Popen(start_cmd, env=env, stdin=subprocess.PIPE)          # Wait for server to start.          self.mappings = mappings diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py index 4315543..a44d6d3 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/mac.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py @@ -39,7 +39,6 @@ _log = logging.getLogger("webkitpy.layout_tests.port.mac")  def os_version(os_version_string=None, supported_versions=None): -    # We only support Tiger, Leopard, and Snow Leopard.      if not os_version_string:          if hasattr(platform, 'mac_ver') and platform.mac_ver()[0]:              os_version_string = platform.mac_ver()[0] @@ -52,7 +51,8 @@ def os_version(os_version_string=None, supported_versions=None):          5: 'leopard',          6: 'snowleopard',      } -    version_string = version_strings[release_version] +    assert release_version >= min(version_strings.keys()) +    version_string = version_strings.get(release_version, 'future')      if supported_versions:          assert version_string in supported_versions      return version_string @@ -62,26 +62,28 @@ class MacPort(WebKitPort):      """WebKit Mac implementation of the Port class."""      # FIXME: 'wk2' probably shouldn't be a version, it should probably be      # a modifier, like 'chromium-gpu' is to 'chromium'. 
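
The os_version() change in the mac.py hunk above replaces the hard KeyError on unknown Darwin releases with a generic 'future' version: anything newer than Snow Leopard now maps to 'future', while releases older than Tiger still trip an assert. A minimal standalone sketch of that lookup pattern (the function name here is illustrative, not the actual webkitpy helper):

    def guess_mac_version(os_version_string):
        # '10.6.3' -> 6; releases newer than the known map fall back to 'future'.
        release = int(os_version_string.split('.')[1])
        known = {4: 'tiger', 5: 'leopard', 6: 'snowleopard'}
        assert release >= min(known)  # pre-Tiger releases are still rejected
        return known.get(release, 'future')

    # guess_mac_version('10.6.3') == 'snowleopard'
    # guess_mac_version('10.7')   == 'future'
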
-    SUPPORTED_VERSIONS = ('tiger', 'leopard', 'snowleopard', 'wk2') +    SUPPORTED_VERSIONS = ('tiger', 'leopard', 'snowleopard', 'future', 'wk2')      FALLBACK_PATHS = {          'tiger': ['mac-tiger', 'mac-leopard', 'mac-snowleopard', 'mac'],          'leopard': ['mac-leopard', 'mac-snowleopard', 'mac'],          'snowleopard': ['mac-snowleopard', 'mac'], +        'future': ['mac'],          'wk2': ['mac-wk2', 'mac'],      }      def __init__(self, port_name=None, os_version_string=None, **kwargs):          port_name = port_name or 'mac' - +        WebKitPort.__init__(self, port_name=port_name, **kwargs)          if port_name == 'mac':              self._version = os_version(os_version_string) -            port_name = port_name + '-' + self._version +            self._name = port_name + '-' + self._version          else:              self._version = port_name[4:]              assert self._version in self.SUPPORTED_VERSIONS - -        WebKitPort.__init__(self, port_name=port_name, **kwargs) +        self._operating_system = 'mac' +        if not hasattr(self._options, 'time-out-ms') or self._options.time_out_ms is None: +            self._options.time_out_ms = 35000      def default_child_processes(self):          # FIXME: new-run-webkit-tests is unstable on Mac running more than @@ -92,10 +94,12 @@ class MacPort(WebKitPort):              return 4          return child_processes -    def default_worker_model(self): -        if self._multiprocessing_is_available: -            return 'processes' -        return 'threads' +    def baseline_path(self): +        if self.version() != 'future': +            return WebKitPort.baseline_path(self) + +        assert(self._name[-7:] == '-future') +        return self._webkit_baseline_path(self._name[:-7])      def baseline_search_path(self):          return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self._version]) @@ -115,12 +119,6 @@ class MacPort(WebKitPort):                                            'Skipped'))          return skipped_files -    def test_platform_name(self): -        return 'mac-' + self.version() - -    def version(self): -        return self._version -      def _build_java_test_support(self):          java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")          build_java = ["/usr/bin/make", "-C", java_tests_path] @@ -132,24 +130,13 @@ class MacPort(WebKitPort):      def _check_port_build(self):          return self._build_java_test_support() -    def _tests_for_other_platforms(self): -        # The original run-webkit-tests builds up a "whitelist" of tests to -        # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes -        # we run *all* tests and test_expectations.txt functions as a -        # blacklist. -        # FIXME: This list could be dynamic based on platform name and -        # pushed into base.Port. -        return [ -            "platform/chromium", -            "platform/gtk", -            "platform/qt", -            "platform/win", -        ] -      def _path_to_apache_config_file(self):          return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf',                                       'apache2-httpd.conf') +    def _path_to_webcore_library(self): +        return self._build_path('WebCore.framework/Versions/A/WebCore') +      # FIXME: This doesn't have anything to do with WebKit.      def _shut_down_http_server(self, server_pid):          """Shut down the lighttpd web server. 
Blocks until it's fully diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py index 4586a23..8906154 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py @@ -84,10 +84,22 @@ svg/batik/text/smallFonts.svg                             os_version_string=os_version_string)          self.assertEquals(expected, port.name()) +    def test_tests_for_other_platforms(self): +        port = mac.MacPort(port_name='mac-snowleopard') +        dirs_to_skip = port._tests_for_other_platforms() +        self.assertTrue('platform/chromium-linux' in dirs_to_skip) +        self.assertTrue('platform/mac-tiger' in dirs_to_skip) +        self.assertFalse('platform/mac' in dirs_to_skip) +        self.assertFalse('platform/mac-snowleopard' in dirs_to_skip) + +    def test_version(self): +        port = mac.MacPort() +        self.assertTrue(port.version()) +      def test_versions(self):          port = self.make_port()          if port: -            self.assertTrue(port.name() in ('mac-tiger', 'mac-leopard', 'mac-snowleopard')) +            self.assertTrue(port.name() in ('mac-tiger', 'mac-leopard', 'mac-snowleopard', 'mac-future'))          self.assert_name(None, '10.4.8', 'mac-tiger')          self.assert_name('mac', '10.4.8', 'mac-tiger') @@ -107,8 +119,16 @@ svg/batik/text/smallFonts.svg          self.assert_name('mac-snowleopard', '10.5.3', 'mac-snowleopard')          self.assert_name('mac-snowleopard', '10.6.3', 'mac-snowleopard') -        self.assertRaises(KeyError, self.assert_name, None, '10.7.1', 'mac-leopard') -        self.assertRaises(KeyError, self.assert_name, None, '10.3.1', 'mac-leopard') +        self.assert_name(None, '10.7', 'mac-future') +        self.assert_name(None, '10.7.3', 'mac-future') +        self.assert_name(None, '10.8', 'mac-future') +        self.assert_name('mac', '10.7.3', 'mac-future') +        self.assert_name('mac-future', '10.4.3', 'mac-future') +        self.assert_name('mac-future', '10.5.3', 'mac-future') +        self.assert_name('mac-future', '10.6.3', 'mac-future') +        self.assert_name('mac-future', '10.7.3', 'mac-future') + +        self.assertRaises(AssertionError, self.assert_name, None, '10.3.1', 'should-raise-assertion-so-this-value-does-not-matter')  if __name__ == '__main__': diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py index 1147846..73967cf 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py @@ -32,6 +32,7 @@ This is an implementation of the Port interface that overrides other  ports and changes the Driver binary to "MockDRT".  
""" +import base64  import logging  import optparse  import os @@ -206,15 +207,23 @@ class MockDRT(object):              test_path = test_input.uri          actual_text = port.expected_text(test_path) +        actual_audio = port.expected_audio(test_path)          if self._options.pixel_tests and test_input.checksum:              actual_checksum = port.expected_checksum(test_path)              actual_image = port.expected_image(test_path) -        self._stdout.write('Content-Type: text/plain\n') +        if actual_audio: +            self._stdout.write('Content-Type: audio/wav\n') +            self._stdout.write('Content-Transfer-Encoding: base64\n') +            output = base64.b64encode(actual_audio) +            self._stdout.write('Content-Length: %s\n' % len(output)) +            self._stdout.write(output) +        else: +            self._stdout.write('Content-Type: text/plain\n') +            # FIXME: Note that we don't ensure there is a trailing newline! +            # This mirrors actual (Mac) DRT behavior but is a bug. +            self._stdout.write(actual_text) -        # FIXME: Note that we don't ensure there is a trailing newline! -        # This mirrors actual (Mac) DRT behavior but is a bug. -        self._stdout.write(actual_text)          self._stdout.write('#EOF\n')          if self._options.pixel_tests and test_input.checksum: @@ -223,7 +232,7 @@ class MockDRT(object):              self._stdout.write('ExpectedHash: %s\n' % test_input.checksum)              if actual_checksum != test_input.checksum:                  self._stdout.write('Content-Type: image/png\n') -                self._stdout.write('Content-Length: %s\n\n' % len(actual_image)) +                self._stdout.write('Content-Length: %s\n' % len(actual_image))                  self._stdout.write(actual_image)          self._stdout.write('#EOF\n')          self._stdout.flush() diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py index b6f6e8a..c489e20 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py @@ -39,13 +39,17 @@ from webkitpy.layout_tests.port import factory  from webkitpy.layout_tests.port import port_testcase  from webkitpy.layout_tests.port import test +from webkitpy.tool import mocktool +mock_options = mocktool.MockOptions(use_apache=True, +                                    configuration='Release') +  class MockDRTPortTest(port_testcase.PortTestCase): -    def make_port(self): +    def make_port(self, options=mock_options):          if sys.platform == 'win32':              # We use this because the 'win' port doesn't work yet. -            return mock_drt.MockDRTPort(port_name='mock-chromium-win') -        return mock_drt.MockDRTPort() +            return mock_drt.MockDRTPort(port_name='mock-chromium-win', options=options) +        return mock_drt.MockDRTPort(options=options)      def test_default_worker_model(self):          # only overridding the default test; we don't care about this one. 
@@ -200,7 +204,7 @@ class MockDRTTest(unittest.TestCase):                          'ActualHash: checksum-checksum\n',                          'ExpectedHash: wrong-checksum\n',                          'Content-Type: image/png\n', -                        'Content-Length: 13\n\n', +                        'Content-Length: 13\n',                          'checksum\x8a-png',                          '#EOF\n']) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py index 649e33c..cb1b915 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py @@ -38,8 +38,7 @@ except ImportError:      multiprocessing = None  from webkitpy.tool import mocktool -mock_options = mocktool.MockOptions(results_directory='layout-test-results', -                                    use_apache=True, +mock_options = mocktool.MockOptions(use_apache=True,                                      configuration='Release')  # FIXME: This should be used for all ports, not just WebKit Mac. See @@ -60,10 +59,7 @@ class PortTestCase(unittest.TestCase):          if not maker:              return None -        port = maker(options=options) -        if hasattr(options, "results_directory"): -            port._options.results_directory = port.results_directory() -        return port +        return maker(options=options)      def test_default_worker_model(self):          port = self.make_port() @@ -81,6 +77,12 @@ class PortTestCase(unittest.TestCase):              return          self.assertTrue(len(port.driver_cmd_line())) +        options = mocktool.MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz']) +        port = self.make_port(options=options) +        cmd_line = port.driver_cmd_line() +        self.assertTrue('--foo=bar' in cmd_line) +        self.assertTrue('--foo=baz' in cmd_line) +      def disabled_test_http_server(self):          port = self.make_port()          if not port: @@ -113,6 +115,30 @@ class PortTestCase(unittest.TestCase):          port._filesystem.remove(tmpfile) +    def test_diff_image__missing_both(self): +        port = self.make_port() +        if not port: +            return +        self.assertFalse(port.diff_image(None, None, None)) +        self.assertFalse(port.diff_image(None, '', None)) +        self.assertFalse(port.diff_image('', None, None)) +        self.assertFalse(port.diff_image('', '', None)) + +    def test_diff_image__missing_actual(self): +        port = self.make_port() +        if not port: +            return +        self.assertTrue(port.diff_image(None, 'foo', None)) +        self.assertTrue(port.diff_image('', 'foo', None)) + +    def test_diff_image__missing_expected(self): +        port = self.make_port() +        if not port: +            return +        self.assertTrue(port.diff_image('foo', None, None)) +        self.assertTrue(port.diff_image('foo', '', None)) + +      def disabled_test_websocket_server(self):          port = self.make_port()          if not port: diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py index 392818d..fed7e11 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/test.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py @@ -30,6 +30,7 @@  """Dummy Port implementation used for testing."""  from __future__ import with_statement +import base64  import time  from webkitpy.common.system import filesystem_mock @@ -66,6 +67,8 @@ 
class TestInstance:          self.expected_checksum = self.actual_checksum          self.expected_image = self.actual_image +        self.actual_audio = None +        self.expected_audio = None  # This is an in-memory list of tests, what we want them to produce, and  # what we want to claim are the expected results. @@ -111,11 +114,20 @@ def unit_test_list():      tests.add('failures/expected/image_checksum.html',                actual_checksum='image_checksum_fail-checksum',                actual_image='image_checksum_fail-png') +    tests.add('failures/expected/audio.html', +              actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav', +              actual_text=None, expected_text=None, +              actual_image=None, expected_image=None, +              actual_checksum=None, expected_checksum=None)      tests.add('failures/expected/keyboard.html', keyboard=True)      tests.add('failures/expected/missing_check.html',                expected_checksum=None,                expected_image=None)      tests.add('failures/expected/missing_image.html', expected_image=None) +    tests.add('failures/expected/missing_audio.html', expected_audio=None, +              actual_text=None, expected_text=None, +              actual_image=None, expected_image=None, +              actual_checksum=None, expected_checksum=None)      tests.add('failures/expected/missing_text.html', expected_text=None)      tests.add('failures/expected/newlines_leading.html',                expected_text="\nfoo\n", actual_text="foo\n") @@ -134,6 +146,11 @@ def unit_test_list():      tests.add('http/tests/ssl/text.html')      tests.add('passes/error.html', error='stuff going to stderr')      tests.add('passes/image.html') +    tests.add('passes/audio.html', +              actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav', +              actual_text=None, expected_text=None, +              actual_image=None, expected_image=None, +              actual_checksum=None, expected_checksum=None)      tests.add('passes/platform_image.html')      tests.add('passes/checksum_in_image.html',                expected_checksum=None, @@ -184,20 +201,27 @@ def unit_test_filesystem(files=None):          add_file(files, test, '.html', '')          if test.is_reftest:              continue +        if test.actual_audio: +            add_file(files, test, '-expected.wav', test.expected_audio) +            continue +          add_file(files, test, '-expected.txt', test.expected_text)          add_file(files, test, '-expected.checksum', test.expected_checksum)          add_file(files, test, '-expected.png', test.expected_image) +      # Add the test_expectations file.      files[LAYOUT_TEST_DIR + '/platform/test/test_expectations.txt'] = """  WONTFIX : failures/expected/checksum.html = IMAGE  WONTFIX : failures/expected/crash.html = CRASH  // This one actually passes because the checksums will match.  
WONTFIX : failures/expected/image.html = PASS +WONTFIX : failures/expected/audio.html = AUDIO  WONTFIX : failures/expected/image_checksum.html = IMAGE  WONTFIX : failures/expected/mismatch.html = IMAGE  WONTFIX : failures/expected/missing_check.html = MISSING PASS  WONTFIX : failures/expected/missing_image.html = MISSING PASS +WONTFIX : failures/expected/missing_audio.html = MISSING PASS  WONTFIX : failures/expected/missing_text.html = MISSING PASS  WONTFIX : failures/expected/newlines_leading.html = TEXT  WONTFIX : failures/expected/newlines_trailing.html = TEXT @@ -220,23 +244,41 @@ WONTFIX SKIP : failures/expected/exception.html = CRASH  class TestPort(base.Port):      """Test implementation of the Port interface.""" +    ALL_BASELINE_VARIANTS = ( +        'test-mac-snowleopard', 'test-mac-leopard', +        'test-win-win7', 'test-win-vista', 'test-win-xp', +        'test-linux-x86', +    )      def __init__(self, port_name=None, user=None, filesystem=None, **kwargs): -        if not filesystem: -            filesystem = unit_test_filesystem() +        if not port_name or port_name == 'test': +            port_name = 'test-mac-leopard' +        user = user or mocktool.MockUser() +        filesystem = filesystem or unit_test_filesystem() +        base.Port.__init__(self, port_name=port_name, filesystem=filesystem, user=user, +                           **kwargs) +        self._results_directory = None          assert filesystem._tests          self._tests = filesystem._tests -        if not user: -            user = mocktool.MockUser() +        self._operating_system = 'mac' +        if port_name.startswith('test-win'): +            self._operating_system = 'win' +        elif port_name.startswith('test-linux'): +            self._operating_system = 'linux' -        if not port_name or port_name == 'test': -            port_name = 'test-mac' +        version_map = { +            'test-win-xp': 'xp', +            'test-win-win7': 'win7', +            'test-win-vista': 'vista', +            'test-mac-leopard': 'leopard', +            'test-mac-snowleopard': 'snowleopard', +            'test-linux-x86': '', +        } +        self._version = version_map[port_name]          self._expectations_path = LAYOUT_TEST_DIR + '/platform/test/test_expectations.txt' -        base.Port.__init__(self, port_name=port_name, filesystem=filesystem, user=user, -                           **kwargs)      def _path_to_driver(self):          # This routine shouldn't normally be called, but it is called by @@ -248,7 +290,15 @@ class TestPort(base.Port):          return self._filesystem.join(self.layout_tests_dir(), 'platform', self.name())      def baseline_search_path(self): -        return [self.baseline_path()] +        search_paths = { +            'test-mac-snowleopard': ['test-mac-snowleopard'], +            'test-mac-leopard': ['test-mac-leopard', 'test-mac-snowleopard'], +            'test-win-win7': ['test-win-win7'], +            'test-win-vista': ['test-win-vista', 'test-win-win7'], +            'test-win-xp': ['test-win-xp', 'test-win-vista', 'test-win-win7'], +            'test-linux-x86': ['test-linux', 'test-win-win7'], +        } +        return [self._webkit_baseline_path(d) for d in search_paths[self.name()]]      def default_child_processes(self):          return 1 @@ -279,8 +329,8 @@ class TestPort(base.Port):      def _path_to_wdiff(self):          return None -    def results_directory(self): -        return '/tmp/' + self.get_option('results_directory') +    def 
default_results_directory(self): +        return '/tmp/layout-test-results'      def setup_test_run(self):          pass @@ -303,24 +353,8 @@ class TestPort(base.Port):      def path_to_test_expectations_file(self):          return self._expectations_path -    def test_platform_name(self): -        name_map = { -            'test-mac': 'mac', -            'test-win': 'win', -            'test-win-xp': 'win-xp', -        } -        return name_map[self._name] - -    def test_platform_names(self): -        return ('mac', 'win', 'win-xp') - -    def test_platform_name_to_name(self, test_platform_name): -        name_map = { -            'mac': 'test-mac', -            'win': 'test-win', -            'win-xp': 'test-win-xp', -        } -        return name_map[test_platform_name] +    def all_baseline_variants(self): +        return self.ALL_BASELINE_VARIANTS      # FIXME: These next two routines are copied from base.py with      # the calls to path.abspath_to_uri() removed. We shouldn't have @@ -380,19 +414,6 @@ class TestPort(base.Port):          raise NotImplementedError('unknown url type: %s' % uri) -    def version(self): -        version_map = { -            'test-win-xp': 'xp', -            'test-win': 'win7', -            'test-mac': 'leopard', -        } -        return version_map[self._name] - -    def test_configuration(self): -        if not self._test_configuration: -            self._test_configuration = TestTestConfiguration(self) -        return self._test_configuration -  class TestDriver(base.Driver):      """Test/Dummy implementation of the DumpRenderTree interface.""" @@ -401,7 +422,7 @@ class TestDriver(base.Driver):          self._port = port      def cmd_line(self): -        return [self._port._path_to_driver()] +        return [self._port._path_to_driver()] + self._port.get_option('additional_drt_flag', [])      def poll(self):          return True @@ -416,20 +437,16 @@ class TestDriver(base.Driver):              raise ValueError('exception from ' + test_name)          if test.hang:              time.sleep((float(test_input.timeout) * 4) / 1000.0) + +        audio = None +        if test.actual_audio: +            audio = base64.b64decode(test.actual_audio)          return base.DriverOutput(test.actual_text, test.actual_image, -                                 test.actual_checksum, test.crash, -                                 time.time() - start_time, test.timeout, -                                 test.error) +            test.actual_checksum, audio, crash=test.crash, +            test_time=time.time() - start_time, timeout=test.timeout, error=test.error)      def start(self):          pass      def stop(self):          pass - - -class TestTestConfiguration(base.TestConfiguration): -    def all_systems(self): -        return (('mac', 'leopard', 'x86'), -                ('win', 'xp', 'x86'), -                ('win', 'win7', 'x86')) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py index 65a047d..4ac4a13 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py @@ -30,7 +30,7 @@  """WebKit implementations of the Port interface.""" - +import base64  import logging  import operator  import os @@ -69,10 +69,6 @@ class WebKitPort(base.Port):          return self._filesystem.join(self._webkit_baseline_path(self._name),                                       'test_expectations.txt') -    # Only needed by ports which maintain versioned test 
expectations (like mac-tiger vs. mac-leopard) -    def version(self): -        return '' -      def _build_driver(self):          configuration = self.get_option('configuration')          return self._config.build_dumprendertree(configuration) @@ -113,7 +109,10 @@ class WebKitPort(base.Port):          image of the two images into |diff_filename| if it is not None."""          # Handle the case where the test didn't actually generate an image. -        if not actual_contents: +        # FIXME: need unit tests for this. +        if not actual_contents and not expected_contents: +            return False +        if not actual_contents or not expected_contents:              return True          sp = self._diff_image_request(expected_contents, actual_contents) @@ -167,10 +166,10 @@ class WebKitPort(base.Port):          sp.stop()          return result -    def results_directory(self): +    def default_results_directory(self):          # Results are store relative to the built products to make it easy          # to have multiple copies of webkit checked out and built. -        return self._build_path(self.get_option('results_directory')) +        return self._build_path('layout-test-results')      def setup_test_run(self):          # This port doesn't require any specific configuration. @@ -180,19 +179,17 @@ class WebKitPort(base.Port):          return WebKitDriver(self, worker_number)      def _tests_for_other_platforms(self): -        raise NotImplementedError('WebKitPort._tests_for_other_platforms') -        # The original run-webkit-tests builds up a "whitelist" of tests to -        # run, and passes that to DumpRenderTree. new-run-webkit-tests assumes -        # we run *all* tests and test_expectations.txt functions as a -        # blacklist. -        # FIXME: This list could be dynamic based on platform name and -        # pushed into base.Port. -        return [ -            "platform/chromium", -            "platform/gtk", -            "platform/qt", -            "platform/win", -        ] +        # By default we will skip any directory under LayoutTests/platform +        # that isn't in our baseline search path (this mirrors what +        # old-run-webkit-tests does in findTestsToRun()). +        # Note this returns LayoutTests/platform/*, not platform/*/*. +        entries = self._filesystem.glob(self._webkit_baseline_path('*')) +        dirs_to_skip = [] +        for entry in entries: +            if self._filesystem.isdir(entry) and not entry in self.baseline_search_path(): +                basename = self._filesystem.basename(entry) +                dirs_to_skip.append('platform/%s' % basename) +        return dirs_to_skip      def _runtime_feature_list(self):          """Return the supported features of DRT. 
If a port doesn't support @@ -327,12 +324,6 @@ class WebKitPort(base.Port):          tests_to_skip.update(self._tests_for_disabled_features())          return tests_to_skip -    def test_platform_name(self): -        return self._name + self.version() - -    def test_platform_names(self): -        return ('mac', 'win', 'mac-tiger', 'mac-leopard', 'mac-snowleopard') -      def _build_path(self, *comps):          return self._filesystem.join(self._config.build_directory(              self.get_option('configuration')), *comps) @@ -381,11 +372,11 @@ class WebKitDriver(base.Driver):      def cmd_line(self):          cmd = self._command_wrapper(self._port.get_option('wrapper')) -        cmd += [self._port._path_to_driver(), '-'] - +        cmd.append(self._port._path_to_driver())          if self._port.get_option('pixel_tests'):              cmd.append('--pixel-tests') - +        cmd.extend(self._port.get_option('additional_drt_flag', [])) +        cmd.append('-')          return cmd      def start(self): @@ -418,47 +409,24 @@ class WebKitDriver(base.Driver):          start_time = time.time()          self._server_process.write(command) -        have_seen_content_type = False +        text = None +        image = None          actual_image_hash = None -        output = str()  # Use a byte array for output, even though it should be UTF-8. -        image = str() +        audio = None +        deadline = time.time() + int(driver_input.timeout) / 1000.0 -        timeout = int(driver_input.timeout) / 1000.0 -        deadline = time.time() + timeout -        line = self._server_process.read_line(timeout) -        while (not self._server_process.timed_out -               and not self._server_process.crashed -               and line.rstrip() != "#EOF"): -            if (line.startswith('Content-Type:') and not -                have_seen_content_type): -                have_seen_content_type = True -            else: -                # Note: Text output from DumpRenderTree is always UTF-8. -                # However, some tests (e.g. webarchives) spit out binary -                # data instead of text.  So to make things simple, we -                # always treat the output as binary. 
-                output += line -            line = self._server_process.read_line(timeout) -            timeout = deadline - time.time() +        # First block is either text or audio +        block = self._read_block(deadline) +        if block.content_type == 'audio/wav': +            audio = block.decoded_content +        else: +            text = block.decoded_content -        # Now read a second block of text for the optional image data -        remaining_length = -1 -        HASH_HEADER = 'ActualHash: ' -        LENGTH_HEADER = 'Content-Length: ' -        line = self._server_process.read_line(timeout) -        while (not self._server_process.timed_out -               and not self._server_process.crashed -               and line.rstrip() != "#EOF"): -            if line.startswith(HASH_HEADER): -                actual_image_hash = line[len(HASH_HEADER):].strip() -            elif line.startswith('Content-Type:'): -                pass -            elif line.startswith(LENGTH_HEADER): -                timeout = deadline - time.time() -                content_length = int(line[len(LENGTH_HEADER):]) -                image = self._server_process.read(timeout, content_length) -            timeout = deadline - time.time() -            line = self._server_process.read_line(timeout) +        # Now read an optional second block of image data +        block = self._read_block(deadline) +        if block.content and block.content_type == 'image/png': +            image = block.decoded_content +            actual_image_hash = block.content_hash          error_lines = self._server_process.error.splitlines()          # FIXME: This is a hack.  It is unclear why sometimes @@ -470,13 +438,59 @@ class WebKitDriver(base.Driver):          # FIXME: This seems like the wrong section of code to be doing          # this reset in.          self._server_process.error = "" -        return base.DriverOutput(output, image, actual_image_hash, -                                 self._server_process.crashed, -                                 time.time() - start_time, -                                 self._server_process.timed_out, -                                 error) +        return base.DriverOutput(text, image, actual_image_hash, audio, +            crash=self._server_process.crashed, test_time=time.time() - start_time, +            timeout=self._server_process.timed_out, error=error) + +    def _read_block(self, deadline): +        LENGTH_HEADER = 'Content-Length: ' +        HASH_HEADER = 'ActualHash: ' +        TYPE_HEADER = 'Content-Type: ' +        ENCODING_HEADER = 'Content-Transfer-Encoding: ' +        content_type = None +        encoding = None +        content_hash = None +        content_length = None + +        # Content is treated as binary data even though the text output +        # is usually UTF-8. 
+        content = '' +        timeout = deadline - time.time() +        line = self._server_process.read_line(timeout) +        while (not self._server_process.timed_out +               and not self._server_process.crashed +               and line.rstrip() != "#EOF"): +            if line.startswith(TYPE_HEADER) and content_type is None: +                content_type = line.split()[1] +            elif line.startswith(ENCODING_HEADER) and encoding is None: +                encoding = line.split()[1] +            elif line.startswith(LENGTH_HEADER) and content_length is None: +                timeout = deadline - time.time() +                content_length = int(line[len(LENGTH_HEADER):]) +                # FIXME: Technically there should probably be another blank +                # line here, but DRT doesn't write one. +                content = self._server_process.read(timeout, content_length) +            elif line.startswith(HASH_HEADER): +                content_hash = line.split()[1] +            else: +                content += line +            line = self._server_process.read_line(timeout) +            timeout = deadline - time.time() +        return ContentBlock(content_type, encoding, content_hash, content)      def stop(self):          if self._server_process:              self._server_process.stop()              self._server_process = None + + +class ContentBlock(object): +    def __init__(self, content_type, encoding, content_hash, content): +        self.content_type = content_type +        self.encoding = encoding +        self.content_hash = content_hash +        self.content = content +        if self.encoding == 'base64': +            self.decoded_content = base64.b64decode(content) +        else: +            self.decoded_content = content diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py index c72a411..ef1a1c2 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py @@ -30,6 +30,7 @@ import unittest  from webkitpy.common.system import filesystem_mock  from webkitpy.layout_tests.port.webkit import WebKitPort +from webkitpy.layout_tests.port import port_testcase  class TestWebKitPort(WebKitPort): @@ -63,7 +64,18 @@ class TestWebKitPort(WebKitPort):              return [self.skips_file]          return [] -class WebKitPortTest(unittest.TestCase): + +class WebKitPortTest(port_testcase.PortTestCase): +    def port_maker(self, platform): +        return WebKitPort + +    def test_driver_cmd_line(self): +        # Routine is not implemented. +        pass + +    def test_baseline_search_path(self): +        # Routine is not implemented. 
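
The webkit.py hunk above replaces the two hand-rolled read loops in WebKitDriver.run_test() with _read_block(), which reads '#EOF'-terminated blocks whose Content-Type, Content-Transfer-Encoding, Content-Length and ActualHash headers describe the payload, and ContentBlock, which base64-decodes the content when the encoding header says so. A deliberately simplified reader of the same shape, operating on a plain list of lines rather than a ServerProcess (it skips the Content-Length and ActualHash handling the production code performs):

    import base64

    def parse_block(lines):
        # Collect 'Name: value' headers until the first content line,
        # then accumulate raw content up to the '#EOF' terminator.
        headers = {}
        content = ''
        for line in lines:
            if line.rstrip() == '#EOF':
                break
            if ': ' in line and not content:
                name, value = line.split(': ', 1)
                headers[name] = value.strip()
            else:
                content += line
        if headers.get('Content-Transfer-Encoding') == 'base64':
            content = base64.b64decode(content)
        return headers, content

    # parse_block(['Content-Type: text/plain\n', 'hello\n', '#EOF\n'])
    # -> ({'Content-Type': 'text/plain'}, 'hello\n')
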
+        pass      def test_skipped_directories_for_symbols(self):          supported_symbols = ["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"] diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py index e7d2004..03a76f4 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/win.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/win.py @@ -38,9 +38,11 @@ _log = logging.getLogger("webkitpy.layout_tests.port.win")  class WinPort(WebKitPort):      """WebKit Win implementation of the Port class.""" -    def __init__(self, **kwargs): -        kwargs.setdefault('port_name', 'win') -        WebKitPort.__init__(self, **kwargs) +    def __init__(self, port_name=None, **kwargs): +        port_name = port_name or 'win' +        WebKitPort.__init__(self, port_name=port_name, **kwargs) +        self._version = 'win7' +        self._operating_system = 'win'      def baseline_search_path(self):          # Based on code from old-run-webkit-tests expectedDirectoryForTest() diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py index 9f1d347..50a7374 100644 --- a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py +++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py @@ -41,6 +41,8 @@ The script does the following for each platform specified:  At the end, the script generates a html that compares old and new baselines.  """ +from __future__ import with_statement +  import copy  import logging  import optparse @@ -55,27 +57,26 @@ from webkitpy.common.system import urlfetcher  from webkitpy.common.system.executive import ScriptError  from webkitpy.layout_tests import port +from webkitpy.layout_tests import read_checksum_from_png  from webkitpy.layout_tests.layout_package import test_expectations  _log = logging.getLogger(__name__)  BASELINE_SUFFIXES = ('.txt', '.png', '.checksum') -REBASELINE_PLATFORM_ORDER = ('mac', 'win', 'win-xp', 'win-vista', 'linux') -ARCHIVE_DIR_NAME_DICT = {'win': 'Webkit_Win__deps_', -                         'win-vista': 'webkit-dbg-vista', -                         'win-xp': 'Webkit_Win__deps_', -                         'mac': 'Webkit_Mac10_5__deps_', -                         'linux': 'Webkit_Linux__deps_', - -                         'win-canary': 'Webkit_Win', -                         'win-vista-canary': 'webkit-dbg-vista', -                         'win-xp-canary': 'Webkit_Win', -                         'mac-canary': 'Webkit_Mac10_5', -                         'linux-canary': 'Webkit_Linux', - -                         'gpu-mac-canary': 'Webkit_Mac10_5_-_GPU', -                         'gpu-win-canary': 'Webkit_Win_-_GPU', -                         'gpu-linux-canary': 'Webkit_Linux_-_GPU', + +ARCHIVE_DIR_NAME_DICT = { +    'chromium-win-win7': 'Webkit_Win7', +    'chromium-win-vista': 'Webkit_Vista', +    'chromium-win-xp': 'Webkit_Win', +    'chromium-mac-leopard': 'Webkit_Mac10_5', +    'chromium-mac-snowleopard': 'Webkit_Mac10_6', +    'chromium-linux-x86': 'Webkit_Linux', +    'chromium-linux-x86_64': 'Webkit_Linux_64', +    'chromium-gpu-mac-snowleopard': 'Webkit_Mac10_6_-_GPU', +    'chromium-gpu-win-xp': 'Webkit_Win_-_GPU', +    'chromium-gpu-win-win7': 'Webkit_Win7_-_GPU', +    'chromium-gpu-linux': 'Webkit_Linux_-_GPU', +    'chromium-gpu-linux-x86_64': 'Webkit_Linux_64_-_GPU',  } @@ -166,11 +167,7 @@ class Rebaseliner(object):          
self._filesystem = running_port._filesystem          self._target_port = target_port -        # FIXME: See the comments in chromium_{win,mac}.py about why we need -        # the 'rebaselining' keyword. -        self._rebaseline_port = port.get( -            self._target_port.test_platform_name_to_name(platform), options, -            filesystem=self._filesystem, rebaselining=True) +        self._rebaseline_port = port.get(platform, options, filesystem=self._filesystem)          self._rebaselining_tests = set()          self._rebaselined_tests = [] @@ -185,13 +182,13 @@ class Rebaseliner(object):          self._zip_factory = zip_factory          self._scm = scm -    def run(self, backup): +    def run(self):          """Run rebaseline process."""          log_dashed_string('Compiling rebaselining tests', self._platform)          if not self._compile_rebaselining_tests():              return False -        if not self.get_rebaselining_tests(): +        if not self._rebaselining_tests:              return True          log_dashed_string('Downloading archive', self._platform) @@ -209,8 +206,6 @@ class Rebaseliner(object):          archive_file.close()          log_dashed_string('Updating rebaselined tests in file', self._platform) -        self._update_rebaselined_tests_in_file(backup) -        _log.info('')          if len(self._rebaselining_tests) != len(self._rebaselined_tests):              _log.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN REBASELINED.') @@ -222,8 +217,23 @@ class Rebaseliner(object):          return True -    def get_rebaselining_tests(self): -        return self._rebaselining_tests +    def remove_rebaselining_expectations(self, tests, backup): +        """if backup is True, we backup the original test expectations file.""" +        new_expectations = self._test_expectations.remove_rebaselined_tests(tests) +        path = self._target_port.path_to_test_expectations_file() +        if backup: +            date_suffix = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) +            backup_file = '%s.orig.%s' % (path, date_suffix) +            if self._filesystem.exists(backup_file): +                self._filesystem.remove(backup_file) +            _log.info('Saving original file to "%s"', backup_file) +            self._filesystem.move(path, backup_file) + +        self._filesystem.write_text_file(path, new_expectations) +        # self._scm.add(path) + +    def get_rebaselined_tests(self): +        return self._rebaselined_tests      def _compile_rebaselining_tests(self):          """Compile list of tests that need rebaselining for the platform. 
@@ -232,8 +242,7 @@ class Rebaseliner(object):            False if reftests are wrongly marked as 'needs rebaselining' or True          """ -        self._rebaselining_tests = \ -            self._test_expectations.get_rebaselining_failures() +        self._rebaselining_tests = self._test_expectations.get_rebaselining_failures()          if not self._rebaselining_tests:              _log.warn('No tests found that need rebaselining.')              return True @@ -307,13 +316,7 @@ class Rebaseliner(object):          if self._options.force_archive_url:              return self._options.force_archive_url -        platform = self._platform -        if self._options.webkit_canary: -            platform += '-canary' -        if self._options.gpu: -            platform = 'gpu-' + platform - -        dir_name = self._get_archive_dir_name(platform) +        dir_name = self._get_archive_dir_name(self._platform)          if not dir_name:              return None @@ -349,8 +352,7 @@ class Rebaseliner(object):          for name in zip_namelist:              _log.debug('  ' + name) -        platform = self._rebaseline_port.test_platform_name_to_name(self._platform) -        _log.debug('Platform dir: "%s"', platform) +        _log.debug('Platform dir: "%s"', self._platform)          self._rebaselined_tests = []          for test_no, test in enumerate(self._rebaselining_tests): @@ -392,6 +394,12 @@ class Rebaseliner(object):                  self._delete_baseline(expected_fullpath)                  continue +            if suffix == '.checksum' and self._png_has_same_checksum(temp_name, test, expected_fullpath): +                self._filesystem.remove(temp_name) +                # If an old checksum exists, delete it. +                self._delete_baseline(expected_fullpath) +                continue +              self._filesystem.maybe_make_directory(self._filesystem.dirname(expected_fullpath))              self._filesystem.move(temp_name, expected_fullpath) @@ -419,15 +427,40 @@ class Rebaseliner(object):          tempfile.close()          return temp_name -    def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix, -                         platform): +    def _png_has_same_checksum(self, checksum_path, test, checksum_expected_fullpath): +        """Returns True if the fallback png for |checksum_expected_fullpath| +        contains the same checksum.""" +        fs = self._filesystem +        png_fullpath = self._first_fallback_png_for_test(test) + +        if not fs.exists(png_fullpath): +            _log.error('  Checksum without png file found! Expected %s to exist.' % png_fullpath) +            return False + +        with fs.open_binary_file_for_reading(png_fullpath) as filehandle: +            checksum_in_png = read_checksum_from_png.read_checksum(filehandle) +            checksum_in_text_file = fs.read_text_file(checksum_path) +            if checksum_in_png and checksum_in_png != checksum_in_text_file: +                _log.error("  checksum in %s and %s don't match!  Continuing" +                           " to copy but please investigate." 
% ( +                           checksum_expected_fullpath, png_fullpath)) +            return checksum_in_text_file == checksum_in_png + +    def _first_fallback_png_for_test(self, test): +        test_filepath = self._filesystem.join(self._target_port.layout_tests_dir(), test) +        all_baselines = self._rebaseline_port.expected_baselines( +            test_filepath, '.png', True) +        return self._filesystem.join(all_baselines[0][0], all_baselines[0][1]) + +    def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix, platform):          """Check whether a baseline is duplicate and can fallback to same             baseline for another platform. For example, if a test has same             baseline on linux and windows, then we only store windows             baseline and linux baseline will fallback to the windows version.          Args: -          expected_filename: baseline expectation file name. +          new_baseline: temp filename containing the new baseline results +          baseline_path: baseline expectation file name.            test: test name.            suffix: file suffix of the expected results, including dot;                    e.g. '.txt' or '.png'. @@ -487,33 +520,6 @@ class Rebaseliner(object):              return          self._scm.delete(filename) -    def _update_rebaselined_tests_in_file(self, backup): -        """Update the rebaselined tests in test expectations file. - -        Args: -          backup: if True, backup the original test expectations file. - -        Returns: -          no -        """ - -        if self._rebaselined_tests: -            new_expectations = self._test_expectations.remove_platform_from_expectations( -                self._rebaselined_tests, self._platform) -            path = self._target_port.path_to_test_expectations_file() -            if backup: -                date_suffix = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time())) -                backup_file = '%s.orig.%s' % (path, date_suffix) -                if self._filesystem.exists(backup_file): -                    self._filesystem.remove(backup_file) -                _log.info('Saving original file to "%s"', backup_file) -                self._filesystem.move(path, backup_file) - -            self._filesystem.write_text_file(path, new_expectations) -            # self._scm.add(path) -        else: -            _log.info('No test was rebaselined so nothing to remove.') -      def _create_html_baseline_files(self, baseline_fullpath):          """Create baseline files (old, new and diff) in html directory. @@ -792,10 +798,6 @@ def parse_options(args):                               action='store_true',                               help='Suppress result HTML viewing') -    option_parser.add_option('-g', '--gpu', -                            action='store_true', default=False, -                            help='Rebaseline the GPU versions') -      option_parser.add_option('-p', '--platforms',                               default=None,                               help=('Comma delimited list of platforms ' @@ -810,12 +812,6 @@ def parse_options(args):                               help=('Url of result zip file. 
This option is for debugging '                                     'purposes')) -    option_parser.add_option('-w', '--webkit_canary', -                             action='store_true', -                             default=False, -                             help=('If True, pull baselines from webkit.org ' -                                   'canary bot.')) -      option_parser.add_option('-b', '--backup',                               action='store_true',                               default=False, @@ -833,17 +829,21 @@ def parse_options(args):                               help=('Use ImageDiff from DumpRenderTree instead '                                     'of image_diff for pixel tests.')) +    option_parser.add_option('-w', '--webkit_canary', +                             action='store_true', +                             default=False, +                             help=('DEPRECATED. This flag no longer has any effect.' +                                   '  The canaries are always used.')) +      option_parser.add_option('', '--target-platform',                               default='chromium',                               help=('The target platform to rebaseline '                                     '("mac", "chromium", "qt", etc.). Defaults '                                     'to "chromium".')) +      options = option_parser.parse_args(args)[0] -    if options.platforms == None: -        if options.gpu: -            options.platforms = 'mac,win,linux' -        else: -            options.platforms = 'mac,win,win-xp,win-vista,linux' +    if options.webkit_canary: +        print "-w/--webkit-canary is no longer necessary, ignoring."      target_options = copy.copy(options)      if options.target_platform == 'chromium': @@ -866,10 +866,7 @@ def main(args):                                  '%(levelname)s %(message)s'),                          datefmt='%y%m%d %H:%M:%S') -    target_port_name = None -    if options.gpu and options.target_platform == 'chromium': -        target_port_name = 'chromium-gpu' -    target_port_obj = port.get(target_port_name, target_options) +    target_port_obj = port.get(None, target_options)      host_port_obj = get_host_port_object(options)      if not host_port_obj or not target_port_obj:          return 1 @@ -906,30 +903,21 @@ def real_main(options, target_options, host_port_obj, target_port_obj, url_fetch              the archives.          scm_obj: object used to add new baselines to the source control system.      """ -    # Verify 'platforms' option is valid. -    if not options.platforms: -        _log.error('Invalid "platforms" option. --platforms must be ' -                   'specified in order to rebaseline.') -        return 1 -    platforms = [p.strip().lower() for p in options.platforms.split(',')] -    for platform in platforms: -        if not platform in REBASELINE_PLATFORM_ORDER: -            _log.error('Invalid platform: "%s"' % (platform)) -            return 1 - -    # Adjust the platform order so rebaseline tool is running at the order of -    # 'mac', 'win' and 'linux'. This is in same order with layout test baseline -    # search paths. It simplifies how the rebaseline tool detects duplicate -    # baselines. Check _IsDupBaseline method for details. 
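
The surrounding rebaseline_chromium_webkit_tests.py changes drop the hard-coded REBASELINE_PLATFORM_ORDER and the -g/--gpu and -w/--webkit_canary plumbing: real_main() now validates any --platforms argument against target_port.all_baseline_variants() and rebaselines every variant when none are given. A small sketch of that selection logic under the same assumptions (the function name is illustrative):

    def choose_rebaseline_platforms(requested, all_variants):
        # No explicit request means 'rebaseline every known baseline variant'.
        if not requested:
            return list(all_variants)
        unknown = [p for p in requested if p not in all_variants]
        if unknown:
            raise ValueError('Invalid platform(s): %s' % ', '.join(unknown))
        return list(requested)

    # choose_rebaseline_platforms(['chromium-win-xp'],
    #     ('chromium-win-xp', 'chromium-mac-leopard')) == ['chromium-win-xp']
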
-    rebaseline_platforms = [] -    for platform in REBASELINE_PLATFORM_ORDER: -        if platform in platforms: -            rebaseline_platforms.append(platform) -      options.html_directory = setup_html_directory(host_port_obj._filesystem, options.html_directory) +    all_platforms = target_port_obj.all_baseline_variants() +    if options.platforms: +        bail = False +        for platform in options.platforms: +            if not platform in all_platforms: +                _log.error('Invalid platform: "%s"' % (platform)) +                bail = True +        if bail: +            return 1 +        rebaseline_platforms = options.platforms +    else: +        rebaseline_platforms = all_platforms -    rebaselining_tests = set() -    backup = options.backup +    rebaselined_tests = set()      for platform in rebaseline_platforms:          rebaseliner = Rebaseliner(host_port_obj, target_port_obj,                                    platform, options, url_fetcher, zip_factory, @@ -937,14 +925,16 @@ def real_main(options, target_options, host_port_obj, target_port_obj, url_fetch          _log.info('')          log_dashed_string('Rebaseline started', platform) -        if rebaseliner.run(backup): -            # Only need to backup one original copy of test expectation file. -            backup = False +        if rebaseliner.run():              log_dashed_string('Rebaseline done', platform)          else:              log_dashed_string('Rebaseline failed', platform, logging.ERROR) -        rebaselining_tests |= set(rebaseliner.get_rebaselining_tests()) +        rebaselined_tests |= set(rebaseliner.get_rebaselined_tests()) + +    if rebaselined_tests: +        rebaseliner.remove_rebaselining_expectations(rebaselined_tests, +                                                     options.backup)      _log.info('')      log_dashed_string('Rebaselining result comparison started', None) @@ -952,7 +942,7 @@ def real_main(options, target_options, host_port_obj, target_port_obj, url_fetch                                     target_port_obj,                                     options,                                     rebaseline_platforms, -                                   rebaselining_tests) +                                   rebaselined_tests)      html_generator.generate_html()      if not options.quiet:          html_generator.show_html() diff --git a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py index 7179bb7..73bc1a7 100644 --- a/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests_unittest.py @@ -65,13 +65,9 @@ def test_options():                                  html_directory='/tmp',                                  archive_url=ARCHIVE_URL,                                  force_archive_url=None, -                                webkit_canary=True, -                                use_drt=False, -                                target_platform='chromium',                                  verbose=False,                                  quiet=False, -                                platforms='mac,win,win-xp', -                                gpu=False) +                                platforms=None)  def test_host_port_and_filesystem(options, expectations): @@ -86,8 +82,12 @@ def test_host_port_and_filesystem(options, expectations):  def test_url_fetcher(filesystem):    
  urls = { +        ARCHIVE_URL + '/Webkit_Mac10_6/': '<a href="4/">',          ARCHIVE_URL + '/Webkit_Mac10_5/': '<a href="1/"><a href="2/">', +        ARCHIVE_URL + '/Webkit_Win7/': '<a href="1/">', +        ARCHIVE_URL + '/Webkit_Vista/': '<a href="1/">',          ARCHIVE_URL + '/Webkit_Win/': '<a href="1/">', +        ARCHIVE_URL + '/Webkit_Linux/': '<a href="1/">',      }      return urlfetcher_mock.make_fetcher_cls(urls)(filesystem) @@ -98,8 +98,31 @@ def test_zip_factory():              'layout-test-results/failures/expected/image-actual.txt': 'new-image-txt',              'layout-test-results/failures/expected/image-actual.checksum': 'new-image-checksum',              'layout-test-results/failures/expected/image-actual.png': 'new-image-png', +            'layout-test-results/failures/expected/image_checksum-actual.txt': 'png-comment-txt', +            'layout-test-results/failures/expected/image_checksum-actual.checksum': '0123456789', +            'layout-test-results/failures/expected/image_checksum-actual.png': 'tEXtchecksum\x000123456789',          }, -        ARCHIVE_URL + '/Webkit_Win/1/layout-test-results.zip': { +        ARCHIVE_URL + '/Webkit_Mac10_6/4/layout-test-results.zip': { +            'layout-test-results/failures/expected/image-actual.txt': 'new-image-txt', +            'layout-test-results/failures/expected/image-actual.checksum': 'new-image-checksum', +            'layout-test-results/failures/expected/image-actual.png': 'new-image-png', +        }, +         ARCHIVE_URL + '/Webkit_Vista/1/layout-test-results.zip': { +            'layout-test-results/failures/expected/image-actual.txt': 'win-image-txt', +            'layout-test-results/failures/expected/image-actual.checksum': 'win-image-checksum', +            'layout-test-results/failures/expected/image-actual.png': 'win-image-png', +        }, +          ARCHIVE_URL + '/Webkit_Win7/1/layout-test-results.zip': { +            'layout-test-results/failures/expected/image-actual.txt': 'win-image-txt', +            'layout-test-results/failures/expected/image-actual.checksum': 'win-image-checksum', +            'layout-test-results/failures/expected/image-actual.png': 'win-image-png', +        }, +          ARCHIVE_URL + '/Webkit_Win/1/layout-test-results.zip': { +            'layout-test-results/failures/expected/image-actual.txt': 'win-image-txt', +            'layout-test-results/failures/expected/image-actual.checksum': 'win-image-checksum', +            'layout-test-results/failures/expected/image-actual.png': 'win-image-png', +        }, +          ARCHIVE_URL + '/Webkit_Linux/1/layout-test-results.zip': {              'layout-test-results/failures/expected/image-actual.txt': 'win-image-txt',              'layout-test-results/failures/expected/image-actual.checksum': 'win-image-checksum',              'layout-test-results/failures/expected/image-actual.png': 'win-image-png', @@ -108,6 +131,14 @@ def test_zip_factory():      return zipfileset_mock.make_factory(ziphashes) +def test_archive(orig_archive_dict): +    new_archive_dict = {} +    for platform, dirname in orig_archive_dict.iteritems(): +        platform = platform.replace('chromium', 'test') +        new_archive_dict[platform] = dirname +    return new_archive_dict + +  class TestGetHostPortObject(unittest.TestCase):      def assert_result(self, release_present, debug_present, valid_port_obj):          # Tests whether we get a valid port object returned when we claim @@ -150,6 +181,14 @@ class TestOptions(unittest.TestCase):  class 
TestRebaseliner(unittest.TestCase): +    def setUp(self): +        if not hasattr(self, '_orig_archive'): +            self._orig_archive = rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT +            rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT = test_archive(self._orig_archive) + +    def tearDown(self): +        rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT = self._orig_archive +      def make_rebaseliner(self, expectations):          options = test_options()          host_port_obj, filesystem = test_host_port_and_filesystem(options, expectations) @@ -158,11 +197,11 @@ class TestRebaseliner(unittest.TestCase):          target_port_obj = port.get('test', target_options,                                     filesystem=filesystem)          target_port_obj._expectations = expectations -        platform = target_port_obj.test_platform_name() +        platform = target_port_obj.name()          url_fetcher = test_url_fetcher(filesystem)          zip_factory = test_zip_factory() -        mock_scm = mocktool.MockSCM() +        mock_scm = mocktool.MockSCM(filesystem)          rebaseliner = rebaseline_chromium_webkit_tests.Rebaseliner(host_port_obj,              target_port_obj, platform, options, url_fetcher, zip_factory, mock_scm)          return rebaseliner, filesystem @@ -171,7 +210,7 @@ class TestRebaseliner(unittest.TestCase):          # this method tests that was can at least instantiate an object, even          # if there is nothing to do.          rebaseliner, filesystem = self.make_rebaseliner("") -        rebaseliner.run(False) +        rebaseliner.run()          self.assertEqual(len(filesystem.written_files), 1)      def test_rebaselining_tests(self): @@ -179,19 +218,19 @@ class TestRebaseliner(unittest.TestCase):              "BUGX REBASELINE MAC : failures/expected/image.html = IMAGE")          compile_success = rebaseliner._compile_rebaselining_tests()          self.assertTrue(compile_success) -        self.assertEqual(set(['failures/expected/image.html']), rebaseliner.get_rebaselining_tests()) +        self.assertEqual(set(['failures/expected/image.html']), rebaseliner._rebaselining_tests)      def test_rebaselining_tests_should_ignore_reftests(self):          rebaseliner, filesystem = self.make_rebaseliner(              "BUGX REBASELINE : failures/expected/reftest.html = IMAGE")          compile_success = rebaseliner._compile_rebaselining_tests()          self.assertFalse(compile_success) -        self.assertFalse(rebaseliner.get_rebaselining_tests()) +        self.assertFalse(rebaseliner._rebaselining_tests)      def test_one_platform(self):          rebaseliner, filesystem = self.make_rebaseliner(              "BUGX REBASELINE MAC : failures/expected/image.html = IMAGE") -        rebaseliner.run(False) +        rebaseliner.run()          # We expect to have written 12 files over the course of this rebaseline:          # *) 3 files in /__im_tmp for the extracted archive members          # *) 3 new baselines under '/test.checkout/LayoutTests' @@ -201,25 +240,67 @@ class TestRebaseliner(unittest.TestCase):          #    create image diffs (FIXME?) and don't display the checksums.          
# *) 1 updated test_expectations file          self.assertEqual(len(filesystem.written_files), 12) -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], '') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.checksum'], 'new-image-checksum') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.png'], 'new-image-png') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.txt'], 'new-image-txt') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.checksum'], 'new-image-checksum') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.png'], 'new-image-png') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.txt'], 'new-image-txt')      def test_all_platforms(self):          rebaseliner, filesystem = self.make_rebaseliner(              "BUGX REBASELINE : failures/expected/image.html = IMAGE") -        rebaseliner.run(False) +        rebaseliner.run()          # See comment in test_one_platform for an explanation of the 12 written tests.          # Note that even though the rebaseline is marked for all platforms, each          # rebaseliner only ever does one.          self.assertEqual(len(filesystem.written_files), 12) -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], -            'BUGX REBASELINE WIN : failures/expected/image.html = IMAGE\n' -            'BUGX REBASELINE WIN-XP : failures/expected/image.html = IMAGE\n') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.checksum'], 'new-image-checksum') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.png'], 'new-image-png') -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac/failures/expected/image-expected.txt'], 'new-image-txt') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.checksum'], 'new-image-checksum') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.png'], 'new-image-png') +        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image-expected.txt'], 'new-image-txt') + +    def test_png_file_with_comment(self): +        rebaseliner, filesystem = self.make_rebaseliner( +            "BUGX REBASELINE MAC : failures/expected/image_checksum.html = IMAGE") +        compile_success = rebaseliner._compile_rebaselining_tests() +        self.assertTrue(compile_success) +        self.assertEqual(set(['failures/expected/image_checksum.html']), rebaseliner._rebaselining_tests) +        rebaseliner.run() +        # There is one less file written than |test_one_platform| because we only +        # write 2 expectations (the png and the txt file). 
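The image_checksum fixtures above carry their checksum inside the PNG itself as a tEXt chunk ('tEXtchecksum\x000123456789'), which is why these tests expect no separate -expected.checksum file to be written. A minimal sketch of reading such an embedded value back, assuming the tEXt keyword is literally "checksum" and the value is a run of hex digits; checksum_from_png is a hypothetical helper for illustration, not the port's own reader:

def checksum_from_png(png_bytes):
    # A PNG tEXt chunk stores "keyword\0text"; the fixtures above use the
    # keyword "checksum", so the value is whatever follows the NUL byte.
    marker = b'tEXtchecksum\x00'
    start = png_bytes.find(marker)
    if start == -1:
        return None
    start += len(marker)
    hex_digits = b'0123456789abcdef'
    end = start
    while end < len(png_bytes) and png_bytes[end:end + 1] in hex_digits:
        end += 1
    value = png_bytes[start:end]
    return value.decode('ascii') if value else None

assert checksum_from_png(b'tEXtchecksum\x000123456789') == '0123456789'
assert checksum_from_png(b'new-image-png') is None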
+        self.assertEqual(len(filesystem.written_files), 11)
+        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.png'], 'tEXtchecksum\x000123456789')
+        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.txt'], 'png-comment-txt')
+        self.assertFalse(filesystem.files.get('/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.checksum', None))
+
+    def test_png_file_with_comment_remove_old_checksum(self):
+        rebaseliner, filesystem = self.make_rebaseliner(
+            "BUGX REBASELINE MAC : failures/expected/image_checksum.html = IMAGE")
+        filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.png'] = 'old'
+        filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.checksum'] = 'old'
+        filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.txt'] = 'old'
+
+        compile_success = rebaseliner._compile_rebaselining_tests()
+        self.assertTrue(compile_success)
+        self.assertEqual(set(['failures/expected/image_checksum.html']), rebaseliner._rebaselining_tests)
+        rebaseliner.run()
+        # There is one more file written than |test_png_file_with_comment|
+        # because we also delete the old checksum.
+        self.assertEqual(len(filesystem.written_files), 12)
+        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.png'], 'tEXtchecksum\x000123456789')
+        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.txt'], 'png-comment-txt')
+        self.assertEqual(filesystem.files.get('/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.checksum', None), None)
+
+    def test_png_file_with_comment_as_duplicate(self):
+        rebaseliner, filesystem = self.make_rebaseliner(
+            "BUGX REBASELINE MAC : failures/expected/image_checksum.html = IMAGE")
+        filesystem.files['/test.checkout/LayoutTests/platform/test-mac-snowleopard/failures/expected/image_checksum-expected.png'] = 'tEXtchecksum\x000123456789'
+        filesystem.files['/test.checkout/LayoutTests/platform/test-mac-snowleopard/failures/expected/image_checksum-expected.txt'] = 'png-comment-txt'
+
+        compile_success = rebaseliner._compile_rebaselining_tests()
+        self.assertTrue(compile_success)
+        self.assertEqual(set(['failures/expected/image_checksum.html']), rebaseliner._rebaselining_tests)
+        rebaseliner.run()
+        self.assertEqual(filesystem.files.get('/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.png', None), None)
+        self.assertEqual(filesystem.files.get('/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.txt', None), None)
+        self.assertEqual(filesystem.files.get('/test.checkout/LayoutTests/platform/test-mac-leopard/failures/expected/image_checksum-expected.checksum', None), None)
     def test_diff_baselines_txt(self):
         rebaseliner, filesystem = self.make_rebaseliner("")
@@ -239,28 +320,37 @@ class 
TestRebaseliner(unittest.TestCase):  class TestRealMain(unittest.TestCase): +    def setUp(self): +        if not hasattr(self, '_orig_archive'): +            self._orig_archive = rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT +            rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT = test_archive(self._orig_archive) + +    def tearDown(self): +        rebaseline_chromium_webkit_tests.ARCHIVE_DIR_NAME_DICT = self._orig_archive +      def test_all_platforms(self):          expectations = "BUGX REBASELINE : failures/expected/image.html = IMAGE"          options = test_options() -          host_port_obj, filesystem = test_host_port_and_filesystem(options, expectations)          url_fetcher = test_url_fetcher(filesystem)          zip_factory = test_zip_factory()          mock_scm = mocktool.MockSCM()          oc = outputcapture.OutputCapture()          oc.capture_output() -        rebaseline_chromium_webkit_tests.real_main(options, options, host_port_obj, -            host_port_obj, url_fetcher, zip_factory, mock_scm) +        res = rebaseline_chromium_webkit_tests.real_main(options, options, +            host_port_obj, host_port_obj, url_fetcher, zip_factory, mock_scm)          oc.restore_output() -        # We expect to have written 35 files over the course of this rebaseline: -        # *) 11 files * 3 ports for the new baselines and the diffs (see breakdown -        #    under test_one_platform, above) -        # *) the updated test_expectations file -        # *) the rebaseline results html file -        self.assertEqual(len(filesystem.written_files), 35) -        self.assertEqual(filesystem.files['/test.checkout/LayoutTests/platform/test/test_expectations.txt'], '') +        # We expect to have written 36 files over the course of this rebaseline: +        # *) 6*3 files in /__im_tmp/ for the archived members of the 6 ports +        # *) 2*3 files in /test.checkout for actually differing baselines +        # *) 1 file in /test.checkout for the updated test_expectations file +        # *) 2*4 files in /tmp for the old/new baselines for the two actual ports +        # *) 2 files in /tmp for the text diffs for the two ports +        # *) 1 file in /tmp for the rebaseline results html file +        self.assertEqual(res, 0) +        self.assertEqual(len(filesystem.written_files), 36)  class TestHtmlGenerator(unittest.TestCase): @@ -268,7 +358,7 @@ class TestHtmlGenerator(unittest.TestCase):          options = mocktool.MockOptions(configuration=None, html_directory='/tmp')          host_port = port.get('test', options, filesystem=port.unit_test_filesystem(files))          generator = rebaseline_chromium_webkit_tests.HtmlGenerator(host_port, -            target_port=None, options=options, platforms=['mac'], rebaselining_tests=tests) +            target_port=None, options=options, platforms=['test-mac-leopard'], rebaselining_tests=tests)          return generator, host_port      def test_generate_baseline_links(self): diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py index d27ea1e..e814008 100755 --- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py @@ -79,7 +79,7 @@ def run(port, options, args, regular_output=sys.stderr,          printer.cleanup()          return 0 -    last_unexpected_results = _gather_unexpected_results(port._filesystem, options) +    last_unexpected_results = _gather_unexpected_results(port)      if 
options.print_last_failures:          printer.write("\n".join(last_unexpected_results) + "\n")          printer.cleanup() @@ -89,11 +89,7 @@ def run(port, options, args, regular_output=sys.stderr,      # in a try/finally to ensure that we clean up the logging configuration.      num_unexpected_results = -1      try: -        if options.worker_model in ('inline', 'threads', 'processes'): -            runner = test_runner2.TestRunner2(port, options, printer) -        else: -            runner = test_runner.TestRunner(port, options, printer) - +        runner = test_runner2.TestRunner2(port, options, printer)          runner._print_config()          printer.print_update("Collecting tests ...") @@ -135,9 +131,9 @@ def _set_up_derived_options(port_obj, options):      if options.worker_model is None:          options.worker_model = port_obj.default_worker_model() -    if options.worker_model in ('inline', 'old-inline'): +    if options.worker_model == 'inline':          if options.child_processes and int(options.child_processes) > 1: -            warnings.append("--worker-model=%s overrides --child-processes" % options.worker_model) +            warnings.append("--worker-model=inline overrides --child-processes")          options.child_processes = "1"      if not options.child_processes:          options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", @@ -152,12 +148,6 @@ def _set_up_derived_options(port_obj, options):      if not options.use_apache:          options.use_apache = sys.platform in ('darwin', 'linux2') -    if not port_obj._filesystem.isabs(options.results_directory): -        # This normalizes the path to the build dir. -        # FIXME: how this happens is not at all obvious; this is a dumb -        # interface and should be cleaned up. 
-        options.results_directory = port_obj.results_directory() -      if not options.time_out_ms:          if options.configuration == "Debug":              options.time_out_ms = str(2 * test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS) @@ -165,14 +155,27 @@ def _set_up_derived_options(port_obj, options):              options.time_out_ms = str(test_runner.TestRunner.DEFAULT_TEST_TIMEOUT_MS)      options.slow_time_out_ms = str(5 * int(options.time_out_ms)) + +    if options.additional_platform_directory: +        normalized_platform_directories = [] +        for path in options.additional_platform_directory: +            if not port_obj._filesystem.isabs(path): +                warnings.append("--additional-platform-directory=%s is ignored since it is not absolute" % path) +                continue +            normalized_platform_directories.append(port_obj._filesystem.normpath(path)) +        options.additional_platform_directory = normalized_platform_directories +      return warnings -def _gather_unexpected_results(filesystem, options): +def _gather_unexpected_results(port):      """Returns the unexpected results from the previous run, if any.""" +    filesystem = port._filesystem +    results_directory = port.results_directory() +    options = port._options      last_unexpected_results = []      if options.print_last_failures or options.retest_last_failures: -        unexpected_results_filename = filesystem.join(options.results_directory, "unexpected_results.json") +        unexpected_results_filename = filesystem.join(results_directory, "unexpected_results.json")          if filesystem.exists(unexpected_results_filename):              results = json_results_generator.load_json(filesystem, unexpected_results_filename)              last_unexpected_results = results['tests'].keys() @@ -275,10 +278,7 @@ def parse_args(args=None):          optparse.make_option("--tolerance",              help="Ignore image differences less than this percentage (some "                  "ports may ignore this option)", type="float"), -        optparse.make_option("--results-directory", -            default="layout-test-results", -            help="Output results directory source dir, relative to Debug or " -                 "Release"), +        optparse.make_option("--results-directory", help="Location of test results"),          optparse.make_option("--build-directory",              help="Path to the directory under which build files are kept (should not include configuration)"),          optparse.make_option("--new-baseline", action="store_true", @@ -288,6 +288,13 @@ def parse_args(args=None):          optparse.make_option("--reset-results", action="store_true",              default=False, help="Reset any existing baselines to the "                   "generated results"), +        optparse.make_option("--additional-drt-flag", action="append", +            default=[], help="Additional command line flag to pass to DumpRenderTree " +                 "Specify multiple times to add multiple flags."), +        optparse.make_option("--additional-platform-directory", action="append", +            default=[], help="Additional directory where to look for test " +                 "baselines (will take precendence over platform baselines). 
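The --additional-drt-flag and --additional-platform-directory options use optparse's action="append", so repeating a flag accumulates values into a list, and the derived-options code above keeps only absolute --additional-platform-directory entries (warning about the rest) before normalizing them. A small self-contained sketch of that pattern, with plain optparse and os.path standing in for the port's filesystem wrapper:

import optparse
import os.path

parser = optparse.OptionParser()
parser.add_option("--additional-platform-directory", action="append", default=[])
options, _ = parser.parse_args([
    "--additional-platform-directory", "/tmp/../foo",
    "--additional-platform-directory", "bar"])

normalized = []
for path in options.additional_platform_directory:
    if not os.path.isabs(path):
        # Mirrors the warning emitted above for non-absolute entries.
        print("--additional-platform-directory=%s is ignored since it is not absolute" % path)
        continue
    normalized.append(os.path.normpath(path))

print(normalized)  # ['/foo'] -- '/tmp/../foo' collapses, 'bar' is dropped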
" +                 "Specify multiple times to add multiple search path entries."),          optparse.make_option("--no-show-results", action="store_false",              default=True, dest="show_results",              help="Don't launch a browser with results after the tests " @@ -370,8 +377,8 @@ def parse_args(args=None):              help="Number of DumpRenderTrees to run in parallel."),          # FIXME: Display default number of child processes that will run.          optparse.make_option("--worker-model", action="store", -            default=None, help=("controls worker model. Valid values are 'old-inline', " -                                "'old-threads', 'inline', 'threads', and 'processes'.")), +            default=None, help=("controls worker model. Valid values are " +                                "'inline', 'threads', and 'processes'.")),          optparse.make_option("--experimental-fully-parallel",              action="store_true", default=False,              help="run all tests in parallel"), diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py index 7076ef2..940b4b8 100644 --- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py @@ -35,6 +35,7 @@ from __future__ import with_statement  import codecs  import itertools  import logging +import os  import Queue  import sys  import thread @@ -53,7 +54,6 @@ from webkitpy.common.system import filesystem_mock  from webkitpy.tool import mocktool  from webkitpy.layout_tests import port  from webkitpy.layout_tests import run_webkit_tests -from webkitpy.layout_tests.layout_package import dump_render_tree_thread  from webkitpy.layout_tests.port.test import TestPort, TestDriver  from webkitpy.layout_tests.port.test_files import is_reference_html_file  from webkitpy.python24.versioning import compare_version @@ -196,16 +196,20 @@ class MainTest(unittest.TestCase):              self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))      def test_child_process_1(self): -        (res, buildbot_output, regular_output, user) = logging_run( +        _, _, regular_output, _ = logging_run(               ['--print', 'config', '--worker-model', 'threads', '--child-processes', '1']) -        self.assertTrue('Running one DumpRenderTree\n' -                        in regular_output.get()) +        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))      def test_child_processes_2(self): -        (res, buildbot_output, regular_output, user) = logging_run( +        _, _, regular_output, _ = logging_run(               ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2']) -        self.assertTrue('Running 2 DumpRenderTrees in parallel\n' -                        in regular_output.get()) +        self.assertTrue(any(['Running 2 ' in line for line in regular_output.get()])) + +    def test_child_processes_min(self): +        _, _, regular_output, _ = logging_run( +             ['--print', 'config', '--worker-model', 'threads', '--child-processes', '2', 'passes'], +             tests_included=True) +        self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))      def test_dryrun(self):          batch_tests_run = get_tests_run(['--dry-run']) @@ -252,8 +256,8 @@ class MainTest(unittest.TestCase):          fs = port.unit_test_filesystem()          # We do a logging run here instead of a 
passing run in order to          # suppress the output from the json generator. -        (res, buildbot_output, regular_output, user) = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs) -        (res, buildbot_output, regular_output, user) = logging_run( +        res, buildbot_output, regular_output, user = logging_run(['--clobber-old-results'], record_results=True, filesystem=fs) +        res, buildbot_output, regular_output, user = logging_run(              ['--print-last-failures'], filesystem=fs)          self.assertEqual(regular_output.get(), ['\n\n'])          self.assertEqual(buildbot_output.get(), []) @@ -324,6 +328,10 @@ class MainTest(unittest.TestCase):          for batch in batch_tests_run:              self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch)) +    def test_run_singly_actually_runs_tests(self): +        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected']) +        self.assertEquals(res, 5) +      def test_single_file(self):          tests_run = get_tests_run(['passes/text.html'], tests_included=True, flatten_batches=True)          self.assertEquals(['passes/text.html'], tests_run) @@ -336,6 +344,12 @@ class MainTest(unittest.TestCase):          tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)          self.assertEquals([], tests_run) +    def test_stderr_is_saved(self): +        fs = port.unit_test_filesystem() +        self.assertTrue(passing_run(filesystem=fs)) +        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'), +                          'stuff going to stderr') +      def test_test_list(self):          fs = port.unit_test_filesystem()          filename = '/tmp/foo.txt' @@ -371,7 +385,7 @@ class MainTest(unittest.TestCase):      def test_exit_after_n_failures_upload(self):          fs = port.unit_test_filesystem() -        (res, buildbot_output, regular_output, user) = logging_run([ +        res, buildbot_output, regular_output, user = logging_run([                  'failures/unexpected/text-image-checksum.html',                  'passes/text.html',                  '--exit-after-n-failures', '1', @@ -465,10 +479,12 @@ class MainTest(unittest.TestCase):      def test_results_directory_relative(self):          # We run a configuration that should fail, to generate output, then          # look for what the output results url was. - +        fs = port.unit_test_filesystem() +        fs.maybe_make_directory('/tmp/cwd') +        fs.chdir('/tmp/cwd')          res, out, err, user = logging_run(['--results-directory=foo'], -                                          tests_included=True) -        self.assertEqual(user.opened_urls, ['/tmp/foo/results.html']) +                                          tests_included=True, filesystem=fs) +        self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])      # These next tests test that we run the tests in ascending alphabetical      # order per directory. 
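test_results_directory_relative above pins down the effect of dropping the old build-directory normalization: a relative --results-directory is now resolved against the current working directory. A tiny illustration, with plain os.path standing in for the port's filesystem object:

import os.path

cwd = '/tmp/cwd'           # the test chdirs here on its mock filesystem
results_directory = 'foo'  # as passed via --results-directory=foo
print(os.path.join(cwd, results_directory, 'results.html'))
# -> /tmp/cwd/foo/results.html, the URL the test expects to be opened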
HTTP tests are sharded separately from other tests, @@ -487,15 +503,6 @@ class MainTest(unittest.TestCase):      def test_run_order__inline(self):          self.assert_run_order('inline') -    def test_run_order__old_inline(self): -        self.assert_run_order('old-inline') - -    def test_run_order__threads(self): -        self.assert_run_order('old-inline', child_processes='2') - -    def test_run_order__old_threads(self): -        self.assert_run_order('old-threads', child_processes='2') -      def test_tolerance(self):          class ImageDiffTestPort(TestPort):              def diff_image(self, expected_contents, actual_contents, @@ -531,12 +538,6 @@ class MainTest(unittest.TestCase):          self.assertEqual(res, 0)          self.assertTrue('--worker-model=inline overrides --child-processes\n' in err.get()) -    def test_worker_model__old_inline(self): -        self.assertTrue(passing_run(['--worker-model', 'old-inline'])) - -    def test_worker_model__old_threads(self): -        self.assertTrue(passing_run(['--worker-model', 'old-threads'])) -      def test_worker_model__processes(self):          # FIXME: remove this when we fix test-webkitpy to work properly          # with the multiprocessing module (bug 54520). @@ -572,6 +573,17 @@ class MainTest(unittest.TestCase):                                    include_reference_html=True)          self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run) +    def test_additional_platform_directory(self): +        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo'])) +        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo'])) +        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', +            '--additional-platform-directory', '/tmp/bar'])) + +        res, buildbot_output, regular_output, user = logging_run( +             ['--additional-platform-directory', 'foo']) +        self.assertTrue('--additional-platform-directory=foo is ignored since it is not absolute\n' +                        in regular_output.get()) +  MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2') @@ -616,9 +628,9 @@ class RebaselineTest(unittest.TestCase):          file_list.remove('/tmp/layout-test-results/tests_run0.txt')          self.assertEqual(len(file_list), 6)          self.assertBaselines(file_list, -            "/platform/test-mac/passes/image") +            "/platform/test-mac-leopard/passes/image")          self.assertBaselines(file_list, -            "/platform/test-mac/failures/expected/missing_image") +            "/platform/test-mac-leopard/failures/expected/missing_image")  class DryrunTest(unittest.TestCase): diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py index 7f8a9ea..0a0db54 100644 --- a/Tools/Scripts/webkitpy/style/checkers/cpp.py +++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py @@ -312,7 +312,7 @@ class _IncludeState(dict):      def visited_primary_section(self):          return self._visited_primary_section -    def check_next_include_order(self, header_type, file_is_header): +    def check_next_include_order(self, header_type, file_is_header, primary_header_exists):          """Returns a non-empty error message if the next header is out of order.          
This function also updates the internal state to be ready to check
@@ -357,7 +358,8 @@
         else:
             assert header_type == _OTHER_HEADER
             if not file_is_header and self._section < self._PRIMARY_SECTION:
-                error_message = before_error_message
+                if primary_header_exists:
+                    error_message = before_error_message
             self._section = self._OTHER_SECTION
         return error_message
@@ -2597,6 +2598,17 @@ def _classify_include(filename, include, is_system, include_state):
     return _OTHER_HEADER
+def _does_primary_header_exist(filename):
+    """Return True if the primary header for a source file exists,
+    or False if the file is not a source file or the primary header
+    does not exist.
+    """
+    fileinfo = FileInfo(filename)
+    if not fileinfo.is_source():
+        return False
+    primary_header = fileinfo.no_extension() + ".h"
+    return os.path.isfile(primary_header)
+
+
 def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
     """Check rules that are applicable to #include lines.
@@ -2646,6 +2658,7 @@ def check_include_line(filename, file_extension, clean_lines, line_number, inclu
         include_state[include] = line_number
     header_type = _classify_include(filename, include, is_system, include_state)
+    primary_header_exists = _does_primary_header_exist(filename)
     include_state.header_types[line_number] = header_type
     # Only proceed if this isn't a duplicate header.
@@ -2657,7 +2670,9 @@ def check_include_line(filename, file_extension, clean_lines, line_number, inclu
     # 2) for header files: alphabetically sorted
     # The include_state object keeps track of the last type seen
     # and complains if the header types are out of order or missing.
-    error_message = include_state.check_next_include_order(header_type, file_extension == "h")
+    error_message = include_state.check_next_include_order(header_type,
+                                                           file_extension == "h",
+                                                           primary_header_exists)
     # Check to make sure we have a blank line after primary header.
     if not error_message and header_type == _PRIMARY_HEADER:
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
index 2d2abbf..a98d0dd 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -2452,18 +2452,20 @@ class OrderOfIncludesTest(CppStyleTestBase):
         # Cheat os.path.abspath called in FileInfo class.
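The OrderOfIncludesTest setUp below swaps os.path.abspath and os.path.isfile by hand and restores them in tearDown, and test_primary_header later restores os.path.isfile inline. A sketch of a context manager that makes the restoration automatic; stub_attribute is a hypothetical helper for illustration, not part of webkitpy:

import contextlib
import os.path


@contextlib.contextmanager
def stub_attribute(owner, name, replacement):
    # Swap in a replacement attribute and guarantee the original comes back,
    # even if an assertion inside the with-block raises.
    original = getattr(owner, name)
    setattr(owner, name, replacement)
    try:
        yield
    finally:
        setattr(owner, name, original)


real_isfile = os.path.isfile
with stub_attribute(os.path, 'isfile', lambda filename: True):
    assert os.path.isfile('Foo.h')  # every "header" exists inside the block
assert os.path.isfile is real_isfile  # the real function is back afterwards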
self.os_path_abspath_orig = os.path.abspath +        self.os_path_isfile_orig = os.path.isfile          os.path.abspath = lambda value: value      def tearDown(self):          os.path.abspath = self.os_path_abspath_orig +        os.path.isfile = self.os_path_isfile_orig      def test_check_next_include_order__no_config(self):          self.assertEqual('Header file should not contain WebCore config.h.', -                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True)) +                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True, True))      def test_check_next_include_order__no_self(self):          self.assertEqual('Header file should not contain itself.', -                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True)) +                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True, True))          # Test actual code to make sure that header types are correctly assigned.          self.assert_language_rules_check('Foo.h',                                           '#include "Foo.h"\n', @@ -2475,22 +2477,22 @@ class OrderOfIncludesTest(CppStyleTestBase):      def test_check_next_include_order__likely_then_config(self):          self.assertEqual('Found header this file implements before WebCore config.h.', -                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))          self.assertEqual('Found WebCore config.h after a header this file implements.', -                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))      def test_check_next_include_order__other_then_config(self):          self.assertEqual('Found other header before WebCore config.h.', -                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))          self.assertEqual('Found WebCore config.h after other header.', -                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))      def test_check_next_include_order__config_then_other_then_likely(self): -        self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False)) +        self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False, True))          self.assertEqual('Found other header before a header this file implements.', -                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False, True))          self.assertEqual('Found header this file implements after other header.', -                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False)) +                         self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False, True))      def test_check_alphabetical_include_order(self):          self.assert_language_rules_check('foo.h', @@ -2586,6 +2588,34 @@ 
class OrderOfIncludesTest(CppStyleTestBase):                                           '#include "g.h"\n',                                           '"foo.h" already included at foo.cpp:2  [build/include] [4]') +    def test_primary_header(self): +        # File with non-existing primary header should not produce errors. +        self.assert_language_rules_check('foo.cpp', +                                         '#include "config.h"\n' +                                         '\n' +                                         '#include "bar.h"\n', +                                         '') +        # Pretend that header files exist. +        os.path.isfile = lambda filename: True +        # Missing include for existing primary header -> error. +        self.assert_language_rules_check('foo.cpp', +                                         '#include "config.h"\n' +                                         '\n' +                                         '#include "bar.h"\n', +                                         'Found other header before a header this file implements. ' +                                         'Should be: config.h, primary header, blank line, and then ' +                                         'alphabetically sorted.  [build/include_order] [4]') +        # Having include for existing primary header -> no error. +        self.assert_language_rules_check('foo.cpp', +                                         '#include "config.h"\n' +                                         '#include "foo.h"\n' +                                         '\n' +                                         '#include "bar.h"\n', +                                         '') + +        os.path.isfile = self.os_path_isfile_orig + +      def test_check_wtf_includes(self):          self.assert_language_rules_check('foo.cpp',                                           '#include "config.h"\n' diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo.py b/Tools/Scripts/webkitpy/tool/bot/botinfo.py new file mode 100644 index 0000000..b9fd938 --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/botinfo.py @@ -0,0 +1,39 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +#     * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +#     * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +#     * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +# FIXME: We should consider hanging one of these off the tool object. +class BotInfo(object): +    def __init__(self, tool): +        self._tool = tool + +    def summary_text(self): +        # bot_id is also stored on the options dictionary on the tool. +        bot_id = self._tool.status_server.bot_id +        bot_id_string = "Bot: %s  " % (bot_id) if bot_id else "" +        return "%sPort: %s  Platform: %s" % (bot_id_string, self._tool.port().name(), self._tool.platform.display_name()) diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py new file mode 100644 index 0000000..054acfc --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py @@ -0,0 +1,40 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +#     * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +#     * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +#     * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +import unittest + +from webkitpy.tool.bot.botinfo import BotInfo +from webkitpy.tool.mocktool import MockTool, MockStatusServer + + +class BotInfoTest(unittest.TestCase): + +    def test_summary_text(self): +        tool = MockTool() +        tool.status_server = MockStatusServer("MockBotId") +        self.assertEqual(BotInfo(tool).summary_text(), "Bot: MockBotId  Port: MockPort  Platform: MockPlatform 1.0") diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py index c5d9001..93cbcc8 100644 --- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py +++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py @@ -28,6 +28,7 @@  from webkitpy.common.system.executive import ScriptError  from webkitpy.common.net.layouttestresults import LayoutTestResults +from webkitpy.tool.bot.expectedfailures import ExpectedFailures  class CommitQueueTaskDelegate(object): @@ -59,6 +60,8 @@ class CommitQueueTask(object):          self._delegate = delegate          self._patch = patch          self._script_error = None +        self._results_archive_from_patch_test_run = None +        self._expected_failures = ExpectedFailures()      def _validate(self):          # Bugs might get closed, or patches might be obsoleted or r-'d while the @@ -132,7 +135,7 @@ class CommitQueueTask(object):          "Unable to build without patch")      def _test(self): -        return self._run_command([ +        success = self._run_command([              "build-and-test",              "--no-clean",              "--no-update", @@ -143,8 +146,11 @@ class CommitQueueTask(object):          "Passed tests",          "Patch does not pass tests") +        self._expected_failures.shrink_expected_failures(self._delegate.layout_test_results(), success) +        return success +      def _build_and_test_without_patch(self): -        return self._run_command([ +        success = self._run_command([              "build-and-test",              "--force-clean",              "--no-update", @@ -155,11 +161,8 @@ class CommitQueueTask(object):          "Able to pass tests without patch",          "Unable to pass tests without patch (tree is red?)") -    def _failing_results_from_last_run(self): -        results = self._delegate.layout_test_results() -        if not results: -            return []  # Makes callers slighty cleaner to not have to deal with None -        return results.failing_test_results() +        self._expected_failures.shrink_expected_failures(self._delegate.layout_test_results(), success) +        return success      def _land(self):          # Unclear if this should pass --quiet or not.  If --parent-command always does the reporting, then it should. @@ -177,36 +180,59 @@ class CommitQueueTask(object):      def _report_flaky_tests(self, flaky_test_results, results_archive):          self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive) +    def _results_failed_different_tests(self, first, second): +        first_failing_tests = [] if not first else first.failing_tests() +        second_failing_tests = [] if not second else second.failing_tests() +        return first_failing_tests != second_failing_tests +      def _test_patch(self):          if self._test():              return True -        first_results = self._failing_results_from_last_run() -        first_failing_tests = [result.filename for result in first_results] +        # Note: archive_last_layout_test_results deletes the results directory, making these calls order-sensitve. 
+        # We could remove this dependency by building the layout_test_results from the archive. +        first_results = self._delegate.layout_test_results()          first_results_archive = self._delegate.archive_last_layout_test_results(self._patch) + +        if self._expected_failures.failures_were_expected(first_results): +            return True +          if self._test(): -            # Only report flaky tests if we were successful at archiving results. -            if first_results_archive: -                self._report_flaky_tests(first_results, first_results_archive) +            # Only report flaky tests if we were successful at parsing results.html and archiving results. +            if first_results and first_results_archive: +                self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)              return True -        second_results = self._failing_results_from_last_run() -        second_failing_tests = [result.filename for result in second_results] -        if first_failing_tests != second_failing_tests: -            # We could report flaky tests here, but since run-webkit-tests -            # is run with --exit-after-N-failures=1, we would need to -            # be careful not to report constant failures as flaky due to earlier -            # flaky test making them not fail (no results) in one of the runs. +        second_results = self._delegate.layout_test_results() +        if self._results_failed_different_tests(first_results, second_results): +            # We could report flaky tests here, but we would need to be careful +            # to use similar checks to ExpectedFailures._can_trust_results +            # to make sure we don't report constant failures as flakes when +            # we happen to hit the --exit-after-N-failures limit.              # See https://bugs.webkit.org/show_bug.cgi?id=51272              return False +        # Archive (and remove) second results so layout_test_results() after +        # build_and_test_without_patch won't use second results instead of the clean-tree results. +        second_results_archive = self._delegate.archive_last_layout_test_results(self._patch) +          if self._build_and_test_without_patch(): -            return self.report_failure()  # The error from the previous ._test() run is real, report it. -        return False  # Tree must be red, just retry later. +            # The error from the previous ._test() run is real, report it. +            return self.report_failure(first_results_archive) + +        clean_tree_results = self._delegate.layout_test_results() +        self._expected_failures.grow_expected_failures(clean_tree_results) + +        return False  # Tree must be redder than we expected, just retry later. + +    def results_archive_from_patch_test_run(self, patch): +        assert(self._patch.id() == patch.id())  # CommitQueueTask is not currently re-useable. 
+        return self._results_archive_from_patch_test_run -    def report_failure(self): +    def report_failure(self, results_archive=None):          if not self._validate():              return False +        self._results_archive_from_patch_test_run = results_archive          raise self._script_error      def run(self): diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py index 87d0ab5..7324d78 100644 --- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py +++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py @@ -30,6 +30,7 @@ from datetime import datetime  import unittest  from webkitpy.common.net import bugzilla +from webkitpy.common.net.layouttestresults import LayoutTestResults  from webkitpy.common.system.deprecated_logging import error, log  from webkitpy.common.system.outputcapture import OutputCapture  from webkitpy.layout_tests.layout_package import test_results @@ -77,9 +78,6 @@ class MockCommitQueue(CommitQueueTaskDelegate):  class CommitQueueTaskTest(unittest.TestCase): -    def _mock_test_result(self, testname): -        return test_results.TestResult(testname, [test_failures.FailureTextMismatch()]) -      def _run_through_task(self, commit_queue, expected_stderr, expected_exception=None, expect_retry=False):          tool = MockTool(log_executive=True)          patch = tool.bugs.fetch_attachment(197) @@ -190,6 +188,9 @@ command_failed: failure_message='Unable to build without patch' script_error='MO              None,              ScriptError("MOCK tests failure"),          ]) +        # CommitQueueTask will only report flaky tests if we successfully parsed +        # results.html and returned a LayoutTestResults object, so we fake one. +        commit_queue.layout_test_results = lambda: LayoutTestResults([])          expected_stderr = """run_webkit_patch: ['clean']  command_passed: success_message='Cleaned working directory' patch='197'  run_webkit_patch: ['update'] @@ -217,6 +218,7 @@ command_passed: success_message='Landed patch' patch='197'              None,              ScriptError("MOCK tests failure"),          ]) +        commit_queue.layout_test_results = lambda: LayoutTestResults([])          # It's possible delegate to fail to archive layout tests, don't try to report          # flaky tests when that happens.          
commit_queue.archive_last_layout_test_results = lambda patch: None @@ -237,10 +239,25 @@ command_passed: success_message='Landed patch' patch='197'  """          self._run_through_task(commit_queue, expected_stderr) -    _double_flaky_test_counter = 0 -      def test_double_flaky_test_failure(self): -        commit_queue = MockCommitQueue([ +        class DoubleFlakyCommitQueue(MockCommitQueue): +            def __init__(self, error_plan): +                MockCommitQueue.__init__(self, error_plan) +                self._double_flaky_test_counter = 0 + +            def run_command(self, command): +                self._double_flaky_test_counter += 1 +                MockCommitQueue.run_command(self, command) + +            def _mock_test_result(self, testname): +                return test_results.TestResult(testname, [test_failures.FailureTextMismatch()]) + +            def layout_test_results(self): +                if self._double_flaky_test_counter % 2: +                    return LayoutTestResults([self._mock_test_result('foo.html')]) +                return LayoutTestResults([self._mock_test_result('bar.html')]) + +        commit_queue = DoubleFlakyCommitQueue([              None,              None,              None, @@ -268,15 +285,6 @@ command_failed: failure_message='Patch does not pass tests' script_error='MOCK t          tool = MockTool(log_executive=True)          patch = tool.bugs.fetch_attachment(197)          task = CommitQueueTask(commit_queue, patch) -        self._double_flaky_test_counter = 0 - -        def mock_failing_results_from_last_run(): -            CommitQueueTaskTest._double_flaky_test_counter += 1 -            if CommitQueueTaskTest._double_flaky_test_counter % 2: -                return [self._mock_test_result('foo.html')] -            return [self._mock_test_result('bar.html')] - -        task._failing_results_from_last_run = mock_failing_results_from_last_run          success = OutputCapture().assert_outputs(self, task.run, expected_stderr=expected_stderr)          self.assertEqual(success, False) @@ -302,6 +310,7 @@ command_failed: failure_message='Patch does not pass tests' script_error='MOCK t  archive_last_layout_test_results: patch='197'  run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']  command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='197' +archive_last_layout_test_results: patch='197'  run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']  command_passed: success_message='Able to pass tests without patch' patch='197'  """ @@ -330,6 +339,7 @@ command_failed: failure_message='Patch does not pass tests' script_error='MOCK t  archive_last_layout_test_results: patch='197'  run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']  command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='197' +archive_last_layout_test_results: patch='197'  run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']  command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='197'  """ diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py new file mode 100644 index 0000000..8736ac0 --- /dev/null +++ 
b/Tools/Scripts/webkitpy/tool/bot/expectedfailures.py @@ -0,0 +1,55 @@ +# Copyright (c) 2011 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +#     * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +#     * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +#     * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +class ExpectedFailures(object): +    def __init__(self): +        self._failures = set() + +    def _can_trust_results(self, results): +        if not results or not results.failure_limit_count(): +            return False +        return len(results.failing_tests()) != 0 and len(results.failing_tests()) != results.failure_limit_count() + +    def failures_were_expected(self, results): +        if not self._can_trust_results(results): +            return False +        return set(results.failing_tests()) <= self._failures + +    def shrink_expected_failures(self, results, run_success): +        if run_success: +            self._failures = set() +        elif self._can_trust_results(results): +            # Remove all expected failures which are not in the new failing results. +            self._failures.intersection_update(set(results.failing_tests())) + +    def grow_expected_failures(self, results): +        if not self._can_trust_results(results): +            return +        self._failures.update(results.failing_tests()) +        # FIXME: Should we assert() here that expected_failures never crosses a certain size? diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py new file mode 100644 index 0000000..8a2702b --- /dev/null +++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py @@ -0,0 +1,73 @@ +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +#     * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+#     * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +#     * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import unittest + +from webkitpy.tool.bot.expectedfailures import ExpectedFailures + + +class MockResults(object): +    def __init__(self, failing_tests=[], failure_limit=10): +        self._failing_tests = failing_tests +        self._failure_limit_count = failure_limit + +    def failure_limit_count(self): +        return self._failure_limit_count + +    def failing_tests(self): +        return self._failing_tests + + +class ExpectedFailuresTest(unittest.TestCase): +    def _assert_can_trust(self, results, can_trust): +        self.assertEquals(ExpectedFailures()._can_trust_results(results), can_trust) + +    def test_can_trust_results(self): +        self._assert_can_trust(None, False) +        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=None), False) +        self._assert_can_trust(MockResults(failing_tests=[], failure_limit=10), False) +        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=None), False) +        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=2), True) +        self._assert_can_trust(MockResults(failing_tests=[1], failure_limit=1), False) + +    def _assert_expected(self, expected_failures, failures, expected): +        self.assertEqual(expected_failures.failures_were_expected(MockResults(failures)), expected) + +    def test_failures_were_expected(self): +        failures = ExpectedFailures() +        failures.grow_expected_failures(MockResults(['foo.html'])) +        self._assert_expected(failures, ['foo.html'], True) +        self._assert_expected(failures, ['bar.html'], False) +        failures.shrink_expected_failures(MockResults(['baz.html']), False) +        self._assert_expected(failures, ['foo.html'], False) +        self._assert_expected(failures, ['baz.html'], False) + +        failures.grow_expected_failures(MockResults(['baz.html'])) +        self._assert_expected(failures, ['baz.html'], True) +        failures.shrink_expected_failures(MockResults(), True) +        self._assert_expected(failures, ['baz.html'], False) diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py index bec593b..68e1c94 100644 --- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py +++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py @@ -33,6 +33,7 @@ import 
os.path  from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults  from webkitpy.common.config import urls +from webkitpy.tool.bot.botinfo import BotInfo  from webkitpy.tool.grammar import plural, pluralize, join_with_separators  _log = logging.getLogger(__name__) @@ -42,6 +43,7 @@ class FlakyTestReporter(object):      def __init__(self, tool, bot_name):          self._tool = tool          self._bot_name = bot_name +        self._bot_info = BotInfo(tool)      def _author_emails_for_test(self, flaky_test):          test_path = path_for_layout_test(flaky_test) @@ -121,15 +123,10 @@ If you would like to track this test fix with another bug, please close this bug          authors_string = join_with_separators(sorted(author_emails))          return " (%s: %s)" % (heading_string, authors_string) -    def _bot_information(self): -        bot_id = self._tool.status_server.bot_id -        bot_id_string = "Bot: %s  " % (bot_id) if bot_id else "" -        return "%sPort: %s  Platform: %s" % (bot_id_string, self._tool.port().name(), self._tool.platform.display_name()) -      def _latest_flake_message(self, flaky_result, patch):          failure_messages = [failure.message() for failure in flaky_result.failures]          flake_message = "The %s just saw %s flake (%s) while processing attachment %s on bug %s." % (self._bot_name, flaky_result.filename, ", ".join(failure_messages), patch.id(), patch.bug_id()) -        return "%s\n%s" % (flake_message, self._bot_information()) +        return "%s\n%s" % (flake_message, self._bot_info.summary_text())      def _results_diff_path_for_test(self, test_path):          # FIXME: This is a big hack.  We should get this path from results.json diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py index 26c98c1..1e3f35a 100644 --- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py +++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py @@ -97,12 +97,6 @@ blocked: 50856          bug = tool.bugs.fetch_bug(78)          self.assertEqual(reporter._follow_duplicate_chain(bug).id(), 76) -    def test_bot_information(self): -        tool = MockTool() -        tool.status_server = MockStatusServer("MockBotId") -        reporter = FlakyTestReporter(tool, 'dummy-queue') -        self.assertEqual(reporter._bot_information(), "Bot: MockBotId  Port: MockPort  Platform: MockPlatform 1.0") -      def test_report_flaky_tests_creating_bug(self):          tool = MockTool()          tool.filesystem = MockFileSystem({"/mock/foo/bar-diffs.txt": "mock"}) diff --git a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py index ced5b2f..c8e5fd6 100644 --- a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py +++ b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py @@ -123,8 +123,13 @@ class DownloadCommandsTest(CommandsTest):          self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr, tool=mock_tool)      def test_check_style(self): -        expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nRunning check-webkit-style\n" -        self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr) +        expected_stderr = """Processing 1 patch from 1 bug. 
+Updating working directory +MOCK run_and_throw_if_fail: ['mock-update-webkit'] +Processing patch 197 from bug 42. +MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--git-commit', 'MOCK git commit', '--diff-files', 'MockFile1'] +""" +        self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr, tool=MockTool(log_executive=True))      def test_build_attachment(self):          expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nBuilding WebKit\n" @@ -171,7 +176,7 @@ Not closing bug 42 as attachment 197 has review=+.  Assuming there are more patc          self.assert_execute_outputs(LandFromBug(), [42], options=self._default_options(), expected_stderr=expected_stderr)      def test_prepare_rollout(self): -        expected_stderr = "Preparing rollout for bug 42.\nUpdating working directory\nRunning prepare-ChangeLog\n" +        expected_stderr = "Preparing rollout for bug 42.\nUpdating working directory\n"          self.assert_execute_outputs(PrepareRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)      def test_create_rollout(self): @@ -184,7 +189,6 @@ Reason  component: MOCK component  cc: MOCK cc  blocked: 42 -Running prepare-ChangeLog  MOCK add_patch_to_bug: bug_id=78, description=ROLLOUT of r852, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False  -- Begin comment --  Any committer can land this patch automatically by marking it commit-queue+.  The commit-queue will build and test the patch before landing to ensure that the rollout will be successful.  This process takes approximately 15 minutes. @@ -202,7 +206,6 @@ where ATTACHMENT_ID is the ID of this attachment.      def test_rollout(self):          expected_stderr = """Preparing rollout for bug 42.  Updating working directory -Running prepare-ChangeLog  MOCK: user.open_url: file://...  Was that diff correct?  Building WebKit diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py index 9e50dd4..4eadb0e 100644 --- a/Tools/Scripts/webkitpy/tool/commands/queues.py +++ b/Tools/Scripts/webkitpy/tool/commands/queues.py @@ -44,11 +44,13 @@ from webkitpy.common.net.layouttestresults import LayoutTestResults  from webkitpy.common.net.statusserver import StatusServer  from webkitpy.common.system.deprecated_logging import error, log  from webkitpy.common.system.executive import ScriptError +from webkitpy.tool.bot.botinfo import BotInfo  from webkitpy.tool.bot.commitqueuetask import CommitQueueTask, CommitQueueTaskDelegate  from webkitpy.tool.bot.feeders import CommitQueueFeeder, EWSFeeder  from webkitpy.tool.bot.queueengine import QueueEngine, QueueEngineDelegate  from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter  from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler +from webkitpy.tool.steps.runtests import RunTests  from webkitpy.tool.multicommandtool import Command, TryAgain @@ -258,6 +260,20 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD          self._update_status("Processing %s" % patch_text, patch)          return True +    # FIXME: This is not really specific to the commit-queue and could be shared. 
+    def _upload_results_archive_for_patch(self, patch, results_archive_zip): +        bot_id = self._tool.status_server.bot_id or "bot" +        description = "Archive of layout-test-results from %s" % bot_id +        # results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading. +        results_archive_file = results_archive_zip.fp +        # Rewind the file object to start (since Mechanize won't do that automatically) +        # See https://bugs.webkit.org/show_bug.cgi?id=54593 +        results_archive_file.seek(0) +        comment_text = "The attached test failures were seen while running run-webkit-tests on the %s.\n" % (self.name) +        # FIXME: We could easily list the test failures from the archive here. +        comment_text += BotInfo(self._tool).summary_text() +        self._tool.bugs.add_attachment_to_bug(patch.bug_id(), results_archive_file, description, filename="layout-test-results.zip", comment_text=comment_text) +      def process_work_item(self, patch):          self._cc_watchers(patch.bug_id())          task = CommitQueueTask(self, patch) @@ -269,6 +285,9 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD          except ScriptError, e:              validator = CommitterValidator(self._tool.bugs)              validator.reject_patch_from_commit_queue(patch.id(), self._error_message_for_bug(task.failure_status_id, e)) +            results_archive = task.results_archive_from_patch_test_run(patch) +            if results_archive: +                self._upload_results_archive_for_patch(patch, results_archive)              self._did_fail(patch)      def _error_message_for_bug(self, status_id, script_error): @@ -296,19 +315,28 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD      # tool.filesystem.read_text_file.  They have different error handling at the moment.      def _read_file_contents(self, path):          try: -            with codecs.open(path, "r", "utf-8") as open_file: -                return open_file.read() -        except OSError, e:  # File does not exist or can't be read. +            return self._tool.filesystem.read_text_file(path) +        except IOError, e:  # File does not exist or can't be read.              return None -    # FIXME: This may belong on the Port object. -    def layout_test_results(self): +    # FIXME: This logic should move to the port object. +    def _create_layout_test_results(self):          results_path = self._tool.port().layout_tests_results_path()          results_html = self._read_file_contents(results_path)          if not results_html:              return None          return LayoutTestResults.results_from_string(results_html) +    def layout_test_results(self): +        results = self._create_layout_test_results() +        # FIXME: We should not have to set failure_limit_count, but we +        # do until run-webkit-tests can be updated save off the value +        # of --exit-after-N-failures in results.html/results.json. 
+        # https://bugs.webkit.org/show_bug.cgi?id=58481 +        if results: +            results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT) +        return results +      def _results_directory(self):          results_path = self._tool.port().layout_tests_results_path()          # FIXME: This is wrong in two ways: diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py index e2fb09f..d577baa 100644 --- a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py +++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py @@ -253,6 +253,7 @@ MOCK: release_work_item: commit-queue 197      def test_rollout(self):          tool = MockTool(log_executive=True) +        tool.filesystem.write_text_file('/mock/results.html', '')  # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.          tool.buildbot.light_tree_on_fire()          expected_stderr = {              "begin_work_queue": self._default_begin_work_queue_stderr("commit-queue", MockSCM.fake_checkout_root), @@ -321,6 +322,7 @@ MOCK: release_work_item: commit-queue 106      def test_manual_reject_during_processing(self):          queue = SecondThoughtsCommitQueue()          queue.bind_to_tool(MockTool()) +        queue._tool.filesystem.write_text_file('/mock/results.html', '')  # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.          queue._options = Mock()          queue._options.port = None          expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory @@ -376,6 +378,17 @@ The commit-queue is continuing to process your patch.          OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_stderr=expected_stderr) +    def test_missing_layout_test_results(self): +        queue = CommitQueue() +        tool = MockTool() +        results_path = '/mock/results.html' +        tool.filesystem = MockFileSystem({results_path: None}) +        queue.bind_to_tool(tool) +        # Make sure that our filesystem mock functions as we expect. +        self.assertRaises(IOError, tool.filesystem.read_text_file, results_path) +        # layout_test_results shouldn't raise even if the results.html file is missing. +        self.assertEquals(queue.layout_test_results(), None) +      def test_layout_test_results(self):          queue = CommitQueue()          queue.bind_to_tool(MockTool()) @@ -383,13 +396,30 @@ The commit-queue is continuing to process your patch.          self.assertEquals(queue.layout_test_results(), None)          queue._read_file_contents = lambda path: ""          self.assertEquals(queue.layout_test_results(), None) +        queue._create_layout_test_results = lambda: LayoutTestResults([]) +        results = queue.layout_test_results() +        self.assertNotEquals(results, None) +        self.assertEquals(results.failure_limit_count(), 10)  # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT      def test_archive_last_layout_test_results(self):          queue = CommitQueue()          queue.bind_to_tool(MockTool())          patch = queue._tool.bugs.fetch_attachment(128) +        # This is just to test that the method doesn't raise.          
queue.archive_last_layout_test_results(patch) +    def test_upload_results_archive_for_patch(self): +        queue = CommitQueue() +        queue.bind_to_tool(MockTool()) +        patch = queue._tool.bugs.fetch_attachment(128) +        expected_stderr = """MOCK add_attachment_to_bug: bug_id=42, description=Archive of layout-test-results from bot filename=layout-test-results.zip +-- Begin comment -- +The attached test failures were seen while running run-webkit-tests on the commit-queue. +Port: MockPort  Platform: MockPlatform 1.0 +-- End comment -- +""" +        OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_stderr=expected_stderr) +  class StyleQueueTest(QueuesTest):      def test_style_queue(self): diff --git a/Tools/Scripts/webkitpy/tool/commands/queuestest.py b/Tools/Scripts/webkitpy/tool/commands/queuestest.py index 6455617..758832e 100644 --- a/Tools/Scripts/webkitpy/tool/commands/queuestest.py +++ b/Tools/Scripts/webkitpy/tool/commands/queuestest.py @@ -67,6 +67,9 @@ class QueuesTest(unittest.TestCase):      def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, expected_exceptions=None, options=None, tool=None):          if not tool:              tool = MockTool() +            # This is a hack to make it easy for callers to not have to setup a custom MockFileSystem just to test the commit-queue +            # the cq tries to read the layout test results, and will hit a KeyError in MockFileSystem if we don't do this. +            tool.filesystem.write_text_file('/mock/results.html', "")          if not expected_stdout:              expected_stdout = {}          if not expected_stderr: diff --git a/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py b/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py index b6f69ea..da5c635 100644 --- a/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py +++ b/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py @@ -36,7 +36,6 @@ class RollCommandsTest(CommandsTest):      def test_update_chromium_deps(self):          expected_stderr = """Updating Chromium DEPS to 6764  MOCK: MockDEPS.write_variable(chromium_rev, 6764) -Running prepare-ChangeLog  MOCK: user.open_url: file://...  Was that diff correct?  Committed r49824: <http://trac.webkit.org/changeset/49824> diff --git a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py index 4313df9..4870423 100644 --- a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py +++ b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py @@ -59,8 +59,7 @@ class UploadCommandsTest(CommandsTest):          options.request_commit = False          options.review = True          options.suggest_reviewers = False -        expected_stderr = """Running check-webkit-style -MOCK: user.open_url: file://... +        expected_stderr = """MOCK: user.open_url: file://...  Was that diff correct?  Obsoleting 2 old patches on bug 42  MOCK add_patch_to_bug: bug_id=42, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False @@ -107,8 +106,7 @@ extra comment          options.request_commit = False          options.review = True          options.suggest_reviewers = False -        expected_stderr = """Running check-webkit-style -MOCK: user.open_url: file://... +        expected_stderr = """MOCK: user.open_url: file://...  Was that diff correct?  
Obsoleting 2 old patches on bug 42  MOCK add_patch_to_bug: bug_id=42, description=MOCK description, mark_for_review=True, mark_for_commit_queue=False, mark_for_landing=False diff --git a/Tools/Scripts/webkitpy/tool/mocktool.py b/Tools/Scripts/webkitpy/tool/mocktool.py index 73f55a7..ad03244 100644 --- a/Tools/Scripts/webkitpy/tool/mocktool.py +++ b/Tools/Scripts/webkitpy/tool/mocktool.py @@ -458,13 +458,14 @@ class MockSCM(Mock):      fake_checkout_root = os.path.realpath("/tmp") # realpath is needed to allow for Mac OS X's /private/tmp -    def __init__(self): +    def __init__(self, filesystem=None):          Mock.__init__(self)          # FIXME: We should probably use real checkout-root detection logic here.          # os.getcwd() can't work here because other parts of the code assume that "checkout_root"          # will actually be the root.  Since getcwd() is wrong, use a globally fake root for now.          self.checkout_root = self.fake_checkout_root          self.added_paths = set() +        self._filesystem = filesystem      def add(self, destination_path, return_exit_code=False):          self.added_paths.add(destination_path) @@ -502,6 +503,12 @@ class MockSCM(Mock):      def svn_revision_from_commit_text(self, commit_text):          return "49824" +    def delete(self, path): +        if not self._filesystem: +            return +        if self._filesystem.exists(path): +            self._filesystem.remove(path) +  class MockDEPS(object):      def read_variable(self, name): @@ -686,6 +693,13 @@ class MockPort(Mock):      def layout_tests_results_path(self):          return "/mock/results.html" +    def check_webkit_style_command(self): +        return ["mock-check-webkit-style"] + +    def update_webkit_command(self): +        return ["mock-update-webkit"] + +  class MockTestPort1(object):      def skips_layout_test(self, test_name): diff --git a/Tools/Scripts/webkitpy/tool/steps/abstractstep.py b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py index 2ba4291..59ea36a 100644 --- a/Tools/Scripts/webkitpy/tool/steps/abstractstep.py +++ b/Tools/Scripts/webkitpy/tool/steps/abstractstep.py @@ -26,7 +26,6 @@  # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE  # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-from webkitpy.common.system.deprecated_logging import log  from webkitpy.common.system.executive import ScriptError  from webkitpy.common.config.ports import WebKitPort  from webkitpy.tool.steps.options import Options @@ -37,14 +36,6 @@ class AbstractStep(object):          self._tool = tool          self._options = options -    # FIXME: This should use tool.port() -    def _run_script(self, script_name, args=None, quiet=False, port=WebKitPort): -        log("Running %s" % script_name) -        command = port.script_shell_command(script_name) -        if args: -            command.extend(args) -        self._tool.executive.run_and_throw_if_fail(command, quiet) -      def _changed_files(self, state):          return self.cached_lookup(state, "changed_files") diff --git a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py index af66c50..c2377e9 100644 --- a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py +++ b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py @@ -56,7 +56,7 @@ class CheckStyle(AbstractStep):          args.extend(self._changed_files(state))          try: -            self._run_script("check-webkit-style", args) +            self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args)          except ScriptError, e:              if self._options.non_interactive:                  # We need to re-raise the exception here to have the diff --git a/Tools/Scripts/webkitpy/tool/steps/commit.py b/Tools/Scripts/webkitpy/tool/steps/commit.py index 5dc4efb..7a03528 100644 --- a/Tools/Scripts/webkitpy/tool/steps/commit.py +++ b/Tools/Scripts/webkitpy/tool/steps/commit.py @@ -50,6 +50,7 @@ class Commit(AbstractStep):          self._state = state          username = None +        password = None          force_squash = False          num_tries = 0 @@ -58,7 +59,7 @@ class Commit(AbstractStep):              try:                  scm = self._tool.scm() -                commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, force_squash=force_squash, changed_files=self._changed_files(state)) +                commit_text = scm.commit_with_message(self._commit_message, git_commit=self._options.git_commit, username=username, password=password, force_squash=force_squash, changed_files=self._changed_files(state))                  svn_revision = scm.svn_revision_from_commit_text(commit_text)                  log("Committed r%s: <%s>" % (svn_revision, urls.view_revision_url(svn_revision)))                  self._state["commit_text"] = commit_text @@ -72,4 +73,8 @@ class Commit(AbstractStep):              except AuthenticationError, e:                  username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5)                  if not username: -                    raise ScriptError("You need to specify the username on %s to perform the commit as." % self.svn_server_host) +                    raise ScriptError("You need to specify the username on %s to perform the commit as." % e.server_host) +                if e.prompt_for_password: +                    password = self._tool.user.prompt_password("%s password for %s: " % (e.server_host, username), repeat=5) +                    if not password: +                        raise ScriptError("You need to specify the password for %s on %s to perform the commit." 
% (username, e.server_host)) diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py index 4be40ca..b30dd2f 100644 --- a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py @@ -61,7 +61,7 @@ class PrepareChangeLog(AbstractStep):              self._ensure_bug_url(state)              return          os.chdir(self._tool.scm().checkout_root) -        args = self._tool.port().script_shell_command("prepare-ChangeLog") +        args = self._tool.port().prepare_changelog_command()          if state.get("bug_id"):              args.append("--bug=%s" % state["bug_id"])              args.append("--description=%s" % self._tool.bugs.fetch_bug(state["bug_id"]).title()) diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py index 39c9a9a..e636cb4 100644 --- a/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py @@ -34,7 +34,7 @@ from webkitpy.tool.steps.abstractstep import AbstractStep  class PrepareChangeLogForDEPSRoll(AbstractStep):      def run(self, state): -        self._run_script("prepare-ChangeLog") +        self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command())          changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)          for changelog_path in changelog_paths:              ChangeLog(changelog_path).update_with_unreviewed_message("Rolled DEPS.\n\n") diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py index dcd4b93..0a47573 100644 --- a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py +++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py @@ -50,7 +50,7 @@ class PrepareChangeLogForRevert(AbstractStep):      def run(self, state):          # This could move to prepare-ChangeLog by adding a --revert= option. -        self._run_script("prepare-ChangeLog") +        self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command())          changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)          bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None          message = self._message_for_revert(state["revision_list"], state["reason"], bug_url) diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py index 282e381..793a94b 100644 --- a/Tools/Scripts/webkitpy/tool/steps/runtests.py +++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py @@ -31,6 +31,9 @@ from webkitpy.tool.steps.options import Options  from webkitpy.common.system.deprecated_logging import log  class RunTests(AbstractStep): +    # FIXME: This knowledge really belongs in the commit-queue. 
+    NON_INTERACTIVE_FAILURE_LIMIT_COUNT = 10 +      @classmethod      def options(cls):          return AbstractStep.options() + [ @@ -59,21 +62,8 @@ class RunTests(AbstractStep):          if self._options.non_interactive:              args.append("--no-new-test-results")              args.append("--no-launch-safari") -            args.append("--exit-after-n-failures=1") +            args.append("--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)              args.append("--wait-for-httpd") -            # FIXME: Hack to work around https://bugs.webkit.org/show_bug.cgi?id=38912 -            # when running the commit-queue on a mac leopard machine since compositing -            # does not work reliably on Leopard due to various graphics driver/system bugs. -            if self._tool.port().name() == "Mac" and self._tool.port().is_leopard(): -                tests_to_ignore = [] -                tests_to_ignore.append("compositing") - -                # media tests are also broken on mac leopard due to -                # a separate CoreVideo bug which causes random crashes/hangs -                # https://bugs.webkit.org/show_bug.cgi?id=38912 -                tests_to_ignore.append("media") - -                args.extend(["--ignore-tests", ",".join(tests_to_ignore)])          if self._options.quiet:              args.append("--quiet") diff --git a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py index 783ae29..e1ace2c 100644 --- a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py +++ b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py @@ -66,17 +66,12 @@ class StepsTest(unittest.TestCase):          tool.user.prompt = lambda message: 42          self._run_step(PromptForBugOrTitle, tool=tool) -    def test_runtests_leopard_commit_queue_hack_step(self): -        expected_stderr = "Running Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n" -        OutputCapture().assert_outputs(self, self._run_step, [RunTests], expected_stderr=expected_stderr) - -    def test_runtests_leopard_commit_queue_hack_command(self): +    def test_runtests_args(self):          mock_options = self._step_options()          step = RunTests(MockTool(log_executive=True), mock_options)          # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.          mock_port = WebKitPort()          mock_port.name = lambda: "Mac" -        mock_port.is_leopard = lambda: True          tool = MockTool(log_executive=True)          tool.port = lambda: mock_port          step = RunTests(tool, mock_options) @@ -87,6 +82,6 @@ MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl']  Running JavaScriptCore tests  MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests']  Running run-webkit-tests -MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--exit-after-n-failures=1', '--wait-for-httpd', '--ignore-tests', 'compositing,media', '--quiet'] +MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--exit-after-n-failures=10', '--wait-for-httpd', '--quiet']  """          OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)  | 
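For context on how the new ExpectedFailures class (Tools/Scripts/webkitpy/tool/bot/expectedfailures.py, added above) is meant to be driven, here is a minimal usage sketch of one grow/check/shrink cycle, including the "can't trust a truncated run" heuristic that motivates stamping RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT onto the parsed results in CommitQueue.layout_test_results(). Only the import path and the ExpectedFailures methods come from the diff above; the SimpleResults stand-in (modeled on the MockResults helper in expectedfailures_unittest.py), the test names, and the printed values are hypothetical illustration, not part of the commit.

# Minimal sketch, Python 2 style to match webkitpy of this era; assumptions noted above.
from webkitpy.tool.bot.expectedfailures import ExpectedFailures


class SimpleResults(object):
    """Hypothetical stand-in for LayoutTestResults; mirrors MockResults in the unit test above."""
    def __init__(self, failing_tests, failure_limit=10):
        self._failing_tests = failing_tests
        self._failure_limit = failure_limit

    def failing_tests(self):
        return self._failing_tests

    def failure_limit_count(self):
        return self._failure_limit


failures = ExpectedFailures()

# A tree-is-red run: remember the failures seen without the patch applied.
failures.grow_expected_failures(SimpleResults(['fast/dom/known-bad.html']))

# A later run with a patch: the remembered failure is not blamed on the patch...
print failures.failures_were_expected(SimpleResults(['fast/dom/known-bad.html']))   # True
# ...but a new failure is.
print failures.failures_were_expected(SimpleResults(['fast/js/new-regression.html']))  # False

# A truncated run (failure count equals the --exit-after-n-failures limit) is not
# trusted, so it can never make failures look "expected".
truncated = SimpleResults(['test%d.html' % i for i in range(10)], failure_limit=10)
print failures.failures_were_expected(truncated)  # False

# A fully green run clears the remembered failures.
failures.shrink_expected_failures(SimpleResults([]), run_success=True)
print failures.failures_were_expected(SimpleResults(['fast/dom/known-bad.html']))   # False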
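The new CommitQueue._upload_results_archive_for_patch in queues.py above relies on rewinding the ZipFile's underlying file object (results_archive_zip.fp) before handing it to Mechanize, since Mechanize reads from the current file position (see bug 54593 referenced in the diff). The stand-alone sketch below shows just that rewind step; build_results_archive, its contents, and the upload callable are hypothetical and are not the real Bugzilla attachment API.

# Minimal sketch of the rewind-before-upload step; Python 2 style, assumptions noted above.
from StringIO import StringIO
import zipfile


def build_results_archive():
    # Hypothetical in-memory archive standing in for the real layout-test-results zip.
    in_memory_file = StringIO()
    archive = zipfile.ZipFile(in_memory_file, "w")
    archive.writestr("results.html", "<html>mock results</html>")
    return archive


archive = build_results_archive()
results_file = archive.fp          # grab the underlying file object, as queues.py does
results_file.seek(0)               # rewind; the writes above left the position at EOF
upload = lambda f: len(f.read())   # hypothetical uploader that consumes the stream
print upload(results_file)         # reads non-zero bytes only because we rewound first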
