author     Steve Block <steveblock@google.com>  2010-02-02 14:57:50 +0000
committer  Steve Block <steveblock@google.com>  2010-02-04 15:06:55 +0000
commit     d0825bca7fe65beaee391d30da42e937db621564 (patch)
tree       7461c49eb5844ffd1f35d1ba2c8b7584c1620823 /WebKitTools/Scripts/webkitpy
parent     3db770bd97c5a59b6c7574ca80a39e5a51c1defd (diff)
download   external_webkit-d0825bca7fe65beaee391d30da42e937db621564.zip
           external_webkit-d0825bca7fe65beaee391d30da42e937db621564.tar.gz
           external_webkit-d0825bca7fe65beaee391d30da42e937db621564.tar.bz2
Merge webkit.org at r54127 : Initial merge by git
Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82
Diffstat (limited to 'WebKitTools/Scripts/webkitpy')
-rw-r--r--  WebKitTools/Scripts/webkitpy/BeautifulSoup.py | 2000
-rw-r--r--  WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc | bin 0 -> 74102 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/__init__.py | 8
-rw-r--r--  WebKitTools/Scripts/webkitpy/__init__.pyc | bin 0 -> 472 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/autoinstall.py | 335
-rw-r--r--  WebKitTools/Scripts/webkitpy/autoinstall.pyc | bin 0 -> 9596 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/bugzilla.py | 789
-rw-r--r--  WebKitTools/Scripts/webkitpy/bugzilla.pyc | bin 0 -> 26413 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/bugzilla_unittest.py | 303
-rw-r--r--  WebKitTools/Scripts/webkitpy/buildbot.py | 133
-rw-r--r--  WebKitTools/Scripts/webkitpy/buildbot.pyc | bin 0 -> 3525 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/buildbot_unittest.py | 155
-rw-r--r--  WebKitTools/Scripts/webkitpy/changelogs.py | 134
-rw-r--r--  WebKitTools/Scripts/webkitpy/changelogs.pyc | bin 0 -> 3908 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/changelogs_unittest.py | 179
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/__init__.py | 1
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/__init__.pyc | bin 0 -> 171 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py | 43
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc | bin 0 -> 1460 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/commandtest.py | 38
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/download.py | 284
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/download.pyc | bin 0 -> 11970 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/download_unittest.py | 127
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/early_warning_system.py | 122
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc | bin 0 -> 4828 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py | 62
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/openbugs.py | 63
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py | 50
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queries.py | 116
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queries.pyc | bin 0 -> 6110 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queries_unittest.py | 63
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queues.py | 295
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queues.pyc | bin 0 -> 15126 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queues_unittest.py | 102
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/queuestest.py | 99
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/upload.py | 406
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/upload.pyc | bin 0 -> 17471 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/commands/upload_unittest.py | 84
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/comments.py | 43
-rw-r--r--  WebKitTools/Scripts/webkitpy/comments.pyc | bin 0 -> 775 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/committers.py | 265
-rw-r--r--  WebKitTools/Scripts/webkitpy/committers.pyc | bin 0 -> 13099 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/committers_unittest.py | 65
-rw-r--r--  WebKitTools/Scripts/webkitpy/credentials.py | 132
-rw-r--r--  WebKitTools/Scripts/webkitpy/credentials.pyc | bin 0 -> 4021 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/credentials_unittest.py | 127
-rw-r--r--  WebKitTools/Scripts/webkitpy/diff_parser.py | 162
-rw-r--r--  WebKitTools/Scripts/webkitpy/diff_parser_unittest.py | 146
-rw-r--r--  WebKitTools/Scripts/webkitpy/executive.py | 171
-rw-r--r--  WebKitTools/Scripts/webkitpy/executive.pyc | bin 0 -> 4861 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/executive_unittest.py | 41
-rw-r--r--  WebKitTools/Scripts/webkitpy/grammar.py | 45
-rw-r--r--  WebKitTools/Scripts/webkitpy/grammar.pyc | bin 0 -> 692 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py | 0
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py | 229
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py | 279
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py | 67
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem | 41
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py | 184
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py | 418
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf | 89
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py | 96
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py | 395
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py | 50
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py | 248
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py | 201
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py | 210
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py | 818
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py | 267
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py | 95
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py | 511
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py | 316
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py | 1028
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py | 1697
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py | 0
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py | 72
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py | 224
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py | 266
-rw-r--r--  WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py | 122
-rw-r--r--  WebKitTools/Scripts/webkitpy/mock.py | 309
-rw-r--r--  WebKitTools/Scripts/webkitpy/mock.pyc | bin 0 -> 9932 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/mock_bugzillatool.py | 367
-rw-r--r--  WebKitTools/Scripts/webkitpy/multicommandtool.py | 299
-rw-r--r--  WebKitTools/Scripts/webkitpy/multicommandtool.pyc | bin 0 -> 13466 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py | 153
-rw-r--r--  WebKitTools/Scripts/webkitpy/networktransaction.py | 63
-rw-r--r--  WebKitTools/Scripts/webkitpy/networktransaction.pyc | bin 0 -> 2217 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/networktransaction_unittest.py | 80
-rw-r--r--  WebKitTools/Scripts/webkitpy/outputcapture.py | 62
-rw-r--r--  WebKitTools/Scripts/webkitpy/patchcollection.py | 65
-rw-r--r--  WebKitTools/Scripts/webkitpy/patchcollection.pyc | bin 0 -> 2353 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/patchcollection_unittest.py | 53
-rw-r--r--  WebKitTools/Scripts/webkitpy/queueengine.py | 144
-rw-r--r--  WebKitTools/Scripts/webkitpy/queueengine.pyc | bin 0 -> 6052 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/queueengine_unittest.py | 170
-rw-r--r--  WebKitTools/Scripts/webkitpy/scm.py | 513
-rw-r--r--  WebKitTools/Scripts/webkitpy/scm.pyc | bin 0 -> 26388 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/scm_unittest.py | 595
-rw-r--r--  WebKitTools/Scripts/webkitpy/statusserver.py | 96
-rw-r--r--  WebKitTools/Scripts/webkitpy/statusserver.pyc | bin 0 -> 3562 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/__init__.py | 56
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/__init__.pyc | bin 0 -> 2333 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/abstractstep.py | 69
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc | bin 0 -> 2750 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/applypatch.py | 42
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/applypatch.pyc | bin 0 -> 1252 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py | 43
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc | bin 0 -> 1336 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/build.py | 54
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/build.pyc | bin 0 -> 1563 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/checkstyle.py | 56
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc | bin 0 -> 1520 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py | 52
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc | bin 0 -> 1652 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py | 34
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc | bin 0 -> 883 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closebug.py | 51
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closebug.pyc | bin 0 -> 1575 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py | 58
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc | bin 0 -> 1535 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py | 41
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closepatch.py | 36
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/closepatch.pyc | bin 0 -> 925 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/commit.py | 35
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/commit.pyc | bin 0 -> 838 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/completerollout.py | 66
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/completerollout.pyc | bin 0 -> 1973 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/confirmdiff.py | 47
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc | bin 0 -> 1266 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/createbug.py | 45
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/createbug.pyc | bin 0 -> 1192 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/editchangelog.py | 37
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc | bin 0 -> 866 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py | 48
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc | bin 0 -> 1670 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py | 43
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc | bin 0 -> 1336 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/metastep.py | 54
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/metastep.pyc | bin 0 -> 1619 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py | 51
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc | bin 0 -> 1486 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/options.py | 56
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/options.pyc | bin 0 -> 3742 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/postdiff.py | 51
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/postdiff.pyc | bin 0 -> 1471 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py | 41
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/preparechangelog.py | 59
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc | bin 0 -> 1758 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py | 49
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc | bin 0 -> 1292 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py | 45
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc | bin 0 -> 1027 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/revertrevision.py | 34
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc | bin 0 -> 769 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/runtests.py | 66
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/runtests.pyc | bin 0 -> 1871 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/steps_unittest.py | 56
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/update.py | 46
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/update.pyc | bin 0 -> 1260 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py | 46
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py | 71
-rw-r--r--  WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc | bin 0 -> 2494 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/stepsequence.py | 77
-rw-r--r--  WebKitTools/Scripts/webkitpy/stepsequence.pyc | bin 0 -> 2917 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/__init__.py | 1
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/checker.py | 809
-rwxr-xr-x  WebKitTools/Scripts/webkitpy/style/checker_unittest.py | 677
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/error_handlers.py | 154
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py | 163
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/processors/__init__.py | 1
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/processors/cpp.py | 3007
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py | 3706
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/processors/text.py | 56
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py | 94
-rw-r--r--  WebKitTools/Scripts/webkitpy/style/unittests.py | 41
-rw-r--r--  WebKitTools/Scripts/webkitpy/style_references.py | 72
-rw-r--r--  WebKitTools/Scripts/webkitpy/user.py | 58
-rw-r--r--  WebKitTools/Scripts/webkitpy/user.pyc | bin 0 -> 1965 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkit_logging.py | 85
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkit_logging.pyc | bin 0 -> 3160 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py | 61
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkitport.py | 166
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkitport.pyc | bin 0 -> 7007 bytes
-rw-r--r--  WebKitTools/Scripts/webkitpy/webkitport_unittest.py | 69
184 files changed, 29015 insertions, 0 deletions
diff --git a/WebKitTools/Scripts/webkitpy/BeautifulSoup.py b/WebKitTools/Scripts/webkitpy/BeautifulSoup.py
new file mode 100644
index 0000000..34204e7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/BeautifulSoup.py
@@ -0,0 +1,2000 @@
+"""Beautiful Soup
+Elixir and Tonic
+"The Screen-Scraper's Friend"
+http://www.crummy.com/software/BeautifulSoup/
+
+Beautiful Soup parses a (possibly invalid) XML or HTML document into a
+tree representation. It provides methods and Pythonic idioms that make
+it easy to navigate, search, and modify the tree.
+
+A well-formed XML/HTML document yields a well-formed data
+structure. An ill-formed XML/HTML document yields a correspondingly
+ill-formed data structure. If your document is only locally
+well-formed, you can use this library to find and process the
+well-formed part of it.
+
+Beautiful Soup works with Python 2.2 and up. It has no external
+dependencies, but you'll have more success at converting data to UTF-8
+if you also install these three packages:
+
+* chardet, for auto-detecting character encodings
+ http://chardet.feedparser.org/
+* cjkcodecs and iconv_codec, which add more encodings to the ones supported
+ by stock Python.
+ http://cjkpython.i18n.org/
+
+Beautiful Soup defines classes for two main parsing strategies:
+
+ * BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
+ language that kind of looks like XML.
+
+ * BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
+ or invalid. This class has web browser-like heuristics for
+ obtaining a sensible parse tree in the face of common HTML errors.
+
+Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
+the encoding of an HTML or XML document, and converting it to
+Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
+
+For more than you ever wanted to know about Beautiful Soup, see the
+documentation:
+http://www.crummy.com/software/BeautifulSoup/documentation.html
+
+Here, have some legalese:
+
+Copyright (c) 2004-2009, Leonard Richardson
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ * Neither the name of the the Beautiful Soup Consortium and All
+ Night Kosher Bakery nor the names of its contributors may be
+ used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
+
+"""
+from __future__ import generators
+
+__author__ = "Leonard Richardson (leonardr@segfault.org)"
+__version__ = "3.1.0.1"
+__copyright__ = "Copyright (c) 2004-2009 Leonard Richardson"
+__license__ = "New-style BSD"
+
+import codecs
+import markupbase
+import types
+import re
+from HTMLParser import HTMLParser, HTMLParseError
+try:
+ from htmlentitydefs import name2codepoint
+except ImportError:
+ name2codepoint = {}
+try:
+ set
+except NameError:
+ from sets import Set as set
+
+#These hacks make Beautiful Soup able to parse XML with namespaces
+markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
+
+DEFAULT_OUTPUT_ENCODING = "utf-8"
+
+# First, the classes that represent markup elements.
+
+def sob(unicode, encoding):
+ """Returns either the given Unicode string or its encoding."""
+ if encoding is None:
+ return unicode
+ else:
+ return unicode.encode(encoding)
+
+class PageElement:
+ """Contains the navigational information for some part of the page
+ (either a tag or a piece of text)"""
+
+ def setup(self, parent=None, previous=None):
+ """Sets up the initial relations between this element and
+ other elements."""
+ self.parent = parent
+ self.previous = previous
+ self.next = None
+ self.previousSibling = None
+ self.nextSibling = None
+ if self.parent and self.parent.contents:
+ self.previousSibling = self.parent.contents[-1]
+ self.previousSibling.nextSibling = self
+
+ def replaceWith(self, replaceWith):
+ oldParent = self.parent
+ myIndex = self.parent.contents.index(self)
+ if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
+ # We're replacing this element with one of its siblings.
+ index = self.parent.contents.index(replaceWith)
+ if index and index < myIndex:
+ # Furthermore, it comes before this element. That
+ # means that when we extract it, the index of this
+ # element will change.
+ myIndex = myIndex - 1
+ self.extract()
+ oldParent.insert(myIndex, replaceWith)
+
+ def extract(self):
+ """Destructively rips this element out of the tree."""
+ if self.parent:
+ try:
+ self.parent.contents.remove(self)
+ except ValueError:
+ pass
+
+ #Find the two elements that would be next to each other if
+ #this element (and any children) hadn't been parsed. Connect
+ #the two.
+ lastChild = self._lastRecursiveChild()
+ nextElement = lastChild.next
+
+ if self.previous:
+ self.previous.next = nextElement
+ if nextElement:
+ nextElement.previous = self.previous
+ self.previous = None
+ lastChild.next = None
+
+ self.parent = None
+ if self.previousSibling:
+ self.previousSibling.nextSibling = self.nextSibling
+ if self.nextSibling:
+ self.nextSibling.previousSibling = self.previousSibling
+ self.previousSibling = self.nextSibling = None
+ return self
+
+ def _lastRecursiveChild(self):
+ "Finds the last element beneath this object to be parsed."
+ lastChild = self
+ while hasattr(lastChild, 'contents') and lastChild.contents:
+ lastChild = lastChild.contents[-1]
+ return lastChild
+
+ def insert(self, position, newChild):
+ if (isinstance(newChild, basestring)
+ or isinstance(newChild, unicode)) \
+ and not isinstance(newChild, NavigableString):
+ newChild = NavigableString(newChild)
+
+ position = min(position, len(self.contents))
+ if hasattr(newChild, 'parent') and newChild.parent != None:
+ # We're 'inserting' an element that's already one
+ # of this object's children.
+ if newChild.parent == self:
+ index = self.find(newChild)
+ if index and index < position:
+ # Furthermore we're moving it further down the
+ # list of this object's children. That means that
+ # when we extract this element, our target index
+ # will jump down one.
+ position = position - 1
+ newChild.extract()
+
+ newChild.parent = self
+ previousChild = None
+ if position == 0:
+ newChild.previousSibling = None
+ newChild.previous = self
+ else:
+ previousChild = self.contents[position-1]
+ newChild.previousSibling = previousChild
+ newChild.previousSibling.nextSibling = newChild
+ newChild.previous = previousChild._lastRecursiveChild()
+ if newChild.previous:
+ newChild.previous.next = newChild
+
+ newChildsLastElement = newChild._lastRecursiveChild()
+
+ if position >= len(self.contents):
+ newChild.nextSibling = None
+
+ parent = self
+ parentsNextSibling = None
+ while not parentsNextSibling:
+ parentsNextSibling = parent.nextSibling
+ parent = parent.parent
+ if not parent: # This is the last element in the document.
+ break
+ if parentsNextSibling:
+ newChildsLastElement.next = parentsNextSibling
+ else:
+ newChildsLastElement.next = None
+ else:
+ nextChild = self.contents[position]
+ newChild.nextSibling = nextChild
+ if newChild.nextSibling:
+ newChild.nextSibling.previousSibling = newChild
+ newChildsLastElement.next = nextChild
+
+ if newChildsLastElement.next:
+ newChildsLastElement.next.previous = newChildsLastElement
+ self.contents.insert(position, newChild)
+
+ def append(self, tag):
+ """Appends the given tag to the contents of this tag."""
+ self.insert(len(self.contents), tag)
+
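
A small sketch of the tree-modification helpers above (append, replaceWith, extract); the markup here is made up for illustration:

    from BeautifulSoup import BeautifulSoup, Tag, NavigableString

    soup = BeautifulSoup("<ul><li>old</li></ul><b>bold</b>")
    item = Tag(soup, 'li')                        # the soup supplies parser settings
    item.append(NavigableString('new entry'))
    soup.ul.append(item)                          # add a second <li>
    soup.b.replaceWith(NavigableString('plain'))  # swap a tag for bare text
    removed = soup.find('li').extract()           # detach the first <li>, keep a handle
    print soup                                    # the modified document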
+ def findNext(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the first item that matches the given criteria and
+ appears after this Tag in the document."""
+ return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
+
+ def findAllNext(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns all items that match the given criteria and appear
+ after this Tag in the document."""
+ return self._findAll(name, attrs, text, limit, self.nextGenerator,
+ **kwargs)
+
+ def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the closest sibling to this Tag that matches the
+ given criteria and appears after this Tag in the document."""
+ return self._findOne(self.findNextSiblings, name, attrs, text,
+ **kwargs)
+
+ def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns the siblings of this Tag that match the given
+ criteria and appear after this Tag in the document."""
+ return self._findAll(name, attrs, text, limit,
+ self.nextSiblingGenerator, **kwargs)
+ fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
+
+ def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the first item that matches the given criteria and
+ appears before this Tag in the document."""
+ return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
+
+ def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
+ **kwargs):
+ """Returns all items that match the given criteria and appear
+ before this Tag in the document."""
+ return self._findAll(name, attrs, text, limit, self.previousGenerator,
+ **kwargs)
+ fetchPrevious = findAllPrevious # Compatibility with pre-3.x
+
+ def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
+ """Returns the closest sibling to this Tag that matches the
+ given criteria and appears before this Tag in the document."""
+ return self._findOne(self.findPreviousSiblings, name, attrs, text,
+ **kwargs)
+
+ def findPreviousSiblings(self, name=None, attrs={}, text=None,
+ limit=None, **kwargs):
+ """Returns the siblings of this Tag that match the given
+ criteria and appear before this Tag in the document."""
+ return self._findAll(name, attrs, text, limit,
+ self.previousSiblingGenerator, **kwargs)
+ fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
+
+ def findParent(self, name=None, attrs={}, **kwargs):
+ """Returns the closest parent of this Tag that matches the given
+ criteria."""
+ # NOTE: We can't use _findOne because findParents takes a different
+ # set of arguments.
+ r = None
+ l = self.findParents(name, attrs, 1)
+ if l:
+ r = l[0]
+ return r
+
+ def findParents(self, name=None, attrs={}, limit=None, **kwargs):
+ """Returns the parents of this Tag that match the given
+ criteria."""
+
+ return self._findAll(name, attrs, None, limit, self.parentGenerator,
+ **kwargs)
+ fetchParents = findParents # Compatibility with pre-3.x
+
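An illustrative use of the navigation helpers defined above, on a made-up document:

    from BeautifulSoup import BeautifulSoup

    soup = BeautifulSoup("<div>intro<a href='x'>link</a><p>one</p><p>two</p></div>")
    link = soup.find('a')
    print link.findNext('p')            # first <p> anywhere after the link
    print link.findNextSibling('p')     # next <p> at the same nesting level
    print link.findParent('div')        # the enclosing <div>
    print link.findPrevious(text=True)  # nearest preceding text node: u'intro'
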
+ #These methods do the real heavy lifting.
+
+ def _findOne(self, method, name, attrs, text, **kwargs):
+ r = None
+ l = method(name, attrs, text, 1, **kwargs)
+ if l:
+ r = l[0]
+ return r
+
+ def _findAll(self, name, attrs, text, limit, generator, **kwargs):
+ "Iterates over a generator looking for things that match."
+
+ if isinstance(name, SoupStrainer):
+ strainer = name
+ else:
+ # Build a SoupStrainer
+ strainer = SoupStrainer(name, attrs, text, **kwargs)
+ results = ResultSet(strainer)
+ g = generator()
+ while True:
+ try:
+ i = g.next()
+ except StopIteration:
+ break
+ if i:
+ found = strainer.search(i)
+ if found:
+ results.append(found)
+ if limit and len(results) >= limit:
+ break
+ return results
+
+ #These Generators can be used to navigate starting from both
+ #NavigableStrings and Tags.
+ def nextGenerator(self):
+ i = self
+ while i:
+ i = i.next
+ yield i
+
+ def nextSiblingGenerator(self):
+ i = self
+ while i:
+ i = i.nextSibling
+ yield i
+
+ def previousGenerator(self):
+ i = self
+ while i:
+ i = i.previous
+ yield i
+
+ def previousSiblingGenerator(self):
+ i = self
+ while i:
+ i = i.previousSibling
+ yield i
+
+ def parentGenerator(self):
+ i = self
+ while i:
+ i = i.parent
+ yield i
+
+ # Utility methods
+ def substituteEncoding(self, str, encoding=None):
+ encoding = encoding or "utf-8"
+ return str.replace("%SOUP-ENCODING%", encoding)
+
+ def toEncoding(self, s, encoding=None):
+ """Encodes an object to a string in some encoding, or to Unicode.
+ ."""
+ if isinstance(s, unicode):
+ if encoding:
+ s = s.encode(encoding)
+ elif isinstance(s, str):
+ if encoding:
+ s = s.encode(encoding)
+ else:
+ s = unicode(s)
+ else:
+ if encoding:
+ s = self.toEncoding(str(s), encoding)
+ else:
+ s = unicode(s)
+ return s
+
+class NavigableString(unicode, PageElement):
+
+ def __new__(cls, value):
+ """Create a new NavigableString.
+
+ When unpickling a NavigableString, this method is called with
+ the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
+ passed in to the superclass's __new__ or the superclass won't know
+ how to handle non-ASCII characters.
+ """
+ if isinstance(value, unicode):
+ return unicode.__new__(cls, value)
+ return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+
+ def __getnewargs__(self):
+ return (unicode(self),)
+
+ def __getattr__(self, attr):
+ """text.string gives you text. This is for backwards
+ compatibility for Navigable*String, but for CData* it lets you
+ get the string without the CData wrapper."""
+ if attr == 'string':
+ return self
+ else:
+ raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
+
+ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return self.decode().encode(encoding)
+
+ def decodeGivenEventualEncoding(self, eventualEncoding):
+ return self
+
+class CData(NavigableString):
+
+ def decodeGivenEventualEncoding(self, eventualEncoding):
+ return u'<![CDATA[' + self + u']]>'
+
+class ProcessingInstruction(NavigableString):
+
+ def decodeGivenEventualEncoding(self, eventualEncoding):
+ output = self
+ if u'%SOUP-ENCODING%' in output:
+ output = self.substituteEncoding(output, eventualEncoding)
+ return u'<?' + output + u'?>'
+
+class Comment(NavigableString):
+ def decodeGivenEventualEncoding(self, eventualEncoding):
+ return u'<!--' + self + u'-->'
+
+class Declaration(NavigableString):
+ def decodeGivenEventualEncoding(self, eventualEncoding):
+ return u'<!' + self + u'>'
+
+class Tag(PageElement):
+
+ """Represents a found HTML tag with its attributes and contents."""
+
+ def _invert(h):
+ "Cheap function to invert a hash."
+ i = {}
+ for k,v in h.items():
+ i[v] = k
+ return i
+
+ XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
+ "quot" : '"',
+ "amp" : "&",
+ "lt" : "<",
+ "gt" : ">" }
+
+ XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
+
+ def _convertEntities(self, match):
+ """Used in a call to re.sub to replace HTML, XML, and numeric
+ entities with the appropriate Unicode characters. If HTML
+ entities are being converted, any unrecognized entities are
+ escaped."""
+ x = match.group(1)
+ if self.convertHTMLEntities and x in name2codepoint:
+ return unichr(name2codepoint[x])
+ elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
+ if self.convertXMLEntities:
+ return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
+ else:
+ return u'&%s;' % x
+ elif len(x) > 0 and x[0] == '#':
+ # Handle numeric entities
+ if len(x) > 1 and x[1] == 'x':
+ return unichr(int(x[2:], 16))
+ else:
+ return unichr(int(x[1:]))
+
+ elif self.escapeUnrecognizedEntities:
+ return u'&amp;%s;' % x
+ else:
+ return u'&%s;' % x
+
+ def __init__(self, parser, name, attrs=None, parent=None,
+ previous=None):
+ "Basic constructor."
+
+ # We don't actually store the parser object: that lets extracted
+ # chunks be garbage-collected
+ self.parserClass = parser.__class__
+ self.isSelfClosing = parser.isSelfClosingTag(name)
+ self.name = name
+ if attrs == None:
+ attrs = []
+ self.attrs = attrs
+ self.contents = []
+ self.setup(parent, previous)
+ self.hidden = False
+ self.containsSubstitutions = False
+ self.convertHTMLEntities = parser.convertHTMLEntities
+ self.convertXMLEntities = parser.convertXMLEntities
+ self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
+
+ def convert(kval):
+ "Converts HTML, XML and numeric entities in the attribute value."
+ k, val = kval
+ if val is None:
+ return kval
+ return (k, re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
+ self._convertEntities, val))
+ self.attrs = map(convert, self.attrs)
+
+ def get(self, key, default=None):
+ """Returns the value of the 'key' attribute for the tag, or
+ the value given for 'default' if it doesn't have that
+ attribute."""
+ return self._getAttrMap().get(key, default)
+
+ def has_key(self, key):
+ return self._getAttrMap().has_key(key)
+
+ def __getitem__(self, key):
+ """tag[key] returns the value of the 'key' attribute for the tag,
+ and throws an exception if it's not there."""
+ return self._getAttrMap()[key]
+
+ def __iter__(self):
+ "Iterating over a tag iterates over its contents."
+ return iter(self.contents)
+
+ def __len__(self):
+ "The length of a tag is the length of its list of contents."
+ return len(self.contents)
+
+ def __contains__(self, x):
+ return x in self.contents
+
+ def __nonzero__(self):
+ "A tag is non-None even if it has no contents."
+ return True
+
+ def __setitem__(self, key, value):
+ """Setting tag[key] sets the value of the 'key' attribute for the
+ tag."""
+ self._getAttrMap()
+ self.attrMap[key] = value
+ found = False
+ for i in range(0, len(self.attrs)):
+ if self.attrs[i][0] == key:
+ self.attrs[i] = (key, value)
+ found = True
+ if not found:
+ self.attrs.append((key, value))
+ self._getAttrMap()[key] = value
+
+ def __delitem__(self, key):
+ "Deleting tag[key] deletes all 'key' attributes for the tag."
+ for item in self.attrs:
+ if item[0] == key:
+ self.attrs.remove(item)
+ #We don't break because bad HTML can define the same
+ #attribute multiple times.
+ self._getAttrMap()
+ if self.attrMap.has_key(key):
+ del self.attrMap[key]
+
+ def __call__(self, *args, **kwargs):
+ """Calling a tag like a function is the same as calling its
+ findAll() method. Eg. tag('a') returns a list of all the A tags
+ found within this tag."""
+ return apply(self.findAll, args, kwargs)
+
+ def __getattr__(self, tag):
+ #print "Getattr %s.%s" % (self.__class__, tag)
+ if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
+ return self.find(tag[:-3])
+ elif tag.find('__') != 0:
+ return self.find(tag)
+ raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
+
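A short sketch of the dictionary-style and attribute-style access defined above, on a made-up tag:

    from BeautifulSoup import BeautifulSoup

    soup = BeautifulSoup('<body><a href="http://example.com/" id="home">home</a></body>')
    link = soup.find('a')
    print link['href']            # the attribute value (KeyError if absent)
    print link.get('class', '-')  # lookup with a default
    print link.has_key('id')      # True
    print link('img')             # [] : same as link.findAll('img')
    print soup.body.aTag          # first <a> inside <body>, via __getattr__
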
+ def __eq__(self, other):
+ """Returns true iff this tag has the same name, the same attributes,
+ and the same contents (recursively) as the given tag.
+
+ NOTE: right now this will return false if two tags have the
+ same attributes in a different order. Should this be fixed?"""
+ if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
+ return False
+ for i in range(0, len(self.contents)):
+ if self.contents[i] != other.contents[i]:
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Returns true iff this tag is not identical to the other tag,
+ as defined in __eq__."""
+ return not self == other
+
+ def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ """Renders this tag as a string."""
+ return self.decode(eventualEncoding=encoding)
+
+ BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ + "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ + ")")
+
+ def _sub_entity(self, x):
+ """Used with a regular expression to substitute the
+ appropriate XML entity for an XML special character."""
+ return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
+
+ def __unicode__(self):
+ return self.decode()
+
+ def __str__(self):
+ return self.encode()
+
+ def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
+ prettyPrint=False, indentLevel=0):
+ return self.decode(prettyPrint, indentLevel, encoding).encode(encoding)
+
+ def decode(self, prettyPrint=False, indentLevel=0,
+ eventualEncoding=DEFAULT_OUTPUT_ENCODING):
+ """Returns a string or Unicode representation of this tag and
+ its contents. To get Unicode, pass None for encoding."""
+
+ attrs = []
+ if self.attrs:
+ for key, val in self.attrs:
+ fmt = '%s="%s"'
+ if isString(val):
+ if (self.containsSubstitutions
+ and eventualEncoding is not None
+ and '%SOUP-ENCODING%' in val):
+ val = self.substituteEncoding(val, eventualEncoding)
+
+ # The attribute value either:
+ #
+ # * Contains no embedded double quotes or single quotes.
+ # No problem: we enclose it in double quotes.
+ # * Contains embedded single quotes. No problem:
+ # double quotes work here too.
+ # * Contains embedded double quotes. No problem:
+ # we enclose it in single quotes.
+ # * Embeds both single _and_ double quotes. This
+ # can't happen naturally, but it can happen if
+ # you modify an attribute value after parsing
+ # the document. Now we have a bit of a
+ # problem. We solve it by enclosing the
+ # attribute in single quotes, and escaping any
+ # embedded single quotes to XML entities.
+ if '"' in val:
+ fmt = "%s='%s'"
+ if "'" in val:
+ # TODO: replace with apos when
+ # appropriate.
+ val = val.replace("'", "&squot;")
+
+ # Now we're okay w/r/t quotes. But the attribute
+ # value might also contain angle brackets, or
+ # ampersands that aren't part of entities. We need
+ # to escape those to XML entities too.
+ val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
+ if val is None:
+ # Handle boolean attributes.
+ decoded = key
+ else:
+ decoded = fmt % (key, val)
+ attrs.append(decoded)
+ close = ''
+ closeTag = ''
+ if self.isSelfClosing:
+ close = ' /'
+ else:
+ closeTag = '</%s>' % self.name
+
+ indentTag, indentContents = 0, 0
+ if prettyPrint:
+ indentTag = indentLevel
+ space = (' ' * (indentTag-1))
+ indentContents = indentTag + 1
+ contents = self.decodeContents(prettyPrint, indentContents,
+ eventualEncoding)
+ if self.hidden:
+ s = contents
+ else:
+ s = []
+ attributeString = ''
+ if attrs:
+ attributeString = ' ' + ' '.join(attrs)
+ if prettyPrint:
+ s.append(space)
+ s.append('<%s%s%s>' % (self.name, attributeString, close))
+ if prettyPrint:
+ s.append("\n")
+ s.append(contents)
+ if prettyPrint and contents and contents[-1] != "\n":
+ s.append("\n")
+ if prettyPrint and closeTag:
+ s.append(space)
+ s.append(closeTag)
+ if prettyPrint and closeTag and self.nextSibling:
+ s.append("\n")
+ s = ''.join(s)
+ return s
+
+ def decompose(self):
+ """Recursively destroys the contents of this tree."""
+ contents = [i for i in self.contents]
+ for i in contents:
+ if isinstance(i, Tag):
+ i.decompose()
+ else:
+ i.extract()
+ self.extract()
+
+ def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
+ return self.encode(encoding, True)
+
+ def encodeContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
+ prettyPrint=False, indentLevel=0):
+ return self.decodeContents(prettyPrint, indentLevel).encode(encoding)
+
+ def decodeContents(self, prettyPrint=False, indentLevel=0,
+ eventualEncoding=DEFAULT_OUTPUT_ENCODING):
+ """Renders the contents of this tag as a string in the given
+ encoding. If encoding is None, returns a Unicode string.."""
+ s=[]
+ for c in self:
+ text = None
+ if isinstance(c, NavigableString):
+ text = c.decodeGivenEventualEncoding(eventualEncoding)
+ elif isinstance(c, Tag):
+ s.append(c.decode(prettyPrint, indentLevel, eventualEncoding))
+ if text and prettyPrint:
+ text = text.strip()
+ if text:
+ if prettyPrint:
+ s.append(" " * (indentLevel-1))
+ s.append(text)
+ if prettyPrint:
+ s.append("\n")
+ return ''.join(s)
+
+ #Soup methods
+
+ def find(self, name=None, attrs={}, recursive=True, text=None,
+ **kwargs):
+ """Return only the first child of this Tag matching the given
+ criteria."""
+ r = None
+ l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
+ if l:
+ r = l[0]
+ return r
+ findChild = find
+
+ def findAll(self, name=None, attrs={}, recursive=True, text=None,
+ limit=None, **kwargs):
+ """Extracts a list of Tag objects that match the given
+ criteria. You can specify the name of the Tag and any
+ attributes you want the Tag to have.
+
+ The value of a key-value pair in the 'attrs' map can be a
+ string, a list of strings, a regular expression object, or a
+ callable that takes a string and returns whether or not the
+ string matches for some custom definition of 'matches'. The
+ same is true of the tag name."""
+ generator = self.recursiveChildGenerator
+ if not recursive:
+ generator = self.childGenerator
+ return self._findAll(name, attrs, text, limit, generator, **kwargs)
+ findChildren = findAll
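
Illustrative calls for the matching rules described in the docstring above; the input file name and attribute values are hypothetical:

    import re
    from BeautifulSoup import BeautifulSoup

    soup = BeautifulSoup(open('page.html').read())        # hypothetical input file
    soup.findAll('a')                                     # by tag name
    soup.findAll(['h1', 'h2', 'h3'])                      # any name in a list
    soup.findAll(re.compile('^h[1-6]$'))                  # name matching a regexp
    soup.findAll('a', {'class': 'external'})              # by attribute value
    soup.findAll('a', href=lambda v: v and v.startswith('http'))  # callable test
    soup.findAll(text=re.compile('WebKit'), limit=5)      # first five matching text nodes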
+
+ # Pre-3.x compatibility methods. Will go away in 4.0.
+ first = find
+ fetch = findAll
+
+ def fetchText(self, text=None, recursive=True, limit=None):
+ return self.findAll(text=text, recursive=recursive, limit=limit)
+
+ def firstText(self, text=None, recursive=True):
+ return self.find(text=text, recursive=recursive)
+
+ # 3.x compatibility methods. Will go away in 4.0.
+ def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
+ prettyPrint=False, indentLevel=0):
+ if encoding is None:
+ return self.decodeContents(prettyPrint, indentLevel, encoding)
+ else:
+ return self.encodeContents(encoding, prettyPrint, indentLevel)
+
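A brief sketch of the rendering helpers above, on made-up markup:

    from BeautifulSoup import BeautifulSoup

    soup = BeautifulSoup("<div><p>Hello</p></div>")
    tag = soup.div
    print tag.prettify()                     # indented, UTF-8 encoded output
    print unicode(tag)                       # one-line Unicode rendering
    print tag.renderContents(encoding=None)  # children only, as Unicode
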
+
+ #Private methods
+
+ def _getAttrMap(self):
+ """Initializes a map representation of this tag's attributes,
+ if not already initialized."""
+ if not getattr(self, 'attrMap'):
+ self.attrMap = {}
+ for (key, value) in self.attrs:
+ self.attrMap[key] = value
+ return self.attrMap
+
+ #Generator methods
+ def recursiveChildGenerator(self):
+ if not len(self.contents):
+ raise StopIteration
+ stopNode = self._lastRecursiveChild().next
+ current = self.contents[0]
+ while current is not stopNode:
+ yield current
+ current = current.next
+
+ def childGenerator(self):
+ if not len(self.contents):
+ raise StopIteration
+ current = self.contents[0]
+ while current:
+ yield current
+ current = current.nextSibling
+ raise StopIteration
+
+# Next, a couple classes to represent queries and their results.
+class SoupStrainer:
+ """Encapsulates a number of ways of matching a markup element (tag or
+ text)."""
+
+ def __init__(self, name=None, attrs={}, text=None, **kwargs):
+ self.name = name
+ if isString(attrs):
+ kwargs['class'] = attrs
+ attrs = None
+ if kwargs:
+ if attrs:
+ attrs = attrs.copy()
+ attrs.update(kwargs)
+ else:
+ attrs = kwargs
+ self.attrs = attrs
+ self.text = text
+
+ def __str__(self):
+ if self.text:
+ return self.text
+ else:
+ return "%s|%s" % (self.name, self.attrs)
+
+ def searchTag(self, markupName=None, markupAttrs={}):
+ found = None
+ markup = None
+ if isinstance(markupName, Tag):
+ markup = markupName
+ markupAttrs = markup
+ callFunctionWithTagData = callable(self.name) \
+ and not isinstance(markupName, Tag)
+
+ if (not self.name) \
+ or callFunctionWithTagData \
+ or (markup and self._matches(markup, self.name)) \
+ or (not markup and self._matches(markupName, self.name)):
+ if callFunctionWithTagData:
+ match = self.name(markupName, markupAttrs)
+ else:
+ match = True
+ markupAttrMap = None
+ for attr, matchAgainst in self.attrs.items():
+ if not markupAttrMap:
+ if hasattr(markupAttrs, 'get'):
+ markupAttrMap = markupAttrs
+ else:
+ markupAttrMap = {}
+ for k,v in markupAttrs:
+ markupAttrMap[k] = v
+ attrValue = markupAttrMap.get(attr)
+ if not self._matches(attrValue, matchAgainst):
+ match = False
+ break
+ if match:
+ if markup:
+ found = markup
+ else:
+ found = markupName
+ return found
+
+ def search(self, markup):
+ #print 'looking for %s in %s' % (self, markup)
+ found = None
+ # If given a list of items, scan it for a text element that
+ # matches.
+ if isList(markup) and not isinstance(markup, Tag):
+ for element in markup:
+ if isinstance(element, NavigableString) \
+ and self.search(element):
+ found = element
+ break
+ # If it's a Tag, make sure its name or attributes match.
+ # Don't bother with Tags if we're searching for text.
+ elif isinstance(markup, Tag):
+ if not self.text:
+ found = self.searchTag(markup)
+ # If it's text, make sure the text matches.
+ elif isinstance(markup, NavigableString) or \
+ isString(markup):
+ if self._matches(markup, self.text):
+ found = markup
+ else:
+ raise Exception, "I don't know how to match against a %s" \
+ % markup.__class__
+ return found
+
+ def _matches(self, markup, matchAgainst):
+ #print "Matching %s against %s" % (markup, matchAgainst)
+ result = False
+ if matchAgainst == True and type(matchAgainst) == types.BooleanType:
+ result = markup != None
+ elif callable(matchAgainst):
+ result = matchAgainst(markup)
+ else:
+ #Custom match methods take the tag as an argument, but all
+ #other ways of matching match the tag name as a string.
+ if isinstance(markup, Tag):
+ markup = markup.name
+ if markup is not None and not isString(markup):
+ markup = unicode(markup)
+ #Now we know that chunk is either a string, or None.
+ if hasattr(matchAgainst, 'match'):
+ # It's a regexp object.
+ result = markup and matchAgainst.search(markup)
+ elif (isList(matchAgainst)
+ and (markup is not None or not isString(matchAgainst))):
+ result = markup in matchAgainst
+ elif hasattr(matchAgainst, 'items'):
+ result = markup.has_key(matchAgainst)
+ elif matchAgainst and isString(markup):
+ if isinstance(markup, unicode):
+ matchAgainst = unicode(matchAgainst)
+ else:
+ matchAgainst = str(matchAgainst)
+
+ if not result:
+ result = matchAgainst == markup
+ return result
+
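A sketch of how a SoupStrainer is typically combined with the parser and search code in this file; the markup string is made up:

    import re
    from BeautifulSoup import BeautifulSoup, SoupStrainer

    markup = '<p>intro</p><a href="http://a.example/">a</a><a href="/local">b</a>'
    links = SoupStrainer('a', href=re.compile('^https?://'))

    # Reuse the same criteria against an already-built tree...
    print BeautifulSoup(markup).findAll(links)

    # ...or hand it to the constructor so only matching tags are parsed at all.
    partial = BeautifulSoup(markup, parseOnlyThese=links)
    print partial.findAll('a')
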
+class ResultSet(list):
+ """A ResultSet is just a list that keeps track of the SoupStrainer
+ that created it."""
+ def __init__(self, source):
+ list.__init__([])
+ self.source = source
+
+# Now, some helper functions.
+
+def isList(l):
+ """Convenience method that works with all 2.x versions of Python
+ to determine whether or not something is listlike."""
+ return ((hasattr(l, '__iter__') and not isString(l))
+ or (type(l) in (types.ListType, types.TupleType)))
+
+def isString(s):
+ """Convenience method that works with all 2.x versions of Python
+ to determine whether or not something is stringlike."""
+ try:
+ return isinstance(s, unicode) or isinstance(s, basestring)
+ except NameError:
+ return isinstance(s, str)
+
+def buildTagMap(default, *args):
+ """Turns a list of maps, lists, or scalars into a single map.
+ Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
+ NESTING_RESET_TAGS maps out of lists and partial maps."""
+ built = {}
+ for portion in args:
+ if hasattr(portion, 'items'):
+ #It's a map. Merge it.
+ for k,v in portion.items():
+ built[k] = v
+ elif isList(portion) and not isString(portion):
+ #It's a list. Map each item to the default.
+ for k in portion:
+ built[k] = default
+ else:
+ #It's a scalar. Map it to the default.
+ built[portion] = default
+ return built
+
+# Now, the parser classes.
+
+class HTMLParserBuilder(HTMLParser):
+
+ def __init__(self, soup):
+ HTMLParser.__init__(self)
+ self.soup = soup
+
+ # We inherit feed() and reset().
+
+ def handle_starttag(self, name, attrs):
+ if name == 'meta':
+ self.soup.extractCharsetFromMeta(attrs)
+ else:
+ self.soup.unknown_starttag(name, attrs)
+
+ def handle_endtag(self, name):
+ self.soup.unknown_endtag(name)
+
+ def handle_data(self, content):
+ self.soup.handle_data(content)
+
+ def _toStringSubclass(self, text, subclass):
+ """Adds a certain piece of text to the tree as a NavigableString
+ subclass."""
+ self.soup.endData()
+ self.handle_data(text)
+ self.soup.endData(subclass)
+
+ def handle_pi(self, text):
+ """Handle a processing instruction as a ProcessingInstruction
+ object, possibly one with a %SOUP-ENCODING% slot into which an
+ encoding will be plugged later."""
+ if text[:3] == "xml":
+ text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
+ self._toStringSubclass(text, ProcessingInstruction)
+
+ def handle_comment(self, text):
+ "Handle comments as Comment objects."
+ self._toStringSubclass(text, Comment)
+
+ def handle_charref(self, ref):
+ "Handle character references as data."
+ if self.soup.convertEntities:
+ data = unichr(int(ref))
+ else:
+ data = '&#%s;' % ref
+ self.handle_data(data)
+
+ def handle_entityref(self, ref):
+ """Handle entity references as data, possibly converting known
+ HTML and/or XML entity references to the corresponding Unicode
+ characters."""
+ data = None
+ if self.soup.convertHTMLEntities:
+ try:
+ data = unichr(name2codepoint[ref])
+ except KeyError:
+ pass
+
+ if not data and self.soup.convertXMLEntities:
+ data = self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
+
+ if not data and self.soup.convertHTMLEntities and \
+ not self.soup.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
+ # TODO: We've got a problem here. We're told this is
+ # an entity reference, but it's not an XML entity
+ # reference or an HTML entity reference. Nonetheless,
+ # the logical thing to do is to pass it through as an
+ # unrecognized entity reference.
+ #
+ # Except: when the input is "&carol;" this function
+ # will be called with input "carol". When the input is
+ # "AT&T", this function will be called with input
+ # "T". We have no way of knowing whether a semicolon
+ # was present originally, so we don't know whether
+ # this is an unknown entity or just a misplaced
+ # ampersand.
+ #
+ # The more common case is a misplaced ampersand, so I
+ # escape the ampersand and omit the trailing semicolon.
+ data = "&amp;%s" % ref
+ if not data:
+ # This case is different from the one above, because we
+ # haven't already gone through a supposedly comprehensive
+ # mapping of entities to Unicode characters. We might not
+ # have gone through any mapping at all. So the chances are
+ # very high that this is a real entity, and not a
+ # misplaced ampersand.
+ data = "&%s;" % ref
+ self.handle_data(data)
+
+ def handle_decl(self, data):
+ "Handle DOCTYPEs and the like as Declaration objects."
+ self._toStringSubclass(data, Declaration)
+
+ def parse_declaration(self, i):
+ """Treat a bogus SGML declaration as raw data. Treat a CDATA
+ declaration as a CData object."""
+ j = None
+ if self.rawdata[i:i+9] == '<![CDATA[':
+ k = self.rawdata.find(']]>', i)
+ if k == -1:
+ k = len(self.rawdata)
+ data = self.rawdata[i+9:k]
+ j = k+3
+ self._toStringSubclass(data, CData)
+ else:
+ try:
+ j = HTMLParser.parse_declaration(self, i)
+ except HTMLParseError:
+ toHandle = self.rawdata[i:]
+ self.handle_data(toHandle)
+ j = i + len(toHandle)
+ return j
+
+
+class BeautifulStoneSoup(Tag):
+
+ """This class contains the basic parser and search code. It defines
+ a parser that knows nothing about tag behavior except for the
+ following:
+
+ You can't close a tag without closing all the tags it encloses.
+ That is, "<foo><bar></foo>" actually means
+ "<foo><bar></bar></foo>".
+
+ [Another possible explanation is "<foo><bar /></foo>", but since
+ this class defines no SELF_CLOSING_TAGS, it will never use that
+ explanation.]
+
+ This class is useful for parsing XML or made-up markup languages,
+ or when BeautifulSoup makes an assumption counter to what you were
+ expecting."""
+
+ SELF_CLOSING_TAGS = {}
+ NESTABLE_TAGS = {}
+ RESET_NESTING_TAGS = {}
+ QUOTE_TAGS = {}
+ PRESERVE_WHITESPACE_TAGS = []
+
+ MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
+ lambda x: x.group(1) + ' />'),
+ (re.compile('<!\s+([^<>]*)>'),
+ lambda x: '<!' + x.group(1) + '>')
+ ]
+
+ ROOT_TAG_NAME = u'[document]'
+
+ HTML_ENTITIES = "html"
+ XML_ENTITIES = "xml"
+ XHTML_ENTITIES = "xhtml"
+ # TODO: This only exists for backwards-compatibility
+ ALL_ENTITIES = XHTML_ENTITIES
+
+ # Used when determining whether a text node is all whitespace and
+ # can be replaced with a single space. A text node that contains
+ # fancy Unicode spaces (usually non-breaking) should be left
+ # alone.
+ STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
+
+ def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
+ markupMassage=True, smartQuotesTo=XML_ENTITIES,
+ convertEntities=None, selfClosingTags=None, isHTML=False,
+ builder=HTMLParserBuilder):
+ """The Soup object is initialized as the 'root tag', and the
+ provided markup (which can be a string or a file-like object)
+ is fed into the underlying parser.
+
+ HTMLParser will process most bad HTML, and the BeautifulSoup
+ class has some tricks for dealing with some HTML that kills
+ HTMLParser, but Beautiful Soup can nonetheless choke or lose data
+ if your data uses self-closing tags or declarations
+ incorrectly.
+
+ By default, Beautiful Soup uses regexes to sanitize input,
+ avoiding the vast majority of these problems. If the problems
+ don't apply to you, pass in False for markupMassage, and
+ you'll get better performance.
+
+ The default parser massage techniques fix the two most common
+ instances of invalid HTML that choke HTMLParser:
+
+ <br/> (No space between name of closing tag and tag close)
+ <! --Comment--> (Extraneous whitespace in declaration)
+
+ You can pass in a custom list of (RE object, replace method)
+ tuples to get Beautiful Soup to scrub your input the way you
+ want."""
+
+ self.parseOnlyThese = parseOnlyThese
+ self.fromEncoding = fromEncoding
+ self.smartQuotesTo = smartQuotesTo
+ self.convertEntities = convertEntities
+ # Set the rules for how we'll deal with the entities we
+ # encounter
+ if self.convertEntities:
+ # It doesn't make sense to convert encoded characters to
+ # entities even while you're converting entities to Unicode.
+ # Just convert it all to Unicode.
+ self.smartQuotesTo = None
+ if convertEntities == self.HTML_ENTITIES:
+ self.convertXMLEntities = False
+ self.convertHTMLEntities = True
+ self.escapeUnrecognizedEntities = True
+ elif convertEntities == self.XHTML_ENTITIES:
+ self.convertXMLEntities = True
+ self.convertHTMLEntities = True
+ self.escapeUnrecognizedEntities = False
+ elif convertEntities == self.XML_ENTITIES:
+ self.convertXMLEntities = True
+ self.convertHTMLEntities = False
+ self.escapeUnrecognizedEntities = False
+ else:
+ self.convertXMLEntities = False
+ self.convertHTMLEntities = False
+ self.escapeUnrecognizedEntities = False
+
+ self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
+ self.builder = builder(self)
+ self.reset()
+
+ if hasattr(markup, 'read'): # It's a file-type object.
+ markup = markup.read()
+ self.markup = markup
+ self.markupMassage = markupMassage
+ try:
+ self._feed(isHTML=isHTML)
+ except StopParsing:
+ pass
+ self.markup = None # The markup can now be GCed.
+ self.builder = None # So can the builder.
+
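A brief sketch of the constructor options documented above; the inputs are made up:

    from BeautifulSoup import BeautifulStoneSoup

    # Convert named and numeric HTML entities to Unicode while parsing.
    soup = BeautifulStoneSoup("&ldquo;Caf&eacute;&rdquo;",
                              convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
    print soup

    # Skip the regex-based markup massage when the input is known to be well formed.
    fast = BeautifulStoneSoup("<root><ok>fine</ok></root>", markupMassage=False)
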
+ def _feed(self, inDocumentEncoding=None, isHTML=False):
+ # Convert the document to Unicode.
+ markup = self.markup
+ if isinstance(markup, unicode):
+ if not hasattr(self, 'originalEncoding'):
+ self.originalEncoding = None
+ else:
+ dammit = UnicodeDammit\
+ (markup, [self.fromEncoding, inDocumentEncoding],
+ smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
+ markup = dammit.unicode
+ self.originalEncoding = dammit.originalEncoding
+ self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
+ if markup:
+ if self.markupMassage:
+ if not isList(self.markupMassage):
+ self.markupMassage = self.MARKUP_MASSAGE
+ for fix, m in self.markupMassage:
+ markup = fix.sub(m, markup)
+ # TODO: We get rid of markupMassage so that the
+ # soup object can be deepcopied later on. Some
+ # Python installations can't copy regexes. If anyone
+ # was relying on the existence of markupMassage, this
+ # might cause problems.
+ del(self.markupMassage)
+ self.builder.reset()
+
+ self.builder.feed(markup)
+ # Close out any unfinished strings and close all the open tags.
+ self.endData()
+ while self.currentTag.name != self.ROOT_TAG_NAME:
+ self.popTag()
+
+ def isSelfClosingTag(self, name):
+ """Returns true iff the given string is the name of a
+ self-closing tag according to this parser."""
+ return self.SELF_CLOSING_TAGS.has_key(name) \
+ or self.instanceSelfClosingTags.has_key(name)
+
+ def reset(self):
+ Tag.__init__(self, self, self.ROOT_TAG_NAME)
+ self.hidden = 1
+ self.builder.reset()
+ self.currentData = []
+ self.currentTag = None
+ self.tagStack = []
+ self.quoteStack = []
+ self.pushTag(self)
+
+ def popTag(self):
+ tag = self.tagStack.pop()
+ # Tags with just one string-owning child get the child as a
+ # 'string' property, so that soup.tag.string is shorthand for
+ # soup.tag.contents[0]
+ if len(self.currentTag.contents) == 1 and \
+ isinstance(self.currentTag.contents[0], NavigableString):
+ self.currentTag.string = self.currentTag.contents[0]
+
+ #print "Pop", tag.name
+ if self.tagStack:
+ self.currentTag = self.tagStack[-1]
+ return self.currentTag
+
+ def pushTag(self, tag):
+ #print "Push", tag.name
+ if self.currentTag:
+ self.currentTag.contents.append(tag)
+ self.tagStack.append(tag)
+ self.currentTag = self.tagStack[-1]
+
+ def endData(self, containerClass=NavigableString):
+ if self.currentData:
+ currentData = u''.join(self.currentData)
+ if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
+ not set([tag.name for tag in self.tagStack]).intersection(
+ self.PRESERVE_WHITESPACE_TAGS)):
+ if '\n' in currentData:
+ currentData = '\n'
+ else:
+ currentData = ' '
+ self.currentData = []
+ if self.parseOnlyThese and len(self.tagStack) <= 1 and \
+ (not self.parseOnlyThese.text or \
+ not self.parseOnlyThese.search(currentData)):
+ return
+ o = containerClass(currentData)
+ o.setup(self.currentTag, self.previous)
+ if self.previous:
+ self.previous.next = o
+ self.previous = o
+ self.currentTag.contents.append(o)
+
+
+ def _popToTag(self, name, inclusivePop=True):
+ """Pops the tag stack up to and including the most recent
+ instance of the given tag. If inclusivePop is false, pops the tag
+        stack up to but *not* including the most recent instance of
+ the given tag."""
+ #print "Popping to %s" % name
+ if name == self.ROOT_TAG_NAME:
+ return
+
+ numPops = 0
+ mostRecentTag = None
+ for i in range(len(self.tagStack)-1, 0, -1):
+ if name == self.tagStack[i].name:
+ numPops = len(self.tagStack)-i
+ break
+ if not inclusivePop:
+ numPops = numPops - 1
+
+ for i in range(0, numPops):
+ mostRecentTag = self.popTag()
+ return mostRecentTag
+
+ def _smartPop(self, name):
+
+ """We need to pop up to the previous tag of this type, unless
+ one of this tag's nesting reset triggers comes between this
+ tag and the previous tag of this type, OR unless this tag is a
+ generic nesting trigger and another generic nesting trigger
+ comes between this tag and the previous tag of this type.
+
+ Examples:
+ <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
+ <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
+ <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
+
+ <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
+ <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
+ <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
+ """
+
+ nestingResetTriggers = self.NESTABLE_TAGS.get(name)
+ isNestable = nestingResetTriggers != None
+ isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
+ popTo = None
+ inclusive = True
+ for i in range(len(self.tagStack)-1, 0, -1):
+ p = self.tagStack[i]
+ if (not p or p.name == name) and not isNestable:
+ #Non-nestable tags get popped to the top or to their
+                #last occurrence.
+ popTo = name
+ break
+ if (nestingResetTriggers != None
+ and p.name in nestingResetTriggers) \
+ or (nestingResetTriggers == None and isResetNesting
+ and self.RESET_NESTING_TAGS.has_key(p.name)):
+
+ #If we encounter one of the nesting reset triggers
+ #peculiar to this tag, or we encounter another tag
+ #that causes nesting to reset, pop up to but not
+ #including that tag.
+ popTo = p.name
+ inclusive = False
+ break
+ p = p.parent
+ if popTo:
+ self._popToTag(popTo, inclusive)
+
+ def unknown_starttag(self, name, attrs, selfClosing=0):
+ #print "Start tag %s: %s" % (name, attrs)
+ if self.quoteStack:
+ #This is not a real tag.
+ #print "<%s> is not real!" % name
+ attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
+ self.handle_data('<%s%s>' % (name, attrs))
+ return
+ self.endData()
+
+ if not self.isSelfClosingTag(name) and not selfClosing:
+ self._smartPop(name)
+
+ if self.parseOnlyThese and len(self.tagStack) <= 1 \
+ and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
+ return
+
+ tag = Tag(self, name, attrs, self.currentTag, self.previous)
+ if self.previous:
+ self.previous.next = tag
+ self.previous = tag
+ self.pushTag(tag)
+ if selfClosing or self.isSelfClosingTag(name):
+ self.popTag()
+ if name in self.QUOTE_TAGS:
+ #print "Beginning quote (%s)" % name
+ self.quoteStack.append(name)
+ self.literal = 1
+ return tag
+
+ def unknown_endtag(self, name):
+ #print "End tag %s" % name
+ if self.quoteStack and self.quoteStack[-1] != name:
+ #This is not a real end tag.
+ #print "</%s> is not real!" % name
+ self.handle_data('</%s>' % name)
+ return
+ self.endData()
+ self._popToTag(name)
+ if self.quoteStack and self.quoteStack[-1] == name:
+ self.quoteStack.pop()
+ self.literal = (len(self.quoteStack) > 0)
+
+ def handle_data(self, data):
+ self.currentData.append(data)
+
+ def extractCharsetFromMeta(self, attrs):
+ self.unknown_starttag('meta', attrs)
+
+
+class BeautifulSoup(BeautifulStoneSoup):
+
+ """This parser knows the following facts about HTML:
+
+ * Some tags have no closing tag and should be interpreted as being
+ closed as soon as they are encountered.
+
+    * The text inside some tags (i.e. 'script') may contain tags which
+ are not really part of the document and which should be parsed
+ as text, not tags. If you want to parse the text as tags, you can
+ always fetch it and parse it explicitly.
+
+ * Tag nesting rules:
+
+      Most tags can't be nested at all. For instance, the occurrence of
+ a <p> tag should implicitly close the previous <p> tag.
+
+ <p>Para1<p>Para2
+ should be transformed into:
+ <p>Para1</p><p>Para2
+
+      Some tags can be nested arbitrarily. For instance, the occurrence
+ of a <blockquote> tag should _not_ implicitly close the previous
+ <blockquote> tag.
+
+ Alice said: <blockquote>Bob said: <blockquote>Blah
+ should NOT be transformed into:
+ Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
+
+ Some tags can be nested, but the nesting is reset by the
+ interposition of other tags. For instance, a <tr> tag should
+ implicitly close the previous <tr> tag within the same <table>,
+ but not close a <tr> tag in another table.
+
+ <table><tr>Blah<tr>Blah
+ should be transformed into:
+ <table><tr>Blah</tr><tr>Blah
+ but,
+ <tr>Blah<table><tr>Blah
+ should NOT be transformed into
+ <tr>Blah<table></tr><tr>Blah
+
+ Differing assumptions about tag nesting rules are a major source
+ of problems with the BeautifulSoup class. If BeautifulSoup is not
+ treating as nestable a tag your page author treats as nestable,
+ try ICantBelieveItsBeautifulSoup, MinimalSoup, or
+ BeautifulStoneSoup before writing your own subclass."""
+
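+    # Illustrative sketch (comments only, not executed): the non-nestable
+    # <p> case described above, using this class directly.
+    #
+    #     str(BeautifulSoup('<p>Para1<p>Para2'))
+    #     # -> roughly '<p>Para1</p><p>Para2</p>'
+    #
+    # 'p' appears in NON_NESTABLE_BLOCK_TAGS below and is absent from
+    # NESTABLE_TAGS, so the second <p> implicitly closes the first.
+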
+ def __init__(self, *args, **kwargs):
+ if not kwargs.has_key('smartQuotesTo'):
+ kwargs['smartQuotesTo'] = self.HTML_ENTITIES
+ kwargs['isHTML'] = True
+ BeautifulStoneSoup.__init__(self, *args, **kwargs)
+
+ SELF_CLOSING_TAGS = buildTagMap(None,
+ ['br' , 'hr', 'input', 'img', 'meta',
+ 'spacer', 'link', 'frame', 'base'])
+
+ PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
+
+ QUOTE_TAGS = {'script' : None, 'textarea' : None}
+
+ #According to the HTML standard, each of these inline tags can
+ #contain another tag of the same type. Furthermore, it's common
+ #to actually use these tags this way.
+ NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
+ 'center']
+
+ #According to the HTML standard, these block tags can contain
+ #another tag of the same type. Furthermore, it's common
+ #to actually use these tags this way.
+ NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']
+
+ #Lists can contain other lists, but there are restrictions.
+ NESTABLE_LIST_TAGS = { 'ol' : [],
+ 'ul' : [],
+ 'li' : ['ul', 'ol'],
+ 'dl' : [],
+ 'dd' : ['dl'],
+ 'dt' : ['dl'] }
+
+ #Tables can contain other tables, but there are restrictions.
+ NESTABLE_TABLE_TAGS = {'table' : [],
+ 'tr' : ['table', 'tbody', 'tfoot', 'thead'],
+ 'td' : ['tr'],
+ 'th' : ['tr'],
+ 'thead' : ['table'],
+ 'tbody' : ['table'],
+ 'tfoot' : ['table'],
+ }
+
+ NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']
+
+ #If one of these tags is encountered, all tags up to the next tag of
+ #this type are popped.
+ RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
+ NON_NESTABLE_BLOCK_TAGS,
+ NESTABLE_LIST_TAGS,
+ NESTABLE_TABLE_TAGS)
+
+ NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
+ NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
+
+    # Used to detect the charset in a META tag; see extractCharsetFromMeta
+ CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
+
+ def extractCharsetFromMeta(self, attrs):
+ """Beautiful Soup can detect a charset included in a META tag,
+ try to convert the document to that charset, and re-parse the
+ document from the beginning."""
+ httpEquiv = None
+ contentType = None
+ contentTypeIndex = None
+ tagNeedsEncodingSubstitution = False
+
+ for i in range(0, len(attrs)):
+ key, value = attrs[i]
+ key = key.lower()
+ if key == 'http-equiv':
+ httpEquiv = value
+ elif key == 'content':
+ contentType = value
+ contentTypeIndex = i
+
+ if httpEquiv and contentType: # It's an interesting meta tag.
+ match = self.CHARSET_RE.search(contentType)
+ if match:
+ if (self.declaredHTMLEncoding is not None or
+ self.originalEncoding == self.fromEncoding):
+ # An HTML encoding was sniffed while converting
+ # the document to Unicode, or an HTML encoding was
+ # sniffed during a previous pass through the
+ # document, or an encoding was specified
+ # explicitly and it worked. Rewrite the meta tag.
+ def rewrite(match):
+ return match.group(1) + "%SOUP-ENCODING%"
+ newAttr = self.CHARSET_RE.sub(rewrite, contentType)
+ attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
+ newAttr)
+ tagNeedsEncodingSubstitution = True
+ else:
+ # This is our first pass through the document.
+ # Go through it again with the encoding information.
+ newCharset = match.group(3)
+ if newCharset and newCharset != self.originalEncoding:
+ self.declaredHTMLEncoding = newCharset
+ self._feed(self.declaredHTMLEncoding)
+ raise StopParsing
+ pass
+ tag = self.unknown_starttag("meta", attrs)
+ if tag and tagNeedsEncodingSubstitution:
+ tag.containsSubstitutions = True
+
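+    # Illustrative sketch (comments only, not executed): given a tag such as
+    #
+    #     <meta http-equiv="Content-Type"
+    #           content="text/html; charset=iso-8859-1">
+    #
+    # there are two outcomes. If an encoding is already known (sniffed
+    # earlier, or specified explicitly and it worked), the content attribute
+    # is rewritten to "text/html; charset=%SOUP-ENCODING%" so the output
+    # encoding can be substituted when the tree is rendered. Otherwise, if
+    # the declared charset differs from the encoding already in use, it is
+    # recorded and the document is re-parsed from the start via StopParsing.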
+
+class StopParsing(Exception):
+ pass
+
+class ICantBelieveItsBeautifulSoup(BeautifulSoup):
+
+ """The BeautifulSoup class is oriented towards skipping over
+ common HTML errors like unclosed tags. However, sometimes it makes
+ errors of its own. For instance, consider this fragment:
+
+ <b>Foo<b>Bar</b></b>
+
+ This is perfectly valid (if bizarre) HTML. However, the
+ BeautifulSoup class will implicitly close the first b tag when it
+ encounters the second 'b'. It will think the author wrote
+ "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
+ there's no real-world reason to bold something that's already
+ bold. When it encounters '</b></b>' it will close two more 'b'
+ tags, for a grand total of three tags closed instead of two. This
+ can throw off the rest of your document structure. The same is
+ true of a number of other tags, listed below.
+
+ It's much more common for someone to forget to close a 'b' tag
+ than to actually use nested 'b' tags, and the BeautifulSoup class
+    handles the common case. This class handles the not-so-common
+ case: where you can't believe someone wrote what they did, but
+ it's valid HTML and BeautifulSoup screwed up by assuming it
+ wouldn't be."""
+
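+    # Illustrative sketch (comments only, not executed) of the difference
+    # described above:
+    #
+    #     str(BeautifulSoup('<b>Foo<b>Bar</b></b>'))
+    #     # -> roughly '<b>Foo</b><b>Bar</b>' (the first 'b' is closed early)
+    #
+    #     str(ICantBelieveItsBeautifulSoup('<b>Foo<b>Bar</b></b>'))
+    #     # -> roughly '<b>Foo<b>Bar</b></b>' (the nesting is preserved)
+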
+ I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
+ ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
+ 'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
+ 'big']
+
+ I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']
+
+ NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
+ I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
+ I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
+
+class MinimalSoup(BeautifulSoup):
+ """The MinimalSoup class is for parsing HTML that contains
+ pathologically bad markup. It makes no assumptions about tag
+ nesting, but it does know which tags are self-closing, that
+ <script> tags contain Javascript and should not be parsed, that
+ META tags may contain encoding information, and so on.
+
+ This also makes it better for subclassing than BeautifulStoneSoup
+ or BeautifulSoup."""
+
+ RESET_NESTING_TAGS = buildTagMap('noscript')
+ NESTABLE_TAGS = {}
+
+class BeautifulSOAP(BeautifulStoneSoup):
+ """This class will push a tag with only a single string child into
+ the tag's parent as an attribute. The attribute's name is the tag
+ name, and the value is the string child. An example should give
+ the flavor of the change:
+
+ <foo><bar>baz</bar></foo>
+ =>
+ <foo bar="baz"><bar>baz</bar></foo>
+
+ You can then access fooTag['bar'] instead of fooTag.barTag.string.
+
+ This is, of course, useful for scraping structures that tend to
+ use subelements instead of attributes, such as SOAP messages. Note
+ that it modifies its input, so don't print the modified version
+ out.
+
+ I'm not sure how many people really want to use this class; let me
+ know if you do. Mainly I like the name."""
+
+ def popTag(self):
+ if len(self.tagStack) > 1:
+ tag = self.tagStack[-1]
+ parent = self.tagStack[-2]
+ parent._getAttrMap()
+ if (isinstance(tag, Tag) and len(tag.contents) == 1 and
+ isinstance(tag.contents[0], NavigableString) and
+ not parent.attrMap.has_key(tag.name)):
+ parent[tag.name] = tag.contents[0]
+ BeautifulStoneSoup.popTag(self)
+
+#Enterprise class names! It has come to our attention that some people
+#think the names of the Beautiful Soup parser classes are too silly
+#and "unprofessional" for use in enterprise screen-scraping. We feel
+#your pain! For such-minded folk, the Beautiful Soup Consortium And
+#All-Night Kosher Bakery recommends renaming this file to
+#"RobustParser.py" (or, in cases of extreme enterprisiness,
+#"RobustParserBeanInterface.class") and using the following
+#enterprise-friendly class aliases:
+class RobustXMLParser(BeautifulStoneSoup):
+ pass
+class RobustHTMLParser(BeautifulSoup):
+ pass
+class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
+ pass
+class RobustInsanelyWackAssHTMLParser(MinimalSoup):
+ pass
+class SimplifyingSOAPParser(BeautifulSOAP):
+ pass
+
+######################################################
+#
+# Bonus library: Unicode, Dammit
+#
+# This class forces XML data into a standard format (usually to UTF-8
+# or Unicode). It is heavily based on code from Mark Pilgrim's
+# Universal Feed Parser. It does not rewrite the XML or HTML to
+# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
+# (XML) and BeautifulSoup.extractCharsetFromMeta (HTML).
+
+# Autodetects character encodings.
+# Download from http://chardet.feedparser.org/
+try:
+ import chardet
+# import chardet.constants
+# chardet.constants._debug = 1
+except ImportError:
+ chardet = None
+
+# cjkcodecs and iconv_codec make Python know about more character encodings.
+# Both are available from http://cjkpython.i18n.org/
+# They're built in if you use Python 2.4.
+try:
+ import cjkcodecs.aliases
+except ImportError:
+ pass
+try:
+ import iconv_codec
+except ImportError:
+ pass
+
+class UnicodeDammit:
+ """A class for detecting the encoding of a *ML document and
+ converting it to a Unicode string. If the source encoding is
+ windows-1252, can replace MS smart quotes with their HTML or XML
+ equivalents."""
+
+ # This dictionary maps commonly seen values for "charset" in HTML
+ # meta tags to the corresponding Python codec names. It only covers
+ # values that aren't in Python's aliases and can't be determined
+ # by the heuristics in find_codec.
+ CHARSET_ALIASES = { "macintosh" : "mac-roman",
+ "x-sjis" : "shift-jis" }
+
+ def __init__(self, markup, overrideEncodings=[],
+ smartQuotesTo='xml', isHTML=False):
+ self.declaredHTMLEncoding = None
+ self.markup, documentEncoding, sniffedEncoding = \
+ self._detectEncoding(markup, isHTML)
+ self.smartQuotesTo = smartQuotesTo
+ self.triedEncodings = []
+ if markup == '' or isinstance(markup, unicode):
+ self.originalEncoding = None
+ self.unicode = unicode(markup)
+ return
+
+ u = None
+ for proposedEncoding in overrideEncodings:
+ u = self._convertFrom(proposedEncoding)
+ if u: break
+ if not u:
+ for proposedEncoding in (documentEncoding, sniffedEncoding):
+ u = self._convertFrom(proposedEncoding)
+ if u: break
+
+ # If no luck and we have auto-detection library, try that:
+ if not u and chardet and not isinstance(self.markup, unicode):
+ u = self._convertFrom(chardet.detect(self.markup)['encoding'])
+
+ # As a last resort, try utf-8 and windows-1252:
+ if not u:
+ for proposed_encoding in ("utf-8", "windows-1252"):
+ u = self._convertFrom(proposed_encoding)
+ if u: break
+
+ self.unicode = u
+ if not u: self.originalEncoding = None
+
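+    # Illustrative sketch (comments only, not executed; 'dammit' is a
+    # hypothetical name): feeding raw bytes yields a Unicode string plus the
+    # encoding that worked. For plain ASCII input no guessing is needed:
+    #
+    #     dammit = UnicodeDammit("<p>hello</p>")
+    #     dammit.unicode            # u'<p>hello</p>'
+    #     dammit.originalEncoding   # 'ascii' here, i.e. the codec that worked
+    #
+    # BeautifulStoneSoup._feed() above consumes exactly these attributes,
+    # along with declaredHTMLEncoding, when converting a document.
+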
+ def _subMSChar(self, match):
+ """Changes a MS smart quote character to an XML or HTML
+ entity."""
+ orig = match.group(1)
+ sub = self.MS_CHARS.get(orig)
+ if type(sub) == types.TupleType:
+ if self.smartQuotesTo == 'xml':
+ sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
+ else:
+ sub = '&'.encode() + sub[0].encode() + ';'.encode()
+ else:
+ sub = sub.encode()
+ return sub
+
+ def _convertFrom(self, proposed):
+ proposed = self.find_codec(proposed)
+ if not proposed or proposed in self.triedEncodings:
+ return None
+ self.triedEncodings.append(proposed)
+ markup = self.markup
+
+ # Convert smart quotes to HTML if coming from an encoding
+ # that might have them.
+ if self.smartQuotesTo and proposed.lower() in("windows-1252",
+ "iso-8859-1",
+ "iso-8859-2"):
+ smart_quotes_re = "([\x80-\x9f])"
+ smart_quotes_compiled = re.compile(smart_quotes_re)
+ markup = smart_quotes_compiled.sub(self._subMSChar, markup)
+
+ try:
+ # print "Trying to convert document to %s" % proposed
+ u = self._toUnicode(markup, proposed)
+ self.markup = u
+ self.originalEncoding = proposed
+ except Exception, e:
+ # print "That didn't work!"
+ # print e
+ return None
+ #print "Correct encoding: %s" % proposed
+ return self.markup
+
+ def _toUnicode(self, data, encoding):
+ '''Given a string and its encoding, decodes the string into Unicode.
+ %encoding is a string recognized by encodings.aliases'''
+
+ # strip Byte Order Mark (if present)
+ if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
+ and (data[2:4] != '\x00\x00'):
+ encoding = 'utf-16be'
+ data = data[2:]
+ elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
+ and (data[2:4] != '\x00\x00'):
+ encoding = 'utf-16le'
+ data = data[2:]
+ elif data[:3] == '\xef\xbb\xbf':
+ encoding = 'utf-8'
+ data = data[3:]
+ elif data[:4] == '\x00\x00\xfe\xff':
+ encoding = 'utf-32be'
+ data = data[4:]
+ elif data[:4] == '\xff\xfe\x00\x00':
+ encoding = 'utf-32le'
+ data = data[4:]
+ newdata = unicode(data, encoding)
+ return newdata
+
+ def _detectEncoding(self, xml_data, isHTML=False):
+ """Given a document, tries to detect its XML encoding."""
+ xml_encoding = sniffed_xml_encoding = None
+ try:
+ if xml_data[:4] == '\x4c\x6f\xa7\x94':
+ # EBCDIC
+ xml_data = self._ebcdic_to_ascii(xml_data)
+ elif xml_data[:4] == '\x00\x3c\x00\x3f':
+ # UTF-16BE
+ sniffed_xml_encoding = 'utf-16be'
+ xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
+ elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
+ and (xml_data[2:4] != '\x00\x00'):
+ # UTF-16BE with BOM
+ sniffed_xml_encoding = 'utf-16be'
+ xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
+ elif xml_data[:4] == '\x3c\x00\x3f\x00':
+ # UTF-16LE
+ sniffed_xml_encoding = 'utf-16le'
+ xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
+ elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
+ (xml_data[2:4] != '\x00\x00'):
+ # UTF-16LE with BOM
+ sniffed_xml_encoding = 'utf-16le'
+ xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
+ elif xml_data[:4] == '\x00\x00\x00\x3c':
+ # UTF-32BE
+ sniffed_xml_encoding = 'utf-32be'
+ xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
+ elif xml_data[:4] == '\x3c\x00\x00\x00':
+ # UTF-32LE
+ sniffed_xml_encoding = 'utf-32le'
+ xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
+ elif xml_data[:4] == '\x00\x00\xfe\xff':
+ # UTF-32BE with BOM
+ sniffed_xml_encoding = 'utf-32be'
+ xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
+ elif xml_data[:4] == '\xff\xfe\x00\x00':
+ # UTF-32LE with BOM
+ sniffed_xml_encoding = 'utf-32le'
+ xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
+ elif xml_data[:3] == '\xef\xbb\xbf':
+ # UTF-8 with BOM
+ sniffed_xml_encoding = 'utf-8'
+ xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
+ else:
+ sniffed_xml_encoding = 'ascii'
+ pass
+ except:
+ xml_encoding_match = None
+ xml_encoding_re = '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode()
+ xml_encoding_match = re.compile(xml_encoding_re).match(xml_data)
+ if not xml_encoding_match and isHTML:
+ meta_re = '<\s*meta[^>]+charset=([^>]*?)[;\'">]'.encode()
+ regexp = re.compile(meta_re, re.I)
+ xml_encoding_match = regexp.search(xml_data)
+ if xml_encoding_match is not None:
+ xml_encoding = xml_encoding_match.groups()[0].decode(
+ 'ascii').lower()
+ if isHTML:
+ self.declaredHTMLEncoding = xml_encoding
+ if sniffed_xml_encoding and \
+ (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
+ 'iso-10646-ucs-4', 'ucs-4', 'csucs4',
+ 'utf-16', 'utf-32', 'utf_16', 'utf_32',
+ 'utf16', 'u16')):
+ xml_encoding = sniffed_xml_encoding
+ return xml_data, xml_encoding, sniffed_xml_encoding
+
+
+ def find_codec(self, charset):
+ return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
+ or (charset and self._codec(charset.replace("-", ""))) \
+ or (charset and self._codec(charset.replace("-", "_"))) \
+ or charset
+
+ def _codec(self, charset):
+ if not charset: return charset
+ codec = None
+ try:
+ codecs.lookup(charset)
+ codec = charset
+ except (LookupError, ValueError):
+ pass
+ return codec
+
+ EBCDIC_TO_ASCII_MAP = None
+ def _ebcdic_to_ascii(self, s):
+ c = self.__class__
+ if not c.EBCDIC_TO_ASCII_MAP:
+ emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
+ 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
+ 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
+ 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
+ 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
+ 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
+ 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
+ 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
+ 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
+ 201,202,106,107,108,109,110,111,112,113,114,203,204,205,
+ 206,207,208,209,126,115,116,117,118,119,120,121,122,210,
+ 211,212,213,214,215,216,217,218,219,220,221,222,223,224,
+ 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
+ 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
+ 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
+ 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
+ 250,251,252,253,254,255)
+ import string
+ c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
+ ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
+ return s.translate(c.EBCDIC_TO_ASCII_MAP)
+
+ MS_CHARS = { '\x80' : ('euro', '20AC'),
+ '\x81' : ' ',
+ '\x82' : ('sbquo', '201A'),
+ '\x83' : ('fnof', '192'),
+ '\x84' : ('bdquo', '201E'),
+ '\x85' : ('hellip', '2026'),
+ '\x86' : ('dagger', '2020'),
+ '\x87' : ('Dagger', '2021'),
+ '\x88' : ('circ', '2C6'),
+ '\x89' : ('permil', '2030'),
+ '\x8A' : ('Scaron', '160'),
+ '\x8B' : ('lsaquo', '2039'),
+ '\x8C' : ('OElig', '152'),
+ '\x8D' : '?',
+ '\x8E' : ('#x17D', '17D'),
+ '\x8F' : '?',
+ '\x90' : '?',
+ '\x91' : ('lsquo', '2018'),
+ '\x92' : ('rsquo', '2019'),
+ '\x93' : ('ldquo', '201C'),
+ '\x94' : ('rdquo', '201D'),
+ '\x95' : ('bull', '2022'),
+ '\x96' : ('ndash', '2013'),
+ '\x97' : ('mdash', '2014'),
+ '\x98' : ('tilde', '2DC'),
+ '\x99' : ('trade', '2122'),
+ '\x9a' : ('scaron', '161'),
+ '\x9b' : ('rsaquo', '203A'),
+ '\x9c' : ('oelig', '153'),
+ '\x9d' : '?',
+ '\x9e' : ('#x17E', '17E'),
+ '\x9f' : ('Yuml', ''),}
+
+#######################################################################
+
+
+#By default, act as an HTML pretty-printer.
+if __name__ == '__main__':
+ import sys
+ soup = BeautifulSoup(sys.stdin)
+ print soup.prettify()
diff --git a/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc b/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc
new file mode 100644
index 0000000..dffb144
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/BeautifulSoup.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/__init__.py b/WebKitTools/Scripts/webkitpy/__init__.py
new file mode 100644
index 0000000..94ecc70
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/__init__.py
@@ -0,0 +1,8 @@
+# Required for Python to search this directory for module files
+
+import autoinstall
+
+# List our third-party library dependencies here and where they can be
+# downloaded.
+autoinstall.bind("ClientForm", "http://pypi.python.org/packages/source/C/ClientForm/ClientForm-0.2.10.zip", "ClientForm-0.2.10")
+autoinstall.bind("mechanize", "http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip", "mechanize-0.1.11")
diff --git a/WebKitTools/Scripts/webkitpy/__init__.pyc b/WebKitTools/Scripts/webkitpy/__init__.pyc
new file mode 100644
index 0000000..d1ffa10
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/__init__.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/autoinstall.py b/WebKitTools/Scripts/webkitpy/autoinstall.py
new file mode 100644
index 0000000..467e6b4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/autoinstall.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2009, Daniel Krech All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# * Neither the name of the Daniel Krech nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""\
+package loader for auto installing Python packages.
+
+A package loader in the spirit of Zero Install that can be used to
+inject dependencies into the import process.
+
+
+To install::
+
+ easy_install -U autoinstall
+
+ or
+
+ download, unpack, python setup.py install
+
+ or
+
+ try the bootstrap loader. See below.
+
+
+To use::
+
+ # You can bind any package name to a URL pointing to something
+ # that can be imported using the zipimporter.
+
+ autoinstall.bind("pymarc", "http://pypi.python.org/packages/2.5/p/pymarc/pymarc-2.1-py2.5.egg")
+
+ import pymarc
+
+ print pymarc.__version__, pymarc.__file__
+
+
+Changelog::
+
+- added support for non top level packages.
+- cache files now use filename part from URL.
+- applied patch from Eric Seidel <eseidel@google.com> to add support
+for loading modules where the module is not at the root of the .zip
+file.
+
+
+TODO::
+
+- a description of the intended use case
+- address other issues pointed out in:
+
+ http://mail.python.org/pipermail/python-dev/2008-March/077926.html
+
+Scribbles::
+
+pull vs. push
+user vs. system
+web vs. filesystem
+auto vs. manual
+
+manage development sandboxes
+
+optional interfaces...
+
+ def get_data(pathname) -> string with file data.
+
+ Return the data associated with 'pathname'. Raise IOError if
+ the file wasn't found.");
+
+ def is_package,
+ "is_package(fullname) -> bool.
+
+ Return True if the module specified by fullname is a package.
+ Raise ZipImportError is the module couldn't be found.");
+
+ def get_code,
+ "get_code(fullname) -> code object.
+
+ Return the code object for the specified module. Raise ZipImportError
+ is the module couldn't be found.");
+
+ def get_source,
+ "get_source(fullname) -> source string.
+
+ Return the source code for the specified module. Raise ZipImportError
+ is the module couldn't be found, return None if the archive does
+ contain the module, but has no source for it.");
+
+
+Autoinstall can also be bootstrapped with the nascent package loader
+bootstrap module. For example::
+
+ # or via the bootstrap
+ # loader.
+
+ try:
+ _version = "0.2"
+ import autoinstall
+ if autoinstall.__version__ != _version:
+ raise ImportError("A different version than expected found.")
+ except ImportError, e:
+ # http://svn.python.org/projects/sandbox/trunk/bootstrap/bootstrap.py
+ import bootstrap
+ pypi = "http://pypi.python.org"
+ dir = "packages/source/a/autoinstall"
+ url = "%s/%s/autoinstall-%s.tar.gz" % (pypi, dir, _version)
+ bootstrap.main((url,))
+ import autoinstall
+
+References::
+
+ http://0install.net/
+ http://www.python.org/dev/peps/pep-0302/
+ http://svn.python.org/projects/sandbox/trunk/import_in_py
+ http://0install.net/injector-find.html
+ http://roscidus.com/desktop/node/903
+
+"""
+
+# To allow use of the "with" keyword for Python 2.5 users.
+from __future__ import with_statement
+
+__version__ = "0.2"
+__docformat__ = "restructuredtext en"
+
+import os
+import new
+import sys
+import urllib
+import logging
+import tempfile
+import zipimport
+
+_logger = logging.getLogger(__name__)
+
+
+_importer = None
+
+def _getImporter():
+ global _importer
+ if _importer is None:
+ _importer = Importer()
+ sys.meta_path.append(_importer)
+ return _importer
+
+def bind(package_name, url, zip_subpath=None):
+ """bind a top level package name to a URL.
+
+ The package name should be a package name and the url should be a
+ url to something that can be imported using the zipimporter.
+
+ Optional zip_subpath parameter allows searching for modules
+ below the root level of the zip file.
+ """
+ _getImporter().bind(package_name, url, zip_subpath)
+
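+# Example of the optional subpath (taken from webkitpy/__init__.py elsewhere
+# in this patch): the third argument names the directory inside the zip
+# archive that holds the package, for archives that are not importable from
+# their root.
+#
+#   import autoinstall
+#   autoinstall.bind("mechanize",
+#                    "http://pypi.python.org/packages/source/m/mechanize/mechanize-0.1.11.zip",
+#                    "mechanize-0.1.11")
+#   import mechanize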
+
+class Cache(object):
+
+ def __init__(self, directory=None):
+ if directory is None:
+ # Default to putting the cache directory in the same directory
+ # as this file.
+ containing_directory = os.path.dirname(__file__)
+            directory = os.path.join(containing_directory, "autoinstall.cache.d")
+
+ self.directory = directory
+ try:
+ if not os.path.exists(self.directory):
+ self._create_cache_directory()
+ except Exception, err:
+ _logger.exception(err)
+            self.directory = tempfile.mkdtemp()
+ _logger.info("Using cache directory '%s'." % self.directory)
+
+ def _create_cache_directory(self):
+ _logger.debug("Creating cache directory '%s'." % self.directory)
+ os.mkdir(self.directory)
+ readme_path = os.path.join(self.directory, "README")
+ with open(readme_path, "w") as f:
+ f.write("This directory was auto-generated by '%s'.\n"
+ "It is safe to delete.\n" % __file__)
+
+ def get(self, url):
+ _logger.info("Getting '%s' from cache." % url)
+ filename = url.rsplit("/")[-1]
+
+ # so that source url is significant in determining cache hits
+ d = os.path.join(self.directory, "%s" % hash(url))
+ if not os.path.exists(d):
+ os.mkdir(d)
+
+ filename = os.path.join(d, filename)
+
+ if os.path.exists(filename):
+ _logger.debug("... already cached in file '%s'." % filename)
+ else:
+ _logger.debug("... not in cache. Caching in '%s'." % filename)
+ stream = file(filename, "wb")
+ self.download(url, stream)
+ stream.close()
+ return filename
+
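+    # Illustrative sketch (comments only): for a bound URL ending in
+    # "mechanize-0.1.11.zip", get() stores the download in a per-URL
+    # subdirectory so that the source URL participates in cache hits:
+    #
+    #     <self.directory>/<hash(url)>/mechanize-0.1.11.zip
+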
+ def download(self, url, stream):
+ _logger.info("Downloading: %s" % url)
+ try:
+ netstream = urllib.urlopen(url)
+ code = 200
+ if hasattr(netstream, "getcode"):
+ code = netstream.getcode()
+ if not 200 <= code < 300:
+ raise ValueError("HTTP Error code %s" % code)
+ except Exception, err:
+ _logger.exception(err)
+
+ BUFSIZE = 2**13 # 8KB
+ size = 0
+ while True:
+ data = netstream.read(BUFSIZE)
+ if not data:
+ break
+ stream.write(data)
+ size += len(data)
+ netstream.close()
+ _logger.info("Downloaded %d bytes." % size)
+
+
+class Importer(object):
+
+ def __init__(self):
+ self.packages = {}
+ self.__cache = None
+
+ def __get_store(self):
+ return self.__store
+ store = property(__get_store)
+
+ def _get_cache(self):
+ if self.__cache is None:
+ self.__cache = Cache()
+ return self.__cache
+ def _set_cache(self, cache):
+ self.__cache = cache
+ cache = property(_get_cache, _set_cache)
+
+ def find_module(self, fullname, path=None):
+ """-> self or None.
+
+ Search for a module specified by 'fullname'. 'fullname' must be
+ the fully qualified (dotted) module name. It returns the
+ zipimporter instance itself if the module was found, or None if
+ it wasn't. The optional 'path' argument is ignored -- it's
+        there for compatibility with the importer protocol.
+ """
+ _logger.debug("find_module(%s, path=%s)" % (fullname, path))
+
+ if fullname in self.packages:
+ (url, zip_subpath) = self.packages[fullname]
+ filename = self.cache.get(url)
+ zip_path = "%s/%s" % (filename, zip_subpath) if zip_subpath else filename
+ _logger.debug("fullname: %s url: %s path: %s zip_path: %s" % (fullname, url, path, zip_path))
+ try:
+ loader = zipimport.zipimporter(zip_path)
+ _logger.debug("returning: %s" % loader)
+ except Exception, e:
+ _logger.exception(e)
+ return None
+ return loader
+ return None
+
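+    # Illustrative sketch (comments only, not executed): once an Importer is
+    # on sys.meta_path (see _getImporter() above), an ordinary import of a
+    # bound name goes through this method:
+    #
+    #     bind("pymarc",
+    #          "http://pypi.python.org/packages/2.5/p/pymarc/pymarc-2.1-py2.5.egg")
+    #     import pymarc   # find_module("pymarc") downloads and caches the
+    #                     # egg, then returns a zipimporter for it
+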
+ def bind(self, package_name, url, zip_subpath):
+ _logger.info("binding: %s -> %s subpath: %s" % (package_name, url, zip_subpath))
+ self.packages[package_name] = (url, zip_subpath)
+
+
+if __name__=="__main__":
+ import logging
+ #logging.basicConfig()
+ logger = logging.getLogger()
+
+ console = logging.StreamHandler()
+ console.setLevel(logging.DEBUG)
+ # set a format which is simpler for console use
+ formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
+ # tell the handler to use this format
+ console.setFormatter(formatter)
+ # add the handler to the root logger
+ logger.addHandler(console)
+ logger.setLevel(logging.INFO)
+
+ bind("pymarc", "http://pypi.python.org/packages/2.5/p/pymarc/pymarc-2.1-py2.5.egg")
+
+ import pymarc
+
+ print pymarc.__version__, pymarc.__file__
+
+ assert pymarc.__version__=="2.1"
+
+ d = _getImporter().cache.directory
+ assert d in pymarc.__file__, "'%s' not found in pymarc.__file__ (%s)" % (d, pymarc.__file__)
+
+ # Can now also bind to non top level packages. The packages
+ # leading up to the package being bound will need to be defined
+ # however.
+ #
+ # bind("rdf.plugins.stores.memory",
+ # "http://pypi.python.org/packages/2.5/r/rdf.plugins.stores.memeory/rdf.plugins.stores.memory-0.9a-py2.5.egg")
+ #
+ # from rdf.plugins.stores.memory import Memory
+
+
diff --git a/WebKitTools/Scripts/webkitpy/autoinstall.pyc b/WebKitTools/Scripts/webkitpy/autoinstall.pyc
new file mode 100644
index 0000000..68e46fd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/autoinstall.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/bugzilla.py b/WebKitTools/Scripts/webkitpy/bugzilla.py
new file mode 100644
index 0000000..c1cf41d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/bugzilla.py
@@ -0,0 +1,789 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+# Copyright (c) 2010 Research In Motion Limited. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for interacting with Bugzilla
+
+import re
+import subprocess
+
+from datetime import datetime # used in timestamp()
+
+# Import WebKit-specific modules.
+from webkitpy.webkit_logging import error, log
+from webkitpy.committers import CommitterList
+from webkitpy.credentials import Credentials
+
+# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy
+# so this import should always succeed.
+from .BeautifulSoup import BeautifulSoup, SoupStrainer
+
+from mechanize import Browser
+
+
+def parse_bug_id(message):
+ match = re.search("http\://webkit\.org/b/(?P<bug_id>\d+)", message)
+ if match:
+ return int(match.group('bug_id'))
+ match = re.search(
+ Bugzilla.bug_server_regex + "show_bug\.cgi\?id=(?P<bug_id>\d+)",
+ message)
+ if match:
+ return int(match.group('bug_id'))
+ return None
+
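+# Illustrative sketch (comments only; the bug number is arbitrary): both URL
+# forms recognized above yield the numeric id, and anything else yields None.
+#
+#     parse_bug_id("http://webkit.org/b/34567")                      # 34567
+#     parse_bug_id("https://bugs.webkit.org/show_bug.cgi?id=34567")  # 34567
+#     parse_bug_id("no bug link here")                               # None
+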
+
+def timestamp():
+ return datetime.now().strftime("%Y%m%d%H%M%S")
+
+
+class Attachment(object):
+
+ def __init__(self, attachment_dictionary, bug):
+ self._attachment_dictionary = attachment_dictionary
+ self._bug = bug
+ self._reviewer = None
+ self._committer = None
+
+ def _bugzilla(self):
+ return self._bug._bugzilla
+
+ def id(self):
+ return int(self._attachment_dictionary.get("id"))
+
+ def attacher_is_committer(self):
+        return self._bugzilla().committers.committer_by_email(
+            self.attacher_email())
+
+ def attacher_email(self):
+ return self._attachment_dictionary.get("attacher_email")
+
+ def bug(self):
+ return self._bug
+
+ def bug_id(self):
+ return int(self._attachment_dictionary.get("bug_id"))
+
+ def is_patch(self):
+ return not not self._attachment_dictionary.get("is_patch")
+
+ def is_obsolete(self):
+ return not not self._attachment_dictionary.get("is_obsolete")
+
+ def name(self):
+ return self._attachment_dictionary.get("name")
+
+ def review(self):
+ return self._attachment_dictionary.get("review")
+
+ def commit_queue(self):
+ return self._attachment_dictionary.get("commit-queue")
+
+ def url(self):
+ # FIXME: This should just return
+ # self._bugzilla().attachment_url_for_id(self.id()). scm_unittest.py
+ # depends on the current behavior.
+ return self._attachment_dictionary.get("url")
+
+ def _validate_flag_value(self, flag):
+ email = self._attachment_dictionary.get("%s_email" % flag)
+ if not email:
+ return None
+ committer = getattr(self._bugzilla().committers,
+ "%s_by_email" % flag)(email)
+ if committer:
+ return committer
+ log("Warning, attachment %s on bug %s has invalid %s (%s)" % (
+ self._attachment_dictionary['id'],
+ self._attachment_dictionary['bug_id'], flag, email))
+
+ def reviewer(self):
+ if not self._reviewer:
+ self._reviewer = self._validate_flag_value("reviewer")
+ return self._reviewer
+
+ def committer(self):
+ if not self._committer:
+ self._committer = self._validate_flag_value("committer")
+ return self._committer
+
+
+class Bug(object):
+    # FIXME: This class is kind of a hack for now. It exists so we have one
+ # place to hold bug logic, even if much of the code deals with
+ # dictionaries still.
+
+ def __init__(self, bug_dictionary, bugzilla):
+ self.bug_dictionary = bug_dictionary
+ self._bugzilla = bugzilla
+
+ def id(self):
+ return self.bug_dictionary["id"]
+
+ def assigned_to_email(self):
+ return self.bug_dictionary["assigned_to_email"]
+
+ # Rarely do we actually want obsolete attachments
+ def attachments(self, include_obsolete=False):
+ attachments = self.bug_dictionary["attachments"]
+ if not include_obsolete:
+ attachments = filter(lambda attachment:
+ not attachment["is_obsolete"], attachments)
+ return [Attachment(attachment, self) for attachment in attachments]
+
+ def patches(self, include_obsolete=False):
+ return [patch for patch in self.attachments(include_obsolete)
+ if patch.is_patch()]
+
+ def unreviewed_patches(self):
+ return [patch for patch in self.patches() if patch.review() == "?"]
+
+ def reviewed_patches(self, include_invalid=False):
+ patches = [patch for patch in self.patches() if patch.review() == "+"]
+ if include_invalid:
+ return patches
+ # Checking reviewer() ensures that it was both reviewed and has a valid
+ # reviewer.
+ return filter(lambda patch: patch.reviewer(), patches)
+
+ def commit_queued_patches(self, include_invalid=False):
+ patches = [patch for patch in self.patches()
+ if patch.commit_queue() == "+"]
+ if include_invalid:
+ return patches
+ # Checking committer() ensures that it was both commit-queue+'d and has
+ # a valid committer.
+ return filter(lambda patch: patch.committer(), patches)
+
+
+# A container for all of the logic for making and parsing bugzilla queries.
+# A usage sketch follows the class definition.
+class BugzillaQueries(object):
+
+ def __init__(self, bugzilla):
+ self._bugzilla = bugzilla
+
+ # Note: _load_query and _fetch_bug are the only two methods which access
+ # self._bugzilla.
+
+ def _load_query(self, query):
+ self._bugzilla.authenticate()
+
+ full_url = "%s%s" % (self._bugzilla.bug_server_url, query)
+ return self._bugzilla.browser.open(full_url)
+
+ def _fetch_bug(self, bug_id):
+ return self._bugzilla.fetch_bug(bug_id)
+
+ def _fetch_bug_ids_advanced_query(self, query):
+ soup = BeautifulSoup(self._load_query(query))
+ # The contents of the <a> inside the cells in the first column happen
+ # to be the bug id.
+ return [int(bug_link_cell.find("a").string)
+ for bug_link_cell in soup('td', "first-child")]
+
+ def _parse_attachment_ids_request_query(self, page):
+ digits = re.compile("\d+")
+ attachment_href = re.compile("attachment.cgi\?id=\d+&action=review")
+ attachment_links = SoupStrainer("a", href=attachment_href)
+ return [int(digits.search(tag["href"]).group(0))
+ for tag in BeautifulSoup(page, parseOnlyThese=attachment_links)]
+
+ def _fetch_attachment_ids_request_query(self, query):
+ return self._parse_attachment_ids_request_query(self._load_query(query))
+
+ # List of all r+'d bugs.
+ def fetch_bug_ids_from_pending_commit_list(self):
+ needs_commit_query_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review%2B"
+ return self._fetch_bug_ids_advanced_query(needs_commit_query_url)
+
+ def fetch_patches_from_pending_commit_list(self):
+ return sum([self._fetch_bug(bug_id).reviewed_patches()
+ for bug_id in self.fetch_bug_ids_from_pending_commit_list()], [])
+
+ def fetch_bug_ids_from_commit_queue(self):
+ commit_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=commit-queue%2B&order=Last+Changed"
+ return self._fetch_bug_ids_advanced_query(commit_queue_url)
+
+ def fetch_patches_from_commit_queue(self):
+ # This function will only return patches which have valid committers
+ # set. It won't reject patches with invalid committers/reviewers.
+ return sum([self._fetch_bug(bug_id).commit_queued_patches()
+ for bug_id in self.fetch_bug_ids_from_commit_queue()], [])
+
+ def _fetch_bug_ids_from_review_queue(self):
+ review_queue_url = "buglist.cgi?query_format=advanced&bug_status=UNCONFIRMED&bug_status=NEW&bug_status=ASSIGNED&bug_status=REOPENED&field0-0-0=flagtypes.name&type0-0-0=equals&value0-0-0=review?"
+ return self._fetch_bug_ids_advanced_query(review_queue_url)
+
+ def fetch_patches_from_review_queue(self, limit=None):
+ # [:None] returns the whole array.
+ return sum([self._fetch_bug(bug_id).unreviewed_patches()
+ for bug_id in self._fetch_bug_ids_from_review_queue()[:limit]], [])
+
+ # FIXME: Why do we have both fetch_patches_from_review_queue and
+ # fetch_attachment_ids_from_review_queue??
+ # NOTE: This is also the only client of _fetch_attachment_ids_request_query
+
+ def fetch_attachment_ids_from_review_queue(self):
+ review_queue_url = "request.cgi?action=queue&type=review&group=type"
+ return self._fetch_attachment_ids_request_query(review_queue_url)
+
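+# Illustrative usage sketch (comments only, not executed; requires Bugzilla
+# credentials): Bugzilla, defined below, exposes an instance of this class
+# as its 'queries' attribute.
+#
+#     bugzilla = Bugzilla()
+#     for patch in bugzilla.queries.fetch_patches_from_commit_queue():
+#         print patch.bug_id(), patch.id()
+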
+
+class CommitterValidator(object):
+
+ def __init__(self, bugzilla):
+ self._bugzilla = bugzilla
+
+ # _view_source_url belongs in some sort of webkit_config.py module.
+ def _view_source_url(self, local_path):
+ return "http://trac.webkit.org/browser/trunk/%s" % local_path
+
+ def _flag_permission_rejection_message(self, setter_email, flag_name):
+ # This could be computed from CommitterList.__file__
+ committer_list = "WebKitTools/Scripts/webkitpy/committers.py"
+ # Should come from some webkit_config.py
+ contribution_guidlines = "http://webkit.org/coding/contributing.html"
+ # This could be queried from the status_server.
+ queue_administrator = "eseidel@chromium.org"
+ # This could be queried from the tool.
+ queue_name = "commit-queue"
+ message = "%s does not have %s permissions according to %s." % (
+ setter_email,
+ flag_name,
+ self._view_source_url(committer_list))
+ message += "\n\n- If you do not have %s rights please read %s for instructions on how to use bugzilla flags." % (
+ flag_name, contribution_guidlines)
+ message += "\n\n- If you have %s rights please correct the error in %s by adding yourself to the file (no review needed). " % (
+ flag_name, committer_list)
+ message += "Due to bug 30084 the %s will require a restart after your change. " % queue_name
+ message += "Please contact %s to request a %s restart. " % (
+ queue_administrator, queue_name)
+ message += "After restart the %s will correctly respect your %s rights." % (
+ queue_name, flag_name)
+ return message
+
+ def _validate_setter_email(self, patch, result_key, rejection_function):
+ committer = getattr(patch, result_key)()
+ # If the flag is set, and we don't recognize the setter, reject the
+ # flag!
+ setter_email = patch._attachment_dictionary.get("%s_email" % result_key)
+ if setter_email and not committer:
+ rejection_function(patch.id(),
+ self._flag_permission_rejection_message(setter_email,
+ result_key))
+ return False
+ return True
+
+ def patches_after_rejecting_invalid_commiters_and_reviewers(self, patches):
+ validated_patches = []
+ for patch in patches:
+ if (self._validate_setter_email(
+ patch, "reviewer", self.reject_patch_from_review_queue)
+ and self._validate_setter_email(
+ patch, "committer", self.reject_patch_from_commit_queue)):
+ validated_patches.append(patch)
+ return validated_patches
+
+ def reject_patch_from_commit_queue(self,
+ attachment_id,
+ additional_comment_text=None):
+ comment_text = "Rejecting patch %s from commit-queue." % attachment_id
+ self._bugzilla.set_flag_on_attachment(attachment_id,
+ "commit-queue",
+ "-",
+ comment_text,
+ additional_comment_text)
+
+ def reject_patch_from_review_queue(self,
+ attachment_id,
+ additional_comment_text=None):
+ comment_text = "Rejecting patch %s from review queue." % attachment_id
+ self._bugzilla.set_flag_on_attachment(attachment_id,
+ 'review',
+ '-',
+ comment_text,
+ additional_comment_text)
+
+
+class Bugzilla(object):
+
+ def __init__(self, dryrun=False, committers=CommitterList()):
+ self.dryrun = dryrun
+ self.authenticated = False
+ self.queries = BugzillaQueries(self)
+ self.committers = committers
+
+ # FIXME: We should use some sort of Browser mock object when in dryrun
+ # mode (to prevent any mistakes).
+ self.browser = Browser()
+ # Ignore bugs.webkit.org/robots.txt until we fix it to allow this
+ # script.
+ self.browser.set_handle_robots(False)
+
+ # FIXME: Much of this should go into some sort of config module:
+ bug_server_host = "bugs.webkit.org"
+ bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)
+ bug_server_url = "https://%s/" % bug_server_host
+ unassigned_email = "webkit-unassigned@lists.webkit.org"
+
+ def bug_url_for_bug_id(self, bug_id, xml=False):
+ content_type = "&ctype=xml" if xml else ""
+ return "%sshow_bug.cgi?id=%s%s" % (self.bug_server_url,
+ bug_id,
+ content_type)
+
+ def short_bug_url_for_bug_id(self, bug_id):
+ return "http://webkit.org/b/%s" % bug_id
+
+ def attachment_url_for_id(self, attachment_id, action="view"):
+ action_param = ""
+ if action and action != "view":
+ action_param = "&action=%s" % action
+ return "%sattachment.cgi?id=%s%s" % (self.bug_server_url,
+ attachment_id,
+ action_param)
+
+ def _parse_attachment_flag(self,
+ element,
+ flag_name,
+ attachment,
+ result_key):
+ flag = element.find('flag', attrs={'name': flag_name})
+ if flag:
+ attachment[flag_name] = flag['status']
+ if flag['status'] == '+':
+ attachment[result_key] = flag['setter']
+
+ def _parse_attachment_element(self, element, bug_id):
+ attachment = {}
+ attachment['bug_id'] = bug_id
+ attachment['is_obsolete'] = (element.has_key('isobsolete') and element['isobsolete'] == "1")
+ attachment['is_patch'] = (element.has_key('ispatch') and element['ispatch'] == "1")
+ attachment['id'] = int(element.find('attachid').string)
+ # FIXME: No need to parse out the url here.
+ attachment['url'] = self.attachment_url_for_id(attachment['id'])
+ attachment['name'] = unicode(element.find('desc').string)
+ attachment['attacher_email'] = str(element.find('attacher').string)
+ attachment['type'] = str(element.find('type').string)
+ self._parse_attachment_flag(
+ element, 'review', attachment, 'reviewer_email')
+ self._parse_attachment_flag(
+ element, 'commit-queue', attachment, 'committer_email')
+ return attachment
+
+ def _parse_bug_page(self, page):
+ soup = BeautifulSoup(page)
+ bug = {}
+ bug["id"] = int(soup.find("bug_id").string)
+ bug["title"] = unicode(soup.find("short_desc").string)
+ bug["reporter_email"] = str(soup.find("reporter").string)
+ bug["assigned_to_email"] = str(soup.find("assigned_to").string)
+ bug["cc_emails"] = [str(element.string)
+ for element in soup.findAll('cc')]
+ bug["attachments"] = [self._parse_attachment_element(element, bug["id"]) for element in soup.findAll('attachment')]
+ return bug
+
+ # Makes testing fetch_*_from_bug() possible until we have a better
+    # BugzillaNetwork abstraction.
+
+ def _fetch_bug_page(self, bug_id):
+ bug_url = self.bug_url_for_bug_id(bug_id, xml=True)
+ log("Fetching: %s" % bug_url)
+ return self.browser.open(bug_url)
+
+ def fetch_bug_dictionary(self, bug_id):
+ return self._parse_bug_page(self._fetch_bug_page(bug_id))
+
+ # FIXME: A BugzillaCache object should provide all these fetch_ methods.
+
+ def fetch_bug(self, bug_id):
+ return Bug(self.fetch_bug_dictionary(bug_id), self)
+
+ def _parse_bug_id_from_attachment_page(self, page):
+ # The "Up" relation happens to point to the bug.
+ up_link = BeautifulSoup(page).find('link', rel='Up')
+ if not up_link:
+ # This attachment does not exist (or you don't have permissions to
+ # view it).
+ return None
+ match = re.search("show_bug.cgi\?id=(?P<bug_id>\d+)", up_link['href'])
+ return int(match.group('bug_id'))
+
+ def bug_id_for_attachment_id(self, attachment_id):
+ self.authenticate()
+
+ attachment_url = self.attachment_url_for_id(attachment_id, 'edit')
+ log("Fetching: %s" % attachment_url)
+ page = self.browser.open(attachment_url)
+ return self._parse_bug_id_from_attachment_page(page)
+
+ # FIXME: This should just return Attachment(id), which should be able to
+ # lazily fetch needed data.
+
+ def fetch_attachment(self, attachment_id):
+ # We could grab all the attachment details off of the attachment edit
+ # page but we already have working code to do so off of the bugs page,
+ # so re-use that.
+ bug_id = self.bug_id_for_attachment_id(attachment_id)
+ if not bug_id:
+ return None
+ attachments = self.fetch_bug(bug_id).attachments(include_obsolete=True)
+ for attachment in attachments:
+ if attachment.id() == int(attachment_id):
+ return attachment
+ return None # This should never be hit.
+
+ def authenticate(self):
+ if self.authenticated:
+ return
+
+ if self.dryrun:
+ log("Skipping log in for dry run...")
+ self.authenticated = True
+ return
+
+ attempts = 0
+ while not self.authenticated:
+ attempts += 1
+ (username, password) = Credentials(
+ self.bug_server_host, git_prefix="bugzilla").read_credentials()
+
+ log("Logging in as %s..." % username)
+ self.browser.open(self.bug_server_url +
+ "index.cgi?GoAheadAndLogIn=1")
+ self.browser.select_form(name="login")
+ self.browser['Bugzilla_login'] = username
+ self.browser['Bugzilla_password'] = password
+ response = self.browser.submit()
+
+ match = re.search("<title>(.+?)</title>", response.read())
+            # If the resulting page has a title containing the word
+            # "Invalid", assume it's the login failure page.
+ if match and re.search("Invalid", match.group(1), re.IGNORECASE):
+ errorMessage = "Bugzilla login failed: %s" % match.group(1)
+ # raise an exception only if this was the last attempt
+ if attempts < 5:
+ log(errorMessage)
+ else:
+ raise Exception(errorMessage)
+ else:
+ self.authenticated = True
+
+ def _fill_attachment_form(self,
+ description,
+ patch_file_object,
+ comment_text=None,
+ mark_for_review=False,
+ mark_for_commit_queue=False,
+ mark_for_landing=False, bug_id=None):
+ self.browser['description'] = description
+ self.browser['ispatch'] = ("1",)
+ self.browser['flag_type-1'] = ('?',) if mark_for_review else ('X',)
+
+ if mark_for_landing:
+ self.browser['flag_type-3'] = ('+',)
+ elif mark_for_commit_queue:
+ self.browser['flag_type-3'] = ('?',)
+ else:
+ self.browser['flag_type-3'] = ('X',)
+
+ if bug_id:
+ patch_name = "bug-%s-%s.patch" % (bug_id, timestamp())
+ else:
+            patch_name = "%s.patch" % timestamp()
+ self.browser.add_file(patch_file_object,
+ "text/plain",
+ patch_name,
+ 'data')
+
+ def add_patch_to_bug(self,
+ bug_id,
+ patch_file_object,
+ description,
+ comment_text=None,
+ mark_for_review=False,
+ mark_for_commit_queue=False,
+ mark_for_landing=False):
+ self.authenticate()
+
+ log('Adding patch "%s" to %sshow_bug.cgi?id=%s' % (description,
+ self.bug_server_url,
+ bug_id))
+
+ if self.dryrun:
+ log(comment_text)
+ return
+
+ self.browser.open("%sattachment.cgi?action=enter&bugid=%s" % (
+ self.bug_server_url, bug_id))
+ self.browser.select_form(name="entryform")
+ self._fill_attachment_form(description,
+ patch_file_object,
+ mark_for_review=mark_for_review,
+ mark_for_commit_queue=mark_for_commit_queue,
+ mark_for_landing=mark_for_landing,
+ bug_id=bug_id)
+ if comment_text:
+ log(comment_text)
+ self.browser['comment'] = comment_text
+ self.browser.submit()
+
+ def prompt_for_component(self, components):
+ log("Please pick a component:")
+ i = 0
+ for name in components:
+ i += 1
+ log("%2d. %s" % (i, name))
+ result = int(raw_input("Enter a number: ")) - 1
+ return components[result]
+
+ def _check_create_bug_response(self, response_html):
+ match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>",
+ response_html)
+ if match:
+ return match.group('bug_id')
+
+ match = re.search(
+ '<div id="bugzilla-body">(?P<error_message>.+)<div id="footer">',
+ response_html,
+ re.DOTALL)
+ error_message = "FAIL"
+ if match:
+ text_lines = BeautifulSoup(
+ match.group('error_message')).findAll(text=True)
+ error_message = "\n" + '\n'.join(
+ [" " + line.strip()
+ for line in text_lines if line.strip()])
+ raise Exception("Bug not created: %s" % error_message)
+
+ def create_bug(self,
+ bug_title,
+ bug_description,
+ component=None,
+ patch_file_object=None,
+ patch_description=None,
+ cc=None,
+ mark_for_review=False,
+ mark_for_commit_queue=False):
+ self.authenticate()
+
+ log('Creating bug with title "%s"' % bug_title)
+ if self.dryrun:
+ log(bug_description)
+ return
+
+ self.browser.open(self.bug_server_url + "enter_bug.cgi?product=WebKit")
+ self.browser.select_form(name="Create")
+ component_items = self.browser.find_control('component').items
+ component_names = map(lambda item: item.name, component_items)
+ if not component:
+ component = "New Bugs"
+ if component not in component_names:
+ component = self.prompt_for_component(component_names)
+ self.browser['component'] = [component]
+ if cc:
+ self.browser['cc'] = cc
+ self.browser['short_desc'] = bug_title
+ self.browser['comment'] = bug_description
+
+ if patch_file_object:
+ self._fill_attachment_form(
+ patch_description,
+ patch_file_object,
+ mark_for_review=mark_for_review,
+ mark_for_commit_queue=mark_for_commit_queue)
+
+ response = self.browser.submit()
+
+ bug_id = self._check_create_bug_response(response.read())
+ log("Bug %s created." % bug_id)
+ log("%sshow_bug.cgi?id=%s" % (self.bug_server_url, bug_id))
+ return bug_id
+
+ def _find_select_element_for_flag(self, flag_name):
+ # FIXME: This will break if we ever re-order attachment flags
+ if flag_name == "review":
+ return self.browser.find_control(type='select', nr=0)
+ if flag_name == "commit-queue":
+ return self.browser.find_control(type='select', nr=1)
+ raise Exception("Don't know how to find flag named \"%s\"" % flag_name)
+
+ def clear_attachment_flags(self,
+ attachment_id,
+ additional_comment_text=None):
+ self.authenticate()
+
+ comment_text = "Clearing flags on attachment: %s" % attachment_id
+ if additional_comment_text:
+ comment_text += "\n\n%s" % additional_comment_text
+ log(comment_text)
+
+ if self.dryrun:
+ return
+
+ self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+ self.browser.select_form(nr=1)
+ self.browser.set_value(comment_text, name='comment', nr=0)
+ self._find_select_element_for_flag('review').value = ("X",)
+ self._find_select_element_for_flag('commit-queue').value = ("X",)
+ self.browser.submit()
+
+ def set_flag_on_attachment(self,
+ attachment_id,
+ flag_name,
+ flag_value,
+ comment_text,
+ additional_comment_text):
+ # FIXME: We need a way to test this function on a live bugzilla
+ # instance.
+
+ self.authenticate()
+
+ if additional_comment_text:
+ comment_text += "\n\n%s" % additional_comment_text
+ log(comment_text)
+
+ if self.dryrun:
+ return
+
+ self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+ self.browser.select_form(nr=1)
+ self.browser.set_value(comment_text, name='comment', nr=0)
+ self._find_select_element_for_flag(flag_name).value = (flag_value,)
+ self.browser.submit()
+
+ # FIXME: All of these bug editing methods have a ridiculous amount of
+ # copy/paste code.
+
+ def obsolete_attachment(self, attachment_id, comment_text=None):
+ self.authenticate()
+
+ log("Obsoleting attachment: %s" % attachment_id)
+ if self.dryrun:
+ log(comment_text)
+ return
+
+ self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
+ self.browser.select_form(nr=1)
+ self.browser.find_control('isobsolete').items[0].selected = True
+ # Also clear any review flag (to remove it from review/commit queues)
+ self._find_select_element_for_flag('review').value = ("X",)
+ self._find_select_element_for_flag('commit-queue').value = ("X",)
+ if comment_text:
+ log(comment_text)
+            # Bugzilla has two textareas named 'comment'; one of them is
+            # somehow hidden. We want the first.
+ self.browser.set_value(comment_text, name='comment', nr=0)
+ self.browser.submit()
+
+ def add_cc_to_bug(self, bug_id, email_address_list):
+ self.authenticate()
+
+ log("Adding %s to the CC list for bug %s" % (email_address_list,
+ bug_id))
+ if self.dryrun:
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ self.browser["newcc"] = ", ".join(email_address_list)
+ self.browser.submit()
+
+ def post_comment_to_bug(self, bug_id, comment_text, cc=None):
+ self.authenticate()
+
+ log("Adding comment to bug %s" % bug_id)
+ if self.dryrun:
+ log(comment_text)
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ self.browser["comment"] = comment_text
+ if cc:
+ self.browser["newcc"] = ", ".join(cc)
+ self.browser.submit()
+
+ def close_bug_as_fixed(self, bug_id, comment_text=None):
+ self.authenticate()
+
+ log("Closing bug %s as fixed" % bug_id)
+ if self.dryrun:
+ log(comment_text)
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ if comment_text:
+ log(comment_text)
+ self.browser['comment'] = comment_text
+ self.browser['bug_status'] = ['RESOLVED']
+ self.browser['resolution'] = ['FIXED']
+ self.browser.submit()
+
+ def reassign_bug(self, bug_id, assignee, comment_text=None):
+ self.authenticate()
+
+ log("Assigning bug %s to %s" % (bug_id, assignee))
+ if self.dryrun:
+ log(comment_text)
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ if comment_text:
+ log(comment_text)
+ self.browser["comment"] = comment_text
+ self.browser["assigned_to"] = assignee
+ self.browser.submit()
+
+ def reopen_bug(self, bug_id, comment_text):
+ self.authenticate()
+
+ log("Re-opening bug %s" % bug_id)
+ # Bugzilla requires a comment when re-opening a bug, so we know it will
+ # never be None.
+ log(comment_text)
+ if self.dryrun:
+ return
+
+ self.browser.open(self.bug_url_for_bug_id(bug_id))
+ self.browser.select_form(name="changeform")
+ bug_status = self.browser.find_control("bug_status", type="select")
+ # This is a hack around the fact that ClientForm.ListControl seems to
+ # have no simpler way to ask if a control has an item named "REOPENED"
+ # without using exceptions for control flow.
+ possible_bug_statuses = map(lambda item: item.name, bug_status.items)
+ if "REOPENED" in possible_bug_statuses:
+ bug_status.value = ["REOPENED"]
+ else:
+ log("Did not reopen bug %s. " +
+ "It appears to already be open with status %s." % (
+ bug_id, bug_status.value))
+ self.browser['comment'] = comment_text
+ self.browser.submit()
diff --git a/WebKitTools/Scripts/webkitpy/bugzilla.pyc b/WebKitTools/Scripts/webkitpy/bugzilla.pyc
new file mode 100644
index 0000000..dfde47c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/bugzilla.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/bugzilla_unittest.py b/WebKitTools/Scripts/webkitpy/bugzilla_unittest.py
new file mode 100644
index 0000000..d555f78
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/bugzilla_unittest.py
@@ -0,0 +1,303 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.committers import CommitterList, Reviewer, Committer
+from webkitpy.bugzilla import Bugzilla, BugzillaQueries, parse_bug_id, CommitterValidator
+from webkitpy.outputcapture import OutputCapture
+from webkitpy.mock import Mock
+
+from webkitpy.BeautifulSoup import BeautifulSoup
+
+
+class MockBrowser(object):
+ def open(self, url):
+ pass
+
+ def select_form(self, name):
+ pass
+
+ def __setitem__(self, key, value):
+ pass
+
+ def submit(self):
+ pass
+
+class CommitterValidatorTest(unittest.TestCase):
+ def test_flag_permission_rejection_message(self):
+ validator = CommitterValidator(bugzilla=None)
+        expected_message = """foo@foo.com does not have review permissions according to http://trac.webkit.org/browser/trunk/WebKitTools/Scripts/webkitpy/committers.py.
+
+- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
+
+- If you have review rights please correct the error in WebKitTools/Scripts/webkitpy/committers.py by adding yourself to the file (no review needed). Due to bug 30084 the commit-queue will require a restart after your change. Please contact eseidel@chromium.org to request a commit-queue restart. After restart the commit-queue will correctly respect your review rights."""
+        self.assertEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_message)
+
+
+class BugzillaTest(unittest.TestCase):
+ _example_attachment = '''
+ <attachment
+ isobsolete="1"
+ ispatch="1"
+ isprivate="0"
+ >
+ <attachid>33721</attachid>
+ <date>2009-07-29 10:23 PDT</date>
+ <desc>Fixed whitespace issue</desc>
+ <filename>patch</filename>
+ <type>text/plain</type>
+ <size>9719</size>
+ <attacher>christian.plesner.hansen@gmail.com</attacher>
+ <flag name="review"
+ id="17931"
+ status="+"
+ setter="one@test.com"
+ />
+ <flag name="commit-queue"
+ id="17932"
+ status="+"
+ setter="two@test.com"
+ />
+ </attachment>
+'''
+ _expected_example_attachment_parsing = {
+ 'bug_id' : 100,
+ 'is_obsolete' : True,
+ 'is_patch' : True,
+ 'id' : 33721,
+ 'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
+ 'name' : "Fixed whitespace issue",
+ 'type' : "text/plain",
+ 'review' : '+',
+ 'reviewer_email' : 'one@test.com',
+ 'commit-queue' : '+',
+ 'committer_email' : 'two@test.com',
+ 'attacher_email' : 'christian.plesner.hansen@gmail.com',
+ }
+
+ def test_parse_bug_id(self):
+        # FIXME: These would all be better as doctests
+ bugs = Bugzilla()
+ self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345"))
+ self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
+ self.assertEquals(12345, parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
+
+ # Our bug parser is super-fragile, but at least we're testing it.
+ self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345"))
+ self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+
+ _example_bug = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
+<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
+<bugzilla version="3.2.3"
+ urlbase="https://bugs.webkit.org/"
+ maintainer="admin@webkit.org"
+ exporter="eric@webkit.org"
+>
+ <bug>
+ <bug_id>32585</bug_id>
+ <creation_ts>2009-12-15 15:17 PST</creation_ts>
+ <short_desc>bug to test webkit-patch and commit-queue failures</short_desc>
+ <delta_ts>2009-12-27 21:04:50 PST</delta_ts>
+ <reporter_accessible>1</reporter_accessible>
+ <cclist_accessible>1</cclist_accessible>
+ <classification_id>1</classification_id>
+ <classification>Unclassified</classification>
+ <product>WebKit</product>
+ <component>Tools / Tests</component>
+ <version>528+ (Nightly build)</version>
+ <rep_platform>PC</rep_platform>
+ <op_sys>Mac OS X 10.5</op_sys>
+ <bug_status>NEW</bug_status>
+ <priority>P2</priority>
+ <bug_severity>Normal</bug_severity>
+ <target_milestone>---</target_milestone>
+ <everconfirmed>1</everconfirmed>
+ <reporter name="Eric Seidel">eric@webkit.org</reporter>
+ <assigned_to name="Nobody">webkit-unassigned@lists.webkit.org</assigned_to>
+ <cc>foo@bar.com</cc>
+ <cc>example@example.com</cc>
+ <long_desc isprivate="0">
+ <who name="Eric Seidel">eric@webkit.org</who>
+ <bug_when>2009-12-15 15:17:28 PST</bug_when>
+ <thetext>bug to test webkit-patch and commit-queue failures
+
+Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
+ </long_desc>
+ <attachment
+ isobsolete="0"
+ ispatch="1"
+ isprivate="0"
+ >
+ <attachid>45548</attachid>
+ <date>2009-12-27 23:51 PST</date>
+ <desc>Patch</desc>
+ <filename>bug-32585-20091228005112.patch</filename>
+ <type>text/plain</type>
+ <size>10882</size>
+ <attacher>mjs@apple.com</attacher>
+
+ <token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
+ <data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
+removed-because-it-was-really-long
+ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
+</data>
+
+ <flag name="review"
+ id="27602"
+ status="?"
+ setter="mjs@apple.com"
+ />
+ </attachment>
+ </bug>
+</bugzilla>
+"""
+ _expected_example_bug_parsing = {
+ "id" : 32585,
+ "title" : u"bug to test webkit-patch and commit-queue failures",
+ "cc_emails" : ["foo@bar.com", "example@example.com"],
+ "reporter_email" : "eric@webkit.org",
+ "assigned_to_email" : "webkit-unassigned@lists.webkit.org",
+ "attachments" : [{
+ 'name': u'Patch',
+ 'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
+ 'is_obsolete': False,
+ 'review': '?',
+ 'is_patch': True,
+ 'attacher_email': 'mjs@apple.com',
+ 'bug_id': 32585,
+ 'type': 'text/plain',
+ 'id': 45548
+ }],
+ }
+
+ def _assert_dictionaries_equal(self, actual, expected):
+ # Make sure we aren't parsing more or less than we expect
+ self.assertEquals(sorted(actual.keys()), sorted(expected.keys()))
+
+ for key, expected_value in expected.items():
+ self.assertEquals(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
+
+ def test_bug_parsing(self):
+ bug = Bugzilla()._parse_bug_page(self._example_bug)
+ self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
+
+ # This could be combined into test_bug_parsing later if desired.
+ def test_attachment_parsing(self):
+ bugzilla = Bugzilla()
+ soup = BeautifulSoup(self._example_attachment)
+ attachment_element = soup.find("attachment")
+ attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
+ self.assertTrue(attachment)
+ self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
+
+ _sample_attachment_detail_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+ <head>
+ <title>
+ Attachment 41073 Details for Bug 27314</title>
+<link rel="Top" href="https://bugs.webkit.org/">
+ <link rel="Up" href="show_bug.cgi?id=27314">
+"""
+
+ def test_attachment_detail_bug_parsing(self):
+ bugzilla = Bugzilla()
+ self.assertEquals(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
+
+ def test_add_cc_to_bug(self):
+ bugzilla = Bugzilla()
+ bugzilla.browser = MockBrowser()
+ bugzilla.authenticate = lambda: None
+ expected_stderr = "Adding ['adam@example.com'] to the CC list for bug 42\n"
+ OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["adam@example.com"]], expected_stderr=expected_stderr)
+
+
+class BugzillaQueriesTest(unittest.TestCase):
+ _sample_request_page = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+ "http://www.w3.org/TR/html4/loose.dtd">
+<html>
+ <head>
+ <title>Request Queue</title>
+ </head>
+<body>
+
+<h3>Flag: review</h3>
+ <table class="requests" cellspacing="0" cellpadding="4" border="1">
+ <tr>
+ <th>Requester</th>
+ <th>Requestee</th>
+ <th>Bug</th>
+ <th>Attachment</th>
+ <th>Created</th>
+ </tr>
+ <tr>
+ <td>Shinichiro Hamaji &lt;hamaji&#64;chromium.org&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
+ <td><a href="attachment.cgi?id=40511&amp;action=review">
+40511: Patch v0</a></td>
+ <td>2009-10-02 04:58 PST</td>
+ </tr>
+ <tr>
+ <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+ <td><a href="attachment.cgi?id=40722&amp;action=review">
+40722: Media controls, the simple approach</a></td>
+ <td>2009-10-06 09:13 PST</td>
+ </tr>
+ <tr>
+ <td>Zan Dobersek &lt;zandobersek&#64;gmail.com&gt;</td>
+ <td></td>
+ <td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
+ <td><a href="attachment.cgi?id=40723&amp;action=review">
+40723: Adjust the media slider thumb size</a></td>
+ <td>2009-10-06 09:15 PST</td>
+ </tr>
+ </table>
+</body>
+</html>
+"""
+
+ def test_request_page_parsing(self):
+ queries = BugzillaQueries(None)
+ self.assertEquals([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))
+
+ def test_load_query(self):
+ queries = BugzillaQueries(Mock())
+ queries._load_query("request.cgi?action=queue&type=review&group=type")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/buildbot.py b/WebKitTools/Scripts/webkitpy/buildbot.py
new file mode 100644
index 0000000..38828fd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/buildbot.py
@@ -0,0 +1,133 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for interacting with WebKit's buildbot
+
+import re
+import urllib2
+
+# Import WebKit-specific modules.
+from webkitpy.webkit_logging import log
+
+# WebKit includes a built copy of BeautifulSoup in Scripts/webkitpy
+# so this import should always succeed.
+from .BeautifulSoup import BeautifulSoup
+
+
+class BuildBot:
+
+ default_host = "build.webkit.org"
+
+ def __init__(self, host=default_host):
+ self.buildbot_host = host
+ self.buildbot_server_url = "http://%s/" % self.buildbot_host
+
+ # If any Leopard builder/tester, Windows builder or Chromium builder is
+ # red we should not be landing patches. Other builders should be added
+ # to this list once they are known to be reliable.
+ # See https://bugs.webkit.org/show_bug.cgi?id=33296 and related bugs.
+ self.core_builder_names_regexps = [
+ "Leopard",
+ "Windows.*Build",
+ "Chromium",
+ ]
+
+ def _parse_builder_status_from_row(self, status_row):
+ # If WebKit's buildbot has an XMLRPC interface we could use, we could
+ # do something more sophisticated here. For now we just parse out the
+ # basics, enough to support basic questions like "is the tree green?"
+ status_cells = status_row.findAll('td')
+ builder = {}
+
+ name_link = status_cells[0].find('a')
+ builder['name'] = name_link.string
+ # We could generate the builder_url from the name in a future version
+ # of this code.
+ builder['builder_url'] = self.buildbot_server_url + name_link['href']
+
+ status_link = status_cells[1].find('a')
+ if not status_link:
+            # We failed to find a link in the status cell, so just give up.
+            # This can happen when a builder was just added and its status
+            # cell simply says "no build".
+ # Other parts of the code depend on is_green being present.
+ builder['is_green'] = False
+ return builder
+ # Will be either a revision number or a build number
+ revision_string = status_link.string
+ # If revision_string has non-digits assume it's not a revision number.
+        builder['built_revision'] = int(revision_string) \
+            if not re.search(r'\D', revision_string) \
+            else None
+ builder['is_green'] = not re.search('fail',
+ status_cells[1].renderContents())
+ # We could parse out the build number instead, but for now just store
+ # the URL.
+ builder['build_url'] = self.buildbot_server_url + status_link['href']
+
+ # We could parse out the current activity too.
+
+ return builder
+
+ def _builder_statuses_with_names_matching_regexps(self,
+ builder_statuses,
+ name_regexps):
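+        # Note: re.match anchors each regexp at the start of the builder name,
+        # so "Leopard" matches "Leopard Intel Release (Build)" but not
+        # "SnowLeopard Intel Release (Build)".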
+ builders = []
+ for builder in builder_statuses:
+ for name_regexp in name_regexps:
+ if re.match(name_regexp, builder['name']):
+ builders.append(builder)
+ return builders
+
+ def red_core_builders(self):
+ red_builders = []
+ for builder in self._builder_statuses_with_names_matching_regexps(
+ self.builder_statuses(),
+ self.core_builder_names_regexps):
+ if not builder['is_green']:
+ red_builders.append(builder)
+ return red_builders
+
+ def red_core_builders_names(self):
+ red_builders = self.red_core_builders()
+ return map(lambda builder: builder['name'], red_builders)
+
+ def core_builders_are_green(self):
+ return not self.red_core_builders()
+
+ def builder_statuses(self):
+ build_status_url = self.buildbot_server_url + 'one_box_per_builder'
+ page = urllib2.urlopen(build_status_url)
+ soup = BeautifulSoup(page)
+
+ builders = []
+ status_table = soup.find('table')
+ for status_row in status_table.findAll('tr'):
+ builder = self._parse_builder_status_from_row(status_row)
+ builders.append(builder)
+ return builders
diff --git a/WebKitTools/Scripts/webkitpy/buildbot.pyc b/WebKitTools/Scripts/webkitpy/buildbot.pyc
new file mode 100644
index 0000000..49b1e68
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/buildbot.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/buildbot_unittest.py b/WebKitTools/Scripts/webkitpy/buildbot_unittest.py
new file mode 100644
index 0000000..bde3e04
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/buildbot_unittest.py
@@ -0,0 +1,155 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.buildbot import BuildBot
+
+from webkitpy.BeautifulSoup import BeautifulSoup
+
+class BuildBotTest(unittest.TestCase):
+
+ _example_one_box_status = '''
+ <table>
+ <tr>
+ <td class="box"><a href="builders/Windows%20Debug%20%28Tests%29">Windows Debug (Tests)</a></td>
+ <td align="center" class="LastBuild box success"><a href="builders/Windows%20Debug%20%28Tests%29/builds/3693">47380</a><br />build<br />successful</td>
+ <td align="center" class="Activity building">building<br />ETA in<br />~ 14 mins<br />at 13:40</td>
+ <tr>
+ <td class="box"><a href="builders/SnowLeopard%20Intel%20Release">SnowLeopard Intel Release</a></td>
+ <td class="LastBuild box" >no build</td>
+ <td align="center" class="Activity building">building<br />< 1 min</td>
+ <tr>
+ <td class="box"><a href="builders/Qt%20Linux%20Release">Qt Linux Release</a></td>
+ <td align="center" class="LastBuild box failure"><a href="builders/Qt%20Linux%20Release/builds/654">47383</a><br />failed<br />compile-webkit</td>
+ <td align="center" class="Activity idle">idle</td>
+ </table>
+'''
+ _expected_example_one_box_parsings = [
+ {
+ 'builder_url': u'http://build.webkit.org/builders/Windows%20Debug%20%28Tests%29',
+ 'build_url': u'http://build.webkit.org/builders/Windows%20Debug%20%28Tests%29/builds/3693',
+ 'is_green': True,
+ 'name': u'Windows Debug (Tests)',
+ 'built_revision': 47380
+ },
+ {
+ 'builder_url': u'http://build.webkit.org/builders/SnowLeopard%20Intel%20Release',
+ 'is_green': False,
+ 'name': u'SnowLeopard Intel Release',
+ },
+ {
+ 'builder_url': u'http://build.webkit.org/builders/Qt%20Linux%20Release',
+ 'build_url': u'http://build.webkit.org/builders/Qt%20Linux%20Release/builds/654',
+ 'is_green': False,
+ 'name': u'Qt Linux Release',
+ 'built_revision': 47383
+ },
+ ]
+
+ def test_status_parsing(self):
+ buildbot = BuildBot()
+
+ soup = BeautifulSoup(self._example_one_box_status)
+ status_table = soup.find("table")
+ input_rows = status_table.findAll('tr')
+
+ for x in range(len(input_rows)):
+ status_row = input_rows[x]
+ expected_parsing = self._expected_example_one_box_parsings[x]
+
+ builder = buildbot._parse_builder_status_from_row(status_row)
+
+ # Make sure we aren't parsing more or less than we expect
+            self.assertEquals(sorted(builder.keys()), sorted(expected_parsing.keys()))
+
+ for key, expected_value in expected_parsing.items():
+ self.assertEquals(builder[key], expected_value, ("Builder %d parse failure for key: %s: Actual='%s' Expected='%s'" % (x, key, builder[key], expected_value)))
+
+ def test_core_builder_methods(self):
+ buildbot = BuildBot()
+
+ # Override builder_statuses function to not touch the network.
+ def example_builder_statuses(): # We could use instancemethod() to bind 'self' but we don't need to.
+ return BuildBotTest._expected_example_one_box_parsings
+ buildbot.builder_statuses = example_builder_statuses
+
+ buildbot.core_builder_names_regexps = [ 'Leopard', "Windows.*Build" ]
+ self.assertEquals(buildbot.red_core_builders_names(), [])
+ self.assertTrue(buildbot.core_builders_are_green())
+
+ buildbot.core_builder_names_regexps = [ 'SnowLeopard', 'Qt' ]
+ self.assertEquals(buildbot.red_core_builders_names(), [ u'SnowLeopard Intel Release', u'Qt Linux Release' ])
+ self.assertFalse(buildbot.core_builders_are_green())
+
+ def test_builder_name_regexps(self):
+ buildbot = BuildBot()
+
+ # For complete testing, this list should match the list of builders at build.webkit.org:
+ example_builders = [
+ { 'name': u'Tiger Intel Release', },
+ { 'name': u'Leopard Intel Release (Build)', },
+ { 'name': u'Leopard Intel Release (Tests)', },
+ { 'name': u'Leopard Intel Debug (Build)', },
+ { 'name': u'Leopard Intel Debug (Tests)', },
+ { 'name': u'SnowLeopard Intel Release (Build)', },
+ { 'name': u'SnowLeopard Intel Release (Tests)', },
+ { 'name': u'SnowLeopard Intel Leaks', },
+ { 'name': u'Windows Release (Build)', },
+ { 'name': u'Windows Release (Tests)', },
+ { 'name': u'Windows Debug (Build)', },
+ { 'name': u'Windows Debug (Tests)', },
+ { 'name': u'Qt Linux Release', },
+ { 'name': u'Gtk Linux Release', },
+ { 'name': u'Gtk Linux 32-bit Debug', },
+ { 'name': u'Gtk Linux 64-bit Debug', },
+ { 'name': u'Chromium Linux Release', },
+ { 'name': u'Chromium Mac Release', },
+ { 'name': u'Chromium Win Release', },
+ ]
+ name_regexps = [ "Leopard", "Windows.*Build", "Chromium" ]
+ expected_builders = [
+ { 'name': u'Leopard Intel Release (Build)', },
+ { 'name': u'Leopard Intel Release (Tests)', },
+ { 'name': u'Leopard Intel Debug (Build)', },
+ { 'name': u'Leopard Intel Debug (Tests)', },
+ { 'name': u'Windows Release (Build)', },
+ { 'name': u'Windows Debug (Build)', },
+ { 'name': u'Chromium Linux Release', },
+ { 'name': u'Chromium Mac Release', },
+ { 'name': u'Chromium Win Release', },
+ ]
+
+ # This test should probably be updated if the default regexp list changes
+ self.assertEquals(buildbot.core_builder_names_regexps, name_regexps)
+
+ builders = buildbot._builder_statuses_with_names_matching_regexps(example_builders, name_regexps)
+ self.assertEquals(builders, expected_builders)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/changelogs.py b/WebKitTools/Scripts/webkitpy/changelogs.py
new file mode 100644
index 0000000..ebc89c4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/changelogs.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for parsing and modifying ChangeLog files
+
+import fileinput # inplace file editing for set_reviewer_in_changelog
+import re
+import textwrap
+
+
+def view_source_url(revision_number):
+    # FIXME: This doesn't really belong in this file, but we don't have a
+ # better home for it yet.
+ # Maybe eventually a webkit_config.py?
+ return "http://trac.webkit.org/changeset/%s" % revision_number
+
+
+class ChangeLog:
+
+ def __init__(self, path):
+ self.path = path
+
+ _changelog_indent = " " * 8
+
+ # e.g. 2009-06-03 Eric Seidel <eric@webkit.org>
+ date_line_regexp = re.compile('^(\d{4}-\d{2}-\d{2})' # Consume the date.
+ + '\s+(.+)\s+' # Consume the name.
+ + '<([^<>]+)>$') # And the email address.
+
+ @staticmethod
+ def _parse_latest_entry_from_file(changelog_file):
+ entry_lines = []
+ # The first line should be a date line.
+ first_line = changelog_file.readline()
+ if not ChangeLog.date_line_regexp.match(first_line):
+ return None
+ entry_lines.append(first_line)
+
+ for line in changelog_file:
+ # If we've hit the next entry, return.
+ if ChangeLog.date_line_regexp.match(line):
+ # Remove the extra newline at the end
+ return ''.join(entry_lines[:-1])
+ entry_lines.append(line)
+ return None # We never found a date line!
+
+ def latest_entry(self):
+ changelog_file = open(self.path)
+ try:
+ return self._parse_latest_entry_from_file(changelog_file)
+ finally:
+ changelog_file.close()
+
+ # _wrap_line and _wrap_lines exist to work around
+ # http://bugs.python.org/issue1859
+
+ def _wrap_line(self, line):
+ return textwrap.fill(line,
+ width=70,
+ initial_indent=self._changelog_indent,
+ # Don't break urls which may be longer than width.
+ break_long_words=False,
+ subsequent_indent=self._changelog_indent)
+
+ # Workaround as suggested by guido in
+ # http://bugs.python.org/issue1859#msg60040
+
+ def _wrap_lines(self, message):
+ lines = [self._wrap_line(line) for line in message.splitlines()]
+ return "\n".join(lines)
+
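+    # _wrap_lines() wraps each line of the message at 70 columns with the
+    # eight-space ChangeLog indent; because break_long_words=False, a long
+    # bug or trac URL on a line of its own is indented but never split.
+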
+ # This probably does not belong in changelogs.py
+ def _message_for_revert(self, revision, reason, bug_url):
+ message = "No review, rolling out r%s.\n" % revision
+ message += "%s\n" % view_source_url(revision)
+ if bug_url:
+ message += "%s\n" % bug_url
+ # Add an extra new line after the rollout links, before any reason.
+ message += "\n"
+ if reason:
+ message += "%s\n\n" % reason
+ return self._wrap_lines(message)
+
+ def update_for_revert(self, revision, reason, bug_url=None):
+ reviewed_by_regexp = re.compile(
+ "%sReviewed by NOBODY \(OOPS!\)\." % self._changelog_indent)
+ removing_boilerplate = False
+ # inplace=1 creates a backup file and re-directs stdout to the file
+ for line in fileinput.FileInput(self.path, inplace=1):
+ if reviewed_by_regexp.search(line):
+ message_lines = self._message_for_revert(revision,
+ reason,
+ bug_url)
+ print reviewed_by_regexp.sub(message_lines, line),
+ # Remove all the ChangeLog boilerplate between the Reviewed by
+ # line and the first changed file.
+ removing_boilerplate = True
+ elif removing_boilerplate:
+ if line.find('*') >= 0: # each changed file is preceded by a *
+ removing_boilerplate = False
+
+ if not removing_boilerplate:
+ print line,
+
+ def set_reviewer(self, reviewer):
+ # inplace=1 creates a backup file and re-directs stdout to the file
+ for line in fileinput.FileInput(self.path, inplace=1):
+ # Trailing comma suppresses printing newline
+ print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")),
diff --git a/WebKitTools/Scripts/webkitpy/changelogs.pyc b/WebKitTools/Scripts/webkitpy/changelogs.pyc
new file mode 100644
index 0000000..2fca994
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/changelogs.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/changelogs_unittest.py b/WebKitTools/Scripts/webkitpy/changelogs_unittest.py
new file mode 100644
index 0000000..de3e60c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/changelogs_unittest.py
@@ -0,0 +1,179 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+from changelogs import *
+
+import os
+import tempfile
+from StringIO import StringIO
+
+class ChangeLogsTest(unittest.TestCase):
+
+ _example_entry = '''2009-08-17 Peter Kasting <pkasting@google.com>
+
+ Reviewed by Steve Falkenburg.
+
+ https://bugs.webkit.org/show_bug.cgi?id=27323
+ Only add Cygwin to the path when it isn't already there. This avoids
+ causing problems for people who purposefully have non-Cygwin versions of
+ executables like svn in front of the Cygwin ones in their paths.
+
+ * DumpRenderTree/win/DumpRenderTree.vcproj:
+ * DumpRenderTree/win/ImageDiff.vcproj:
+ * DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj:
+'''
+
+    # More example text than we need. Eventually we need to support parsing all of this and write tests for that parsing.
+ _example_changelog = '''2009-08-17 David Kilzer <ddkilzer@apple.com>
+
+ <http://webkit.org/b/28393> check-webkit-style: add check for use of std::max()/std::min() instead of MAX()/MIN()
+
+ Reviewed by David Levin.
+
+ * Scripts/modules/cpp_style.py:
+ (_ERROR_CATEGORIES): Added 'runtime/max_min_macros'.
+ (check_max_min_macros): Added. Returns level 4 error when MAX()
+ and MIN() macros are used in header files and C++ source files.
+ (check_style): Added call to check_max_min_macros().
+ * Scripts/modules/cpp_style_unittest.py: Added unit tests.
+ (test_max_macro): Added.
+ (test_min_macro): Added.
+
+2009-08-16 David Kilzer <ddkilzer@apple.com>
+
+ Backed out r47343 which was mistakenly committed
+
+ * Scripts/bugzilla-tool:
+ * Scripts/modules/scm.py:
+
+2009-06-18 Darin Adler <darin@apple.com>
+
+ Rubber stamped by Mark Rowe.
+
+ * DumpRenderTree/mac/DumpRenderTreeWindow.mm:
+ (-[DumpRenderTreeWindow close]): Resolved crashes seen during regression
+ tests. The close method can be called on a window that's already closed
+ so we can't assert here.
+
+== Rolled over to ChangeLog-2009-06-16 ==
+'''
+
+ def test_latest_entry_parse(self):
+ changelog_contents = "%s\n%s" % (self._example_entry, self._example_changelog)
+ changelog_file = StringIO(changelog_contents)
+ latest_entry = ChangeLog._parse_latest_entry_from_file(changelog_file)
+ self.assertEquals(self._example_entry, latest_entry)
+
+ @staticmethod
+ def _write_tmp_file_with_contents(contents):
+ (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
+ file = os.fdopen(file_descriptor, 'w')
+ file.write(contents)
+ file.close()
+ return file_path
+
+ @staticmethod
+ def _read_file_contents(file_path):
+ file = open(file_path)
+ contents = file.read()
+ file.close()
+ return contents
+
+ _new_entry_boilerplate = '''2009-08-19 Eric Seidel <eric@webkit.org>
+
+ Reviewed by NOBODY (OOPS!).
+
+ Need a short description and bug URL (OOPS!)
+
+ * Scripts/bugzilla-tool:
+'''
+
+ def test_set_reviewer(self):
+ changelog_contents = "%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+ changelog_path = self._write_tmp_file_with_contents(changelog_contents)
+ reviewer_name = 'Test Reviewer'
+ ChangeLog(changelog_path).set_reviewer(reviewer_name)
+ actual_contents = self._read_file_contents(changelog_path)
+ expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
+ os.remove(changelog_path)
+ self.assertEquals(actual_contents, expected_contents)
+
+ _revert_message = """ No review, rolling out r12345.
+ http://trac.webkit.org/changeset/12345
+ http://example.com/123
+
+ This is a very long reason which should be long enough so that
+ _message_for_revert will need to wrap it. We'll also include
+ a
+ https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354
+ link so that we can make sure we wrap that right too.
+"""
+
+ def test_message_for_revert(self):
+ changelog = ChangeLog("/fake/path")
+ long_reason = "This is a very long reason which should be long enough so that _message_for_revert will need to wrap it. We'll also include a https://veryveryveryveryverylongbugurl.com/reallylongbugthingy.cgi?bug_id=12354 link so that we can make sure we wrap that right too."
+ message = changelog._message_for_revert(12345, long_reason, "http://example.com/123")
+ self.assertEquals(message, self._revert_message)
+
+ _revert_entry_with_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org>
+
+ No review, rolling out r12345.
+ http://trac.webkit.org/changeset/12345
+ http://example.com/123
+
+ Reason
+
+ * Scripts/bugzilla-tool:
+'''
+
+ _revert_entry_without_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org>
+
+ No review, rolling out r12345.
+ http://trac.webkit.org/changeset/12345
+
+ Reason
+
+ * Scripts/bugzilla-tool:
+'''
+
+ def _assert_update_for_revert_output(self, args, expected_entry):
+ changelog_contents = "%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
+ changelog_path = self._write_tmp_file_with_contents(changelog_contents)
+ changelog = ChangeLog(changelog_path)
+ changelog.update_for_revert(*args)
+ actual_entry = changelog.latest_entry()
+ os.remove(changelog_path)
+ self.assertEquals(actual_entry, expected_entry)
+
+ def test_update_for_revert(self):
+ self._assert_update_for_revert_output([12345, "Reason"], self._revert_entry_without_bug_url)
+ self._assert_update_for_revert_output([12345, "Reason", "http://example.com/123"], self._revert_entry_with_bug_url)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/commands/__init__.py b/WebKitTools/Scripts/webkitpy/commands/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/WebKitTools/Scripts/webkitpy/commands/__init__.pyc b/WebKitTools/Scripts/webkitpy/commands/__init__.pyc
new file mode 100644
index 0000000..ac801ef
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/__init__.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py
new file mode 100644
index 0000000..53af5b1
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.stepsequence import StepSequence
+
+
+class AbstractSequencedCommand(AbstractDeclarativeCommand):
+ steps = None
+ def __init__(self):
+ self._sequence = StepSequence(self.steps)
+ AbstractDeclarativeCommand.__init__(self, self._sequence.options())
+
+ def _prepare_state(self, options, args, tool):
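+        # Hook for subclasses: return a dict to seed the state passed to the
+        # step sequence (Land._prepare_state in commands/download.py, for
+        # example, returns a {"bug_id": ...} dict); returning None supplies
+        # no initial state.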
+ return None
+
+ def execute(self, options, args, tool):
+ self._sequence.run_and_handle_errors(tool, options, self._prepare_state(options, args, tool))
diff --git a/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc
new file mode 100644
index 0000000..6d3afc4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/abstractsequencedcommand.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/commandtest.py b/WebKitTools/Scripts/webkitpy/commands/commandtest.py
new file mode 100644
index 0000000..a56cb05
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/commandtest.py
@@ -0,0 +1,38 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+
+class CommandsTest(unittest.TestCase):
+ def assert_execute_outputs(self, command, args, expected_stdout="", expected_stderr="", options=Mock(), tool=MockBugzillaTool()):
+ command.bind_to_tool(tool)
+ OutputCapture().assert_outputs(self, command.execute, [options, args, tool], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/commands/download.py b/WebKitTools/Scripts/webkitpy/commands/download.py
new file mode 100644
index 0000000..49a6862
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/download.py
@@ -0,0 +1,284 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from optparse import make_option
+
+import webkitpy.steps as steps
+
+from webkitpy.bugzilla import parse_bug_id
+# We could instead use from modules import buildsteps and then prefix every buildstep with "buildsteps."
+from webkitpy.changelogs import ChangeLog
+from webkitpy.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.comments import bug_comment_from_commit_text
+from webkitpy.executive import ScriptError
+from webkitpy.grammar import pluralize
+from webkitpy.webkit_logging import error, log
+from webkitpy.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.stepsequence import StepSequence
+
+
+class Build(AbstractSequencedCommand):
+ name = "build"
+ help_text = "Update working copy and build"
+ steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.Build,
+ ]
+
+
+class BuildAndTest(AbstractSequencedCommand):
+ name = "build-and-test"
+ help_text = "Update working copy, build, and run the tests"
+ steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.Build,
+ steps.RunTests,
+ ]
+
+
+class Land(AbstractSequencedCommand):
+ name = "land"
+ help_text = "Land the current working directory diff and updates the associated bug if any"
+ argument_names = "[BUGID]"
+ show_in_main_help = True
+ steps = [
+ steps.EnsureBuildersAreGreen,
+ steps.UpdateChangeLogsWithReviewer,
+ steps.EnsureBuildersAreGreen,
+ steps.Build,
+ steps.RunTests,
+ steps.Commit,
+ steps.CloseBugForLandDiff,
+ ]
+ long_help = """land commits the current working copy diff (just as svn or git commit would).
+land will build and run the tests before committing.
+If a bug id is provided, or one can be found in the ChangeLog, land will update the bug after committing."""
+
+ def _prepare_state(self, options, args, tool):
+ return {
+ "bug_id" : (args and args[0]) or parse_bug_id(tool.scm().create_patch()),
+ }
+
+
+class AbstractPatchProcessingCommand(AbstractDeclarativeCommand):
+ # Subclasses must implement the methods below. We don't declare them here
+ # because we want to be able to implement them with mix-ins.
+ #
+ # def _fetch_list_of_patches_to_process(self, options, args, tool):
+ # def _prepare_to_process(self, options, args, tool):
+
+ @staticmethod
+ def _collect_patches_by_bug(patches):
+ bugs_to_patches = {}
+ for patch in patches:
+ bugs_to_patches[patch.bug_id()] = bugs_to_patches.get(patch.bug_id(), []) + [patch]
+ return bugs_to_patches
+
+ def execute(self, options, args, tool):
+ self._prepare_to_process(options, args, tool)
+ patches = self._fetch_list_of_patches_to_process(options, args, tool)
+
+ # It's nice to print out total statistics.
+ bugs_to_patches = self._collect_patches_by_bug(patches)
+ log("Processing %s from %s." % (pluralize("patch", len(patches)), pluralize("bug", len(bugs_to_patches))))
+
+ for patch in patches:
+ self._process_patch(patch, options, args, tool)
+
+
+class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
+ prepare_steps = None
+ main_steps = None
+
+ def __init__(self):
+ options = []
+ self._prepare_sequence = StepSequence(self.prepare_steps)
+ self._main_sequence = StepSequence(self.main_steps)
+ options = sorted(set(self._prepare_sequence.options() + self._main_sequence.options()))
+ AbstractPatchProcessingCommand.__init__(self, options)
+
+ def _prepare_to_process(self, options, args, tool):
+ self._prepare_sequence.run_and_handle_errors(tool, options)
+
+ def _process_patch(self, patch, options, args, tool):
+ state = { "patch" : patch }
+ self._main_sequence.run_and_handle_errors(tool, options, state)
+
+
+class ProcessAttachmentsMixin(object):
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ return map(lambda patch_id: tool.bugs.fetch_attachment(patch_id), args)
+
+
+class ProcessBugsMixin(object):
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ all_patches = []
+ for bug_id in args:
+ patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
+ log("%s found on bug %s." % (pluralize("reviewed patch", len(patches)), bug_id))
+ all_patches += patches
+ return all_patches
+
+
+class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+ name = "check-style"
+ help_text = "Run check-webkit-style on the specified attachments"
+ argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+ main_steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.ApplyPatch,
+ steps.CheckStyle,
+ ]
+
+
+class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
+ name = "build-attachment"
+ help_text = "Apply and build patches from bugzilla"
+ argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+ main_steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.ApplyPatch,
+ steps.Build,
+ ]
+
+
+class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
+ prepare_steps = [
+ steps.EnsureLocalCommitIfNeeded,
+ steps.CleanWorkingDirectoryWithLocalCommits,
+ steps.Update,
+ ]
+ main_steps = [
+ steps.ApplyPatchWithLocalCommit,
+ ]
+ long_help = """Updates the working copy.
+Downloads and applies the patches, creating local commits if necessary."""
+
+
+class ApplyAttachment(AbstractPatchApplyingCommand, ProcessAttachmentsMixin):
+ name = "apply-attachment"
+ help_text = "Apply an attachment to the local working directory"
+ argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+ show_in_main_help = True
+
+
+class ApplyFromBug(AbstractPatchApplyingCommand, ProcessBugsMixin):
+ name = "apply-from-bug"
+ help_text = "Apply reviewed patches from provided bugs to the local working directory"
+ argument_names = "BUGID [BUGIDS]"
+ show_in_main_help = True
+
+
+class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
+ prepare_steps = [
+ steps.EnsureBuildersAreGreen,
+ ]
+ main_steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.ApplyPatch,
+ steps.EnsureBuildersAreGreen,
+ steps.Build,
+ steps.RunTests,
+ steps.Commit,
+ steps.ClosePatch,
+ steps.CloseBug,
+ ]
+ long_help = """Checks to make sure builders are green.
+Updates the working copy.
+Applies the patch.
+Builds.
+Runs the layout tests.
+Commits the patch.
+Clears the flags on the patch.
+Closes the bug if no patches are marked for review."""
+
+
+class LandAttachment(AbstractPatchLandingCommand, ProcessAttachmentsMixin):
+ name = "land-attachment"
+ help_text = "Land patches from bugzilla, optionally building and testing them first"
+ argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
+ show_in_main_help = True
+
+
+class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
+ name = "land-from-bug"
+ help_text = "Land all patches on the given bugs, optionally building and testing them first"
+ argument_names = "BUGID [BUGIDS]"
+ show_in_main_help = True
+
+
+class Rollout(AbstractSequencedCommand):
+ name = "rollout"
+ show_in_main_help = True
+ help_text = "Revert the given revision in the working copy and optionally commit the revert and re-open the original bug"
+ argument_names = "REVISION REASON"
+ long_help = """Updates the working copy.
+Applies the inverse diff for the provided revision.
+Creates an appropriate rollout ChangeLog, including a trac link and bug link.
+Opens the generated ChangeLogs in $EDITOR.
+Shows the prepared diff for confirmation.
+Commits the revert and updates the bug (including re-opening the bug if necessary)."""
+ steps = [
+ steps.CleanWorkingDirectory,
+ steps.Update,
+ steps.RevertRevision,
+ steps.PrepareChangeLogForRevert,
+ steps.EditChangeLog,
+ steps.ConfirmDiff,
+ steps.CompleteRollout,
+ ]
+
+ @staticmethod
+ def _parse_bug_id_from_revision_diff(tool, revision):
+ original_diff = tool.scm().diff_for_revision(revision)
+ return parse_bug_id(original_diff)
+
+ def execute(self, options, args, tool):
+ revision = args[0]
+ reason = args[1]
+ bug_id = self._parse_bug_id_from_revision_diff(tool, revision)
+ if options.complete_rollout:
+ if bug_id:
+ log("Will re-open bug %s after rollout." % bug_id)
+ else:
+ log("Failed to parse bug number from diff. No bugs will be updated/reopened after the rollout.")
+
+ state = {
+ "revision" : revision,
+ "bug_id" : bug_id,
+ "reason" : reason,
+ }
+ self._sequence.run_and_handle_errors(tool, options, state)
diff --git a/WebKitTools/Scripts/webkitpy/commands/download.pyc b/WebKitTools/Scripts/webkitpy/commands/download.pyc
new file mode 100644
index 0000000..3a0046f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/download.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/download_unittest.py b/WebKitTools/Scripts/webkitpy/commands/download_unittest.py
new file mode 100644
index 0000000..f60c5b8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/download_unittest.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.commands.commandtest import CommandsTest
+from webkitpy.commands.download import *
+from webkitpy.mock import Mock
+
+class DownloadCommandsTest(CommandsTest):
+ def _default_options(self):
+ options = Mock()
+ options.force_clean = False
+ options.clean = True
+ options.check_builders = True
+ options.quiet = False
+ options.non_interactive = False
+ options.update = True
+ options.build = True
+ options.test = True
+ options.close_bug = True
+ options.complete_rollout = False
+ return options
+
+ def test_build(self):
+ expected_stderr = "Updating working directory\nBuilding WebKit\n"
+ self.assert_execute_outputs(Build(), [], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_build_and_test(self):
+ expected_stderr = "Updating working directory\nBuilding WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\n"
+ self.assert_execute_outputs(BuildAndTest(), [], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_apply_attachment(self):
+ options = self._default_options()
+ options.update = True
+ options.local_commit = True
+ expected_stderr = "Updating working directory\nProcessing 1 patch from 1 bug.\nProcessing patch 197 from bug 42.\n"
+ self.assert_execute_outputs(ApplyAttachment(), [197], options=options, expected_stderr=expected_stderr)
+
+ def test_apply_patches(self):
+ options = self._default_options()
+ options.update = True
+ options.local_commit = True
+ expected_stderr = "Updating working directory\n2 reviewed patches found on bug 42.\nProcessing 2 patches from 1 bug.\nProcessing patch 197 from bug 42.\nProcessing patch 128 from bug 42.\n"
+ self.assert_execute_outputs(ApplyFromBug(), [42], options=options, expected_stderr=expected_stderr)
+
+ def test_land_diff(self):
+ expected_stderr = "Building WebKit\nRunning Python unit tests\nRunning Perl unit tests\nRunning JavaScriptCore tests\nRunning run-webkit-tests\nUpdating bug 42\n"
+ self.assert_execute_outputs(Land(), [42], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_check_style(self):
+ expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nRunning check-webkit-style\n"
+ self.assert_execute_outputs(CheckStyle(), [197], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_build_attachment(self):
+ expected_stderr = "Processing 1 patch from 1 bug.\nUpdating working directory\nProcessing patch 197 from bug 42.\nBuilding WebKit\n"
+ self.assert_execute_outputs(BuildAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_land_attachment(self):
+        # FIXME: This expected result is imperfect; note that the same patch still appears to have review=+ even after the flags should have been cleared.
+ expected_stderr = """Processing 1 patch from 1 bug.
+Updating working directory
+Processing patch 197 from bug 42.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running run-webkit-tests
+Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
+"""
+ self.assert_execute_outputs(LandAttachment(), [197], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_land_patches(self):
+        # FIXME: This expected result is imperfect; note that the same patch still appears to have review=+ even after the flags should have been cleared.
+ expected_stderr = """2 reviewed patches found on bug 42.
+Processing 2 patches from 1 bug.
+Updating working directory
+Processing patch 197 from bug 42.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running run-webkit-tests
+Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
+Updating working directory
+Processing patch 128 from bug 42.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running run-webkit-tests
+Not closing bug 42 as attachment 197 has review=+. Assuming there are more patches to land from this bug.
+"""
+ self.assert_execute_outputs(LandFromBug(), [42], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_rollout(self):
+ expected_stderr = "Updating working directory\nRunning prepare-ChangeLog\n\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"webkit-patch land 12345\" to commit the rollout.\n"
+ self.assert_execute_outputs(Rollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
+
+ def test_complete_rollout(self):
+ options = self._default_options()
+ options.complete_rollout = True
+ expected_stderr = "Will re-open bug 12345 after rollout.\nUpdating working directory\nRunning prepare-ChangeLog\nBuilding WebKit\n"
+ self.assert_execute_outputs(Rollout(), [852, "Reason"], options=options, expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py
new file mode 100644
index 0000000..e3e14dd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from StringIO import StringIO
+
+from webkitpy.commands.queues import AbstractReviewQueue
+from webkitpy.committers import CommitterList
+from webkitpy.executive import ScriptError
+from webkitpy.webkitport import WebKitPort
+from webkitpy.queueengine import QueueEngine
+
+
+class AbstractEarlyWarningSystem(AbstractReviewQueue):
+ _build_style = "release"
+
+ def __init__(self):
+ AbstractReviewQueue.__init__(self)
+ self.port = WebKitPort.port(self.port_name)
+
+ def should_proceed_with_work_item(self, patch):
+ try:
+ self.run_webkit_patch([
+ "build",
+ self.port.flag(),
+ "--build-style=%s" % self._build_style,
+ "--force-clean",
+ "--quiet"])
+ self._update_status("Building", patch)
+ except ScriptError, e:
+ self._update_status("Unable to perform a build")
+ return False
+ return True
+
+ def _review_patch(self, patch):
+ self.run_webkit_patch([
+ "build-attachment",
+ self.port.flag(),
+ "--build-style=%s" % self._build_style,
+ "--force-clean",
+ "--quiet",
+ "--non-interactive",
+ "--parent-command=%s" % self.name,
+ "--no-update",
+ patch.id()])
+
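+    # handle_script_error below distinguishes two failure modes: when the
+    # failing command was svn-apply (the patch simply did not apply), the
+    # error is reported via QueueEngine.exit_after_handled_error(); any other
+    # failure is presumably a genuine build break, so a comment linking to
+    # the build output is posted on the bug and the process exits.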
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ is_svn_apply = script_error.command_name() == "svn-apply"
+ status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply)
+ if is_svn_apply:
+ QueueEngine.exit_after_handled_error(script_error)
+ results_link = tool.status_server.results_url_for_status(status_id)
+ message = "Attachment %s did not build on %s:\nBuild output: %s" % (state["patch"].id(), cls.port_name, results_link)
+ tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers)
+ exit(1)
+
+
+class GtkEWS(AbstractEarlyWarningSystem):
+ name = "gtk-ews"
+ port_name = "gtk"
+ watchers = AbstractEarlyWarningSystem.watchers + [
+ "gns@gnome.org",
+ "xan.lopez@gmail.com",
+ ]
+
+
+class QtEWS(AbstractEarlyWarningSystem):
+ name = "qt-ews"
+ port_name = "qt"
+
+
+class ChromiumEWS(AbstractEarlyWarningSystem):
+ name = "chromium-ews"
+ port_name = "chromium"
+ watchers = AbstractEarlyWarningSystem.watchers + [
+ "dglazkov@chromium.org",
+ ]
+
+
+# For platforms that we can't run inside a VM (like Mac OS X), we require
+# patches to be uploaded by committers, who are generally trustworthy folk. :)
+class AbstractCommitterOnlyEWS(AbstractEarlyWarningSystem):
+ def __init__(self, committers=CommitterList()):
+ AbstractEarlyWarningSystem.__init__(self)
+ self._committers = committers
+
+ def process_work_item(self, patch):
+ if not self._committers.committer_by_email(patch.attacher_email()):
+ self._did_error(patch, "%s cannot process patches from non-committers :(" % self.name)
+ return
+ AbstractEarlyWarningSystem.process_work_item(self, patch)
+
+
+class MacEWS(AbstractCommitterOnlyEWS):
+ name = "mac-ews"
+ port_name = "mac"
diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc
new file mode 100644
index 0000000..d6e0800
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py b/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py
new file mode 100644
index 0000000..d516b84
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/early_warning_system_unittest.py
@@ -0,0 +1,62 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.commands.early_warning_system import *
+from webkitpy.commands.queuestest import QueuesTest
+from webkitpy.mock import Mock
+
+class EarlyWarningSystemTest(QueuesTest):
+ def test_chromium_ews(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: chromium-ews will discard all local changes in \"%s\"\nRunning WebKit chromium-ews.\n" % os.getcwd(),
+ "handle_unexpected_error" : "Mock error message\n",
+ }
+ self.assert_queue_outputs(ChromiumEWS(), expected_stderr=expected_stderr)
+
+ def test_qt_ews(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: qt-ews will discard all local changes in \"%s\"\nRunning WebKit qt-ews.\n" % os.getcwd(),
+ "handle_unexpected_error" : "Mock error message\n",
+ }
+ self.assert_queue_outputs(QtEWS(), expected_stderr=expected_stderr)
+
+ def test_gtk_ews(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: gtk-ews will discard all local changes in \"%s\"\nRunning WebKit gtk-ews.\n" % os.getcwd(),
+ "handle_unexpected_error" : "Mock error message\n",
+ }
+ self.assert_queue_outputs(GtkEWS(), expected_stderr=expected_stderr)
+
+ def test_mac_ews(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: mac-ews will discard all local changes in \"%s\"\nRunning WebKit mac-ews.\n" % os.getcwd(),
+ "handle_unexpected_error" : "Mock error message\n",
+ }
+ self.assert_queue_outputs(MacEWS(), expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/commands/openbugs.py b/WebKitTools/Scripts/webkitpy/commands/openbugs.py
new file mode 100644
index 0000000..25bdefc
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/openbugs.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import sys
+
+from webkitpy.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.webkit_logging import log
+
+
+class OpenBugs(AbstractDeclarativeCommand):
+ name = "open-bugs"
+ help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser"
+
+ bug_number_regexp = re.compile(r"\b\d{4,6}\b")
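+    # \b\d{4,6}\b matches standalone runs of four to six digits, so likely
+    # bug numbers such as "1234" or "123456" are picked up while shorter
+    # ("123") and longer ("1234567") digit runs are ignored, e.g.:
+    #
+    #   bug_number_regexp.findall("see 123456 and 234567")
+    #   # -> ["123456", "234567"]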
+
+ def _open_bugs(self, bug_ids):
+ for bug_id in bug_ids:
+ bug_url = self.tool.bugs.bug_url_for_bug_id(bug_id)
+ self.tool.user.open_url(bug_url)
+
+ # _find_bugs_in_string mostly exists for easy unit testing.
+ def _find_bugs_in_string(self, string):
+ return self.bug_number_regexp.findall(string)
+
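+    # The sum(..., []) below is just list flattening: the per-string match
+    # lists are concatenated onto an empty list, e.g.
+    #
+    #   sum([["1234"], [], ["2345", "3456"]], [])  # -> ["1234", "2345", "3456"]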
+ def _find_bugs_in_iterable(self, iterable):
+ return sum([self._find_bugs_in_string(string) for string in iterable], [])
+
+ def execute(self, options, args, tool):
+ if args:
+ bug_ids = self._find_bugs_in_iterable(args)
+ else:
+            # This won't open bugs until stdin is closed, but it could easily be made to do so; that would just make unit testing slightly harder.
+ bug_ids = self._find_bugs_in_iterable(sys.stdin)
+
+ log("%s bugs found in input." % len(bug_ids))
+
+ self._open_bugs(bug_ids)
diff --git a/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py b/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py
new file mode 100644
index 0000000..71fefd2
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/openbugs_unittest.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.commands.commandtest import CommandsTest
+from webkitpy.commands.openbugs import OpenBugs
+
+class OpenBugsTest(CommandsTest):
+
+ find_bugs_in_string_expectations = [
+ ["123", []],
+ ["1234", ["1234"]],
+ ["12345", ["12345"]],
+ ["123456", ["123456"]],
+ ["1234567", []],
+ [" 123456 234567", ["123456", "234567"]],
+ ]
+
+ def test_find_bugs_in_string(self):
+ openbugs = OpenBugs()
+ for expectation in self.find_bugs_in_string_expectations:
+ self.assertEquals(openbugs._find_bugs_in_string(expectation[0]), expectation[1])
+
+ def test_args_parsing(self):
+ expected_stderr = "2 bugs found in input.\nMOCK: user.open_url: http://example.com/12345\nMOCK: user.open_url: http://example.com/23456\n"
+ self.assert_execute_outputs(OpenBugs(), ["12345\n23456"], expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/commands/queries.py b/WebKitTools/Scripts/webkitpy/commands/queries.py
new file mode 100644
index 0000000..3ca4f42
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queries.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from optparse import make_option
+
+from webkitpy.buildbot import BuildBot
+from webkitpy.committers import CommitterList
+from webkitpy.webkit_logging import log
+from webkitpy.multicommandtool import AbstractDeclarativeCommand
+
+
+class BugsToCommit(AbstractDeclarativeCommand):
+ name = "bugs-to-commit"
+ help_text = "List bugs in the commit-queue"
+
+ def execute(self, options, args, tool):
+ # FIXME: This command is poorly named. It's fetching the commit-queue list here. The name implies it's fetching pending-commit (all r+'d patches).
+ bug_ids = tool.bugs.queries.fetch_bug_ids_from_commit_queue()
+ for bug_id in bug_ids:
+ print "%s" % bug_id
+
+
+class PatchesInCommitQueue(AbstractDeclarativeCommand):
+ name = "patches-in-commit-queue"
+ help_text = "List patches in the commit-queue"
+
+ def execute(self, options, args, tool):
+ patches = tool.bugs.queries.fetch_patches_from_commit_queue()
+ log("Patches in commit queue:")
+ for patch in patches:
+ print patch.url()
+
+
+class PatchesToCommitQueue(AbstractDeclarativeCommand):
+ name = "patches-to-commit-queue"
+ help_text = "Patches which should be added to the commit queue"
+ def __init__(self):
+ options = [
+ make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options)
+
+ @staticmethod
+ def _needs_commit_queue(patch):
+ if patch.commit_queue() == "+": # If it's already cq+, ignore the patch.
+ log("%s already has cq=%s" % (patch.id(), patch.commit_queue()))
+ return False
+
+        # We only need to worry about patches from contributors who are not yet committers.
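+        # Non-committers cannot land their own patches, so (presumably) their
+        # reviewed patches are exactly the ones that need to be added to the
+        # commit queue; hence the inverted return value below.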
+ committer_record = CommitterList().committer_by_email(patch.attacher_email())
+ if committer_record:
+ log("%s committer = %s" % (patch.id(), committer_record))
+ return not committer_record
+
+ def execute(self, options, args, tool):
+ patches = tool.bugs.queries.fetch_patches_from_pending_commit_list()
+ patches_needing_cq = filter(self._needs_commit_queue, patches)
+ if options.bugs:
+ bugs_needing_cq = map(lambda patch: patch.bug_id(), patches_needing_cq)
+ bugs_needing_cq = sorted(set(bugs_needing_cq))
+ for bug_id in bugs_needing_cq:
+ print "%s" % tool.bugs.bug_url_for_bug_id(bug_id)
+ else:
+ for patch in patches_needing_cq:
+ print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")
+
+
+class PatchesToReview(AbstractDeclarativeCommand):
+ name = "patches-to-review"
+ help_text = "List patches that are pending review"
+
+ def execute(self, options, args, tool):
+ patch_ids = tool.bugs.queries.fetch_attachment_ids_from_review_queue()
+ log("Patches pending review:")
+ for patch_id in patch_ids:
+ print patch_id
+
+
+class TreeStatus(AbstractDeclarativeCommand):
+ name = "tree-status"
+ help_text = "Print the status of the %s buildbots" % BuildBot.default_host
+ long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
+and displays the status of each builder."""
+
+ def execute(self, options, args, tool):
+ for builder in tool.buildbot.builder_statuses():
+ status_string = "ok" if builder["is_green"] else "FAIL"
+ print "%s : %s" % (status_string.ljust(4), builder["name"])
diff --git a/WebKitTools/Scripts/webkitpy/commands/queries.pyc b/WebKitTools/Scripts/webkitpy/commands/queries.pyc
new file mode 100644
index 0000000..829b6e3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queries.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/queries_unittest.py b/WebKitTools/Scripts/webkitpy/commands/queries_unittest.py
new file mode 100644
index 0000000..b858777
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queries_unittest.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.bugzilla import Bugzilla
+from webkitpy.commands.commandtest import CommandsTest
+from webkitpy.commands.queries import *
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+
+class QueryCommandsTest(CommandsTest):
+ def test_bugs_to_commit(self):
+ expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\n"
+ self.assert_execute_outputs(BugsToCommit(), None, "42\n77\n", expected_stderr)
+
+ def test_patches_in_commit_queue(self):
+ expected_stdout = "http://example.com/197\nhttp://example.com/103\n"
+ expected_stderr = "Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)\nPatches in commit queue:\n"
+ self.assert_execute_outputs(PatchesInCommitQueue(), None, expected_stdout, expected_stderr)
+
+ def test_patches_to_commit_queue(self):
+ expected_stdout = "http://example.com/104&action=edit\n"
+ expected_stderr = "197 already has cq=+\n128 already has cq=+\n105 committer = \"Eric Seidel\" <eric@webkit.org>\n"
+ options = Mock()
+ options.bugs = False
+ self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+ expected_stdout = "http://example.com/77\n"
+ options.bugs = True
+ self.assert_execute_outputs(PatchesToCommitQueue(), None, expected_stdout, expected_stderr, options=options)
+
+ def test_patches_to_review(self):
+ expected_stdout = "103\n"
+ expected_stderr = "Patches pending review:\n"
+ self.assert_execute_outputs(PatchesToReview(), None, expected_stdout, expected_stderr)
+
+ def test_tree_status(self):
+ expected_stdout = "ok : Builder1\nok : Builder2\n"
+ self.assert_execute_outputs(TreeStatus(), None, expected_stdout)
diff --git a/WebKitTools/Scripts/webkitpy/commands/queues.py b/WebKitTools/Scripts/webkitpy/commands/queues.py
new file mode 100644
index 0000000..6ea1c48
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queues.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import traceback
+import os
+
+from datetime import datetime
+from optparse import make_option
+from StringIO import StringIO
+
+from webkitpy.bugzilla import CommitterValidator
+from webkitpy.executive import ScriptError
+from webkitpy.grammar import pluralize
+from webkitpy.webkit_logging import error, log
+from webkitpy.multicommandtool import Command
+from webkitpy.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate
+from webkitpy.statusserver import StatusServer
+from webkitpy.stepsequence import StepSequenceErrorHandler
+from webkitpy.queueengine import QueueEngine, QueueEngineDelegate
+
+class AbstractQueue(Command, QueueEngineDelegate):
+ watchers = [
+ "webkit-bot-watchers@googlegroups.com",
+ ]
+
+ _pass_status = "Pass"
+ _fail_status = "Fail"
+ _error_status = "Error"
+
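+    # The inline note on __init__ below refers to Python's usual
+    # mutable-default pitfall: a default such as "options=[]" is evaluated
+    # once, when the function is defined, so every call would share (and
+    # could accidentally grow) the same list.  A minimal illustration of the
+    # bug being avoided:
+    #
+    #   def bad(value, values=[]):   # one list shared by every call
+    #       values.append(value)
+    #       return values
+    #   bad("a"); bad("b")           # the second call returns ["a", "b"]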
+ def __init__(self, options=None): # Default values should never be collections (like []) as default values are shared between invocations
+ options_list = (options or []) + [
+ make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
+ ]
+ Command.__init__(self, "Run the %s" % self.name, options=options_list)
+
+ def _cc_watchers(self, bug_id):
+ try:
+ self.tool.bugs.add_cc_to_bug(bug_id, self.watchers)
+ except Exception, e:
+ traceback.print_exc()
+ log("Failed to CC watchers.")
+
+ def _update_status(self, message, patch=None, results_file=None):
+ self.tool.status_server.update_status(self.name, message, patch, results_file)
+
+ def _did_pass(self, patch):
+ self._update_status(self._pass_status, patch)
+
+ def _did_fail(self, patch):
+ self._update_status(self._fail_status, patch)
+
+ def _did_error(self, patch, reason):
+ message = "%s: %s" % (self._error_status, reason)
+ self._update_status(message, patch)
+
+ def queue_log_path(self):
+ return "%s.log" % self.name
+
+ def work_item_log_path(self, patch):
+ return os.path.join("%s-logs" % self.name, "%s.log" % patch.bug_id())
+
+ def begin_work_queue(self):
+ log("CAUTION: %s will discard all local changes in \"%s\"" % (self.name, self.tool.scm().checkout_root))
+ if self.options.confirm:
+ response = self.tool.user.prompt("Are you sure? Type \"yes\" to continue: ")
+ if (response != "yes"):
+ error("User declined.")
+ log("Running WebKit %s." % self.name)
+
+ def should_continue_work_queue(self):
+ return True
+
+ def next_work_item(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_proceed_with_work_item(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def handle_unexpected_error(self, work_item, message):
+ raise NotImplementedError, "subclasses must implement"
+
+ def run_webkit_patch(self, args):
+ webkit_patch_args = [self.tool.path()]
+ # FIXME: This is a hack, we should have a more general way to pass global options.
+ webkit_patch_args += ["--status-host=%s" % self.tool.status_server.host]
+ webkit_patch_args += map(str, args)
+ self.tool.executive.run_and_throw_if_fail(webkit_patch_args)
+
+ def log_progress(self, patch_ids):
+ log("%s in %s [%s]" % (pluralize("patch", len(patch_ids)), self.name, ", ".join(map(str, patch_ids))))
+
+ def execute(self, options, args, tool, engine=QueueEngine):
+ self.options = options
+ self.tool = tool
+ return engine(self.name, self).run()
+
+ @classmethod
+ def _update_status_for_script_error(cls, tool, state, script_error, is_error=False):
+ message = script_error.message
+ if is_error:
+ message = "Error: %s" % message
+ output = script_error.message_with_output(output_limit=5*1024*1024) # 5MB
+ return tool.status_server.update_status(cls.name, message, state["patch"], StringIO(output))
+
+
+class CommitQueue(AbstractQueue, StepSequenceErrorHandler):
+ name = "commit-queue"
+ def __init__(self):
+ AbstractQueue.__init__(self)
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+ self.committer_validator = CommitterValidator(self.tool.bugs)
+
+ def _validate_patches_in_commit_queue(self):
+ # Not using BugzillaQueries.fetch_patches_from_commit_queue() so we can reject patches with invalid committers/reviewers.
+ bug_ids = self.tool.bugs.queries.fetch_bug_ids_from_commit_queue()
+ all_patches = sum([self.tool.bugs.fetch_bug(bug_id).commit_queued_patches(include_invalid=True) for bug_id in bug_ids], [])
+ return self.committer_validator.patches_after_rejecting_invalid_commiters_and_reviewers(all_patches)
+
+ def next_work_item(self):
+ patches = self._validate_patches_in_commit_queue()
+        # FIXME: We could sort the patches in a specific order here, as suggested in https://bugs.webkit.org/show_bug.cgi?id=33395
+ if not patches:
+ self._update_status("Empty queue")
+ return None
+ # Only bother logging if we have patches in the queue.
+ self.log_progress([patch.id() for patch in patches])
+ return patches[0]
+
+ def _can_build_and_test(self):
+ try:
+ self.run_webkit_patch(["build-and-test", "--force-clean", "--non-interactive", "--build-style=both", "--quiet"])
+ except ScriptError, e:
+ self._update_status("Unabled to successfully build and test", None)
+ return False
+ return True
+
+ def _builders_are_green(self):
+ red_builders_names = self.tool.buildbot.red_core_builders_names()
+ if red_builders_names:
+ red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names.
+ self._update_status("Builders [%s] are red. See http://build.webkit.org" % ", ".join(red_builders_names), None)
+ return False
+ return True
+
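+    # Builder status is checked both before and after the trial build and
+    # test run below, presumably because that run takes a while; re-checking
+    # afterwards avoids landing a patch onto a tree that turned red while the
+    # local build was in progress.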
+ def should_proceed_with_work_item(self, patch):
+ if not self._builders_are_green():
+ return False
+ if not self._can_build_and_test():
+ return False
+ if not self._builders_are_green():
+ return False
+ self._update_status("Landing patch", patch)
+ return True
+
+ def process_work_item(self, patch):
+ try:
+ self._cc_watchers(patch.bug_id())
+ # We pass --no-update here because we've already validated
+ # that the current revision actually builds and passes the tests.
+ # If we update, we risk moving to a revision that doesn't!
+ self.run_webkit_patch(["land-attachment", "--force-clean", "--non-interactive", "--no-update", "--parent-command=commit-queue", "--build-style=both", "--quiet", patch.id()])
+ self._did_pass(patch)
+ except ScriptError, e:
+ self._did_fail(patch)
+ raise e
+
+ def handle_unexpected_error(self, patch, message):
+ self.committer_validator.reject_patch_from_commit_queue(patch.id(), message)
+
+ # StepSequenceErrorHandler methods
+
+ @staticmethod
+ def _error_message_for_bug(tool, status_id, script_error):
+ if not script_error.output:
+ return script_error.message_with_output()
+ results_link = tool.status_server.results_url_for_status(status_id)
+ return "%s\nFull output: %s" % (script_error.message_with_output(), results_link)
+
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ status_id = cls._update_status_for_script_error(tool, state, script_error)
+ validator = CommitterValidator(tool.bugs)
+ validator.reject_patch_from_commit_queue(state["patch"].id(), cls._error_message_for_bug(tool, status_id, script_error))
+
+
+class AbstractReviewQueue(AbstractQueue, PersistentPatchCollectionDelegate, StepSequenceErrorHandler):
+ def __init__(self, options=None):
+ AbstractQueue.__init__(self, options)
+
+ def _review_patch(self, patch):
+ raise NotImplementedError, "subclasses must implement"
+
+ # PersistentPatchCollectionDelegate methods
+
+ def collection_name(self):
+ return self.name
+
+ def fetch_potential_patch_ids(self):
+ return self.tool.bugs.queries.fetch_attachment_ids_from_review_queue()
+
+ def status_server(self):
+ return self.tool.status_server
+
+ def is_terminal_status(self, status):
+ return status == "Pass" or status == "Fail" or status.startswith("Error:")
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+ self._patches = PersistentPatchCollection(self)
+
+ def next_work_item(self):
+ patch_id = self._patches.next()
+ if patch_id:
+ return self.tool.bugs.fetch_attachment(patch_id)
+ self._update_status("Empty queue")
+
+ def should_proceed_with_work_item(self, patch):
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, patch):
+ try:
+ self._review_patch(patch)
+ self._did_pass(patch)
+ except ScriptError, e:
+ if e.exit_code != QueueEngine.handled_error_code:
+ self._did_fail(patch)
+ raise e
+
+ def handle_unexpected_error(self, patch, message):
+ log(message)
+
+ # StepSequenceErrorHandler methods
+
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ log(script_error.message_with_output())
+
+
+class StyleQueue(AbstractReviewQueue):
+ name = "style-queue"
+ def __init__(self):
+ AbstractReviewQueue.__init__(self)
+
+ def should_proceed_with_work_item(self, patch):
+ self._update_status("Checking style", patch)
+ return True
+
+ def _review_patch(self, patch):
+ self.run_webkit_patch(["check-style", "--force-clean", "--non-interactive", "--parent-command=style-queue", patch.id()])
+
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ is_svn_apply = script_error.command_name() == "svn-apply"
+ status_id = cls._update_status_for_script_error(tool, state, script_error, is_error=is_svn_apply)
+ if is_svn_apply:
+ QueueEngine.exit_after_handled_error(script_error)
+ message = "Attachment %s did not pass %s:\n\n%s\n\nIf any of these errors are false positives, please file a bug against check-webkit-style." % (state["patch"].id(), cls.name, script_error.message_with_output(output_limit=3*1024))
+ tool.bugs.post_comment_to_bug(state["patch"].bug_id(), message, cc=cls.watchers)
+ exit(1)
diff --git a/WebKitTools/Scripts/webkitpy/commands/queues.pyc b/WebKitTools/Scripts/webkitpy/commands/queues.pyc
new file mode 100644
index 0000000..8d52d05
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queues.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py b/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py
new file mode 100644
index 0000000..87cd645
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queues_unittest.py
@@ -0,0 +1,102 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.commands.commandtest import CommandsTest
+from webkitpy.commands.queues import *
+from webkitpy.commands.queuestest import QueuesTest
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+
+
+class TestQueue(AbstractQueue):
+ name = "test-queue"
+
+
+class TestReviewQueue(AbstractReviewQueue):
+ name = "test-review-queue"
+
+
+class AbstractQueueTest(CommandsTest):
+ def _assert_log_progress_output(self, patch_ids, progress_output):
+ OutputCapture().assert_outputs(self, TestQueue().log_progress, [patch_ids], expected_stderr=progress_output)
+
+ def test_log_progress(self):
+ self._assert_log_progress_output([1,2,3], "3 patches in test-queue [1, 2, 3]\n")
+ self._assert_log_progress_output(["1","2","3"], "3 patches in test-queue [1, 2, 3]\n")
+ self._assert_log_progress_output([1], "1 patch in test-queue [1]\n")
+
+ def _assert_run_webkit_patch(self, run_args):
+ queue = TestQueue()
+ tool = MockBugzillaTool()
+ queue.bind_to_tool(tool)
+
+ queue.run_webkit_patch(run_args)
+ expected_run_args = ["echo", "--status-host=example.com"] + map(str, run_args)
+ tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args)
+
+ def test_run_webkit_patch(self):
+ self._assert_run_webkit_patch([1])
+ self._assert_run_webkit_patch(["one", 2])
+
+
+class AbstractReviewQueueTest(CommandsTest):
+ def test_patch_collection_delegate_methods(self):
+ queue = TestReviewQueue()
+ tool = MockBugzillaTool()
+ queue.bind_to_tool(tool)
+ self.assertEquals(queue.collection_name(), "test-review-queue")
+ self.assertEquals(queue.fetch_potential_patch_ids(), [103])
+ queue.status_server()
+ self.assertTrue(queue.is_terminal_status("Pass"))
+ self.assertTrue(queue.is_terminal_status("Fail"))
+ self.assertTrue(queue.is_terminal_status("Error: Your patch exploded"))
+ self.assertFalse(queue.is_terminal_status("Foo"))
+
+
+class CommitQueueTest(QueuesTest):
+ def test_commit_queue(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: commit-queue will discard all local changes in \"%s\"\nRunning WebKit commit-queue.\n" % os.getcwd(),
+ # FIXME: The commit-queue warns about bad committers twice. This is due to the fact that we access Attachment.reviewer() twice and it logs each time.
+ "next_work_item" : """Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
+Warning, attachment 128 on bug 42 has invalid committer (non-committer@example.com)
+2 patches in commit-queue [197, 106]
+""",
+ }
+ self.assert_queue_outputs(CommitQueue(), expected_stderr=expected_stderr)
+
+
+class StyleQueueTest(QueuesTest):
+ def test_style_queue(self):
+ expected_stderr = {
+ "begin_work_queue" : "CAUTION: style-queue will discard all local changes in \"%s\"\nRunning WebKit style-queue.\n" % os.getcwd(),
+ "handle_unexpected_error" : "Mock error message\n",
+ }
+ self.assert_queue_outputs(StyleQueue(), expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/commands/queuestest.py b/WebKitTools/Scripts/webkitpy/commands/queuestest.py
new file mode 100644
index 0000000..09d1c26
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/queuestest.py
@@ -0,0 +1,99 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.bugzilla import Attachment
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+
+
+class MockQueueEngine(object):
+ def __init__(self, name, queue):
+ pass
+
+ def run(self):
+ pass
+
+
+class QueuesTest(unittest.TestCase):
+ mock_work_item = Attachment({
+ "id" : 1234,
+ "bug_id" : 345,
+ "attacher_email": "adam@example.com",
+ }, None)
+
+ def assert_queue_outputs(self, queue, args=None, work_item=None, expected_stdout=None, expected_stderr=None, options=Mock(), tool=MockBugzillaTool()):
+ if not expected_stdout:
+ expected_stdout = {}
+ if not expected_stderr:
+ expected_stderr = {}
+ if not args:
+ args = []
+ if not work_item:
+ work_item = self.mock_work_item
+ tool.user.prompt = lambda message: "yes"
+
+ queue.execute(options, args, tool, engine=MockQueueEngine)
+
+ OutputCapture().assert_outputs(self,
+ queue.queue_log_path,
+ expected_stdout=expected_stdout.get("queue_log_path", ""),
+ expected_stderr=expected_stderr.get("queue_log_path", ""))
+ OutputCapture().assert_outputs(self,
+ queue.work_item_log_path,
+ args=[work_item],
+ expected_stdout=expected_stdout.get("work_item_log_path", ""),
+ expected_stderr=expected_stderr.get("work_item_log_path", ""))
+ OutputCapture().assert_outputs(self,
+ queue.begin_work_queue,
+ expected_stdout=expected_stdout.get("begin_work_queue", ""),
+ expected_stderr=expected_stderr.get("begin_work_queue", ""))
+ OutputCapture().assert_outputs(self,
+ queue.should_continue_work_queue,
+ expected_stdout=expected_stdout.get("should_continue_work_queue", ""), expected_stderr=expected_stderr.get("should_continue_work_queue", ""))
+ OutputCapture().assert_outputs(self,
+ queue.next_work_item,
+ expected_stdout=expected_stdout.get("next_work_item", ""),
+ expected_stderr=expected_stderr.get("next_work_item", ""))
+ OutputCapture().assert_outputs(self,
+ queue.should_proceed_with_work_item,
+ args=[work_item],
+ expected_stdout=expected_stdout.get("should_proceed_with_work_item", ""),
+ expected_stderr=expected_stderr.get("should_proceed_with_work_item", ""))
+ OutputCapture().assert_outputs(self,
+ queue.process_work_item,
+ args=[work_item],
+ expected_stdout=expected_stdout.get("process_work_item", ""),
+ expected_stderr=expected_stderr.get("process_work_item", ""))
+ OutputCapture().assert_outputs(self,
+ queue.handle_unexpected_error,
+ args=[work_item, "Mock error message"],
+ expected_stdout=expected_stdout.get("handle_unexpected_error", ""),
+ expected_stderr=expected_stderr.get("handle_unexpected_error", ""))
diff --git a/WebKitTools/Scripts/webkitpy/commands/upload.py b/WebKitTools/Scripts/webkitpy/commands/upload.py
new file mode 100644
index 0000000..8d23d8b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/upload.py
@@ -0,0 +1,406 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import StringIO
+import sys
+
+from optparse import make_option
+
+import webkitpy.steps as steps
+
+from webkitpy.bugzilla import parse_bug_id
+from webkitpy.commands.abstractsequencedcommand import AbstractSequencedCommand
+from webkitpy.comments import bug_comment_from_svn_revision
+from webkitpy.committers import CommitterList
+from webkitpy.grammar import pluralize
+from webkitpy.webkit_logging import error, log
+from webkitpy.mock import Mock
+from webkitpy.multicommandtool import AbstractDeclarativeCommand
+
+class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
+ name = "commit-message"
+ help_text = "Print a commit message suitable for the uncommitted changes"
+
+ def execute(self, options, args, tool):
+ os.chdir(tool.scm().checkout_root)
+ print "%s" % tool.scm().commit_message_for_this_commit().message()
+
+
+class AssignToCommitter(AbstractDeclarativeCommand):
+ name = "assign-to-committer"
+ help_text = "Assign bug to whoever attached the most recent r+'d patch"
+
+ def _assign_bug_to_last_patch_attacher(self, bug_id):
+ committers = CommitterList()
+ bug = self.tool.bugs.fetch_bug(bug_id)
+ assigned_to_email = bug.assigned_to_email()
+ if assigned_to_email != self.tool.bugs.unassigned_email:
+ log("Bug %s is already assigned to %s (%s)." % (bug_id, assigned_to_email, committers.committer_by_email(assigned_to_email)))
+ return
+
+ reviewed_patches = bug.reviewed_patches()
+ if not reviewed_patches:
+ log("Bug %s has no non-obsolete patches, ignoring." % bug_id)
+ return
+ latest_patch = reviewed_patches[-1]
+ attacher_email = latest_patch.attacher_email()
+ committer = committers.committer_by_email(attacher_email)
+ if not committer:
+ log("Attacher %s is not a committer. Bug %s likely needs commit-queue+." % (attacher_email, bug_id))
+ return
+
+ reassign_message = "Attachment %s was posted by a committer and has review+, assigning to %s for commit." % (latest_patch.id(), committer.full_name)
+ self.tool.bugs.reassign_bug(bug_id, committer.bugzilla_email(), reassign_message)
+
+ def execute(self, options, args, tool):
+ for bug_id in tool.bugs.queries.fetch_bug_ids_from_pending_commit_list():
+ self._assign_bug_to_last_patch_attacher(bug_id)
+
+
+class ObsoleteAttachments(AbstractSequencedCommand):
+ name = "obsolete-attachments"
+ help_text = "Mark all attachments on a bug as obsolete"
+ argument_names = "BUGID"
+ steps = [
+ steps.ObsoletePatches,
+ ]
+
+ def _prepare_state(self, options, args, tool):
+ return { "bug_id" : args[0] }
+
+
+class AbstractPatchUploadingCommand(AbstractSequencedCommand):
+ def _bug_id(self, args, tool, state):
+        # Prefer a bug id passed as an argument over a bug url in the diff (i.e. ChangeLogs).
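+        # For example (hypothetical invocations): "webkit-patch post 12345"
+        # would use the explicit id, while a bare "webkit-patch post" creates
+        # the working-directory diff first and relies on a bug URL in the
+        # ChangeLog entries to identify the bug.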
+ bug_id = args and args[0]
+ if not bug_id:
+ state["diff"] = tool.scm().create_patch()
+ bug_id = parse_bug_id(state["diff"])
+ return bug_id
+
+ def _prepare_state(self, options, args, tool):
+ state = {}
+ state["bug_id"] = self._bug_id(args, tool, state)
+ if not state["bug_id"]:
+ error("No bug id passed and no bug url found in diff.")
+ return state
+
+
+class Post(AbstractPatchUploadingCommand):
+ name = "post"
+ help_text = "Attach the current working directory diff to a bug as a patch file"
+ argument_names = "[BUGID]"
+ show_in_main_help = True
+ steps = [
+ steps.CheckStyle,
+ steps.ConfirmDiff,
+ steps.ObsoletePatches,
+ steps.PostDiff,
+ ]
+
+
+class LandSafely(AbstractPatchUploadingCommand):
+ name = "land-safely"
+ help_text = "Land the current diff via the commit-queue (Experimental)"
+ argument_names = "[BUGID]"
+ steps = [
+ steps.UpdateChangeLogsWithReviewer,
+ steps.ObsoletePatches,
+ steps.PostDiffForCommit,
+ ]
+
+
+class Prepare(AbstractSequencedCommand):
+ name = "prepare"
+ help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs"
+ argument_names = "[BUGID]"
+ show_in_main_help = True
+ steps = [
+ steps.PromptForBugOrTitle,
+ steps.CreateBug,
+ steps.PrepareChangeLog,
+ ]
+
+ def _prepare_state(self, options, args, tool):
+ bug_id = args and args[0]
+ return { "bug_id" : bug_id }
+
+
+class Upload(AbstractPatchUploadingCommand):
+ name = "upload"
+ help_text = "Automates the process of uploading a patch for review"
+ argument_names = "[BUGID]"
+ show_in_main_help = True
+ steps = [
+ steps.CheckStyle,
+ steps.PromptForBugOrTitle,
+ steps.CreateBug,
+ steps.PrepareChangeLog,
+ steps.EditChangeLog,
+ steps.ConfirmDiff,
+ steps.ObsoletePatches,
+ steps.PostDiff,
+ ]
+ long_help = """upload uploads the current diff to bugs.webkit.org.
+ If no bug id is provided, upload will create a bug.
+ If the current diff does not have a ChangeLog, upload
+ will prepare a ChangeLog. Once a patch is ready, upload
+ will open the ChangeLogs for editing using the command in the
+ EDITOR environment variable and will display the diff using the
+ command in the PAGER environment variable."""
+
+ def _prepare_state(self, options, args, tool):
+ state = {}
+ state["bug_id"] = self._bug_id(args, tool, state)
+ return state
+
+
+class EditChangeLogs(AbstractSequencedCommand):
+ name = "edit-changelogs"
+ help_text = "Opens modified ChangeLogs in $EDITOR"
+ show_in_main_help = True
+ steps = [
+ steps.EditChangeLog,
+ ]
+
+
+class PostCommits(AbstractDeclarativeCommand):
+ name = "post-commits"
+ help_text = "Attach a range of local commits to bugs as patch files"
+ argument_names = "COMMITISH"
+
+ def __init__(self):
+ options = [
+ make_option("-b", "--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
+ make_option("--add-log-as-comment", action="store_true", dest="add_log_as_comment", default=False, help="Add commit log message as a comment when uploading the patch."),
+ make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: description from commit message)"),
+ steps.Options.obsolete_patches,
+ steps.Options.review,
+ steps.Options.request_commit,
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True)
+
+ def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
+ comment_text = None
+ if options.add_log_as_comment:
+ comment_text = commit_message.body(lstrip=True)
+ comment_text += "---\n"
+ comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+ return comment_text
+
+ def _diff_file_for_commit(self, tool, commit_id):
+ diff = tool.scm().create_patch_from_local_commit(commit_id)
+ return StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+
+ def execute(self, options, args, tool):
+ commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+ if len(commit_ids) > 10: # We could lower this limit; 10 is too many for one bug as-is.
+ error("webkit-patch does not support attaching %s at once. Are you sure you passed the right commit range?" % (pluralize("patch", len(commit_ids))))
+
+ have_obsoleted_patches = set()
+ for commit_id in commit_ids:
+ commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+
+ # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
+ bug_id = options.bug_id or parse_bug_id(commit_message.message()) or parse_bug_id(tool.scm().create_patch_from_local_commit(commit_id))
+ if not bug_id:
+ log("Skipping %s: No bug id found in commit or specified with --bug-id." % commit_id)
+ continue
+
+ if options.obsolete_patches and bug_id not in have_obsoleted_patches:
+ state = { "bug_id": bug_id }
+ steps.ObsoletePatches(tool, options).run(state)
+ have_obsoleted_patches.add(bug_id)
+
+ diff_file = self._diff_file_for_commit(tool, commit_id)
+ description = options.description or commit_message.description(lstrip=True, strip_url=True)
+ comment_text = self._comment_text_for_commit(options, commit_message, tool, commit_id)
+ tool.bugs.add_patch_to_bug(bug_id, diff_file, description, comment_text, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+
+class MarkBugFixed(AbstractDeclarativeCommand):
+ name = "mark-bug-fixed"
+ help_text = "Mark the specified bug as fixed"
+ argument_names = "[SVN_REVISION]"
+ def __init__(self):
+ options = [
+ make_option("--bug-id", action="store", type="string", dest="bug_id", help="Specify bug id if no URL is provided in the commit log."),
+ make_option("--comment", action="store", type="string", dest="comment", help="Text to include in bug comment."),
+ make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."),
+ make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options)
+
+ def _fetch_commit_log(self, tool, svn_revision):
+ if not svn_revision:
+ return tool.scm().last_svn_commit_log()
+ return tool.scm().svn_commit_log(svn_revision)
+
+ def _determine_bug_id_and_svn_revision(self, tool, bug_id, svn_revision):
+ commit_log = self._fetch_commit_log(tool, svn_revision)
+
+ if not bug_id:
+ bug_id = parse_bug_id(commit_log)
+
+ if not svn_revision:
+ match = re.search("^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
+ if match:
+ svn_revision = match.group('svn_revision')
+
+ if not bug_id or not svn_revision:
+ not_found = []
+ if not bug_id:
+ not_found.append("bug id")
+ if not svn_revision:
+ not_found.append("svn revision")
+ error("Could not find %s on command-line or in %s."
+ % (" or ".join(not_found), "r%s" % svn_revision if svn_revision else "last commit"))
+
+ return (bug_id, svn_revision)
+
+ def execute(self, options, args, tool):
+ bug_id = options.bug_id
+
+ svn_revision = args and args[0]
+ if svn_revision:
+ if re.match("^r[0-9]+$", svn_revision, re.IGNORECASE):
+ svn_revision = svn_revision[1:]
+ if not re.match("^[0-9]+$", svn_revision):
+ error("Invalid svn revision: '%s'" % svn_revision)
+
+ needs_prompt = False
+ if not bug_id or not svn_revision:
+ needs_prompt = True
+ (bug_id, svn_revision) = self._determine_bug_id_and_svn_revision(tool, bug_id, svn_revision)
+
+ log("Bug: <%s> %s" % (tool.bugs.bug_url_for_bug_id(bug_id), tool.bugs.fetch_bug_dictionary(bug_id)["title"]))
+ log("Revision: %s" % svn_revision)
+
+ if options.open_bug:
+ tool.user.open_url(tool.bugs.bug_url_for_bug_id(bug_id))
+
+ if needs_prompt:
+ if not tool.user.confirm("Is this correct?"):
+ exit(1)
+
+ bug_comment = bug_comment_from_svn_revision(svn_revision)
+ if options.comment:
+ bug_comment = "%s\n\n%s" % (options.comment, bug_comment)
+
+ if options.update_only:
+ log("Adding comment to Bug %s." % bug_id)
+ tool.bugs.post_comment_to_bug(bug_id, bug_comment)
+ else:
+ log("Adding comment to Bug %s and marking as Resolved/Fixed." % bug_id)
+ tool.bugs.close_bug_as_fixed(bug_id, bug_comment)
+
+
+# FIXME: Requires unit test. Blocking issue: too complex for now.
+class CreateBug(AbstractDeclarativeCommand):
+ name = "create-bug"
+ help_text = "Create a bug from local changes or local commits"
+ argument_names = "[COMMITISH]"
+
+ def __init__(self):
+ options = [
+ steps.Options.cc,
+ steps.Options.component,
+ make_option("--no-prompt", action="store_false", dest="prompt", default=True, help="Do not prompt for bug title and comment; use commit log instead."),
+ make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
+ make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options=options)
+
+ def create_bug_from_commit(self, options, args, tool):
+ commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
+ if len(commit_ids) > 3:
+ error("Are you sure you want to create one bug with %s patches?" % len(commit_ids))
+
+ commit_id = commit_ids[0]
+
+ bug_title = ""
+ comment_text = ""
+ if options.prompt:
+ (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+ else:
+ commit_message = tool.scm().commit_message_for_local_commit(commit_id)
+ bug_title = commit_message.description(lstrip=True, strip_url=True)
+ comment_text = commit_message.body(lstrip=True)
+ comment_text += "---\n"
+ comment_text += tool.scm().files_changed_summary_for_commit(commit_id)
+
+ diff = tool.scm().create_patch_from_local_commit(commit_id)
+ diff_file = StringIO.StringIO(diff) # create_bug expects a file-like object
+ bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+ if bug_id and len(commit_ids) > 1:
+ options.bug_id = bug_id
+ options.obsolete_patches = False
+ # FIXME: We should pass through --no-comment switch as well.
+ PostCommits.execute(self, options, commit_ids[1:], tool)
+
+ def create_bug_from_patch(self, options, args, tool):
+ bug_title = ""
+ comment_text = ""
+ if options.prompt:
+ (bug_title, comment_text) = self.prompt_for_bug_title_and_comment()
+ else:
+ commit_message = tool.scm().commit_message_for_this_commit()
+ bug_title = commit_message.description(lstrip=True, strip_url=True)
+ comment_text = commit_message.body(lstrip=True)
+
+ diff = tool.scm().create_patch()
+ diff_file = StringIO.StringIO(diff) # create_bug expects a file-like object
+ bug_id = tool.bugs.create_bug(bug_title, comment_text, options.component, diff_file, "Patch", cc=options.cc, mark_for_review=options.review, mark_for_commit_queue=options.request_commit)
+
+ def prompt_for_bug_title_and_comment(self):
+ bug_title = raw_input("Bug title: ")
+ print "Bug comment (hit ^D on blank line to end):"
+ lines = sys.stdin.readlines()
+ try:
+ sys.stdin.seek(0, os.SEEK_END)
+ except IOError:
+ # Cygwin raises an Illegal Seek (errno 29) exception when the above
+ # seek() call is made. Ignoring it seems to cause no harm.
+ # FIXME: Figure out a way to avoid the exception in the first
+ # place.
+ pass
+ comment_text = "".join(lines)
+ return (bug_title, comment_text)
+
+ def execute(self, options, args, tool):
+ if len(args):
+ if not tool.scm().supports_local_commits():
+ error("Extra arguments not supported; patch is taken from working directory.")
+ self.create_bug_from_commit(options, args, tool)
+ else:
+ self.create_bug_from_patch(options, args, tool)
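A minimal sketch (not part of the patch) of the revision parsing mark-bug-fixed performs: when no revision is given it pulls one out of the svn-style commit log with the regular expression used above, and a leading "r" on the command-line argument is stripped. The sample log line is made up.

    import re

    # Hypothetical svn-style commit log header.
    commit_log = "r9876 | committer@webkit.org | 2010-02-02 | 4 lines"

    # Same pattern _determine_bug_id_and_svn_revision() uses.
    match = re.search(r"^r(?P<svn_revision>\d+) \|", commit_log, re.MULTILINE)
    print(match.group("svn_revision"))  # 9876

    # execute() also accepts "r9876" on the command line and strips the prefix.
    svn_revision = "r9876"
    if re.match(r"^r[0-9]+$", svn_revision, re.IGNORECASE):
        svn_revision = svn_revision[1:]
    print(svn_revision)  # 9876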
diff --git a/WebKitTools/Scripts/webkitpy/commands/upload.pyc b/WebKitTools/Scripts/webkitpy/commands/upload.pyc
new file mode 100644
index 0000000..a4bd81b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/upload.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py b/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py
new file mode 100644
index 0000000..33001ac
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/commands/upload_unittest.py
@@ -0,0 +1,84 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.commands.commandtest import CommandsTest
+from webkitpy.commands.upload import *
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+
+class UploadCommandsTest(CommandsTest):
+ def test_commit_message_for_current_diff(self):
+ tool = MockBugzillaTool()
+ mock_commit_message_for_this_commit = Mock()
+ mock_commit_message_for_this_commit.message = lambda: "Mock message"
+ tool._scm.commit_message_for_this_commit = lambda: mock_commit_message_for_this_commit
+ expected_stdout = "Mock message\n"
+ self.assert_execute_outputs(CommitMessageForCurrentDiff(), [], expected_stdout=expected_stdout, tool=tool)
+
+ def test_assign_to_committer(self):
+ tool = MockBugzillaTool()
+ expected_stderr = "Bug 77 is already assigned to foo@foo.com (None).\nBug 76 has no non-obsolete patches, ignoring.\n"
+ self.assert_execute_outputs(AssignToCommitter(), [], expected_stderr=expected_stderr, tool=tool)
+ tool.bugs.reassign_bug.assert_called_with(42, "eric@webkit.org", "Attachment 128 was posted by a committer and has review+, assigning to Eric Seidel for commit.")
+
+ def test_obsolete_attachments(self):
+ expected_stderr = "Obsoleting 2 old patches on bug 42\n"
+ self.assert_execute_outputs(ObsoleteAttachments(), [42], expected_stderr=expected_stderr)
+
+ def test_post(self):
+ expected_stderr = "Running check-webkit-style\nObsoleting 2 old patches on bug 42\n"
+ self.assert_execute_outputs(Post(), [42], expected_stderr=expected_stderr)
+
+ def test_land_safely(self):
+ expected_stderr = "Obsoleting 2 old patches on bug 42\n"
+ self.assert_execute_outputs(LandSafely(), [42], expected_stderr=expected_stderr)
+
+ def test_prepare_diff_with_arg(self):
+ self.assert_execute_outputs(Prepare(), [42])
+
+ def test_prepare(self):
+ self.assert_execute_outputs(Prepare(), [])
+
+ def test_upload(self):
+ expected_stderr = "Running check-webkit-style\nObsoleting 2 old patches on bug 42\nMOCK: user.open_url: http://example.com/42\n"
+ self.assert_execute_outputs(Upload(), [42], expected_stderr=expected_stderr)
+
+ def test_mark_bug_fixed(self):
+ tool = MockBugzillaTool()
+ tool._scm.last_svn_commit_log = lambda: "r9876 |"
+ options = Mock()
+ options.bug_id = 42
+ expected_stderr = """Bug: <http://example.com/42> Bug with two r+'d and cq+'d patches, one of which has an invalid commit-queue setter.
+Revision: 9876
+MOCK: user.open_url: http://example.com/42
+Adding comment to Bug 42.
+"""
+ self.assert_execute_outputs(MarkBugFixed(), [], expected_stderr=expected_stderr, tool=tool, options=options)
+
+ def test_edit_changelog(self):
+ self.assert_execute_outputs(EditChangeLogs(), [])
diff --git a/WebKitTools/Scripts/webkitpy/comments.py b/WebKitTools/Scripts/webkitpy/comments.py
new file mode 100755
index 0000000..77ad239
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/comments.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# A tool for automating dealing with bugzilla, posting patches, committing
+# patches, etc.
+
+from webkitpy.changelogs import view_source_url
+
+
+def bug_comment_from_svn_revision(svn_revision):
+ return "Committed r%s: <%s>" % (svn_revision,
+ view_source_url(svn_revision))
+
+
+def bug_comment_from_commit_text(scm, commit_text):
+ svn_revision = scm.svn_revision_from_commit_text(commit_text)
+ return bug_comment_from_svn_revision(svn_revision)
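A short usage sketch (not part of the patch). The exact link depends on what webkitpy.changelogs.view_source_url returns; a Trac changeset URL is assumed here.

    from webkitpy.comments import bug_comment_from_svn_revision

    # Prints something like:
    #   Committed r12345: <http://trac.webkit.org/changeset/12345>
    # assuming view_source_url(12345) builds a Trac changeset link.
    print(bug_comment_from_svn_revision(12345))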
diff --git a/WebKitTools/Scripts/webkitpy/comments.pyc b/WebKitTools/Scripts/webkitpy/comments.pyc
new file mode 100644
index 0000000..ead9e58
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/comments.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/committers.py b/WebKitTools/Scripts/webkitpy/committers.py
new file mode 100644
index 0000000..73e4172
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/committers.py
@@ -0,0 +1,265 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for committer and reviewer validation
+
+
+class Committer:
+
+ def __init__(self, name, email_or_emails):
+ self.full_name = name
+ if isinstance(email_or_emails, str):
+ self.emails = [email_or_emails]
+ else:
+ self.emails = email_or_emails
+ self.can_review = False
+
+ def bugzilla_email(self):
+ # FIXME: We're assuming the first email is a valid bugzilla email,
+ # which might not be right.
+ return self.emails[0]
+
+ def __str__(self):
+ return '"%s" <%s>' % (self.full_name, self.emails[0])
+
+
+class Reviewer(Committer):
+
+ def __init__(self, name, email_or_emails):
+ Committer.__init__(self, name, email_or_emails)
+ self.can_review = True
+
+
+# This is intended as a canonical, machine-readable list of all non-reviewer
+# committers for WebKit. If your name is missing here and you are a committer,
+# please add it. No review needed. All reviewers are committers, so this list
+# is only of committers who are not reviewers.
+
+
+committers_unable_to_review = [
+ Committer("Aaron Boodman", "aa@chromium.org"),
+ Committer("Adam Langley", "agl@chromium.org"),
+ Committer("Albert J. Wong", "ajwong@chromium.org"),
+ Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"]),
+ Committer("Alexander Pavlov", "apavlov@chromium.org"),
+ Committer("Andre Boule", "aboule@apple.com"),
+ Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"]),
+ Committer("Andras Becsi", "abecsi@webkit.org"),
+ Committer("Anthony Ricaud", "rik@webkit.org"),
+ Committer("Anton Muhin", "antonm@chromium.org"),
+ Committer("Antonio Gomes", "tonikitoo@webkit.org"),
+ Committer("Ben Murdoch", "benm@google.com"),
+ Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org"]),
+ Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"]),
+ Committer("Brent Fulgham", "bfulgham@webkit.org"),
+ Committer("Brett Wilson", "brettw@chromium.org"),
+ Committer("Brian Weinstein", "bweinstein@apple.com"),
+ Committer("Cameron McCormack", "cam@webkit.org"),
+ Committer("Chris Fleizach", "cfleizach@apple.com"),
+ Committer("Chris Jerdonek", "cjerdonek@webkit.org"),
+ Committer("Chris Marrin", "cmarrin@apple.com"),
+ Committer("Chris Petersen", "cpetersen@apple.com"),
+ Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org"]),
+ Committer("Collin Jackson", "collinj@webkit.org"),
+ Committer("Csaba Osztrogonac", "ossy@webkit.org"),
+ Committer("Daniel Bates", "dbates@webkit.org"),
+ Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"]),
+ Committer("Dean Jackson", "dino@apple.com"),
+ Committer("Dirk Pranke", "dpranke@chromium.org"),
+ Committer("Drew Wilson", "atwilson@chromium.org"),
+ Committer("Dumitru Daniliuc", "dumi@chromium.org"),
+ Committer("Eli Fidler", "eli@staikos.net"),
+ Committer("Enrica Casucci", "enrica@apple.com"),
+ Committer("Erik Arvidsson", "arv@chromium.org"),
+ Committer("Eric Roman", "eroman@chromium.org"),
+ Committer("Feng Qian", "feng@chromium.org"),
+ Committer("Fumitoshi Ukai", "ukai@chromium.org"),
+ Committer("Gabor Loki", "loki@webkit.org"),
+ Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"]),
+ Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]),
+ Committer("Greg Bolsinga", "bolsinga@apple.com"),
+ Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]),
+ Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"]),
+ Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]),
+ Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"]),
+ Committer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"]),
+ Committer("Jian Li", "jianli@chromium.org"),
+ Committer("John Abd-El-Malek", "jam@chromium.org"),
+ Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"]),
+ Committer("Joseph Pecoraro", "joepeck@webkit.org"),
+ Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"]),
+ Committer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com"]),
+ Committer("Jungshik Shin", "jshin@chromium.org"),
+ Committer("Keishi Hattori", "keishi@webkit.org"),
+ Committer("Kelly Norton", "knorton@google.com"),
+ Committer("Kenneth Russell", "kbr@google.com"),
+ Committer("Kent Tamura", "tkent@chromium.org"),
+ Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"),
+ Committer("Levi Weintraub", "lweintraub@apple.com"),
+ Committer("Mads Ager", "ager@chromium.org"),
+ Committer("Matt Lilek", ["webkit@mattlilek.com", "pewtermoose@webkit.org"]),
+ Committer("Matt Perry", "mpcomplete@chromium.org"),
+ Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]),
+ Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"]),
+ Committer("Martin Robinson", ["mrobinson@webkit.org", "martin.james.robinson@gmail.com"]),
+ Committer("Michelangelo De Simone", "michelangelo@webkit.org"),
+ Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]),
+ Committer("Mike Fenton", ["mike.fenton@torchmobile.com", "mifenton@rim.com"]),
+ Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
+ Committer("Nate Chapin", "japhet@chromium.org"),
+ Committer("Ojan Vafai", "ojan@chromium.org"),
+ Committer("Pam Greene", "pam@chromium.org"),
+ Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"]),
+ Committer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org"]),
+ Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"]),
+ Committer("Pierre-Olivier Latour", "pol@apple.com"),
+ Committer("Roland Steiner", "rolandsteiner@chromium.org"),
+ Committer("Ryosuke Niwa", "rniwa@webkit.org"),
+ Committer("Scott Violet", "sky@chromium.org"),
+ Committer("Stephen White", "senorblanco@chromium.org"),
+ Committer("Steve Block", "steveblock@google.com"),
+ Committer("Tony Chang", "tony@chromium.org"),
+ Committer("Trey Matteson", "trey@usa.net"),
+ Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]),
+ Committer("Victor Wang", "victorw@chromium.org"),
+ Committer("William Siegrist", "wsiegrist@apple.com"),
+ Committer("Yael Aharon", "yael.aharon@nokia.com"),
+ Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),
+ Committer("Yong Li", ["yong.li@torchmobile.com", "yong.li.webkit@gmail.com"]),
+ Committer("Yongjun Zhang", "yongjun.zhang@nokia.com"),
+ Committer("Yury Semikhatsky", "yurys@chromium.org"),
+ Committer("Zoltan Herczeg", "zherczeg@webkit.org"),
+ Committer("Zoltan Horvath", "zoltan@webkit.org"),
+]
+
+
+# This is intended as a canonical, machine-readable list of all reviewers for
+# WebKit. If your name is missing here and you are a reviewer, please add it.
+# No review needed.
+
+
+reviewers_list = [
+ Reviewer("Ada Chan", "adachan@apple.com"),
+ Reviewer("Adam Barth", "abarth@webkit.org"),
+ Reviewer("Adam Roben", "aroben@apple.com"),
+ Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org"]),
+ Reviewer("Adele Peterson", "adele@apple.com"),
+ Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"]),
+ Reviewer("Alice Liu", "alice.liu@apple.com"),
+ Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"]),
+ Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"]),
+ Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com"]),
+ Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@webkit.org"]),
+ Reviewer("Beth Dakin", "bdakin@apple.com"),
+ Reviewer("Brady Eidson", "beidson@apple.com"),
+ Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
+ Reviewer("Chris Blumenberg", "cblu@apple.com"),
+ Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"]),
+ Reviewer("Darin Adler", "darin@apple.com"),
+ Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"]),
+ Reviewer("David Harrison", "harrison@apple.com"),
+ Reviewer("David Hyatt", "hyatt@apple.com"),
+ Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"]),
+ Reviewer("David Levin", "levin@chromium.org"),
+ Reviewer("Dimitri Glazkov", "dglazkov@chromium.org"),
+ Reviewer("Dirk Schulze", "krit@webkit.org"),
+ Reviewer("Dmitry Titov", "dimich@chromium.org"),
+ Reviewer("Don Melton", "gramps@apple.com"),
+ Reviewer("Eric Carlson", "eric.carlson@apple.com"),
+ Reviewer("Eric Seidel", "eric@webkit.org"),
+ Reviewer("Gavin Barraclough", "barraclough@apple.com"),
+ Reviewer("Geoffrey Garen", "ggaren@apple.com"),
+ Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]),
+ Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org"]),
+ Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"]),
+ Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"]),
+ Reviewer("Jeremy Orlow", "jorlow@chromium.org"),
+ Reviewer("John Sullivan", "sullivan@apple.com"),
+ Reviewer("Jon Honeycutt", "jhoneycutt@apple.com"),
+ Reviewer("Justin Garcia", "justin.garcia@apple.com"),
+ Reviewer("Ken Kocienda", "kocienda@apple.com"),
+ Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.christiansen@openbossa.org"]),
+ Reviewer("Kevin Decker", "kdecker@apple.com"),
+ Reviewer("Kevin McCullough", "kmccullough@apple.com"),
+ Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"]),
+ Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org"]),
+ Reviewer("Laszlo Gombos", "laszlo.1.gombos@nokia.com"),
+ Reviewer("Maciej Stachowiak", "mjs@apple.com"),
+ Reviewer("Mark Rowe", "mrowe@apple.com"),
+ Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org"]),
+ Reviewer("Oliver Hunt", "oliver@apple.com"),
+ Reviewer("Pavel Feldman", "pfeldman@chromium.org"),
+ Reviewer("Richard Williamson", "rjw@apple.com"),
+ Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org"]),
+ Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"]),
+ Reviewer("Shinichiro Hamaji", "hamaji@chromium.org"),
+ Reviewer("Simon Fraser", "simon.fraser@apple.com"),
+ Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@nokia.com"]),
+ Reviewer("Stephanie Lewis", "slewis@apple.com"),
+ Reviewer("Steve Falkenburg", "sfalken@apple.com"),
+ Reviewer("Tim Omernick", "timo@apple.com"),
+ Reviewer("Timothy Hatcher", ["timothy@hatcher.name", "timothy@apple.com"]),
+ Reviewer(u'Tor Arne Vestb\xf8', "vestbo@webkit.org"),
+ Reviewer("Vicki Murley", "vicki@apple.com"),
+ Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org"]),
+ Reviewer("Zack Rusin", "zack@kde.org"),
+]
+
+
+class CommitterList:
+
+ # Committers and reviewers are passed in to allow easy testing
+
+ def __init__(self,
+ committers=committers_unable_to_review,
+ reviewers=reviewers_list):
+ self._committers = committers + reviewers
+ self._reviewers = reviewers
+ self._committers_by_email = {}
+
+ def committers(self):
+ return self._committers
+
+ def reviewers(self):
+ return self._reviewers
+
+ def _email_to_committer_map(self):
+ if not len(self._committers_by_email):
+ for committer in self._committers:
+ for email in committer.emails:
+ self._committers_by_email[email] = committer
+ return self._committers_by_email
+
+ def committer_by_email(self, email):
+ return self._email_to_committer_map().get(email)
+
+ def reviewer_by_email(self, email):
+ committer = self.committer_by_email(email)
+ if committer and not committer.can_review:
+ return None
+ return committer
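A brief usage sketch (illustrative only; committers_unittest.py below exercises the same lookups with test data):

    from webkitpy.committers import CommitterList

    committer_list = CommitterList()  # defaults to the lists defined above
    reviewer = committer_list.reviewer_by_email("eric@webkit.org")
    if reviewer:
        print(reviewer)                   # "Eric Seidel" <eric@webkit.org>
        print(reviewer.can_review)        # True
        print(reviewer.bugzilla_email())  # eric@webkit.org (first listed address, per the FIXME above)
    # committer_by_email() also matches reviewers, since every Reviewer is a Committer.
    print(committer_list.committer_by_email("unknown@example.com"))  # None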
diff --git a/WebKitTools/Scripts/webkitpy/committers.pyc b/WebKitTools/Scripts/webkitpy/committers.pyc
new file mode 100644
index 0000000..bce8c17
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/committers.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/committers_unittest.py b/WebKitTools/Scripts/webkitpy/committers_unittest.py
new file mode 100644
index 0000000..f5dc539
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/committers_unittest.py
@@ -0,0 +1,65 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+from committers import CommitterList, Committer, Reviewer
+
+class CommittersTest(unittest.TestCase):
+
+ def test_committer_lookup(self):
+ committer = Committer('Test One', 'one@test.com')
+ reviewer = Reviewer('Test Two', ['two@test.com', 'two@rad.com', 'so_two@gmail.com'])
+ committer_list = CommitterList(committers=[committer], reviewers=[reviewer])
+
+ # Test valid committer and reviewer lookup
+ self.assertEqual(committer_list.committer_by_email('one@test.com'), committer)
+ self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer)
+ self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer)
+ self.assertEqual(committer_list.committer_by_email('two@rad.com'), reviewer)
+ self.assertEqual(committer_list.reviewer_by_email('so_two@gmail.com'), reviewer)
+
+ # Test that the first email is assumed to be the Bugzilla email address (for now)
+ self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com')
+
+ # Test that a known committer is not returned during reviewer lookup
+ self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None)
+
+ # Test that unknown email address fail both committer and reviewer lookup
+ self.assertEqual(committer_list.committer_by_email('bar@bar.com'), None)
+ self.assertEqual(committer_list.reviewer_by_email('bar@bar.com'), None)
+
+ # Test that emails returns a list.
+ self.assertEqual(committer.emails, ['one@test.com'])
+
+ # Test that committers returns committers and reviewers and reviewers() just reviewers.
+ self.assertEqual(committer_list.committers(), [committer, reviewer])
+ self.assertEqual(committer_list.reviewers(), [reviewer])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/credentials.py b/WebKitTools/Scripts/webkitpy/credentials.py
new file mode 100644
index 0000000..a4d8e34
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/credentials.py
@@ -0,0 +1,132 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Python module for reading stored web credentials from the OS.
+
+import getpass
+import os
+import platform
+import re
+
+from webkitpy.executive import Executive, ScriptError
+from webkitpy.webkit_logging import log
+from webkitpy.scm import Git
+
+
+class Credentials(object):
+
+ def __init__(self, host, git_prefix=None, executive=None, cwd=os.getcwd()):
+ self.host = host
+ self.git_prefix = git_prefix
+ self.executive = executive or Executive()
+ self.cwd = cwd
+
+ def _credentials_from_git(self):
+ return [self._read_git_config("username"),
+ self._read_git_config("password")]
+
+ def _read_git_config(self, key):
+ config_key = "%s.%s" % (self.git_prefix, key) if self.git_prefix \
+ else key
+ return self.executive.run_command(
+ ["git", "config", "--get", config_key],
+ error_handler=Executive.ignore_error).rstrip('\n')
+
+ def _keychain_value_with_label(self, label, source_text):
+ match = re.search("%s\"(?P<value>.+)\"" % label,
+ source_text,
+ re.MULTILINE)
+ if match:
+ return match.group('value')
+
+ def _is_mac_os_x(self):
+ return platform.mac_ver()[0]
+
+ def _parse_security_tool_output(self, security_output):
+ username = self._keychain_value_with_label("^\s*\"acct\"<blob>=",
+ security_output)
+ password = self._keychain_value_with_label("^password: ",
+ security_output)
+ return [username, password]
+
+ def _run_security_tool(self, username=None):
+ security_command = [
+ "/usr/bin/security",
+ "find-internet-password",
+ "-g",
+ "-s",
+ self.host,
+ ]
+ if username:
+ security_command += ["-a", username]
+
+ log("Reading Keychain for %s account and password. "
+ "Click \"Allow\" to continue..." % self.host)
+ try:
+ return self.executive.run_command(security_command)
+ except ScriptError:
+ # Failed to either find a keychain entry or some kind of OS-related
+ # error occurred (for instance, couldn't find the /usr/bin/security
+ # command).
+ log("Could not find a keychain entry for %s." % self.host)
+ return None
+
+ def _credentials_from_keychain(self, username=None):
+ if not self._is_mac_os_x():
+ return [username, None]
+
+ security_output = self._run_security_tool(username)
+ if security_output:
+ return self._parse_security_tool_output(security_output)
+ else:
+ return [None, None]
+
+ def read_credentials(self):
+ username = None
+ password = None
+
+ try:
+ if Git.in_working_directory(self.cwd):
+ (username, password) = self._credentials_from_git()
+ except OSError, e:
+ # Catch and ignore OSError exceptions such as "no such file
+ # or directory" (OSError errno 2), which imply that the Git
+ # command cannot be found/is not installed.
+ pass
+
+ if not username or not password:
+ (username, password) = self._credentials_from_keychain(username)
+
+ if not username:
+ username = raw_input("%s login: " % self.host)
+ if not password:
+ password = getpass.getpass("%s password for %s: " % (self.host,
+ username))
+
+ return [username, password]
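A rough sketch of the lookup order read_credentials() walks through; the host is real, but the git_prefix value is only illustrative.

    from webkitpy.credentials import Credentials

    credentials = Credentials("bugs.webkit.org", git_prefix="bugzilla")
    # 1. git config --get bugzilla.username / bugzilla.password, when run inside a Git checkout
    # 2. the Mac OS X Keychain, via /usr/bin/security find-internet-password -g -s bugs.webkit.org
    # 3. an interactive prompt for whatever is still missing
    username, password = credentials.read_credentials()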
diff --git a/WebKitTools/Scripts/webkitpy/credentials.pyc b/WebKitTools/Scripts/webkitpy/credentials.pyc
new file mode 100644
index 0000000..cd42568
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/credentials.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/credentials_unittest.py b/WebKitTools/Scripts/webkitpy/credentials_unittest.py
new file mode 100644
index 0000000..0bd5340
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/credentials_unittest.py
@@ -0,0 +1,127 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import tempfile
+import unittest
+from webkitpy.credentials import Credentials
+from webkitpy.executive import Executive
+from webkitpy.outputcapture import OutputCapture
+from webkitpy.mock import Mock
+
+class CredentialsTest(unittest.TestCase):
+ example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain"
+class: "inet"
+attributes:
+ 0x00000007 <blob>="bugs.webkit.org (test@webkit.org)"
+ 0x00000008 <blob>=<NULL>
+ "acct"<blob>="test@webkit.org"
+ "atyp"<blob>="form"
+ "cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000"
+ "crtr"<uint32>=<NULL>
+ "cusi"<sint32>=<NULL>
+ "desc"<blob>="Web form password"
+ "icmt"<blob>="default"
+ "invi"<sint32>=<NULL>
+ "mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000"
+ "nega"<sint32>=<NULL>
+ "path"<blob>=<NULL>
+ "port"<uint32>=0x00000000
+ "prot"<blob>=<NULL>
+ "ptcl"<uint32>="htps"
+ "scrp"<sint32>=<NULL>
+ "sdmn"<blob>=<NULL>
+ "srvr"<blob>="bugs.webkit.org"
+ "type"<uint32>=<NULL>
+password: "SECRETSAUCE"
+"""
+
+ def test_keychain_lookup_on_non_mac(self):
+ class FakeCredentials(Credentials):
+ def _is_mac_os_x(self):
+ return False
+ credentials = FakeCredentials("bugs.webkit.org")
+ self.assertEqual(credentials._is_mac_os_x(), False)
+ self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
+
+ def test_security_output_parse(self):
+ credentials = Credentials("bugs.webkit.org")
+ self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"])
+
+ def test_security_output_parse_entry_not_found(self):
+ credentials = Credentials("foo.example.com")
+ if not credentials._is_mac_os_x():
+ return # This test does not run on a non-Mac.
+
+ # Note, we ignore the captured output because it is already covered
+ # by the test case CredentialsTest._assert_security_call (below).
+ outputCapture = OutputCapture()
+ outputCapture.capture_output()
+ self.assertEqual(credentials._run_security_tool(), None)
+ outputCapture.restore_output()
+
+ def _assert_security_call(self, username=None):
+ executive_mock = Mock()
+ credentials = Credentials("example.com", executive=executive_mock)
+
+ expected_stderr = "Reading Keychain for example.com account and password. Click \"Allow\" to continue...\n"
+ OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr)
+
+ security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"]
+ if username:
+ security_args += ["-a", username]
+ executive_mock.run_command.assert_called_with(security_args)
+
+ def test_security_calls(self):
+ self._assert_security_call()
+ self._assert_security_call(username="foo")
+
+ def test_git_config_calls(self):
+ executive_mock = Mock()
+ credentials = Credentials("example.com", executive=executive_mock)
+ credentials._read_git_config("foo")
+ executive_mock.run_command.assert_called_with(["git", "config", "--get", "foo"], error_handler=Executive.ignore_error)
+
+ credentials = Credentials("example.com", git_prefix="test_prefix", executive=executive_mock)
+ credentials._read_git_config("foo")
+ executive_mock.run_command.assert_called_with(["git", "config", "--get", "test_prefix.foo"], error_handler=Executive.ignore_error)
+
+ def test_read_credentials_without_git_repo(self):
+ class FakeCredentials(Credentials):
+ def _is_mac_os_x(self):
+ return True
+ def _credentials_from_keychain(self, username):
+ return ["test@webkit.org", "SECRETSAUCE"]
+
+ temp_dir_path = tempfile.mkdtemp(suffix="not_a_git_repo")
+ credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path)
+ self.assertEqual(credentials.read_credentials(), ["test@webkit.org", "SECRETSAUCE"])
+ os.rmdir(temp_dir_path)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/diff_parser.py b/WebKitTools/Scripts/webkitpy/diff_parser.py
new file mode 100644
index 0000000..7dce7e8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/diff_parser.py
@@ -0,0 +1,162 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""WebKit's Python module for interacting with patches."""
+
+import logging
+import re
+
+
+_regexp_compile_cache = {}
+
+
+def match(pattern, string):
+ """Matches the string with the pattern, caching the compiled regexp."""
+ if pattern not in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = re.compile(pattern)
+ return _regexp_compile_cache[pattern].match(string)
+
+
+def git_diff_to_svn_diff(line):
+ """Converts a git formatted diff line to a svn formatted line.
+
+ Args:
+ line: A string representing a line of the diff.
+ """
+ conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"),
+ ("^new file.*", lambda matched: "\n"),
+ ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"),
+ ("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"),
+ ("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n"))
+
+ for pattern, conversion in conversion_patterns:
+ matched = match(pattern, line)
+ if matched:
+ return conversion(matched)
+ return line
+
+
+def get_diff_converter(first_diff_line):
+ """Gets a converter function of diff lines.
+
+ Args:
+ first_diff_line: The first filename line of a diff file.
+ If this line is git formatted, we'll return a
+ converter from git to SVN.
+ """
+ if match(r"^diff --git \w/", first_diff_line):
+ return git_diff_to_svn_diff
+ return lambda input: input
+
+
+_INITIAL_STATE = 1
+_DECLARED_FILE_PATH = 2
+_PROCESSING_CHUNK = 3
+
+
+class DiffFile:
+ """Contains the information for one file in a patch.
+
+ The field "lines" is a list which contains tuples in this format:
+ (deleted_line_number, new_line_number, line_string)
+ If deleted_line_number is zero, it means this line is newly added.
+ If new_line_number is zero, it means this line is deleted.
+ """
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.lines = []
+
+ def add_new_line(self, line_number, line):
+ self.lines.append((0, line_number, line))
+
+ def add_deleted_line(self, line_number, line):
+ self.lines.append((line_number, 0, line))
+
+ def add_unchanged_line(self, deleted_line_number, new_line_number, line):
+ self.lines.append((deleted_line_number, new_line_number, line))
+
+
+class DiffParser:
+ """A parser for a patch file.
+
+ The field "files" is a dict whose key is the filename and value is
+ a DiffFile object.
+ """
+
+ def __init__(self, diff_input):
+ """Parses a diff.
+
+ Args:
+ diff_input: An iterable object.
+ """
+ state = _INITIAL_STATE
+
+ self.files = {}
+ current_file = None
+ old_diff_line = None
+ new_diff_line = None
+ for line in diff_input:
+ line = line.rstrip("\n")
+ if state == _INITIAL_STATE:
+ transform_line = get_diff_converter(line)
+ line = transform_line(line)
+
+ file_declaration = match(r"^Index: (?P<FilePath>.+)", line)
+ if file_declaration:
+ filename = file_declaration.group('FilePath')
+ current_file = DiffFile(filename)
+ self.files[filename] = current_file
+ state = _DECLARED_FILE_PATH
+ continue
+
+ lines_changed = match(r"^@@ -(?P<OldStartLine>\d+)(,\d+)? \+(?P<NewStartLine>\d+)(,\d+)? @@", line)
+ if lines_changed:
+ if state != _DECLARED_FILE_PATH and state != _PROCESSING_CHUNK:
+ logging.error('Unexpected line change without file path declaration: %r' % line)
+ old_diff_line = int(lines_changed.group('OldStartLine'))
+ new_diff_line = int(lines_changed.group('NewStartLine'))
+ state = _PROCESSING_CHUNK
+ continue
+
+ if state == _PROCESSING_CHUNK:
+ if line.startswith('+'):
+ current_file.add_new_line(new_diff_line, line[1:])
+ new_diff_line += 1
+ elif line.startswith('-'):
+ current_file.add_deleted_line(old_diff_line, line[1:])
+ old_diff_line += 1
+ elif line.startswith(' '):
+ current_file.add_unchanged_line(old_diff_line, new_diff_line, line[1:])
+ old_diff_line += 1
+ new_diff_line += 1
+ elif line == '\\ No newline at end of file':
+ # Nothing to do. We may still have some added lines.
+ pass
+ else:
+ logging.error('Unexpected diff format when parsing a chunk: %r' % line)
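A minimal sketch of feeding DiffParser a git-format patch; the diff lines are made up, and the git headers are converted to svn "Index:" form internally before the per-file chunks are split out.

    from diff_parser import DiffParser

    patch_lines = [
        "diff --git a/foo.txt b/foo.txt",
        "index 1234567..89abcde 100644",
        "--- a/foo.txt",
        "+++ b/foo.txt",
        "@@ -1,2 +1,2 @@",
        " unchanged line",
        "-old line",
        "+new line",
    ]

    parser = DiffParser(patch_lines)  # accepts any iterable of diff lines
    diff_file = parser.files["foo.txt"]
    # Each entry is (deleted_line_number, new_line_number, text);
    # a zero marks the side on which the line does not exist.
    for entry in diff_file.lines:
        print(entry)
    # (1, 1, 'unchanged line')
    # (2, 0, 'old line')
    # (0, 2, 'new line')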
diff --git a/WebKitTools/Scripts/webkitpy/diff_parser_unittest.py b/WebKitTools/Scripts/webkitpy/diff_parser_unittest.py
new file mode 100644
index 0000000..7eb0eab
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/diff_parser_unittest.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+import diff_parser
+import re
+
+
+class DiffParserTest(unittest.TestCase):
+
+ _PATCH = '''diff --git a/WebCore/rendering/style/StyleFlexibleBoxData.h b/WebCore/rendering/style/StyleFlexibleBoxData.h
+index f5d5e74..3b6aa92 100644
+--- a/WebCore/rendering/style/StyleFlexibleBoxData.h
++++ b/WebCore/rendering/style/StyleFlexibleBoxData.h
+@@ -47,7 +47,6 @@ public:
+
+ unsigned align : 3; // EBoxAlignment
+ unsigned pack: 3; // EBoxAlignment
+- unsigned orient: 1; // EBoxOrient
+ unsigned lines : 1; // EBoxLines
+
+ private:
+diff --git a/WebCore/rendering/style/StyleRareInheritedData.cpp b/WebCore/rendering/style/StyleRareInheritedData.cpp
+index ce21720..324929e 100644
+--- a/WebCore/rendering/style/StyleRareInheritedData.cpp
++++ b/WebCore/rendering/style/StyleRareInheritedData.cpp
+@@ -39,6 +39,7 @@ StyleRareInheritedData::StyleRareInheritedData()
+ , textSizeAdjust(RenderStyle::initialTextSizeAdjust())
+ , resize(RenderStyle::initialResize())
+ , userSelect(RenderStyle::initialUserSelect())
++ , boxOrient(RenderStyle::initialBoxOrient())
+ {
+ }
+
+@@ -58,6 +59,7 @@ StyleRareInheritedData::StyleRareInheritedData(const StyleRareInheritedData& o)
+ , textSizeAdjust(o.textSizeAdjust)
+ , resize(o.resize)
+ , userSelect(o.userSelect)
++ , boxOrient(o.boxOrient)
+ {
+ }
+
+@@ -81,7 +83,8 @@ bool StyleRareInheritedData::operator==(const StyleRareInheritedData& o) const
+ && khtmlLineBreak == o.khtmlLineBreak
+ && textSizeAdjust == o.textSizeAdjust
+ && resize == o.resize
+- && userSelect == o.userSelect;
++ && userSelect == o.userSelect
++ && boxOrient == o.boxOrient;
+ }
+
+ bool StyleRareInheritedData::shadowDataEquivalent(const StyleRareInheritedData& o) const
+diff --git a/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+new file mode 100644
+index 0000000..6db26bd
+--- /dev/null
++++ b/LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum
+@@ -0,0 +1 @@
++61a373ee739673a9dcd7bac62b9f182e
+\ No newline at end of file
+'''
+
+ def test_diff_parser(self, parser=None):
+ if not parser:
+ parser = diff_parser.DiffParser(self._PATCH.splitlines())
+ self.assertEquals(3, len(parser.files))
+
+ self.assertTrue('WebCore/rendering/style/StyleFlexibleBoxData.h' in parser.files)
+ diff = parser.files['WebCore/rendering/style/StyleFlexibleBoxData.h']
+ self.assertEquals(7, len(diff.lines))
+ # The first two unchanged lines.
+ self.assertEquals((47, 47), diff.lines[0][0:2])
+ self.assertEquals('', diff.lines[0][2])
+ self.assertEquals((48, 48), diff.lines[1][0:2])
+ self.assertEquals(' unsigned align : 3; // EBoxAlignment', diff.lines[1][2])
+ # The deleted line
+ self.assertEquals((50, 0), diff.lines[3][0:2])
+ self.assertEquals(' unsigned orient: 1; // EBoxOrient', diff.lines[3][2])
+
+ # The first file looks OK. Let's check the next, more complicated file.
+ self.assertTrue('WebCore/rendering/style/StyleRareInheritedData.cpp' in parser.files)
+ diff = parser.files['WebCore/rendering/style/StyleRareInheritedData.cpp']
+ # There are 3 chunks.
+ self.assertEquals(7 + 7 + 9, len(diff.lines))
+ # Around an added line.
+ self.assertEquals((60, 61), diff.lines[9][0:2])
+ self.assertEquals((0, 62), diff.lines[10][0:2])
+ self.assertEquals((61, 63), diff.lines[11][0:2])
+ # Look through the last chunk, which contains both add's and delete's.
+ self.assertEquals((81, 83), diff.lines[14][0:2])
+ self.assertEquals((82, 84), diff.lines[15][0:2])
+ self.assertEquals((83, 85), diff.lines[16][0:2])
+ self.assertEquals((84, 0), diff.lines[17][0:2])
+ self.assertEquals((0, 86), diff.lines[18][0:2])
+ self.assertEquals((0, 87), diff.lines[19][0:2])
+ self.assertEquals((85, 88), diff.lines[20][0:2])
+ self.assertEquals((86, 89), diff.lines[21][0:2])
+ self.assertEquals((87, 90), diff.lines[22][0:2])
+
+ # Check if a newly added file is correctly handled.
+ diff = parser.files['LayoutTests/platform/mac/fast/flexbox/box-orient-button-expected.checksum']
+ self.assertEquals(1, len(diff.lines))
+ self.assertEquals((0, 1), diff.lines[0][0:2])
+
+ def test_git_mnemonicprefix(self):
+ p = re.compile(r' ([a|b])/')
+
+ prefixes = [
+ { 'a' : 'i', 'b' : 'w' }, # git-diff (compares the (i)ndex and the (w)ork tree)
+ { 'a' : 'c', 'b' : 'w' }, # git-diff HEAD (compares a (c)ommit and the (w)ork tree)
+ { 'a' : 'c', 'b' : 'i' }, # git diff --cached (compares a (c)ommit and the (i)ndex)
+ { 'a' : 'o', 'b' : 'w' }, # git-diff HEAD:file1 file2 (compares an (o)bject and a (w)ork tree entity)
+ { 'a' : '1', 'b' : '2' }, # git diff --no-index a b (compares two non-git things (1) and (2))
+ ]
+
+ for prefix in prefixes:
+ patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], self._PATCH)
+ self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/executive.py b/WebKitTools/Scripts/webkitpy/executive.py
new file mode 100644
index 0000000..50b119b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/executive.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import StringIO
+import subprocess
+import sys
+
+from webkitpy.webkit_logging import tee
+
+
+class ScriptError(Exception):
+
+ def __init__(self,
+ message=None,
+ script_args=None,
+ exit_code=None,
+ output=None,
+ cwd=None):
+ if not message:
+ message = 'Failed to run "%s"' % script_args
+ if exit_code:
+ message += " exit_code: %d" % exit_code
+ if cwd:
+ message += " cwd: %s" % cwd
+
+ Exception.__init__(self, message)
+ self.script_args = script_args # 'args' is already used by Exception
+ self.exit_code = exit_code
+ self.output = output
+ self.cwd = cwd
+
+ def message_with_output(self, output_limit=500):
+ if self.output:
+ if output_limit and len(self.output) > output_limit:
+ return "%s\nLast %s characters of output:\n%s" % \
+ (self, output_limit, self.output[-output_limit:])
+ return "%s\n%s" % (self, self.output)
+ return str(self)
+
+ def command_name(self):
+ command_path = self.script_args
+ if type(command_path) is list:
+ command_path = command_path[0]
+ return os.path.basename(command_path)
+
+
+def run_command(*args, **kwargs):
+ # FIXME: This should not be a global static.
+ # New code should use Executive.run_command directly instead
+ return Executive().run_command(*args, **kwargs)
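+
+# A minimal usage sketch (illustrative): run_command() delegates to a
+# throwaway Executive instance, so
+#     output = run_command(["echo", "hello"])
+# returns the command's combined stdout/stderr output as a string.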
+
+
+class Executive(object):
+
+ def _run_command_with_teed_output(self, args, teed_output):
+ child_process = subprocess.Popen(args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
+ # Use our own custom wait loop because Popen ignores a tee'd
+ # stderr/stdout.
+ # FIXME: This could be improved not to flatten output to stdout.
+ while True:
+ output_line = child_process.stdout.readline()
+            if output_line == "" and child_process.poll() is not None:
+ return child_process.poll()
+ teed_output.write(output_line)
+
+ def run_and_throw_if_fail(self, args, quiet=False):
+ # Cache the child's output locally so it can be used for error reports.
+ child_out_file = StringIO.StringIO()
+ if quiet:
+ dev_null = open(os.devnull, "w")
+ child_stdout = tee(child_out_file, dev_null if quiet else sys.stdout)
+ exit_code = self._run_command_with_teed_output(args, child_stdout)
+ if quiet:
+ dev_null.close()
+
+ child_output = child_out_file.getvalue()
+ child_out_file.close()
+
+ if exit_code:
+ raise ScriptError(script_args=args,
+ exit_code=exit_code,
+ output=child_output)
+
+ @staticmethod
+ def cpu_count():
+ # This API exists only in Python 2.6 and higher. :(
+ try:
+ import multiprocessing
+ return multiprocessing.cpu_count()
+ except (ImportError, NotImplementedError):
+ # This quantity is a lie but probably a reasonable guess for modern
+ # machines.
+ return 2
+
+ # Error handlers do not need to be static methods once all callers are
+ # updated to use an Executive object.
+
+ @staticmethod
+ def default_error_handler(error):
+ raise error
+
+ @staticmethod
+ def ignore_error(error):
+ pass
+
+ # FIXME: This should be merged with run_and_throw_if_fail
+
+ def run_command(self,
+ args,
+ cwd=None,
+ input=None,
+ error_handler=None,
+ return_exit_code=False,
+ return_stderr=True):
+ if hasattr(input, 'read'): # Check if the input is a file.
+ stdin = input
+ string_to_communicate = None
+ else:
+ stdin = subprocess.PIPE if input else None
+ string_to_communicate = input
+ if return_stderr:
+ stderr = subprocess.STDOUT
+ else:
+ stderr = None
+
+ process = subprocess.Popen(args,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=stderr,
+ cwd=cwd)
+ output = process.communicate(string_to_communicate)[0]
+ exit_code = process.wait()
+ if exit_code:
+ script_error = ScriptError(script_args=args,
+ exit_code=exit_code,
+ output=output,
+ cwd=cwd)
+ (error_handler or self.default_error_handler)(script_error)
+ if return_exit_code:
+ return exit_code
+ return output
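+
+    # A usage sketch (illustrative): callers that only want the exit status
+    # can suppress the default ScriptError, e.g.
+    #     exit_code = Executive().run_command(["false"],
+    #         error_handler=Executive.ignore_error, return_exit_code=True)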
diff --git a/WebKitTools/Scripts/webkitpy/executive.pyc b/WebKitTools/Scripts/webkitpy/executive.pyc
new file mode 100644
index 0000000..190fabb
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/executive.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/executive_unittest.py b/WebKitTools/Scripts/webkitpy/executive_unittest.py
new file mode 100644
index 0000000..f78e301
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/executive_unittest.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Daniel Bates (dbates@intudata.com). All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+from webkitpy.executive import Executive, run_command
+
+class ExecutiveTest(unittest.TestCase):
+
+ def test_run_command_with_bad_command(self):
+ def run_bad_command():
+ run_command(["foo_bar_command_blah"], error_handler=Executive.ignore_error, return_exit_code=True)
+ self.failUnlessRaises(OSError, run_bad_command)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/grammar.py b/WebKitTools/Scripts/webkitpy/grammar.py
new file mode 100644
index 0000000..78809e0
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/grammar.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+
+def plural(noun):
+ # This is a dumb plural() implementation that is just enough for our uses.
+ if re.search("h$", noun):
+ return noun + "es"
+ else:
+ return noun + "s"
+
+
+def pluralize(noun, count):
+ if count != 1:
+ noun = plural(noun)
+ return "%d %s" % (count, noun)
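+
+
+# For example (illustrative): pluralize("patch", 3) returns "3 patches",
+# while pluralize("bug", 1) returns "1 bug".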
diff --git a/WebKitTools/Scripts/webkitpy/grammar.pyc b/WebKitTools/Scripts/webkitpy/grammar.pyc
new file mode 100644
index 0000000..50edeeb
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/grammar.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/__init__.py
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py
new file mode 100644
index 0000000..15f2065
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/apache_http_server.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to start/stop the apache http server used by layout tests."""
+
+import logging
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+import http_server_base
+import path_utils
+import platform_utils
+
+
+class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
+
+ def __init__(self, output_dir):
+ """Args:
+ output_dir: the absolute path to the layout test result directory
+ """
+ self._output_dir = output_dir
+ self._httpd_proc = None
+ path_utils.maybe_make_directory(output_dir)
+
+ self.mappings = [{'port': 8000},
+ {'port': 8080},
+ {'port': 8081},
+ {'port': 8443, 'sslcert': True}]
+
+ # The upstream .conf file assumed the existence of /tmp/WebKit for
+ # placing apache files like the lock file there.
+ self._runtime_path = os.path.join("/tmp", "WebKit")
+ path_utils.maybe_make_directory(self._runtime_path)
+
+ # The PID returned when Apache is started goes away (due to dropping
+ # privileges?). The proper controlling PID is written to a file in the
+ # apache runtime directory.
+ self._pid_file = os.path.join(self._runtime_path, 'httpd.pid')
+
+ test_dir = path_utils.path_from_base('third_party', 'WebKit',
+ 'LayoutTests')
+ js_test_resources_dir = self._cygwin_safe_join(test_dir, "fast", "js",
+ "resources")
+ mime_types_path = self._cygwin_safe_join(test_dir, "http", "conf",
+ "mime.types")
+ cert_file = self._cygwin_safe_join(test_dir, "http", "conf",
+ "webkit-httpd.pem")
+ access_log = self._cygwin_safe_join(output_dir, "access_log.txt")
+ error_log = self._cygwin_safe_join(output_dir, "error_log.txt")
+ document_root = self._cygwin_safe_join(test_dir, "http", "tests")
+
+ executable = platform_utils.apache_executable_path()
+ if self._is_cygwin():
+ executable = self._get_cygwin_path(executable)
+
+ cmd = [executable,
+ '-f', self._get_apache_config_file_path(test_dir, output_dir),
+ '-C', "\'DocumentRoot %s\'" % document_root,
+ '-c', "\'Alias /js-test-resources %s\'" % js_test_resources_dir,
+ '-C', "\'Listen %s\'" % "127.0.0.1:8000",
+ '-C', "\'Listen %s\'" % "127.0.0.1:8081",
+ '-c', "\'TypesConfig \"%s\"\'" % mime_types_path,
+ '-c', "\'CustomLog \"%s\" common\'" % access_log,
+ '-c', "\'ErrorLog \"%s\"\'" % error_log,
+ '-C', "\'User \"%s\"\'" % os.environ.get("USERNAME",
+ os.environ.get("USER", ""))]
+
+ if self._is_cygwin():
+ cygbin = path_utils.path_from_base('third_party', 'cygwin', 'bin')
+ # Not entirely sure why, but from cygwin we need to run the
+ # httpd command through bash.
+ self._start_cmd = [
+ os.path.join(cygbin, 'bash.exe'),
+ '-c',
+ 'PATH=%s %s' % (self._get_cygwin_path(cygbin), " ".join(cmd)),
+ ]
+ else:
+            # TODO(ojan): When we get cygwin using Apache 2, set the
+            # cert file for cygwin as well.
+ cmd.extend(['-c', "\'SSLCertificateFile %s\'" % cert_file])
+ # Join the string here so that Cygwin/Windows and Mac/Linux
+ # can use the same code. Otherwise, we could remove the single
+ # quotes above and keep cmd as a sequence.
+ self._start_cmd = " ".join(cmd)
+
+ def _is_cygwin(self):
+ return sys.platform in ("win32", "cygwin")
+
+ def _cygwin_safe_join(self, *parts):
+ """Returns a platform appropriate path."""
+ path = os.path.join(*parts)
+ if self._is_cygwin():
+ return self._get_cygwin_path(path)
+ return path
+
+ def _get_cygwin_path(self, path):
+ """Convert a Windows path to a cygwin path.
+
+ The cygpath utility insists on converting paths that it thinks are
+ Cygwin root paths to what it thinks the correct roots are. So paths
+ such as "C:\b\slave\webkit-release\build\third_party\cygwin\bin"
+ are converted to plain "/usr/bin". To avoid this, we
+ do the conversion manually.
+
+ The path is expected to be an absolute path, on any drive.
+ """
+ drive_regexp = re.compile(r'([a-z]):[/\\]', re.IGNORECASE)
+
+ def lower_drive(matchobj):
+ return '/cygdrive/%s/' % matchobj.group(1).lower()
+ path = drive_regexp.sub(lower_drive, path)
+ return path.replace('\\', '/')
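+
+        # For example (illustrative): 'C:\\b\\slave\\build' maps to
+        # '/cygdrive/c/b/slave/build'.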
+
+ def _get_apache_config_file_path(self, test_dir, output_dir):
+ """Returns the path to the apache config file to use.
+ Args:
+ test_dir: absolute path to the LayoutTests directory.
+ output_dir: absolute path to the layout test results directory.
+ """
+ httpd_config = platform_utils.apache_config_file_path()
+ httpd_config_copy = os.path.join(output_dir, "httpd.conf")
+ httpd_conf = open(httpd_config).read()
+ if self._is_cygwin():
+ # This is a gross hack, but it lets us use the upstream .conf file
+ # and our checked in cygwin. This tells the server the root
+ # directory to look in for .so modules. It will use this path
+ # plus the relative paths to the .so files listed in the .conf
+ # file. We have apache/cygwin checked into our tree so
+ # people don't have to install it into their cygwin.
+ cygusr = path_utils.path_from_base('third_party', 'cygwin', 'usr')
+ httpd_conf = httpd_conf.replace('ServerRoot "/usr"',
+ 'ServerRoot "%s"' % self._get_cygwin_path(cygusr))
+
+        # TODO(ojan): Instead of writing an extra file, check in a conf file
+        # upstream. Or, even better, upstream/delete all our chrome http
+        # tests so we don't need this special-cased DocumentRoot and can
+        # then just use the upstream conf file.
+ chrome_document_root = path_utils.path_from_base('webkit', 'data',
+ 'layout_tests')
+ if self._is_cygwin():
+ chrome_document_root = self._get_cygwin_path(chrome_document_root)
+ httpd_conf = (httpd_conf +
+ self._get_virtual_host_config(chrome_document_root, 8081))
+
+ f = open(httpd_config_copy, 'wb')
+ f.write(httpd_conf)
+ f.close()
+
+ if self._is_cygwin():
+ return self._get_cygwin_path(httpd_config_copy)
+ return httpd_config_copy
+
+ def _get_virtual_host_config(self, document_root, port, ssl=False):
+ """Returns a <VirtualHost> directive block for an httpd.conf file.
+        It will listen on 127.0.0.1 on the given port.
+ """
+ return '\n'.join(('<VirtualHost 127.0.0.1:%s>' % port,
+ 'DocumentRoot %s' % document_root,
+ ssl and 'SSLEngine On' or '',
+ '</VirtualHost>', ''))
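+
+        # For port 8081 without SSL this yields, roughly (illustrative):
+        #     <VirtualHost 127.0.0.1:8081>
+        #     DocumentRoot /path/to/docroot
+        #     </VirtualHost>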
+
+ def _start_httpd_process(self):
+        """Starts the httpd process and returns whether it started without errors."""
+ # Use shell=True because we join the arguments into a string for
+        # the sake of Windows/Cygwin and it needs quoting that breaks
+ # shell=False.
+ self._httpd_proc = subprocess.Popen(self._start_cmd,
+ stderr=subprocess.PIPE,
+ shell=True)
+ err = self._httpd_proc.stderr.read()
+ if len(err):
+ logging.debug(err)
+ return False
+ return True
+
+ def start(self):
+ """Starts the apache http server."""
+ # Stop any currently running servers.
+ self.stop()
+
+ logging.debug("Starting apache http server")
+ server_started = self.wait_for_action(self._start_httpd_process)
+ if server_started:
+ logging.debug("Apache started. Testing ports")
+ server_started = self.wait_for_action(
+ self.is_server_running_on_all_ports)
+
+ if server_started:
+ logging.debug("Server successfully started")
+ else:
+ raise Exception('Failed to start http server')
+
+ def stop(self):
+ """Stops the apache http server."""
+ logging.debug("Shutting down any running http servers")
+ httpd_pid = None
+ if os.path.exists(self._pid_file):
+ httpd_pid = int(open(self._pid_file).readline())
+ path_utils.shut_down_http_server(httpd_pid)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py
new file mode 100755
index 0000000..dfcb44f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to help start/stop the lighttpd server used by layout tests."""
+
+
+import logging
+import optparse
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import urllib
+
+import http_server_base
+import path_utils
+
+class HttpdNotStarted(Exception): pass
+
+def remove_log_files(folder, starts_with):
+ files = os.listdir(folder)
+ for file in files:
+ if file.startswith(starts_with):
+ full_path = os.path.join(folder, file)
+ os.remove(full_path)
+
+
+class Lighttpd(http_server_base.HttpServerBase):
+ # Webkit tests
+ try:
+ _webkit_tests = path_utils.path_from_base('third_party', 'WebKit',
+ 'LayoutTests', 'http',
+ 'tests')
+ _js_test_resource = path_utils.path_from_base('third_party', 'WebKit',
+ 'LayoutTests', 'fast',
+ 'js', 'resources')
+ except path_utils.PathNotFound:
+ _webkit_tests = None
+ _js_test_resource = None
+
+ # Path where we can access all of the tests
+ _all_tests = path_utils.path_from_base('webkit', 'data', 'layout_tests')
+ # Self generated certificate for SSL server (for client cert get
+ # <base-path>\chrome\test\data\ssl\certs\root_ca_cert.crt)
+ _pem_file = path_utils.path_from_base(
+ os.path.dirname(os.path.abspath(__file__)), 'httpd2.pem')
+ # One mapping where we can get to everything
+ VIRTUALCONFIG = [{'port': 8081, 'docroot': _all_tests}]
+
+ if _webkit_tests:
+ VIRTUALCONFIG.extend(
+ # Three mappings (one with SSL enabled) for LayoutTests http tests
+ [{'port': 8000, 'docroot': _webkit_tests},
+ {'port': 8080, 'docroot': _webkit_tests},
+ {'port': 8443, 'docroot': _webkit_tests, 'sslcert': _pem_file}])
+
+ def __init__(self, output_dir, background=False, port=None,
+ root=None, register_cygwin=None, run_background=None):
+ """Args:
+ output_dir: the absolute path to the layout test result directory
+ """
+ self._output_dir = output_dir
+ self._process = None
+ self._port = port
+ self._root = root
+ self._register_cygwin = register_cygwin
+ self._run_background = run_background
+ if self._port:
+ self._port = int(self._port)
+
+ def is_running(self):
+        return self._process is not None
+
+ def start(self):
+ if self.is_running():
+            raise RuntimeError('Lighttpd already running')
+
+ base_conf_file = path_utils.path_from_base('third_party',
+ 'WebKitTools', 'Scripts', 'webkitpy', 'layout_tests',
+ 'layout_package', 'lighttpd.conf')
+ out_conf_file = os.path.join(self._output_dir, 'lighttpd.conf')
+ time_str = time.strftime("%d%b%Y-%H%M%S")
+ access_file_name = "access.log-" + time_str + ".txt"
+ access_log = os.path.join(self._output_dir, access_file_name)
+ log_file_name = "error.log-" + time_str + ".txt"
+ error_log = os.path.join(self._output_dir, log_file_name)
+
+ # Remove old log files. We only need to keep the last ones.
+ remove_log_files(self._output_dir, "access.log-")
+ remove_log_files(self._output_dir, "error.log-")
+
+ # Write out the config
+ f = file(base_conf_file, 'rb')
+ base_conf = f.read()
+ f.close()
+
+ f = file(out_conf_file, 'wb')
+ f.write(base_conf)
+
+ # Write out our cgi handlers. Run perl through env so that it
+ # processes the #! line and runs perl with the proper command
+ # line arguments. Emulate apache's mod_asis with a cat cgi handler.
+ f.write(('cgi.assign = ( ".cgi" => "/usr/bin/env",\n'
+ ' ".pl" => "/usr/bin/env",\n'
+ ' ".asis" => "/bin/cat",\n'
+ ' ".php" => "%s" )\n\n') %
+ path_utils.lighttpd_php_path())
+
+ # Setup log files
+ f.write(('server.errorlog = "%s"\n'
+ 'accesslog.filename = "%s"\n\n') % (error_log, access_log))
+
+        # Set up the upload folder, which holds temporary upload files and
+        # POST data. This is used to support XHR layout tests that do POST.
+ f.write(('server.upload-dirs = ( "%s" )\n\n') % (self._output_dir))
+
+ # Setup a link to where the js test templates are stored
+ f.write(('alias.url = ( "/js-test-resources" => "%s" )\n\n') %
+ (self._js_test_resource))
+
+        # Dump out the virtual host config at the bottom.
+ if self._root:
+ if self._port:
+ # Have both port and root dir.
+ mappings = [{'port': self._port, 'docroot': self._root}]
+ else:
+ # Have only a root dir - set the ports as for LayoutTests.
+ # This is used in ui_tests to run http tests against a browser.
+
+ # default set of ports as for LayoutTests but with a
+ # specified root.
+ mappings = [{'port': 8000, 'docroot': self._root},
+ {'port': 8080, 'docroot': self._root},
+ {'port': 8443, 'docroot': self._root,
+ 'sslcert': Lighttpd._pem_file}]
+ else:
+ mappings = self.VIRTUALCONFIG
+ for mapping in mappings:
+ ssl_setup = ''
+ if 'sslcert' in mapping:
+ ssl_setup = (' ssl.engine = "enable"\n'
+ ' ssl.pemfile = "%s"\n' % mapping['sslcert'])
+
+ f.write(('$SERVER["socket"] == "127.0.0.1:%d" {\n'
+ ' server.document-root = "%s"\n' +
+ ssl_setup +
+ '}\n\n') % (mapping['port'], mapping['docroot']))
+ f.close()
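+
+        # For an SSL mapping such as {'port': 8443, 'docroot': '/tests',
+        # 'sslcert': ...} the loop above writes, roughly (illustrative):
+        #     $SERVER["socket"] == "127.0.0.1:8443" {
+        #         server.document-root = "/tests"
+        #         ssl.engine = "enable"
+        #         ssl.pemfile = "..."
+        #     }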
+
+ executable = path_utils.lighttpd_executable_path()
+ module_path = path_utils.lighttpd_module_path()
+ start_cmd = [executable,
+ # Newly written config file
+ '-f', path_utils.path_from_base(self._output_dir,
+ 'lighttpd.conf'),
+ # Where it can find its module dynamic libraries
+ '-m', module_path]
+
+        if not self._run_background:
+            # Don't detach; run in the foreground.
+            start_cmd.append('-D')
+
+ # Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
+ # bug that mod_alias.so loads it from the hard coded path.
+ if sys.platform == 'darwin':
+ tmp_module_path = '/tmp/lighttpd/lib'
+ if not os.path.exists(tmp_module_path):
+ os.makedirs(tmp_module_path)
+ lib_file = 'liblightcomp.dylib'
+ shutil.copyfile(os.path.join(module_path, lib_file),
+ os.path.join(tmp_module_path, lib_file))
+
+ # Put the cygwin directory first in the path to find cygwin1.dll
+ env = os.environ
+ if sys.platform in ('cygwin', 'win32'):
+ env['PATH'] = '%s;%s' % (
+ path_utils.path_from_base('third_party', 'cygwin', 'bin'),
+ env['PATH'])
+
+ if sys.platform == 'win32' and self._register_cygwin:
+ setup_mount = path_utils.path_from_base('third_party', 'cygwin',
+ 'setup_mount.bat')
+ subprocess.Popen(setup_mount).wait()
+
+ logging.debug('Starting http server')
+ self._process = subprocess.Popen(start_cmd, env=env)
+
+ # Wait for server to start.
+ self.mappings = mappings
+ server_started = self.wait_for_action(
+ self.is_server_running_on_all_ports)
+
+ # Our process terminated already
+        if not server_started or self._process.returncode is not None:
+            raise HttpdNotStarted('Failed to start httpd.')
+
+ logging.debug("Server successfully started")
+
+ # TODO(deanm): Find a nicer way to shutdown cleanly. Our log files are
+ # probably not being flushed, etc... why doesn't our python have os.kill ?
+
+ def stop(self, force=False):
+ if not force and not self.is_running():
+ return
+
+ httpd_pid = None
+ if self._process:
+ httpd_pid = self._process.pid
+ path_utils.shut_down_http_server(httpd_pid)
+
+ if self._process:
+ self._process.wait()
+ self._process = None
+
+if '__main__' == __name__:
+ # Provide some command line params for starting/stopping the http server
+ # manually. Also used in ui_tests to run http layout tests in a browser.
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('-k', '--server',
+ help='Server action (start|stop)')
+ option_parser.add_option('-p', '--port',
+ help='Port to listen on (overrides layout test ports)')
+ option_parser.add_option('-r', '--root',
+ help='Absolute path to DocumentRoot (overrides layout test roots)')
+ option_parser.add_option('--register_cygwin', action="store_true",
+ dest="register_cygwin", help='Register Cygwin paths (on Win try bots)')
+ option_parser.add_option('--run_background', action="store_true",
+ dest="run_background",
+ help='Run on background (for running as UI test)')
+ options, args = option_parser.parse_args()
+
+ if not options.server:
+ print ('Usage: %s --server {start|stop} [--root=root_dir]'
+ ' [--port=port_number]' % sys.argv[0])
+ else:
+ if (options.root is None) and (options.port is not None):
+            # Specifying a root but not a port means we want httpd on the
+            # default set of ports that LayoutTests use, but pointing to a
+            # different source of tests. Specifying a port but no root does
+            # not seem meaningful.
+            raise RuntimeError('Specifying a port also requires a root.')
+ httpd = Lighttpd(tempfile.gettempdir(),
+ port=options.port,
+ root=options.root,
+ register_cygwin=options.register_cygwin,
+ run_background=options.run_background)
+ if 'start' == options.server:
+ httpd.start()
+ else:
+ httpd.stop(force=True)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py
new file mode 100644
index 0000000..2720486
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/http_server_base.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Base class with common routines between the Apache and Lighttpd servers."""
+
+import logging
+import time
+import urllib
+
+
+class HttpServerBase(object):
+
+ def wait_for_action(self, action):
+ """Repeat the action for 20 seconds or until it succeeds. Returns
+ whether it succeeded."""
+ start_time = time.time()
+ while time.time() - start_time < 20:
+ if action():
+ return True
+ time.sleep(1)
+
+ return False
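+
+        # A typical call (illustrative) polls until the subclass's check
+        # passes, giving up after roughly 20 seconds:
+        #     started = server.wait_for_action(
+        #         server.is_server_running_on_all_ports)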
+
+ def is_server_running_on_all_ports(self):
+ """Returns whether the server is running on all the desired ports."""
+ for mapping in self.mappings:
+ if 'sslcert' in mapping:
+ http_suffix = 's'
+ else:
+ http_suffix = ''
+
+ url = 'http%s://127.0.0.1:%d/' % (http_suffix, mapping['port'])
+
+ try:
+ response = urllib.urlopen(url)
+ logging.debug("Server running at %s" % url)
+ except IOError:
+ logging.debug("Server NOT running at %s" % url)
+ return False
+
+ return True
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem
new file mode 100644
index 0000000..6349b78
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/httpd2.pem
@@ -0,0 +1,41 @@
+-----BEGIN CERTIFICATE-----
+MIIEZDCCAkygAwIBAgIBATANBgkqhkiG9w0BAQUFADBgMRAwDgYDVQQDEwdUZXN0
+IENBMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMN
+TW91bnRhaW4gVmlldzESMBAGA1UEChMJQ2VydCBUZXN0MB4XDTA4MDcyODIyMzIy
+OFoXDTEzMDcyNzIyMzIyOFowSjELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlm
+b3JuaWExEjAQBgNVBAoTCUNlcnQgVGVzdDESMBAGA1UEAxMJMTI3LjAuMC4xMIGf
+MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU3
+3BdRCd67DFM44GRrsjDSH4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYG
+qgNiV2ywxTlMj7NlN2C7SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a
+3JPJe1TaIab5GwIDAQABo4HCMIG/MAkGA1UdEwQCMAAwHQYDVR0OBBYEFCYLBv5K
+x5sLNVlpLh5FwTwhdDl7MIGSBgNVHSMEgYowgYeAFF3Of5nj1BlBMU/Gz7El9Vqv
+45cxoWSkYjBgMRAwDgYDVQQDEwdUZXN0IENBMQswCQYDVQQGEwJVUzETMBEGA1UE
+CBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNTW91bnRhaW4gVmlldzESMBAGA1UEChMJ
+Q2VydCBUZXN0ggkA1FGT1D/e2U4wDQYJKoZIhvcNAQEFBQADggIBAEtkVmLObUgk
+b2cIA2S+QDtifq1UgVfBbytvR2lFmnADOR55mo0gHQG3HHqq4g034LmoVXDHhUk8
+Gb6aFiv4QubmVhLXcUelTRXwiNvGzkW7pC6Jrq105hdPjzXMKTcmiLaopm5Fqfc7
+hj5Cn1Sjspc8pdeQjrbeMdvca7KlFrGP8YkwCU2xOOX9PiN9G0966BWfjnr/fZZp
++OQVuUFHdiAZwthEMuDpAAXHqYXIsermgdOpgJaA53cf8NqBV2QGhtFgtsJCRoiu
+7DKqhyRWBGyz19VIH2b7y+6qvQVxuHk19kKRM0nftw/yNcJnm7gtttespMUPsOMa
+a2SD1G0hm0TND6vxaBhgR3cVqpl/qIpAdFi00Tm7hTyYE7I43zPW03t+/DpCt3Um
+EMRZsQ90co5q+bcx/vQ7YAtwUh30uMb0wpibeyCwDp8cqNmSiRkEuc/FjTYes5t8
+5gR//WX1l0+qjrjusO9NmoLnq2Yk6UcioX+z+q6Z/dudGfqhLfeWD2Q0LWYA242C
+d7km5Y3KAt1PJdVsof/aiVhVdddY/OIEKTRQhWEdDbosy2eh16BCKXT2FFvhNDg1
+AYFvn6I8nj9IldMJiIc3DdhacEAEzRMeRgPdzAa1griKUGknxsyTyRii8ru0WS6w
+DCNrlDOVXdzYGEZooBI76BDVY0W0akjV
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDQj2tPWPUgbuI4H3/3dnttqVbndwU33BdRCd67DFM44GRrsjDS
+H4bY/EbFyX9D52d/iy6ZaAmDePcCz5k/fgP3DMujykYGqgNiV2ywxTlMj7NlN2C7
+SRt68fQMZr5iI7rypdxuaZt9lSMD3ENBffYtuLTyZd9a3JPJe1TaIab5GwIDAQAB
+AoGANHXu8z2YIzlhE+bwhGm8MGBpKL3qhRuKjeriqMA36tWezOw8lY4ymEAU+Ulv
+BsCdaxqydQoTYou57m4TyUHEcxq9pq3H0zB0qL709DdHi/t4zbV9XIoAzC5v0/hG
+9+Ca29TwC02FCw+qLkNrtwCpwOcQmc+bPxqvFu1iMiahURECQQD2I/Hi2413CMZz
+TBjl8fMiVO9GhA2J0sc8Qi+YcgJakaLD9xcbaiLkTzPZDlA389C1b6Ia+poAr4YA
+Ve0FFbxpAkEA2OobayyHE/QtPEqoy6NLR57jirmVBNmSWWd4lAyL5UIHIYVttJZg
+8CLvbzaU/iDGwR+wKsM664rKPHEmtlyo4wJBAMeSqYO5ZOCJGu9NWjrHjM3fdAsG
+8zs2zhiLya+fcU0iHIksBW5TBmt71Jw/wMc9R5J1K0kYvFml98653O5si1ECQBCk
+RV4/mE1rmlzZzYFyEcB47DQkcM5ictvxGEsje0gnfKyRtAz6zI0f4QbDRUMJ+LWw
+XK+rMsYHa+SfOb0b9skCQQCLdeonsIpFDv/Uv+flHISy0WA+AFkLXrRkBKh6G/OD
+dMHaNevkJgUnpceVEnkrdenp5CcEoFTI17pd+nBgDm/B
+-----END RSA PRIVATE KEY-----
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
new file mode 100644
index 0000000..b7b26e9
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+
+from layout_package import json_results_generator
+from layout_package import path_utils
+from layout_package import test_expectations
+from layout_package import test_failures
+
+
+class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
+ """A JSON results generator for layout tests."""
+
+ LAYOUT_TESTS_PATH = "LayoutTests"
+
+ # Additional JSON fields.
+ WONTFIX = "wontfixCounts"
+ DEFERRED = "deferredCounts"
+
+ def __init__(self, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_timings, expectations, result_summary, all_tests):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+ Args:
+ result_summary: ResultsSummary object storing the summary of the test
+ results.
+ (see the comment of JSONResultsGenerator.__init__ for other Args)
+ """
+
+ self._builder_name = builder_name
+ self._build_name = build_name
+ self._build_number = build_number
+ self._builder_base_url = builder_base_url
+ self._results_file_path = os.path.join(results_file_base_path,
+ self.RESULTS_FILENAME)
+ self._expectations = expectations
+
+ # We don't use self._skipped_tests and self._passed_tests as we
+ # override _InsertFailureSummaries.
+
+ # We want relative paths to LayoutTest root for JSON output.
+ path_to_name = self._get_path_relative_to_layout_test_root
+ self._result_summary = result_summary
+ self._failures = dict(
+ (path_to_name(test), test_failures.determine_result_type(failures))
+ for (test, failures) in result_summary.failures.iteritems())
+ self._all_tests = [path_to_name(test) for test in all_tests]
+ self._test_timings = dict(
+ (path_to_name(test_tuple.filename), test_tuple.test_run_time)
+ for test_tuple in test_timings)
+
+ self._generate_json_output()
+
+ def _get_path_relative_to_layout_test_root(self, test):
+ """Returns the path of the test relative to the layout test root.
+ For example, for:
+ src/third_party/WebKit/LayoutTests/fast/forms/foo.html
+ We would return
+ fast/forms/foo.html
+ """
+ index = test.find(self.LAYOUT_TESTS_PATH)
+        if index != -1:
+ index += len(self.LAYOUT_TESTS_PATH)
+
+        if index == -1:
+ # Already a relative path.
+ relativePath = test
+ else:
+ relativePath = test[index + 1:]
+
+ # Make sure all paths are unix-style.
+ return relativePath.replace('\\', '/')
+
+ # override
+ def _convert_json_to_current_version(self, results_json):
+ archive_version = None
+ if self.VERSION_KEY in results_json:
+ archive_version = results_json[self.VERSION_KEY]
+
+ super(JSONLayoutResultsGenerator,
+ self)._convert_json_to_current_version(results_json)
+
+ # version 2->3
+ if archive_version == 2:
+ for results_for_builder in results_json.itervalues():
+ try:
+ test_results = results_for_builder[self.TESTS]
+ except:
+ continue
+
+ for test in test_results:
+ # Make sure all paths are relative
+ test_path = self._get_path_relative_to_layout_test_root(test)
+ if test_path != test:
+ test_results[test_path] = test_results[test]
+ del test_results[test]
+
+ # override
+ def _insert_failure_summaries(self, results_for_builder):
+ summary = self._result_summary
+
+ self._insert_item_into_raw_list(results_for_builder,
+ len((set(summary.failures.keys()) |
+ summary.tests_by_expectation[test_expectations.SKIP]) &
+ summary.tests_by_timeline[test_expectations.NOW]),
+ self.FIXABLE_COUNT)
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_failure_summary_entry(test_expectations.NOW),
+ self.FIXABLE)
+ self._insert_item_into_raw_list(results_for_builder,
+ len(self._expectations.get_tests_with_timeline(
+ test_expectations.NOW)), self.ALL_FIXABLE_COUNT)
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_failure_summary_entry(test_expectations.DEFER),
+ self.DEFERRED)
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_failure_summary_entry(test_expectations.WONTFIX),
+ self.WONTFIX)
+
+ # override
+ def _normalize_results_json(self, test, test_name, tests):
+ super(JSONLayoutResultsGenerator, self)._normalize_results_json(
+ test, test_name, tests)
+
+ # Remove tests that don't exist anymore.
+ full_path = os.path.join(path_utils.layout_tests_dir(), test_name)
+ full_path = os.path.normpath(full_path)
+ if not os.path.exists(full_path):
+ del tests[test_name]
+
+ def _get_failure_summary_entry(self, timeline):
+ """Creates a summary object to insert into the JSON.
+
+ Args:
+          timeline: current test_expectations timeline to build the entry for
+              (e.g., test_expectations.NOW, etc.). The summary itself is
+              taken from self._result_summary.
+ """
+ entry = {}
+ summary = self._result_summary
+ timeline_tests = summary.tests_by_timeline[timeline]
+ entry[self.SKIP_RESULT] = len(
+ summary.tests_by_expectation[test_expectations.SKIP] &
+ timeline_tests)
+ entry[self.PASS_RESULT] = len(
+ summary.tests_by_expectation[test_expectations.PASS] &
+ timeline_tests)
+ for failure_type in summary.tests_by_expectation.keys():
+ if failure_type not in self.FAILURE_TO_CHAR:
+ continue
+ count = len(summary.tests_by_expectation[failure_type] &
+ timeline_tests)
+ entry[self.FAILURE_TO_CHAR[failure_type]] = count
+ return entry
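+
+        # The resulting entry maps result characters to counts, for example
+        # (illustrative numbers): {"X": 12, "P": 340, "T": 3, "C": 1}, where
+        # "X" is skipped, "P" is passed, "T" is timeout and "C" is crash.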
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
new file mode 100644
index 0000000..596e1e4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -0,0 +1,418 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import subprocess
+import sys
+import time
+import urllib2
+import xml.dom.minidom
+
+from layout_package import path_utils
+from layout_package import test_expectations
+
+sys.path.append(path_utils.path_from_base('third_party', 'WebKit',
+ 'WebKitTools'))
+import simplejson
+
+
+class JSONResultsGenerator(object):
+
+ MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
+ # Min time (seconds) that will be added to the JSON.
+ MIN_TIME = 1
+ JSON_PREFIX = "ADD_RESULTS("
+ JSON_SUFFIX = ");"
+ PASS_RESULT = "P"
+ SKIP_RESULT = "X"
+ NO_DATA_RESULT = "N"
+ VERSION = 3
+ VERSION_KEY = "version"
+ RESULTS = "results"
+ TIMES = "times"
+ BUILD_NUMBERS = "buildNumbers"
+ WEBKIT_SVN = "webkitRevision"
+ CHROME_SVN = "chromeRevision"
+ TIME = "secondsSinceEpoch"
+ TESTS = "tests"
+
+ FIXABLE_COUNT = "fixableCount"
+ FIXABLE = "fixableCounts"
+ ALL_FIXABLE_COUNT = "allFixableCount"
+
+ # Note that we omit test_expectations.FAIL from this list because
+ # it should never show up (it's a legacy input expectation, never
+ # an output expectation).
+ FAILURE_TO_CHAR = {test_expectations.CRASH: "C",
+ test_expectations.TIMEOUT: "T",
+ test_expectations.IMAGE: "I",
+ test_expectations.TEXT: "F",
+ test_expectations.MISSING: "O",
+ test_expectations.IMAGE_PLUS_TEXT: "Z"}
+ FAILURE_CHARS = FAILURE_TO_CHAR.values()
+
+ RESULTS_FILENAME = "results.json"
+
+ def __init__(self, builder_name, build_name, build_number,
+ results_file_base_path, builder_base_url,
+ test_timings, failures, passed_tests, skipped_tests, all_tests):
+ """Modifies the results.json file. Grabs it off the archive directory
+ if it is not found locally.
+
+        Args:
+ builder_name: the builder name (e.g. Webkit).
+ build_name: the build name (e.g. webkit-rel).
+ build_number: the build number.
+ results_file_base_path: Absolute path to the directory containing the
+ results json file.
+ builder_base_url: the URL where we have the archived test results.
+ test_timings: Map of test name to a test_run-time.
+ failures: Map of test name to a failure type (of test_expectations).
+ passed_tests: A set containing all the passed tests.
+ skipped_tests: A set containing all the skipped tests.
+ all_tests: List of all the tests that were run. This should not
+ include skipped tests.
+ """
+ self._builder_name = builder_name
+ self._build_name = build_name
+ self._build_number = build_number
+ self._builder_base_url = builder_base_url
+ self._results_file_path = os.path.join(results_file_base_path,
+ self.RESULTS_FILENAME)
+ self._test_timings = test_timings
+ self._failures = failures
+ self._passed_tests = passed_tests
+ self._skipped_tests = skipped_tests
+ self._all_tests = all_tests
+
+ self._generate_json_output()
+
+ def _generate_json_output(self):
+ """Generates the JSON output file."""
+ json = self._get_json()
+ if json:
+ results_file = open(self._results_file_path, "w")
+ results_file.write(json)
+ results_file.close()
+
+ def _get_svn_revision(self, in_directory=None):
+ """Returns the svn revision for the given directory.
+
+ Args:
+ in_directory: The directory where svn is to be run.
+ """
+ output = subprocess.Popen(["svn", "info", "--xml"],
+ cwd=in_directory,
+ shell=(sys.platform == 'win32'),
+ stdout=subprocess.PIPE).communicate()[0]
+ try:
+ dom = xml.dom.minidom.parseString(output)
+ return dom.getElementsByTagName('entry')[0].getAttribute(
+ 'revision')
+ except xml.parsers.expat.ExpatError:
+ return ""
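+
+        # The XML output carries the revision on the first <entry> element,
+        # e.g. (illustrative): <entry ... revision="12345">.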
+
+ def _get_archived_json_results(self):
+ """Reads old results JSON file if it exists.
+ Returns (archived_results, error) tuple where error is None if results
+ were successfully read.
+ """
+ results_json = {}
+ old_results = None
+ error = None
+
+ if os.path.exists(self._results_file_path):
+ old_results_file = open(self._results_file_path, "r")
+ old_results = old_results_file.read()
+ elif self._builder_base_url:
+ # Check if we have the archived JSON file on the buildbot server.
+ results_file_url = (self._builder_base_url +
+ self._build_name + "/" + self.RESULTS_FILENAME)
+ logging.error("Local results.json file does not exist. Grabbing "
+ "it off the archive at " + results_file_url)
+
+ try:
+ results_file = urllib2.urlopen(results_file_url)
+ info = results_file.info()
+ old_results = results_file.read()
+ except urllib2.HTTPError, http_error:
+ # A non-4xx status code means the bot is hosed for some reason
+ # and we can't grab the results.json file off of it.
+                if http_error.code < 400 or http_error.code >= 500:
+ error = http_error
+ except urllib2.URLError, url_error:
+ error = url_error
+
+ if old_results:
+ # Strip the prefix and suffix so we can get the actual JSON object.
+ old_results = old_results[len(self.JSON_PREFIX):
+ len(old_results) - len(self.JSON_SUFFIX)]
+
+ try:
+ results_json = simplejson.loads(old_results)
+ except:
+ logging.debug("results.json was not valid JSON. Clobbering.")
+ # The JSON file is not valid JSON. Just clobber the results.
+ results_json = {}
+ else:
+ logging.debug('Old JSON results do not exist. Starting fresh.')
+ results_json = {}
+
+ return results_json, error
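+
+        # The stored file is JSONP-style, e.g. (illustrative):
+        #     ADD_RESULTS({"Webkit": {...}});
+        # so JSON_PREFIX and JSON_SUFFIX are stripped before parsing.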
+
+ def _get_json(self):
+ """Gets the results for the results.json file."""
+ results_json, error = self._get_archived_json_results()
+ if error:
+ # If there was an error don't write a results.json
+ # file at all as it would lose all the information on the bot.
+ logging.error("Archive directory is inaccessible. Not modifying "
+ "or clobbering the results.json file: " + str(error))
+ return None
+
+ builder_name = self._builder_name
+ if results_json and builder_name not in results_json:
+ logging.debug("Builder name (%s) is not in the results.json file."
+ % builder_name)
+
+ self._convert_json_to_current_version(results_json)
+
+ if builder_name not in results_json:
+ results_json[builder_name] = (
+ self._create_results_for_builder_json())
+
+ results_for_builder = results_json[builder_name]
+
+ self._insert_generic_metadata(results_for_builder)
+
+ self._insert_failure_summaries(results_for_builder)
+
+ # Update the all failing tests with result type and time.
+ tests = results_for_builder[self.TESTS]
+ all_failing_tests = set(self._failures.iterkeys())
+ all_failing_tests.update(tests.iterkeys())
+ for test in all_failing_tests:
+ self._insert_test_time_and_result(test, tests)
+
+ # Specify separators in order to get compact encoding.
+ results_str = simplejson.dumps(results_json, separators=(',', ':'))
+ return self.JSON_PREFIX + results_str + self.JSON_SUFFIX
+
+ def _insert_failure_summaries(self, results_for_builder):
+ """Inserts aggregate pass/failure statistics into the JSON.
+ This method reads self._skipped_tests, self._passed_tests and
+ self._failures and inserts FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT
+ entries.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ """
+ # Insert the number of tests that failed.
+ self._insert_item_into_raw_list(results_for_builder,
+ len(set(self._failures.keys()) | self._skipped_tests),
+ self.FIXABLE_COUNT)
+
+ # Create a pass/skip/failure summary dictionary.
+ entry = {}
+ entry[self.SKIP_RESULT] = len(self._skipped_tests)
+ entry[self.PASS_RESULT] = len(self._passed_tests)
+ get = entry.get
+ for failure_type in self._failures.values():
+ failure_char = self.FAILURE_TO_CHAR[failure_type]
+ entry[failure_char] = get(failure_char, 0) + 1
+
+ # Insert the pass/skip/failure summary dictionary.
+ self._insert_item_into_raw_list(results_for_builder, entry,
+ self.FIXABLE)
+
+ # Insert the number of all the tests that are supposed to pass.
+ self._insert_item_into_raw_list(results_for_builder,
+ len(self._skipped_tests | self._all_tests),
+ self.ALL_FIXABLE_COUNT)
+
+ def _insert_item_into_raw_list(self, results_for_builder, item, key):
+ """Inserts the item into the list with the given key in the results for
+ this builder. Creates the list if no such list exists.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for a
+ single builder.
+ item: Number or string to insert into the list.
+ key: Key in results_for_builder for the list to insert into.
+ """
+ if key in results_for_builder:
+ raw_list = results_for_builder[key]
+ else:
+ raw_list = []
+
+ raw_list.insert(0, item)
+ raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
+ results_for_builder[key] = raw_list
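+
+        # For example (illustrative): inserting build number 123 under
+        # BUILD_NUMBERS prepends it to the list and truncates the list to
+        # MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG entries.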
+
+ def _insert_item_run_length_encoded(self, item, encoded_results):
+ """Inserts the item into the run-length encoded results.
+
+ Args:
+ item: String or number to insert.
+ encoded_results: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+ if len(encoded_results) and item == encoded_results[0][1]:
+ num_results = encoded_results[0][0]
+ if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ encoded_results[0][0] = num_results + 1
+ else:
+ # Use a list instead of a class for the run-length encoding since
+ # we want the serialized form to be concise.
+ encoded_results.insert(0, [1, item])
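+
+        # For example (illustrative): with encoded_results == [[3,'A'],[1,'Q']],
+        # inserting 'A' yields [[4,'A'],[1,'Q']], while inserting 'T' yields
+        # [[1,'T'],[3,'A'],[1,'Q']].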
+
+ def _insert_generic_metadata(self, results_for_builder):
+ """ Inserts generic metadata (such as version number, current time etc)
+ into the JSON.
+
+ Args:
+ results_for_builder: Dictionary containing the test results for
+ a single builder.
+ """
+ self._insert_item_into_raw_list(results_for_builder,
+ self._build_number, self.BUILD_NUMBERS)
+
+ path_to_webkit = path_utils.path_from_base('third_party', 'WebKit',
+ 'WebCore')
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path_to_webkit),
+ self.WEBKIT_SVN)
+
+ path_to_chrome_base = path_utils.path_from_base()
+ self._insert_item_into_raw_list(results_for_builder,
+ self._get_svn_revision(path_to_chrome_base),
+ self.CHROME_SVN)
+
+ self._insert_item_into_raw_list(results_for_builder,
+ int(time.time()),
+ self.TIME)
+
+ def _insert_test_time_and_result(self, test_name, tests):
+        """Inserts a test item with its results into the given tests dictionary.
+
+        Args:
+          test_name: Name of the test to insert.
+          tests: Dictionary containing test result entries.
+ """
+
+ result = JSONResultsGenerator.PASS_RESULT
+ time = 0
+
+ if test_name not in self._all_tests:
+ result = JSONResultsGenerator.NO_DATA_RESULT
+
+ if test_name in self._failures:
+ result = self.FAILURE_TO_CHAR[self._failures[test_name]]
+
+ if test_name in self._test_timings:
+ # Floor for now to get time in seconds.
+ time = int(self._test_timings[test_name])
+
+ if test_name not in tests:
+ tests[test_name] = self._create_results_and_times_json()
+
+ thisTest = tests[test_name]
+ self._insert_item_run_length_encoded(result, thisTest[self.RESULTS])
+ self._insert_item_run_length_encoded(time, thisTest[self.TIMES])
+ self._normalize_results_json(thisTest, test_name, tests)
+
+ def _convert_json_to_current_version(self, results_json):
+ """If the JSON does not match the current version, converts it to the
+ current version and adds in the new version number.
+ """
+ if (self.VERSION_KEY in results_json and
+ results_json[self.VERSION_KEY] == self.VERSION):
+ return
+
+ results_json[self.VERSION_KEY] = self.VERSION
+
+ def _create_results_and_times_json(self):
+ results_and_times = {}
+ results_and_times[self.RESULTS] = []
+ results_and_times[self.TIMES] = []
+ return results_and_times
+
+ def _create_results_for_builder_json(self):
+ results_for_builder = {}
+ results_for_builder[self.TESTS] = {}
+ return results_for_builder
+
+ def _remove_items_over_max_number_of_builds(self, encoded_list):
+ """Removes items from the run-length encoded list after the final
+ item that exceeds the max number of builds to track.
+
+ Args:
+          encoded_list: run-length encoded results. An array of arrays, e.g.
+ [[3,'A'],[1,'Q']] encodes AAAQ.
+ """
+ num_builds = 0
+ index = 0
+ for result in encoded_list:
+ num_builds = num_builds + result[0]
+ index = index + 1
+ if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
+ return encoded_list[:index]
+ return encoded_list
+
+ def _normalize_results_json(self, test, test_name, tests):
+ """ Prune tests where all runs pass or tests that no longer exist and
+ truncate all results to maxNumberOfBuilds.
+
+ Args:
+ test: ResultsAndTimes object for this test.
+ test_name: Name of the test.
+ tests: The JSON object with all the test results for this builder.
+ """
+ test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
+ test[self.RESULTS])
+ test[self.TIMES] = self._remove_items_over_max_number_of_builds(
+ test[self.TIMES])
+
+ is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
+ self.PASS_RESULT)
+ is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
+ self.NO_DATA_RESULT)
+ max_time = max([time[1] for time in test[self.TIMES]])
+
+ # Remove all passes/no-data from the results to reduce noise and
+ # filesize. If a test passes every run, but takes > MIN_TIME to run,
+ # don't throw away the data.
+ if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
+ del tests[test_name]
+
+ def _is_results_all_of_type(self, results, type):
+ """Returns whether all the results are of the given type
+ (e.g. all passes)."""
+ return len(results) == 1 and results[0][1] == type
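+
+
+# Editor's illustrative sketch (not part of the original change): a
+# standalone mirror of _insert_item_run_length_encoded() above, so the
+# encoding can be read in isolation. The function name and the max_count
+# parameter (standing in for MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG) are
+# hypothetical.
+def _run_length_insert_sketch(item, encoded, max_count):
+    """Prepends item to a newest-first run-length encoded list.
+
+    [[3, 'P'], [1, 'F']] encodes 'PPPF'. Inserting 'P' yields
+    [[4, 'P'], [1, 'F']]; inserting 'F' instead yields
+    [[1, 'F'], [3, 'P'], [1, 'F']].
+    """
+    if encoded and encoded[0][1] == item:
+        # Extend the newest run unless it has already hit the cap.
+        if encoded[0][0] <= max_count:
+            encoded[0][0] += 1
+    else:
+        # Different result than the newest run: start a new run in front.
+        encoded.insert(0, [1, item])
+    return encoded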
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf
new file mode 100644
index 0000000..d3150dd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/lighttpd.conf
@@ -0,0 +1,89 @@
+server.tag = "LightTPD/1.4.19 (Win32)"
+server.modules = ( "mod_accesslog",
+ "mod_alias",
+ "mod_cgi",
+ "mod_rewrite" )
+
+# default document root required
+server.document-root = "."
+
+# files to check for if .../ is requested
+index-file.names = ( "index.php", "index.pl", "index.cgi",
+ "index.html", "index.htm", "default.htm" )
+# mimetype mapping
+mimetype.assign = (
+ ".gif" => "image/gif",
+ ".jpg" => "image/jpeg",
+ ".jpeg" => "image/jpeg",
+ ".png" => "image/png",
+ ".svg" => "image/svg+xml",
+ ".css" => "text/css",
+ ".html" => "text/html",
+ ".htm" => "text/html",
+ ".xhtml" => "application/xhtml+xml",
+ ".js" => "text/javascript",
+ ".log" => "text/plain",
+ ".conf" => "text/plain",
+ ".text" => "text/plain",
+ ".txt" => "text/plain",
+ ".dtd" => "text/xml",
+ ".xml" => "text/xml",
+ ".manifest" => "text/cache-manifest",
+ )
+
+# Use the "Content-Type" extended attribute to obtain mime type if possible
+mimetype.use-xattr = "enable"
+
+##
+# which extensions should not be handle via static-file transfer
+#
+# .php, .pl, .fcgi are most often handled by mod_fastcgi or mod_cgi
+static-file.exclude-extensions = ( ".php", ".pl", ".cgi" )
+
+server.bind = "localhost"
+server.port = 8001
+
+## virtual directory listings
+dir-listing.activate = "enable"
+#dir-listing.encoding = "iso-8859-2"
+#dir-listing.external-css = "style/oldstyle.css"
+
+## enable debugging
+#debug.log-request-header = "enable"
+#debug.log-response-header = "enable"
+#debug.log-request-handling = "enable"
+#debug.log-file-not-found = "enable"
+
+#### SSL engine
+#ssl.engine = "enable"
+#ssl.pemfile = "server.pem"
+
+# Rewrite rule for utf-8 path test (LayoutTests/http/tests/uri/utf8-path.html)
+# See the apache rewrite rule at LayoutTests/http/tests/uri/intercept/.htaccess
+# Rewrite rule for LayoutTests/http/tests/appcache/cyrillic-uri.html.
+# See the apache rewrite rule at
+# LayoutTests/http/tests/appcache/resources/intercept/.htaccess
+url.rewrite-once = (
+ "^/uri/intercept/(.*)" => "/uri/resources/print-uri.php",
+ "^/appcache/resources/intercept/(.*)" => "/appcache/resources/print-uri.php"
+)
+
+# LayoutTests/http/tests/xmlhttprequest/response-encoding.html uses an htaccess
+# to override charset for reply2.txt, reply2.xml, and reply4.txt.
+$HTTP["url"] =~ "^/xmlhttprequest/resources/reply2.(txt|xml)" {
+ mimetype.assign = (
+ ".txt" => "text/plain; charset=windows-1251",
+ ".xml" => "text/xml; charset=windows-1251"
+ )
+}
+$HTTP["url"] =~ "^/xmlhttprequest/resources/reply4.txt" {
+ mimetype.assign = ( ".txt" => "text/plain; charset=koi8-r" )
+}
+
+# LayoutTests/http/tests/appcache/wrong-content-type.html uses an htaccess
+# to override mime type for wrong-content-type.manifest.
+$HTTP["url"] =~ "^/appcache/resources/wrong-content-type.manifest" {
+ mimetype.assign = ( ".manifest" => "text/plain" )
+}
+
+# Autogenerated test-specific config follows.
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
new file mode 100644
index 0000000..6c094e3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/metered_stream.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Package that implements a stream wrapper that has 'meters' as well as
+regular output. A 'meter' is a single line of text that can be erased
+and rewritten repeatedly, without producing multiple lines of output. It
+can be used to produce effects like progress bars.
+"""
+
+
+class MeteredStream:
+ """This class is a wrapper around a stream that allows you to implement
+ meters.
+
+ It can be used like a stream, but calling update() will print
+ the string followed by only a carriage return (instead of a carriage
+ return and a line feed). This can be used to implement progress bars and
+ other sorts of meters. Note that anything written by update() will be
+ erased by a subsequent update(), write(), or flush()."""
+
+ def __init__(self, verbose, stream):
+ """
+ Args:
+ verbose: whether update is a no-op
+ stream: output stream to write to
+ """
+ self._dirty = False
+ self._verbose = verbose
+ self._stream = stream
+ self._last_update = ""
+
+ def write(self, txt):
+ """Write text directly to the stream, overwriting and resetting the
+ meter."""
+ if self._dirty:
+ self.update("")
+ self._dirty = False
+ self._stream.write(txt)
+
+ def flush(self):
+ """Flush any buffered output."""
+ self._stream.flush()
+
+    def update(self, txt):
+ """Write an update to the stream that will get overwritten by the next
+ update() or by a write().
+
+ This is used for progress updates that don't need to be preserved in
+        the log. Note that verbose mode disables this routine; we do this in
+        case we are logging lots of output and the update()s would get lost
+        or wouldn't work properly (typically because verbose streams are
+        redirected to files).
+
+ TODO(dpranke): figure out if there is a way to detect if we're writing
+ to a stream that handles CRs correctly (e.g., terminals). That might
+ be a cleaner way of handling this.
+ """
+ if self._verbose:
+ return
+
+ # Print the necessary number of backspaces to erase the previous
+ # message.
+ self._stream.write("\b" * len(self._last_update))
+        self._stream.write(txt)
+        num_remaining = len(self._last_update) - len(txt)
+        if num_remaining > 0:
+            self._stream.write(" " * num_remaining + "\b" * num_remaining)
+        self._last_update = txt
+ self._dirty = True
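+
+
+if __name__ == '__main__':
+    # Editor's illustrative sketch (not part of the original change):
+    # drive a simple progress meter on stderr. Each update() overwrites
+    # the previous one, and the final write() erases the meter first.
+    import sys
+    meter = MeteredStream(False, sys.stderr)
+    for i in range(1, 4):
+        meter.update("processed %d/3 tests" % i)
+    meter.write("done\n")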
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py
new file mode 100644
index 0000000..26d062b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/path_utils.py
@@ -0,0 +1,395 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This package contains utility methods for manipulating paths and
+filenames for test results and baselines. It also contains wrappers
+of a few routines in platform_utils.py so that platform_utils.py can
+be considered a 'protected' package - i.e., this file should be
+the only file that ever includes platform_utils. This leads to
+us including a few things that don't really have anything to do
+with paths, unfortunately."""
+
+import errno
+import os
+import stat
+import sys
+import time
+
+import platform_utils
+import platform_utils_win
+import platform_utils_mac
+import platform_utils_linux
+
+# Cache some values so we don't have to recalculate them. _basedir is
+# used by path_from_base() and caches the full (native) path to the top
+# of the source tree (/src). _baseline_search_path is used by
+# expected_baselines() and caches the list of native paths to search
+# for baseline results; _search_path_platform caches the platform that
+# list was computed for.
+_basedir = None
+_baseline_search_path = None
+_search_path_platform = None
+
+
+class PathNotFound(Exception):
+ pass
+
+
+def layout_tests_dir():
+ """Returns the fully-qualified path to the directory containing the input
+ data for the specified layout test."""
+ return path_from_base('third_party', 'WebKit', 'LayoutTests')
+
+
+def chromium_baseline_path(platform=None):
+ """Returns the full path to the directory containing expected
+ baseline results from chromium ports. If |platform| is None, the
+ currently executing platform is used.
+
+ Note: although directly referencing individual platform_utils_* files is
+ usually discouraged, we allow it here so that the rebaselining tool can
+ pull baselines for platforms other than the host platform."""
+
+ # Normalize the platform string.
+ platform = platform_name(platform)
+ if platform.startswith('chromium-mac'):
+ return platform_utils_mac.baseline_path(platform)
+ elif platform.startswith('chromium-win'):
+ return platform_utils_win.baseline_path(platform)
+ elif platform.startswith('chromium-linux'):
+ return platform_utils_linux.baseline_path(platform)
+
+ return platform_utils.baseline_path()
+
+
+def webkit_baseline_path(platform):
+ """Returns the full path to the directory containing expected
+ baseline results from WebKit ports."""
+ return path_from_base('third_party', 'WebKit', 'LayoutTests',
+ 'platform', platform)
+
+
+def baseline_search_path(platform=None):
+ """Returns the list of directories to search for baselines/results for a
+ given platform, in order of preference. Paths are relative to the top of
+ the source tree. If parameter platform is None, returns the list for the
+ current platform that the script is running on.
+
+ Note: although directly referencing individual platform_utils_* files is
+ usually discouraged, we allow it here so that the rebaselining tool can
+ pull baselines for platforms other than the host platform."""
+
+ # Normalize the platform name.
+ platform = platform_name(platform)
+ if platform.startswith('chromium-mac'):
+ return platform_utils_mac.baseline_search_path(platform)
+ elif platform.startswith('chromium-win'):
+ return platform_utils_win.baseline_search_path(platform)
+ elif platform.startswith('chromium-linux'):
+ return platform_utils_linux.baseline_search_path(platform)
+ return platform_utils.baseline_search_path()
+
+
+def expected_baselines(filename, suffix, platform=None, all_baselines=False):
+ """Given a test name, finds where the baseline results are located.
+
+ Args:
+ filename: absolute filename to test file
+ suffix: file suffix of the expected results, including dot; e.g. '.txt'
+ or '.png'. This should not be None, but may be an empty string.
+ platform: layout test platform: 'win', 'linux' or 'mac'. Defaults to
+ the current platform.
+ all_baselines: If True, return an ordered list of all baseline paths
+ for the given platform. If False, return only the first
+ one.
+ Returns
+ a list of ( platform_dir, results_filename ), where
+ platform_dir - abs path to the top of the results tree (or test tree)
+ results_filename - relative path from top of tree to the results file
+ (os.path.join of the two gives you the full path to the file,
+ unless None was returned.)
+ Return values will be in the format appropriate for the current platform
+ (e.g., "\\" for path separators on Windows). If the results file is not
+ found, then None will be returned for the directory, but the expected
+ relative pathname will still be returned.
+ """
+ global _baseline_search_path
+ global _search_path_platform
+ testname = os.path.splitext(relative_test_filename(filename))[0]
+
+ baseline_filename = testname + '-expected' + suffix
+
+ if (_baseline_search_path is None) or (_search_path_platform != platform):
+ _baseline_search_path = baseline_search_path(platform)
+ _search_path_platform = platform
+
+ baselines = []
+ for platform_dir in _baseline_search_path:
+ if os.path.exists(os.path.join(platform_dir, baseline_filename)):
+ baselines.append((platform_dir, baseline_filename))
+
+ if not all_baselines and baselines:
+ return baselines
+
+ # If it wasn't found in a platform directory, return the expected result
+ # in the test directory, even if no such file actually exists.
+ platform_dir = layout_tests_dir()
+ if os.path.exists(os.path.join(platform_dir, baseline_filename)):
+ baselines.append((platform_dir, baseline_filename))
+
+ if baselines:
+ return baselines
+
+ return [(None, baseline_filename)]
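+
+# Illustrative example (editor's addition, not part of the original change);
+# the test name below is hypothetical:
+#
+#   expected_baselines('.../LayoutTests/fast/js/foo.html', '.txt',
+#                      platform='chromium-win')
+#   # -> [(<first platform dir containing 'fast/js/foo-expected.txt'>,
+#   #      'fast/js/foo-expected.txt')]
+#
+# If no baseline exists in any searched directory, the platform_dir half of
+# the tuple is None and only the expected relative filename is returned.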
+
+
+def expected_filename(filename, suffix):
+ """Given a test name, returns an absolute path to its expected results.
+
+ If no expected results are found in any of the searched directories, the
+ directory in which the test itself is located will be returned. The return
+ value is in the format appropriate for the platform (e.g., "\\" for
+ path separators on windows).
+
+ Args:
+ filename: absolute filename to test file
+ suffix: file suffix of the expected results, including dot; e.g. '.txt'
+ or '.png'. This should not be None, but may be an empty string.
+ """
+ platform_dir, baseline_filename = expected_baselines(filename, suffix)[0]
+ if platform_dir:
+ return os.path.join(platform_dir, baseline_filename)
+ return os.path.join(layout_tests_dir(), baseline_filename)
+
+
+def relative_test_filename(filename):
+ """Provide the filename of the test relative to the layout tests
+ directory as a unix style path (a/b/c)."""
+ return _win_path_to_unix(filename[len(layout_tests_dir()) + 1:])
+
+
+def _win_path_to_unix(path):
+ """Convert a windows path to use unix-style path separators (a/b/c)."""
+ return path.replace('\\', '/')
+
+#
+# Routines that are arguably platform-specific but have been made
+# generic for now (they used to be in platform_utils_*)
+#
+
+
+def filename_to_uri(full_path):
+ """Convert a test file to a URI."""
+ LAYOUTTEST_HTTP_DIR = "http/tests/"
+ LAYOUTTEST_WEBSOCKET_DIR = "websocket/tests/"
+
+ relative_path = _win_path_to_unix(relative_test_filename(full_path))
+ port = None
+ use_ssl = False
+
+ if relative_path.startswith(LAYOUTTEST_HTTP_DIR):
+ # http/tests/ run off port 8000 and ssl/ off 8443
+ relative_path = relative_path[len(LAYOUTTEST_HTTP_DIR):]
+ port = 8000
+ elif relative_path.startswith(LAYOUTTEST_WEBSOCKET_DIR):
+ # websocket/tests/ run off port 8880 and 9323
+ # Note: the root is /, not websocket/tests/
+ port = 8880
+
+ # Make http/tests/local run as local files. This is to mimic the
+ # logic in run-webkit-tests.
+ # TODO(jianli): Consider extending this to "media/".
+ if port and not relative_path.startswith("local/"):
+ if relative_path.startswith("ssl/"):
+ port += 443
+ protocol = "https"
+ else:
+ protocol = "http"
+ return "%s://127.0.0.1:%u/%s" % (protocol, port, relative_path)
+
+ if sys.platform in ('cygwin', 'win32'):
+ return "file:///" + get_absolute_path(full_path)
+ return "file://" + get_absolute_path(full_path)
+
+
+def get_absolute_path(path):
+ """Returns an absolute UNIX path."""
+ return _win_path_to_unix(os.path.abspath(path))
+
+
+def maybe_make_directory(*path):
+ """Creates the specified directory if it doesn't already exist."""
+ try:
+ os.makedirs(os.path.join(*path))
+ except OSError, e:
+ if e.errno != errno.EEXIST:
+ raise
+
+
+def path_from_base(*comps):
+ """Returns an absolute filename from a set of components specified
+ relative to the top of the source tree. If the path does not exist,
+ the exception PathNotFound is raised."""
+ global _basedir
+    if _basedir is None:
+ # We compute the top of the source tree by finding the absolute
+ # path of this source file, and then climbing up three directories
+ # as given in subpath. If we move this file, subpath needs to be
+ # updated.
+ path = os.path.abspath(__file__)
+ subpath = os.path.join('third_party', 'WebKit')
+ _basedir = path[:path.index(subpath)]
+ path = os.path.join(_basedir, *comps)
+ if not os.path.exists(path):
+ raise PathNotFound('could not find %s' % (path))
+ return path
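+
+# For example (editor's addition; the '/src' checkout root is hypothetical):
+# if this file lives under /src/third_party/WebKit/WebKitTools/..., then
+# path_from_base('third_party', 'WebKit', 'LayoutTests') returns
+# '/src/third_party/WebKit/LayoutTests', and PathNotFound is raised if that
+# directory does not exist on disk.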
+
+
+def remove_directory(*path):
+ """Recursively removes a directory, even if it's marked read-only.
+
+ Remove the directory located at *path, if it exists.
+
+ shutil.rmtree() doesn't work on Windows if any of the files or directories
+ are read-only, which svn repositories and some .svn files are. We need to
+ be able to force the files to be writable (i.e., deletable) as we traverse
+ the tree.
+
+ Even with all this, Windows still sometimes fails to delete a file, citing
+ a permission error (maybe something to do with antivirus scans or disk
+ indexing). The best suggestion any of the user forums had was to wait a
+ bit and try again, so we do that too. It's hand-waving, but sometimes it
+ works. :/
+ """
+ file_path = os.path.join(*path)
+ if not os.path.exists(file_path):
+ return
+
+ win32 = False
+ if sys.platform == 'win32':
+ win32 = True
+ # Some people don't have the APIs installed. In that case we'll do
+ # without.
+ try:
+ win32api = __import__('win32api')
+ win32con = __import__('win32con')
+ except ImportError:
+ win32 = False
+
+ def remove_with_retry(rmfunc, path):
+ os.chmod(path, stat.S_IWRITE)
+ if win32:
+ win32api.SetFileAttributes(path,
+ win32con.FILE_ATTRIBUTE_NORMAL)
+ try:
+ return rmfunc(path)
+ except EnvironmentError, e:
+ if e.errno != errno.EACCES:
+ raise
+ print 'Failed to delete %s: trying again' % repr(path)
+ time.sleep(0.1)
+ return rmfunc(path)
+ else:
+
+ def remove_with_retry(rmfunc, path):
+ if os.path.islink(path):
+ return os.remove(path)
+ else:
+ return rmfunc(path)
+
+ for root, dirs, files in os.walk(file_path, topdown=False):
+ # For POSIX: making the directory writable guarantees removability.
+ # Windows will ignore the non-read-only bits in the chmod value.
+ os.chmod(root, 0770)
+ for name in files:
+ remove_with_retry(os.remove, os.path.join(root, name))
+ for name in dirs:
+ remove_with_retry(os.rmdir, os.path.join(root, name))
+
+ remove_with_retry(os.rmdir, file_path)
+
+#
+# Wrappers around platform_utils
+#
+
+
+def platform_name(platform=None):
+ """Returns the appropriate chromium platform name for |platform|. If
+ |platform| is None, returns the name of the chromium platform on the
+ currently running system. If |platform| is of the form 'chromium-*',
+ it is returned unchanged, otherwise 'chromium-' is prepended."""
+    if platform is None:
+ return platform_utils.platform_name()
+ if not platform.startswith('chromium-'):
+ platform = "chromium-" + platform
+ return platform
+
+
+def platform_version():
+ return platform_utils.platform_version()
+
+
+def lighttpd_executable_path():
+ return platform_utils.lighttpd_executable_path()
+
+
+def lighttpd_module_path():
+ return platform_utils.lighttpd_module_path()
+
+
+def lighttpd_php_path():
+ return platform_utils.lighttpd_php_path()
+
+
+def wdiff_path():
+ return platform_utils.wdiff_path()
+
+
+def test_shell_path(target):
+ return platform_utils.test_shell_path(target)
+
+
+def image_diff_path(target):
+ return platform_utils.image_diff_path(target)
+
+
+def layout_test_helper_path(target):
+ return platform_utils.layout_test_helper_path(target)
+
+
+def fuzzy_match_path():
+ return platform_utils.fuzzy_match_path()
+
+
+def shut_down_http_server(server_pid):
+ return platform_utils.shut_down_http_server(server_pid)
+
+
+def kill_all_test_shells():
+ platform_utils.kill_all_test_shells()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py
new file mode 100644
index 0000000..09e7b4b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Platform-specific utilities and pseudo-constants
+
+Any functions whose implementations or values differ from one platform to
+another should be defined in their respective platform_utils_<platform>.py
+modules. The appropriate one of those will be imported into this module to
+provide callers with a common, platform-independent interface.
+
+This file should only ever be imported by layout_package.path_utils.
+"""
+
+import sys
+
+# We may not support the version of Python that a user has installed (Cygwin
+# especially has had problems), but we'll allow the platform utils to be
+# included in any case so we don't get an import error.
+if sys.platform in ('cygwin', 'win32'):
+ from platform_utils_win import *
+elif sys.platform == 'darwin':
+ from platform_utils_mac import *
+elif sys.platform in ('linux', 'linux2', 'freebsd7', 'openbsd4'):
+ from platform_utils_linux import *
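+
+# For example (editor's note, not part of the original change): on a Linux
+# host, platform_name() here resolves to platform_utils_linux.platform_name()
+# and returns 'chromium-linux'; callers normally reach it indirectly through
+# layout_package.path_utils.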
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py
new file mode 100644
index 0000000..87b27c7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_linux.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This is the Linux implementation of the layout_package.platform_utils
+ package. This file should only be imported by that package."""
+
+import os
+import signal
+import subprocess
+import sys
+import logging
+
+import path_utils
+import platform_utils_win
+
+
+def platform_name():
+ """Returns the name of the platform we're currently running on."""
+ return 'chromium-linux' + platform_version()
+
+
+def platform_version():
+ """Returns the version string for the platform, e.g. '-vista' or
+ '-snowleopard'. If the platform does not distinguish between
+ minor versions, it returns ''."""
+ return ''
+
+
+def get_num_cores():
+ """Returns the number of cores on the machine. For hyperthreaded machines,
+ this will be double the number of actual processors."""
+ num_cores = os.sysconf("SC_NPROCESSORS_ONLN")
+ if isinstance(num_cores, int) and num_cores > 0:
+ return num_cores
+ return 1
+
+
+def baseline_path(platform=None):
+ """Returns the path relative to the top of the source tree for the
+ baselines for the specified platform version. If |platform| is None,
+ then the version currently in use is used."""
+ if platform is None:
+ platform = platform_name()
+ return path_utils.path_from_base('webkit', 'data', 'layout_tests',
+ 'platform', platform, 'LayoutTests')
+
+
+def baseline_search_path(platform=None):
+ """Returns the list of directories to search for baselines/results, in
+ order of preference. Paths are relative to the top of the source tree."""
+ return [baseline_path(platform),
+ platform_utils_win.baseline_path('chromium-win'),
+ path_utils.webkit_baseline_path('win'),
+ path_utils.webkit_baseline_path('mac')]
+
+
+def apache_executable_path():
+ """Returns the executable path to start Apache"""
+ path = os.path.join("/usr", "sbin", "apache2")
+ if os.path.exists(path):
+ return path
+ print "Unable to fine Apache executable %s" % path
+ _missing_apache()
+
+
+def apache_config_file_path():
+ """Returns the path to Apache config file"""
+ return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
+ "http", "conf", "apache2-debian-httpd.conf")
+
+
+def lighttpd_executable_path():
+ """Returns the executable path to start LigHTTPd"""
+ binpath = "/usr/sbin/lighttpd"
+ if os.path.exists(binpath):
+ return binpath
+ print "Unable to find LigHTTPd executable %s" % binpath
+ _missing_lighttpd()
+
+
+def lighttpd_module_path():
+ """Returns the library module path for LigHTTPd"""
+ modpath = "/usr/lib/lighttpd"
+ if os.path.exists(modpath):
+ return modpath
+ print "Unable to find LigHTTPd modules %s" % modpath
+ _missing_lighttpd()
+
+
+def lighttpd_php_path():
+ """Returns the PHP executable path for LigHTTPd"""
+ binpath = "/usr/bin/php-cgi"
+ if os.path.exists(binpath):
+ return binpath
+ print "Unable to find PHP CGI executable %s" % binpath
+ _missing_lighttpd()
+
+
+def wdiff_path():
+ """Path to the WDiff executable, which we assume is already installed and
+ in the user's $PATH."""
+ return 'wdiff'
+
+
+def image_diff_path(target):
+ """Path to the image_diff binary.
+
+ Args:
+ target: Build target mode (debug or release)"""
+ return _path_from_build_results(target, 'image_diff')
+
+
+def layout_test_helper_path(target):
+ """Path to the layout_test helper binary, if needed, empty otherwise"""
+ return ''
+
+
+def test_shell_path(target):
+ """Return the platform-specific binary path for our TestShell.
+
+ Args:
+ target: Build target mode (debug or release) """
+ if target in ('Debug', 'Release'):
+ try:
+ debug_path = _path_from_build_results('Debug', 'test_shell')
+ release_path = _path_from_build_results('Release', 'test_shell')
+
+ debug_mtime = os.stat(debug_path).st_mtime
+ release_mtime = os.stat(release_path).st_mtime
+
+ if debug_mtime > release_mtime and target == 'Release' or \
+ release_mtime > debug_mtime and target == 'Debug':
+                logging.info('\x1b[31mWarning: you are not running the most '
+                             'recent test_shell binary. Pass --debug (or '
+                             'omit it) to select between the Debug and '
+                             'Release builds.\x1b[0m')
+ # This will fail if we don't have both a debug and release binary.
+ # That's fine because, in this case, we must already be running the
+ # most up-to-date one.
+ except path_utils.PathNotFound:
+ pass
+
+ return _path_from_build_results(target, 'test_shell')
+
+
+def fuzzy_match_path():
+ """Return the path to the fuzzy matcher binary."""
+ return path_utils.path_from_base('third_party', 'fuzzymatch', 'fuzzymatch')
+
+
+def shut_down_http_server(server_pid):
+ """Shut down the lighttpd web server. Blocks until it's fully shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ """
+ # server_pid is not set when "http_server.py stop" is run manually.
+ if server_pid is None:
+ # This isn't ideal, since it could conflict with web server processes
+ # not started by http_server.py, but good enough for now.
+ kill_all_process('lighttpd')
+ kill_all_process('apache2')
+ else:
+ try:
+ os.kill(server_pid, signal.SIGTERM)
+ #TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
+ except OSError:
+ # Sometimes we get a bad PID (e.g. from a stale httpd.pid file),
+ # so if kill fails on the given PID, just try to 'killall' web
+ # servers.
+ shut_down_http_server(None)
+
+
+def kill_process(pid):
+ """Forcefully kill the process.
+
+ Args:
+ pid: The id of the process to be killed.
+ """
+ os.kill(pid, signal.SIGKILL)
+
+
+def kill_all_process(process_name):
+    null = open(os.devnull, 'w')
+ subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
+ process_name], stderr=null)
+ null.close()
+
+
+def kill_all_test_shells():
+ """Kills all instances of the test_shell binary currently running."""
+ kill_all_process('test_shell')
+
+#
+# Private helper functions
+#
+
+
+def _missing_lighttpd():
+ print 'Please install using: "sudo apt-get install lighttpd php5-cgi"'
+ print 'For complete Linux build requirements, please see:'
+ print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
+ sys.exit(1)
+
+
+def _missing_apache():
+ print ('Please install using: "sudo apt-get install apache2 '
+ 'libapache2-mod-php5"')
+ print 'For complete Linux build requirements, please see:'
+ print 'http://code.google.com/p/chromium/wiki/LinuxBuildInstructions'
+ sys.exit(1)
+
+
+def _path_from_build_results(*pathies):
+ # FIXME(dkegel): use latest or warn if more than one found?
+ for dir in ["sconsbuild", "out", "xcodebuild"]:
+ try:
+ return path_utils.path_from_base(dir, *pathies)
+ except:
+ pass
+ raise path_utils.PathNotFound("Unable to find %s in build tree" %
+ (os.path.join(*pathies)))
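+
+
+# For example (editor's addition; which directory exists depends on how the
+# tree was built): _path_from_build_results('Release', 'test_shell') returns
+# the first of <base>/sconsbuild/Release/test_shell,
+# <base>/out/Release/test_shell or <base>/xcodebuild/Release/test_shell that
+# exists, and raises PathNotFound if none of them do.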
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py
new file mode 100644
index 0000000..1eaa10c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_mac.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This is the Mac implementation of the layout_package.platform_utils
+ package. This file should only be imported by that package."""
+
+import os
+import platform
+import signal
+import subprocess
+
+import path_utils
+
+
+def platform_name():
+ """Returns the name of the platform we're currently running on."""
+    # At the moment all chromium mac results are version-independent. At some
+    # point we may need to return 'chromium-mac' + platform_version().
+ return 'chromium-mac'
+
+
+def platform_version():
+ """Returns the version string for the platform, e.g. '-vista' or
+ '-snowleopard'. If the platform does not distinguish between
+ minor versions, it returns ''."""
+ os_version_string = platform.mac_ver()[0] # e.g. "10.5.6"
+ if not os_version_string:
+ return '-leopard'
+
+ release_version = int(os_version_string.split('.')[1])
+
+ # we don't support 'tiger' or earlier releases
+ if release_version == 5:
+ return '-leopard'
+ elif release_version == 6:
+ return '-snowleopard'
+
+ return ''
+
+
+def get_num_cores():
+ """Returns the number of cores on the machine. For hyperthreaded machines,
+ this will be double the number of actual processors."""
+ return int(os.popen2("sysctl -n hw.ncpu")[1].read())
+
+
+def baseline_path(platform=None):
+ """Returns the path relative to the top of the source tree for the
+ baselines for the specified platform version. If |platform| is None,
+ then the version currently in use is used."""
+ if platform is None:
+ platform = platform_name()
+ return path_utils.path_from_base('webkit', 'data', 'layout_tests',
+ 'platform', platform, 'LayoutTests')
+
+# TODO: We should add leopard and snowleopard to the list of paths to check
+# once we start running the tests from snowleopard.
+
+
+def baseline_search_path(platform=None):
+ """Returns the list of directories to search for baselines/results, in
+ order of preference. Paths are relative to the top of the source tree."""
+ return [baseline_path(platform),
+ path_utils.webkit_baseline_path('mac' + platform_version()),
+ path_utils.webkit_baseline_path('mac')]
+
+
+def wdiff_path():
+ """Path to the WDiff executable, which we assume is already installed and
+ in the user's $PATH."""
+ return 'wdiff'
+
+
+def image_diff_path(target):
+ """Path to the image_diff executable
+
+ Args:
+ target: build type - 'Debug','Release',etc."""
+ return path_utils.path_from_base('xcodebuild', target, 'image_diff')
+
+
+def layout_test_helper_path(target):
+ """Path to the layout_test_helper executable, if needed, empty otherwise
+
+ Args:
+ target: build type - 'Debug','Release',etc."""
+ return path_utils.path_from_base('xcodebuild', target,
+ 'layout_test_helper')
+
+
+def test_shell_path(target):
+ """Path to the test_shell executable.
+
+ Args:
+ target: build type - 'Debug','Release',etc."""
+ # TODO(pinkerton): make |target| happy with case-sensitive file systems.
+ return path_utils.path_from_base('xcodebuild', target, 'TestShell.app',
+ 'Contents', 'MacOS', 'TestShell')
+
+
+def apache_executable_path():
+ """Returns the executable path to start Apache"""
+ return os.path.join("/usr", "sbin", "httpd")
+
+
+def apache_config_file_path():
+ """Returns the path to Apache config file"""
+ return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
+ "http", "conf", "apache2-httpd.conf")
+
+
+def lighttpd_executable_path():
+ """Returns the executable path to start LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'mac',
+ 'bin', 'lighttpd')
+
+
+def lighttpd_module_path():
+ """Returns the library module path for LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'lib')
+
+
+def lighttpd_php_path():
+ """Returns the PHP executable path for LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'mac', 'bin',
+ 'php-cgi')
+
+
+def shut_down_http_server(server_pid):
+ """Shut down the lighttpd web server. Blocks until it's fully shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ """
+ # server_pid is not set when "http_server.py stop" is run manually.
+ if server_pid is None:
+ # TODO(mmoss) This isn't ideal, since it could conflict with lighttpd
+ # processes not started by http_server.py, but good enough for now.
+ kill_all_process('lighttpd')
+ kill_all_process('httpd')
+ else:
+ try:
+ os.kill(server_pid, signal.SIGTERM)
+ # TODO(mmoss) Maybe throw in a SIGKILL just to be sure?
+ except OSError:
+ # Sometimes we get a bad PID (e.g. from a stale httpd.pid file),
+ # so if kill fails on the given PID, just try to 'killall' web
+ # servers.
+ shut_down_http_server(None)
+
+
+def kill_process(pid):
+ """Forcefully kill the process.
+
+ Args:
+ pid: The id of the process to be killed.
+ """
+ os.kill(pid, signal.SIGKILL)
+
+
+def kill_all_process(process_name):
+ # On Mac OS X 10.6, killall has a new constraint: -SIGNALNAME or
+ # -SIGNALNUMBER must come first. Example problem:
+ # $ killall -u $USER -TERM lighttpd
+ # killall: illegal option -- T
+ # Use of the earlier -TERM placement is just fine on 10.5.
+    null = open(os.devnull, 'w')
+ subprocess.call(['killall', '-TERM', '-u', os.getenv('USER'),
+ process_name], stderr=null)
+ null.close()
+
+
+def kill_all_test_shells():
+ """Kills all instances of the test_shell binary currently running."""
+ kill_all_process('TestShell')
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py
new file mode 100644
index 0000000..3cbbec3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/platform_utils_win.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This is the Linux implementation of the layout_package.platform_utils
+ package. This file should only be imported by that package."""
+
+import os
+import path_utils
+import subprocess
+import sys
+
+
+def platform_name():
+ """Returns the name of the platform we're currently running on."""
+    # This returns a version-specific name (e.g. 'chromium-win-xp'); the
+    # corresponding version-specific directories are handled in
+    # baseline_search_path() below.
+ return 'chromium-win' + platform_version()
+
+
+def platform_version():
+ """Returns the version string for the platform, e.g. '-vista' or
+ '-snowleopard'. If the platform does not distinguish between
+ minor versions, it returns ''."""
+ winver = sys.getwindowsversion()
+ if winver[0] == 6 and (winver[1] == 1):
+ return '-7'
+ if winver[0] == 6 and (winver[1] == 0):
+ return '-vista'
+ if winver[0] == 5 and (winver[1] == 1 or winver[1] == 2):
+ return '-xp'
+ return ''
+
+
+def get_num_cores():
+ """Returns the number of cores on the machine. For hyperthreaded machines,
+ this will be double the number of actual processors."""
+ return int(os.environ.get('NUMBER_OF_PROCESSORS', 1))
+
+
+def baseline_path(platform=None):
+ """Returns the path relative to the top of the source tree for the
+ baselines for the specified platform version. If |platform| is None,
+ then the version currently in use is used."""
+ if platform is None:
+ platform = platform_name()
+ return path_utils.path_from_base('webkit', 'data', 'layout_tests',
+ 'platform', platform, 'LayoutTests')
+
+
+def baseline_search_path(platform=None):
+ """Returns the list of directories to search for baselines/results, in
+ order of preference. Paths are relative to the top of the source tree."""
+ dirs = []
+ if platform is None:
+ platform = platform_name()
+
+ if platform == 'chromium-win-xp':
+ dirs.append(baseline_path(platform))
+ if platform in ('chromium-win-xp', 'chromium-win-vista'):
+ dirs.append(baseline_path('chromium-win-vista'))
+ dirs.append(baseline_path('chromium-win'))
+ dirs.append(path_utils.webkit_baseline_path('win'))
+ dirs.append(path_utils.webkit_baseline_path('mac'))
+ return dirs
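+
+# For example (editor's addition): baseline_search_path('chromium-win-xp')
+# searches, in order, the chromium-win-xp, chromium-win-vista and
+# chromium-win baselines, then the WebKit 'win' and 'mac' baselines.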
+
+
+def wdiff_path():
+ """Path to the WDiff executable, whose binary is checked in on Win"""
+ return path_utils.path_from_base('third_party', 'cygwin', 'bin',
+ 'wdiff.exe')
+
+
+def image_diff_path(target):
+ """Return the platform-specific binary path for the image compare util.
+ We use this if we can't find the binary in the default location
+ in path_utils.
+
+ Args:
+ target: Build target mode (debug or release)
+ """
+ return _find_binary(target, 'image_diff.exe')
+
+
+def layout_test_helper_path(target):
+ """Return the platform-specific binary path for the layout test helper.
+ We use this if we can't find the binary in the default location
+ in path_utils.
+
+ Args:
+ target: Build target mode (debug or release)
+ """
+ return _find_binary(target, 'layout_test_helper.exe')
+
+
+def test_shell_path(target):
+ """Return the platform-specific binary path for our TestShell.
+ We use this if we can't find the binary in the default location
+ in path_utils.
+
+ Args:
+ target: Build target mode (debug or release)
+ """
+ return _find_binary(target, 'test_shell.exe')
+
+
+def apache_executable_path():
+ """Returns the executable path to start Apache"""
+ path = path_utils.path_from_base('third_party', 'cygwin', "usr", "sbin")
+ # Don't return httpd.exe since we want to use this from cygwin.
+ return os.path.join(path, "httpd")
+
+
+def apache_config_file_path():
+ """Returns the path to Apache config file"""
+ return path_utils.path_from_base("third_party", "WebKit", "LayoutTests",
+ "http", "conf", "cygwin-httpd.conf")
+
+
+def lighttpd_executable_path():
+ """Returns the executable path to start LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'win',
+ 'LightTPD.exe')
+
+
+def lighttpd_module_path():
+ """Returns the library module path for LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'lib')
+
+
+def lighttpd_php_path():
+ """Returns the PHP executable path for LigHTTPd"""
+ return path_utils.path_from_base('third_party', 'lighttpd', 'win', 'php5',
+ 'php-cgi.exe')
+
+
+def shut_down_http_server(server_pid):
+ """Shut down the lighttpd web server. Blocks until it's fully shut down.
+
+ Args:
+ server_pid: The process ID of the running server.
+ Unused in this implementation of the method.
+ """
+ subprocess.Popen(('taskkill.exe', '/f', '/im', 'LightTPD.exe'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).wait()
+ subprocess.Popen(('taskkill.exe', '/f', '/im', 'httpd.exe'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).wait()
+
+
+def kill_process(pid):
+ """Forcefully kill the process.
+
+ Args:
+ pid: The id of the process to be killed.
+ """
+ subprocess.call(('taskkill.exe', '/f', '/pid', str(pid)),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+
+def kill_all_test_shells():
+ """Kills all instances of the test_shell binary currently running."""
+ subprocess.Popen(('taskkill.exe', '/f', '/im', 'test_shell.exe'),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE).wait()
+
+#
+# Private helper functions.
+#
+
+
+def _find_binary(target, binary):
+ """On Windows, we look for binaries that we compile in potentially
+ two places: src/webkit/$target (preferably, which we get if we
+ built using webkit_glue.gyp), or src/chrome/$target (if compiled some
+ other way)."""
+ try:
+ return path_utils.path_from_base('webkit', target, binary)
+ except path_utils.PathNotFound:
+ try:
+ return path_utils.path_from_base('chrome', target, binary)
+ except path_utils.PathNotFound:
+ return path_utils.path_from_base('build', target, binary)
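+
+
+# For example (editor's addition): _find_binary('Release', 'test_shell.exe')
+# checks webkit/Release/test_shell.exe, then chrome/Release/test_shell.exe,
+# and finally falls back to build/Release/test_shell.exe, raising
+# PathNotFound if that is missing as well.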
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
new file mode 100644
index 0000000..f1647f7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_expectations.py
@@ -0,0 +1,818 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A helper class for reading in and dealing with tests expectations
+for layout tests.
+"""
+
+import logging
+import os
+import re
+import sys
+import time
+import path_utils
+
+sys.path.append(path_utils.path_from_base('third_party', 'WebKit',
+ 'WebKitTools'))
+import simplejson
+
+# Test expectation and modifier constants.
+(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, TIMEOUT, CRASH, SKIP, WONTFIX,
+ DEFER, SLOW, REBASELINE, MISSING, FLAKY, NOW, NONE) = range(16)
+
+# Test expectation file update action constants
+(NO_CHANGE, REMOVE_TEST, REMOVE_PLATFORM, ADD_PLATFORMS_EXCEPT_THIS) = range(4)
+
+
+class TestExpectations:
+ TEST_LIST = "test_expectations.txt"
+
+ def __init__(self, tests, directory, platform, is_debug_mode, is_lint_mode,
+ tests_are_present=True):
+ """Reads the test expectations files from the given directory."""
+ path = os.path.join(directory, self.TEST_LIST)
+ self._expected_failures = TestExpectationsFile(path, tests, platform,
+ is_debug_mode, is_lint_mode, tests_are_present=tests_are_present)
+
+ # TODO(ojan): Allow for removing skipped tests when getting the list of
+ # tests to run, but not when getting metrics.
+ # TODO(ojan): Replace the Get* calls here with the more sane API exposed
+ # by TestExpectationsFile below. Maybe merge the two classes entirely?
+
+ def get_expectations_json_for_all_platforms(self):
+ return (
+ self._expected_failures.get_expectations_json_for_all_platforms())
+
+ def get_rebaselining_failures(self):
+ return (self._expected_failures.get_test_set(REBASELINE, FAIL) |
+ self._expected_failures.get_test_set(REBASELINE, IMAGE) |
+ self._expected_failures.get_test_set(REBASELINE, TEXT) |
+ self._expected_failures.get_test_set(REBASELINE,
+ IMAGE_PLUS_TEXT))
+
+ def get_options(self, test):
+ return self._expected_failures.get_options(test)
+
+ def get_expectations(self, test):
+ return self._expected_failures.get_expectations(test)
+
+ def get_expectations_string(self, test):
+ """Returns the expectatons for the given test as an uppercase string.
+ If there are no expectations for the test, then "PASS" is returned."""
+ expectations = self.get_expectations(test)
+ retval = []
+
+ for expectation in expectations:
+ for item in TestExpectationsFile.EXPECTATIONS.items():
+ if item[1] == expectation:
+ retval.append(item[0])
+ break
+
+ return " ".join(retval).upper()
+
+ def get_timeline_for_test(self, test):
+ return self._expected_failures.get_timeline_for_test(test)
+
+ def get_tests_with_result_type(self, result_type):
+ return self._expected_failures.get_tests_with_result_type(result_type)
+
+ def get_tests_with_timeline(self, timeline):
+ return self._expected_failures.get_tests_with_timeline(timeline)
+
+ def matches_an_expected_result(self, test, result):
+ """Returns whether we got one of the expected results for this test."""
+ return (result in self._expected_failures.get_expectations(test) or
+ (result in (IMAGE, TEXT, IMAGE_PLUS_TEXT) and
+ FAIL in self._expected_failures.get_expectations(test)) or
+ result == MISSING and self.is_rebaselining(test) or
+ result == SKIP and self._expected_failures.has_modifier(test,
+ SKIP))
+
+ def is_rebaselining(self, test):
+ return self._expected_failures.has_modifier(test, REBASELINE)
+
+ def has_modifier(self, test, modifier):
+ return self._expected_failures.has_modifier(test, modifier)
+
+ def remove_platform_from_file(self, tests, platform, backup=False):
+ return self._expected_failures.remove_platform_from_file(tests,
+ platform,
+ backup)
+
+
+def strip_comments(line):
+ """Strips comments from a line and return None if the line is empty
+ or else the contents of line with leading and trailing spaces removed
+ and all other whitespace collapsed"""
+
+ commentIndex = line.find('//')
+    if commentIndex == -1:
+ commentIndex = len(line)
+
+ line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
+ if line == '':
+ return None
+ else:
+ return line
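+
+# For example (editor's addition, not part of the original change):
+#   strip_comments('  fast/js/foo.html = PASS  // flaky on win')
+#       -> 'fast/js/foo.html = PASS'
+#   strip_comments('// just a comment') -> None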
+
+
+class ModifiersAndExpectations:
+ """A holder for modifiers and expectations on a test that serializes to
+ JSON."""
+
+ def __init__(self, modifiers, expectations):
+ self.modifiers = modifiers
+ self.expectations = expectations
+
+
+class ExpectationsJsonEncoder(simplejson.JSONEncoder):
+ """JSON encoder that can handle ModifiersAndExpectations objects.
+ """
+
+ def default(self, obj):
+ if isinstance(obj, ModifiersAndExpectations):
+ return {"modifiers": obj.modifiers,
+ "expectations": obj.expectations}
+ else:
+            return simplejson.JSONEncoder.default(self, obj)
+
+
+class TestExpectationsFile:
+ """Test expectation files consist of lines with specifications of what
+ to expect from layout test cases. The test cases can be directories
+ in which case the expectations apply to all test cases in that
+ directory and any subdirectory. The format of the file is along the
+ lines of:
+
+ LayoutTests/fast/js/fixme.js = FAIL
+ LayoutTests/fast/js/flaky.js = FAIL PASS
+ LayoutTests/fast/js/crash.js = CRASH TIMEOUT FAIL PASS
+ ...
+
+ To add other options:
+ SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ DEBUG : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ LINUX DEBUG SKIP : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+ DEFER LINUX WIN : LayoutTests/fast/js/no-good.js = TIMEOUT PASS
+
+ SKIP: Doesn't run the test.
+ SLOW: The test takes a long time to run, but does not timeout indefinitely.
+ WONTFIX: For tests that we never intend to pass on a given platform.
+ DEFER: Test does not count in our statistics for the current release.
+ DEBUG: Expectations apply only to the debug build.
+    RELEASE: Expectations apply only to the release build.
+ LINUX/WIN/WIN-XP/WIN-VISTA/WIN-7/MAC: Expectations apply only to these
+ platforms.
+
+ Notes:
+ -A test cannot be both SLOW and TIMEOUT
+ -A test cannot be both DEFER and WONTFIX
+ -A test should only be one of IMAGE, TEXT, IMAGE+TEXT, or FAIL. FAIL is
+ a migratory state that currently means either IMAGE, TEXT, or
+ IMAGE+TEXT. Once we have finished migrating the expectations, we will
+ change FAIL to have the meaning of IMAGE+TEXT and remove the IMAGE+TEXT
+ identifier.
+ -A test can be included twice, but not via the same path.
+ -If a test is included twice, then the more precise path wins.
+ -CRASH tests cannot be DEFER or WONTFIX
+ """
+
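+    # A minimal usage sketch (the platform, test path and expectations text
+    # below are illustrative, not taken from a real run):
+    #
+    #   expectations = TestExpectationsFile(
+    #       path=None, full_test_list=None, platform='win',
+    #       is_debug_mode=False, is_lint_mode=False,
+    #       expectations_as_str=
+    #           'BUG1234 : LayoutTests/fast/js/fixme.js = FAIL\n',
+    #       tests_are_present=False)
+    #   expectations.get_expectations('LayoutTests/fast/js/fixme.js')
+    #       -> set([FAIL])
+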
+ EXPECTATIONS = {'pass': PASS,
+ 'fail': FAIL,
+ 'text': TEXT,
+ 'image': IMAGE,
+ 'image+text': IMAGE_PLUS_TEXT,
+ 'timeout': TIMEOUT,
+ 'crash': CRASH,
+ 'missing': MISSING}
+
+ EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
+ PASS: ('pass', 'passes'),
+ FAIL: ('failure', 'failures'),
+ TEXT: ('text diff mismatch',
+ 'text diff mismatch'),
+ IMAGE: ('image mismatch', 'image mismatch'),
+ IMAGE_PLUS_TEXT: ('image and text mismatch',
+ 'image and text mismatch'),
+ CRASH: ('test shell crash',
+ 'test shell crashes'),
+ TIMEOUT: ('test timed out', 'tests timed out'),
+ MISSING: ('no expected result found',
+ 'no expected results found')}
+
+ EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT,
+ TEXT, IMAGE, FAIL, SKIP)
+
+ BASE_PLATFORMS = ('linux', 'mac', 'win')
+ PLATFORMS = BASE_PLATFORMS + ('win-xp', 'win-vista', 'win-7')
+
+ BUILD_TYPES = ('debug', 'release')
+
+ MODIFIERS = {'skip': SKIP,
+ 'wontfix': WONTFIX,
+ 'defer': DEFER,
+ 'slow': SLOW,
+ 'rebaseline': REBASELINE,
+ 'none': NONE}
+
+ TIMELINES = {'wontfix': WONTFIX,
+ 'now': NOW,
+ 'defer': DEFER}
+
+ RESULT_TYPES = {'skip': SKIP,
+ 'pass': PASS,
+ 'fail': FAIL,
+ 'flaky': FLAKY}
+
+ def __init__(self, path, full_test_list, platform, is_debug_mode,
+ is_lint_mode, expectations_as_str=None, suppress_errors=False,
+ tests_are_present=True):
+ """
+ path: The path to the expectation file. An error is thrown if a test is
+ listed more than once.
+        full_test_list: The list of all tests to be run pending processing of
+            the expectations for those tests.
+        platform: Which platform from self.PLATFORMS to filter tests for.
+        is_debug_mode: Whether we are testing a test_shell built in debug
+            mode.
+        is_lint_mode: Whether this is just linting test_expectations.txt.
+        expectations_as_str: Contents of the expectations file. Used instead
+            of the path. This makes unit testing easier.
+ suppress_errors: Whether to suppress lint errors.
+ tests_are_present: Whether the test files are present in the local
+ filesystem. The LTTF Dashboard uses False here to avoid having to
+ keep a local copy of the tree.
+ """
+
+ self._path = path
+ self._expectations_as_str = expectations_as_str
+ self._is_lint_mode = is_lint_mode
+ self._tests_are_present = tests_are_present
+ self._full_test_list = full_test_list
+ self._suppress_errors = suppress_errors
+ self._errors = []
+ self._non_fatal_errors = []
+ self._platform = self.to_test_platform_name(platform)
+ if self._platform is None:
+ raise Exception("Unknown platform '%s'" % (platform))
+ self._is_debug_mode = is_debug_mode
+
+ # Maps relative test paths as listed in the expectations file to a
+ # list of maps containing modifiers and expectations for each time
+ # the test is listed in the expectations file.
+ self._all_expectations = {}
+
+ # Maps a test to its list of expectations.
+ self._test_to_expectations = {}
+
+ # Maps a test to its list of options (string values)
+ self._test_to_options = {}
+
+ # Maps a test to its list of modifiers: the constants associated with
+ # the options minus any bug or platform strings
+ self._test_to_modifiers = {}
+
+ # Maps a test to the base path that it was listed with in the list.
+ self._test_list_paths = {}
+
+ self._modifier_to_tests = self._dict_of_sets(self.MODIFIERS)
+ self._expectation_to_tests = self._dict_of_sets(self.EXPECTATIONS)
+ self._timeline_to_tests = self._dict_of_sets(self.TIMELINES)
+ self._result_type_to_tests = self._dict_of_sets(self.RESULT_TYPES)
+
+ self._read(self._get_iterable_expectations())
+
+ def _dict_of_sets(self, strings_to_constants):
+ """Takes a dict of strings->constants and returns a dict mapping
+ each constant to an empty set."""
+ d = {}
+ for c in strings_to_constants.values():
+ d[c] = set()
+ return d
+
+ def _get_iterable_expectations(self):
+ """Returns an object that can be iterated over. Allows for not caring
+ about whether we're iterating over a file or a new-line separated
+ string."""
+ if self._expectations_as_str:
+ iterable = [x + "\n" for x in
+ self._expectations_as_str.split("\n")]
+            # Strip the final entry if it's empty to avoid adding an extra
+            # newline.
+            if iterable[-1] == "\n":
+                return iterable[:-1]
+ return iterable
+ else:
+ return open(self._path)
+
+ def to_test_platform_name(self, name):
+ """Returns the test expectation platform that will be used for a
+ given platform name, or None if there is no match."""
+ chromium_prefix = 'chromium-'
+ name = name.lower()
+ if name.startswith(chromium_prefix):
+ name = name[len(chromium_prefix):]
+ if name in self.PLATFORMS:
+ return name
+ return None
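+
+    # Sketch of the mapping (the platform names shown are illustrative):
+    #
+    #   to_test_platform_name('chromium-win-xp')  -> 'win-xp'
+    #   to_test_platform_name('mac')              -> 'mac'
+    #   to_test_platform_name('amiga')            -> None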
+
+ def get_test_set(self, modifier, expectation=None, include_skips=True):
+ if expectation is None:
+ tests = self._modifier_to_tests[modifier]
+ else:
+ tests = (self._expectation_to_tests[expectation] &
+ self._modifier_to_tests[modifier])
+
+ if not include_skips:
+ tests = tests - self.get_test_set(SKIP, expectation)
+
+ return tests
+
+ def get_tests_with_result_type(self, result_type):
+ return self._result_type_to_tests[result_type]
+
+ def get_tests_with_timeline(self, timeline):
+ return self._timeline_to_tests[timeline]
+
+ def get_options(self, test):
+ """This returns the entire set of options for the given test
+ (the modifiers plus the BUGXXXX identifier). This is used by the
+ LTTF dashboard."""
+ return self._test_to_options[test]
+
+ def has_modifier(self, test, modifier):
+ return test in self._modifier_to_tests[modifier]
+
+ def get_expectations(self, test):
+ return self._test_to_expectations[test]
+
+ def get_expectations_json_for_all_platforms(self):
+ # Specify separators in order to get compact encoding.
+ return ExpectationsJsonEncoder(separators=(',', ':')).encode(
+ self._all_expectations)
+
+ def contains(self, test):
+ return test in self._test_to_expectations
+
+ def remove_platform_from_file(self, tests, platform, backup=False):
+ """Remove the platform option from test expectations file.
+
+ If a test is in the test list and has an option that matches the given
+ platform, remove the matching platform and save the updated test back
+        to the file. If no other platforms remain after removal, delete the
+        test from the file.
+
+        Args:
+          tests: list of tests that need to be updated.
+          platform: which platform option to remove.
+          backup: if true, the original test expectations file is saved as
+                  [self.TEST_LIST].orig.YYYYMMDDHHMMSS
+
+        Returns:
+          True on completion.
+ """
+
+ new_file = self._path + '.new'
+ logging.debug('Original file: "%s"', self._path)
+ logging.debug('New file: "%s"', new_file)
+ f_orig = self._get_iterable_expectations()
+ f_new = open(new_file, 'w')
+
+ tests_removed = 0
+ tests_updated = 0
+ lineno = 0
+ for line in f_orig:
+ lineno += 1
+ action = self._get_platform_update_action(line, lineno, tests,
+ platform)
+ if action == NO_CHANGE:
+ # Save the original line back to the file
+ logging.debug('No change to test: %s', line)
+ f_new.write(line)
+ elif action == REMOVE_TEST:
+ tests_removed += 1
+ logging.info('Test removed: %s', line)
+ elif action == REMOVE_PLATFORM:
+ parts = line.split(':')
+ new_options = parts[0].replace(platform.upper() + ' ', '', 1)
+ new_line = ('%s:%s' % (new_options, parts[1]))
+ f_new.write(new_line)
+ tests_updated += 1
+ logging.info('Test updated: ')
+ logging.info(' old: %s', line)
+ logging.info(' new: %s', new_line)
+ elif action == ADD_PLATFORMS_EXCEPT_THIS:
+ parts = line.split(':')
+ new_options = parts[0]
+ for p in self.PLATFORMS:
+                    p = p.upper()
+                    # This is a temp solution for the rebaselining tool.
+                    # Do not add tags WIN-7 and WIN-VISTA to test expectations
+                    # if the original line does not specify the platform
+                    # option.
+                    # TODO(victorw): Remove WIN-VISTA and WIN-7 once we have
+                    # reliable Win 7 and Win Vista buildbots set up.
+                    if p not in (platform.upper(), 'WIN-VISTA', 'WIN-7'):
+ new_options += p + ' '
+ new_line = ('%s:%s' % (new_options, parts[1]))
+ f_new.write(new_line)
+ tests_updated += 1
+ logging.info('Test updated: ')
+ logging.info(' old: %s', line)
+ logging.info(' new: %s', new_line)
+ else:
+ logging.error('Unknown update action: %d; line: %s',
+ action, line)
+
+ logging.info('Total tests removed: %d', tests_removed)
+ logging.info('Total tests updated: %d', tests_updated)
+
+ f_orig.close()
+ f_new.close()
+
+ if backup:
+ date_suffix = time.strftime('%Y%m%d%H%M%S',
+ time.localtime(time.time()))
+ backup_file = ('%s.orig.%s' % (self._path, date_suffix))
+ if os.path.exists(backup_file):
+ os.remove(backup_file)
+ logging.info('Saving original file to "%s"', backup_file)
+ os.rename(self._path, backup_file)
+ else:
+ os.remove(self._path)
+
+ logging.debug('Saving new file to "%s"', self._path)
+ os.rename(new_file, self._path)
+ return True
+
+ def parse_expectations_line(self, line, lineno):
+ """Parses a line from test_expectations.txt and returns a tuple
+ with the test path, options as a list, expectations as a list."""
+ line = strip_comments(line)
+ if not line:
+ return (None, None, None)
+
+ options = []
+ if line.find(":") is -1:
+ test_and_expectation = line.split("=")
+ else:
+ parts = line.split(":")
+ options = self._get_options_list(parts[0])
+ test_and_expectation = parts[1].split('=')
+
+ test = test_and_expectation[0].strip()
+        if len(test_and_expectation) != 2:
+ self._add_error(lineno, "Missing expectations.",
+ test_and_expectation)
+ expectations = None
+ else:
+ expectations = self._get_options_list(test_and_expectation[1])
+
+ return (test, options, expectations)
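+
+    # Rough sketch of the returned tuple (the input line is hypothetical;
+    # lineno is only used for error reporting):
+    #
+    #   parse_expectations_line(
+    #       'BUG1234 WIN DEBUG : fast/js/foo.html = TIMEOUT PASS', 1)
+    #     -> ('fast/js/foo.html',
+    #         ['bug1234', 'win', 'debug'],
+    #         ['timeout', 'pass'])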
+
+ def _get_platform_update_action(self, line, lineno, tests, platform):
+ """Check the platform option and return the action needs to be taken.
+
+ Args:
+ line: current line in test expectations file.
+ lineno: current line number of line
+          tests: list of tests that need to be updated.
+ platform: which platform option to remove.
+
+ Returns:
+ NO_CHANGE: no change to the line (comments, test not in the list etc)
+ REMOVE_TEST: remove the test from file.
+ REMOVE_PLATFORM: remove this platform option from the test.
+ ADD_PLATFORMS_EXCEPT_THIS: add all the platforms except this one.
+ """
+ test, options, expectations = self.parse_expectations_line(line,
+ lineno)
+ if not test or test not in tests:
+ return NO_CHANGE
+
+ has_any_platform = False
+ for option in options:
+ if option in self.PLATFORMS:
+ has_any_platform = True
+                if option != platform:
+ return REMOVE_PLATFORM
+
+ # If there is no platform specified, then it means apply to all
+ # platforms. Return the action to add all the platforms except this
+ # one.
+ if not has_any_platform:
+ return ADD_PLATFORMS_EXCEPT_THIS
+
+ return REMOVE_TEST
+
+ def _has_valid_modifiers_for_current_platform(self, options, lineno,
+ test_and_expectations, modifiers):
+ """Returns true if the current platform is in the options list or if
+ no platforms are listed and if there are no fatal errors in the
+ options list.
+
+ Args:
+ options: List of lowercase options.
+ lineno: The line in the file where the test is listed.
+ test_and_expectations: The path and expectations for the test.
+ modifiers: The set to populate with modifiers.
+ """
+ has_any_platform = False
+ has_bug_id = False
+ for option in options:
+ if option in self.MODIFIERS:
+ modifiers.add(option)
+ elif option in self.PLATFORMS:
+ has_any_platform = True
+ elif option.startswith('bug'):
+ has_bug_id = True
+ elif option not in self.BUILD_TYPES:
+ self._add_error(lineno, 'Invalid modifier for test: %s' %
+ option, test_and_expectations)
+
+ if has_any_platform and not self._match_platform(options):
+ return False
+
+ if not has_bug_id and 'wontfix' not in options:
+ # TODO(ojan): Turn this into an AddError call once all the
+ # tests have BUG identifiers.
+ self._log_non_fatal_error(lineno, 'Test lacks BUG modifier.',
+ test_and_expectations)
+
+ if 'release' in options or 'debug' in options:
+ if self._is_debug_mode and 'debug' not in options:
+ return False
+ if not self._is_debug_mode and 'release' not in options:
+ return False
+
+ if 'wontfix' in options and 'defer' in options:
+ self._add_error(lineno, 'Test cannot be both DEFER and WONTFIX.',
+ test_and_expectations)
+
+ if self._is_lint_mode and 'rebaseline' in options:
+ self._add_error(lineno,
+ 'REBASELINE should only be used for running rebaseline.py. '
+ 'Cannot be checked in.', test_and_expectations)
+
+ return True
+
+ def _match_platform(self, options):
+ """Match the list of options against our specified platform. If any
+ of the options prefix-match self._platform, return True. This handles
+ the case where a test is marked WIN and the platform is WIN-VISTA.
+
+ Args:
+ options: list of options
+ """
+ for opt in options:
+ if self._platform.startswith(opt):
+ return True
+ return False
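+
+    # Sketch: with self._platform set to 'win-vista', an options list such
+    # as ['bug1234', 'win'] matches because 'win-vista'.startswith('win'),
+    # while ['bug1234', 'mac'] does not (the values are illustrative).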
+
+ def _add_to_all_expectations(self, test, options, expectations):
+ # Make all paths unix-style so the dashboard doesn't need to.
+ test = test.replace('\\', '/')
+ if not test in self._all_expectations:
+ self._all_expectations[test] = []
+ self._all_expectations[test].append(
+ ModifiersAndExpectations(options, expectations))
+
+ def _read(self, expectations):
+ """For each test in an expectations iterable, generate the
+ expectations for it."""
+ lineno = 0
+ for line in expectations:
+ lineno += 1
+
+ test_list_path, options, expectations = \
+ self.parse_expectations_line(line, lineno)
+ if not expectations:
+ continue
+
+ self._add_to_all_expectations(test_list_path,
+ " ".join(options).upper(),
+ " ".join(expectations).upper())
+
+ modifiers = set()
+ if options and not self._has_valid_modifiers_for_current_platform(
+ options, lineno, test_list_path, modifiers):
+ continue
+
+ expectations = self._parse_expectations(expectations, lineno,
+ test_list_path)
+
+ if 'slow' in options and TIMEOUT in expectations:
+ self._add_error(lineno,
+                'A test cannot be both SLOW and TIMEOUT. If it times out '
+                'indefinitely, then it should be just TIMEOUT.',
+ test_list_path)
+
+ full_path = os.path.join(path_utils.layout_tests_dir(),
+ test_list_path)
+ full_path = os.path.normpath(full_path)
+ # WebKit's way of skipping tests is to add a -disabled suffix.
+ # So we should consider the path existing if the path or the
+ # -disabled version exists.
+ if (self._tests_are_present and not os.path.exists(full_path)
+ and not os.path.exists(full_path + '-disabled')):
+                # Log a non-fatal error here since you hit this case any
+                # time you update test_expectations.txt without syncing
+                # the LayoutTests directory.
+ self._log_non_fatal_error(lineno, 'Path does not exist.',
+ test_list_path)
+ continue
+
+ if not self._full_test_list:
+ tests = [test_list_path]
+ else:
+ tests = self._expand_tests(test_list_path)
+
+ self._add_tests(tests, expectations, test_list_path, lineno,
+ modifiers, options)
+
+ if not self._suppress_errors and (
+ len(self._errors) or len(self._non_fatal_errors)):
+ if self._is_debug_mode:
+ build_type = 'DEBUG'
+ else:
+ build_type = 'RELEASE'
+ print "\nFAILURES FOR PLATFORM: %s, BUILD_TYPE: %s" \
+ % (self._platform.upper(), build_type)
+
+ for error in self._non_fatal_errors:
+ logging.error(error)
+ if len(self._errors):
+ raise SyntaxError('\n'.join(map(str, self._errors)))
+
+ # Now add in the tests that weren't present in the expectations file
+ expectations = set([PASS])
+ options = []
+ modifiers = []
+ if self._full_test_list:
+ for test in self._full_test_list:
+ if not test in self._test_list_paths:
+ self._add_test(test, modifiers, expectations, options)
+
+    def _get_options_list(self, list_string):
+        return [part.strip().lower() for part in
+                list_string.strip().split(' ')]
+
+ def _parse_expectations(self, expectations, lineno, test_list_path):
+ result = set()
+ for part in expectations:
+ if not part in self.EXPECTATIONS:
+ self._add_error(lineno, 'Unsupported expectation: %s' % part,
+ test_list_path)
+ continue
+ expectation = self.EXPECTATIONS[part]
+ result.add(expectation)
+ return result
+
+ def _expand_tests(self, test_list_path):
+ """Convert the test specification to an absolute, normalized
+ path and make sure directories end with the OS path separator."""
+ path = os.path.join(path_utils.layout_tests_dir(), test_list_path)
+ path = os.path.normpath(path)
+ path = self._fix_dir(path)
+
+ result = []
+ for test in self._full_test_list:
+ if test.startswith(path):
+ result.append(test)
+ return result
+
+ def _fix_dir(self, path):
+ """Check to see if the path points to a directory, and if so, append
+ the directory separator if necessary."""
+ if self._tests_are_present:
+ if os.path.isdir(path):
+ path = os.path.join(path, '')
+ else:
+ # If we can't check the filesystem to see if this is a directory,
+ # we assume that files w/o an extension are directories.
+ # TODO(dpranke): What happens w/ LayoutTests/css2.1 ?
+ if os.path.splitext(path)[1] == '':
+ path = os.path.join(path, '')
+ return path
+
+ def _add_tests(self, tests, expectations, test_list_path, lineno,
+ modifiers, options):
+ for test in tests:
+ if self._already_seen_test(test, test_list_path, lineno):
+ continue
+
+ self._clear_expectations_for_test(test, test_list_path)
+ self._add_test(test, modifiers, expectations, options)
+
+ def _add_test(self, test, modifiers, expectations, options):
+ """Sets the expected state for a given test.
+
+ This routine assumes the test has not been added before. If it has,
+        use _clear_expectations_for_test() to reset the state prior to
+ calling this.
+
+ Args:
+ test: test to add
+ modifiers: sequence of modifier keywords ('wontfix', 'slow', etc.)
+ expectations: sequence of expectations (PASS, IMAGE, etc.)
+ options: sequence of keywords and bug identifiers."""
+ self._test_to_expectations[test] = expectations
+ for expectation in expectations:
+ self._expectation_to_tests[expectation].add(test)
+
+ self._test_to_options[test] = options
+ self._test_to_modifiers[test] = set()
+ for modifier in modifiers:
+ mod_value = self.MODIFIERS[modifier]
+ self._modifier_to_tests[mod_value].add(test)
+ self._test_to_modifiers[test].add(mod_value)
+
+ if 'wontfix' in modifiers:
+ self._timeline_to_tests[WONTFIX].add(test)
+ elif 'defer' in modifiers:
+ self._timeline_to_tests[DEFER].add(test)
+ else:
+ self._timeline_to_tests[NOW].add(test)
+
+ if 'skip' in modifiers:
+ self._result_type_to_tests[SKIP].add(test)
+ elif expectations == set([PASS]):
+ self._result_type_to_tests[PASS].add(test)
+ elif len(expectations) > 1:
+ self._result_type_to_tests[FLAKY].add(test)
+ else:
+ self._result_type_to_tests[FAIL].add(test)
+
+ def _clear_expectations_for_test(self, test, test_list_path):
+ """Remove prexisting expectations for this test.
+ This happens if we are seeing a more precise path
+ than a previous listing.
+ """
+ if test in self._test_list_paths:
+ self._test_to_expectations.pop(test, '')
+ self._remove_from_sets(test, self._expectation_to_tests)
+ self._remove_from_sets(test, self._modifier_to_tests)
+ self._remove_from_sets(test, self._timeline_to_tests)
+ self._remove_from_sets(test, self._result_type_to_tests)
+
+ self._test_list_paths[test] = os.path.normpath(test_list_path)
+
+    def _remove_from_sets(self, test, dict_of_sets):
+        """Removes the given test from the sets in the dictionary.
+
+        Args:
+          test: test to look for
+          dict_of_sets: dict of sets of files"""
+        for set_of_tests in dict_of_sets.itervalues():
+ if test in set_of_tests:
+ set_of_tests.remove(test)
+
+ def _already_seen_test(self, test, test_list_path, lineno):
+ """Returns true if we've already seen a more precise path for this test
+ than the test_list_path.
+ """
+ if not test in self._test_list_paths:
+ return False
+
+ prev_base_path = self._test_list_paths[test]
+ if (prev_base_path == os.path.normpath(test_list_path)):
+ self._add_error(lineno, 'Duplicate expectations.', test)
+ return True
+
+ # Check if we've already seen a more precise path.
+ return prev_base_path.startswith(os.path.normpath(test_list_path))
+
+ def _add_error(self, lineno, msg, path):
+ """Reports an error that will prevent running the tests. Does not
+ immediately raise an exception because we'd like to aggregate all the
+ errors so they can all be printed out."""
+ self._errors.append('\nLine:%s %s %s' % (lineno, msg, path))
+
+ def _log_non_fatal_error(self, lineno, msg, path):
+ """Reports an error that will not prevent running the tests. These are
+ still errors, but not bad enough to warrant breaking test running."""
+ self._non_fatal_errors.append('Line:%s %s %s' % (lineno, msg, path))
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
new file mode 100644
index 0000000..6957dea
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_failures.py
@@ -0,0 +1,267 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Classes for failures that occur during tests."""
+
+import os
+import test_expectations
+
+
+def determine_result_type(failure_list):
+ """Takes a set of test_failures and returns which result type best fits
+ the list of failures. "Best fits" means we use the worst type of failure.
+
+ Returns:
+ one of the test_expectations result types - PASS, TEXT, CRASH, etc."""
+
+    if not failure_list:
+ return test_expectations.PASS
+
+ failure_types = [type(f) for f in failure_list]
+ if FailureCrash in failure_types:
+ return test_expectations.CRASH
+ elif FailureTimeout in failure_types:
+ return test_expectations.TIMEOUT
+ elif (FailureMissingResult in failure_types or
+ FailureMissingImage in failure_types or
+ FailureMissingImageHash in failure_types):
+ return test_expectations.MISSING
+ else:
+ is_text_failure = FailureTextMismatch in failure_types
+ is_image_failure = (FailureImageHashIncorrect in failure_types or
+ FailureImageHashMismatch in failure_types)
+ if is_text_failure and is_image_failure:
+ return test_expectations.IMAGE_PLUS_TEXT
+ elif is_text_failure:
+ return test_expectations.TEXT
+ elif is_image_failure:
+ return test_expectations.IMAGE
+ else:
+ raise ValueError("unclassifiable set of failures: "
+ + str(failure_types))
+
+
+class TestFailure(object):
+ """Abstract base class that defines the failure interface."""
+
+ @staticmethod
+ def message():
+ """Returns a string describing the failure in more detail."""
+        raise NotImplementedError
+
+ def result_html_output(self, filename):
+ """Returns an HTML string to be included on the results.html page."""
+        raise NotImplementedError
+
+ def should_kill_test_shell(self):
+ """Returns True if we should kill the test shell before the next
+ test."""
+ return False
+
+ def relative_output_filename(self, filename, modifier):
+ """Returns a relative filename inside the output dir that contains
+ modifier.
+
+ For example, if filename is fast\dom\foo.html and modifier is
+ "-expected.txt", the return value is fast\dom\foo-expected.txt
+
+ Args:
+ filename: relative filename to test file
+ modifier: a string to replace the extension of filename with
+
+ Return:
+ The relative windows path to the output filename
+ """
+ return os.path.splitext(filename)[0] + modifier
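+
+    # For example (hypothetical filename), relative_output_filename(
+    # 'fast/dom/foo.html', '-actual.txt') is expected to return
+    # 'fast/dom/foo-actual.txt'.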
+
+
+class FailureWithType(TestFailure):
+ """Base class that produces standard HTML output based on the test type.
+
+    Subclasses may commonly choose to override result_html_output(), but
+    still use the standard output_links().
+ """
+
+ def __init__(self, test_type):
+ TestFailure.__init__(self)
+ # TODO(ojan): This class no longer needs to know the test_type.
+ self._test_type = test_type
+
+    # Filename suffixes used by result_html_output().
+ OUT_FILENAMES = []
+
+ def output_links(self, filename, out_names):
+ """Returns a string holding all applicable output file links.
+
+ Args:
+ filename: the test filename, used to construct the result file names
+ out_names: list of filename suffixes for the files. If three or more
+ suffixes are in the list, they should be [actual, expected, diff,
+ wdiff]. Two suffixes should be [actual, expected], and a
+ single item is the [actual] filename suffix.
+ If out_names is empty, returns the empty string.
+ """
+ links = ['']
+ uris = [self.relative_output_filename(filename, fn) for
+ fn in out_names]
+ if len(uris) > 1:
+ links.append("<a href='%s'>expected</a>" % uris[1])
+ if len(uris) > 0:
+ links.append("<a href='%s'>actual</a>" % uris[0])
+ if len(uris) > 2:
+ links.append("<a href='%s'>diff</a>" % uris[2])
+ if len(uris) > 3:
+ links.append("<a href='%s'>wdiff</a>" % uris[3])
+ return ' '.join(links)
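+
+    # Sketch: for a hypothetical test fast/dom/foo.html and out_names of
+    # ["-actual.txt", "-expected.txt", "-diff.txt"], output_links produces
+    # "expected", "actual" and "diff" links, in that order, pointing at
+    # fast/dom/foo-expected.txt, fast/dom/foo-actual.txt and
+    # fast/dom/foo-diff.txt.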
+
+ def result_html_output(self, filename):
+ return self.message() + self.output_links(filename, self.OUT_FILENAMES)
+
+
+class FailureTimeout(TestFailure):
+ """Test timed out. We also want to restart the test shell if this
+ happens."""
+
+ @staticmethod
+ def message():
+ return "Test timed out"
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
+
+ def should_kill_test_shell(self):
+ return True
+
+
+class FailureCrash(TestFailure):
+ """Test shell crashed."""
+
+ @staticmethod
+ def message():
+ return "Test shell crashed"
+
+ def result_html_output(self, filename):
+ # TODO(tc): create a link to the minidump file
+ stack = self.relative_output_filename(filename, "-stack.txt")
+ return "<strong>%s</strong> <a href=%s>stack</a>" % (self.message(),
+ stack)
+
+ def should_kill_test_shell(self):
+ return True
+
+
+class FailureMissingResult(FailureWithType):
+ """Expected result was missing."""
+ OUT_FILENAMES = ["-actual.txt"]
+
+ @staticmethod
+ def message():
+ return "No expected results found"
+
+ def result_html_output(self, filename):
+ return ("<strong>%s</strong>" % self.message() +
+ self.output_links(filename, self.OUT_FILENAMES))
+
+
+class FailureTextMismatch(FailureWithType):
+ """Text diff output failed."""
+    # Filename suffixes used by result_html_output().
+ OUT_FILENAMES = ["-actual.txt", "-expected.txt", "-diff.txt"]
+ OUT_FILENAMES_WDIFF = ["-actual.txt", "-expected.txt", "-diff.txt",
+ "-wdiff.html"]
+
+ def __init__(self, test_type, has_wdiff):
+ FailureWithType.__init__(self, test_type)
+ if has_wdiff:
+ self.OUT_FILENAMES = self.OUT_FILENAMES_WDIFF
+
+ @staticmethod
+ def message():
+ return "Text diff mismatch"
+
+
+class FailureMissingImageHash(FailureWithType):
+ """Actual result hash was missing."""
+ # Chrome doesn't know to display a .checksum file as text, so don't bother
+ # putting in a link to the actual result.
+ OUT_FILENAMES = []
+
+ @staticmethod
+ def message():
+ return "No expected image hash found"
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
+
+
+class FailureMissingImage(FailureWithType):
+ """Actual result image was missing."""
+ OUT_FILENAMES = ["-actual.png"]
+
+ @staticmethod
+ def message():
+ return "No expected image found"
+
+ def result_html_output(self, filename):
+ return ("<strong>%s</strong>" % self.message() +
+ self.output_links(filename, self.OUT_FILENAMES))
+
+
+class FailureImageHashMismatch(FailureWithType):
+ """Image hashes didn't match."""
+ OUT_FILENAMES = ["-actual.png", "-expected.png", "-diff.png"]
+
+ @staticmethod
+ def message():
+ # We call this a simple image mismatch to avoid confusion, since
+ # we link to the PNGs rather than the checksums.
+ return "Image mismatch"
+
+
+class FailureFuzzyFailure(FailureWithType):
+ """Image hashes didn't match."""
+ OUT_FILENAMES = ["-actual.png", "-expected.png"]
+
+ @staticmethod
+ def message():
+ return "Fuzzy image match also failed"
+
+
+class FailureImageHashIncorrect(FailureWithType):
+ """Actual result hash is incorrect."""
+ # Chrome doesn't know to display a .checksum file as text, so don't bother
+ # putting in a link to the actual result.
+ OUT_FILENAMES = []
+
+ @staticmethod
+ def message():
+ return "Images match, expected image hash incorrect. "
+
+ def result_html_output(self, filename):
+ return "<strong>%s</strong>" % self.message()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
new file mode 100644
index 0000000..91fe136
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_files.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is used to find all of the layout test files used by Chromium
+(across all platforms). It exposes one public function - GatherTestFiles() -
+which takes an optional list of paths. If a list is passed in, the returned
+list of test files is constrained to those found under the paths passed in,
+i.e. calling GatherTestFiles(["LayoutTests/fast"]) will only return files
+under that directory."""
+
+import glob
+import os
+import path_utils
+
+# When collecting test cases, we include any file with these extensions.
+_supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
+ '.php', '.svg'])
+# When collecting test cases, skip these directories
+_skipped_directories = set(['.svn', '_svn', 'resources', 'script-tests'])
+
+
+def gather_test_files(paths):
+ """Generate a set of test files and return them.
+
+ Args:
+ paths: a list of command line paths relative to the webkit/tests
+ directory. glob patterns are ok.
+ """
+ paths_to_walk = set()
+    # If paths is empty, walk the whole layout tests directory.
+ if paths:
+ for path in paths:
+ # If there's an * in the name, assume it's a glob pattern.
+ path = os.path.join(path_utils.layout_tests_dir(), path)
+ if path.find('*') > -1:
+ filenames = glob.glob(path)
+ paths_to_walk.update(filenames)
+ else:
+ paths_to_walk.add(path)
+ else:
+ paths_to_walk.add(path_utils.layout_tests_dir())
+
+ # Now walk all the paths passed in on the command line and get filenames
+ test_files = set()
+ for path in paths_to_walk:
+ if os.path.isfile(path) and _has_supported_extension(path):
+ test_files.add(os.path.normpath(path))
+ continue
+
+ for root, dirs, files in os.walk(path):
+            # Don't walk skipped directories or their sub-directories.
+ if os.path.basename(root) in _skipped_directories:
+ del dirs[:]
+ continue
+
+ for filename in files:
+ if _has_supported_extension(filename):
+ filename = os.path.join(root, filename)
+ filename = os.path.normpath(filename)
+ test_files.add(filename)
+
+ return test_files
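+
+# A usage sketch (the relative paths below are hypothetical):
+#
+#   all_tests = gather_test_files([])
+#   fast_js_tests = gather_test_files(['fast/js'])
+#   globbed_tests = gather_test_files(['fast/js/arra*.html'])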
+
+
+def _has_supported_extension(filename):
+ """Return true if filename is one of the file extensions we want to run a
+ test on."""
+ extension = os.path.splitext(filename)[1]
+ return extension in _supported_file_extensions
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
new file mode 100644
index 0000000..10d0509
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/test_shell_thread.py
@@ -0,0 +1,511 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A Thread object for running the test shell and processing URLs from a
+shared queue.
+
+Each thread runs a separate instance of the test_shell binary and validates
+the output. When there are no more URLs to process in the shared queue, the
+thread exits.
+"""
+
+import copy
+import logging
+import os
+import Queue
+import signal
+import subprocess
+import sys
+import thread
+import threading
+import time
+
+import path_utils
+import test_failures
+
+
+def process_output(proc, test_info, test_types, test_args, target, output_dir):
+ """Receives the output from a test_shell process, subjects it to a number
+ of tests, and returns a list of failure types the test produced.
+
+ Args:
+ proc: an active test_shell process
+ test_info: Object containing the test filename, uri and timeout
+ test_types: list of test types to subject the output to
+ test_args: arguments to be passed to each test
+ target: Debug or Release
+ output_dir: directory to put crash stack traces into
+
+ Returns: a list of failure objects and times for the test being processed
+ """
+ outlines = []
+ extra_lines = []
+ failures = []
+ crash = False
+
+ # Some test args, such as the image hash, may be added or changed on a
+ # test-by-test basis.
+ local_test_args = copy.copy(test_args)
+
+ start_time = time.time()
+
+ line = proc.stdout.readline()
+
+ # Only start saving output lines once we've loaded the URL for the test.
+ url = None
+ test_string = test_info.uri.strip()
+
+ while line.rstrip() != "#EOF":
+ # Make sure we haven't crashed.
+ if line == '' and proc.poll() is not None:
+ failures.append(test_failures.FailureCrash())
+
+ # This is hex code 0xc000001d, which is used for abrupt
+ # termination. This happens if we hit ctrl+c from the prompt and
+ # we happen to be waiting on the test_shell.
+ # sdoyon: Not sure for which OS and in what circumstances the
+ # above code is valid. What works for me under Linux to detect
+ # ctrl+c is for the subprocess returncode to be negative SIGINT.
+ # And that agrees with the subprocess documentation.
+            if (proc.returncode == -1073741510 or
+                proc.returncode == -signal.SIGINT):
+ raise KeyboardInterrupt
+ crash = True
+ break
+
+ # Don't include #URL lines in our output
+ if line.startswith("#URL:"):
+ url = line.rstrip()[5:]
+ if url != test_string:
+ logging.fatal("Test got out of sync:\n|%s|\n|%s|" %
+ (url, test_string))
+ raise AssertionError("test out of sync")
+ elif line.startswith("#MD5:"):
+ local_test_args.hash = line.rstrip()[5:]
+ elif line.startswith("#TEST_TIMED_OUT"):
+ # Test timed out, but we still need to read until #EOF.
+ failures.append(test_failures.FailureTimeout())
+ elif url:
+ outlines.append(line)
+ else:
+ extra_lines.append(line)
+
+ line = proc.stdout.readline()
+
+ end_test_time = time.time()
+
+ if len(extra_lines):
+ extra = "".join(extra_lines)
+ if crash:
+ logging.debug("Stacktrace for %s:\n%s" % (test_string, extra))
+ # Strip off "file://" since RelativeTestFilename expects
+ # filesystem paths.
+ filename = os.path.join(output_dir,
+ path_utils.relative_test_filename(test_string[7:]))
+ filename = os.path.splitext(filename)[0] + "-stack.txt"
+ path_utils.maybe_make_directory(os.path.split(filename)[0])
+ open(filename, "wb").write(extra)
+ else:
+ logging.debug("Previous test output extra lines after dump:\n%s" %
+ extra)
+
+ # Check the output and save the results.
+ time_for_diffs = {}
+ for test_type in test_types:
+ start_diff_time = time.time()
+ new_failures = test_type.compare_output(test_info.filename,
+ proc, ''.join(outlines),
+ local_test_args, target)
+ # Don't add any more failures if we already have a crash, so we don't
+ # double-report those tests. We do double-report for timeouts since
+ # we still want to see the text and image output.
+ if not crash:
+ failures.extend(new_failures)
+ time_for_diffs[test_type.__class__.__name__] = (
+ time.time() - start_diff_time)
+
+ total_time_for_all_diffs = time.time() - end_test_time
+ test_run_time = end_test_time - start_time
+ return TestStats(test_info.filename, failures, test_run_time,
+ total_time_for_all_diffs, time_for_diffs)
+
+
+def start_test_shell(command, args):
+ """Returns the process for a new test_shell started in layout-tests mode.
+ """
+ cmd = []
+ # Hook for injecting valgrind or other runtime instrumentation,
+ # used by e.g. tools/valgrind/valgrind_tests.py.
+ wrapper = os.environ.get("BROWSER_WRAPPER", None)
+    if wrapper is not None:
+ cmd += [wrapper]
+ cmd += command + ['--layout-tests'] + args
+ return subprocess.Popen(cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+
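+# Usage sketch (the binary path and argument values are hypothetical; the
+# per-test line follows the "uri timeout pixel_hash" protocol described in
+# TestShellThread._run_test below):
+#
+#   proc = start_test_shell(['/path/to/test_shell'], ['--time-out-ms=30000'])
+#   proc.stdin.write('%s %s %s\n' % (uri, timeout, image_hash))
+#   line = proc.stdout.readline()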
+
+class TestStats:
+
+ def __init__(self, filename, failures, test_run_time,
+ total_time_for_all_diffs, time_for_diffs):
+ self.filename = filename
+ self.failures = failures
+ self.test_run_time = test_run_time
+ self.total_time_for_all_diffs = total_time_for_all_diffs
+ self.time_for_diffs = time_for_diffs
+
+
+class SingleTestThread(threading.Thread):
+ """Thread wrapper for running a single test file."""
+
+ def __init__(self, test_shell_command, shell_args, test_info, test_types,
+ test_args, target, output_dir):
+ """
+ Args:
+ test_info: Object containing the test filename, uri and timeout
+ output_dir: Directory to put crash stacks into.
+ See TestShellThread for documentation of the remaining arguments.
+ """
+
+ threading.Thread.__init__(self)
+ self._command = test_shell_command
+ self._shell_args = shell_args
+ self._test_info = test_info
+ self._test_types = test_types
+ self._test_args = test_args
+ self._target = target
+ self._output_dir = output_dir
+
+ def run(self):
+ proc = start_test_shell(self._command, self._shell_args +
+ ["--time-out-ms=" + self._test_info.timeout, self._test_info.uri])
+ self._test_stats = process_output(proc, self._test_info,
+ self._test_types, self._test_args, self._target, self._output_dir)
+
+ def get_test_stats(self):
+ return self._test_stats
+
+
+class TestShellThread(threading.Thread):
+
+ def __init__(self, filename_list_queue, result_queue, test_shell_command,
+ test_types, test_args, shell_args, options):
+ """Initialize all the local state for this test shell thread.
+
+ Args:
+ filename_list_queue: A thread safe Queue class that contains lists
+ of tuples of (filename, uri) pairs.
+ result_queue: A thread safe Queue class that will contain tuples of
+ (test, failure lists) for the test results.
+ test_shell_command: A list specifying the command+args for
+ test_shell
+ test_types: A list of TestType objects to run the test output
+ against.
+ test_args: A TestArguments object to pass to each TestType.
+ shell_args: Any extra arguments to be passed to test_shell.exe.
+ options: A property dictionary as produced by optparse. The
+ command-line options should match those expected by
+ run_webkit_tests; they are typically passed via the
+ run_webkit_tests.TestRunner class."""
+ threading.Thread.__init__(self)
+ self._filename_list_queue = filename_list_queue
+ self._result_queue = result_queue
+ self._filename_list = []
+ self._test_shell_command = test_shell_command
+ self._test_types = test_types
+ self._test_args = test_args
+ self._test_shell_proc = None
+ self._shell_args = shell_args
+ self._options = options
+ self._canceled = False
+ self._exception_info = None
+ self._directory_timing_stats = {}
+ self._test_stats = []
+ self._num_tests = 0
+ self._start_time = 0
+ self._stop_time = 0
+
+ # Current directory of tests we're running.
+ self._current_dir = None
+ # Number of tests in self._current_dir.
+ self._num_tests_in_current_dir = None
+ # Time at which we started running tests from self._current_dir.
+ self._current_dir_start_time = None
+
+ def get_directory_timing_stats(self):
+ """Returns a dictionary mapping test directory to a tuple of
+ (number of tests in that directory, time to run the tests)"""
+ return self._directory_timing_stats
+
+ def get_individual_test_stats(self):
+ """Returns a list of (test_filename, time_to_run_test,
+ total_time_for_all_diffs, time_for_diffs) tuples."""
+ return self._test_stats
+
+ def cancel(self):
+ """Set a flag telling this thread to quit."""
+ self._canceled = True
+
+ def get_exception_info(self):
+ """If run() terminated on an uncaught exception, return it here
+ ((type, value, traceback) tuple).
+ Returns None if run() terminated normally. Meant to be called after
+ joining this thread."""
+ return self._exception_info
+
+ def get_total_time(self):
+ return max(self._stop_time - self._start_time, 0.0)
+
+ def get_num_tests(self):
+ return self._num_tests
+
+ def run(self):
+ """Delegate main work to a helper method and watch for uncaught
+ exceptions."""
+ self._start_time = time.time()
+ self._num_tests = 0
+ try:
+ logging.debug('%s starting' % (self.getName()))
+ self._run(test_runner=None, result_summary=None)
+ logging.debug('%s done (%d tests)' % (self.getName(),
+ self.get_num_tests()))
+ except:
+ # Save the exception for our caller to see.
+ self._exception_info = sys.exc_info()
+ self._stop_time = time.time()
+ # Re-raise it and die.
+ logging.error('%s dying: %s' % (self.getName(),
+ self._exception_info))
+ raise
+ self._stop_time = time.time()
+
+ def run_in_main_thread(self, test_runner, result_summary):
+ """This hook allows us to run the tests from the main thread if
+ --num-test-shells==1, instead of having to always run two or more
+ threads. This allows us to debug the test harness without having to
+ do multi-threaded debugging."""
+ self._run(test_runner, result_summary)
+
+ def _run(self, test_runner, result_summary):
+ """Main work entry point of the thread. Basically we pull urls from the
+ filename queue and run the tests until we run out of urls.
+
+ If test_runner is not None, then we call test_runner.UpdateSummary()
+ with the results of each test."""
+ batch_size = 0
+ batch_count = 0
+ if self._options.batch_size:
+ try:
+ batch_size = int(self._options.batch_size)
+ except:
+ logging.info("Ignoring invalid batch size '%s'" %
+ self._options.batch_size)
+
+ # Append tests we're running to the existing tests_run.txt file.
+ # This is created in run_webkit_tests.py:_PrepareListsAndPrintOutput.
+ tests_run_filename = os.path.join(self._options.results_directory,
+ "tests_run.txt")
+ tests_run_file = open(tests_run_filename, "a")
+
+ while True:
+ if self._canceled:
+ logging.info('Testing canceled')
+ tests_run_file.close()
+ return
+
+            if len(self._filename_list) == 0:
+ if self._current_dir is not None:
+ self._directory_timing_stats[self._current_dir] = \
+ (self._num_tests_in_current_dir,
+ time.time() - self._current_dir_start_time)
+
+ try:
+ self._current_dir, self._filename_list = \
+ self._filename_list_queue.get_nowait()
+ except Queue.Empty:
+ self._kill_test_shell()
+ tests_run_file.close()
+ return
+
+ self._num_tests_in_current_dir = len(self._filename_list)
+ self._current_dir_start_time = time.time()
+
+ test_info = self._filename_list.pop()
+
+ # We have a url, run tests.
+ batch_count += 1
+ self._num_tests += 1
+ if self._options.run_singly:
+ failures = self._run_test_singly(test_info)
+ else:
+ failures = self._run_test(test_info)
+
+ filename = test_info.filename
+ tests_run_file.write(filename + "\n")
+ if failures:
+                # Check and kill the test shell if we need to.
+                if any(f.should_kill_test_shell() for f in failures):
+ self._kill_test_shell()
+ # Reset the batch count since the shell just bounced.
+ batch_count = 0
+ # Print the error message(s).
+ error_str = '\n'.join([' ' + f.message() for f in failures])
+ logging.debug("%s %s failed:\n%s" % (self.getName(),
+ path_utils.relative_test_filename(filename),
+ error_str))
+ else:
+ logging.debug("%s %s passed" % (self.getName(),
+ path_utils.relative_test_filename(filename)))
+ self._result_queue.put((filename, failures))
+
+ if batch_size > 0 and batch_count > batch_size:
+ # Bounce the shell and reset count.
+ self._kill_test_shell()
+ batch_count = 0
+
+ if test_runner:
+ test_runner.update_summary(result_summary)
+
+ def _run_test_singly(self, test_info):
+ """Run a test in a separate thread, enforcing a hard time limit.
+
+ Since we can only detect the termination of a thread, not any internal
+ state or progress, we can only run per-test timeouts when running test
+ files singly.
+
+ Args:
+ test_info: Object containing the test filename, uri and timeout
+
+ Return:
+ A list of TestFailure objects describing the error.
+ """
+ worker = SingleTestThread(self._test_shell_command,
+ self._shell_args,
+ test_info,
+ self._test_types,
+ self._test_args,
+ self._options.target,
+ self._options.results_directory)
+
+ worker.start()
+
+        # When we're running one test per test_shell process, we can enforce
+        # a hard timeout. The test_shell watchdog uses 2.5x the timeout, so
+        # we want to be larger than that.
+ worker.join(int(test_info.timeout) * 3.0 / 1000.0)
+ if worker.isAlive():
+ # If join() returned with the thread still running, the
+ # test_shell.exe is completely hung and there's nothing
+ # more we can do with it. We have to kill all the
+ # test_shells to free it up. If we're running more than
+ # one test_shell thread, we'll end up killing the other
+ # test_shells too, introducing spurious crashes. We accept that
+ # tradeoff in order to avoid losing the rest of this thread's
+ # results.
+ logging.error('Test thread hung: killing all test_shells')
+ path_utils.kill_all_test_shells()
+
+ try:
+ stats = worker.get_test_stats()
+ self._test_stats.append(stats)
+ failures = stats.failures
+ except AttributeError, e:
+ failures = []
+ logging.error('Cannot get results of test: %s' %
+ test_info.filename)
+
+ return failures
+
+ def _run_test(self, test_info):
+ """Run a single test file using a shared test_shell process.
+
+ Args:
+ test_info: Object containing the test filename, uri and timeout
+
+ Return:
+ A list of TestFailure objects describing the error.
+ """
+ self._ensure_test_shell_is_running()
+ # Args to test_shell is a space-separated list of
+ # "uri timeout pixel_hash"
+ # The timeout and pixel_hash are optional. The timeout is used if this
+ # test has a custom timeout. The pixel_hash is used to avoid doing an
+ # image dump if the checksums match, so it should be set to a blank
+ # value if we are generating a new baseline.
+ # (Otherwise, an image from a previous run will be copied into
+ # the baseline.)
+ image_hash = test_info.image_hash
+ if image_hash and self._test_args.new_baseline:
+ image_hash = ""
+ self._test_shell_proc.stdin.write(("%s %s %s\n" %
+ (test_info.uri, test_info.timeout, image_hash)))
+
+ # If the test shell is dead, the above may cause an IOError as we
+ # try to write onto the broken pipe. If this is the first test for
+        # this test shell process, then the test shell did not
+ # successfully start. If this is not the first test, then the
+ # previous tests have caused some kind of delayed crash. We don't
+ # try to recover here.
+ self._test_shell_proc.stdin.flush()
+
+ stats = process_output(self._test_shell_proc, test_info,
+ self._test_types, self._test_args,
+ self._options.target,
+ self._options.results_directory)
+
+ self._test_stats.append(stats)
+ return stats.failures
+
+ def _ensure_test_shell_is_running(self):
+ """Start the shared test shell, if it's not running. Not for use when
+ running tests singly, since those each start a separate test shell in
+ their own thread.
+ """
+ if (not self._test_shell_proc or
+ self._test_shell_proc.poll() is not None):
+ self._test_shell_proc = start_test_shell(self._test_shell_command,
+ self._shell_args)
+
+ def _kill_test_shell(self):
+ """Kill the test shell process if it's running."""
+ if self._test_shell_proc:
+ self._test_shell_proc.stdin.close()
+ self._test_shell_proc.stdout.close()
+ if self._test_shell_proc.stderr:
+ self._test_shell_proc.stderr.close()
+ if (sys.platform not in ('win32', 'cygwin') and
+ not self._test_shell_proc.poll()):
+ # Closing stdin/stdout/stderr hangs sometimes on OS X.
+ null = open(os.devnull, "w")
+ subprocess.Popen(["kill", "-9",
+ str(self._test_shell_proc.pid)], stderr=null)
+ null.close()
+ self._test_shell_proc = None
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py
new file mode 100644
index 0000000..7fc47a0
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/layout_package/websocket_server.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""A class to help start/stop the PyWebSocket server used by layout tests."""
+
+
+import logging
+import optparse
+import os
+import subprocess
+import sys
+import tempfile
+import time
+import urllib
+
+import path_utils
+import platform_utils
+import http_server
+
+_WS_LOG_PREFIX = 'pywebsocket.ws.log-'
+_WSS_LOG_PREFIX = 'pywebsocket.wss.log-'
+
+_DEFAULT_WS_PORT = 8880
+_DEFAULT_WSS_PORT = 9323
+
+
+def url_is_alive(url):
+ """Checks to see if we get an http response from |url|.
+ We poll the url 5 times with a 1 second delay. If we don't
+ get a reply in that time, we give up and assume the httpd
+ didn't start properly.
+
+ Args:
+ url: The URL to check.
+ Return:
+ True if the url is alive.
+ """
+ wait_time = 5
+ while wait_time > 0:
+ try:
+ response = urllib.urlopen(url)
+ # Server is up and responding.
+ return True
+ except IOError:
+ pass
+ wait_time -= 1
+ # Wait a second and try again.
+ time.sleep(1)
+
+ return False
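+
+# Usage sketch (the port number is illustrative):
+#
+#   if not url_is_alive('http://127.0.0.1:8880/'):
+#       raise PyWebSocketNotStarted('Server did not respond.')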
+
+
+def remove_log_files(folder, starts_with):
+    files = os.listdir(folder)
+    for log_file in files:
+        if log_file.startswith(starts_with):
+            full_path = os.path.join(folder, log_file)
+            os.remove(full_path)
+
+
+class PyWebSocketNotStarted(Exception):
+ pass
+
+
+class PyWebSocketNotFound(Exception):
+ pass
+
+
+class PyWebSocket(http_server.Lighttpd):
+
+ def __init__(self, output_dir, port=_DEFAULT_WS_PORT,
+ root=None,
+ use_tls=False,
+ private_key=http_server.Lighttpd._pem_file,
+ certificate=http_server.Lighttpd._pem_file,
+ register_cygwin=None,
+ pidfile=None):
+ """Args:
+ output_dir: the absolute path to the layout test result directory
+ """
+ http_server.Lighttpd.__init__(self, output_dir,
+ port=port,
+ root=root,
+ register_cygwin=register_cygwin)
+ self._output_dir = output_dir
+ self._process = None
+ self._port = port
+ self._root = root
+ self._use_tls = use_tls
+ self._private_key = private_key
+ self._certificate = certificate
+ if self._port:
+ self._port = int(self._port)
+ if self._use_tls:
+ self._server_name = 'PyWebSocket(Secure)'
+ else:
+ self._server_name = 'PyWebSocket'
+ self._pidfile = pidfile
+ self._wsout = None
+
+ # Webkit tests
+ if self._root:
+ self._layout_tests = os.path.abspath(self._root)
+ self._web_socket_tests = os.path.abspath(
+ os.path.join(self._root, 'websocket', 'tests'))
+ else:
+ try:
+ self._web_socket_tests = path_utils.path_from_base(
+ 'third_party', 'WebKit', 'LayoutTests', 'websocket',
+ 'tests')
+ self._layout_tests = path_utils.path_from_base(
+ 'third_party', 'WebKit', 'LayoutTests')
+ except path_utils.PathNotFound:
+ self._web_socket_tests = None
+
+ def start(self):
+ if not self._web_socket_tests:
+ logging.info('No need to start %s server.' % self._server_name)
+ return
+ if self.is_running():
+ raise PyWebSocketNotStarted('%s is already running.' %
+ self._server_name)
+
+ time_str = time.strftime('%d%b%Y-%H%M%S')
+ if self._use_tls:
+ log_prefix = _WSS_LOG_PREFIX
+ else:
+ log_prefix = _WS_LOG_PREFIX
+ log_file_name = log_prefix + time_str
+
+ # Remove old log files; only the logs from this run are kept.
+ remove_log_files(self._output_dir, log_prefix)
+
+ error_log = os.path.join(self._output_dir, log_file_name + "-err.txt")
+
+ output_log = os.path.join(self._output_dir, log_file_name + "-out.txt")
+ self._wsout = open(output_log, "w")
+
+ python_interp = sys.executable
+ pywebsocket_base = path_utils.path_from_base(
+ 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket')
+ pywebsocket_script = path_utils.path_from_base(
+ 'third_party', 'WebKit', 'WebKitTools', 'pywebsocket',
+ 'mod_pywebsocket', 'standalone.py')
+ start_cmd = [
+ python_interp, pywebsocket_script,
+ '-p', str(self._port),
+ '-d', self._layout_tests,
+ '-s', self._web_socket_tests,
+ '-l', error_log,
+ ]
+
+ handler_map_file = os.path.join(self._web_socket_tests,
+ 'handler_map.txt')
+ if os.path.exists(handler_map_file):
+ logging.debug('Using handler_map_file: %s' % handler_map_file)
+ start_cmd.append('-m')
+ start_cmd.append(handler_map_file)
+ else:
+ logging.warning('No handler_map_file found')
+
+ if self._use_tls:
+ start_cmd.extend(['-t', '-k', self._private_key,
+ '-c', self._certificate])
+
+ # Put the cygwin directory first in the path to find cygwin1.dll
+ env = os.environ
+ if sys.platform in ('cygwin', 'win32'):
+ env['PATH'] = '%s;%s' % (
+ path_utils.path_from_base('third_party', 'cygwin', 'bin'),
+ env['PATH'])
+
+ if sys.platform == 'win32' and self._register_cygwin:
+ setup_mount = path_utils.path_from_base('third_party', 'cygwin',
+ 'setup_mount.bat')
+ subprocess.Popen(setup_mount).wait()
+
+ env['PYTHONPATH'] = (pywebsocket_base + os.path.pathsep +
+ env.get('PYTHONPATH', ''))
+
+ logging.debug('Starting %s server on %d.' % (
+ self._server_name, self._port))
+ logging.debug('cmdline: %s' % ' '.join(start_cmd))
+ self._process = subprocess.Popen(start_cmd, stdout=self._wsout,
+ stderr=subprocess.STDOUT,
+ env=env)
+
+ # Wait a bit before checking the liveness of the server.
+ time.sleep(0.5)
+
+ if self._use_tls:
+ url = 'https'
+ else:
+ url = 'http'
+ url = url + '://127.0.0.1:%d/' % self._port
+ if not url_is_alive(url):
+ fp = open(output_log)
+ try:
+ for line in fp:
+ logging.error(line)
+ finally:
+ fp.close()
+ raise PyWebSocketNotStarted(
+ 'Failed to start %s server on port %s.' %
+ (self._server_name, self._port))
+
+ # The server process terminated prematurely.
+ if self._process.returncode is not None:
+ raise PyWebSocketNotStarted(
+ 'Failed to start %s server.' % self._server_name)
+ if self._pidfile:
+ f = open(self._pidfile, 'w')
+ f.write("%d" % self._process.pid)
+ f.close()
+
+ def stop(self, force=False):
+ if not force and not self.is_running():
+ return
+
+ if self._process:
+ pid = self._process.pid
+ elif self._pidfile:
+ f = open(self._pidfile)
+ pid = int(f.read().strip())
+ f.close()
+
+ if not pid:
+ raise PyWebSocketNotFound(
+ 'Failed to find %s server pid.' % self._server_name)
+
+ logging.debug('Shutting down %s server %d.' % (self._server_name, pid))
+ platform_utils.kill_process(pid)
+
+ if self._process:
+ self._process.wait()
+ self._process = None
+
+ if self._wsout:
+ self._wsout.close()
+ self._wsout = None
+
+
+if '__main__' == __name__:
+ # Provide some command line params for starting the PyWebSocket server
+ # manually.
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('--server', type='choice',
+ choices=['start', 'stop'], default='start',
+ help='Server action (start|stop)')
+ option_parser.add_option('-p', '--port', dest='port',
+ default=None, help='Port to listen on')
+ option_parser.add_option('-r', '--root',
+ help='Absolute path to DocumentRoot '
+ '(overrides layout test roots)')
+ option_parser.add_option('-t', '--tls', dest='use_tls',
+ action='store_true',
+ default=False, help='use TLS (wss://)')
+ option_parser.add_option('-k', '--private_key', dest='private_key',
+ default='', help='TLS private key file.')
+ option_parser.add_option('-c', '--certificate', dest='certificate',
+ default='', help='TLS certificate file.')
+ option_parser.add_option('--register_cygwin', action="store_true",
+ dest="register_cygwin",
+ help='Register Cygwin paths (on Win try bots)')
+ option_parser.add_option('--pidfile', help='path to pid file.')
+ options, args = option_parser.parse_args()
+
+ if not options.port:
+ if options.use_tls:
+ options.port = _DEFAULT_WSS_PORT
+ else:
+ options.port = _DEFAULT_WS_PORT
+
+ kwds = {'port': options.port, 'use_tls': options.use_tls}
+ if options.root:
+ kwds['root'] = options.root
+ if options.private_key:
+ kwds['private_key'] = options.private_key
+ if options.certificate:
+ kwds['certificate'] = options.certificate
+ kwds['register_cygwin'] = options.register_cygwin
+ if options.pidfile:
+ kwds['pidfile'] = options.pidfile
+
+ pywebsocket = PyWebSocket(tempfile.gettempdir(), **kwds)
+
+ if 'start' == options.server:
+ pywebsocket.start()
+ else:
+ pywebsocket.stop(force=True)
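+
+# Example manual invocations (illustrative; assumes the module file is named
+# websocket_server.py and uses only the flags defined above):
+#   python websocket_server.py --server start --port 8880
+#   python websocket_server.py --server stop --pidfile /tmp/pywebsocket.pid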
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
new file mode 100644
index 0000000..1db811f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/rebaseline_chromium_webkit_tests.py
@@ -0,0 +1,1028 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Rebaselining tool that automatically produces baselines for all platforms.
+
+The script does the following for each platform specified:
+ 1. Compile a list of tests that need rebaselining.
+ 2. Download test result archive from buildbot for the platform.
+ 3. Extract baselines from the archive file for all identified files.
+ 4. Add new baselines to SVN repository.
+ 5. For each test that has been rebaselined, remove this platform option from
+ the test in test_expectation.txt. If no other platforms remain after
+ removal, delete the rebaselined test from the file.
+
+At the end, the script generates an HTML page that compares the old and new
+baselines.
+"""
+
+import logging
+import optparse
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+import urllib
+import webbrowser
+import zipfile
+
+from layout_package import path_utils
+from layout_package import test_expectations
+from test_types import image_diff
+from test_types import text_diff
+
+# Repository type constants.
+REPO_SVN, REPO_UNKNOWN = range(2)
+
+BASELINE_SUFFIXES = ['.txt', '.png', '.checksum']
+REBASELINE_PLATFORM_ORDER = ['mac', 'win', 'win-xp', 'win-vista', 'linux']
+ARCHIVE_DIR_NAME_DICT = {'win': 'webkit-rel',
+ 'win-vista': 'webkit-dbg-vista',
+ 'win-xp': 'webkit-rel',
+ 'mac': 'webkit-rel-mac5',
+ 'linux': 'webkit-rel-linux',
+ 'win-canary': 'webkit-rel-webkit-org',
+ 'win-vista-canary': 'webkit-dbg-vista',
+ 'win-xp-canary': 'webkit-rel-webkit-org',
+ 'mac-canary': 'webkit-rel-mac-webkit-org',
+ 'linux-canary': 'webkit-rel-linux-webkit-org'}
+
+
+def run_shell_with_return_code(command, print_output=False):
+ """Executes a command and returns the output and process return code.
+
+ Args:
+ command: program and arguments.
+ print_output: if true, print the command results to standard output.
+
+ Returns:
+ command output, return code
+ """
+
+ # Use a shell for subcommands on Windows to get a PATH search.
+ use_shell = sys.platform.startswith('win')
+ p = subprocess.Popen(command, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, shell=use_shell)
+ if print_output:
+ output_array = []
+ while True:
+ line = p.stdout.readline()
+ if not line:
+ break
+ if print_output:
+ print line.strip('\n')
+ output_array.append(line)
+ output = ''.join(output_array)
+ else:
+ output = p.stdout.read()
+ p.wait()
+ p.stdout.close()
+
+ return output, p.returncode
+
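+# For example (illustrative), the rebaseliner below probes for an svn
+# checkout with:
+#   output, return_code = run_shell_with_return_code(['svn', 'info'])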
+
+def run_shell(command, print_output=False):
+ """Executes a command and returns the output.
+
+ Args:
+ command: program and arguments.
+ print_output: if true, print the command results to standard output.
+
+ Returns:
+ command output
+ """
+
+ output, return_code = run_shell_with_return_code(command, print_output)
+ return output
+
+
+def log_dashed_string(text, platform, logging_level=logging.INFO):
+ """Log text message with dashes on both sides."""
+
+ msg = text
+ if platform:
+ msg += ': ' + platform
+ if len(msg) < 78:
+ dashes = '-' * ((78 - len(msg)) / 2)
+ msg = '%s %s %s' % (dashes, msg, dashes)
+
+ if logging_level == logging.ERROR:
+ logging.error(msg)
+ elif logging_level == logging.WARNING:
+ logging.warn(msg)
+ else:
+ logging.info(msg)
+
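+# For example (illustrative), log_dashed_string('Rebaseline started', 'mac')
+# logs 'Rebaseline started: mac' centered in a line of dashes roughly 78
+# columns wide.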
+
+def setup_html_directory(html_directory):
+ """Setup the directory to store html results.
+
+ All html related files are stored in the "rebaseline_html" subdirectory.
+
+ Args:
+ html_directory: parent directory that stores the rebaselining results.
+ If None, a temp directory is created.
+
+ Returns:
+ the directory that stores the html related rebaselining results.
+ """
+
+ if not html_directory:
+ html_directory = tempfile.mkdtemp()
+ elif not os.path.exists(html_directory):
+ os.mkdir(html_directory)
+
+ html_directory = os.path.join(html_directory, 'rebaseline_html')
+ logging.info('Html directory: "%s"', html_directory)
+
+ if os.path.exists(html_directory):
+ shutil.rmtree(html_directory, True)
+ logging.info('Deleted existing html directory: "%s"', html_directory)
+
+ if not os.path.exists(html_directory):
+ os.mkdir(html_directory)
+ return html_directory
+
+
+def get_result_file_fullpath(html_directory, baseline_filename, platform,
+ result_type):
+ """Get full path of the baseline result file.
+
+ Args:
+ html_directory: directory that stores the html related files.
+ baseline_filename: name of the baseline file.
+ platform: win, linux or mac
+ result_type: type of the baseline result: '.txt', '.png'.
+
+ Returns:
+ Full path of the baseline file for rebaselining result comparison.
+ """
+
+ base, ext = os.path.splitext(baseline_filename)
+ result_filename = '%s-%s-%s%s' % (base, platform, result_type, ext)
+ fullpath = os.path.join(html_directory, result_filename)
+ logging.debug(' Result file full path: "%s".', fullpath)
+ return fullpath
+
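+# For example (illustrative), get_result_file_fullpath('/tmp/rebaseline_html',
+# 'foo-expected.txt', 'linux', 'new') returns
+# '/tmp/rebaseline_html/foo-expected-linux-new.txt'.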
+
+class Rebaseliner(object):
+ """Class to produce new baselines for a given platform."""
+
+ REVISION_REGEX = r'<a href=\"(\d+)/\">'
+
+ def __init__(self, platform, options):
+ self._file_dir = path_utils.path_from_base('webkit', 'tools',
+ 'layout_tests')
+ self._platform = platform
+ self._options = options
+ self._rebaselining_tests = []
+ self._rebaselined_tests = []
+
+ # Create the tests and expectations helper, which is used to:
+ #   - compile the list of tests that need rebaselining.
+ #   - update the tests in the test_expectations file after the
+ #     rebaseline is done.
+ self._test_expectations = \
+ test_expectations.TestExpectations(None,
+ self._file_dir,
+ platform,
+ False,
+ False)
+
+ self._repo_type = self._get_repo_type()
+
+ def run(self, backup):
+ """Run rebaseline process."""
+
+ log_dashed_string('Compiling rebaselining tests', self._platform)
+ if not self._compile_rebaselining_tests():
+ return True
+
+ log_dashed_string('Downloading archive', self._platform)
+ archive_file = self._download_buildbot_archive()
+ logging.info('')
+ if not archive_file:
+ logging.error('No archive found.')
+ return False
+
+ log_dashed_string('Extracting and adding new baselines',
+ self._platform)
+ if not self._extract_and_add_new_baselines(archive_file):
+ return False
+
+ log_dashed_string('Updating rebaselined tests in file',
+ self._platform)
+ self._update_rebaselined_tests_in_file(backup)
+ logging.info('')
+
+ if len(self._rebaselining_tests) != len(self._rebaselined_tests):
+ logging.warning('NOT ALL TESTS THAT NEED REBASELINING HAVE BEEN '
+ 'REBASELINED.')
+ logging.warning(' Total tests needing rebaselining: %d',
+ len(self._rebaselining_tests))
+ logging.warning(' Total tests rebaselined: %d',
+ len(self._rebaselined_tests))
+ return False
+
+ logging.warning('All tests needing rebaselining were successfully '
+ 'rebaselined.')
+
+ return True
+
+ def get_rebaselining_tests(self):
+ return self._rebaselining_tests
+
+ def _get_repo_type(self):
+ """Get the repository type that client is using."""
+ output, return_code = run_shell_with_return_code(['svn', 'info'],
+ False)
+ if return_code == 0:
+ return REPO_SVN
+
+ return REPO_UNKNOWN
+
+ def _compile_rebaselining_tests(self):
+ """Compile list of tests that need rebaselining for the platform.
+
+ Returns:
+ List of tests that need rebaselining or
+ None if there is no such test.
+ """
+
+ self._rebaselining_tests = \
+ self._test_expectations.get_rebaselining_failures()
+ if not self._rebaselining_tests:
+ logging.warn('No tests found that need rebaselining.')
+ return None
+
+ logging.info('Total number of tests needing rebaselining '
+ 'for "%s": "%d"', self._platform,
+ len(self._rebaselining_tests))
+
+ test_no = 1
+ for test in self._rebaselining_tests:
+ logging.info(' %d: %s', test_no, test)
+ test_no += 1
+
+ return self._rebaselining_tests
+
+ def _get_latest_revision(self, url):
+ """Get the latest layout test revision number from buildbot.
+
+ Args:
+ url: Url to retrieve layout test revision numbers.
+
+ Returns:
+ latest revision or
+ None on failure.
+ """
+
+ logging.debug('Url to retrieve revision: "%s"', url)
+
+ f = urllib.urlopen(url)
+ content = f.read()
+ f.close()
+
+ revisions = re.findall(self.REVISION_REGEX, content)
+ if not revisions:
+ logging.error('Failed to find revision, content: "%s"', content)
+ return None
+
+ revisions.sort(key=int)
+ logging.info('Latest revision: "%s"', revisions[len(revisions) - 1])
+ return revisions[len(revisions) - 1]
+
+ def _get_archive_dir_name(self, platform, webkit_canary):
+ """Get name of the layout test archive directory.
+
+ Returns:
+ Directory name or
+ None on failure
+ """
+
+ if webkit_canary:
+ platform += '-canary'
+
+ if platform in ARCHIVE_DIR_NAME_DICT:
+ return ARCHIVE_DIR_NAME_DICT[platform]
+ else:
+ logging.error('Cannot find platform key %s in archive '
+ 'directory name dictionary', platform)
+ return None
+
+ def _get_archive_url(self):
+ """Generate the url to download latest layout test archive.
+
+ Returns:
+ Url to download archive or
+ None on failure
+ """
+
+ dir_name = self._get_archive_dir_name(self._platform,
+ self._options.webkit_canary)
+ if not dir_name:
+ return None
+
+ logging.debug('Buildbot platform dir name: "%s"', dir_name)
+
+ url_base = '%s/%s/' % (self._options.archive_url, dir_name)
+ latest_revision = self._get_latest_revision(url_base)
+ if latest_revision is None or latest_revision <= 0:
+ return None
+
+ archive_url = ('%s%s/layout-test-results.zip' % (url_base,
+ latest_revision))
+ logging.info('Archive url: "%s"', archive_url)
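+ # For example (illustrative), with the default --archive_url and the
+ # 'linux' platform, this resolves to something like:
+ #   http://build.chromium.org/buildbot/layout_test_results/webkit-rel-linux/<revision>/layout-test-results.zip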
+ return archive_url
+
+ def _download_buildbot_archive(self):
+ """Download layout test archive file from buildbot.
+
+ Returns:
+ Path of the downloaded archive file or
+ None on failure.
+ """
+
+ url = self._get_archive_url()
+ if url is None:
+ return None
+
+ fn = urllib.urlretrieve(url)[0]
+ logging.info('Archive downloaded and saved to file: "%s"', fn)
+ return fn
+
+ def _extract_and_add_new_baselines(self, archive_file):
+ """Extract new baselines from archive and add them to SVN repository.
+
+ Args:
+ archive_file: full path to the archive file.
+
+ Returns:
+ List of tests that have been rebaselined or
+ None on failure.
+ """
+
+ zip_file = zipfile.ZipFile(archive_file, 'r')
+ zip_namelist = zip_file.namelist()
+
+ logging.debug('zip file namelist:')
+ for name in zip_namelist:
+ logging.debug(' ' + name)
+
+ platform = path_utils.platform_name(self._platform)
+ logging.debug('Platform dir: "%s"', platform)
+
+ test_no = 1
+ self._rebaselined_tests = []
+ for test in self._rebaselining_tests:
+ logging.info('Test %d: %s', test_no, test)
+
+ found = False
+ svn_error = False
+ test_basename = os.path.splitext(test)[0]
+ for suffix in BASELINE_SUFFIXES:
+ archive_test_name = ('layout-test-results/%s-actual%s' %
+ (test_basename, suffix))
+ logging.debug(' Archive test file name: "%s"',
+ archive_test_name)
+ if not archive_test_name in zip_namelist:
+ logging.info(' %s file not in archive.', suffix)
+ continue
+
+ found = True
+ logging.info(' %s file found in archive.', suffix)
+
+ # Extract new baseline from archive and save it to a temp file.
+ data = zip_file.read(archive_test_name)
+ temp_fd, temp_name = tempfile.mkstemp(suffix)
+ f = os.fdopen(temp_fd, 'wb')
+ f.write(data)
+ f.close()
+
+ expected_filename = '%s-expected%s' % (test_basename, suffix)
+ expected_fullpath = os.path.join(
+ path_utils.chromium_baseline_path(platform),
+ expected_filename)
+ expected_fullpath = os.path.normpath(expected_fullpath)
+ logging.debug(' Expected file full path: "%s"',
+ expected_fullpath)
+
+ # TODO(victorw): for now, the rebaselining tool only checks
+ # whether THIS baseline is a duplicate and should be skipped.
+ # The tool could be improved to check the baselines at all
+ # upper and lower fallback levels and remove every duplicate.
+ if self._is_dup_baseline(temp_name,
+ expected_fullpath,
+ test,
+ suffix,
+ self._platform):
+ os.remove(temp_name)
+ self._delete_baseline(expected_fullpath)
+ continue
+
+ # Create the new baseline directory if it doesn't already
+ # exist.
+ path_utils.maybe_make_directory(
+ os.path.dirname(expected_fullpath))
+
+ shutil.move(temp_name, expected_fullpath)
+
+ if not self._svn_add(expected_fullpath):
+ svn_error = True
+ elif suffix != '.checksum':
+ self._create_html_baseline_files(expected_fullpath)
+
+ if not found:
+ logging.warn(' No new baselines found in archive.')
+ else:
+ if svn_error:
+ logging.warn(' Failed to add baselines to SVN.')
+ else:
+ logging.info(' Rebaseline succeeded.')
+ self._rebaselined_tests.append(test)
+
+ test_no += 1
+
+ zip_file.close()
+ os.remove(archive_file)
+
+ return self._rebaselined_tests
+
+ def _is_dup_baseline(self, new_baseline, baseline_path, test, suffix,
+ platform):
+ """Check whether a baseline is duplicate and can fallback to same
+ baseline for another platform. For example, if a test has same
+ baseline on linux and windows, then we only store windows
+ baseline and linux baseline will fallback to the windows version.
+
+ Args:
+ expected_filename: baseline expectation file name.
+ test: test name.
+ suffix: file suffix of the expected results, including dot;
+ e.g. '.txt' or '.png'.
+ platform: baseline platform 'mac', 'win' or 'linux'.
+
+ Returns:
+ True if the baseline is unnecessary.
+ False otherwise.
+ """
+ test_filepath = os.path.join(path_utils.layout_tests_dir(), test)
+ all_baselines = path_utils.expected_baselines(test_filepath,
+ suffix, platform, True)
+ for (fallback_dir, fallback_file) in all_baselines:
+ if fallback_dir and fallback_file:
+ fallback_fullpath = os.path.normpath(
+ os.path.join(fallback_dir, fallback_file))
+ if fallback_fullpath.lower() != baseline_path.lower():
+ if not self._diff_baselines(new_baseline,
+ fallback_fullpath):
+ logging.info(' Found same baseline at %s',
+ fallback_fullpath)
+ return True
+ else:
+ return False
+
+ return False
+
+ def _diff_baselines(self, file1, file2):
+ """Check whether two baselines are different.
+
+ Args:
+ file1, file2: full paths of the baselines to compare.
+
+ Returns:
+ True if two files are different or have different extensions.
+ False otherwise.
+ """
+
+ ext1 = os.path.splitext(file1)[1].upper()
+ ext2 = os.path.splitext(file2)[1].upper()
+ if ext1 != ext2:
+ logging.warn('Files to compare have different ext. '
+ 'File1: %s; File2: %s', file1, file2)
+ return True
+
+ if ext1 == '.PNG':
+ return image_diff.ImageDiff(self._platform, '').diff_files(file1,
+ file2)
+ else:
+ return text_diff.TestTextDiff(self._platform, '').diff_files(file1,
+ file2)
+
+ def _delete_baseline(self, filename):
+ """Remove the file from repository and delete it from disk.
+
+ Args:
+ filename: full path of the file to delete.
+ """
+
+ if not filename or not os.path.isfile(filename):
+ return
+
+ if self._repo_type == REPO_SVN:
+ parent_dir, basename = os.path.split(filename)
+ original_dir = os.getcwd()
+ os.chdir(parent_dir)
+ run_shell(['svn', 'delete', '--force', basename], False)
+ os.chdir(original_dir)
+ else:
+ os.remove(filename)
+
+ def _update_rebaselined_tests_in_file(self, backup):
+ """Update the rebaselined tests in test expectations file.
+
+ Args:
+ backup: if True, backup the original test expectations file.
+
+ Returns:
+ None
+ """
+
+ if self._rebaselined_tests:
+ self._test_expectations.remove_platform_from_file(
+ self._rebaselined_tests, self._platform, backup)
+ else:
+ logging.info('No test was rebaselined so nothing to remove.')
+
+ def _svn_add(self, filename):
+ """Add the file to SVN repository.
+
+ Args:
+ filename: full path of the file to add.
+
+ Returns:
+ True if the file already exists in SVN or is successfully added
+ to SVN.
+ False otherwise.
+ """
+
+ if not filename:
+ return False
+
+ parent_dir, basename = os.path.split(filename)
+ if self._repo_type != REPO_SVN or parent_dir == filename:
+ logging.info("No svn checkout found, skip svn add.")
+ return True
+
+ original_dir = os.getcwd()
+ os.chdir(parent_dir)
+ status_output = run_shell(['svn', 'status', basename], False)
+ os.chdir(original_dir)
+ output = status_output.upper()
+ if output.startswith('A') or output.startswith('M'):
+ logging.info(' File already added to SVN: "%s"', filename)
+ return True
+
+ if output.find('IS NOT A WORKING COPY') >= 0:
+ logging.info(' File is not a working copy, add its parent: "%s"',
+ parent_dir)
+ return self._svn_add(parent_dir)
+
+ os.chdir(parent_dir)
+ add_output = run_shell(['svn', 'add', basename], True)
+ os.chdir(original_dir)
+ output = add_output.upper().rstrip()
+ if output.startswith('A') and output.find(basename.upper()) >= 0:
+ logging.info(' Added new file: "%s"', filename)
+ self._svn_prop_set(filename)
+ return True
+
+ if (not status_output) and (add_output.upper().find(
+ 'ALREADY UNDER VERSION CONTROL') >= 0):
+ logging.info(' File already under SVN and has no change: "%s"',
+ filename)
+ return True
+
+ logging.warn(' Failed to add file to SVN: "%s"', filename)
+ logging.warn(' Svn status output: "%s"', status_output)
+ logging.warn(' Svn add output: "%s"', add_output)
+ return False
+
+ def _svn_prop_set(self, filename):
+ """Set the baseline property
+
+ Args:
+ filename: full path of the file to add.
+
+ Returns:
+ True if the file already exists in SVN or is sucessfully added
+ to SVN.
+ False otherwise.
+ """
+ ext = os.path.splitext(filename)[1].upper()
+ if ext != '.TXT' and ext != '.PNG' and ext != '.CHECKSUM':
+ return
+
+ parent_dir, basename = os.path.split(filename)
+ original_dir = os.getcwd()
+ os.chdir(parent_dir)
+ if ext == '.PNG':
+ cmd = ['svn', 'pset', 'svn:mime-type', 'image/png', basename]
+ else:
+ cmd = ['svn', 'pset', 'svn:eol-style', 'LF', basename]
+
+ logging.debug(' Set svn prop: %s', ' '.join(cmd))
+ run_shell(cmd, False)
+ os.chdir(original_dir)
+
+ def _create_html_baseline_files(self, baseline_fullpath):
+ """Create baseline files (old, new and diff) in html directory.
+
+ The files are used to compare the rebaselining results.
+
+ Args:
+ baseline_fullpath: full path of the expected baseline file.
+ """
+
+ if not baseline_fullpath or not os.path.exists(baseline_fullpath):
+ return
+
+ # Copy the new baseline to html directory for result comparison.
+ baseline_filename = os.path.basename(baseline_fullpath)
+ new_file = get_result_file_fullpath(self._options.html_directory,
+ baseline_filename, self._platform,
+ 'new')
+ shutil.copyfile(baseline_fullpath, new_file)
+ logging.info(' Html: copied new baseline file from "%s" to "%s".',
+ baseline_fullpath, new_file)
+
+ # Get the old baseline from SVN and save to the html directory.
+ output = run_shell(['svn', 'cat', '-r', 'BASE', baseline_fullpath])
+ if (not output) or (output.upper().rstrip().endswith(
+ 'NO SUCH FILE OR DIRECTORY')):
+ logging.info(' No base file: "%s"', baseline_fullpath)
+ return
+ base_file = get_result_file_fullpath(self._options.html_directory,
+ baseline_filename, self._platform,
+ 'old')
+ f = open(base_file, 'wb')
+ f.write(output)
+ f.close()
+ logging.info(' Html: created old baseline file: "%s".',
+ base_file)
+
+ # Get the diff between old and new baselines and save to the html dir.
+ if baseline_filename.upper().endswith('.TXT'):
+ # If the user specified a custom diff command in their svn config
+ # file, then it'll be used when we do svn diff, which we don't want
+ # to happen since we want the unified diff. Using --diff-cmd=diff
+ # doesn't always work, since they can have another diff executable
+ # in their path that gives different line endings. So we use a
+ # bogus temp directory as the config directory, which gets
+ # around these problems.
+ if sys.platform.startswith("win"):
+ parent_dir = tempfile.gettempdir()
+ else:
+ parent_dir = sys.path[0] # tempdir is not secure.
+ bogus_dir = os.path.join(parent_dir, "temp_svn_config")
+ logging.debug(' Html: temp config dir: "%s".', bogus_dir)
+ if not os.path.exists(bogus_dir):
+ os.mkdir(bogus_dir)
+ delete_bogus_dir = True
+ else:
+ delete_bogus_dir = False
+
+ output = run_shell(["svn", "diff", "--config-dir", bogus_dir,
+ baseline_fullpath])
+ if output:
+ diff_file = get_result_file_fullpath(
+ self._options.html_directory, baseline_filename,
+ self._platform, 'diff')
+ f = open(diff_file, 'wb')
+ f.write(output)
+ f.close()
+ logging.info(' Html: created baseline diff file: "%s".',
+ diff_file)
+
+ if delete_bogus_dir:
+ shutil.rmtree(bogus_dir, True)
+ logging.debug(' Html: removed temp config dir: "%s".',
+ bogus_dir)
+
+
+class HtmlGenerator(object):
+ """Class to generate rebaselining result comparison html."""
+
+ HTML_REBASELINE = ('<html>'
+ '<head>'
+ '<style>'
+ 'body {font-family: sans-serif;}'
+ '.mainTable {background: #666666;}'
+ '.mainTable td , .mainTable th {background: white;}'
+ '.detail {margin-left: 10px; margin-top: 3px;}'
+ '</style>'
+ '<title>Rebaselining Result Comparison (%(time)s)'
+ '</title>'
+ '</head>'
+ '<body>'
+ '<h2>Rebaselining Result Comparison (%(time)s)</h2>'
+ '%(body)s'
+ '</body>'
+ '</html>')
+ HTML_NO_REBASELINING_TESTS = (
+ '<p>No tests found that need rebaselining.</p>')
+ HTML_TABLE_TEST = ('<table class="mainTable" cellspacing=1 cellpadding=5>'
+ '%s</table><br>')
+ HTML_TR_TEST = ('<tr>'
+ '<th style="background-color: #CDECDE; border-bottom: '
+ '1px solid black; font-size: 18pt; font-weight: bold" '
+ 'colspan="5">'
+ '<a href="%s">%s</a>'
+ '</th>'
+ '</tr>')
+ HTML_TEST_DETAIL = ('<div class="detail">'
+ '<tr>'
+ '<th width="100">Baseline</th>'
+ '<th width="100">Platform</th>'
+ '<th width="200">Old</th>'
+ '<th width="200">New</th>'
+ '<th width="150">Difference</th>'
+ '</tr>'
+ '%s'
+ '</div>')
+ HTML_TD_NOLINK = '<td align=center><a>%s</a></td>'
+ HTML_TD_LINK = '<td align=center><a href="%(uri)s">%(name)s</a></td>'
+ HTML_TD_LINK_IMG = ('<td><a href="%(uri)s">'
+ '<img style="width: 200" src="%(uri)s" /></a></td>')
+ HTML_TR = '<tr>%s</tr>'
+
+ def __init__(self, options, platforms, rebaselining_tests):
+ self._html_directory = options.html_directory
+ self._platforms = platforms
+ self._rebaselining_tests = rebaselining_tests
+ self._html_file = os.path.join(options.html_directory,
+ 'rebaseline.html')
+
+ def generate_html(self):
+ """Generate html file for rebaselining result comparison."""
+
+ logging.info('Generating html file')
+
+ html_body = ''
+ if not self._rebaselining_tests:
+ html_body += self.HTML_NO_REBASELINING_TESTS
+ else:
+ tests = list(self._rebaselining_tests)
+ tests.sort()
+
+ test_no = 1
+ for test in tests:
+ logging.info('Test %d: %s', test_no, test)
+ html_body += self._generate_html_for_one_test(test)
+
+ html = self.HTML_REBASELINE % ({'time': time.asctime(),
+ 'body': html_body})
+ logging.debug(html)
+
+ f = open(self._html_file, 'w')
+ f.write(html)
+ f.close()
+
+ logging.info('Baseline comparison html generated at "%s"',
+ self._html_file)
+
+ def show_html(self):
+ """Launch the rebaselining html in brwoser."""
+
+ logging.info('Launching html: "%s"', self._html_file)
+
+ html_uri = path_utils.filename_to_uri(self._html_file)
+ webbrowser.open(html_uri, 1)
+
+ logging.info('Html launched.')
+
+ def _generate_baseline_links(self, test_basename, suffix, platform):
+ """Generate links for baseline results (old, new and diff).
+
+ Args:
+ test_basename: base filename of the test
+ suffix: baseline file suffixes: '.txt', '.png'
+ platform: win, linux or mac
+
+ Returns:
+ html links for showing baseline results (old, new and diff)
+ """
+
+ baseline_filename = '%s-expected%s' % (test_basename, suffix)
+ logging.debug(' baseline filename: "%s"', baseline_filename)
+
+ new_file = get_result_file_fullpath(self._html_directory,
+ baseline_filename, platform, 'new')
+ logging.info(' New baseline file: "%s"', new_file)
+ if not os.path.exists(new_file):
+ logging.info(' No new baseline file: "%s"', new_file)
+ return ''
+
+ old_file = get_result_file_fullpath(self._html_directory,
+ baseline_filename, platform, 'old')
+ logging.info(' Old baseline file: "%s"', old_file)
+ if suffix == '.png':
+ html_td_link = self.HTML_TD_LINK_IMG
+ else:
+ html_td_link = self.HTML_TD_LINK
+
+ links = ''
+ if os.path.exists(old_file):
+ links += html_td_link % {
+ 'uri': path_utils.filename_to_uri(old_file),
+ 'name': baseline_filename}
+ else:
+ logging.info(' No old baseline file: "%s"', old_file)
+ links += self.HTML_TD_NOLINK % ''
+
+ links += html_td_link % {'uri': path_utils.filename_to_uri(new_file),
+ 'name': baseline_filename}
+
+ diff_file = get_result_file_fullpath(self._html_directory,
+ baseline_filename, platform,
+ 'diff')
+ logging.info(' Baseline diff file: "%s"', diff_file)
+ if os.path.exists(diff_file):
+ links += html_td_link % {'uri': path_utils.filename_to_uri(
+ diff_file), 'name': 'Diff'}
+ else:
+ logging.info(' No baseline diff file: "%s"', diff_file)
+ links += self.HTML_TD_NOLINK % ''
+
+ return links
+
+ def _generate_html_for_one_test(self, test):
+ """Generate html for one rebaselining test.
+
+ Args:
+ test: layout test name
+
+ Returns:
+ html that compares baseline results for the test.
+ """
+
+ test_basename = os.path.basename(os.path.splitext(test)[0])
+ logging.info(' basename: "%s"', test_basename)
+ rows = []
+ for suffix in BASELINE_SUFFIXES:
+ if suffix == '.checksum':
+ continue
+
+ logging.info(' Checking %s files', suffix)
+ for platform in self._platforms:
+ links = self._generate_baseline_links(test_basename, suffix,
+ platform)
+ if links:
+ row = self.HTML_TD_NOLINK % self._get_baseline_result_type(
+ suffix)
+ row += self.HTML_TD_NOLINK % platform
+ row += links
+ logging.debug(' html row: %s', row)
+
+ rows.append(self.HTML_TR % row)
+
+ if rows:
+ test_path = os.path.join(path_utils.layout_tests_dir(), test)
+ html = self.HTML_TR_TEST % (path_utils.filename_to_uri(test_path),
+ test)
+ html += self.HTML_TEST_DETAIL % ' '.join(rows)
+
+ logging.debug(' html for test: %s', html)
+ return self.HTML_TABLE_TEST % html
+
+ return ''
+
+ def _get_baseline_result_type(self, suffix):
+ """Name of the baseline result type."""
+
+ if suffix == '.png':
+ return 'Pixel'
+ elif suffix == '.txt':
+ return 'Render Tree'
+ else:
+ return 'Other'
+
+
+def main():
+ """Main function to produce new baselines."""
+
+ option_parser = optparse.OptionParser()
+ option_parser.add_option('-v', '--verbose',
+ action='store_true',
+ default=False,
+ help='include debug-level logging.')
+
+ option_parser.add_option('-p', '--platforms',
+ default='mac,win,win-xp,win-vista,linux',
+ help=('Comma delimited list of platforms '
+ 'that need rebaselining.'))
+
+ option_parser.add_option('-u', '--archive_url',
+ default=('http://build.chromium.org/buildbot/'
+ 'layout_test_results'),
+ help=('Url to find the layout test result archive'
+ ' file.'))
+
+ option_parser.add_option('-w', '--webkit_canary',
+ action='store_true',
+ default=False,
+ help=('If True, pull baselines from webkit.org '
+ 'canary bot.'))
+
+ option_parser.add_option('-b', '--backup',
+ action='store_true',
+ default=False,
+ help=('Whether or not to backup the original test'
+ ' expectations file after rebaseline.'))
+
+ option_parser.add_option('-d', '--html_directory',
+ default='',
+ help=('The directory that stores the results for'
+ ' rebaselining comparison.'))
+
+ options = option_parser.parse_args()[0]
+
+ # Set up our logging format.
+ log_level = logging.INFO
+ if options.verbose:
+ log_level = logging.DEBUG
+ logging.basicConfig(level=log_level,
+ format=('%(asctime)s %(filename)s:%(lineno)-3d '
+ '%(levelname)s %(message)s'),
+ datefmt='%y%m%d %H:%M:%S')
+
+ # Verify 'platforms' option is valid
+ if not options.platforms:
+ logging.error('Invalid "platforms" option. --platforms must be '
+ 'specified in order to rebaseline.')
+ sys.exit(1)
+ platforms = [p.strip().lower() for p in options.platforms.split(',')]
+ for platform in platforms:
+ if not platform in REBASELINE_PLATFORM_ORDER:
+ logging.error('Invalid platform: "%s"' % (platform))
+ sys.exit(1)
+
+ # Adjust the platform order so the rebaseline tool runs in the order
+ # 'mac', 'win' and 'linux'. This matches the layout test baseline search
+ # paths and simplifies how the tool detects duplicate baselines. See the
+ # _is_dup_baseline method for details.
+ rebaseline_platforms = []
+ for platform in REBASELINE_PLATFORM_ORDER:
+ if platform in platforms:
+ rebaseline_platforms.append(platform)
+
+ options.html_directory = setup_html_directory(options.html_directory)
+
+ rebaselining_tests = set()
+ backup = options.backup
+ for platform in rebaseline_platforms:
+ rebaseliner = Rebaseliner(platform, options)
+
+ logging.info('')
+ log_dashed_string('Rebaseline started', platform)
+ if rebaseliner.run(backup):
+ # Only one backup of the original test expectations file is needed.
+ backup = False
+ log_dashed_string('Rebaseline done', platform)
+ else:
+ log_dashed_string('Rebaseline failed', platform, logging.ERROR)
+
+ rebaselining_tests |= set(rebaseliner.get_rebaselining_tests())
+
+ logging.info('')
+ log_dashed_string('Rebaselining result comparison started', None)
+ html_generator = HtmlGenerator(options,
+ rebaseline_platforms,
+ rebaselining_tests)
+ html_generator.generate_html()
+ html_generator.show_html()
+ log_dashed_string('Rebaselining result comparison done', None)
+
+ sys.exit(0)
+
+if '__main__' == __name__:
+ main()
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py
new file mode 100755
index 0000000..88b97f8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/run_chromium_webkit_tests.py
@@ -0,0 +1,1697 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Run layout tests using the test_shell.
+
+This is a port of the existing webkit test script run-webkit-tests.
+
+The TestRunner class runs a series of tests (TestType interface) against a set
+of test files. If a test file fails a TestType, it returns a list TestFailure
+objects to the TestRunner. The TestRunner then aggregates the TestFailures to
+create a final report.
+
+This script reads several files from the test_lists subdirectory next to this
+script, if they exist. Each should contain a list of paths to individual
+tests or entire subdirectories of tests, relative to the outermost test
+directory. Entire lines starting with '//' (comments) will be ignored.
+
+For details of the files' contents and purposes, see test_lists/README.
+"""
+
+import errno
+import glob
+import logging
+import math
+import optparse
+import os
+import Queue
+import random
+import re
+import shutil
+import subprocess
+import sys
+import time
+import traceback
+
+from layout_package import apache_http_server
+from layout_package import test_expectations
+from layout_package import http_server
+from layout_package import json_layout_results_generator
+from layout_package import metered_stream
+from layout_package import path_utils
+from layout_package import platform_utils
+from layout_package import test_failures
+from layout_package import test_shell_thread
+from layout_package import test_files
+from layout_package import websocket_server
+from test_types import fuzzy_image_diff
+from test_types import image_diff
+from test_types import test_type_base
+from test_types import text_diff
+
+sys.path.append(path_utils.path_from_base('third_party'))
+import simplejson
+
+# Indicates that we want detailed progress updates in the output (prints
+# directory-by-directory feedback).
+LOG_DETAILED_PROGRESS = 'detailed-progress'
+
+# Log any unexpected results while running (instead of just at the end).
+LOG_UNEXPECTED = 'unexpected'
+
+# Builder base URL where we have the archived test results.
+BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
+
+TestExpectationsFile = test_expectations.TestExpectationsFile
+
+
+class TestInfo:
+ """Groups information about a test for easy passing of data."""
+
+ def __init__(self, filename, timeout):
+ """Generates the URI and stores the filename and timeout for this test.
+ Args:
+ filename: Full path to the test.
+ timeout: Timeout for running the test in TestShell.
+ """
+ self.filename = filename
+ self.uri = path_utils.filename_to_uri(filename)
+ self.timeout = timeout
+ expected_hash_file = path_utils.expected_filename(filename,
+ '.checksum')
+ try:
+ self.image_hash = open(expected_hash_file, "r").read()
+ except IOError, e:
+ if errno.ENOENT != e.errno:
+ raise
+ self.image_hash = None
+
+
+class ResultSummary(object):
+ """A class for partitioning the test results we get into buckets.
+
+ This class is basically a glorified struct and it's private to this file
+ so we don't bother with any information hiding."""
+
+ def __init__(self, expectations, test_files):
+ self.total = len(test_files)
+ self.remaining = self.total
+ self.expectations = expectations
+ self.expected = 0
+ self.unexpected = 0
+ self.tests_by_expectation = {}
+ self.tests_by_timeline = {}
+ self.results = {}
+ self.unexpected_results = {}
+ self.failures = {}
+ self.tests_by_expectation[test_expectations.SKIP] = set()
+ for expectation in TestExpectationsFile.EXPECTATIONS.values():
+ self.tests_by_expectation[expectation] = set()
+ for timeline in TestExpectationsFile.TIMELINES.values():
+ self.tests_by_timeline[timeline] = (
+ expectations.get_tests_with_timeline(timeline))
+
+ def add(self, test, failures, result, expected):
+ """Add a result into the appropriate bin.
+
+ Args:
+ test: test file name
+ failures: list of failure objects from test execution
+ result: result of test (PASS, IMAGE, etc.).
+ expected: whether the result was what we expected it to be.
+ """
+
+ self.tests_by_expectation[result].add(test)
+ self.results[test] = result
+ self.remaining -= 1
+ if len(failures):
+ self.failures[test] = failures
+ if expected:
+ self.expected += 1
+ else:
+ self.unexpected_results[test] = result
+ self.unexpected += 1
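+
+ # For example (illustrative), skipped tests are recorded later in this
+ # file as:
+ #   result_summary.add(test, [], test_expectations.SKIP, expected=True)
+ # which bumps the expected count without recording any failures.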
+
+
+class TestRunner:
+ """A class for managing running a series of tests on a series of layout
+ test files."""
+
+ HTTP_SUBDIR = os.sep.join(['', 'http', ''])
+ WEBSOCKET_SUBDIR = os.sep.join(['', 'websocket', ''])
+
+ # The per-test timeout in milliseconds, if no --time-out-ms option was
+ # given to run_webkit_tests. This should correspond to the default timeout
+ # in test_shell.exe.
+ DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
+
+ NUM_RETRY_ON_UNEXPECTED_FAILURE = 1
+
+ def __init__(self, options, meter):
+ """Initialize test runner data structures.
+
+ Args:
+ options: a dictionary of command line options
+ meter: a MeteredStream object to record updates to.
+ """
+ self._options = options
+ self._meter = meter
+
+ if options.use_apache:
+ self._http_server = apache_http_server.LayoutTestApacheHttpd(
+ options.results_directory)
+ else:
+ self._http_server = http_server.Lighttpd(options.results_directory)
+
+ self._websocket_server = websocket_server.PyWebSocket(
+ options.results_directory)
+ # disable wss server. need to install pyOpenSSL on buildbots.
+ # self._websocket_secure_server = websocket_server.PyWebSocket(
+ # options.results_directory, use_tls=True, port=9323)
+
+ # a list of TestType objects
+ self._test_types = []
+
+ # a set of test files, and the same tests as a list
+ self._test_files = set()
+ self._test_files_list = None
+ self._file_dir = path_utils.path_from_base('webkit', 'tools',
+ 'layout_tests')
+ self._result_queue = Queue.Queue()
+
+ # These are used for --log detailed-progress to track status by
+ # directory.
+ self._current_dir = None
+ self._current_progress_str = ""
+ self._current_test_number = 0
+
+ def __del__(self):
+ logging.debug("flushing stdout")
+ sys.stdout.flush()
+ logging.debug("flushing stderr")
+ sys.stderr.flush()
+ logging.debug("stopping http server")
+ # Stop the http server.
+ self._http_server.stop()
+ # Stop the Web Socket / Web Socket Secure servers.
+ self._websocket_server.stop()
+ # self._websocket_secure_server.Stop()
+
+ def gather_file_paths(self, paths):
+ """Find all the files to test.
+
+ Args:
+ paths: a list of globs to use instead of the defaults."""
+ self._test_files = test_files.gather_test_files(paths)
+
+ def parse_expectations(self, platform, is_debug_mode):
+ """Parse the expectations from the test_list files and return a data
+ structure holding them. Throws an error if the test_list files have
+ invalid syntax."""
+ if self._options.lint_test_files:
+ test_files = None
+ else:
+ test_files = self._test_files
+
+ try:
+ self._expectations = test_expectations.TestExpectations(test_files,
+ self._file_dir, platform, is_debug_mode,
+ self._options.lint_test_files)
+ return self._expectations
+ except Exception, err:
+ if self._options.lint_test_files:
+ print str(err)
+ else:
+ raise err
+
+ def prepare_lists_and_print_output(self, write):
+ """Create appropriate subsets of test lists and returns a
+ ResultSummary object. Also prints expected test counts.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ """
+
+ # Remove skipped - both fixable and ignored - files from the
+ # top-level list of files to test.
+ num_all_test_files = len(self._test_files)
+ write("Found: %d tests" % (len(self._test_files)))
+ skipped = set()
+ if num_all_test_files > 1 and not self._options.force:
+ skipped = self._expectations.get_tests_with_result_type(
+ test_expectations.SKIP)
+ self._test_files -= skipped
+
+ # Create a sorted list of test files so the subset chunk,
+ # if used, contains alphabetically consecutive tests.
+ self._test_files_list = list(self._test_files)
+ if self._options.randomize_order:
+ random.shuffle(self._test_files_list)
+ else:
+ self._test_files_list.sort()
+
+ # If the user specifies they just want to run a subset of the tests,
+ # just grab a subset of the non-skipped tests.
+ if self._options.run_chunk or self._options.run_part:
+ chunk_value = self._options.run_chunk or self._options.run_part
+ test_files = self._test_files_list
+ try:
+ (chunk_num, chunk_len) = chunk_value.split(":")
+ chunk_num = int(chunk_num)
+ assert(chunk_num >= 0)
+ test_size = int(chunk_len)
+ assert(test_size > 0)
+ except:
+ logging.critical("invalid chunk '%s'" % chunk_value)
+ sys.exit(1)
+
+ # Get the number of tests
+ num_tests = len(test_files)
+
+ # Get the start offset of the slice.
+ if self._options.run_chunk:
+ chunk_len = test_size
+ # In this case chunk_num can be really large. We need
+ # to make the slice fit within the current number of tests.
+ slice_start = (chunk_num * chunk_len) % num_tests
+ else:
+ # Validate the data.
+ assert(test_size <= num_tests)
+ assert(chunk_num <= test_size)
+
+ # To compute chunk_len without skipping any tests, round
+ # the test count up to the next value that divides evenly
+ # into all the parts.
+ rounded_tests = num_tests
+ if rounded_tests % test_size != 0:
+ rounded_tests = (num_tests + test_size -
+ (num_tests % test_size))
+
+ chunk_len = rounded_tests / test_size
+ slice_start = chunk_len * (chunk_num - 1)
+ # It does not matter if we go past test_size.
+
+ # Get the end offset of the slice.
+ slice_end = min(num_tests, slice_start + chunk_len)
+
+ files = test_files[slice_start:slice_end]
+
+ tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % (
+ (slice_end - slice_start), slice_start, slice_end, num_tests)
+ write(tests_run_msg)
+
+ # If we reached the end and we don't have enough tests, we run some
+ # from the beginning.
+ if (self._options.run_chunk and
+ (slice_end - slice_start < chunk_len)):
+ extra = 1 + chunk_len - (slice_end - slice_start)
+ extra_msg = (' last chunk is partial, appending [0:%d]' %
+ extra)
+ write(extra_msg)
+ tests_run_msg += "\n" + extra_msg
+ files.extend(test_files[0:extra])
+ tests_run_filename = os.path.join(self._options.results_directory,
+ "tests_run.txt")
+ tests_run_file = open(tests_run_filename, "w")
+ tests_run_file.write(tests_run_msg + "\n")
+ tests_run_file.close()
+
+ len_skip_chunk = int(len(files) * len(skipped) /
+ float(len(self._test_files)))
+ skip_chunk_list = list(skipped)[0:len_skip_chunk]
+ skip_chunk = set(skip_chunk_list)
+
+ # Update expectations so that the stats are calculated correctly.
+ # We need to pass a list that includes the right # of skipped files
+ # to ParseExpectations so that ResultSummary() will get the correct
+ # stats. So, we add in the subset of skipped files, and then
+ # subtract them back out.
+ self._test_files_list = files + skip_chunk_list
+ self._test_files = set(self._test_files_list)
+
+ self._expectations = self.parse_expectations(
+ path_utils.platform_name(), self._options.target == 'Debug')
+
+ self._test_files = set(files)
+ self._test_files_list = files
+ else:
+ skip_chunk = skipped
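+
+ # Worked example of the chunk arithmetic above (illustrative): with 10
+ # tests and a run_part value of '2:3', test_size is 3, rounded_tests
+ # is 12, chunk_len is 4 and slice_start is 4, so tests[4:8] make up
+ # part 2.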
+
+ result_summary = ResultSummary(self._expectations,
+ self._test_files | skip_chunk)
+ self._print_expected_results_of_type(write, result_summary,
+ test_expectations.PASS, "passes")
+ self._print_expected_results_of_type(write, result_summary,
+ test_expectations.FAIL, "failures")
+ self._print_expected_results_of_type(write, result_summary,
+ test_expectations.FLAKY, "flaky")
+ self._print_expected_results_of_type(write, result_summary,
+ test_expectations.SKIP, "skipped")
+
+ if self._options.force:
+ write('Running all tests, including skips (--force)')
+ else:
+ # Note that we don't actually run the skipped tests (they were
+ # subtracted out of self._test_files, above), but we stub out the
+ # results here so the statistics can remain accurate.
+ for test in skip_chunk:
+ result_summary.add(test, [], test_expectations.SKIP,
+ expected=True)
+ write("")
+
+ return result_summary
+
+ def add_test_type(self, test_type):
+ """Add a TestType to the TestRunner."""
+ self._test_types.append(test_type)
+
+ def _get_dir_for_test_file(self, test_file):
+ """Returns the highest-level directory by which to shard the given
+ test file."""
+ index = test_file.rfind(os.sep + 'LayoutTests' + os.sep)
+
+ test_file = test_file[index + len('LayoutTests/'):]
+ test_file_parts = test_file.split(os.sep, 1)
+ directory = test_file_parts[0]
+ test_file = test_file_parts[1]
+
+ # The http tests are very stable on mac/linux.
+ # TODO(ojan): Make the http server on Windows be apache so we can
+ # shard the http tests there as well. Switching to apache is
+ # what made them stable on linux/mac.
+ return_value = directory
+ while ((directory != 'http' or sys.platform in ('darwin', 'linux2'))
+ and test_file.find(os.sep) >= 0):
+ test_file_parts = test_file.split(os.sep, 1)
+ directory = test_file_parts[0]
+ return_value = os.path.join(return_value, directory)
+ test_file = test_file_parts[1]
+
+ return return_value
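+
+ # For example (illustrative), on mac/linux a test under
+ # LayoutTests/http/tests/xmlhttprequest/ is sharded as
+ # 'http/tests/xmlhttprequest', while on Windows everything under
+ # 'http' stays in a single shard.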
+
+ def _get_test_info_for_file(self, test_file):
+ """Returns the appropriate TestInfo object for the file. Mostly this
+ is used for looking up the timeout value (in ms) to use for the given
+ test."""
+ if self._expectations.has_modifier(test_file, test_expectations.SLOW):
+ return TestInfo(test_file, self._options.slow_time_out_ms)
+ return TestInfo(test_file, self._options.time_out_ms)
+
+ def _get_test_file_queue(self, test_files):
+ """Create the thread safe queue of lists of (test filenames, test URIs)
+ tuples. Each TestShellThread pulls a list from this queue and runs
+ those tests in order before grabbing the next available list.
+
+ Shard the lists by directory. This helps ensure that tests that depend
+ on each other (aka bad tests!) continue to run together as most
+ cross-tests dependencies tend to occur within the same directory.
+
+ Return:
+ The Queue of lists of TestInfo objects.
+ """
+
+ if (self._options.experimental_fully_parallel or
+ self._is_single_threaded()):
+ filename_queue = Queue.Queue()
+ for test_file in test_files:
+ filename_queue.put(
+ ('.', [self._get_test_info_for_file(test_file)]))
+ return filename_queue
+
+ tests_by_dir = {}
+ for test_file in test_files:
+ directory = self._get_dir_for_test_file(test_file)
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(
+ self._get_test_info_for_file(test_file))
+
+ # Sort by the number of tests in the dir so that the ones with the
+ # most tests get run first in order to maximize parallelization.
+ # Number of tests is a good enough, but not perfect, approximation
+ # of how long that set of tests will take to run. We can't just use
+ # a PriorityQueue until we move to Python 2.6.
+ test_lists = []
+ http_tests = None
+ for directory in tests_by_dir:
+ test_list = tests_by_dir[directory]
+ # Keep the tests in alphabetical order.
+ # TODO: Remove once tests are fixed so they can be run in any
+ # order.
+ test_list.reverse()
+ test_list_tuple = (directory, test_list)
+ if directory == 'LayoutTests' + os.sep + 'http':
+ http_tests = test_list_tuple
+ else:
+ test_lists.append(test_list_tuple)
+ test_lists.sort(lambda a, b: cmp(len(b[1]), len(a[1])))
+
+ # Put the http tests first. There are only a couple hundred of them,
+ # but each http test takes a very long time to run, so sorting by the
+ # number of tests doesn't accurately capture how long they take to run.
+ if http_tests:
+ test_lists.insert(0, http_tests)
+
+ filename_queue = Queue.Queue()
+ for item in test_lists:
+ filename_queue.put(item)
+ return filename_queue
+
+ def _get_test_shell_args(self, index):
+ """Returns the tuple of arguments for tests and for test_shell."""
+ shell_args = []
+ test_args = test_type_base.TestArguments()
+ if not self._options.no_pixel_tests:
+ png_path = os.path.join(self._options.results_directory,
+ "png_result%s.png" % index)
+ shell_args.append("--pixel-tests=" + png_path)
+ test_args.png_path = png_path
+
+ test_args.new_baseline = self._options.new_baseline
+
+ test_args.show_sources = self._options.sources
+
+ if self._options.startup_dialog:
+ shell_args.append('--testshell-startup-dialog')
+
+ if self._options.gp_fault_error_box:
+ shell_args.append('--gp-fault-error-box')
+
+ return (test_args, shell_args)
+
+ def _contains_tests(self, subdir):
+ for test_file in self._test_files_list:
+ if test_file.find(subdir) >= 0:
+ return True
+ return False
+
+ def _instantiate_test_shell_threads(self, test_shell_binary, test_files,
+ result_summary):
+ """Instantitates and starts the TestShellThread(s).
+
+ Return:
+ The list of threads.
+ """
+ test_shell_command = [test_shell_binary]
+
+ if self._options.wrapper:
+ # This split() isn't really what we want -- it will incorrectly
+ # split quoted strings within the wrapper argument -- but in
+ # practice it shouldn't come up and the --help output warns
+ # about it anyway.
+ test_shell_command = (self._options.wrapper.split() +
+ test_shell_command)
+
+ filename_queue = self._get_test_file_queue(test_files)
+
+ # Instantiate TestShellThreads and start them.
+ threads = []
+ for i in xrange(int(self._options.num_test_shells)):
+ # Create separate TestTypes instances for each thread.
+ test_types = []
+ for t in self._test_types:
+ test_types.append(t(self._options.platform,
+ self._options.results_directory))
+
+ test_args, shell_args = self._get_test_shell_args(i)
+ thread = test_shell_thread.TestShellThread(filename_queue,
+ self._result_queue,
+ test_shell_command,
+ test_types,
+ test_args,
+ shell_args,
+ self._options)
+ if self._is_single_threaded():
+ thread.run_in_main_thread(self, result_summary)
+ else:
+ thread.start()
+ threads.append(thread)
+
+ return threads
+
+ def _stop_layout_test_helper(self, proc):
+ """Stop the layout test helper and closes it down."""
+ if proc:
+ logging.debug("Stopping layout test helper")
+ proc.stdin.write("x\n")
+ proc.stdin.close()
+ proc.wait()
+
+ def _is_single_threaded(self):
+ """Returns whether we should run all the tests in the main thread."""
+ return int(self._options.num_test_shells) == 1
+
+ def _run_tests(self, test_shell_binary, file_list, result_summary):
+ """Runs the tests in the file_list.
+
+ Return: A tuple (failures, thread_timings, test_timings,
+ individual_test_timings)
+ failures is a map from test to list of failure types
+ thread_timings is a list of dicts with the total runtime
+ of each thread with 'name', 'num_tests', 'total_time' properties
+ test_timings is a list of timings for each sharded subdirectory
+ of the form [time, directory_name, num_tests]
+ individual_test_timings is a list of run times for each test
+ in the form {filename:filename, test_run_time:test_run_time}
+ result_summary: summary object to populate with the results
+ """
+ threads = self._instantiate_test_shell_threads(test_shell_binary,
+ file_list,
+ result_summary)
+
+ # Wait for the threads to finish and collect test failures.
+ failures = {}
+ test_timings = {}
+ individual_test_timings = []
+ thread_timings = []
+ try:
+ for thread in threads:
+ while thread.isAlive():
+ # Let it timeout occasionally so it can notice a
+ # KeyboardInterrupt. Actually, the timeout doesn't
+ # really matter: apparently it suffices to not use
+ # an indefinite blocking join for it to
+ # be interruptible by KeyboardInterrupt.
+ thread.join(0.1)
+ self.update_summary(result_summary)
+ thread_timings.append({'name': thread.getName(),
+ 'num_tests': thread.get_num_tests(),
+ 'total_time': thread.get_total_time()})
+ test_timings.update(thread.get_directory_timing_stats())
+ individual_test_timings.extend(
+ thread.get_individual_test_stats())
+ except KeyboardInterrupt:
+ for thread in threads:
+ thread.cancel()
+                self._stop_layout_test_helper(self._layout_test_helper_proc)
+ raise
+ for thread in threads:
+ # Check whether a TestShellThread died before normal completion.
+ exception_info = thread.get_exception_info()
+ if exception_info is not None:
+ # Re-raise the thread's exception here to make it clear that
+ # testing was aborted. Otherwise, the tests that did not run
+ # would be assumed to have passed.
+ raise exception_info[0], exception_info[1], exception_info[2]
+
+ # Make sure we pick up any remaining tests.
+ self.update_summary(result_summary)
+ return (thread_timings, test_timings, individual_test_timings)
+
+ def run(self, result_summary):
+ """Run all our tests on all our test files.
+
+ For each test file, we run each test type. If there are any failures,
+ we collect them for reporting.
+
+ Args:
+ result_summary: a summary object tracking the test results.
+
+ Return:
+ We return nonzero if there are regressions compared to the last run.
+ """
+ if not self._test_files:
+ return 0
+ start_time = time.time()
+ test_shell_binary = path_utils.test_shell_path(self._options.target)
+
+        # Start up any helper needed. Keep a reference on self so that
+        # _run_tests() can shut the helper down if the run is interrupted.
+        self._layout_test_helper_proc = None
+ if not self._options.no_pixel_tests:
+ helper_path = path_utils.layout_test_helper_path(
+ self._options.target)
+ if len(helper_path):
+ logging.debug("Starting layout helper %s" % helper_path)
+                self._layout_test_helper_proc = subprocess.Popen(
+                    [helper_path], stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE, stderr=None)
+                is_ready = self._layout_test_helper_proc.stdout.readline()
+ if not is_ready.startswith('ready'):
+ logging.error("layout_test_helper failed to be ready")
+
+ # Check that the system dependencies (themes, fonts, ...) are correct.
+ if not self._options.nocheck_sys_deps:
+ proc = subprocess.Popen([test_shell_binary,
+ "--check-layout-test-sys-deps"])
+ if proc.wait() != 0:
+ logging.info("Aborting because system dependencies check "
+ "failed.\n To override, invoke with "
+ "--nocheck-sys-deps")
+ sys.exit(1)
+
+ if self._contains_tests(self.HTTP_SUBDIR):
+ self._http_server.start()
+
+ if self._contains_tests(self.WEBSOCKET_SUBDIR):
+ self._websocket_server.start()
+ # self._websocket_secure_server.Start()
+
+ thread_timings, test_timings, individual_test_timings = (
+ self._run_tests(test_shell_binary, self._test_files_list,
+ result_summary))
+
+ # We exclude the crashes from the list of results to retry, because
+ # we want to treat even a potentially flaky crash as an error.
+ failures = self._get_failures(result_summary, include_crashes=False)
+ retries = 0
+ retry_summary = result_summary
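+        # Retry the unexpected failures to separate flaky tests from real
+        # regressions; crashes are always treated as regressions.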
+ while (retries < self.NUM_RETRY_ON_UNEXPECTED_FAILURE and
+ len(failures)):
+ logging.debug("Retrying %d unexpected failure(s)" % len(failures))
+ retries += 1
+ retry_summary = ResultSummary(self._expectations, failures.keys())
+ self._run_tests(test_shell_binary, failures.keys(), retry_summary)
+ failures = self._get_failures(retry_summary, include_crashes=True)
+
+        self._stop_layout_test_helper(self._layout_test_helper_proc)
+ end_time = time.time()
+
+ write = create_logging_writer(self._options, 'timing')
+ self._print_timing_statistics(write, end_time - start_time,
+ thread_timings, test_timings,
+ individual_test_timings,
+ result_summary)
+
+ self._meter.update("")
+
+ if self._options.verbose:
+ # We write this block to stdout for compatibility with the
+ # buildbot log parser, which only looks at stdout, not stderr :(
+ write = lambda s: sys.stdout.write("%s\n" % s)
+ else:
+ write = create_logging_writer(self._options, 'actual')
+
+ self._print_result_summary(write, result_summary)
+
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ if (LOG_DETAILED_PROGRESS in self._options.log or
+ (LOG_UNEXPECTED in self._options.log and
+ result_summary.total != result_summary.expected)):
+ print
+
+ # This summary data gets written to stdout regardless of log level
+ self._print_one_line_summary(result_summary.total,
+ result_summary.expected)
+
+ unexpected_results = self._summarize_unexpected_results(result_summary,
+ retry_summary)
+ self._print_unexpected_results(unexpected_results)
+
+ # Write the same data to log files.
+ self._write_json_files(unexpected_results, result_summary,
+ individual_test_timings)
+
+ # Write the summary to disk (results.html) and maybe open the
+ # test_shell to this file.
+ wrote_results = self._write_results_html_file(result_summary)
+ if not self._options.noshow_results and wrote_results:
+ self._show_results_html_file()
+
+ # Ignore flaky failures and unexpected passes so we don't turn the
+ # bot red for those.
+ return unexpected_results['num_regressions']
+
+ def update_summary(self, result_summary):
+ """Update the summary while running tests."""
+ while True:
+ try:
+ (test, fail_list) = self._result_queue.get_nowait()
+ result = test_failures.determine_result_type(fail_list)
+ expected = self._expectations.matches_an_expected_result(test,
+ result)
+ result_summary.add(test, fail_list, result, expected)
+ if (LOG_DETAILED_PROGRESS in self._options.log and
+ (self._options.experimental_fully_parallel or
+ self._is_single_threaded())):
+ self._display_detailed_progress(result_summary)
+ else:
+ if not expected and LOG_UNEXPECTED in self._options.log:
+ self._print_unexpected_test_result(test, result)
+ self._display_one_line_progress(result_summary)
+ except Queue.Empty:
+ return
+
+ def _display_one_line_progress(self, result_summary):
+ """Displays the progress through the test run."""
+ self._meter.update("Testing: %d ran as expected, %d didn't, %d left" %
+ (result_summary.expected, result_summary.unexpected,
+ result_summary.remaining))
+
+ def _display_detailed_progress(self, result_summary):
+ """Display detailed progress output where we print the directory name
+ and one dot for each completed test. This is triggered by
+ "--log detailed-progress"."""
+ if self._current_test_number == len(self._test_files_list):
+ return
+
+ next_test = self._test_files_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ path_utils.relative_test_filename(next_test))
+ if self._current_progress_str == "":
+ self._current_progress_str = "%s: " % (next_dir)
+ self._current_dir = next_dir
+
+ while next_test in result_summary.results:
+ if next_dir != self._current_dir:
+ self._meter.write("%s\n" % (self._current_progress_str))
+ self._current_progress_str = "%s: ." % (next_dir)
+ self._current_dir = next_dir
+ else:
+ self._current_progress_str += "."
+
+ if (next_test in result_summary.unexpected_results and
+ LOG_UNEXPECTED in self._options.log):
+ result = result_summary.unexpected_results[next_test]
+ self._meter.write("%s\n" % self._current_progress_str)
+ self._print_unexpected_test_result(next_test, result)
+ self._current_progress_str = "%s: " % self._current_dir
+
+ self._current_test_number += 1
+ if self._current_test_number == len(self._test_files_list):
+ break
+
+ next_test = self._test_files_list[self._current_test_number]
+ next_dir = os.path.dirname(
+ path_utils.relative_test_filename(next_test))
+
+ if result_summary.remaining:
+ remain_str = " (%d)" % (result_summary.remaining)
+ self._meter.update("%s%s" %
+ (self._current_progress_str, remain_str))
+ else:
+ self._meter.write("%s\n" % (self._current_progress_str))
+
+ def _get_failures(self, result_summary, include_crashes):
+ """Filters a dict of results and returns only the failures.
+
+ Args:
+ result_summary: the results of the test run
+ include_crashes: whether crashes are included in the output.
+ We use False when finding the list of failures to retry
+ to see if the results were flaky. Although the crashes may also be
+ flaky, we treat them as if they aren't so that they're not ignored.
+ Returns:
+ a dict of files -> results
+ """
+ failed_results = {}
+ for test, result in result_summary.unexpected_results.iteritems():
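+            # 'and' binds tighter than 'or': unexpected passes are always
+            # skipped, and crashes are skipped only when include_crashes
+            # is False.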
+ if (result == test_expectations.PASS or
+ result == test_expectations.CRASH and not include_crashes):
+ continue
+ failed_results[test] = result
+
+ return failed_results
+
+ def _summarize_unexpected_results(self, result_summary, retry_summary):
+ """Summarize any unexpected results as a dict.
+
+ TODO(dpranke): split this data structure into a separate class?
+
+ Args:
+ result_summary: summary object from initial test runs
+ retry_summary: summary object from final test run of retried tests
+ Returns:
+ A dictionary containing a summary of the unexpected results from the
+ run, with the following fields:
+ 'version': a version indicator (1 in this version)
+ 'fixable': # of fixable tests (NOW - PASS)
+ 'skipped': # of skipped tests (NOW & SKIPPED)
+ 'num_regressions': # of non-flaky failures
+ 'num_flaky': # of flaky failures
+ 'num_passes': # of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 1
+
+ tbe = result_summary.tests_by_expectation
+ tbt = result_summary.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] -
+ tbe[test_expectations.PASS])
+ results['skipped'] = len(tbt[test_expectations.NOW] &
+ tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_regressions = 0
+ keywords = {}
+ for k, v in TestExpectationsFile.EXPECTATIONS.iteritems():
+ keywords[v] = k.upper()
+
+ tests = {}
+ for filename, result in result_summary.unexpected_results.iteritems():
+ # Note that if a test crashed in the original run, we ignore
+ # whether or not it crashed when we retried it (if we retried it),
+ # and always consider the result not flaky.
+ test = path_utils.relative_test_filename(filename)
+ expected = self._expectations.get_expectations_string(filename)
+ actual = [keywords[result]]
+
+ if result == test_expectations.PASS:
+ num_passes += 1
+ elif result == test_expectations.CRASH:
+ num_regressions += 1
+ else:
+ if filename not in retry_summary.unexpected_results:
+ actual.extend(
+ self._expectations.get_expectations_string(
+ filename).split(" "))
+ num_flaky += 1
+ else:
+ retry_result = retry_summary.unexpected_results[filename]
+ if result != retry_result:
+ actual.append(keywords[retry_result])
+ num_flaky += 1
+ else:
+ num_regressions += 1
+
+ tests[test] = {}
+ tests[test]['expected'] = expected
+ tests[test]['actual'] = " ".join(actual)
+
+ results['tests'] = tests
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ results['num_regressions'] = num_regressions
+
+ return results
+
+ def _write_json_files(self, unexpected_results, result_summary,
+ individual_test_timings):
+ """Writes the results of the test run as JSON files into the results
+ dir.
+
+ There are three different files written into the results dir:
+ unexpected_results.json: A short list of any unexpected results.
+ This is used by the buildbots to display results.
+ expectations.json: This is used by the flakiness dashboard.
+ results.json: A full list of the results - used by the flakiness
+ dashboard and the aggregate results dashboard.
+
+ Args:
+ unexpected_results: dict of unexpected results
+ result_summary: full summary object
+ individual_test_timings: list of test times (used by the flakiness
+ dashboard).
+ """
+ logging.debug("Writing JSON files in %s." %
+ self._options.results_directory)
+ unexpected_file = open(os.path.join(self._options.results_directory,
+ "unexpected_results.json"), "w")
+ unexpected_file.write(simplejson.dumps(unexpected_results,
+ sort_keys=True, indent=2))
+ unexpected_file.close()
+
+ # Write a json file of the test_expectations.txt file for the layout
+ # tests dashboard.
+ expectations_file = open(os.path.join(self._options.results_directory,
+ "expectations.json"), "w")
+ expectations_json = \
+ self._expectations.get_expectations_json_for_all_platforms()
+ expectations_file.write("ADD_EXPECTATIONS(" + expectations_json + ");")
+ expectations_file.close()
+
+ json_layout_results_generator.JSONLayoutResultsGenerator(
+ self._options.builder_name, self._options.build_name,
+ self._options.build_number, self._options.results_directory,
+ BUILDER_BASE_URL, individual_test_timings,
+ self._expectations, result_summary, self._test_files_list)
+
+ logging.debug("Finished writing JSON files.")
+
+ def _print_expected_results_of_type(self, write, result_summary,
+ result_type, result_type_str):
+ """Print the number of the tests in a given result class.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ result_summary - the object containing all the results to report on
+ result_type - the particular result type to report in the summary.
+ result_type_str - a string description of the result_type.
+ """
+ tests = self._expectations.get_tests_with_result_type(result_type)
+ now = result_summary.tests_by_timeline[test_expectations.NOW]
+ wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+ defer = result_summary.tests_by_timeline[test_expectations.DEFER]
+
+ # We use a fancy format string in order to print the data out in a
+ # nicely-aligned table.
+ fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd defer, %%%dd wontfix)"
+ % (self._num_digits(now), self._num_digits(defer),
+ self._num_digits(wontfix)))
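+        # Produces lines like "Expect: <count> <type> (<n> now, <n> defer,
+        # <n> wontfix)", with the count columns sized to the largest totals.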
+ write(fmtstr % (len(tests), result_type_str, len(tests & now),
+ len(tests & defer), len(tests & wontfix)))
+
+ def _num_digits(self, num):
+ """Returns the number of digits needed to represent the length of a
+ sequence."""
+ ndigits = 1
+ if len(num):
+ ndigits = int(math.log10(len(num))) + 1
+ return ndigits
+
+ def _print_timing_statistics(self, write, total_time, thread_timings,
+ directory_test_timings, individual_test_timings,
+ result_summary):
+ """Record timing-specific information for the test run.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ total_time: total elapsed time (in seconds) for the test run
+ thread_timings: wall clock time each thread ran for
+ directory_test_timings: timing by directory
+ individual_test_timings: timing by file
+ result_summary: summary object for the test run
+ """
+ write("Test timing:")
+ write(" %6.2f total testing time" % total_time)
+ write("")
+ write("Thread timing:")
+ cuml_time = 0
+ for t in thread_timings:
+ write(" %10s: %5d tests, %6.2f secs" %
+ (t['name'], t['num_tests'], t['total_time']))
+ cuml_time += t['total_time']
+ write(" %6.2f cumulative, %6.2f optimal" %
+ (cuml_time, cuml_time / int(self._options.num_test_shells)))
+ write("")
+
+ self._print_aggregate_test_statistics(write, individual_test_timings)
+ self._print_individual_test_times(write, individual_test_timings,
+ result_summary)
+ self._print_directory_timings(write, directory_test_timings)
+
+ def _print_aggregate_test_statistics(self, write, individual_test_timings):
+ """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ individual_test_timings: List of test_shell_thread.TestStats for all
+ tests.
+ """
+ test_types = individual_test_timings[0].time_for_diffs.keys()
+ times_for_test_shell = []
+ times_for_diff_processing = []
+ times_per_test_type = {}
+ for test_type in test_types:
+ times_per_test_type[test_type] = []
+
+ for test_stats in individual_test_timings:
+ times_for_test_shell.append(test_stats.test_run_time)
+ times_for_diff_processing.append(
+ test_stats.total_time_for_all_diffs)
+ time_for_diffs = test_stats.time_for_diffs
+ for test_type in test_types:
+ times_per_test_type[test_type].append(
+ time_for_diffs[test_type])
+
+ self._print_statistics_for_test_timings(write,
+ "PER TEST TIME IN TESTSHELL (seconds):", times_for_test_shell)
+ self._print_statistics_for_test_timings(write,
+ "PER TEST DIFF PROCESSING TIMES (seconds):",
+ times_for_diff_processing)
+ for test_type in test_types:
+ self._print_statistics_for_test_timings(write,
+ "PER TEST TIMES BY TEST TYPE: %s" % test_type,
+ times_per_test_type[test_type])
+
+ def _print_individual_test_times(self, write, individual_test_timings,
+ result_summary):
+ """Prints the run times for slow, timeout and crash tests.
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ individual_test_timings: List of test_shell_thread.TestStats for all
+ tests.
+ result_summary: summary object for test run
+ """
+ # Reverse-sort by the time spent in test_shell.
+ individual_test_timings.sort(lambda a, b:
+ cmp(b.test_run_time, a.test_run_time))
+
+ num_printed = 0
+ slow_tests = []
+ timeout_or_crash_tests = []
+ unexpected_slow_tests = []
+ for test_tuple in individual_test_timings:
+ filename = test_tuple.filename
+ is_timeout_crash_or_slow = False
+ if self._expectations.has_modifier(filename,
+ test_expectations.SLOW):
+ is_timeout_crash_or_slow = True
+ slow_tests.append(test_tuple)
+
+ if filename in result_summary.failures:
+ result = result_summary.results[filename]
+ if (result == test_expectations.TIMEOUT or
+ result == test_expectations.CRASH):
+ is_timeout_crash_or_slow = True
+ timeout_or_crash_tests.append(test_tuple)
+
+ if (not is_timeout_crash_or_slow and
+ num_printed < self._options.num_slow_tests_to_log):
+ num_printed = num_printed + 1
+ unexpected_slow_tests.append(test_tuple)
+
+ write("")
+ self._print_test_list_timing(write, "%s slowest tests that are not "
+ "marked as SLOW and did not timeout/crash:" %
+ self._options.num_slow_tests_to_log, unexpected_slow_tests)
+ write("")
+ self._print_test_list_timing(write, "Tests marked as SLOW:",
+ slow_tests)
+ write("")
+ self._print_test_list_timing(write, "Tests that timed out or crashed:",
+ timeout_or_crash_tests)
+ write("")
+
+ def _print_test_list_timing(self, write, title, test_list):
+ """Print timing info for each test.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ title: section heading
+ test_list: tests that fall in this section
+ """
+ write(title)
+ for test_tuple in test_list:
+ filename = test_tuple.filename[len(
+ path_utils.layout_tests_dir()) + 1:]
+ filename = filename.replace('\\', '/')
+ test_run_time = round(test_tuple.test_run_time, 1)
+ write(" %s took %s seconds" % (filename, test_run_time))
+
+ def _print_directory_timings(self, write, directory_test_timings):
+ """Print timing info by directory for any directories that
+ take > 10 seconds to run.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ directory_test_timing: time info for each directory
+ """
+ timings = []
+ for directory in directory_test_timings:
+ num_tests, time_for_directory = directory_test_timings[directory]
+ timings.append((round(time_for_directory, 1), directory,
+ num_tests))
+ timings.sort()
+
+ write("Time to process slowest subdirectories:")
+ min_seconds_to_print = 10
+ for timing in timings:
+ if timing[0] > min_seconds_to_print:
+ write(" %s took %s seconds to run %s tests." % (timing[1],
+ timing[0], timing[2]))
+ write("")
+
+ def _print_statistics_for_test_timings(self, write, title, timings):
+ """Prints the median, mean and standard deviation of the values in
+ timings.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ title: Title for these timings.
+ timings: A list of floats representing times.
+ """
+ write(title)
+ timings.sort()
+
+ num_tests = len(timings)
+ percentile90 = timings[int(.9 * num_tests)]
+ percentile99 = timings[int(.99 * num_tests)]
+
+ if num_tests % 2 == 1:
+            median = timings[(num_tests - 1) / 2]
+ else:
+ lower = timings[num_tests / 2 - 1]
+ upper = timings[num_tests / 2]
+ median = (float(lower + upper)) / 2
+
+ mean = sum(timings) / num_tests
+
+        sum_of_deviations = 0
+        for time in timings:
+            sum_of_deviations += math.pow(time - mean, 2)
+
+ std_deviation = math.sqrt(sum_of_deviations / num_tests)
+ write(" Median: %6.3f" % median)
+ write(" Mean: %6.3f" % mean)
+ write(" 90th percentile: %6.3f" % percentile90)
+ write(" 99th percentile: %6.3f" % percentile99)
+ write(" Standard dev: %6.3f" % std_deviation)
+ write("")
+
+ def _print_result_summary(self, write, result_summary):
+ """Print a short summary about how many tests passed.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ result_summary: information to log
+ """
+ failed = len(result_summary.failures)
+ skipped = len(
+ result_summary.tests_by_expectation[test_expectations.SKIP])
+ total = result_summary.total
+ passed = total - failed - skipped
+ pct_passed = 0.0
+ if total > 0:
+ pct_passed = float(passed) * 100 / total
+
+ write("")
+ write("=> Results: %d/%d tests passed (%.1f%%)" %
+ (passed, total, pct_passed))
+ write("")
+ self._print_result_summary_entry(write, result_summary,
+ test_expectations.NOW, "Tests to be fixed for the current release")
+
+ write("")
+ self._print_result_summary_entry(write, result_summary,
+ test_expectations.DEFER,
+ "Tests we'll fix in the future if they fail (DEFER)")
+
+ write("")
+ self._print_result_summary_entry(write, result_summary,
+ test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+
+ def _print_result_summary_entry(self, write, result_summary, timeline,
+ heading):
+ """Print a summary block of results for a particular timeline of test.
+
+ Args:
+ write: A callback to write info to (e.g., a LoggingWriter) or
+ sys.stdout.write.
+ result_summary: summary to print results for
+          timeline: the timeline to print results for (NOW, WONTFIX, etc.)
+ heading: a textual description of the timeline
+ """
+ total = len(result_summary.tests_by_timeline[timeline])
+ not_passing = (total -
+ len(result_summary.tests_by_expectation[test_expectations.PASS] &
+ result_summary.tests_by_timeline[timeline]))
+ write("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectationsFile.EXPECTATION_ORDER:
+ if result == test_expectations.PASS:
+ continue
+ results = (result_summary.tests_by_expectation[result] &
+ result_summary.tests_by_timeline[timeline])
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ write(" %5d %-24s (%4.1f%%)" % (len(results),
+ desc[len(results) != 1], pct))
+
+ def _print_one_line_summary(self, total, expected):
+ """Print a one-line summary of the test run to stdout.
+
+ Args:
+ total: total number of tests run
+ expected: number of expected results
+ """
+ unexpected = total - expected
+ if unexpected == 0:
+ print "All %d tests ran as expected." % expected
+ elif expected == 1:
+ print "1 test ran as expected, %d didn't:" % unexpected
+ else:
+ print "%d tests ran as expected, %d didn't:" % (expected,
+ unexpected)
+
+ def _print_unexpected_results(self, unexpected_results):
+ """Prints any unexpected results in a human-readable form to stdout."""
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+ if len(unexpected_results['tests']):
+ print ""
+
+ for test, results in unexpected_results['tests'].iteritems():
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to crash, but passed',
+ test)
+ elif 'TIMEOUT' in expected:
+ _add_to_dict_of_lists(passes,
+ 'Expected to timeout, but passed',
+ test)
+ else:
+ _add_to_dict_of_lists(passes,
+ 'Expected to fail, but passed',
+ test)
+ elif len(actual) > 1:
+ # We group flaky tests by the first actual result we got.
+ _add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ _add_to_dict_of_lists(regressions, results['actual'], test)
+
+ if len(passes):
+ for key, tests in passes.iteritems():
+ print "%s: (%d)" % (key, len(tests))
+ tests.sort()
+ for test in tests:
+ print " %s" % test
+ print
+
+ if len(flaky):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ print "Unexpected flakiness: %s (%d)" % (
+ descriptions[result][1], len(tests))
+ tests.sort()
+
+ for test in tests:
+ result = unexpected_results['tests'][test]
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ new_expectations_list = list(set(actual) | set(expected))
+ print " %s = %s" % (test, " ".join(new_expectations_list))
+ print
+
+ if len(regressions):
+ descriptions = TestExpectationsFile.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result = TestExpectationsFile.EXPECTATIONS[key.lower()]
+ print "Regressions: Unexpected %s : (%d)" % (
+ descriptions[result][1], len(tests))
+ tests.sort()
+ for test in tests:
+ print " %s = %s" % (test, key)
+ print
+
+ if len(unexpected_results['tests']) and self._options.verbose:
+ print "-" * 78
+
+ def _print_unexpected_test_result(self, test, result):
+ """Prints one unexpected test result line."""
+ desc = TestExpectationsFile.EXPECTATION_DESCRIPTIONS[result][0]
+ self._meter.write(" %s -> unexpected %s\n" %
+ (path_utils.relative_test_filename(test), desc))
+
+ def _write_results_html_file(self, result_summary):
+ """Write results.html which is a summary of tests that failed.
+
+ Args:
+ result_summary: a summary of the results :)
+
+ Returns:
+ True if any results were written (since expected failures may be
+ omitted)
+ """
+ # test failures
+ if self._options.full_results_html:
+ test_files = result_summary.failures.keys()
+ else:
+ unexpected_failures = self._get_failures(result_summary,
+ include_crashes=True)
+ test_files = unexpected_failures.keys()
+ if not len(test_files):
+ return False
+
+ out_filename = os.path.join(self._options.results_directory,
+ "results.html")
+ out_file = open(out_filename, 'w')
+ # header
+ if self._options.full_results_html:
+ h2 = "Test Failures"
+ else:
+ h2 = "Unexpected Test Failures"
+ out_file.write("<html><head><title>Layout Test Results (%(time)s)"
+ "</title></head><body><h2>%(h2)s (%(time)s)</h2>\n"
+ % {'h2': h2, 'time': time.asctime()})
+
+ test_files.sort()
+ for test_file in test_files:
+ test_failures = result_summary.failures.get(test_file, [])
+ out_file.write("<p><a href='%s'>%s</a><br />\n"
+ % (path_utils.filename_to_uri(test_file),
+ path_utils.relative_test_filename(test_file)))
+ for failure in test_failures:
+ out_file.write("&nbsp;&nbsp;%s<br/>"
+ % failure.result_html_output(
+ path_utils.relative_test_filename(test_file)))
+ out_file.write("</p>\n")
+
+ # footer
+ out_file.write("</body></html>\n")
+ return True
+
+ def _show_results_html_file(self):
+ """Launches the test shell open to the results.html page."""
+ results_filename = os.path.join(self._options.results_directory,
+ "results.html")
+ subprocess.Popen([path_utils.test_shell_path(self._options.target),
+ path_utils.filename_to_uri(results_filename)])
+
+
+def _add_to_dict_of_lists(dict, key, value):
+ dict.setdefault(key, []).append(value)
+
+
+def read_test_files(files):
+ tests = []
+ for file in files:
+ for line in open(file):
+ line = test_expectations.strip_comments(line)
+ if line:
+ tests.append(line)
+ return tests
+
+
+def create_logging_writer(options, log_option):
+ """Returns a write() function that will write the string to logging.info()
+    if log_option was specified in --log or if --verbose is true. Otherwise the
+ message is dropped.
+
+ Args:
+ options: list of command line options from optparse
+ log_option: option to match in options.log in order for the messages
+ to be logged (e.g., 'actual' or 'expected')
+ """
+ if options.verbose or log_option in options.log.split(","):
+ return logging.info
+ return lambda str: 1
+
+
+def main(options, args):
+ """Run the tests. Will call sys.exit when complete.
+
+ Args:
+ options: a dictionary of command line options
+ args: a list of sub directories or files to test
+ """
+
+ if options.sources:
+ options.verbose = True
+
+ # Set up our logging format.
+ meter = metered_stream.MeteredStream(options.verbose, sys.stderr)
+ log_fmt = '%(message)s'
+ log_datefmt = '%y%m%d %H:%M:%S'
+ log_level = logging.INFO
+ if options.verbose:
+ log_fmt = ('%(asctime)s %(filename)s:%(lineno)-4d %(levelname)s '
+ '%(message)s')
+ log_level = logging.DEBUG
+ logging.basicConfig(level=log_level, format=log_fmt, datefmt=log_datefmt,
+ stream=meter)
+
+ if not options.target:
+ if options.debug:
+ options.target = "Debug"
+ else:
+ options.target = "Release"
+
+ if not options.use_apache:
+ options.use_apache = sys.platform in ('darwin', 'linux2')
+
+ if options.results_directory.startswith("/"):
+ # Assume it's an absolute path and normalize.
+ options.results_directory = path_utils.get_absolute_path(
+ options.results_directory)
+ else:
+ # If it's a relative path, make the output directory relative to
+ # Debug or Release.
+ basedir = path_utils.path_from_base('webkit')
+ options.results_directory = path_utils.get_absolute_path(
+ os.path.join(basedir, options.target, options.results_directory))
+
+ if options.clobber_old_results:
+ # Just clobber the actual test results directories since the other
+ # files in the results directory are explicitly used for cross-run
+ # tracking.
+ path = os.path.join(options.results_directory, 'LayoutTests')
+ if os.path.exists(path):
+ shutil.rmtree(path)
+
+ # Ensure platform is valid and force it to the form 'chromium-<platform>'.
+ options.platform = path_utils.platform_name(options.platform)
+
+ if not options.num_test_shells:
+ # TODO(ojan): Investigate perf/flakiness impact of using numcores + 1.
+ options.num_test_shells = platform_utils.get_num_cores()
+
+ write = create_logging_writer(options, 'config')
+ write("Running %s test_shells in parallel" % options.num_test_shells)
+
+ if not options.time_out_ms:
+ if options.target == "Debug":
+ options.time_out_ms = str(2 * TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+ else:
+ options.time_out_ms = str(TestRunner.DEFAULT_TEST_TIMEOUT_MS)
+
+ options.slow_time_out_ms = str(5 * int(options.time_out_ms))
+ write("Regular timeout: %s, slow test timeout: %s" %
+ (options.time_out_ms, options.slow_time_out_ms))
+
+ # Include all tests if none are specified.
+ new_args = []
+ for arg in args:
+ if arg and arg != '':
+ new_args.append(arg)
+
+ paths = new_args
+ if not paths:
+ paths = []
+ if options.test_list:
+ paths += read_test_files(options.test_list)
+
+ # Create the output directory if it doesn't already exist.
+ path_utils.maybe_make_directory(options.results_directory)
+ meter.update("Gathering files ...")
+
+ test_runner = TestRunner(options, meter)
+ test_runner.gather_file_paths(paths)
+
+ if options.lint_test_files:
+        # Creating the expectations for each platform/target pair does all
+        # the test list parsing and ensures the syntax is correct (e.g. no
+        # dupes).
+ for platform in TestExpectationsFile.PLATFORMS:
+ test_runner.parse_expectations(platform, is_debug_mode=True)
+ test_runner.parse_expectations(platform, is_debug_mode=False)
+ print ("If there are no fail messages, errors or exceptions, then the "
+ "lint succeeded.")
+ sys.exit(0)
+
+ try:
+ test_shell_binary_path = path_utils.test_shell_path(options.target)
+ except path_utils.PathNotFound:
+ print "\nERROR: test_shell is not found. Be sure that you have built"
+ print "it and that you are using the correct build. This script"
+ print "will run the Release one by default. Use --debug to use the"
+ print "Debug build.\n"
+ sys.exit(1)
+
+ write = create_logging_writer(options, "config")
+ write("Using platform '%s'" % options.platform)
+ write("Placing test results in %s" % options.results_directory)
+ if options.new_baseline:
+ write("Placing new baselines in %s" %
+ path_utils.chromium_baseline_path(options.platform))
+ write("Using %s build at %s" % (options.target, test_shell_binary_path))
+ if options.no_pixel_tests:
+ write("Not running pixel tests")
+ write("")
+
+ meter.update("Parsing expectations ...")
+ test_runner.parse_expectations(options.platform, options.target == 'Debug')
+
+ meter.update("Preparing tests ...")
+ write = create_logging_writer(options, "expected")
+ result_summary = test_runner.prepare_lists_and_print_output(write)
+
+ if 'cygwin' == sys.platform:
+ logging.warn("#" * 40)
+ logging.warn("# UNEXPECTED PYTHON VERSION")
+ logging.warn("# This script should be run using the version of python")
+ logging.warn("# in third_party/python_24/")
+ logging.warn("#" * 40)
+ sys.exit(1)
+
+ # Delete the disk cache if any to ensure a clean test run.
+ cachedir = os.path.split(test_shell_binary_path)[0]
+ cachedir = os.path.join(cachedir, "cache")
+ if os.path.exists(cachedir):
+ shutil.rmtree(cachedir)
+
+ test_runner.add_test_type(text_diff.TestTextDiff)
+ if not options.no_pixel_tests:
+ test_runner.add_test_type(image_diff.ImageDiff)
+ if options.fuzzy_pixel_tests:
+ test_runner.add_test_type(fuzzy_image_diff.FuzzyImageDiff)
+
+ meter.update("Starting ...")
+ has_new_failures = test_runner.run(result_summary)
+
+ logging.debug("Exit status: %d" % has_new_failures)
+ sys.exit(has_new_failures)
+
+
+def parse_args(args=None):
+ """Provides a default set of command line args.
+
+ Returns a tuple of options, args from optparse"""
+ option_parser = optparse.OptionParser()
+ option_parser.add_option("", "--no-pixel-tests", action="store_true",
+ default=False,
+ help="disable pixel-to-pixel PNG comparisons")
+ option_parser.add_option("", "--fuzzy-pixel-tests", action="store_true",
+ default=False,
+ help="Also use fuzzy matching to compare pixel "
+ "test outputs.")
+ option_parser.add_option("", "--results-directory",
+ default="layout-test-results",
+ help="Output results directory source dir,"
+ " relative to Debug or Release")
+ option_parser.add_option("", "--new-baseline", action="store_true",
+ default=False,
+ help="save all generated results as new baselines"
+ " into the platform directory, overwriting "
+ "whatever's already there.")
+ option_parser.add_option("", "--noshow-results", action="store_true",
+ default=False, help="don't launch the test_shell"
+ " with results after the tests are done")
+ option_parser.add_option("", "--full-results-html", action="store_true",
+ default=False, help="show all failures in "
+ "results.html, rather than only regressions")
+ option_parser.add_option("", "--clobber-old-results", action="store_true",
+ default=False, help="Clobbers test results from "
+ "previous runs.")
+ option_parser.add_option("", "--lint-test-files", action="store_true",
+ default=False, help="Makes sure the test files "
+ "parse for all configurations. Does not run any "
+ "tests.")
+ option_parser.add_option("", "--force", action="store_true",
+ default=False,
+ help="Run all tests, even those marked SKIP "
+ "in the test list")
+ option_parser.add_option("", "--num-test-shells",
+ help="Number of testshells to run in parallel.")
+ option_parser.add_option("", "--use-apache", action="store_true",
+ default=False,
+ help="Whether to use apache instead of lighttpd.")
+ option_parser.add_option("", "--time-out-ms", default=None,
+ help="Set the timeout for each test")
+ option_parser.add_option("", "--run-singly", action="store_true",
+ default=False,
+ help="run a separate test_shell for each test")
+ option_parser.add_option("", "--debug", action="store_true", default=False,
+ help="use the debug binary instead of the release"
+ " binary")
+ option_parser.add_option("", "--num-slow-tests-to-log", default=50,
+ help="Number of slow tests whose timings "
+ "to print.")
+ option_parser.add_option("", "--platform",
+ help="Override the platform for expected results")
+ option_parser.add_option("", "--target", default="",
+ help="Set the build target configuration "
+ "(overrides --debug)")
+ option_parser.add_option("", "--log", action="store",
+ default="detailed-progress,unexpected",
+ help="log various types of data. The param should"
+ " be a comma-separated list of values from: "
+ "actual,config," + LOG_DETAILED_PROGRESS +
+ ",expected,timing," + LOG_UNEXPECTED + " "
+ "(defaults to " +
+ "--log detailed-progress,unexpected)")
+ option_parser.add_option("-v", "--verbose", action="store_true",
+ default=False, help="include debug-level logging")
+ option_parser.add_option("", "--sources", action="store_true",
+ help="show expected result file path for each "
+ "test (implies --verbose)")
+ option_parser.add_option("", "--startup-dialog", action="store_true",
+ default=False,
+ help="create a dialog on test_shell.exe startup")
+ option_parser.add_option("", "--gp-fault-error-box", action="store_true",
+ default=False,
+ help="enable Windows GP fault error box")
+ option_parser.add_option("", "--wrapper",
+ help="wrapper command to insert before "
+ "invocations of test_shell; option is split "
+ "on whitespace before running. (Example: "
+ "--wrapper='valgrind --smc-check=all')")
+ option_parser.add_option("", "--test-list", action="append",
+ help="read list of tests to run from file",
+ metavar="FILE")
+ option_parser.add_option("", "--nocheck-sys-deps", action="store_true",
+ default=False,
+ help="Don't check the system dependencies "
+ "(themes)")
+ option_parser.add_option("", "--randomize-order", action="store_true",
+ default=False,
+ help=("Run tests in random order (useful for "
+ "tracking down corruption)"))
+ option_parser.add_option("", "--run-chunk",
+ default=None,
+ help=("Run a specified chunk (n:l), the "
+ "nth of len l, of the layout tests"))
+ option_parser.add_option("", "--run-part",
+ default=None,
+ help=("Run a specified part (n:m), the nth of m"
+ " parts, of the layout tests"))
+ option_parser.add_option("", "--batch-size",
+ default=None,
+ help=("Run a the tests in batches (n), after "
+ "every n tests, the test shell is "
+ "relaunched."))
+ option_parser.add_option("", "--builder-name",
+ default="DUMMY_BUILDER_NAME",
+ help=("The name of the builder shown on the "
+ "waterfall running this script e.g. "
+ "WebKit."))
+ option_parser.add_option("", "--build-name",
+ default="DUMMY_BUILD_NAME",
+ help=("The name of the builder used in its path, "
+ "e.g. webkit-rel."))
+ option_parser.add_option("", "--build-number",
+ default="DUMMY_BUILD_NUMBER",
+ help=("The build number of the builder running"
+ "this script."))
+ option_parser.add_option("", "--experimental-fully-parallel",
+ action="store_true", default=False,
+ help="run all tests in parallel")
+ return option_parser.parse_args(args)
+
+if '__main__' == __name__:
+ options, args = parse_args()
+ main(options, args)
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/__init__.py
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
new file mode 100644
index 0000000..134b507
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/fuzzy_image_diff.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Compares the image output of a test to the expected image output using
+fuzzy matching.
+"""
+
+import errno
+import logging
+import os
+import shutil
+import subprocess
+
+from layout_package import path_utils
+from layout_package import test_failures
+from test_types import test_type_base
+
+
+class FuzzyImageDiff(test_type_base.TestTypeBase):
+
+ def compare_output(self, filename, proc, output, test_args, target):
+ """Implementation of CompareOutput that checks the output image and
+ checksum against the expected files from the LayoutTest directory.
+ """
+ failures = []
+
+ # If we didn't produce a hash file, this test must be text-only.
+ if test_args.hash is None:
+ return failures
+
+ expected_png_file = path_utils.expected_filename(filename, '.png')
+
+ if test_args.show_sources:
+ logging.debug('Using %s' % expected_png_file)
+
+ # Also report a missing expected PNG file.
+ if not os.path.isfile(expected_png_file):
+ failures.append(test_failures.FailureMissingImage(self))
+
+ # Run the fuzzymatcher
+ r = subprocess.call([path_utils.fuzzy_match_path(),
+ test_args.png_path, expected_png_file])
+ if r != 0:
+ failures.append(test_failures.FailureFuzzyFailure(self))
+
+ return failures
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
new file mode 100644
index 0000000..b0bf189
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/image_diff.py
@@ -0,0 +1,224 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Compares the image output of a test to the expected image output.
+
+Compares hashes for the generated and expected images. If the output doesn't
+match, returns FailureImageHashMismatch and outputs both hashes into the layout
+test results directory.
+"""
+
+import errno
+import logging
+import os
+import shutil
+import subprocess
+
+from layout_package import path_utils
+from layout_package import test_failures
+from test_types import test_type_base
+
+# Cache whether we have the image_diff executable available.
+_compare_available = True
+_compare_msg_printed = False
+
+
+class ImageDiff(test_type_base.TestTypeBase):
+
+ def _copy_output_png(self, test_filename, source_image, extension):
+ """Copies result files into the output directory with appropriate
+ names.
+
+ Args:
+ test_filename: the test filename
+          source_image: path to the image file (either actual or expected)
+ extension: extension to indicate -actual.png or -expected.png
+ """
+ self._make_output_directory(test_filename)
+ dest_image = self.output_filename(test_filename, extension)
+
+ try:
+ shutil.copyfile(source_image, dest_image)
+ except IOError, e:
+ # A missing expected PNG has already been recorded as an error.
+ if errno.ENOENT != e.errno:
+ raise
+
+ def _save_baseline_files(self, filename, png_path, checksum):
+ """Saves new baselines for the PNG and checksum.
+
+ Args:
+ filename: test filename
+ png_path: path to the actual PNG result file
+ checksum: value of the actual checksum result
+ """
+ png_file = open(png_path, "rb")
+ png_data = png_file.read()
+ png_file.close()
+ self._save_baseline_data(filename, png_data, ".png")
+ self._save_baseline_data(filename, checksum, ".checksum")
+
+ def _create_image_diff(self, filename, target):
+ """Creates the visual diff of the expected/actual PNGs.
+
+ Args:
+ filename: the name of the test
+ target: Debug or Release
+ """
+ diff_filename = self.output_filename(filename,
+ self.FILENAME_SUFFIX_COMPARE)
+ actual_filename = self.output_filename(filename,
+ self.FILENAME_SUFFIX_ACTUAL + '.png')
+ expected_filename = self.output_filename(filename,
+ self.FILENAME_SUFFIX_EXPECTED + '.png')
+
+ global _compare_available
+ cmd = ''
+
+ try:
+ executable = path_utils.image_diff_path(target)
+ cmd = [executable, '--diff', actual_filename, expected_filename,
+ diff_filename]
+ except Exception, e:
+ _compare_available = False
+
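+        # image_diff exits 0 when the images match; default to 1 (different)
+        # in case the tool is unavailable or fails to run.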
+ result = 1
+ if _compare_available:
+ try:
+ result = subprocess.call(cmd)
+ except OSError, e:
+ if e.errno == errno.ENOENT or e.errno == errno.EACCES:
+ _compare_available = False
+ else:
+ raise e
+ except ValueError:
+ # work around a race condition in Python 2.4's implementation
+ # of subprocess.Popen
+ pass
+
+ global _compare_msg_printed
+
+ if not _compare_available and not _compare_msg_printed:
+ _compare_msg_printed = True
+ print('image_diff not found. Make sure you have a ' + target +
+ ' build of the image_diff executable.')
+
+ return result
+
+ def compare_output(self, filename, proc, output, test_args, target):
+ """Implementation of CompareOutput that checks the output image and
+ checksum against the expected files from the LayoutTest directory.
+ """
+ failures = []
+
+ # If we didn't produce a hash file, this test must be text-only.
+ if test_args.hash is None:
+ return failures
+
+ # If we're generating a new baseline, we pass.
+ if test_args.new_baseline:
+ self._save_baseline_files(filename, test_args.png_path,
+ test_args.hash)
+ return failures
+
+ # Compare hashes.
+ expected_hash_file = path_utils.expected_filename(filename,
+ '.checksum')
+ expected_png_file = path_utils.expected_filename(filename, '.png')
+
+ if test_args.show_sources:
+ logging.debug('Using %s' % expected_hash_file)
+ logging.debug('Using %s' % expected_png_file)
+
+ try:
+ expected_hash = open(expected_hash_file, "r").read()
+ except IOError, e:
+ if errno.ENOENT != e.errno:
+ raise
+ expected_hash = ''
+
+ if not os.path.isfile(expected_png_file):
+ # Report a missing expected PNG file.
+ self.write_output_files(filename, '', '.checksum', test_args.hash,
+ expected_hash, diff=False, wdiff=False)
+ self._copy_output_png(filename, test_args.png_path, '-actual.png')
+ failures.append(test_failures.FailureMissingImage(self))
+ return failures
+ elif test_args.hash == expected_hash:
+ # Hash matched (no diff needed, okay to return).
+ return failures
+
+ self.write_output_files(filename, '', '.checksum', test_args.hash,
+ expected_hash, diff=False, wdiff=False)
+ self._copy_output_png(filename, test_args.png_path, '-actual.png')
+ self._copy_output_png(filename, expected_png_file, '-expected.png')
+
+        # Even though we only use the result in one codepath below, we
+        # still need to call _create_image_diff for the other codepaths.
+ result = self._create_image_diff(filename, target)
+ if expected_hash == '':
+ failures.append(test_failures.FailureMissingImageHash(self))
+ elif test_args.hash != expected_hash:
+ # Hashes don't match, so see if the images match. If they do, then
+ # the hash is wrong.
+ if result == 0:
+ failures.append(test_failures.FailureImageHashIncorrect(self))
+ else:
+ failures.append(test_failures.FailureImageHashMismatch(self))
+
+ return failures
+
+ def diff_files(self, file1, file2):
+ """Diff two image files.
+
+ Args:
+ file1, file2: full paths of the files to compare.
+
+ Returns:
+ True if two files are different.
+ False otherwise.
+ """
+
+ try:
+ executable = path_utils.image_diff_path('Debug')
+ except Exception, e:
+ logging.warn('Failed to find image diff executable.')
+ return True
+
+ cmd = [executable, file1, file2]
+ result = 1
+ try:
+ result = subprocess.call(cmd)
+ except OSError, e:
+ logging.warn('Failed to compare image diff: %s', e)
+ return True
+
+ return result == 1
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
new file mode 100644
index 0000000..334ae70
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/test_type_base.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines the interface TestTypeBase which other test types inherit from.
+
+Also defines the TestArguments "struct" to pass them additional arguments.
+"""
+
+import cgi
+import difflib
+import errno
+import logging
+import os.path
+import subprocess
+
+from layout_package import path_utils
+
+
+class TestArguments(object):
+ """Struct-like wrapper for additional arguments needed by
+ specific tests."""
+ # Whether to save new baseline results.
+ new_baseline = False
+
+ # Path to the actual PNG file generated by pixel tests
+ png_path = None
+
+ # Value of checksum generated by pixel tests.
+ hash = None
+
+ # Whether to use wdiff to generate by-word diffs.
+ wdiff = False
+
+ # Whether to report the locations of the expected result files used.
+ show_sources = False
+
+# Python bug workaround. See the wdiff code in write_output_files for an
+# explanation.
+_wdiff_available = True
+
+
+class TestTypeBase(object):
+
+ # Filename pieces when writing failures to the test results directory.
+ FILENAME_SUFFIX_ACTUAL = "-actual"
+ FILENAME_SUFFIX_EXPECTED = "-expected"
+ FILENAME_SUFFIX_DIFF = "-diff"
+ FILENAME_SUFFIX_WDIFF = "-wdiff.html"
+ FILENAME_SUFFIX_COMPARE = "-diff.png"
+
+ def __init__(self, platform, root_output_dir):
+ """Initialize a TestTypeBase object.
+
+ Args:
+ platform: the platform (e.g., 'chromium-mac-leopard')
+ identifying the platform-specific results to be used.
+ root_output_dir: The unix style path to the output dir.
+ """
+ self._root_output_dir = root_output_dir
+ self._platform = platform
+
+ def _make_output_directory(self, filename):
+ """Creates the output directory (if needed) for a given test
+ filename."""
+ output_filename = os.path.join(self._root_output_dir,
+ path_utils.relative_test_filename(filename))
+ path_utils.maybe_make_directory(os.path.split(output_filename)[0])
+
+ def _save_baseline_data(self, filename, data, modifier):
+ """Saves a new baseline file into the platform directory.
+
+ The file will be named simply "<test>-expected<modifier>", suitable for
+ use as the expected results in a later run.
+
+ Args:
+ filename: path to the test file
+ data: result to be saved as the new baseline
+ modifier: type of the result file, e.g. ".txt" or ".png"
+ """
+ relative_dir = os.path.dirname(
+ path_utils.relative_test_filename(filename))
+ output_dir = os.path.join(
+ path_utils.chromium_baseline_path(self._platform), relative_dir)
+ output_file = os.path.basename(os.path.splitext(filename)[0] +
+ self.FILENAME_SUFFIX_EXPECTED + modifier)
+
+ path_utils.maybe_make_directory(output_dir)
+ output_path = os.path.join(output_dir, output_file)
+ logging.debug('writing new baseline to "%s"' % (output_path))
+ open(output_path, "wb").write(data)
+
+ def output_filename(self, filename, modifier):
+ """Returns a filename inside the output dir that contains modifier.
+
+ For example, if filename is c:/.../fast/dom/foo.html and modifier is
+ "-expected.txt", the return value is
+ c:/cygwin/tmp/layout-test-results/fast/dom/foo-expected.txt
+
+ Args:
+ filename: absolute filename to test file
+ modifier: a string to replace the extension of filename with
+
+ Return:
+ The absolute windows path to the output filename
+ """
+ output_filename = os.path.join(self._root_output_dir,
+ path_utils.relative_test_filename(filename))
+ return os.path.splitext(output_filename)[0] + modifier
+
+ def compare_output(self, filename, proc, output, test_args, target):
+ """Method that compares the output from the test with the
+ expected value.
+
+ This is an abstract method to be implemented by all sub classes.
+
+ Args:
+ filename: absolute filename to test file
+ proc: a reference to the test_shell process
+ output: a string containing the output of the test
+ test_args: a TestArguments object holding optional additional
+ arguments
+ target: Debug or Release
+
+ Return:
+ a list of TestFailure objects, empty if the test passes
+ """
+        raise NotImplementedError
+
+ def write_output_files(self, filename, test_type, file_type, output,
+ expected, diff=True, wdiff=False):
+ """Writes the test output, the expected output and optionally the diff
+ between the two to files in the results directory.
+
+ The full output filename of the actual, for example, will be
+ <filename><test_type>-actual<file_type>
+ For instance,
+ my_test-simp-actual.txt
+
+ Args:
+ filename: The test filename
+ test_type: A string describing the test type, e.g. "simp"
+ file_type: A string describing the test output file type, e.g. ".txt"
+ output: A string containing the test output
+ expected: A string containing the expected test output
+ diff: if True, write a file containing the diffs too. This should be
+ False for results that are not text
+ wdiff: if True, write an HTML file containing word-by-word diffs
+ """
+ self._make_output_directory(filename)
+ actual_filename = self.output_filename(filename,
+ test_type + self.FILENAME_SUFFIX_ACTUAL + file_type)
+ expected_filename = self.output_filename(filename,
+ test_type + self.FILENAME_SUFFIX_EXPECTED + file_type)
+ if output:
+ open(actual_filename, "wb").write(output)
+ if expected:
+ open(expected_filename, "wb").write(expected)
+
+ if not output or not expected:
+ return
+
+ if diff:
+ diff = difflib.unified_diff(expected.splitlines(True),
+ output.splitlines(True),
+ expected_filename,
+ actual_filename)
+
+ diff_filename = self.output_filename(filename,
+ test_type + self.FILENAME_SUFFIX_DIFF + file_type)
+ open(diff_filename, "wb").write(''.join(diff))
+
+ if wdiff:
+ # Shell out to wdiff to get colored inline diffs.
+ executable = path_utils.wdiff_path()
+ cmd = [executable,
+ '--start-delete=##WDIFF_DEL##',
+ '--end-delete=##WDIFF_END##',
+ '--start-insert=##WDIFF_ADD##',
+ '--end-insert=##WDIFF_END##',
+ expected_filename,
+ actual_filename]
+ filename = self.output_filename(filename,
+ test_type + self.FILENAME_SUFFIX_WDIFF)
+
+ global _wdiff_available
+
+ try:
+ # Python's Popen has a bug that causes any pipes opened to a
+ # process that can't be executed to be leaked. Since this
+ # code is specifically designed to tolerate exec failures
+ # to gracefully handle cases where wdiff is not installed,
+ # the bug results in a massive file descriptor leak. As a
+ # workaround, if an exec failure is ever experienced for
+ # wdiff, assume it's not available. This will leak one
+ # file descriptor but that's better than leaking each time
+ # wdiff would be run.
+ #
+ # http://mail.python.org/pipermail/python-list/
+ # 2008-August/505753.html
+ # http://bugs.python.org/issue3210
+ #
+ # It also has a threading bug, so we don't output wdiff if
+ # the Popen raises a ValueError.
+ # http://bugs.python.org/issue1236
+ if _wdiff_available:
+ wdiff = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE).communicate()[0]
+ wdiff_failed = False
+
+ except OSError, e:
+ if (e.errno == errno.ENOENT or e.errno == errno.EACCES or
+ e.errno == errno.ECHILD):
+ _wdiff_available = False
+ else:
+ raise e
+ except ValueError, e:
+ wdiff_failed = True
+
+ out = open(filename, 'wb')
+
+ if not _wdiff_available:
+ out.write(
+ "wdiff not installed.<br/> "
+ "If you're running OS X, you can install via macports."
+ "<br/>"
+ "If running Ubuntu linux, you can run "
+ "'sudo apt-get install wdiff'.")
+ elif wdiff_failed:
+ out.write('wdiff failed due to running with multiple '
+ 'test_shells in parallel.')
+ else:
+ wdiff = cgi.escape(wdiff)
+ wdiff = wdiff.replace('##WDIFF_DEL##', '<span class=del>')
+ wdiff = wdiff.replace('##WDIFF_ADD##', '<span class=add>')
+ wdiff = wdiff.replace('##WDIFF_END##', '</span>')
+ out.write('<head><style>.del { background: #faa; } ')
+ out.write('.add { background: #afa; }</style></head>')
+ out.write('<pre>' + wdiff + '</pre>')
+
+ out.close()
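
The filename scheme described in write_output_files() and the wdiff post-processing above can be illustrated with a small stand-alone sketch. The paths are hypothetical, and the exact suffix strings ("-actual", "-expected", "-diff") are assumptions based on the docstrings and constant names in this file rather than values taken from a real run:

    import cgi
    import os

    # Hypothetical locations, for illustration only.
    root_output_dir = "/tmp/layout-test-results"
    relative = "fast/dom/foo.html"  # what path_utils.relative_test_filename() would return
    base = os.path.splitext(os.path.join(root_output_dir, relative))[0]

    # write_output_files(filename, "", ".txt", ...), as the text-diff test type calls it:
    actual_name = base + "-actual.txt"      # .../fast/dom/foo-actual.txt
    expected_name = base + "-expected.txt"  # .../fast/dom/foo-expected.txt
    diff_name = base + "-diff.txt"          # .../fast/dom/foo-diff.txt

    # The raw wdiff output is converted to HTML with the same replacements as above.
    raw = "same ##WDIFF_DEL##old##WDIFF_END## ##WDIFF_ADD##new##WDIFF_END##"
    html = cgi.escape(raw)
    html = html.replace("##WDIFF_DEL##", "<span class=del>")
    html = html.replace("##WDIFF_ADD##", "<span class=add>")
    html = html.replace("##WDIFF_END##", "</span>")
    # html == "same <span class=del>old</span> <span class=add>new</span>"
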
diff --git a/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
new file mode 100644
index 0000000..8cff9e6
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/layout_tests/test_types/text_diff.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# Copyright (C) 2010 The Chromium Authors. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Chromium name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Compares the text output of a test to the expected text output.
+
+If the output doesn't match, returns FailureTextMismatch and outputs the diff
+files into the layout test results directory.
+"""
+
+import errno
+import logging
+import os.path
+
+from layout_package import path_utils
+from layout_package import test_failures
+from test_types import test_type_base
+
+
+def is_render_tree_dump(data):
+ """Returns true if data appears to be a render tree dump as opposed to a
+ plain text dump."""
+ return data.find("RenderView at (0,0)") != -1
+
+
+class TestTextDiff(test_type_base.TestTypeBase):
+
+ def get_normalized_output_text(self, output):
+ # Some tests produce "\r\n" explicitly. Our system (Python/Cygwin)
+ # helpfully changes the "\n" to "\r\n", resulting in "\r\r\n".
+ norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace(
+ "\r\n", "\n")
+ return norm + "\n"
+
+ def get_normalized_expected_text(self, filename, show_sources):
+ """Given the filename of the test, read the expected output from a file
+ and normalize the text. Returns a string with the expected text, or ''
+ if the expected output file was not found."""
+ # Read the platform-specific expected text.
+ expected_filename = path_utils.expected_filename(filename, '.txt')
+ if show_sources:
+ logging.debug('Using %s' % expected_filename)
+
+ return self.get_normalized_text(expected_filename)
+
+ def get_normalized_text(self, filename):
+ try:
+ text = open(filename).read()
+ except IOError, e:
+ if errno.ENOENT != e.errno:
+ raise
+ return ''
+
+ # Normalize line endings
+ return text.strip("\r\n").replace("\r\n", "\n") + "\n"
+
+ def compare_output(self, filename, proc, output, test_args, target):
+ """Implementation of CompareOutput that checks the output text against
+ the expected text from the LayoutTest directory."""
+ failures = []
+
+ # If we're generating a new baseline, we pass.
+ if test_args.new_baseline:
+ self._save_baseline_data(filename, output, ".txt")
+ return failures
+
+ # Normalize text to diff
+ output = self.get_normalized_output_text(output)
+ expected = self.get_normalized_expected_text(filename,
+ test_args.show_sources)
+
+ # Write output files for new tests, too.
+ if output != expected:
+ # Text doesn't match, write output files.
+ self.write_output_files(filename, "", ".txt", output, expected,
+ diff=True, wdiff=True)
+
+ if expected == '':
+ failures.append(test_failures.FailureMissingResult(self))
+ else:
+ failures.append(test_failures.FailureTextMismatch(self, True))
+
+ return failures
+
+ def diff_files(self, file1, file2):
+ """Diff two text files.
+
+ Args:
+ file1, file2: full paths of the files to compare.
+
+ Returns:
+ True if two files are different.
+ False otherwise.
+ """
+
+ return (self.get_normalized_text(file1) !=
+ self.get_normalized_text(file2))
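
Both sides of the comparison are normalized before diffing: the "\r\r\n" sequences produced by Cygwin's newline translation are collapsed, line endings are canonicalized to "\n", and exactly one trailing newline is kept, so a CRLF-versus-LF difference alone never yields a FailureTextMismatch. A self-contained restatement of that normalization, for illustration only:

    # Mirrors get_normalized_output_text() above.
    def normalize_output(output):
        norm = output.replace("\r\r\n", "\r\n").strip("\r\n").replace("\r\n", "\n")
        return norm + "\n"

    assert normalize_output("PASS\r\r\n") == "PASS\n"
    assert normalize_output("PASS\r\n") == normalize_output("PASS\n")
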
diff --git a/WebKitTools/Scripts/webkitpy/mock.py b/WebKitTools/Scripts/webkitpy/mock.py
new file mode 100644
index 0000000..f6f328e
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/mock.py
@@ -0,0 +1,309 @@
+# mock.py
+# Test tools for mocking and patching.
+# Copyright (C) 2007-2009 Michael Foord
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+
+# mock 0.6.0
+# http://www.voidspace.org.uk/python/mock/
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# 2009-11-25: Licence downloaded from above URL.
+# BEGIN DOWNLOADED LICENSE
+#
+# Copyright (c) 2003-2009, Michael Foord
+# All rights reserved.
+# E-mail : fuzzyman AT voidspace DOT org DOT uk
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+#
+# * Neither the name of Michael Foord nor the name of Voidspace
+# may be used to endorse or promote products derived from this
+# software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# END DOWNLOADED LICENSE
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# Comments, suggestions and bug reports welcome.
+
+
+__all__ = (
+ 'Mock',
+ 'patch',
+ 'patch_object',
+ 'sentinel',
+ 'DEFAULT'
+)
+
+__version__ = '0.6.0'
+
+class SentinelObject(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __repr__(self):
+ return '<SentinelObject "%s">' % self.name
+
+
+class Sentinel(object):
+ def __init__(self):
+ self._sentinels = {}
+
+ def __getattr__(self, name):
+ return self._sentinels.setdefault(name, SentinelObject(name))
+
+
+sentinel = Sentinel()
+
+DEFAULT = sentinel.DEFAULT
+
+class OldStyleClass:
+ pass
+ClassType = type(OldStyleClass)
+
+def _is_magic(name):
+ return '__%s__' % name[2:-2] == name
+
+def _copy(value):
+ if type(value) in (dict, list, tuple, set):
+ return type(value)(value)
+ return value
+
+
+class Mock(object):
+
+ def __init__(self, spec=None, side_effect=None, return_value=DEFAULT,
+ name=None, parent=None, wraps=None):
+ self._parent = parent
+ self._name = name
+ if spec is not None and not isinstance(spec, list):
+ spec = [member for member in dir(spec) if not _is_magic(member)]
+
+ self._methods = spec
+ self._children = {}
+ self._return_value = return_value
+ self.side_effect = side_effect
+ self._wraps = wraps
+
+ self.reset_mock()
+
+
+ def reset_mock(self):
+ self.called = False
+ self.call_args = None
+ self.call_count = 0
+ self.call_args_list = []
+ self.method_calls = []
+ for child in self._children.itervalues():
+ child.reset_mock()
+ if isinstance(self._return_value, Mock):
+ self._return_value.reset_mock()
+
+
+ def __get_return_value(self):
+ if self._return_value is DEFAULT:
+ self._return_value = Mock()
+ return self._return_value
+
+ def __set_return_value(self, value):
+ self._return_value = value
+
+ return_value = property(__get_return_value, __set_return_value)
+
+
+ def __call__(self, *args, **kwargs):
+ self.called = True
+ self.call_count += 1
+ self.call_args = (args, kwargs)
+ self.call_args_list.append((args, kwargs))
+
+ parent = self._parent
+ name = self._name
+ while parent is not None:
+ parent.method_calls.append((name, args, kwargs))
+ if parent._parent is None:
+ break
+ name = parent._name + '.' + name
+ parent = parent._parent
+
+ ret_val = DEFAULT
+ if self.side_effect is not None:
+ if (isinstance(self.side_effect, Exception) or
+ isinstance(self.side_effect, (type, ClassType)) and
+ issubclass(self.side_effect, Exception)):
+ raise self.side_effect
+
+ ret_val = self.side_effect(*args, **kwargs)
+ if ret_val is DEFAULT:
+ ret_val = self.return_value
+
+ if self._wraps is not None and self._return_value is DEFAULT:
+ return self._wraps(*args, **kwargs)
+ if ret_val is DEFAULT:
+ ret_val = self.return_value
+ return ret_val
+
+
+ def __getattr__(self, name):
+ if self._methods is not None:
+ if name not in self._methods:
+ raise AttributeError("Mock object has no attribute '%s'" % name)
+ elif _is_magic(name):
+ raise AttributeError(name)
+
+ if name not in self._children:
+ wraps = None
+ if self._wraps is not None:
+ wraps = getattr(self._wraps, name)
+ self._children[name] = Mock(parent=self, name=name, wraps=wraps)
+
+ return self._children[name]
+
+
+ def assert_called_with(self, *args, **kwargs):
+ assert self.call_args == (args, kwargs), 'Expected: %s\nCalled with: %s' % ((args, kwargs), self.call_args)
+
+
+def _dot_lookup(thing, comp, import_path):
+ try:
+ return getattr(thing, comp)
+ except AttributeError:
+ __import__(import_path)
+ return getattr(thing, comp)
+
+
+def _importer(target):
+ components = target.split('.')
+ import_path = components.pop(0)
+ thing = __import__(import_path)
+
+ for comp in components:
+ import_path += ".%s" % comp
+ thing = _dot_lookup(thing, comp, import_path)
+ return thing
+
+
+class _patch(object):
+ def __init__(self, target, attribute, new, spec, create):
+ self.target = target
+ self.attribute = attribute
+ self.new = new
+ self.spec = spec
+ self.create = create
+ self.has_local = False
+
+
+ def __call__(self, func):
+ if hasattr(func, 'patchings'):
+ func.patchings.append(self)
+ return func
+
+ def patched(*args, **keywargs):
+ # don't use a with statement here (backwards compatibility with 2.5)
+ extra_args = []
+ for patching in patched.patchings:
+ arg = patching.__enter__()
+ if patching.new is DEFAULT:
+ extra_args.append(arg)
+ args += tuple(extra_args)
+ try:
+ return func(*args, **keywargs)
+ finally:
+ for patching in getattr(patched, 'patchings', []):
+ patching.__exit__()
+
+ patched.patchings = [self]
+ patched.__name__ = func.__name__
+ patched.compat_co_firstlineno = getattr(func, "compat_co_firstlineno",
+ func.func_code.co_firstlineno)
+ return patched
+
+
+ def get_original(self):
+ target = self.target
+ name = self.attribute
+ create = self.create
+
+ original = DEFAULT
+ if _has_local_attr(target, name):
+ try:
+ original = target.__dict__[name]
+ except AttributeError:
+ # for instances of classes with slots, they have no __dict__
+ original = getattr(target, name)
+ elif not create and not hasattr(target, name):
+ raise AttributeError("%s does not have the attribute %r" % (target, name))
+ return original
+
+
+ def __enter__(self):
+ new, spec, = self.new, self.spec
+ original = self.get_original()
+ if new is DEFAULT:
+ # XXXX what if original is DEFAULT - shouldn't use it as a spec
+ inherit = False
+ if spec == True:
+ # set spec to the object we are replacing
+ spec = original
+ if isinstance(spec, (type, ClassType)):
+ inherit = True
+ new = Mock(spec=spec)
+ if inherit:
+ new.return_value = Mock(spec=spec)
+ self.temp_original = original
+ setattr(self.target, self.attribute, new)
+ return new
+
+
+ def __exit__(self, *_):
+ if self.temp_original is not DEFAULT:
+ setattr(self.target, self.attribute, self.temp_original)
+ else:
+ delattr(self.target, self.attribute)
+ del self.temp_original
+
+
+def patch_object(target, attribute, new=DEFAULT, spec=None, create=False):
+ return _patch(target, attribute, new, spec, create)
+
+
+def patch(target, new=DEFAULT, spec=None, create=False):
+ try:
+ target, attribute = target.rsplit('.', 1)
+ except (TypeError, ValueError):
+ raise TypeError("Need a valid target to patch. You supplied: %r" % (target,))
+ target = _importer(target)
+ return _patch(target, attribute, new, spec, create)
+
+
+
+def _has_local_attr(obj, name):
+ try:
+ return name in vars(obj)
+ except TypeError:
+ # objects without a __dict__
+ return hasattr(obj, name)
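
This is a copy of Michael Foord's mock 0.6.0, bundled so the webkitpy unit tests can use it without an external install. A minimal sketch of the two pieces the tests rely on, call recording on Mock and attribute replacement via patch(); the names below, and the choice of os.getcwd as a patch target, are purely illustrative:

    import os

    from webkitpy.mock import Mock, patch, sentinel

    # Calls are recorded and canned return values handed back.
    mock = Mock()
    mock.return_value = sentinel.SomeResult
    assert mock(1, two=2) is sentinel.SomeResult
    assert mock.called
    mock.assert_called_with(1, two=2)

    # Attribute access creates child Mocks; their calls are recorded on the parent.
    mock.child.method("arg")
    assert mock.method_calls == [("child.method", ("arg",), {})]

    # patch() swaps an attribute for a Mock during the call and, with new=DEFAULT,
    # passes that Mock to the decorated function as an extra argument.
    @patch("os.getcwd")
    def check_patched_getcwd(mock_getcwd):
        mock_getcwd.return_value = "/mock/cwd"
        return os.getcwd()

    assert check_patched_getcwd() == "/mock/cwd"
    assert os.getcwd() != "/mock/cwd"  # the original is restored afterwards
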
diff --git a/WebKitTools/Scripts/webkitpy/mock.pyc b/WebKitTools/Scripts/webkitpy/mock.pyc
new file mode 100644
index 0000000..c39d3f4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/mock.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py b/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py
new file mode 100644
index 0000000..1aff53a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/mock_bugzillatool.py
@@ -0,0 +1,367 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.bugzilla import Bug, Attachment
+from webkitpy.committers import CommitterList, Reviewer
+from webkitpy.mock import Mock
+from webkitpy.scm import CommitMessage
+from webkitpy.webkit_logging import log
+
+
+def _id_to_object_dictionary(*objects):
+ dictionary = {}
+ for thing in objects:
+ dictionary[thing["id"]] = thing
+ return dictionary
+
+
+# FIXME: The ids should be 1, 2, 3 instead of crazy numbers.
+
+
+_patch1 = {
+ "id": 197,
+ "bug_id": 42,
+ "url": "http://example.com/197",
+ "is_obsolete": False,
+ "is_patch": True,
+ "review": "+",
+ "reviewer_email": "foo@bar.com",
+ "commit-queue": "+",
+ "committer_email": "foo@bar.com",
+ "attacher_email": "Contributer1",
+}
+
+
+_patch2 = {
+ "id": 128,
+ "bug_id": 42,
+ "url": "http://example.com/128",
+ "is_obsolete": False,
+ "is_patch": True,
+ "review": "+",
+ "reviewer_email": "foo@bar.com",
+ "commit-queue": "+",
+ "committer_email": "non-committer@example.com",
+ "attacher_email": "eric@webkit.org",
+}
+
+
+_patch3 = {
+ "id": 103,
+ "bug_id": 75,
+ "url": "http://example.com/103",
+ "is_obsolete": False,
+ "is_patch": True,
+ "review": "?",
+ "attacher_email": "eric@webkit.org",
+}
+
+
+_patch4 = {
+ "id": 104,
+ "bug_id": 77,
+ "url": "http://example.com/103",
+ "is_obsolete": False,
+ "is_patch": True,
+ "review": "+",
+ "commit-queue": "?",
+ "reviewer_email": "foo@bar.com",
+ "attacher_email": "Contributer2",
+}
+
+
+_patch5 = {
+ "id": 105,
+ "bug_id": 77,
+ "url": "http://example.com/103",
+ "is_obsolete": False,
+ "is_patch": True,
+ "review": "+",
+ "reviewer_email": "foo@bar.com",
+ "attacher_email": "eric@webkit.org",
+}
+
+
+_patch6 = { # Valid committer, but no reviewer.
+ "id": 106,
+ "bug_id": 77,
+ "url": "http://example.com/103",
+ "is_obsolete": False,
+ "is_patch": True,
+ "commit-queue": "+",
+ "committer_email": "foo@bar.com",
+ "attacher_email": "eric@webkit.org",
+}
+
+
+_patch7 = { # Valid review, patch is marked obsolete.
+ "id": 107,
+ "bug_id": 76,
+ "url": "http://example.com/103",
+ "is_obsolete": True,
+ "is_patch": True,
+ "review": "+",
+ "reviewer_email": "foo@bar.com",
+ "attacher_email": "eric@webkit.org",
+}
+
+
+# This must be defined before the bug dictionaries below, which is why we
+# don't use MockBugzilla.unassigned_email directly.
+_unassigned_email = "unassigned@example.com"
+
+
+# FIXME: The ids should be 1, 2, 3 instead of crazy numbers.
+
+
+_bug1 = {
+ "id": 42,
+ "title": "Bug with two r+'d and cq+'d patches, one of which has an "
+ "invalid commit-queue setter.",
+ "assigned_to_email": _unassigned_email,
+ "attachments": [_patch1, _patch2],
+}
+
+
+_bug2 = {
+ "id": 75,
+ "title": "Bug with a patch needing review.",
+ "assigned_to_email": "foo@foo.com",
+ "attachments": [_patch3],
+}
+
+
+_bug3 = {
+ "id": 76,
+ "title": "The third bug",
+ "assigned_to_email": _unassigned_email,
+ "attachments": [_patch7],
+}
+
+
+_bug4 = {
+ "id": 77,
+ "title": "The fourth bug",
+ "assigned_to_email": "foo@foo.com",
+ "attachments": [_patch4, _patch5, _patch6],
+}
+
+
+class MockBugzillaQueries(Mock):
+
+ def __init__(self, bugzilla):
+ Mock.__init__(self)
+ self._bugzilla = bugzilla
+
+ def _all_bugs(self):
+ return map(lambda bug_dictionary: Bug(bug_dictionary, self._bugzilla),
+ self._bugzilla.bug_cache.values())
+
+ def fetch_bug_ids_from_commit_queue(self):
+ bugs_with_commit_queued_patches = filter(
+ lambda bug: bug.commit_queued_patches(),
+ self._all_bugs())
+ return map(lambda bug: bug.id(), bugs_with_commit_queued_patches)
+
+ def fetch_attachment_ids_from_review_queue(self):
+ unreviewed_patches = sum([bug.unreviewed_patches()
+ for bug in self._all_bugs()], [])
+ return map(lambda patch: patch.id(), unreviewed_patches)
+
+ def fetch_patches_from_commit_queue(self):
+ return sum([bug.commit_queued_patches()
+ for bug in self._all_bugs()], [])
+
+ def fetch_bug_ids_from_pending_commit_list(self):
+ bugs_with_reviewed_patches = filter(lambda bug: bug.reviewed_patches(),
+ self._all_bugs())
+ bug_ids = map(lambda bug: bug.id(), bugs_with_reviewed_patches)
+ # NOTE: This manual hack allows testing the logging in
+ # test_assign_to_committer; the real pending-commit query on bugzilla
+ # will return bugs with patches which have r+ but are also obsolete.
+ return bug_ids + [76]
+
+ def fetch_patches_from_pending_commit_list(self):
+ return sum([bug.reviewed_patches() for bug in self._all_bugs()], [])
+
+
+# FIXME: Bugzilla is the wrong Mock-point. Once we have a BugzillaNetwork
+# class we should mock that instead.
+# Most of this class is just copy/paste from Bugzilla.
+
+
+class MockBugzilla(Mock):
+
+ bug_server_url = "http://example.com"
+
+ unassigned_email = _unassigned_email
+
+ bug_cache = _id_to_object_dictionary(_bug1, _bug2, _bug3, _bug4)
+
+ attachment_cache = _id_to_object_dictionary(_patch1,
+ _patch2,
+ _patch3,
+ _patch4,
+ _patch5,
+ _patch6,
+ _patch7)
+
+ def __init__(self):
+ Mock.__init__(self)
+ self.queries = MockBugzillaQueries(self)
+ self.committers = CommitterList(reviewers=[Reviewer("Foo Bar",
+ "foo@bar.com")])
+
+ def fetch_bug(self, bug_id):
+ return Bug(self.bug_cache.get(bug_id), self)
+
+ def fetch_attachment(self, attachment_id):
+ # This could be changed to .get() if we wish to allow failed lookups.
+ attachment_dictionary = self.attachment_cache[attachment_id]
+ bug = self.fetch_bug(attachment_dictionary["bug_id"])
+ for attachment in bug.attachments(include_obsolete=True):
+ if attachment.id() == int(attachment_id):
+ return attachment
+
+ def bug_url_for_bug_id(self, bug_id):
+ return "%s/%s" % (self.bug_server_url, bug_id)
+
+ def fetch_bug_dictionary(self, bug_id):
+ return self.bug_cache.get(bug_id)
+
+ def attachment_url_for_id(self, attachment_id, action="view"):
+ action_param = ""
+ if action and action != "view":
+ action_param = "&action=%s" % action
+ return "%s/%s%s" % (self.bug_server_url, attachment_id, action_param)
+
+
+class MockBuildBot(Mock):
+
+ def builder_statuses(self):
+ return [{
+ "name": "Builder1",
+ "is_green": True,
+ }, {
+ "name": "Builder2",
+ "is_green": True,
+ }]
+
+ def red_core_builders_names(self):
+ return []
+
+
+class MockSCM(Mock):
+
+ def __init__(self):
+ Mock.__init__(self)
+ self.checkout_root = os.getcwd()
+
+ def create_patch(self):
+ return "Patch1"
+
+ def commit_ids_from_commitish_arguments(self, args):
+ return ["Commitish1", "Commitish2"]
+
+ def commit_message_for_local_commit(self, commit_id):
+ if commit_id == "Commitish1":
+ return CommitMessage("CommitMessage1\n" \
+ "https://bugs.example.org/show_bug.cgi?id=42\n")
+ if commit_id == "Commitish2":
+ return CommitMessage("CommitMessage2\n" \
+ "https://bugs.example.org/show_bug.cgi?id=75\n")
+ raise Exception("Bogus commit_id in commit_message_for_local_commit.")
+
+ def create_patch_from_local_commit(self, commit_id):
+ if commit_id == "Commitish1":
+ return "Patch1"
+ if commit_id == "Commitish2":
+ return "Patch2"
+ raise Exception("Bogus commit_id in commit_message_for_local_commit.")
+
+ def diff_for_revision(self, revision):
+ return "DiffForRevision%s\n" \
+ "http://bugs.webkit.org/show_bug.cgi?id=12345" % revision
+
+ def svn_revision_from_commit_text(self, commit_text):
+ return "49824"
+
+ def modified_changelogs(self):
+ # Ideally we'd return something more interesting here. The problem is
+ # that LandDiff will try to actually read the path from disk!
+ return []
+
+
+class MockUser(object):
+
+ def prompt(self, message):
+ return "Mock user response"
+
+ def edit(self, files):
+ pass
+
+ def page(self, message):
+ pass
+
+ def confirm(self, message=None):
+ return True
+
+ def open_url(self, url):
+ log("MOCK: user.open_url: %s" % url)
+ pass
+
+
+class MockStatusServer(object):
+
+ def __init__(self):
+ self.host = "example.com"
+
+ def patch_status(self, queue_name, patch_id):
+ return None
+
+ def update_status(self, queue_name, status, patch=None, results_file=None):
+ return 187
+
+
+class MockBugzillaTool():
+
+ def __init__(self):
+ self.bugs = MockBugzilla()
+ self.buildbot = MockBuildBot()
+ self.executive = Mock()
+ self.user = MockUser()
+ self._scm = MockSCM()
+ self.status_server = MockStatusServer()
+
+ def scm(self):
+ return self._scm
+
+ def path(self):
+ return "echo"
diff --git a/WebKitTools/Scripts/webkitpy/multicommandtool.py b/WebKitTools/Scripts/webkitpy/multicommandtool.py
new file mode 100644
index 0000000..10cf426
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/multicommandtool.py
@@ -0,0 +1,299 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# MultiCommandTool provides a framework for writing svn-like/git-like tools
+# which are called with the following format:
+# tool-name [global options] command-name [command options]
+
+import sys
+
+from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
+
+from webkitpy.grammar import pluralize
+from webkitpy.webkit_logging import log
+
+
+class Command(object):
+ name = None
+ show_in_main_help = False
+ def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
+ self.help_text = help_text
+ self.long_help = long_help
+ self.argument_names = argument_names
+ self.required_arguments = self._parse_required_arguments(argument_names)
+ self.options = options
+ self.requires_local_commits = requires_local_commits
+ self.tool = None
+ # option_parser can be overridden by the tool using set_option_parser
+ # This default parser will be used for standalone_help printing.
+ self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
+
+ # This design is slightly awkward, but we need the
+ # tool to be able to create and modify the option_parser
+ # before it knows what Command to run.
+ def set_option_parser(self, option_parser):
+ self.option_parser = option_parser
+ self._add_options_to_parser()
+
+ def _add_options_to_parser(self):
+ options = self.options or []
+ for option in options:
+ self.option_parser.add_option(option)
+
+ # The tool calls bind_to_tool on each Command after adding it to its list.
+ def bind_to_tool(self, tool):
+ # Command instances can only be bound to one tool at a time.
+ if self.tool and tool != self.tool:
+ raise Exception("Command already bound to tool!")
+ self.tool = tool
+
+ @staticmethod
+ def _parse_required_arguments(argument_names):
+ required_args = []
+ if not argument_names:
+ return required_args
+ split_args = argument_names.split(" ")
+ for argument in split_args:
+ if argument[0] == '[':
+ # For now our parser is rather dumb. Do some minimal validation that
+ # we haven't confused it.
+ if argument[-1] != ']':
+ raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
+ else:
+ required_args.append(argument)
+ return required_args
+
+ def name_with_arguments(self):
+ usage_string = self.name
+ if self.options:
+ usage_string += " [options]"
+ if self.argument_names:
+ usage_string += " " + self.argument_names
+ return usage_string
+
+ def parse_args(self, args):
+ return self.option_parser.parse_args(args)
+
+ def check_arguments_and_execute(self, options, args, tool=None):
+ if len(args) < len(self.required_arguments):
+ log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
+ pluralize("argument", len(self.required_arguments)),
+ pluralize("argument", len(args)),
+ "'%s'" % " ".join(args),
+ " ".join(self.required_arguments),
+ tool.name(),
+ self.name))
+ return 1
+ return self.execute(options, args, tool) or 0
+
+ def standalone_help(self):
+ help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
+ if self.long_help:
+ help_text += "%s\n\n" % self.long_help
+ help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
+ return help_text
+
+ def execute(self, options, args, tool):
+ raise NotImplementedError, "subclasses must implement"
+
+ # main() exists so that Commands can be turned into stand-alone scripts.
+ # Other parts of the code will likely require modification to work stand-alone.
+ def main(self, args=sys.argv):
+ (options, args) = self.parse_args(args)
+ # Some commands might require a dummy tool
+ return self.check_arguments_and_execute(options, args)
+
+
+# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables.
+class AbstractDeclarativeCommand(Command):
+ help_text = None
+ argument_names = None
+ long_help = None
+ def __init__(self, options=None, **kwargs):
+ Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
+
+
+class HelpPrintingOptionParser(OptionParser):
+ def __init__(self, epilog_method=None, *args, **kwargs):
+ self.epilog_method = epilog_method
+ OptionParser.__init__(self, *args, **kwargs)
+
+ def error(self, msg):
+ self.print_usage(sys.stderr)
+ error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
+ # This method is overridden to add this one line to the output:
+ error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
+ self.exit(1, error_message)
+
+ # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
+ # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
+ def format_epilog(self, epilog):
+ if self.epilog_method:
+ return "\n%s\n" % self.epilog_method()
+ return ""
+
+
+class HelpCommand(AbstractDeclarativeCommand):
+ name = "help"
+ help_text = "Display information about this program or its subcommands"
+ argument_names = "[COMMAND]"
+
+ def __init__(self):
+ options = [
+ make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
+ ]
+ AbstractDeclarativeCommand.__init__(self, options)
+ self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
+
+ def _help_epilog(self):
+ # Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users?
+ if self.show_all_commands:
+ epilog = "All %prog commands:\n"
+ relevant_commands = self.tool.commands[:]
+ else:
+ epilog = "Common %prog commands:\n"
+ relevant_commands = filter(self.tool.should_show_in_main_help, self.tool.commands)
+ longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
+ relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
+ command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
+ epilog += "%s\n" % "".join(command_help_texts)
+ epilog += "See '%prog help --all-commands' to list all commands.\n"
+ epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
+ return epilog.replace("%prog", self.tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().
+
+ # FIXME: This is a hack so that we don't show --all-commands as a global option:
+ def _remove_help_options(self):
+ for option in self.options:
+ self.option_parser.remove_option(option.get_opt_string())
+
+ def execute(self, options, args, tool):
+ if args:
+ command = self.tool.command_by_name(args[0])
+ if command:
+ print command.standalone_help()
+ return 0
+
+ self.show_all_commands = options.show_all_commands
+ self._remove_help_options()
+ self.option_parser.print_help()
+ return 0
+
+
+class MultiCommandTool(object):
+ global_options = None
+
+ def __init__(self, name=None, commands=None):
+ self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
+ # Allow the unit tests to disable command auto-discovery.
+ self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
+ self.help_command = self.command_by_name(HelpCommand.name)
+ # Require a help command, even if the manual test list doesn't include one.
+ if not self.help_command:
+ self.help_command = HelpCommand()
+ self.commands.append(self.help_command)
+ for command in self.commands:
+ command.bind_to_tool(self)
+
+ @classmethod
+ def _add_all_subclasses(cls, class_to_crawl, seen_classes):
+ for subclass in class_to_crawl.__subclasses__():
+ if subclass not in seen_classes:
+ seen_classes.add(subclass)
+ cls._add_all_subclasses(subclass, seen_classes)
+
+ @classmethod
+ def _find_all_commands(cls):
+ commands = set()
+ cls._add_all_subclasses(Command, commands)
+ return sorted(commands)
+
+ def name(self):
+ return self._name
+
+ def _create_option_parser(self):
+ usage = "Usage: %prog [options] COMMAND [ARGS]"
+ return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)
+
+ @staticmethod
+ def _split_command_name_from_args(args):
+ # Assume the first argument which doesn't start with "-" is the command name.
+ command_index = 0
+ for arg in args:
+ if arg[0] != "-":
+ break
+ command_index += 1
+ else:
+ return (None, args[:])
+
+ command = args[command_index]
+ return (command, args[:command_index] + args[command_index + 1:])
+
+ def command_by_name(self, command_name):
+ for command in self.commands:
+ if command_name == command.name:
+ return command
+ return None
+
+ def path(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_show_in_main_help(self, command):
+ return command.show_in_main_help
+
+ def should_execute_command(self, command):
+ return True
+
+ def _add_global_options(self, option_parser):
+ global_options = self.global_options or []
+ for option in global_options:
+ option_parser.add_option(option)
+
+ def handle_global_options(self, options):
+ pass
+
+ def main(self, argv=sys.argv):
+ (command_name, args) = self._split_command_name_from_args(argv[1:])
+
+ option_parser = self._create_option_parser()
+ self._add_global_options(option_parser)
+
+ command = self.command_by_name(command_name) or self.help_command
+ if not command:
+ option_parser.error("%s is not a recognized command" % command_name)
+
+ command.set_option_parser(option_parser)
+ (options, args) = command.parse_args(args)
+ self.handle_global_options(options)
+
+ (should_execute, failure_reason) = self.should_execute_command(command)
+ if not should_execute:
+ log(failure_reason)
+ return 0 # FIXME: Should this really be 0?
+
+ return command.check_arguments_and_execute(options, args, self)
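
A minimal sketch of the intended usage, assuming it runs as a stand-alone script with webkitpy importable: each subcommand subclasses Command, the tool subclasses MultiCommandTool, and sys.argv is handed to main(). The names here (greet, example-tool) are invented for illustration; the unit tests that follow exercise the same flow:

    import sys

    from webkitpy.multicommandtool import Command, MultiCommandTool


    class Greet(Command):
        name = "greet"
        show_in_main_help = True

        def __init__(self):
            Command.__init__(self, "Print a greeting", argument_names="NAME")

        def execute(self, options, args, tool):
            print "Hello, %s!" % args[0]


    class ExampleTool(MultiCommandTool):
        def __init__(self):
            MultiCommandTool.__init__(self, name="example-tool")

        def path(self):
            return __file__

        def should_execute_command(self, command):
            return (True, None)


    if __name__ == "__main__":
        # "example-tool greet world" runs Greet; "example-tool help" lists commands.
        sys.exit(ExampleTool().main(sys.argv))
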
diff --git a/WebKitTools/Scripts/webkitpy/multicommandtool.pyc b/WebKitTools/Scripts/webkitpy/multicommandtool.pyc
new file mode 100644
index 0000000..4584643
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/multicommandtool.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py b/WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py
new file mode 100644
index 0000000..ae77e73
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/multicommandtool_unittest.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+import unittest
+from multicommandtool import MultiCommandTool, Command
+from webkitpy.outputcapture import OutputCapture
+
+from optparse import make_option
+
+class TrivialCommand(Command):
+ name = "trivial"
+ show_in_main_help = True
+ def __init__(self, **kwargs):
+ Command.__init__(self, "help text", **kwargs)
+
+ def execute(self, options, args, tool):
+ pass
+
+class UncommonCommand(TrivialCommand):
+ name = "uncommon"
+ show_in_main_help = False
+
+class CommandTest(unittest.TestCase):
+ def test_name_with_arguments(self):
+ command_with_args = TrivialCommand(argument_names="ARG1 ARG2")
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
+
+ command_with_args = TrivialCommand(options=[make_option("--my_option")])
+ self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
+
+ def test_parse_required_arguments(self):
+ self.assertEqual(Command._parse_required_arguments("ARG1 ARG2"), ["ARG1", "ARG2"])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] [ARG2]"), [])
+ self.assertEqual(Command._parse_required_arguments("[ARG1] ARG2"), ["ARG2"])
+ # Note: We might make our arg parsing smarter in the future and allow this type of arguments string.
+ self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
+
+ def test_required_arguments(self):
+ two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]")
+ expected_missing_args_error = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
+ exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_stderr=expected_missing_args_error)
+ self.assertEqual(exit_code, 1)
+
+
+class TrivialTool(MultiCommandTool):
+ def __init__(self, commands=None):
+ MultiCommandTool.__init__(self, name="trivial-tool", commands=commands)
+
+ def path(self):
+ return __file__
+
+ def should_execute_command(self, command):
+ return (True, None)
+
+
+class MultiCommandToolTest(unittest.TestCase):
+ def _assert_split(self, args, expected_split):
+ self.assertEqual(MultiCommandTool._split_command_name_from_args(args), expected_split)
+
+ def test_split_args(self):
+ # MultiCommandTool._split_command_name_from_args returns: (command, args)
+ full_args = ["--global-option", "command", "--option", "arg"]
+ full_args_expected = ("command", ["--global-option", "--option", "arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = []
+ full_args_expected = (None, [])
+ self._assert_split(full_args, full_args_expected)
+
+ full_args = ["command", "arg"]
+ full_args_expected = ("command", ["arg"])
+ self._assert_split(full_args, full_args_expected)
+
+ def test_command_by_name(self):
+ # This also tests Command auto-discovery.
+ tool = TrivialTool()
+ self.assertEqual(tool.command_by_name("trivial").name, "trivial")
+ self.assertEqual(tool.command_by_name("bar"), None)
+
+ def _assert_tool_main_outputs(self, tool, main_args, expected_stdout, expected_stderr = "", expected_exit_code=0):
+ exit_code = OutputCapture().assert_outputs(self, tool.main, [main_args], expected_stdout=expected_stdout, expected_stderr=expected_stderr)
+ self.assertEqual(exit_code, expected_exit_code)
+
+ def test_global_help(self):
+ tool = TrivialTool(commands=[TrivialCommand(), UncommonCommand()])
+ expected_common_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+Common trivial-tool commands:
+ trivial help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool"], expected_common_commands_help)
+ self._assert_tool_main_outputs(tool, ["tool", "help"], expected_common_commands_help)
+ expected_all_commands_help = """Usage: trivial-tool [options] COMMAND [ARGS]
+
+Options:
+ -h, --help show this help message and exit
+
+All trivial-tool commands:
+ help Display information about this program or its subcommands
+ trivial help text
+ uncommon help text
+
+See 'trivial-tool help --all-commands' to list all commands.
+See 'trivial-tool help COMMAND' for more information on a specific command.
+
+"""
+ self._assert_tool_main_outputs(tool, ["tool", "help", "--all-commands"], expected_all_commands_help)
+ # Test that arguments can be passed before commands as well
+ self._assert_tool_main_outputs(tool, ["tool", "--all-commands", "help"], expected_all_commands_help)
+
+
+ def test_command_help(self):
+ command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP")
+ tool = TrivialTool(commands=[command_with_options])
+ expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
+ self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/networktransaction.py b/WebKitTools/Scripts/webkitpy/networktransaction.py
new file mode 100644
index 0000000..65ea27d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/networktransaction.py
@@ -0,0 +1,63 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import time
+
+from mechanize import HTTPError
+from webkitpy.webkit_logging import log
+
+
+class NetworkTimeout(Exception):
+ pass
+
+
+class NetworkTransaction(object):
+ def __init__(self, initial_backoff_seconds=10, grown_factor=1.1, timeout_seconds=5*60*60):
+ self._initial_backoff_seconds = initial_backoff_seconds
+ self._grown_factor = grown_factor
+ self._timeout_seconds = timeout_seconds
+
+ def run(self, request):
+ self._total_sleep = 0
+ self._backoff_seconds = self._initial_backoff_seconds
+ while True:
+ try:
+ return request()
+ except HTTPError, e:
+ self._check_for_timeout()
+ log("Received HTTP status %s from server. Retrying in %s seconds..." % (e.code, self._backoff_seconds))
+ self._sleep()
+
+ def _check_for_timeout(self):
+ if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
+ raise NetworkTimeout()
+
+ def _sleep(self):
+ time.sleep(self._backoff_seconds)
+ self._total_sleep += self._backoff_seconds
+ self._backoff_seconds *= self._grown_factor
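
run() retries the supplied callable whenever it raises mechanize's HTTPError, sleeping initial_backoff_seconds before the first retry and multiplying the delay by grown_factor each time (10 s, 11 s, 12.1 s, ... with the defaults), and raises NetworkTimeout once the accumulated sleep would exceed timeout_seconds. A minimal sketch; the URL and the choice of mechanize.Browser for the request are illustrative only:

    from mechanize import Browser
    from webkitpy.networktransaction import NetworkTransaction, NetworkTimeout

    def fetch_front_page():
        # Any callable that may raise mechanize's HTTPError will do.
        return Browser().open("http://example.com/").read()

    transaction = NetworkTransaction(initial_backoff_seconds=5, timeout_seconds=60)
    try:
        page = transaction.run(fetch_front_page)
    except NetworkTimeout:
        page = None  # gave up after roughly a minute of accumulated back-off
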
diff --git a/WebKitTools/Scripts/webkitpy/networktransaction.pyc b/WebKitTools/Scripts/webkitpy/networktransaction.pyc
new file mode 100644
index 0000000..fb45bcb
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/networktransaction.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py b/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py
new file mode 100644
index 0000000..3cffe02
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/networktransaction_unittest.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from mechanize import HTTPError
+from webkitpy.networktransaction import NetworkTransaction, NetworkTimeout
+
+class NetworkTransactionTest(unittest.TestCase):
+ exception = Exception("Test exception")
+
+ def test_success(self):
+ transaction = NetworkTransaction()
+ self.assertEqual(transaction.run(lambda: 42), 42)
+
+ def _raise_exception(self):
+ raise self.exception
+
+ def test_exception(self):
+ transaction = NetworkTransaction()
+ did_process_exception = False
+ did_throw_exception = True
+ try:
+ transaction.run(lambda: self._raise_exception())
+ did_throw_exception = False
+ except Exception, e:
+ did_process_exception = True
+ self.assertEqual(e, self.exception)
+ self.assertTrue(did_throw_exception)
+ self.assertTrue(did_process_exception)
+
+ def _raise_http_error(self):
+ self._run_count += 1
+ if self._run_count < 3:
+ raise HTTPError("http://example.com/", 500, "internal server error", None, None)
+ return 42
+
+ def test_retry(self):
+ self._run_count = 0
+ transaction = NetworkTransaction(initial_backoff_seconds=0)
+ self.assertEqual(transaction.run(lambda: self._raise_http_error()), 42)
+ self.assertEqual(self._run_count, 3)
+
+ def test_timeout(self):
+ self._run_count = 0
+ transaction = NetworkTransaction(initial_backoff_seconds=60*60, timeout_seconds=60)
+ did_process_exception = False
+ did_throw_exception = True
+ try:
+ transaction.run(lambda: self._raise_http_error())
+ did_throw_exception = False
+ except NetworkTimeout, e:
+ did_process_exception = True
+ self.assertTrue(did_throw_exception)
+ self.assertTrue(did_process_exception)
diff --git a/WebKitTools/Scripts/webkitpy/outputcapture.py b/WebKitTools/Scripts/webkitpy/outputcapture.py
new file mode 100644
index 0000000..592a669
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/outputcapture.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Class for unittest support. Used for capturing stderr/stdout.
+
+import sys
+from StringIO import StringIO
+
+class OutputCapture(object):
+ def __init__(self):
+ self.saved_outputs = dict()
+
+ def _capture_output_with_name(self, output_name):
+ self.saved_outputs[output_name] = getattr(sys, output_name)
+ setattr(sys, output_name, StringIO())
+
+ def _restore_output_with_name(self, output_name):
+ captured_output = getattr(sys, output_name).getvalue()
+ setattr(sys, output_name, self.saved_outputs[output_name])
+ del self.saved_outputs[output_name]
+ return captured_output
+
+ def capture_output(self):
+ self._capture_output_with_name("stdout")
+ self._capture_output_with_name("stderr")
+
+ def restore_output(self):
+ return (self._restore_output_with_name("stdout"), self._restore_output_with_name("stderr"))
+
+ def assert_outputs(self, testcase, function, args=[], kwargs={}, expected_stdout="", expected_stderr=""):
+ self.capture_output()
+ return_value = function(*args, **kwargs)
+ (stdout_string, stderr_string) = self.restore_output()
+ testcase.assertEqual(stdout_string, expected_stdout)
+ testcase.assertEqual(stderr_string, expected_stderr)
+ # This is a little strange, but I don't know where else to return this information.
+ return return_value
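
assert_outputs() captures everything the call writes to sys.stdout and sys.stderr, compares both against the expected strings, and returns the call's return value so it can be checked as well. A minimal sketch inside a TestCase; noisy_add is an invented function under test:

    import unittest

    from webkitpy.outputcapture import OutputCapture


    def noisy_add(a, b):
        print "adding %s and %s" % (a, b)
        return a + b


    class NoisyAddTest(unittest.TestCase):
        def test_noisy_add(self):
            result = OutputCapture().assert_outputs(self, noisy_add, [2, 3],
                expected_stdout="adding 2 and 3\n")
            self.assertEqual(result, 5)


    if __name__ == "__main__":
        unittest.main()
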
diff --git a/WebKitTools/Scripts/webkitpy/patchcollection.py b/WebKitTools/Scripts/webkitpy/patchcollection.py
new file mode 100644
index 0000000..7e8603c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/patchcollection.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+class PersistentPatchCollectionDelegate:
+ def collection_name(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def fetch_potential_patch_ids(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def status_server(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def is_terminal_status(self, status):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class PersistentPatchCollection:
+ def __init__(self, delegate):
+ self._delegate = delegate
+ self._name = self._delegate.collection_name()
+ self._status = self._delegate.status_server()
+ self._status_cache = {}
+
+ def _cached_status(self, patch_id):
+ cached = self._status_cache.get(patch_id)
+ if cached:
+ return cached
+ status = self._status.patch_status(self._name, patch_id)
+ if status and self._delegate.is_terminal_status(status):
+ self._status_cache[patch_id] = status
+ return status
+
+ def next(self):
+ patch_ids = self._delegate.fetch_potential_patch_ids()
+ for patch_id in patch_ids:
+ status = self._cached_status(patch_id)
+ if not status or not self._delegate.is_terminal_status(status):
+ return patch_id
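+
+# Example usage (illustrative; MyDelegate and process_patch are placeholders):
+#
+#   collection = PersistentPatchCollection(MyDelegate())
+#   patch_id = collection.next()
+#   if patch_id is not None:
+#       process_patch(patch_id)
+#
+# next() returns the first patch id whose status on the status server is missing
+# or non-terminal, and returns None once every candidate has a terminal status.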
diff --git a/WebKitTools/Scripts/webkitpy/patchcollection.pyc b/WebKitTools/Scripts/webkitpy/patchcollection.pyc
new file mode 100644
index 0000000..18058d3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/patchcollection.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/patchcollection_unittest.py b/WebKitTools/Scripts/webkitpy/patchcollection_unittest.py
new file mode 100644
index 0000000..811fed9
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/patchcollection_unittest.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.mock import Mock
+from webkitpy.patchcollection import PersistentPatchCollection, PersistentPatchCollectionDelegate
+
+
+class TestPersistentPatchCollectionDelegate(PersistentPatchCollectionDelegate):
+ def collection_name(self):
+ return "test-collection"
+
+ def fetch_potential_patch_ids(self):
+ return [42, 192, 87]
+
+ def status_server(self):
+ return Mock()
+
+ def is_terminal_status(self, status):
+ return False
+
+
+class PersistentPatchCollectionTest(unittest.TestCase):
+ def test_next(self):
+ collection = PersistentPatchCollection(TestPersistentPatchCollectionDelegate())
+ collection.next()
diff --git a/WebKitTools/Scripts/webkitpy/queueengine.py b/WebKitTools/Scripts/webkitpy/queueengine.py
new file mode 100644
index 0000000..d14177d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/queueengine.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import time
+import traceback
+
+from datetime import datetime, timedelta
+
+from webkitpy.executive import ScriptError
+from webkitpy.webkit_logging import log, OutputTee
+from webkitpy.statusserver import StatusServer
+
+class QueueEngineDelegate:
+ def queue_log_path(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def work_item_log_path(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def begin_work_queue(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_continue_work_queue(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def next_work_item(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def should_proceed_with_work_item(self, work_item):
+ # returns (safe_to_proceed, waiting_message, patch)
+ raise NotImplementedError, "subclasses must implement"
+
+ def process_work_item(self, work_item):
+ raise NotImplementedError, "subclasses must implement"
+
+ def handle_unexpected_error(self, work_item, message):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class QueueEngine:
+ def __init__(self, name, delegate):
+ self._name = name
+ self._delegate = delegate
+ self._output_tee = OutputTee()
+
+ log_date_format = "%Y-%m-%d %H:%M:%S"
+ sleep_duration_text = "5 mins"
+ seconds_to_sleep = 300
+ handled_error_code = 2
+
+ # Child processes exit with a special code so the parent queue process can detect that the error was handled.
+ @classmethod
+ def exit_after_handled_error(cls, error):
+ log(error)
+ exit(cls.handled_error_code)
+
+ def run(self):
+ self._begin_logging()
+
+ self._delegate.begin_work_queue()
+ while (self._delegate.should_continue_work_queue()):
+ try:
+ self._ensure_work_log_closed()
+ work_item = self._delegate.next_work_item()
+ if not work_item:
+ self._sleep("No work item.")
+ continue
+ if not self._delegate.should_proceed_with_work_item(work_item):
+ self._sleep("Not proceeding with work item.")
+ continue
+
+ # FIXME: Work logs should not depend on bug_id specifically.
+ # This looks fixed, no?
+ self._open_work_log(work_item)
+ try:
+ self._delegate.process_work_item(work_item)
+ except ScriptError, e:
+ # Use a special exit code to indicate that the error was already
+ # handled in the child process and we should just keep looping.
+ if e.exit_code == self.handled_error_code:
+ continue
+ message = "Unexpected failure when landing patch! Please file a bug against webkit-patch.\n%s" % e.message_with_output()
+ self._delegate.handle_unexpected_error(work_item, message)
+ except KeyboardInterrupt, e:
+ log("\nUser terminated queue.")
+ return 1
+ except Exception, e:
+ traceback.print_exc()
+ # Don't try to tell the status bot, in case telling it causes an exception.
+ self._sleep("Exception while preparing queue")
+ # Reached only when the delegate stops the queue (should_continue_work_queue returns False).
+ self._ensure_work_log_closed()
+
+ def _begin_logging(self):
+ self._queue_log = self._output_tee.add_log(self._delegate.queue_log_path())
+ self._work_log = None
+
+ def _open_work_log(self, work_item):
+ work_item_log_path = self._delegate.work_item_log_path(work_item)
+ self._work_log = self._output_tee.add_log(work_item_log_path)
+
+ def _ensure_work_log_closed(self):
+ # If we still have a bug log open, close it.
+ if self._work_log:
+ self._output_tee.remove_log(self._work_log)
+ self._work_log = None
+
+ @classmethod
+ def _sleep_message(cls, message):
+ wake_time = datetime.now() + timedelta(seconds=cls.seconds_to_sleep)
+ return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(cls.log_date_format), cls.sleep_duration_text)
+
+ @classmethod
+ def _sleep(cls, message):
+ log(cls._sleep_message(message))
+ time.sleep(cls.seconds_to_sleep)
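+
+# Example usage (illustrative; MyQueueDelegate is a placeholder QueueEngineDelegate):
+#
+#   engine = QueueEngine("example-queue", MyQueueDelegate())
+#   engine.run()
+#
+# run() loops until should_continue_work_queue() returns False, sleeping for
+# seconds_to_sleep whenever there is no work item or it is not safe to proceed.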
diff --git a/WebKitTools/Scripts/webkitpy/queueengine.pyc b/WebKitTools/Scripts/webkitpy/queueengine.pyc
new file mode 100644
index 0000000..635bb57
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/queueengine.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/queueengine_unittest.py b/WebKitTools/Scripts/webkitpy/queueengine_unittest.py
new file mode 100644
index 0000000..a4036ea
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/queueengine_unittest.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import shutil
+import tempfile
+import unittest
+
+from webkitpy.executive import ScriptError
+from webkitpy.queueengine import QueueEngine, QueueEngineDelegate
+
+class LoggingDelegate(QueueEngineDelegate):
+ def __init__(self, test):
+ self._test = test
+ self._callbacks = []
+ self._run_before = False
+
+ expected_callbacks = [
+ 'queue_log_path',
+ 'begin_work_queue',
+ 'should_continue_work_queue',
+ 'next_work_item',
+ 'should_proceed_with_work_item',
+ 'work_item_log_path',
+ 'process_work_item',
+ 'should_continue_work_queue'
+ ]
+
+ def record(self, method_name):
+ self._callbacks.append(method_name)
+
+ def queue_log_path(self):
+ self.record("queue_log_path")
+ return os.path.join(self._test.temp_dir, "queue_log_path")
+
+ def work_item_log_path(self, work_item):
+ self.record("work_item_log_path")
+ return os.path.join(self._test.temp_dir, "work_log_path", "%s.log" % work_item)
+
+ def begin_work_queue(self):
+ self.record("begin_work_queue")
+
+ def should_continue_work_queue(self):
+ self.record("should_continue_work_queue")
+ if not self._run_before:
+ self._run_before = True
+ return True
+ return False
+
+ def next_work_item(self):
+ self.record("next_work_item")
+ return "work_item"
+
+ def should_proceed_with_work_item(self, work_item):
+ self.record("should_proceed_with_work_item")
+ self._test.assertEquals(work_item, "work_item")
+ fake_patch = { 'bug_id' : 42 }
+ return (True, "waiting_message", fake_patch)
+
+ def process_work_item(self, work_item):
+ self.record("process_work_item")
+ self._test.assertEquals(work_item, "work_item")
+
+ def handle_unexpected_error(self, work_item, message):
+ self.record("handle_unexpected_error")
+ self._test.assertEquals(work_item, "work_item")
+
+
+class ThrowErrorDelegate(LoggingDelegate):
+ def __init__(self, test, error_code):
+ LoggingDelegate.__init__(self, test)
+ self.error_code = error_code
+
+ def process_work_item(self, work_item):
+ self.record("process_work_item")
+ raise ScriptError(exit_code=self.error_code)
+
+
+class NotSafeToProceedDelegate(LoggingDelegate):
+ def should_proceed_with_work_item(self, work_item):
+ self.record("should_proceed_with_work_item")
+ self._test.assertEquals(work_item, "work_item")
+ return False
+
+
+class FastQueueEngine(QueueEngine):
+ def __init__(self, delegate):
+ QueueEngine.__init__(self, "fast-queue", delegate)
+
+ # No sleep for the wicked.
+ seconds_to_sleep = 0
+
+ def _sleep(self, message):
+ pass
+
+
+class QueueEngineTest(unittest.TestCase):
+ def test_trivial(self):
+ delegate = LoggingDelegate(self)
+ work_queue = QueueEngine("trivial-queue", delegate)
+ work_queue.run()
+ self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+ self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "queue_log_path")))
+ self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "work_log_path", "work_item.log")))
+
+ def test_unexpected_error(self):
+ delegate = ThrowErrorDelegate(self, 3)
+ work_queue = QueueEngine("error-queue", delegate)
+ work_queue.run()
+ expected_callbacks = LoggingDelegate.expected_callbacks[:]
+ work_item_index = expected_callbacks.index('process_work_item')
+ # The unexpected error should be handled right after process_work_item raises,
+ # before any other callback. Otherwise the callbacks should be normal.
+ expected_callbacks.insert(work_item_index + 1, 'handle_unexpected_error')
+ self.assertEquals(delegate._callbacks, expected_callbacks)
+
+ def test_handled_error(self):
+ delegate = ThrowErrorDelegate(self, QueueEngine.handled_error_code)
+ work_queue = QueueEngine("handled-error-queue", delegate)
+ work_queue.run()
+ self.assertEquals(delegate._callbacks, LoggingDelegate.expected_callbacks)
+
+ def test_not_safe_to_proceed(self):
+ delegate = NotSafeToProceedDelegate(self)
+ work_queue = FastQueueEngine(delegate)
+ work_queue.run()
+ expected_callbacks = LoggingDelegate.expected_callbacks[:]
+ next_work_item_index = expected_callbacks.index('next_work_item')
+ # We slice out the common prefix of the expected callbacks, adding 2 to the
+ # next_work_item index so the slice ends just after should_proceed_with_work_item.
+ # The delegate declines to proceed, so the queue loops back to should_continue_work_queue.
+ expected_callbacks = expected_callbacks[:next_work_item_index + 2]
+ expected_callbacks.append('should_continue_work_queue')
+ self.assertEquals(delegate._callbacks, expected_callbacks)
+
+ def setUp(self):
+ self.temp_dir = tempfile.mkdtemp(suffix="work_queue_test_logs")
+
+ def tearDown(self):
+ shutil.rmtree(self.temp_dir)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/scm.py b/WebKitTools/Scripts/webkitpy/scm.py
new file mode 100644
index 0000000..743f3fe
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/scm.py
@@ -0,0 +1,513 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Python module for interacting with an SCM system (like SVN or Git)
+
+import os
+import re
+import subprocess
+
+# Import WebKit-specific modules.
+from webkitpy.changelogs import ChangeLog
+from webkitpy.executive import Executive, run_command, ScriptError
+from webkitpy.webkit_logging import error, log
+
+def detect_scm_system(path):
+ if SVN.in_working_directory(path):
+ return SVN(cwd=path)
+
+ if Git.in_working_directory(path):
+ return Git(cwd=path)
+
+ return None
+
+def first_non_empty_line_after_index(lines, index=0):
+ first_non_empty_line = index
+ for line in lines[index:]:
+ if re.match("^\s*$", line):
+ first_non_empty_line += 1
+ else:
+ break
+ return first_non_empty_line
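+# For example, first_non_empty_line_after_index(["", "", "foo"]) returns 2,
+# and first_non_empty_line_after_index(["a", "", "b"], 1) returns 2.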
+
+
+class CommitMessage:
+ def __init__(self, message):
+ self.message_lines = message[first_non_empty_line_after_index(message, 0):]
+
+ def body(self, lstrip=False):
+ lines = self.message_lines[first_non_empty_line_after_index(self.message_lines, 1):]
+ if lstrip:
+ lines = [line.lstrip() for line in lines]
+ return "\n".join(lines) + "\n"
+
+ def description(self, lstrip=False, strip_url=False):
+ line = self.message_lines[0]
+ if lstrip:
+ line = line.lstrip()
+ if strip_url:
+ line = re.sub("^(\s*)<.+> ", "\1", line)
+ return line
+
+ def message(self):
+ return "\n".join(self.message_lines) + "\n"
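+
+ # For example (illustrative), CommitMessage(["", "Fix the widget", "", "Reviewed by X."])
+ # has description() == "Fix the widget" and body() == "Reviewed by X.\n".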
+
+
+class CheckoutNeedsUpdate(ScriptError):
+ def __init__(self, script_args, exit_code, output, cwd):
+ ScriptError.__init__(self, script_args=script_args, exit_code=exit_code, output=output, cwd=cwd)
+
+
+def commit_error_handler(error):
+ if re.search("resource out of date", error.output):
+ raise CheckoutNeedsUpdate(script_args=error.script_args, exit_code=error.exit_code, output=error.output, cwd=error.cwd)
+ Executive.default_error_handler(error)
+
+
+class SCM:
+ def __init__(self, cwd, dryrun=False):
+ self.cwd = cwd
+ self.checkout_root = self.find_checkout_root(self.cwd)
+ self.dryrun = dryrun
+
+ def scripts_directory(self):
+ return os.path.join(self.checkout_root, "WebKitTools", "Scripts")
+
+ def script_path(self, script_name):
+ return os.path.join(self.scripts_directory(), script_name)
+
+ def ensure_clean_working_directory(self, force_clean):
+ if not force_clean and not self.working_directory_is_clean():
+ print run_command(self.status_command(), error_handler=Executive.ignore_error)
+ raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
+
+ log("Cleaning working directory")
+ self.clean_working_directory()
+
+ def ensure_no_local_commits(self, force):
+ if not self.supports_local_commits():
+ return
+ commits = self.local_commits()
+ if not len(commits):
+ return
+ if not force:
+ error("Working directory has local commits, pass --force-clean to continue.")
+ self.discard_local_commits()
+
+ def apply_patch(self, patch, force=False):
+ # It's possible that the patch was not made from the root directory.
+ # We should detect and handle that case.
+ # FIXME: scm.py should not deal with fetching Attachment data. Attachment should just have a .data() accessor.
+ curl_process = subprocess.Popen(['curl', '--location', '--silent', '--show-error', patch.url()], stdout=subprocess.PIPE)
+ args = [self.script_path('svn-apply')]
+ if patch.reviewer():
+ args += ['--reviewer', patch.reviewer().full_name]
+ if force:
+ args.append('--force')
+
+ run_command(args, input=curl_process.stdout)
+
+ def run_status_and_extract_filenames(self, status_command, status_regexp):
+ filenames = []
+ for line in run_command(status_command).splitlines():
+ match = re.search(status_regexp, line)
+ if not match:
+ continue
+ # status = match.group('status')
+ filename = match.group('filename')
+ filenames.append(filename)
+ return filenames
+
+ def strip_r_from_svn_revision(self, svn_revision):
+ match = re.match("^r(?P<svn_revision>\d+)", svn_revision)
+ if (match):
+ return match.group('svn_revision')
+ return svn_revision
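+ # For example, both strip_r_from_svn_revision("r12345") and
+ # strip_r_from_svn_revision("12345") return "12345".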
+
+ def svn_revision_from_commit_text(self, commit_text):
+ match = re.search(self.commit_success_regexp(), commit_text, re.MULTILINE)
+ return match.group('svn_revision')
+
+ # ChangeLog-specific code doesn't really belong in scm.py, but this function is very useful.
+ def modified_changelogs(self):
+ changelog_paths = []
+ paths = self.changed_files()
+ for path in paths:
+ if os.path.basename(path) == "ChangeLog":
+ changelog_paths.append(path)
+ return changelog_paths
+
+ # FIXME: Requires unit test
+ # FIXME: commit_message_for_this_commit and modified_changelogs don't
+ # really belong here. We should have a separate module for
+ # handling ChangeLogs.
+ def commit_message_for_this_commit(self):
+ changelog_paths = self.modified_changelogs()
+ if not len(changelog_paths):
+ raise ScriptError(message="Found no modified ChangeLogs, cannot create a commit message.\n"
+ "All changes require a ChangeLog. See:\n"
+ "http://webkit.org/coding/contributing.html")
+
+ changelog_messages = []
+ for changelog_path in changelog_paths:
+ log("Parsing ChangeLog: %s" % changelog_path)
+ changelog_entry = ChangeLog(changelog_path).latest_entry()
+ if not changelog_entry:
+ raise ScriptError(message="Failed to parse ChangeLog: " + os.path.abspath(changelog_path))
+ changelog_messages.append(changelog_entry)
+
+ # FIXME: We should sort and label the ChangeLog messages like commit-log-editor does.
+ return CommitMessage("".join(changelog_messages).splitlines())
+
+ @staticmethod
+ def in_working_directory(path):
+ raise NotImplementedError, "subclasses must implement"
+
+ @staticmethod
+ def find_checkout_root(path):
+ raise NotImplementedError, "subclasses must implement"
+
+ @staticmethod
+ def commit_success_regexp():
+ raise NotImplementedError, "subclasses must implement"
+
+ def working_directory_is_clean(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def clean_working_directory(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def status_command(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def changed_files(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def display_name(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def create_patch(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ def diff_for_revision(self, revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def apply_reverse_diff(self, revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def revert_files(self, file_paths):
+ raise NotImplementedError, "subclasses must implement"
+
+ def commit_with_message(self, message):
+ raise NotImplementedError, "subclasses must implement"
+
+ def svn_commit_log(self, svn_revision):
+ raise NotImplementedError, "subclasses must implement"
+
+ def last_svn_commit_log(self):
+ raise NotImplementedError, "subclasses must implement"
+
+ # Subclasses must indicate if they support local commits,
+ # but the SCM baseclass will only call local_commits methods when this is true.
+ @staticmethod
+ def supports_local_commits():
+ raise NotImplementedError, "subclasses must implement"
+
+ def create_patch_from_local_commit(self, commit_id):
+ error("Your source control manager does not support creating a patch from a local commit.")
+
+ def create_patch_since_local_commit(self, commit_id):
+ error("Your source control manager does not support creating a patch from a local commit.")
+
+ def commit_locally_with_message(self, message):
+ error("Your source control manager does not support local commits.")
+
+ def discard_local_commits(self):
+ pass
+
+ def local_commits(self):
+ return []
+
+
+class SVN(SCM):
+ def __init__(self, cwd, dryrun=False):
+ SCM.__init__(self, cwd, dryrun)
+ self.cached_version = None
+
+ @staticmethod
+ def in_working_directory(path):
+ return os.path.isdir(os.path.join(path, '.svn'))
+
+ @classmethod
+ def find_uuid(cls, path):
+ if not cls.in_working_directory(path):
+ return None
+ return cls.value_from_svn_info(path, 'Repository UUID')
+
+ @classmethod
+ def value_from_svn_info(cls, path, field_name):
+ svn_info_args = ['svn', 'info', path]
+ info_output = run_command(svn_info_args).rstrip()
+ match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
+ if not match:
+ raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
+ return match.group('value')
+
+ @staticmethod
+ def find_checkout_root(path):
+ uuid = SVN.find_uuid(path)
+ # If |path| is not in a working directory, we're supposed to return |path|.
+ if not uuid:
+ return path
+ # Search up the directory hierarchy until we find a different UUID.
+ last_path = None
+ while True:
+ if uuid != SVN.find_uuid(path):
+ return last_path
+ last_path = path
+ (path, last_component) = os.path.split(path)
+ if last_path == path:
+ return None
+
+ @staticmethod
+ def commit_success_regexp():
+ return "^Committed revision (?P<svn_revision>\d+)\.$"
+
+ def svn_version(self):
+ if not self.cached_version:
+ self.cached_version = run_command(['svn', '--version', '--quiet'])
+
+ return self.cached_version
+
+ def working_directory_is_clean(self):
+ return run_command(['svn', 'diff']) == ""
+
+ def clean_working_directory(self):
+ run_command(['svn', 'revert', '-R', '.'])
+
+ def status_command(self):
+ return ['svn', 'status']
+
+ def changed_files(self):
+ if self.svn_version() > "1.6":
+ status_regexp = "^(?P<status>[ACDMR]).{6} (?P<filename>.+)$"
+ else:
+ status_regexp = "^(?P<status>[ACDMR]).{5} (?P<filename>.+)$"
+ return self.run_status_and_extract_filenames(self.status_command(), status_regexp)
+
+ @staticmethod
+ def supports_local_commits():
+ return False
+
+ def display_name(self):
+ return "svn"
+
+ def create_patch(self):
+ return run_command(self.script_path("svn-create-patch"), cwd=self.checkout_root, return_stderr=False)
+
+ def diff_for_revision(self, revision):
+ return run_command(['svn', 'diff', '-c', str(revision)])
+
+ def _repository_url(self):
+ return self.value_from_svn_info(self.checkout_root, 'URL')
+
+ def apply_reverse_diff(self, revision):
+ # '-c -revision' applies the inverse diff of 'revision'
+ svn_merge_args = ['svn', 'merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
+ log("WARNING: svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.")
+ log("Running '%s'" % " ".join(svn_merge_args))
+ run_command(svn_merge_args)
+
+ def revert_files(self, file_paths):
+ run_command(['svn', 'revert'] + file_paths)
+
+ def commit_with_message(self, message):
+ if self.dryrun:
+ # Return a string which looks like a commit so that things which parse this output will succeed.
+ return "Dry run, no commit.\nCommitted revision 0."
+ return run_command(['svn', 'commit', '-m', message], error_handler=commit_error_handler)
+
+ def svn_commit_log(self, svn_revision):
+ svn_revision = self.strip_r_from_svn_revision(str(svn_revision))
+ return run_command(['svn', 'log', '--non-interactive', '--revision', svn_revision]);
+
+ def last_svn_commit_log(self):
+ # BASE is the checkout revision, HEAD is the remote repository revision
+ # http://svnbook.red-bean.com/en/1.0/ch03s03.html
+ return self.svn_commit_log('BASE')
+
+# All git-specific logic should go here.
+class Git(SCM):
+ def __init__(self, cwd, dryrun=False):
+ SCM.__init__(self, cwd, dryrun)
+
+ @classmethod
+ def in_working_directory(cls, path):
+ return run_command(['git', 'rev-parse', '--is-inside-work-tree'], cwd=path, error_handler=Executive.ignore_error).rstrip() == "true"
+
+ @classmethod
+ def find_checkout_root(cls, path):
+ # "git rev-parse --show-cdup" would be another way to get to the root
+ (checkout_root, dot_git) = os.path.split(run_command(['git', 'rev-parse', '--git-dir'], cwd=path))
+ # With Python 2.6 we could use os.path.relpath: checkout_root = os.path.relpath(checkout_root, path)
+ if not os.path.isabs(checkout_root): # Sometimes git returns relative paths
+ checkout_root = os.path.join(path, checkout_root)
+ return checkout_root
+
+ @staticmethod
+ def commit_success_regexp():
+ return "^Committed r(?P<svn_revision>\d+)$"
+
+
+ def discard_local_commits(self):
+ run_command(['git', 'reset', '--hard', 'trunk'])
+
+ def local_commits(self):
+ return run_command(['git', 'log', '--pretty=oneline', 'HEAD...trunk']).splitlines()
+
+ def rebase_in_progress(self):
+ return os.path.exists(os.path.join(self.checkout_root, '.git/rebase-apply'))
+
+ def working_directory_is_clean(self):
+ return run_command(['git', 'diff-index', 'HEAD']) == ""
+
+ def clean_working_directory(self):
+ # Could run git clean here too, but that wouldn't match working_directory_is_clean
+ run_command(['git', 'reset', '--hard', 'HEAD'])
+ # Abort any in-progress rebase, even though this does not match working_directory_is_clean.
+ if self.rebase_in_progress():
+ run_command(['git', 'rebase', '--abort'])
+
+ def status_command(self):
+ return ['git', 'status']
+
+ def changed_files(self):
+ status_command = ['git', 'diff', '-r', '--name-status', '-C', '-M', 'HEAD']
+ status_regexp = '^(?P<status>[ADM])\t(?P<filename>.+)$'
+ return self.run_status_and_extract_filenames(status_command, status_regexp)
+
+ @staticmethod
+ def supports_local_commits():
+ return True
+
+ def display_name(self):
+ return "git"
+
+ def create_patch(self):
+ return run_command(['git', 'diff', '--binary', 'HEAD'])
+
+ @classmethod
+ def git_commit_from_svn_revision(cls, revision):
+ # git svn find-rev always exits 0, even when the revision is not found.
+ return run_command(['git', 'svn', 'find-rev', 'r%s' % revision]).rstrip()
+
+ def diff_for_revision(self, revision):
+ git_commit = self.git_commit_from_svn_revision(revision)
+ return self.create_patch_from_local_commit(git_commit)
+
+ def apply_reverse_diff(self, revision):
+ # Assume the revision is an svn revision.
+ git_commit = self.git_commit_from_svn_revision(revision)
+ if not git_commit:
+ raise ScriptError(message='Failed to find git commit for revision %s, git svn log output: "%s"' % (revision, git_commit))
+
+ # I think this will always fail due to ChangeLogs.
+ # FIXME: We need to detect specific failure conditions and handle them.
+ run_command(['git', 'revert', '--no-commit', git_commit], error_handler=Executive.ignore_error)
+
+ # Fix any ChangeLogs if necessary.
+ changelog_paths = self.modified_changelogs()
+ if len(changelog_paths):
+ run_command([self.script_path('resolve-ChangeLogs')] + changelog_paths)
+
+ def revert_files(self, file_paths):
+ run_command(['git', 'checkout', 'HEAD'] + file_paths)
+
+ def commit_with_message(self, message):
+ self.commit_locally_with_message(message)
+ return self.push_local_commits_to_server()
+
+ def svn_commit_log(self, svn_revision):
+ svn_revision = self.strip_r_from_svn_revision(svn_revision)
+ return run_command(['git', 'svn', 'log', '-r', svn_revision])
+
+ def last_svn_commit_log(self):
+ return run_command(['git', 'svn', 'log', '--limit=1'])
+
+ # Git-specific methods:
+
+ def create_patch_from_local_commit(self, commit_id):
+ return run_command(['git', 'diff', '--binary', commit_id + "^.." + commit_id])
+
+ def create_patch_since_local_commit(self, commit_id):
+ return run_command(['git', 'diff', '--binary', commit_id])
+
+ def commit_locally_with_message(self, message):
+ run_command(['git', 'commit', '--all', '-F', '-'], input=message)
+
+ def push_local_commits_to_server(self):
+ if self.dryrun:
+ # Return a string which looks like a commit so that things which parse this output will succeed.
+ return "Dry run, no remote commit.\nCommitted r0"
+ return run_command(['git', 'svn', 'dcommit'], error_handler=commit_error_handler)
+
+ # This function supports the following argument formats:
+ # no args : rev-list trunk..HEAD
+ # A..B : rev-list A..B
+ # A...B : error!
+ # A B : [A, B] (different from git diff, which would use "rev-list A..B")
+ def commit_ids_from_commitish_arguments(self, args):
+ if not len(args):
+ # FIXME: trunk is not always the remote branch name, need a way to detect the name.
+ args.append('trunk..HEAD')
+
+ commit_ids = []
+ for commitish in args:
+ if '...' in commitish:
+ raise ScriptError(message="'...' is not supported (found in '%s'). Did you mean '..'?" % commitish)
+ elif '..' in commitish:
+ commit_ids += reversed(run_command(['git', 'rev-list', commitish]).splitlines())
+ else:
+ # Turn single commits or branch or tag names into commit ids.
+ commit_ids += run_command(['git', 'rev-parse', '--revs-only', commitish]).splitlines()
+ return commit_ids
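+ # For example, commit_ids_from_commitish_arguments(['HEAD~2..HEAD']) returns the
+ # ids of HEAD~1 and HEAD, oldest first.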
+
+ def commit_message_for_local_commit(self, commit_id):
+ commit_lines = run_command(['git', 'cat-file', 'commit', commit_id]).splitlines()
+
+ # Skip the git headers.
+ first_line_after_headers = 0
+ for line in commit_lines:
+ first_line_after_headers += 1
+ if line == "":
+ break
+ return CommitMessage(commit_lines[first_line_after_headers:])
+
+ def files_changed_summary_for_commit(self, commit_id):
+ return run_command(['git', 'diff-tree', '--shortstat', '--no-commit-id', commit_id])
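+
+# Example usage (illustrative):
+#
+#   scm = detect_scm_system(os.getcwd())
+#   if scm:
+#       print scm.display_name()   # "svn" or "git"
+#       print scm.checkout_root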
diff --git a/WebKitTools/Scripts/webkitpy/scm.pyc b/WebKitTools/Scripts/webkitpy/scm.pyc
new file mode 100644
index 0000000..520f611
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/scm.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/scm_unittest.py b/WebKitTools/Scripts/webkitpy/scm_unittest.py
new file mode 100644
index 0000000..73faf40
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/scm_unittest.py
@@ -0,0 +1,595 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import base64
+import os
+import os.path
+import re
+import stat
+import subprocess
+import tempfile
+import unittest
+import urllib
+
+from datetime import date
+from webkitpy.executive import Executive, run_command, ScriptError
+from webkitpy.scm import detect_scm_system, SCM, CheckoutNeedsUpdate, commit_error_handler
+from webkitpy.bugzilla import Attachment # FIXME: This should not be needed
+
+# Eventually we will want to write tests which work for both SCMs (like update_webkit, changed_files, etc.),
+# perhaps through some SCMTest base class which both SVNTest and GitTest inherit from.
+
+# FIXME: This should be unified into one of the executive.py commands!
+def run_silent(args, cwd=None):
+ process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
+ process.communicate() # ignore output
+ exit_code = process.wait()
+ if exit_code:
+ raise ScriptError('Failed to run "%s" exit_code: %d cwd: %s' % (args, exit_code, cwd))
+
+def write_into_file_at_path(file_path, contents):
+ file = open(file_path, 'w')
+ file.write(contents)
+ file.close()
+
+def read_from_path(file_path):
+ file = open(file_path, 'r')
+ contents = file.read()
+ file.close()
+ return contents
+
+# Exists to share svn repository creation code between the git and svn tests
+class SVNTestRepository:
+ @staticmethod
+ def _setup_test_commits(test_object):
+ # Add some test commits
+ os.chdir(test_object.svn_checkout_path)
+ test_file = open('test_file', 'w')
+ test_file.write("test1")
+ test_file.flush()
+
+ run_command(['svn', 'add', 'test_file'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'initial commit'])
+
+ test_file.write("test2")
+ test_file.flush()
+
+ run_command(['svn', 'commit', '--quiet', '--message', 'second commit'])
+
+ test_file.write("test3\n")
+ test_file.flush()
+
+ run_command(['svn', 'commit', '--quiet', '--message', 'third commit'])
+
+ test_file.write("test4\n")
+ test_file.close()
+
+ run_command(['svn', 'commit', '--quiet', '--message', 'fourth commit'])
+
+ # svn does not seem to update after commit as I would expect.
+ run_command(['svn', 'update'])
+
+ @classmethod
+ def setup(cls, test_object):
+ # Create a test SVN repository
+ test_object.svn_repo_path = tempfile.mkdtemp(suffix="svn_test_repo")
+ test_object.svn_repo_url = "file://%s" % test_object.svn_repo_path # Not sure this will work on windows
+ # git svn complains if we don't pass --pre-1.5-compatible, not sure why:
+ # Expected FS format '2'; found format '3' at /usr/local/libexec/git-core//git-svn line 1477
+ run_command(['svnadmin', 'create', '--pre-1.5-compatible', test_object.svn_repo_path])
+
+ # Create a test svn checkout
+ test_object.svn_checkout_path = tempfile.mkdtemp(suffix="svn_test_checkout")
+ run_command(['svn', 'checkout', '--quiet', test_object.svn_repo_url, test_object.svn_checkout_path])
+
+ cls._setup_test_commits(test_object)
+
+ @classmethod
+ def tear_down(cls, test_object):
+ run_command(['rm', '-rf', test_object.svn_repo_path])
+ run_command(['rm', '-rf', test_object.svn_checkout_path])
+
+# For testing the SCM baseclass directly.
+class SCMClassTests(unittest.TestCase):
+ def setUp(self):
+ self.dev_null = open(os.devnull, "w") # Used to make our Popen calls quiet.
+
+ def tearDown(self):
+ self.dev_null.close()
+
+ def test_run_command_with_pipe(self):
+ input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null)
+ self.assertEqual(run_command(['grep', 'bar'], input=input_process.stdout), "bar\n")
+
+ # Test the non-pipe case too:
+ self.assertEqual(run_command(['grep', 'bar'], input="foo\nbar"), "bar\n")
+
+ command_returns_non_zero = ['/bin/sh', '--invalid-option']
+ # Test when the input pipe process fails.
+ input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
+ self.assertTrue(input_process.poll() != 0)
+ self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
+
+ # Test when the run_command process fails.
+ input_process = subprocess.Popen(['echo', 'foo\nbar'], stdout=subprocess.PIPE, stderr=self.dev_null) # The input process succeeds; the command run by run_command is what fails.
+ self.assertRaises(ScriptError, run_command, command_returns_non_zero, input=input_process.stdout)
+
+ def test_error_handlers(self):
+ git_failure_message="Merge conflict during commit: Your file or directory 'WebCore/ChangeLog' is probably out-of-date: resource out of date; try updating at /usr/local/libexec/git-core//git-svn line 469"
+ svn_failure_message="""svn: Commit failed (details follow):
+svn: File or directory 'ChangeLog' is out of date; try updating
+svn: resource out of date; try updating
+"""
+ command_does_not_exist = ['does_not_exist', 'invalid_option']
+ self.assertRaises(OSError, run_command, command_does_not_exist)
+ self.assertRaises(OSError, run_command, command_does_not_exist, error_handler=Executive.ignore_error)
+
+ command_returns_non_zero = ['/bin/sh', '--invalid-option']
+ self.assertRaises(ScriptError, run_command, command_returns_non_zero)
+ # Check if returns error text:
+ self.assertTrue(run_command(command_returns_non_zero, error_handler=Executive.ignore_error))
+
+ self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=git_failure_message))
+ self.assertRaises(CheckoutNeedsUpdate, commit_error_handler, ScriptError(output=svn_failure_message))
+ self.assertRaises(ScriptError, commit_error_handler, ScriptError(output='blah blah blah'))
+
+
+# GitTest and SVNTest inherit from this so any test_ methods here will be run once for this class and then once for each subclass.
+class SCMTest(unittest.TestCase):
+ def _create_patch(self, patch_contents):
+ patch_path = os.path.join(self.svn_checkout_path, 'patch.diff')
+ write_into_file_at_path(patch_path, patch_contents)
+ patch = {}
+ patch['reviewer'] = 'Joe Cool'
+ patch['bug_id'] = '12345'
+ patch['url'] = 'file://%s' % urllib.pathname2url(patch_path)
+ return Attachment(patch, None) # FIXME: This is a hack, scm.py shouldn't be fetching attachment data.
+
+ def _setup_webkittools_scripts_symlink(self, local_scm):
+ webkit_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
+ webkit_scripts_directory = webkit_scm.scripts_directory()
+ local_scripts_directory = local_scm.scripts_directory()
+ os.mkdir(os.path.dirname(local_scripts_directory))
+ os.symlink(webkit_scripts_directory, local_scripts_directory)
+
+ # Tests which both GitTest and SVNTest should run.
+ # FIXME: There must be a simpler way to add these w/o adding a wrapper method to both subclasses
+ def _shared_test_commit_with_message(self):
+ write_into_file_at_path('test_file', 'more test content')
+ commit_text = self.scm.commit_with_message('another test commit')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '5')
+
+ self.scm.dryrun = True
+ write_into_file_at_path('test_file', 'still more test content')
+ commit_text = self.scm.commit_with_message('yet another test commit')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '0')
+
+ def _shared_test_reverse_diff(self):
+ self._setup_webkittools_scripts_symlink(self.scm) # Git's apply_reverse_diff uses resolve-ChangeLogs
+ # Only test the simple case, as any other will end up with conflict markers.
+ self.scm.apply_reverse_diff('4')
+ self.assertEqual(read_from_path('test_file'), "test1test2test3\n")
+
+ def _shared_test_diff_for_revision(self):
+ # Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
+ r3_patch = self.scm.diff_for_revision(3)
+ self.assertTrue(re.search('test3', r3_patch))
+ self.assertFalse(re.search('test4', r3_patch))
+ self.assertTrue(re.search('test2', r3_patch))
+ self.assertTrue(re.search('test2', self.scm.diff_for_revision(2)))
+
+ def _shared_test_svn_apply_git_patch(self):
+ self._setup_webkittools_scripts_symlink(self.scm)
+ git_binary_addition = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+new file mode 100644
+index 0000000000000000000000000000000000000000..64a9532e7794fcd791f6f12157406d90
+60151690
+GIT binary patch
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+literal 0
+HcmV?d00001
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_addition))
+ added = read_from_path('fizzbuzz7.gif')
+ self.assertEqual(512, len(added))
+ self.assertTrue(added.startswith('GIF89a'))
+ self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # The file already exists.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_addition))
+
+ git_binary_modification = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+index 64a9532e7794fcd791f6f12157406d9060151690..323fae03f4606ea9991df8befbb2fca7
+GIT binary patch
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+literal 512
+zcmZ?wbhEHbRAx|MU|?iW{Kxc~?KofD;ckY;H+&5HnHl!!GQMD7h+sU{_)e9f^V3c?
+zhJP##HdZC#4K}7F68@!1jfWQg2daCm-gs#3|JREDT>c+pG4L<_2;w##WMO#ysPPap
+zLqpAf1OE938xAsSp4!5f-o><?VKe(#0jEcwfHGF4%M1^kRs14oVBp2ZEL{E1N<-zJ
+zsfLmOtKta;2_;2c#^S1-8cf<nb!QnGl>c!Xe6RXvrEtAWBvSDTgTO1j3vA31Puw!A
+zs(87q)j_mVDTqBo-P+03-P5mHCEnJ+x}YdCuS7#bCCyePUe(ynK+|4b-3qK)T?Z&)
+zYG+`tl4h?GZv_$t82}X4*DTE|$;{DEiPyF@)U-1+FaX++T9H{&%cag`W1|zVP@`%b
+zqiSkp6{BTpWTkCr!=<C6Q=?#~R8^JfrliAF6Q^gV9Iup8RqCXqqhqC`qsyhk<-nlB
+z00f{QZvfK&|Nm#oZ0TQl`Yr$BIa6A@16O26ud7H<QM=xl`toLKnz-3h@9c9q&wm|X
+z{89I|WPyD!*M?gv?q`;L=2YFeXrJQNti4?}s!zFo=5CzeBxC69xA<zrjP<wUcCRh4
+ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_modification))
+ modified = read_from_path('fizzbuzz7.gif')
+ self.assertEqual('foobar\n', modified)
+ self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # Applying the same modification should fail.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_modification))
+
+ git_binary_deletion = """diff --git a/fizzbuzz7.gif b/fizzbuzz7.gif
+deleted file mode 100644
+index 323fae0..0000000
+GIT binary patch
+literal 0
+HcmV?d00001
+
+literal 7
+OcmYex&reD$;sO8*F9L)B
+
+"""
+ self.scm.apply_patch(self._create_patch(git_binary_deletion))
+ self.assertFalse(os.path.exists('fizzbuzz7.gif'))
+ self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())
+
+ # Cannot delete again.
+ self.assertRaises(ScriptError, self.scm.apply_patch, self._create_patch(git_binary_deletion))
+
+
+class SVNTest(SCMTest):
+
+ @staticmethod
+ def _set_date_and_reviewer(changelog_entry):
+ # Joe Cool matches the reviewer set in SCMTest._create_patch
+ changelog_entry = changelog_entry.replace('REVIEWER_HERE', 'Joe Cool')
+ # svn-apply will update ChangeLog entries with today's date.
+ return changelog_entry.replace('DATE_HERE', date.today().isoformat())
+
+ def test_svn_apply(self):
+ first_entry = """2009-10-26 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
+ Most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ intermediate_entry = """2009-10-27 Eric Seidel <eric@webkit.org>
+
+ Reviewed by Baz Bar.
+
+ A more awesomer change yet!
+
+ * scm_unittest.py:
+"""
+ one_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog (revision 5)
++++ ChangeLog (working copy)
+@@ -1,5 +1,13 @@
+ 2009-10-26 Eric Seidel <eric@webkit.org>
+
++ Reviewed by NOBODY (OOPS!).
++
++ Second most awesome change ever.
++
++ * scm_unittest.py:
++
++2009-10-26 Eric Seidel <eric@webkit.org>
++
+ Reviewed by Foo Bar.
+
+ Most awesome change ever.
+"""
+ one_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
+
+ Reviewed by REVIEWER_HERE.
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ two_line_overlap_patch = """Index: ChangeLog
+===================================================================
+--- ChangeLog (revision 5)
++++ ChangeLog (working copy)
+@@ -2,6 +2,14 @@
+
+ Reviewed by Foo Bar.
+
++ Second most awesome change ever.
++
++ * scm_unittest.py:
++
++2009-10-26 Eric Seidel <eric@webkit.org>
++
++ Reviewed by Foo Bar.
++
+ Most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ two_line_overlap_entry = """DATE_HERE Eric Seidel <eric@webkit.org>
+
+ Reviewed by Foo Bar.
+
+ Second most awesome change ever.
+
+ * scm_unittest.py:
+"""
+ write_into_file_at_path('ChangeLog', first_entry)
+ run_command(['svn', 'add', 'ChangeLog'])
+ run_command(['svn', 'commit', '--quiet', '--message', 'ChangeLog commit'])
+
+ # Patch files were created against just 'first_entry'.
+ # Add a second commit to make svn-apply have to apply the patches with fuzz.
+ changelog_contents = "%s\n%s" % (intermediate_entry, first_entry)
+ write_into_file_at_path('ChangeLog', changelog_contents)
+ run_command(['svn', 'commit', '--quiet', '--message', 'Intermediate commit'])
+
+ self._setup_webkittools_scripts_symlink(self.scm)
+ self.scm.apply_patch(self._create_patch(one_line_overlap_patch))
+ expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(one_line_overlap_entry), changelog_contents)
+ self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
+ self.scm.revert_files(['ChangeLog'])
+ self.scm.apply_patch(self._create_patch(two_line_overlap_patch))
+ expected_changelog_contents = "%s\n%s" % (self._set_date_and_reviewer(two_line_overlap_entry), changelog_contents)
+ self.assertEquals(read_from_path('ChangeLog'), expected_changelog_contents)
+
+ def setUp(self):
+ SVNTestRepository.setup(self)
+ os.chdir(self.svn_checkout_path)
+ self.scm = detect_scm_system(self.svn_checkout_path)
+
+ def tearDown(self):
+ SVNTestRepository.tear_down(self)
+
+ def test_create_patch_is_full_patch(self):
+ test_dir_path = os.path.join(self.svn_checkout_path, 'test_dir')
+ os.mkdir(test_dir_path)
+ test_file_path = os.path.join(test_dir_path, 'test_file2')
+ write_into_file_at_path(test_file_path, 'test content')
+ run_command(['svn', 'add', 'test_dir'])
+
+ # create_patch depends on 'svn-create-patch', so make a dummy version.
+ scripts_path = os.path.join(self.svn_checkout_path, 'WebKitTools', 'Scripts')
+ os.makedirs(scripts_path)
+ create_patch_path = os.path.join(scripts_path, 'svn-create-patch')
+ write_into_file_at_path(create_patch_path, '#!/bin/sh\necho $PWD') # We could pass -n to suppress the trailing \n, but not all implementations of echo accept -n.
+ os.chmod(create_patch_path, stat.S_IXUSR | stat.S_IRUSR)
+
+ # Change into our test directory and run the create_patch command.
+ os.chdir(test_dir_path)
+ scm = detect_scm_system(test_dir_path)
+ self.assertEqual(scm.checkout_root, self.svn_checkout_path) # Sanity check that detection worked right.
+ patch_contents = scm.create_patch()
+ # Our fake 'svn-create-patch' returns $PWD instead of a patch, check that it was executed from the root of the repo.
+ self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
+
+ def test_detection(self):
+ scm = detect_scm_system(self.svn_checkout_path)
+ self.assertEqual(scm.display_name(), "svn")
+ self.assertEqual(scm.supports_local_commits(), False)
+
+ def test_apply_small_binary_patch(self):
+ patch_contents = """Index: test_file.swf
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = application/octet-stream
+
+Property changes on: test_file.swf
+___________________________________________________________________
+Name: svn:mime-type
+ + application/octet-stream
+
+
+Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
+"""
+ expected_contents = base64.b64decode("Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==")
+ self._setup_webkittools_scripts_symlink(self.scm)
+ patch_file = self._create_patch(patch_contents)
+ self.scm.apply_patch(patch_file)
+ actual_contents = read_from_path("test_file.swf")
+ self.assertEqual(actual_contents, expected_contents)
+
+ def test_apply_svn_patch(self):
+ scm = detect_scm_system(self.svn_checkout_path)
+ patch = self._create_patch(run_command(['svn', 'diff', '-r4:3']))
+ self._setup_webkittools_scripts_symlink(scm)
+ scm.apply_patch(patch)
+
+ def test_apply_svn_patch_force(self):
+ scm = detect_scm_system(self.svn_checkout_path)
+ patch = self._create_patch(run_command(['svn', 'diff', '-r2:4']))
+ self._setup_webkittools_scripts_symlink(scm)
+ self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
+
+ def test_commit_logs(self):
+ # Commits have dates and usernames in them, so we can't just direct compare.
+ self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
+ self.assertTrue(re.search('second commit', self.scm.svn_commit_log(2)))
+
+ def test_commit_text_parsing(self):
+ self._shared_test_commit_with_message()
+
+ def test_reverse_diff(self):
+ self._shared_test_reverse_diff()
+
+ def test_diff_for_revision(self):
+ self._shared_test_diff_for_revision()
+
+ def test_svn_apply_git_patch(self):
+ self._shared_test_svn_apply_git_patch()
+
+class GitTest(SCMTest):
+
+ def _setup_git_clone_of_svn_repository(self):
+ self.git_checkout_path = tempfile.mkdtemp(suffix="git_test_checkout")
+ # --quiet doesn't make git svn silent, so we use run_silent to redirect output
+ run_silent(['git', 'svn', '--quiet', 'clone', self.svn_repo_url, self.git_checkout_path])
+
+ def _tear_down_git_clone_of_svn_repository(self):
+ run_command(['rm', '-rf', self.git_checkout_path])
+
+ def setUp(self):
+ SVNTestRepository.setup(self)
+ self._setup_git_clone_of_svn_repository()
+ os.chdir(self.git_checkout_path)
+ self.scm = detect_scm_system(self.git_checkout_path)
+
+ def tearDown(self):
+ SVNTestRepository.tear_down(self)
+ self._tear_down_git_clone_of_svn_repository()
+
+ def test_detection(self):
+ scm = detect_scm_system(self.git_checkout_path)
+ self.assertEqual(scm.display_name(), "git")
+ self.assertEqual(scm.supports_local_commits(), True)
+
+ def test_rebase_in_progress(self):
+ svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
+ write_into_file_at_path(svn_test_file, "svn_checkout")
+ run_command(['svn', 'commit', '--message', 'commit to conflict with git commit'], cwd=self.svn_checkout_path)
+
+ git_test_file = os.path.join(self.git_checkout_path, 'test_file')
+ write_into_file_at_path(git_test_file, "git_checkout")
+ run_command(['git', 'commit', '-a', '-m', 'commit to be thrown away by rebase abort'])
+
+ # --quiet doesn't make git svn silent, so use run_silent to redirect output
+ self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
+
+ scm = detect_scm_system(self.git_checkout_path)
+ self.assertTrue(scm.rebase_in_progress())
+
+ # Make sure our cleanup works.
+ scm.clean_working_directory()
+ self.assertFalse(scm.rebase_in_progress())
+
+ # Make sure cleanup doesn't throw when no rebase is in progress.
+ scm.clean_working_directory()
+
+ def test_commitish_parsing(self):
+ scm = detect_scm_system(self.git_checkout_path)
+
+ # Multiple revisions are cherry-picked.
+ self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
+ self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
+
+ # ... is an invalid range specifier
+ self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
+
+ def test_commitish_order(self):
+ scm = detect_scm_system(self.git_checkout_path)
+
+ commit_range = 'HEAD~3..HEAD'
+
+ actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
+ expected_commits = []
+ expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
+
+ self.assertEqual(actual_commits, expected_commits)
+
+ def test_apply_git_patch(self):
+ scm = detect_scm_system(self.git_checkout_path)
+ patch = self._create_patch(run_command(['git', 'diff', 'HEAD..HEAD^']))
+ self._setup_webkittools_scripts_symlink(scm)
+ scm.apply_patch(patch)
+
+ def test_apply_git_patch_force(self):
+ scm = detect_scm_system(self.git_checkout_path)
+ patch = self._create_patch(run_command(['git', 'diff', 'HEAD~2..HEAD']))
+ self._setup_webkittools_scripts_symlink(scm)
+ self.assertRaises(ScriptError, scm.apply_patch, patch, force=True)
+
+ def test_commit_text_parsing(self):
+ self._shared_test_commit_with_message()
+
+ def test_reverse_diff(self):
+ self._shared_test_reverse_diff()
+
+ def test_diff_for_revision(self):
+ self._shared_test_diff_for_revision()
+
+ def test_svn_apply_git_patch(self):
+ self._shared_test_svn_apply_git_patch()
+
+ def test_create_binary_patch(self):
+ # Create a git binary patch and check the contents.
+ scm = detect_scm_system(self.git_checkout_path)
+ test_file_name = 'binary_file'
+ test_file_path = os.path.join(self.git_checkout_path, test_file_name)
+ file_contents = ''.join(map(chr, range(256)))
+ write_into_file_at_path(test_file_path, file_contents)
+ run_command(['git', 'add', test_file_name])
+ patch = scm.create_patch()
+ self.assertTrue(re.search(r'\nliteral 0\n', patch))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch))
+
+ # Check if we can apply the created patch.
+ run_command(['git', 'rm', '-f', test_file_name])
+ self._setup_webkittools_scripts_symlink(scm)
+ self.scm.apply_patch(self._create_patch(patch))
+ self.assertEqual(file_contents, read_from_path(test_file_path))
+
+ # Check if we can create a patch from a local commit.
+ write_into_file_at_path(test_file_path, file_contents)
+ run_command(['git', 'add', test_file_name])
+ run_command(['git', 'commit', '-m', 'binary diff'])
+ patch_from_local_commit = scm.create_patch_from_local_commit('HEAD')
+ self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))
+ patch_since_local_commit = scm.create_patch_since_local_commit('HEAD^1')
+ self.assertTrue(re.search(r'\nliteral 0\n', patch_since_local_commit))
+ self.assertTrue(re.search(r'\nliteral 256\n', patch_since_local_commit))
+ self.assertEqual(patch_from_local_commit, patch_since_local_commit)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/statusserver.py b/WebKitTools/Scripts/webkitpy/statusserver.py
new file mode 100644
index 0000000..ff0ddfa
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/statusserver.py
@@ -0,0 +1,96 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.networktransaction import NetworkTransaction
+from webkitpy.webkit_logging import log
+from mechanize import Browser
+
+# WebKit includes a bundled copy of BeautifulSoup in Scripts/webkitpy
+# so this import should always succeed.
+from .BeautifulSoup import BeautifulSoup
+
+import urllib2
+
+
+class StatusServer:
+ default_host = "webkit-commit-queue.appspot.com"
+
+ def __init__(self, host=default_host):
+ self.set_host(host)
+ self.browser = Browser()
+
+ def set_host(self, host):
+ self.host = host
+ self.url = "http://%s" % self.host
+
+ def results_url_for_status(self, status_id):
+ return "%s/results/%s" % (self.url, status_id)
+
+ def _add_patch(self, patch):
+ if not patch:
+ return
+ if patch.bug_id():
+ self.browser["bug_id"] = str(patch.bug_id())
+ if patch.id():
+ self.browser["patch_id"] = str(patch.id())
+
+ def _add_results_file(self, results_file):
+ if not results_file:
+ return
+ self.browser.add_file(results_file, "text/plain", "results.txt", 'results_file')
+
+ def _post_to_server(self, queue_name, status, patch, results_file):
+ if results_file:
+            # We might need to rewind the file if we've already tried to post it.
+ results_file.seek(0)
+
+ update_status_url = "%s/update-status" % self.url
+ self.browser.open(update_status_url)
+ self.browser.select_form(name="update_status")
+ self.browser['queue_name'] = queue_name
+ self._add_patch(patch)
+ self.browser['status'] = status
+ self._add_results_file(results_file)
+ return self.browser.submit().read() # This is the id of the newly created status object.
+
+ def update_status(self, queue_name, status, patch=None, results_file=None):
+ # During unit testing, host is None
+ if not self.host:
+ return
+
+ log(status)
+ return NetworkTransaction().run(lambda: self._post_to_server(queue_name, status, patch, results_file))
+
+ def patch_status(self, queue_name, patch_id):
+ update_status_url = "%s/patch-status/%s/%s" % (self.url, queue_name, patch_id)
+ try:
+ return urllib2.urlopen(update_status_url).read()
+ except urllib2.HTTPError, e:
+ if e.code == 404:
+ return None
+ raise e
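
A rough usage sketch for StatusServer (the queue name, status text, and patch id below are invented for illustration; only the methods defined in the file above are assumed):

    from webkitpy.statusserver import StatusServer

    server = StatusServer()  # defaults to webkit-commit-queue.appspot.com
    # "commit-queue" and 12345 are placeholders for the example.
    # update_status() returns the id of the newly created status object
    # (or None when host is None, as during unit testing).
    status_id = server.update_status("commit-queue", "Builders are green")
    if status_id:
        print server.results_url_for_status(status_id)
    # patch_status() returns the raw response body, or None on HTTP 404.
    print server.patch_status("commit-queue", 12345)
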
diff --git a/WebKitTools/Scripts/webkitpy/statusserver.pyc b/WebKitTools/Scripts/webkitpy/statusserver.pyc
new file mode 100644
index 0000000..2ba11b0
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/statusserver.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/__init__.py b/WebKitTools/Scripts/webkitpy/steps/__init__.py
new file mode 100644
index 0000000..5ae4bea
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/__init__.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# FIXME: Is this the right way to do this?
+from webkitpy.steps.applypatch import ApplyPatch
+from webkitpy.steps.applypatchwithlocalcommit import ApplyPatchWithLocalCommit
+from webkitpy.steps.build import Build
+from webkitpy.steps.checkstyle import CheckStyle
+from webkitpy.steps.cleanworkingdirectory import CleanWorkingDirectory
+from webkitpy.steps.cleanworkingdirectorywithlocalcommits import CleanWorkingDirectoryWithLocalCommits
+from webkitpy.steps.closebug import CloseBug
+from webkitpy.steps.closebugforlanddiff import CloseBugForLandDiff
+from webkitpy.steps.closepatch import ClosePatch
+from webkitpy.steps.commit import Commit
+from webkitpy.steps.completerollout import CompleteRollout
+from webkitpy.steps.confirmdiff import ConfirmDiff
+from webkitpy.steps.createbug import CreateBug
+from webkitpy.steps.editchangelog import EditChangeLog
+from webkitpy.steps.ensurebuildersaregreen import EnsureBuildersAreGreen
+from webkitpy.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded
+from webkitpy.steps.obsoletepatches import ObsoletePatches
+from webkitpy.steps.options import Options
+from webkitpy.steps.postdiff import PostDiff
+from webkitpy.steps.postdiffforcommit import PostDiffForCommit
+from webkitpy.steps.preparechangelogforrevert import PrepareChangeLogForRevert
+from webkitpy.steps.preparechangelog import PrepareChangeLog
+from webkitpy.steps.promptforbugortitle import PromptForBugOrTitle
+from webkitpy.steps.revertrevision import RevertRevision
+from webkitpy.steps.runtests import RunTests
+from webkitpy.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
+from webkitpy.steps.update import Update
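
The re-exports above let command modules pull every step class from one place; a minimal sketch of that import style (the particular selection and order of steps is arbitrary, for illustration only):

    from webkitpy.steps import CleanWorkingDirectory, Update, ApplyPatch, Build, Commit

    # A command would assemble an ordered list like this and hand it to its
    # step-running machinery; the sequence shown here is not from this patch.
    example_steps = [CleanWorkingDirectory, Update, ApplyPatch, Build, Commit]
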
diff --git a/WebKitTools/Scripts/webkitpy/steps/__init__.pyc b/WebKitTools/Scripts/webkitpy/steps/__init__.pyc
new file mode 100644
index 0000000..ccf513b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/__init__.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/abstractstep.py b/WebKitTools/Scripts/webkitpy/steps/abstractstep.py
new file mode 100644
index 0000000..639cf55
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/abstractstep.py
@@ -0,0 +1,69 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.webkit_logging import log
+from webkitpy.webkitport import WebKitPort
+
+
+class AbstractStep(object):
+ def __init__(self, tool, options):
+ self._tool = tool
+ self._options = options
+ self._port = None
+
+ def _run_script(self, script_name, quiet=False, port=WebKitPort):
+ log("Running %s" % script_name)
+ # FIXME: This should use self.port()
+ self._tool.executive.run_and_throw_if_fail(port.script_path(script_name), quiet)
+
+ # FIXME: The port should live on the tool.
+ def port(self):
+ if self._port:
+ return self._port
+ self._port = WebKitPort.port(self._options.port)
+ return self._port
+
+ _well_known_keys = {
+ "diff" : lambda self: self._tool.scm().create_patch(),
+ "changelogs" : lambda self: self._tool.scm().modified_changelogs(),
+ }
+
+ def cached_lookup(self, state, key, promise=None):
+ if state.get(key):
+ return state[key]
+ if not promise:
+ promise = self._well_known_keys.get(key)
+ state[key] = promise(self)
+ return state[key]
+
+ @classmethod
+ def options(cls):
+ return []
+
+ def run(self, state):
+        raise NotImplementedError("subclasses must implement")
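
To make the step contract concrete, here is a minimal hypothetical subclass (the class name and log message are invented; only the AbstractStep, Options, and log APIs that appear elsewhere in this patch are assumed):

    from webkitpy.steps.abstractstep import AbstractStep
    from webkitpy.steps.options import Options
    from webkitpy.webkit_logging import log


    # Hypothetical example step, not part of this patch.
    class PrintDiffSize(AbstractStep):
        @classmethod
        def options(cls):
            # Declare the command-line options this step consumes.
            return [
                Options.quiet,
            ]

        def run(self, state):
            # cached_lookup() builds the "diff" entry once, via the
            # _well_known_keys promise, and stores it in the shared state
            # dict so later steps can reuse it.
            diff = self.cached_lookup(state, "diff")
            if not self._options.quiet:
                log("Patch is %s bytes long" % len(diff))
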
diff --git a/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc b/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc
new file mode 100644
index 0000000..d172c92
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/abstractstep.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatch.py b/WebKitTools/Scripts/webkitpy/steps/applypatch.py
new file mode 100644
index 0000000..aba81ae
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/applypatch.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+class ApplyPatch(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.non_interactive,
+ ]
+
+ def run(self, state):
+ log("Processing patch %s from bug %s." % (state["patch"].id(), state["patch"].bug_id()))
+ self._tool.scm().apply_patch(state["patch"], force=self._options.non_interactive)
diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc b/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc
new file mode 100644
index 0000000..0c5212d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/applypatch.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py
new file mode 100644
index 0000000..bfaf52a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.applypatch import ApplyPatch
+from webkitpy.steps.options import Options
+
+class ApplyPatchWithLocalCommit(ApplyPatch):
+ @classmethod
+ def options(cls):
+ return [
+ Options.local_commit,
+ ] + ApplyPatch.options()
+
+ def run(self, state):
+ ApplyPatch.run(self, state)
+ if self._options.local_commit:
+ commit_message = self._tool.scm().commit_message_for_this_commit()
+ self._tool.scm().commit_locally_with_message(commit_message.message() or state["patch"].name())
diff --git a/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc
new file mode 100644
index 0000000..67afd10
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/applypatchwithlocalcommit.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/build.py b/WebKitTools/Scripts/webkitpy/steps/build.py
new file mode 100644
index 0000000..1823cff
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/build.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class Build(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.build,
+ Options.quiet,
+ Options.build_style,
+ ]
+
+ def build(self, build_style):
+ self._tool.executive.run_and_throw_if_fail(self.port().build_webkit_command(build_style=build_style), self._options.quiet)
+
+ def run(self, state):
+ if not self._options.build:
+ return
+ log("Building WebKit")
+ if self._options.build_style == "both":
+ self.build("debug")
+ self.build("release")
+ else:
+ self.build(self._options.build_style)
diff --git a/WebKitTools/Scripts/webkitpy/steps/build.pyc b/WebKitTools/Scripts/webkitpy/steps/build.pyc
new file mode 100644
index 0000000..8e9e5ee
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/build.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/checkstyle.py b/WebKitTools/Scripts/webkitpy/steps/checkstyle.py
new file mode 100644
index 0000000..c8e20f8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/checkstyle.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.executive import ScriptError
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import error
+
+class CheckStyle(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.non_interactive,
+ Options.check_style,
+ ]
+
+ def run(self, state):
+ if not self._options.check_style:
+ return
+ os.chdir(self._tool.scm().checkout_root)
+ try:
+ self._run_script("check-webkit-style")
+ except ScriptError, e:
+ if self._options.non_interactive:
+ # We need to re-raise the exception here to have the
+ # style-queue do the right thing.
+ raise e
+ if not self._tool.user.confirm("Are you sure you want to continue?"):
+ exit(1)
diff --git a/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc b/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc
new file mode 100644
index 0000000..561036b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/checkstyle.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py
new file mode 100644
index 0000000..88e38f5
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.py
@@ -0,0 +1,52 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+
+
+class CleanWorkingDirectory(AbstractStep):
+ def __init__(self, tool, options, allow_local_commits=False):
+ AbstractStep.__init__(self, tool, options)
+ self._allow_local_commits = allow_local_commits
+
+ @classmethod
+ def options(cls):
+ return [
+ Options.force_clean,
+ Options.clean,
+ ]
+
+ def run(self, state):
+ os.chdir(self._tool.scm().checkout_root)
+ if not self._allow_local_commits:
+ self._tool.scm().ensure_no_local_commits(self._options.force_clean)
+ if self._options.clean:
+ self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean)
diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc
new file mode 100644
index 0000000..11383f2
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectory.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py
new file mode 100644
index 0000000..cabeba2
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.cleanworkingdirectory import CleanWorkingDirectory
+
+class CleanWorkingDirectoryWithLocalCommits(CleanWorkingDirectory):
+ def __init__(self, tool, options):
+        # FIXME: This is a bit of a hack. Consider doing this more cleanly.
+ CleanWorkingDirectory.__init__(self, tool, options, allow_local_commits=True)
diff --git a/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc
new file mode 100644
index 0000000..757d19a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/cleanworkingdirectorywithlocalcommits.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/closebug.py b/WebKitTools/Scripts/webkitpy/steps/closebug.py
new file mode 100644
index 0000000..2640ee3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closebug.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class CloseBug(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.close_bug,
+ ]
+
+ def run(self, state):
+ if not self._options.close_bug:
+ return
+ # Check to make sure there are no r? or r+ patches on the bug before closing.
+ # Assume that r- patches are just previous patches someone forgot to obsolete.
+ patches = self._tool.bugs.fetch_bug(state["patch"].bug_id()).patches()
+ for patch in patches:
+ if patch.review() == "?" or patch.review() == "+":
+ log("Not closing bug %s as attachment %s has review=%s. Assuming there are more patches to land from this bug." % (patch.bug_id(), patch.id(), patch.review()))
+ return
+ self._tool.bugs.close_bug_as_fixed(state["patch"].bug_id(), "All reviewed patches have been landed. Closing bug.")
diff --git a/WebKitTools/Scripts/webkitpy/steps/closebug.pyc b/WebKitTools/Scripts/webkitpy/steps/closebug.pyc
new file mode 100644
index 0000000..356f430
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closebug.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py
new file mode 100644
index 0000000..43a0c66
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.py
@@ -0,0 +1,58 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.comments import bug_comment_from_commit_text
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class CloseBugForLandDiff(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.close_bug,
+ ]
+
+ def run(self, state):
+ comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
+ bug_id = state.get("bug_id")
+ if not bug_id and state.get("patch"):
+ bug_id = state.get("patch").bug_id()
+
+ if bug_id:
+ log("Updating bug %s" % bug_id)
+ if self._options.close_bug:
+ self._tool.bugs.close_bug_as_fixed(bug_id, comment_text)
+ else:
+                # FIXME: We should have a smart way to figure out if the patch is attached
+                # to the bug, and if so obsolete it.
+ self._tool.bugs.post_comment_to_bug(bug_id, comment_text)
+ else:
+ log(comment_text)
+ log("No bug id provided.")
diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc
new file mode 100644
index 0000000..2dd3814
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py
new file mode 100644
index 0000000..73561ab
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closebugforlanddiff_unittest.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.steps.closebugforlanddiff import CloseBugForLandDiff
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+
+class CloseBugForLandDiffTest(unittest.TestCase):
+ def test_empty_state(self):
+ capture = OutputCapture()
+ step = CloseBugForLandDiff(MockBugzillaTool(), Mock())
+ expected_stderr = "Committed r49824: <http://trac.webkit.org/changeset/49824>\nNo bug id provided.\n"
+ capture.assert_outputs(self, step.run, [{"commit_text" : "Mock commit text"}], expected_stderr=expected_stderr)
diff --git a/WebKitTools/Scripts/webkitpy/steps/closepatch.py b/WebKitTools/Scripts/webkitpy/steps/closepatch.py
new file mode 100644
index 0000000..f20fe7e
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closepatch.py
@@ -0,0 +1,36 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.comments import bug_comment_from_commit_text
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class ClosePatch(AbstractStep):
+ def run(self, state):
+ comment_text = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
+ self._tool.bugs.clear_attachment_flags(state["patch"].id(), comment_text)
diff --git a/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc b/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc
new file mode 100644
index 0000000..3a2a75b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/closepatch.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/commit.py b/WebKitTools/Scripts/webkitpy/steps/commit.py
new file mode 100644
index 0000000..dd1fed7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/commit.py
@@ -0,0 +1,35 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class Commit(AbstractStep):
+ def run(self, state):
+ commit_message = self._tool.scm().commit_message_for_this_commit()
+ state["commit_text"] = self._tool.scm().commit_with_message(commit_message.message())
diff --git a/WebKitTools/Scripts/webkitpy/steps/commit.pyc b/WebKitTools/Scripts/webkitpy/steps/commit.pyc
new file mode 100644
index 0000000..fe9ef1a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/commit.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/completerollout.py b/WebKitTools/Scripts/webkitpy/steps/completerollout.py
new file mode 100644
index 0000000..8534956
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/completerollout.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.comments import bug_comment_from_commit_text
+from webkitpy.steps.build import Build
+from webkitpy.steps.commit import Commit
+from webkitpy.steps.metastep import MetaStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class CompleteRollout(MetaStep):
+ substeps = [
+ Build,
+ Commit,
+ ]
+
+ @classmethod
+ def options(cls):
+ collected_options = cls._collect_options_from_steps(cls.substeps)
+ collected_options.append(Options.complete_rollout)
+ return collected_options
+
+ def run(self, state):
+ bug_id = state["bug_id"]
+ # FIXME: Fully automated rollout is not 100% idiot-proof yet, so for now just log with instructions on how to complete the rollout.
+ # Once we trust rollout we will remove this option.
+ if not self._options.complete_rollout:
+ log("\nNOTE: Rollout support is experimental.\nPlease verify the rollout diff and use \"webkit-patch land %s\" to commit the rollout." % bug_id)
+ return
+
+ MetaStep.run(self, state)
+
+ commit_comment = bug_comment_from_commit_text(self._tool.scm(), state["commit_text"])
+ comment_text = "Reverted r%s for reason:\n\n%s\n\n%s" % (state["revision"], state["reason"], commit_comment)
+
+ if not bug_id:
+ log(comment_text)
+ log("No bugs were updated.")
+ return
+ self._tool.bugs.reopen_bug(bug_id, comment_text)
diff --git a/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc b/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc
new file mode 100644
index 0000000..47312b8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/completerollout.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py
new file mode 100644
index 0000000..fc28f8f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.py
@@ -0,0 +1,47 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import error
+
+
+class ConfirmDiff(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.confirm,
+ ]
+
+ def run(self, state):
+ if not self._options.confirm:
+ return
+ diff = self.cached_lookup(state, "diff")
+ self._tool.user.page(diff)
+ if not self._tool.user.confirm("Was that diff correct?"):
+ exit(1)
diff --git a/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc
new file mode 100644
index 0000000..d3fc1d4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/confirmdiff.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/createbug.py b/WebKitTools/Scripts/webkitpy/steps/createbug.py
new file mode 100644
index 0000000..75bf17f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/createbug.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+
+
+class CreateBug(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.cc,
+ Options.component,
+ ]
+
+ def run(self, state):
+ # No need to create a bug if we already have one.
+ if state.get("bug_id"):
+ return
+ state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], component=self._options.component, cc=self._options.cc)
diff --git a/WebKitTools/Scripts/webkitpy/steps/createbug.pyc b/WebKitTools/Scripts/webkitpy/steps/createbug.pyc
new file mode 100644
index 0000000..e27f5ec
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/createbug.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/editchangelog.py b/WebKitTools/Scripts/webkitpy/steps/editchangelog.py
new file mode 100644
index 0000000..d545c72
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/editchangelog.py
@@ -0,0 +1,37 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class EditChangeLog(AbstractStep):
+ def run(self, state):
+ os.chdir(self._tool.scm().checkout_root)
+ self._tool.user.edit(self.cached_lookup(state, "changelogs"))
diff --git a/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc b/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc
new file mode 100644
index 0000000..2ca5dbf
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/editchangelog.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py
new file mode 100644
index 0000000..96f265a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.py
@@ -0,0 +1,48 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import error
+
+
+class EnsureBuildersAreGreen(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.check_builders,
+ ]
+
+ def run(self, state):
+ if not self._options.check_builders:
+ return
+ red_builders_names = self._tool.buildbot.red_core_builders_names()
+ if not red_builders_names:
+ return
+ red_builders_names = map(lambda name: "\"%s\"" % name, red_builders_names) # Add quotes around the names.
+ error("Builders [%s] are red, please do not commit.\nSee http://%s.\nPass --ignore-builders to bypass this check." % (", ".join(red_builders_names), self._tool.buildbot.buildbot_host))
diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc
new file mode 100644
index 0000000..dd98935
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/ensurebuildersaregreen.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py
new file mode 100644
index 0000000..cecf891
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.py
@@ -0,0 +1,43 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import error
+
+
+class EnsureLocalCommitIfNeeded(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.local_commit,
+ ]
+
+ def run(self, state):
+ if self._options.local_commit and not self._tool.scm().supports_local_commits():
+ error("--local-commit passed, but %s does not support local commits" % self._tool.scm.display_name())
diff --git a/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc
new file mode 100644
index 0000000..18ce98a
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/ensurelocalcommitifneeded.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/metastep.py b/WebKitTools/Scripts/webkitpy/steps/metastep.py
new file mode 100644
index 0000000..9f368de
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/metastep.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+# FIXME: Unify with StepSequence? I'm not sure yet which is the better design.
+class MetaStep(AbstractStep):
+ substeps = [] # Override in subclasses
+ def __init__(self, tool, options):
+ AbstractStep.__init__(self, tool, options)
+ self._step_instances = []
+ for step_class in self.substeps:
+ self._step_instances.append(step_class(tool, options))
+
+ @staticmethod
+ def _collect_options_from_steps(steps):
+ collected_options = []
+ for step in steps:
+ collected_options = collected_options + step.options()
+ return collected_options
+
+ @classmethod
+ def options(cls):
+ return cls._collect_options_from_steps(cls.substeps)
+
+ def run(self, state):
+ for step in self._step_instances:
+ step.run(state)
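
Editor's note: MetaStep above simply fans options() and run() out to its substeps list. A minimal sketch (not part of this patch) of how a subclass is meant to look; the class name and the choice of steps below are assumptions for illustration, reusing the Update and PrepareChangeLog steps added elsewhere in this change.

# Hypothetical sketch: a MetaStep subclass only needs to override "substeps".
from webkitpy.steps.metastep import MetaStep
from webkitpy.steps.preparechangelog import PrepareChangeLog
from webkitpy.steps.update import Update


class UpdateAndPrepareChangeLog(MetaStep):
    substeps = [
        Update,
        PrepareChangeLog,
    ]

# UpdateAndPrepareChangeLog.options() is the concatenation of the substeps'
# option lists, and run(state) executes Update then PrepareChangeLog in order.
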
diff --git a/WebKitTools/Scripts/webkitpy/steps/metastep.pyc b/WebKitTools/Scripts/webkitpy/steps/metastep.pyc
new file mode 100644
index 0000000..21d2bf6
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/metastep.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py
new file mode 100644
index 0000000..dbdbabd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.grammar import pluralize
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class ObsoletePatches(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.obsolete_patches,
+ ]
+
+ def run(self, state):
+ if not self._options.obsolete_patches:
+ return
+ bug_id = state["bug_id"]
+ patches = self._tool.bugs.fetch_bug(bug_id).patches()
+ if not patches:
+ return
+ log("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id))
+ for patch in patches:
+ self._tool.bugs.obsolete_attachment(patch.id())
diff --git a/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc
new file mode 100644
index 0000000..4586950
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/obsoletepatches.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/options.py b/WebKitTools/Scripts/webkitpy/steps/options.py
new file mode 100644
index 0000000..8b28f27
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/options.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from optparse import make_option
+
+class Options(object):
+ build = make_option("--no-build", action="store_false", dest="build", default=True, help="Commit without building first, implies --no-test.")
+ build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.")
+ cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.")
+ check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="Don't check to see if the build.webkit.org builders are green before landing.")
+ check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.")
+ clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches")
+ close_bug = make_option("--no-close", action="store_false", dest="close_bug", default=True, help="Leave bug open after landing.")
+ complete_rollout = make_option("--complete-rollout", action="store_true", dest="complete_rollout", help="Commit the revert and re-open the original bug.")
+ component = make_option("--component", action="store", type="string", dest="component", help="Component for the new bug.")
+ confirm = make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Skip confirmation steps.")
+ description = make_option("-m", "--description", action="store", type="string", dest="description", help="Description string for the attachment (default: \"Patch\")")
+ email = make_option("--email", action="store", type="string", dest="email", help="Email address to use in ChangeLogs.")
+ force_clean = make_option("--force-clean", action="store_true", dest="force_clean", default=False, help="Clean working directory before applying patches (removes local changes and commits)")
+ local_commit = make_option("--local-commit", action="store_true", dest="local_commit", default=False, help="Make a local commit for each applied patch")
+ non_interactive = make_option("--non-interactive", action="store_true", dest="non_interactive", default=False, help="Never prompt the user; fail as fast as possible.")
+ obsolete_patches = make_option("--no-obsolete", action="store_false", dest="obsolete_patches", default=True, help="Do not obsolete old patches before posting this one.")
+ open_bug = make_option("--open-bug", action="store_true", dest="open_bug", default=False, help="Opens the associated bug in a browser.")
+ parent_command = make_option("--parent-command", action="store", dest="parent_command", default=None, help="(Internal) The command that spawned this instance.")
+ port = make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...).")
+ quiet = make_option("--quiet", action="store_true", dest="quiet", default=False, help="Produce less console output.")
+ request_commit = make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review.")
+ review = make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review.")
+ reviewer = make_option("-r", "--reviewer", action="store", type="string", dest="reviewer", help="Update ChangeLogs to say Reviewed by REVIEWER.")
+ test = make_option("--no-test", action="store_false", dest="test", default=True, help="Commit without running run-webkit-tests.")
+ update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.")
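
Editor's note: each attribute above is a plain optparse make_option object, so a command can hand any subset of them straight to an OptionParser. A minimal sketch (not part of this patch), using only the standard-library optparse module and the Options class above.

from optparse import OptionParser

from webkitpy.steps.options import Options

# Build a parser from a subset of the shared options and parse a sample
# command line; --no-build flips the "build" dest to False, --quiet sets
# "quiet" to True, and untouched options keep their defaults.
parser = OptionParser(option_list=[Options.build, Options.test, Options.quiet])
(options, args) = parser.parse_args(["--no-build", "--quiet"])
assert options.build is False
assert options.test is True
assert options.quiet is True
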
diff --git a/WebKitTools/Scripts/webkitpy/steps/options.pyc b/WebKitTools/Scripts/webkitpy/steps/options.pyc
new file mode 100644
index 0000000..7634605
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/options.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiff.py b/WebKitTools/Scripts/webkitpy/steps/postdiff.py
new file mode 100644
index 0000000..a5ba2a4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/postdiff.py
@@ -0,0 +1,51 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+
+
+class PostDiff(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.description,
+ Options.review,
+ Options.request_commit,
+ Options.open_bug,
+ ]
+
+ def run(self, state):
+ diff = self.cached_lookup(state, "diff")
+ diff_file = StringIO.StringIO(diff) # add_patch_to_bug expects a file-like object
+ description = self._options.description or "Patch"
+ self._tool.bugs.add_patch_to_bug(state["bug_id"], diff_file, description, mark_for_review=self._options.review, mark_for_commit_queue=self._options.request_commit)
+ if self._options.open_bug:
+ self._tool.user.open_url(self._tool.bugs.bug_url_for_bug_id(state["bug_id"]))
diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc b/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc
new file mode 100644
index 0000000..82f1c09
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/postdiff.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py b/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py
new file mode 100644
index 0000000..449381c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/postdiffforcommit.py
@@ -0,0 +1,41 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class PostDiffForCommit(AbstractStep):
+ def run(self, state):
+ self._tool.bugs.add_patch_to_bug(
+ state["bug_id"],
+ StringIO.StringIO(self.cached_lookup(state, "diff")),
+ "Patch for landing",
+ mark_for_review=False,
+ mark_for_landing=True)
diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py
new file mode 100644
index 0000000..bd41f0b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.py
@@ -0,0 +1,59 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.executive import ScriptError
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import error
+
+
+class PrepareChangeLog(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.port,
+ Options.quiet,
+ Options.email,
+ ]
+
+ def run(self, state):
+ if self.cached_lookup(state, "changelogs"):
+ return
+ os.chdir(self._tool.scm().checkout_root)
+ args = [self.port().script_path("prepare-ChangeLog")]
+ if state["bug_id"]:
+ args.append("--bug=%s" % state["bug_id"])
+ if self._options.email:
+ args.append("--email=%s" % self._options.email)
+ try:
+ self._tool.executive.run_and_throw_if_fail(args, self._options.quiet)
+ except ScriptError, e:
+ error("Unable to prepare ChangeLogs.")
+ state["diff"] = None # We've changed the diff
diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc
new file mode 100644
index 0000000..2f6edbd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelog.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py
new file mode 100644
index 0000000..88e5134
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.py
@@ -0,0 +1,49 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.changelogs import ChangeLog
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class PrepareChangeLogForRevert(AbstractStep):
+ def run(self, state):
+ # First, discard the ChangeLog changes from the rollout.
+ os.chdir(self._tool.scm().checkout_root)
+ changelog_paths = self._tool.scm().modified_changelogs()
+ self._tool.scm().revert_files(changelog_paths)
+
+ # Second, make new ChangeLog entries for this rollout.
+ # This could move to prepare-ChangeLog by adding a --revert= option.
+ self._run_script("prepare-ChangeLog")
+ bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None
+ for changelog_path in changelog_paths:
+ # FIXME: Seems we should prepare the message outside of changelogs.py and then just pass in
+ # text that we want to use to replace the reviewed by line.
+ ChangeLog(changelog_path).update_for_revert(state["revision"], state["reason"], bug_url)
diff --git a/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc
new file mode 100644
index 0000000..c1f0ca4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/preparechangelogforrevert.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py
new file mode 100644
index 0000000..fb2f877
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.py
@@ -0,0 +1,45 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class PromptForBugOrTitle(AbstractStep):
+ def run(self, state):
+ # No need to prompt if we already have the bug_id.
+ if state.get("bug_id"):
+ return
+ user_response = self._tool.user.prompt("Please enter a bug number or a title for a new bug:\n")
+ # If the user responds with a number, we assume it's a bug number.
+ # Otherwise we assume it's a bug subject.
+ try:
+ state["bug_id"] = int(user_response)
+ except (ValueError, TypeError):
+ state["bug_title"] = user_response
+ # FIXME: This is kind of a lame description.
+ state["bug_description"] = user_response
diff --git a/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc
new file mode 100644
index 0000000..fdca409
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/promptforbugortitle.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/revertrevision.py b/WebKitTools/Scripts/webkitpy/steps/revertrevision.py
new file mode 100644
index 0000000..ce6c263
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/revertrevision.py
@@ -0,0 +1,34 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+
+
+class RevertRevision(AbstractStep):
+ def run(self, state):
+ self._tool.scm().apply_reverse_diff(state["revision"])
diff --git a/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc b/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc
new file mode 100644
index 0000000..ec08b1f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/revertrevision.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/runtests.py b/WebKitTools/Scripts/webkitpy/steps/runtests.py
new file mode 100644
index 0000000..ebe809f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/runtests.py
@@ -0,0 +1,66 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+class RunTests(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.build,
+ Options.test,
+ Options.non_interactive,
+ Options.quiet,
+ Options.port,
+ ]
+
+ def run(self, state):
+ if not self._options.build:
+ return
+ if not self._options.test:
+ return
+
+ # Run the scripting unit tests first because they're quickest.
+ log("Running Python unit tests")
+ self._tool.executive.run_and_throw_if_fail(self.port().run_python_unittests_command())
+ log("Running Perl unit tests")
+ self._tool.executive.run_and_throw_if_fail(self.port().run_perl_unittests_command())
+ log("Running JavaScriptCore tests")
+ self._tool.executive.run_and_throw_if_fail(self.port().run_javascriptcore_tests_command(), quiet=True)
+
+ log("Running run-webkit-tests")
+ args = self.port().run_webkit_tests_command()
+ if self._options.non_interactive:
+ args.append("--no-launch-safari")
+ args.append("--exit-after-n-failures=1")
+ if self._options.quiet:
+ args.append("--quiet")
+ self._tool.executive.run_and_throw_if_fail(args)
+
diff --git a/WebKitTools/Scripts/webkitpy/steps/runtests.pyc b/WebKitTools/Scripts/webkitpy/steps/runtests.pyc
new file mode 100644
index 0000000..16908fb
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/runtests.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py b/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py
new file mode 100644
index 0000000..3e6a032
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/steps_unittest.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.steps.update import Update
+from webkitpy.steps.promptforbugortitle import PromptForBugOrTitle
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+from webkitpy.mock import Mock
+
+
+class StepsTest(unittest.TestCase):
+ def _run_step(self, step, tool=None, options=None, state=None):
+ if not tool:
+ tool = MockBugzillaTool()
+ if not options:
+ options = Mock()
+ if not state:
+ state = {}
+ step(tool, options).run(state)
+
+ def test_update_step(self):
+ options = Mock()
+ options.update = True
+ self._run_step(Update, options)
+
+ def test_prompt_for_bug_or_title_step(self):
+ tool = MockBugzillaTool()
+ tool.user.prompt = lambda message: 42
+ self._run_step(PromptForBugOrTitle, tool=tool)
diff --git a/WebKitTools/Scripts/webkitpy/steps/update.py b/WebKitTools/Scripts/webkitpy/steps/update.py
new file mode 100644
index 0000000..0f45671
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/update.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log
+
+
+class Update(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.update,
+ Options.port,
+ ]
+
+ def run(self, state):
+ if not self._options.update:
+ return
+ log("Updating working directory")
+ self._tool.executive.run_and_throw_if_fail(self.port().update_webkit_command(), quiet=True)
diff --git a/WebKitTools/Scripts/webkitpy/steps/update.pyc b/WebKitTools/Scripts/webkitpy/steps/update.pyc
new file mode 100644
index 0000000..0b9e7e9
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/update.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py
new file mode 100644
index 0000000..102a454
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreview_unittests.py
@@ -0,0 +1,46 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
+from webkitpy.mock import Mock
+from webkitpy.mock_bugzillatool import MockBugzillaTool
+from webkitpy.outputcapture import OutputCapture
+
+class UpdateChangeLogsWithReviewerTest(unittest.TestCase):
+ def test_guess_reviewer_from_bug(self):
+ capture = OutputCapture()
+ step = UpdateChangeLogsWithReviewer(MockBugzillaTool(), Mock())
+ expected_stderr = "0 reviewed patches on bug 75, cannot infer reviewer.\n"
+ capture.assert_outputs(self, step._guess_reviewer_from_bug, [75], expected_stderr=expected_stderr)
+
+ def test_empty_state(self):
+ capture = OutputCapture()
+ step = UpdateChangeLogsWithReviewer(MockBugzillaTool(), Mock())
+ capture.assert_outputs(self, step.run, [{}])
diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py
new file mode 100644
index 0000000..90fdc35
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.py
@@ -0,0 +1,71 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from webkitpy.changelogs import ChangeLog
+from webkitpy.grammar import pluralize
+from webkitpy.steps.abstractstep import AbstractStep
+from webkitpy.steps.options import Options
+from webkitpy.webkit_logging import log, error
+
+class UpdateChangeLogsWithReviewer(AbstractStep):
+ @classmethod
+ def options(cls):
+ return [
+ Options.reviewer,
+ ]
+
+ def _guess_reviewer_from_bug(self, bug_id):
+ patches = self._tool.bugs.fetch_bug(bug_id).reviewed_patches()
+ if len(patches) != 1:
+ log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id))
+ return None
+ patch = patches[0]
+ log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (patch.reviewer().full_name, patch.id(), bug_id))
+ return patch.reviewer().full_name
+
+ def run(self, state):
+ bug_id = state.get("bug_id")
+ if not bug_id and state.get("patch"):
+ bug_id = state.get("patch").bug_id()
+
+ reviewer = self._options.reviewer
+ if not reviewer:
+ if not bug_id:
+ log("No bug id provided and --reviewer= not provided. Not updating ChangeLogs with reviewer.")
+ return
+ reviewer = self._guess_reviewer_from_bug(bug_id)
+
+ if not reviewer:
+ log("Failed to guess reviewer from bug %s and --reviewer= not provided. Not updating ChangeLogs with reviewer." % bug_id)
+ return
+
+ os.chdir(self._tool.scm().checkout_root)
+ for changelog_path in self._tool.scm().modified_changelogs():
+ ChangeLog(changelog_path).set_reviewer(reviewer)
diff --git a/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc
new file mode 100644
index 0000000..6c9b7fd
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/steps/updatechangelogswithreviewer.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/stepsequence.py b/WebKitTools/Scripts/webkitpy/stepsequence.py
new file mode 100644
index 0000000..008b366
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/stepsequence.py
@@ -0,0 +1,77 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import webkitpy.steps as steps
+
+from webkitpy.executive import ScriptError
+from webkitpy.webkit_logging import log
+from webkitpy.scm import CheckoutNeedsUpdate
+from webkitpy.queueengine import QueueEngine
+
+
+class StepSequenceErrorHandler():
+ @classmethod
+ def handle_script_error(cls, tool, patch, script_error):
+ raise NotImplementedError, "subclasses must implement"
+
+
+class StepSequence(object):
+ def __init__(self, steps):
+ self._steps = steps or []
+
+ def options(self):
+ collected_options = [
+ steps.Options.parent_command,
+ steps.Options.quiet,
+ ]
+ for step in self._steps:
+ collected_options = collected_options + step.options()
+ # Remove duplicates.
+ collected_options = sorted(set(collected_options))
+ return collected_options
+
+ def _run(self, tool, options, state):
+ for step in self._steps:
+ step(tool, options).run(state)
+
+ def run_and_handle_errors(self, tool, options, state=None):
+ if not state:
+ state = {}
+ try:
+ self._run(tool, options, state)
+ except CheckoutNeedsUpdate, e:
+ log("Commit failed because the checkout is out of date. Please update and try again.")
+ log("You can pass --no-build to skip building/testing after update if you believe the new commits did not affect the results.")
+ QueueEngine.exit_after_handled_error(e)
+ except ScriptError, e:
+ if not options.quiet:
+ log(e.message_with_output())
+ if options.parent_command:
+ command = tool.command_by_name(options.parent_command)
+ command.handle_script_error(tool, state, e)
+ QueueEngine.exit_after_handled_error(e)
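
Editor's note: StepSequence runs its step classes in order against a shared state dictionary and funnels error handling through one place. A minimal sketch (not part of this patch) of driving it directly, reusing the MockBugzillaTool and Mock helpers that steps_unittest.py in this change already depends on; the specific option values are assumptions chosen so both steps return early without touching the checkout.

from webkitpy.mock import Mock
from webkitpy.mock_bugzillatool import MockBugzillaTool
from webkitpy.steps.runtests import RunTests
from webkitpy.steps.update import Update
from webkitpy.stepsequence import StepSequence

sequence = StepSequence([Update, RunTests])
options = Mock()         # stands in for parsed command-line options
options.update = False   # Update.run() returns without updating
options.build = False    # RunTests.run() bails out before running anything
options.quiet = True
sequence.run_and_handle_errors(MockBugzillaTool(), options)
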
diff --git a/WebKitTools/Scripts/webkitpy/stepsequence.pyc b/WebKitTools/Scripts/webkitpy/stepsequence.pyc
new file mode 100644
index 0000000..4b3505e
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/stepsequence.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/style/__init__.py b/WebKitTools/Scripts/webkitpy/style/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/WebKitTools/Scripts/webkitpy/style/checker.py b/WebKitTools/Scripts/webkitpy/style/checker.py
new file mode 100644
index 0000000..faf954f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/checker.py
@@ -0,0 +1,809 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Front end of some style-checker modules."""
+
+import codecs
+import getopt
+import os.path
+import sys
+
+from .. style_references import parse_patch
+from error_handlers import DefaultStyleErrorHandler
+from error_handlers import PatchStyleErrorHandler
+from processors.cpp import CppProcessor
+from processors.text import TextProcessor
+
+
+# These defaults are used by check-webkit-style.
+WEBKIT_DEFAULT_VERBOSITY = 1
+WEBKIT_DEFAULT_OUTPUT_FORMAT = 'emacs'
+
+
+# FIXME: For style categories we will never want to have, remove them.
+# For categories for which we want to have similar functionality,
+# modify the implementation and enable them.
+#
+# Throughout this module, we use "filter rule" rather than "filter"
+# for an individual boolean filter flag like "+foo". This allows us to
+# reserve "filter" for what one gets by collectively applying all of
+# the filter rules.
+#
+# The _WEBKIT_FILTER_RULES are prepended to any user-specified filter
+# rules. Since by default all errors are on, only include rules that
+# begin with a - sign.
+WEBKIT_DEFAULT_FILTER_RULES = [
+ '-build/endif_comment',
+ '-build/include_what_you_use', # <string> for std::string
+ '-build/storage_class', # const static
+ '-legal/copyright',
+ '-readability/multiline_comment',
+ '-readability/braces', # int foo() {};
+ '-readability/fn_size',
+ '-readability/casting',
+ '-readability/function',
+ '-runtime/arrays', # variable length array
+ '-runtime/casting',
+ '-runtime/sizeof',
+ '-runtime/explicit', # explicit
+ '-runtime/virtual', # virtual dtor
+ '-runtime/printf',
+ '-runtime/threadsafe_fn',
+ '-runtime/rtti',
+ '-whitespace/blank_line',
+ '-whitespace/end_of_line',
+ '-whitespace/labels',
+ ]
+
+
+# Some files should be skipped when checking style. For example,
+# WebKit maintains some files in Mozilla style on purpose to ease
+# future merges.
+#
+# Include a warning for skipped files that are less obvious.
+SKIPPED_FILES_WITH_WARNING = [
+ # The Qt API and tests do not follow WebKit style.
+ # They follow Qt style. :)
+ "gtk2drawing.c", # WebCore/platform/gtk/gtk2drawing.c
+ "gtk2drawing.h", # WebCore/platform/gtk/gtk2drawing.h
+ "JavaScriptCore/qt/api/",
+ "WebKit/gtk/tests/",
+ "WebKit/qt/Api/",
+ "WebKit/qt/tests/",
+ ]
+
+
+# Don't include a warning for skipped files that are more common
+# and more obvious.
+SKIPPED_FILES_WITHOUT_WARNING = [
+ "LayoutTests/"
+ ]
+
+
+def style_categories():
+ """Return the set of all categories used by check-webkit-style."""
+ # If other processors had categories, we would take their union here.
+ return CppProcessor.categories
+
+
+def webkit_argument_defaults():
+ """Return the DefaultArguments instance for use by check-webkit-style."""
+ return ArgumentDefaults(WEBKIT_DEFAULT_OUTPUT_FORMAT,
+ WEBKIT_DEFAULT_VERBOSITY,
+ WEBKIT_DEFAULT_FILTER_RULES)
+
+
+def _create_usage(defaults):
+ """Return the usage string to display for command help.
+
+ Args:
+ defaults: An ArgumentDefaults instance.
+
+ """
+ usage = """
+Syntax: %(program_name)s [--verbose=#] [--git-commit=<SingleCommit>] [--output=vs7]
+ [--filter=-x,+y,...] [file] ...
+
+ The style guidelines this tries to follow are here:
+ http://webkit.org/coding/coding-style.html
+
+ Every style error is given a confidence score from 1-5, with 5 meaning
+ we are certain of the problem, and 1 meaning it could be a legitimate
+ construct. This can miss some errors and does not substitute for
+ code review.
+
+ To prevent specific lines from being linted, add a '// NOLINT' comment to the
+ end of the line.
+
+ Linted extensions are .cpp, .c and .h. Other file types are ignored.
+
+ The file parameter is optional and accepts multiple files. Leaving
+ out the file parameter applies the check to all files considered changed
+ by your source control management system.
+
+ Flags:
+
+ verbose=#
+ A number 1-5 that restricts output to errors with a confidence
+ score at or above this value. In particular, the value 1 displays
+ all errors. The default is %(default_verbosity)s.
+
+ git-commit=<SingleCommit>
+ Checks the style of everything from the given commit to the local tree.
+
+ output=vs7
+ The output format, which may be one of
+ emacs : to ease emacs parsing
+ vs7 : compatible with Visual Studio
+ Defaults to "%(default_output_format)s". Other formats are unsupported.
+
+ filter=-x,+y,...
+ A comma-separated list of boolean filter rules used to filter
+ which categories of style guidelines to check. The script checks
+ a category if the category passes the filter rules, as follows.
+
+ Any webkit category starts out passing. All filter rules are then
+ evaluated left to right, with later rules taking precedence. For
+ example, the rule "+foo" passes any category that starts with "foo",
+ and "-foo" fails any such category. The filter input "-whitespace,
+ +whitespace/braces" fails the category "whitespace/tab" and passes
+ "whitespace/braces".
+
+ Examples: --filter=-whitespace,+whitespace/braces
+ --filter=-whitespace,-runtime/printf,+runtime/printf_format
+ --filter=-,+build/include_what_you_use
+
+ Category names appear in error messages in brackets, for example
+ [whitespace/indent]. To see a list of all categories available to
+ %(program_name)s, along with which are enabled by default, pass
+ the empty filter as follows:
+ --filter=
+""" % {'program_name': os.path.basename(sys.argv[0]),
+ 'default_verbosity': defaults.verbosity,
+ 'default_output_format': defaults.output_format}
+
+ return usage
+
+
+class CategoryFilter(object):
+
+ """Filters whether to check style categories."""
+
+ def __init__(self, filter_rules=None):
+ """Create a category filter.
+
+ This method performs argument validation but does not strip
+ leading or trailing white space.
+
+ Args:
+ filter_rules: A list of strings that are filter rules, which
+ are strings beginning with the plus or minus
+ symbol (+/-). The list should include any
+ default filter rules at the beginning.
+ Defaults to the empty list.
+
+ Raises:
+ ValueError: Invalid filter rule if a rule does not start with
+ plus ("+") or minus ("-").
+
+ """
+ if filter_rules is None:
+ filter_rules = []
+
+ for rule in filter_rules:
+ if not (rule.startswith('+') or rule.startswith('-')):
+ raise ValueError('Invalid filter rule "%s": every rule '
+ 'in the --filter flag must start '
+ 'with + or -.' % rule)
+
+ self._filter_rules = filter_rules
+ self._should_check_category = {} # Cached dictionary of category to True/False
+
+ def __str__(self):
+ return ",".join(self._filter_rules)
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this CategoryFilter instance is equal to another."""
+ return self._filter_rules == other._filter_rules
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce __ne__() from __eq__().
+ return not (self == other)
+
+ def should_check(self, category):
+ """Return whether the category should be checked.
+
+ The rules for determining whether a category should be checked
+ are as follows. By default all categories should be checked.
+ Then apply the filter rules in order from first to last, with
+ later flags taking precedence.
+
+ A filter rule applies to a category if the string after the
+ leading plus/minus (+/-) matches the beginning of the category
+ name. A plus (+) means the category should be checked, while a
+ minus (-) means the category should not be checked.
+
+ """
+ if category in self._should_check_category:
+ return self._should_check_category[category]
+
+ should_check = True # All categories checked by default.
+ for rule in self._filter_rules:
+ if not category.startswith(rule[1:]):
+ continue
+ should_check = rule.startswith('+')
+ self._should_check_category[category] = should_check # Update cache.
+ return should_check
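
Editor's note: a quick worked example of the rule semantics documented in should_check() above (an editorial sketch, not part of this patch), using the same rules the usage text cites. Later rules take precedence and a rule matches by category-name prefix.

from webkitpy.style.checker import CategoryFilter

category_filter = CategoryFilter(["-whitespace", "+whitespace/braces"])
assert not category_filter.should_check("whitespace/tab")    # fails "-whitespace"
assert category_filter.should_check("whitespace/braces")     # rescued by the "+" rule
assert category_filter.should_check("runtime/printf")        # untouched categories pass
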
+
+
+# This class should not have knowledge of the flag key names.
+class ProcessorOptions(object):
+
+ """A container to store options to use when checking style.
+
+ Attributes:
+ output_format: A string that is the output format. The supported
+ output formats are "emacs" which emacs can parse
+ and "vs7" which Microsoft Visual Studio 7 can parse.
+
+        verbosity: An integer between 1 and 5, inclusive, that restricts output
+ to errors with a confidence score at or above this value.
+ The default is 1, which displays all errors.
+
+ filter: A CategoryFilter instance. The default is the empty filter,
+ which means that all categories should be checked.
+
+ git_commit: A string representing the git commit to check.
+ The default is None.
+
+ extra_flag_values: A string-string dictionary of all flag key-value
+ pairs that are not otherwise represented by this
+ class. The default is the empty dictionary.
+
+ """
+
+ def __init__(self, output_format="emacs", verbosity=1, filter=None,
+ git_commit=None, extra_flag_values=None):
+ if filter is None:
+ filter = CategoryFilter()
+ if extra_flag_values is None:
+ extra_flag_values = {}
+
+ if output_format not in ("emacs", "vs7"):
+ raise ValueError('Invalid "output_format" parameter: '
+ 'value must be "emacs" or "vs7". '
+ 'Value given: "%s".' % output_format)
+
+ if (verbosity < 1) or (verbosity > 5):
+ raise ValueError('Invalid "verbosity" parameter: '
+ "value must be an integer between 1-5 inclusive. "
+ 'Value given: "%s".' % verbosity)
+
+ self.output_format = output_format
+ self.verbosity = verbosity
+ self.filter = filter
+ self.git_commit = git_commit
+ self.extra_flag_values = extra_flag_values
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this ProcessorOptions instance is equal to another."""
+ if self.output_format != other.output_format:
+ return False
+ if self.verbosity != other.verbosity:
+ return False
+ if self.filter != other.filter:
+ return False
+ if self.git_commit != other.git_commit:
+ return False
+ if self.extra_flag_values != other.extra_flag_values:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+        # Python does not automatically deduce __ne__() from __eq__().
+ return not (self == other)
+
+ def is_reportable(self, category, confidence_in_error):
+ """Return whether an error is reportable.
+
+ An error is reportable if the confidence in the error
+ is at least the current verbosity level, and if the current
+ filter says that the category should be checked.
+
+ Args:
+ category: A string that is a style category.
+ confidence_in_error: An integer between 1 and 5, inclusive, that
+ represents the application's confidence in
+ the error. A higher number signifies greater
+ confidence.
+
+ """
+ if confidence_in_error < self.verbosity:
+ return False
+
+ if self.filter is None:
+ return True # All categories should be checked by default.
+
+ return self.filter.should_check(category)
+
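+# Illustrative sketch (exposition only; hypothetical helper, not called by the
+# tool): is_reportable() combines the verbosity threshold with the category
+# filter, as exercised by the unit tests.
+def _example_is_reportable():
+    options = ProcessorOptions(verbosity=3, filter=CategoryFilter(["-xyz"]))
+    assert options.is_reportable("abc", 3)      # Confidence meets verbosity.
+    assert not options.is_reportable("abc", 2)  # Confidence below verbosity.
+    assert not options.is_reportable("xyz", 5)  # Category filtered out.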
+
+# This class should not have knowledge of the flag key names.
+class ArgumentDefaults(object):
+
+ """A container to store default argument values.
+
+ Attributes:
+ output_format: A string that is the default output format.
+ verbosity: An integer that is the default verbosity level.
+ filter_rules: A list of strings that are boolean filter rules
+ to prepend to any user-specified rules.
+
+ """
+
+ def __init__(self, default_output_format, default_verbosity,
+ default_filter_rules):
+ self.output_format = default_output_format
+ self.verbosity = default_verbosity
+ self.filter_rules = default_filter_rules
+
+
+class ArgumentPrinter(object):
+
+ """Supports the printing of check-webkit-style command arguments."""
+
+ def _flag_pair_to_string(self, flag_key, flag_value):
+ return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value }
+
+ def to_flag_string(self, options):
+ """Return a flag string yielding the given ProcessorOptions instance.
+
+ This method orders the flag values alphabetically by the flag key.
+
+ Args:
+ options: A ProcessorOptions instance.
+
+ """
+ flags = options.extra_flag_values.copy()
+
+ flags['output'] = options.output_format
+ flags['verbose'] = options.verbosity
+ if options.filter:
+ # Only include the filter flag if rules are present.
+ filter_string = str(options.filter)
+ if filter_string:
+ flags['filter'] = filter_string
+ if options.git_commit:
+ flags['git-commit'] = options.git_commit
+
+ flag_string = ''
+ # Alphabetizing lets us unit test this method.
+ for key in sorted(flags.keys()):
+ flag_string += self._flag_pair_to_string(key, flags[key]) + ' '
+
+ return flag_string.strip()
+
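+# Illustrative sketch (exposition only; hypothetical helper, not called by the
+# tool): to_flag_string() alphabetizes flags by key and omits --filter and
+# --git-commit when they are unset.
+def _example_to_flag_string():
+    printer = ArgumentPrinter()
+    options = ProcessorOptions(output_format="vs7", verbosity=5,
+                               filter=CategoryFilter(["+foo", "-bar"]),
+                               git_commit="git")
+    assert printer.to_flag_string(options) == (
+        "--filter=+foo,-bar --git-commit=git --output=vs7 --verbose=5")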
+
+class ArgumentParser(object):
+
+ """Supports the parsing of check-webkit-style command arguments.
+
+ Attributes:
+ defaults: An ArgumentDefaults instance.
+ create_usage: A function that accepts an ArgumentDefaults instance
+ and returns a string of usage instructions.
+ This defaults to the function used to generate the
+ usage string for check-webkit-style.
+ doc_print: A function that accepts a string parameter and that is
+ called to display help messages. This defaults to
+ sys.stderr.write().
+
+ """
+
+ def __init__(self, argument_defaults, create_usage=None, doc_print=None):
+ if create_usage is None:
+ create_usage = _create_usage
+ if doc_print is None:
+ doc_print = sys.stderr.write
+
+ self.defaults = argument_defaults
+ self.create_usage = create_usage
+ self.doc_print = doc_print
+
+ def _exit_with_usage(self, error_message=''):
+ """Exit and print a usage string with an optional error message.
+
+ Args:
+ error_message: A string that is an error message to print.
+
+ """
+ usage = self.create_usage(self.defaults)
+ self.doc_print(usage)
+ if error_message:
+ sys.exit('\nFATAL ERROR: ' + error_message)
+ else:
+ sys.exit(1)
+
+ def _exit_with_categories(self):
+ """Exit and print the style categories and default filter rules."""
+ self.doc_print('\nAll categories:\n')
+ categories = style_categories()
+ for category in sorted(categories):
+ self.doc_print(' ' + category + '\n')
+
+ self.doc_print('\nDefault filter rules**:\n')
+ for filter_rule in sorted(self.defaults.filter_rules):
+ self.doc_print(' ' + filter_rule + '\n')
+        self.doc_print('\n**The command always evaluates the above rules '
+                       'before any --filter flag.\n\n')
+
+ sys.exit(0)
+
+ def _parse_filter_flag(self, flag_value):
+ """Parse the value of the --filter flag.
+
+ These filters are applied when deciding whether to emit a given
+ error message.
+
+ Args:
+ flag_value: A string of comma-separated filter rules, for
+ example "-whitespace,+whitespace/indent".
+
+ """
+ filters = []
+ for uncleaned_filter in flag_value.split(','):
+ filter = uncleaned_filter.strip()
+ if not filter:
+ continue
+ filters.append(filter)
+ return filters
+
+ def parse(self, args, extra_flags=None):
+ """Parse the command line arguments to check-webkit-style.
+
+ Args:
+ args: A list of command-line arguments as returned by sys.argv[1:].
+            extra_flags: A list of flags whose values we want to extract but
+                         that are not supported by the ProcessorOptions class,
+                         for example "new_flag=". This defaults to the
+                         empty list.
+
+ Returns:
+ A tuple of (filenames, options)
+
+ filenames: The list of filenames to check.
+ options: A ProcessorOptions instance.
+
+ """
+ if extra_flags is None:
+ extra_flags = []
+
+ output_format = self.defaults.output_format
+ verbosity = self.defaults.verbosity
+ filter_rules = self.defaults.filter_rules
+
+ # The flags already supported by the ProcessorOptions class.
+ flags = ['help', 'output=', 'verbose=', 'filter=', 'git-commit=']
+
+ for extra_flag in extra_flags:
+ if extra_flag in flags:
+                raise ValueError('Flag \'%(extra_flag)s\' is duplicated '
+ 'or already supported.' %
+ {'extra_flag': extra_flag})
+ flags.append(extra_flag)
+
+ try:
+ (opts, filenames) = getopt.getopt(args, '', flags)
+ except getopt.GetoptError:
+ # FIXME: Settle on an error handling approach: come up
+ # with a consistent guideline as to when and whether
+ # a ValueError should be raised versus calling
+ # sys.exit when needing to interrupt execution.
+ self._exit_with_usage('Invalid arguments.')
+
+ extra_flag_values = {}
+ git_commit = None
+
+ for (opt, val) in opts:
+ if opt == '--help':
+ self._exit_with_usage()
+ elif opt == '--output':
+ output_format = val
+ elif opt == '--verbose':
+ verbosity = val
+ elif opt == '--git-commit':
+ git_commit = val
+ elif opt == '--filter':
+ if not val:
+ self._exit_with_categories()
+ # Prepend the defaults.
+ filter_rules = filter_rules + self._parse_filter_flag(val)
+ else:
+ extra_flag_values[opt] = val
+
+ # Check validity of resulting values.
+ if filenames and (git_commit != None):
+ self._exit_with_usage('It is not possible to check files and a '
+ 'specific commit at the same time.')
+
+ if output_format not in ('emacs', 'vs7'):
+ raise ValueError('Invalid --output value "%s": The only '
+ 'allowed output formats are emacs and vs7.' %
+ output_format)
+
+ verbosity = int(verbosity)
+ if (verbosity < 1) or (verbosity > 5):
+            raise ValueError('Invalid --verbose value %s: value must '
+                             'be an integer between 1 and 5.' % verbosity)
+
+ filter = CategoryFilter(filter_rules)
+
+ options = ProcessorOptions(output_format, verbosity, filter,
+ git_commit, extra_flag_values)
+
+ return (filenames, options)
+
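+# Illustrative sketch (exposition only; hypothetical helper with made-up
+# defaults, not called by the tool): parse() returns the file names plus a
+# ProcessorOptions built from the flags, with default filter rules prepended.
+def _example_parse():
+    defaults = ArgumentDefaults("emacs", 1, ["-build/include"])
+    parser = ArgumentParser(defaults)
+    (files, options) = parser.parse(["--verbose=3", "foo.cpp"])
+    assert files == ["foo.cpp"]
+    assert options.verbosity == 3
+    assert str(options.filter) == "-build/include"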
+
+# Enum-like idiom
+class FileType:
+
+ NONE = 1
+ # Alphabetize remaining types
+ CPP = 2
+ TEXT = 3
+
+
+class ProcessorDispatcher(object):
+
+ """Supports determining whether and how to check style, based on path."""
+
+ cpp_file_extensions = (
+ 'c',
+ 'cpp',
+ 'h',
+ )
+
+ text_file_extensions = (
+ 'css',
+ 'html',
+ 'idl',
+ 'js',
+ 'mm',
+ 'php',
+ 'pm',
+ 'py',
+ 'txt',
+ )
+
+ def _file_extension(self, file_path):
+ """Return the file extension without the leading dot."""
+ return os.path.splitext(file_path)[1].lstrip(".")
+
+ def should_skip_with_warning(self, file_path):
+ """Return whether the given file should be skipped with a warning."""
+ for skipped_file in SKIPPED_FILES_WITH_WARNING:
+ if file_path.find(skipped_file) >= 0:
+ return True
+ return False
+
+ def should_skip_without_warning(self, file_path):
+ """Return whether the given file should be skipped without a warning."""
+ for skipped_file in SKIPPED_FILES_WITHOUT_WARNING:
+ if file_path.find(skipped_file) >= 0:
+ return True
+ return False
+
+ def _file_type(self, file_path):
+ """Return the file type corresponding to the given file."""
+ file_extension = self._file_extension(file_path)
+
+ if (file_extension in self.cpp_file_extensions) or (file_path == '-'):
+ # FIXME: Do something about the comment below and the issue it
+ # raises since cpp_style already relies on the extension.
+ #
+ # Treat stdin as C++. Since the extension is unknown when
+ # reading from stdin, cpp_style tests should not rely on
+ # the extension.
+ return FileType.CPP
+ elif ("ChangeLog" in file_path
+ or "WebKitTools/Scripts/" in file_path
+ or file_extension in self.text_file_extensions):
+ return FileType.TEXT
+ else:
+ return FileType.NONE
+
+ def _create_processor(self, file_type, file_path, handle_style_error, verbosity):
+ """Instantiate and return a style processor based on file type."""
+ if file_type == FileType.NONE:
+ processor = None
+ elif file_type == FileType.CPP:
+ file_extension = self._file_extension(file_path)
+ processor = CppProcessor(file_path, file_extension, handle_style_error, verbosity)
+ elif file_type == FileType.TEXT:
+ processor = TextProcessor(file_path, handle_style_error)
+ else:
+ raise ValueError('Invalid file type "%(file_type)s": the only valid file types '
+ "are %(NONE)s, %(CPP)s, and %(TEXT)s."
+ % {"file_type": file_type,
+ "NONE": FileType.NONE,
+ "CPP": FileType.CPP,
+ "TEXT": FileType.TEXT})
+
+ return processor
+
+ def dispatch_processor(self, file_path, handle_style_error, verbosity):
+ """Instantiate and return a style processor based on file path."""
+ file_type = self._file_type(file_path)
+
+ processor = self._create_processor(file_type,
+ file_path,
+ handle_style_error,
+ verbosity)
+ return processor
+
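+# Illustrative sketch (exposition only; hypothetical helper, not called by the
+# tool): the processor returned for a few representative paths.
+def _example_dispatch():
+    def handle_style_error(line_number, category, confidence, message):
+        pass
+    dispatcher = ProcessorDispatcher()
+    cpp = dispatcher.dispatch_processor("foo.cpp", handle_style_error, 3)
+    text = dispatcher.dispatch_processor("ChangeLog", handle_style_error, 3)
+    skipped = dispatcher.dispatch_processor("foo.png", handle_style_error, 3)
+    assert isinstance(cpp, CppProcessor)
+    assert isinstance(text, TextProcessor)
+    assert skipped is None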
+
+class StyleChecker(object):
+
+ """Supports checking style in files and patches.
+
+ Attributes:
+ error_count: An integer that is the total number of reported
+ errors for the lifetime of this StyleChecker
+ instance.
+ options: A ProcessorOptions instance that controls the behavior
+ of style checking.
+
+ """
+
+ def __init__(self, options, stderr_write=None):
+ """Create a StyleChecker instance.
+
+ Args:
+ options: See options attribute.
+ stderr_write: A function that takes a string as a parameter
+ and that is called when a style error occurs.
+ Defaults to sys.stderr.write. This should be
+ used only for unit tests.
+
+ """
+ if stderr_write is None:
+ stderr_write = sys.stderr.write
+
+ self._stderr_write = stderr_write
+ self.error_count = 0
+ self.options = options
+
+ def _increment_error_count(self):
+ """Increment the total count of reported errors."""
+ self.error_count += 1
+
+ def _process_file(self, processor, file_path, handle_style_error):
+ """Process the file using the given processor."""
+ try:
+ # Support the UNIX convention of using "-" for stdin. Note that
+ # we are not opening the file with universal newline support
+ # (which codecs doesn't support anyway), so the resulting lines do
+ # contain trailing '\r' characters if we are reading a file that
+ # has CRLF endings.
+ # If after the split a trailing '\r' is present, it is removed
+ # below. If it is not expected to be present (i.e. os.linesep !=
+            # '\r\n', unlike on Windows), a warning is issued below if this file
+ # is processed.
+ if file_path == '-':
+ lines = codecs.StreamReaderWriter(sys.stdin,
+ codecs.getreader('utf8'),
+ codecs.getwriter('utf8'),
+ 'replace').read().split('\n')
+ else:
+ lines = codecs.open(file_path, 'r', 'utf8', 'replace').read().split('\n')
+
+ carriage_return_found = False
+ # Remove trailing '\r'.
+ for line_number in range(len(lines)):
+ if lines[line_number].endswith('\r'):
+ lines[line_number] = lines[line_number].rstrip('\r')
+ carriage_return_found = True
+
+ except IOError:
+ self._stderr_write("Skipping input '%s': Can't open for reading\n" % file_path)
+ return
+
+ processor.process(lines)
+
+ if carriage_return_found and os.linesep != '\r\n':
+ # FIXME: Make sure this error also shows up when checking
+ # patches, if appropriate.
+ #
+ # Use 0 for line_number since outputting only one error for
+ # potentially several lines.
+            handle_style_error(file_path, 0, 'whitespace/newline', 1,
+                               'One or more unexpected \\r (^M) found; '
+                               'better to use only a \\n')
+
+ def check_file(self, file_path, handle_style_error=None, process_file=None):
+ """Check style in the given file.
+
+ Args:
+ file_path: A string that is the path of the file to process.
+ handle_style_error: The function to call when a style error
+ occurs. This parameter is meant for internal
+ use within this class. Defaults to a
+ DefaultStyleErrorHandler instance.
+ process_file: The function to call to process the file. This
+ parameter should be used only for unit tests.
+ Defaults to the file processing method of this class.
+
+ """
+ if handle_style_error is None:
+ handle_style_error = DefaultStyleErrorHandler(file_path,
+ self.options,
+ self._increment_error_count,
+ self._stderr_write)
+ if process_file is None:
+ process_file = self._process_file
+
+ dispatcher = ProcessorDispatcher()
+
+ if dispatcher.should_skip_without_warning(file_path):
+ return
+ if dispatcher.should_skip_with_warning(file_path):
+ self._stderr_write('Ignoring "%s": this file is exempt from the '
+ "style guide.\n" % file_path)
+ return
+
+ verbosity = self.options.verbosity
+ processor = dispatcher.dispatch_processor(file_path,
+ handle_style_error,
+ verbosity)
+ if processor is None:
+ return
+
+ process_file(processor, file_path, handle_style_error)
+
+ def check_patch(self, patch_string):
+ """Check style in the given patch.
+
+ Args:
+ patch_string: A string that is a patch string.
+
+ """
+ patch_files = parse_patch(patch_string)
+ for file_path, diff in patch_files.iteritems():
+ style_error_handler = PatchStyleErrorHandler(diff,
+ file_path,
+ self.options,
+ self._increment_error_count,
+ self._stderr_write)
+
+ self.check_file(file_path, style_error_handler)
+
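+# Illustrative sketch (exposition only; hypothetical helper, not called by the
+# tool): checking a patch string end to end. "patch_string" stands for any
+# unified-diff string.
+def _example_check_patch(patch_string):
+    options = ProcessorOptions(verbosity=1)
+    checker = StyleChecker(options)
+    checker.check_patch(patch_string)
+    return checker.error_count  # Total number of reportable errors found.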
diff --git a/WebKitTools/Scripts/webkitpy/style/checker_unittest.py b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py
new file mode 100755
index 0000000..4d6b2e7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for style.py."""
+
+import unittest
+
+import checker as style
+from checker import CategoryFilter
+from checker import ProcessorDispatcher
+from checker import ProcessorOptions
+from checker import StyleChecker
+from processors.cpp import CppProcessor
+from processors.text import TextProcessor
+
+class CategoryFilterTest(unittest.TestCase):
+
+ """Tests CategoryFilter class."""
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ self.assertRaises(ValueError, CategoryFilter, ["no_prefix"])
+ CategoryFilter() # No ValueError: works
+ CategoryFilter(["+"]) # No ValueError: works
+ CategoryFilter(["-"]) # No ValueError: works
+
+ def test_str(self):
+ """Test __str__ "to string" operator."""
+ filter = CategoryFilter(["+a", "-b"])
+ self.assertEquals(str(filter), "+a,-b")
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ filter1 = CategoryFilter(["+a", "+b"])
+ filter2 = CategoryFilter(["+a", "+b"])
+ filter3 = CategoryFilter(["+b", "+a"])
+
+ # == calls __eq__.
+ self.assertTrue(filter1 == filter2)
+ self.assertFalse(filter1 == filter3) # Cannot test with assertNotEqual.
+
+ def test_ne(self):
+ """Test __ne__ inequality function."""
+ # != calls __ne__.
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ self.assertFalse(CategoryFilter() != CategoryFilter())
+
+ def test_should_check(self):
+ """Test should_check() method."""
+ filter = CategoryFilter()
+ self.assertTrue(filter.should_check("everything"))
+ # Check a second time to exercise cache.
+ self.assertTrue(filter.should_check("everything"))
+
+ filter = CategoryFilter(["-"])
+ self.assertFalse(filter.should_check("anything"))
+ # Check a second time to exercise cache.
+ self.assertFalse(filter.should_check("anything"))
+
+ filter = CategoryFilter(["-", "+ab"])
+ self.assertTrue(filter.should_check("abc"))
+ self.assertFalse(filter.should_check("a"))
+
+ filter = CategoryFilter(["+", "-ab"])
+ self.assertFalse(filter.should_check("abc"))
+ self.assertTrue(filter.should_check("a"))
+
+
+class ProcessorOptionsTest(unittest.TestCase):
+
+ """Tests ProcessorOptions class."""
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ # Check default parameters.
+ options = ProcessorOptions()
+ self.assertEquals(options.extra_flag_values, {})
+ self.assertEquals(options.filter, CategoryFilter())
+ self.assertEquals(options.git_commit, None)
+ self.assertEquals(options.output_format, "emacs")
+ self.assertEquals(options.verbosity, 1)
+
+ # Check argument validation.
+ self.assertRaises(ValueError, ProcessorOptions, output_format="bad")
+ ProcessorOptions(output_format="emacs") # No ValueError: works
+ ProcessorOptions(output_format="vs7") # works
+ self.assertRaises(ValueError, ProcessorOptions, verbosity=0)
+ self.assertRaises(ValueError, ProcessorOptions, verbosity=6)
+ ProcessorOptions(verbosity=1) # works
+ ProcessorOptions(verbosity=5) # works
+
+ # Check attributes.
+ options = ProcessorOptions(extra_flag_values={"extra_value" : 2},
+ filter=CategoryFilter(["+"]),
+ git_commit="commit",
+ output_format="vs7",
+ verbosity=3)
+ self.assertEquals(options.extra_flag_values, {"extra_value" : 2})
+ self.assertEquals(options.filter, CategoryFilter(["+"]))
+ self.assertEquals(options.git_commit, "commit")
+ self.assertEquals(options.output_format, "vs7")
+ self.assertEquals(options.verbosity, 3)
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ # == calls __eq__.
+ self.assertTrue(ProcessorOptions() == ProcessorOptions())
+
+ # Verify that a difference in any argument cause equality to fail.
+ options = ProcessorOptions(extra_flag_values={"extra_value" : 1},
+ filter=CategoryFilter(["+"]),
+ git_commit="commit",
+ output_format="vs7",
+ verbosity=1)
+ self.assertFalse(options == ProcessorOptions(extra_flag_values={"extra_value" : 2}))
+ self.assertFalse(options == ProcessorOptions(filter=CategoryFilter(["-"])))
+ self.assertFalse(options == ProcessorOptions(git_commit="commit2"))
+ self.assertFalse(options == ProcessorOptions(output_format="emacs"))
+ self.assertFalse(options == ProcessorOptions(verbosity=2))
+
+ def test_ne(self):
+ """Test __ne__ inequality function."""
+ # != calls __ne__.
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ self.assertFalse(ProcessorOptions() != ProcessorOptions())
+
+ def test_is_reportable(self):
+ """Test is_reportable()."""
+ filter = CategoryFilter(["-xyz"])
+ options = ProcessorOptions(filter=filter, verbosity=3)
+
+ # Test verbosity
+ self.assertTrue(options.is_reportable("abc", 3))
+ self.assertFalse(options.is_reportable("abc", 2))
+
+ # Test filter
+ self.assertTrue(options.is_reportable("xy", 3))
+ self.assertFalse(options.is_reportable("xyz", 3))
+
+
+class WebKitArgumentDefaultsTest(unittest.TestCase):
+
+ """Tests validity of default arguments used by check-webkit-style."""
+
+ def defaults(self):
+ return style.webkit_argument_defaults()
+
+ def test_filter_rules(self):
+ defaults = self.defaults()
+ already_seen = []
+ all_categories = style.style_categories()
+ for rule in defaults.filter_rules:
+ # Check no leading or trailing white space.
+ self.assertEquals(rule, rule.strip())
+ # All categories are on by default, so defaults should
+ # begin with -.
+ self.assertTrue(rule.startswith('-'))
+ self.assertTrue(rule[1:] in all_categories)
+ # Check no rule occurs twice.
+ self.assertFalse(rule in already_seen)
+ already_seen.append(rule)
+
+ def test_defaults(self):
+ """Check that default arguments are valid."""
+ defaults = self.defaults()
+
+ # FIXME: We should not need to call parse() to determine
+ # whether the default arguments are valid.
+ parser = style.ArgumentParser(defaults)
+ # No need to test the return value here since we test parse()
+ # on valid arguments elsewhere.
+ parser.parse([]) # arguments valid: no error or SystemExit
+
+
+class ArgumentPrinterTest(unittest.TestCase):
+
+ """Tests the ArgumentPrinter class."""
+
+ _printer = style.ArgumentPrinter()
+
+ def _create_options(self, output_format='emacs', verbosity=3,
+ filter_rules=[], git_commit=None,
+ extra_flag_values={}):
+ filter = CategoryFilter(filter_rules)
+ return style.ProcessorOptions(output_format, verbosity, filter,
+ git_commit, extra_flag_values)
+
+ def test_to_flag_string(self):
+ options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git',
+ {'a': 0, 'z': 1})
+ self.assertEquals('--a=0 --filter=+foo,-bar --git-commit=git '
+ '--output=vs7 --verbose=5 --z=1',
+ self._printer.to_flag_string(options))
+
+ # This is to check that --filter and --git-commit do not
+ # show up when not user-specified.
+ options = self._create_options()
+ self.assertEquals('--output=emacs --verbose=3',
+ self._printer.to_flag_string(options))
+
+
+class ArgumentParserTest(unittest.TestCase):
+
+ """Test the ArgumentParser class."""
+
+ def _parse(self):
+ """Return a default parse() function for testing."""
+ return self._create_parser().parse
+
+ def _create_defaults(self, default_output_format='vs7',
+ default_verbosity=3,
+ default_filter_rules=['-', '+whitespace']):
+ """Return a default ArgumentDefaults instance for testing."""
+ return style.ArgumentDefaults(default_output_format,
+ default_verbosity,
+ default_filter_rules)
+
+ def _create_parser(self, defaults=None):
+ """Return an ArgumentParser instance for testing."""
+ def create_usage(_defaults):
+ """Return a usage string for testing."""
+ return "usage"
+
+ def doc_print(message):
+ # We do not want the usage string or style categories
+ # to print during unit tests, so print nothing.
+ return
+
+ if defaults is None:
+ defaults = self._create_defaults()
+
+ return style.ArgumentParser(defaults, create_usage, doc_print)
+
+ def test_parse_documentation(self):
+ parse = self._parse()
+
+ # FIXME: Test both the printing of the usage string and the
+ # filter categories help.
+
+ # Request the usage string.
+ self.assertRaises(SystemExit, parse, ['--help'])
+ # Request default filter rules and available style categories.
+ self.assertRaises(SystemExit, parse, ['--filter='])
+
+ def test_parse_bad_values(self):
+ parse = self._parse()
+
+ # Pass an unsupported argument.
+ self.assertRaises(SystemExit, parse, ['--bad'])
+
+ self.assertRaises(ValueError, parse, ['--verbose=bad'])
+ self.assertRaises(ValueError, parse, ['--verbose=0'])
+ self.assertRaises(ValueError, parse, ['--verbose=6'])
+ parse(['--verbose=1']) # works
+ parse(['--verbose=5']) # works
+
+ self.assertRaises(ValueError, parse, ['--output=bad'])
+ parse(['--output=vs7']) # works
+
+ # Pass a filter rule not beginning with + or -.
+ self.assertRaises(ValueError, parse, ['--filter=foo'])
+ parse(['--filter=+foo']) # works
+ # Pass files and git-commit at the same time.
+ self.assertRaises(SystemExit, parse, ['--git-commit=', 'file.txt'])
+ # Pass an extra flag already supported.
+ self.assertRaises(ValueError, parse, [], ['filter='])
+ parse([], ['extra=']) # works
+ # Pass an extra flag with typo.
+ self.assertRaises(SystemExit, parse, ['--extratypo='], ['extra='])
+ parse(['--extra='], ['extra=']) # works
+ self.assertRaises(ValueError, parse, [], ['extra=', 'extra='])
+
+
+ def test_parse_default_arguments(self):
+ parse = self._parse()
+
+ (files, options) = parse([])
+
+ self.assertEquals(files, [])
+
+ self.assertEquals(options.output_format, 'vs7')
+ self.assertEquals(options.verbosity, 3)
+ self.assertEquals(options.filter,
+ CategoryFilter(["-", "+whitespace"]))
+ self.assertEquals(options.git_commit, None)
+
+ def test_parse_explicit_arguments(self):
+ parse = self._parse()
+
+ # Pass non-default explicit values.
+ (files, options) = parse(['--output=emacs'])
+ self.assertEquals(options.output_format, 'emacs')
+ (files, options) = parse(['--verbose=4'])
+ self.assertEquals(options.verbosity, 4)
+ (files, options) = parse(['--git-commit=commit'])
+ self.assertEquals(options.git_commit, 'commit')
+ (files, options) = parse(['--filter=+foo,-bar'])
+ self.assertEquals(options.filter,
+ CategoryFilter(["-", "+whitespace", "+foo", "-bar"]))
+ # Spurious white space in filter rules.
+ (files, options) = parse(['--filter=+foo ,-bar'])
+ self.assertEquals(options.filter,
+ CategoryFilter(["-", "+whitespace", "+foo", "-bar"]))
+
+ # Pass extra flag values.
+ (files, options) = parse(['--extra'], ['extra'])
+ self.assertEquals(options.extra_flag_values, {'--extra': ''})
+ (files, options) = parse(['--extra='], ['extra='])
+ self.assertEquals(options.extra_flag_values, {'--extra': ''})
+ (files, options) = parse(['--extra=x'], ['extra='])
+ self.assertEquals(options.extra_flag_values, {'--extra': 'x'})
+
+ def test_parse_files(self):
+ parse = self._parse()
+
+ (files, options) = parse(['foo.cpp'])
+ self.assertEquals(files, ['foo.cpp'])
+
+ # Pass multiple files.
+ (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp'])
+ self.assertEquals(files, ['foo.cpp', 'bar.cpp'])
+
+
+class ProcessorDispatcherSkipTest(unittest.TestCase):
+
+ """Tests the "should skip" methods of the ProcessorDispatcher class."""
+
+ def test_should_skip_with_warning(self):
+ """Test should_skip_with_warning()."""
+ dispatcher = ProcessorDispatcher()
+
+ # Check a non-skipped file.
+ self.assertFalse(dispatcher.should_skip_with_warning("foo.txt"))
+
+ # Check skipped files.
+ paths_to_skip = [
+ "gtk2drawing.c",
+ "gtk2drawing.h",
+ "JavaScriptCore/qt/api/qscriptengine_p.h",
+ "WebCore/platform/gtk/gtk2drawing.c",
+ "WebCore/platform/gtk/gtk2drawing.h",
+ "WebKit/gtk/tests/testatk.c",
+ "WebKit/qt/Api/qwebpage.h",
+ "WebKit/qt/tests/qwebsecurityorigin/tst_qwebsecurityorigin.cpp",
+ ]
+
+ for path in paths_to_skip:
+ self.assertTrue(dispatcher.should_skip_with_warning(path),
+ "Checking: " + path)
+
+ def test_should_skip_without_warning(self):
+ """Test should_skip_without_warning()."""
+ dispatcher = ProcessorDispatcher()
+
+ # Check a non-skipped file.
+ self.assertFalse(dispatcher.should_skip_without_warning("foo.txt"))
+
+ # Check skipped files.
+ paths_to_skip = [
+ # LayoutTests folder
+ "LayoutTests/foo.txt",
+ ]
+
+ for path in paths_to_skip:
+ self.assertTrue(dispatcher.should_skip_without_warning(path),
+ "Checking: " + path)
+
+
+class ProcessorDispatcherDispatchTest(unittest.TestCase):
+
+ """Tests dispatch_processor() method of ProcessorDispatcher class."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ def dispatch_processor(self, file_path):
+ """Call dispatch_processor() with the given file path."""
+ dispatcher = ProcessorDispatcher()
+ processor = dispatcher.dispatch_processor(file_path,
+ self.mock_handle_style_error,
+ verbosity=3)
+ return processor
+
+ def assert_processor_none(self, file_path):
+ """Assert that the dispatched processor is None."""
+ processor = self.dispatch_processor(file_path)
+ self.assertTrue(processor is None, 'Checking: "%s"' % file_path)
+
+ def assert_processor(self, file_path, expected_class):
+ """Assert the type of the dispatched processor."""
+ processor = self.dispatch_processor(file_path)
+ got_class = processor.__class__
+ self.assertEquals(got_class, expected_class,
+ 'For path "%(file_path)s" got %(got_class)s when '
+ "expecting %(expected_class)s."
+ % {"file_path": file_path,
+ "got_class": got_class,
+ "expected_class": expected_class})
+
+ def assert_processor_cpp(self, file_path):
+ """Assert that the dispatched processor is a CppProcessor."""
+ self.assert_processor(file_path, CppProcessor)
+
+ def assert_processor_text(self, file_path):
+ """Assert that the dispatched processor is a TextProcessor."""
+ self.assert_processor(file_path, TextProcessor)
+
+ def test_cpp_paths(self):
+ """Test paths that should be checked as C++."""
+ paths = [
+ "-",
+ "foo.c",
+ "foo.cpp",
+ "foo.h",
+ ]
+
+ for path in paths:
+ self.assert_processor_cpp(path)
+
+ # Check processor attributes on a typical input.
+ file_base = "foo"
+ file_extension = "c"
+ file_path = file_base + "." + file_extension
+ self.assert_processor_cpp(file_path)
+ processor = self.dispatch_processor(file_path)
+ self.assertEquals(processor.file_extension, file_extension)
+ self.assertEquals(processor.file_path, file_path)
+ self.assertEquals(processor.handle_style_error, self.mock_handle_style_error)
+ self.assertEquals(processor.verbosity, 3)
+ # Check "-" for good measure.
+ file_base = "-"
+ file_extension = ""
+ file_path = file_base
+ self.assert_processor_cpp(file_path)
+ processor = self.dispatch_processor(file_path)
+ self.assertEquals(processor.file_extension, file_extension)
+ self.assertEquals(processor.file_path, file_path)
+
+ def test_text_paths(self):
+ """Test paths that should be checked as text."""
+ paths = [
+ "ChangeLog",
+ "foo.css",
+ "foo.html",
+ "foo.idl",
+ "foo.js",
+ "foo.mm",
+ "foo.php",
+ "foo.pm",
+ "foo.py",
+ "foo.txt",
+ "FooChangeLog.bak",
+ "WebCore/ChangeLog",
+ "WebCore/inspector/front-end/inspector.js",
+ "WebKitTools/Scripts/check-webkit=style",
+ "WebKitTools/Scripts/modules/text_style.py",
+ ]
+
+ for path in paths:
+ self.assert_processor_text(path)
+
+ # Check processor attributes on a typical input.
+ file_base = "foo"
+ file_extension = "css"
+ file_path = file_base + "." + file_extension
+ self.assert_processor_text(file_path)
+ processor = self.dispatch_processor(file_path)
+ self.assertEquals(processor.file_path, file_path)
+ self.assertEquals(processor.handle_style_error, self.mock_handle_style_error)
+
+ def test_none_paths(self):
+ """Test paths that have no file type.."""
+ paths = [
+ "Makefile",
+ "foo.png",
+ "foo.exe",
+ ]
+
+ for path in paths:
+ self.assert_processor_none(path)
+
+
+class StyleCheckerTest(unittest.TestCase):
+
+ """Test the StyleChecker class.
+
+ Attributes:
+ error_messages: A string containing all of the warning messages
+ written to the mock_stderr_write method of
+ this class.
+
+ """
+
+ def _mock_stderr_write(self, message):
+ pass
+
+ def _style_checker(self, options):
+ return StyleChecker(options, self._mock_stderr_write)
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ options = ProcessorOptions()
+ style_checker = self._style_checker(options)
+
+ self.assertEquals(style_checker.error_count, 0)
+ self.assertEquals(style_checker.options, options)
+
+
+class StyleCheckerCheckFileTest(unittest.TestCase):
+
+ """Test the check_file() method of the StyleChecker class.
+
+ The check_file() method calls its process_file parameter when
+ given a file that should not be skipped.
+
+ The "got_*" attributes of this class are the parameters passed
+ to process_file by calls to check_file() made by this test
+ class. These attributes allow us to check the parameter values
+ passed internally to the process_file function.
+
+ Attributes:
+ got_file_path: The file_path parameter passed by check_file()
+ to its process_file parameter.
+ got_handle_style_error: The handle_style_error parameter passed
+ by check_file() to its process_file
+ parameter.
+ got_processor: The processor parameter passed by check_file() to
+ its process_file parameter.
+ warning_messages: A string containing all of the warning messages
+ written to the mock_stderr_write method of
+ this class.
+
+ """
+ def setUp(self):
+ self.got_file_path = None
+ self.got_handle_style_error = None
+ self.got_processor = None
+ self.warning_messages = ""
+
+ def mock_stderr_write(self, warning_message):
+ self.warning_messages += warning_message
+
+ def mock_handle_style_error(self):
+ pass
+
+ def mock_process_file(self, processor, file_path, handle_style_error):
+ """A mock _process_file().
+
+ See the documentation for this class for more information
+ on this function.
+
+ """
+ self.got_file_path = file_path
+ self.got_handle_style_error = handle_style_error
+ self.got_processor = processor
+
+ def assert_attributes(self,
+ expected_file_path,
+ expected_handle_style_error,
+ expected_processor,
+ expected_warning_messages):
+ """Assert that the attributes of this class equal the given values."""
+ self.assertEquals(self.got_file_path, expected_file_path)
+ self.assertEquals(self.got_handle_style_error, expected_handle_style_error)
+ self.assertEquals(self.got_processor, expected_processor)
+ self.assertEquals(self.warning_messages, expected_warning_messages)
+
+ def call_check_file(self, file_path):
+ """Call the check_file() method of a test StyleChecker instance."""
+ # Confirm that the attributes are reset.
+ self.assert_attributes(None, None, None, "")
+
+ # Create a test StyleChecker instance.
+ #
+ # The verbosity attribute is the only ProcessorOptions
+ # attribute that needs to be checked in this test.
+        # This is because it is the only option that is directly
+ # passed to the constructor of a style processor.
+ options = ProcessorOptions(verbosity=3)
+
+ style_checker = StyleChecker(options, self.mock_stderr_write)
+
+ style_checker.check_file(file_path,
+ self.mock_handle_style_error,
+ self.mock_process_file)
+
+ def test_check_file_on_skip_without_warning(self):
+ """Test check_file() for a skipped-without-warning file."""
+
+ file_path = "LayoutTests/foo.txt"
+
+ dispatcher = ProcessorDispatcher()
+ # Confirm that the input file is truly a skipped-without-warning file.
+ self.assertTrue(dispatcher.should_skip_without_warning(file_path))
+
+ # Check the outcome.
+ self.call_check_file(file_path)
+ self.assert_attributes(None, None, None, "")
+
+ def test_check_file_on_skip_with_warning(self):
+ """Test check_file() for a skipped-with-warning file."""
+
+ file_path = "gtk2drawing.c"
+
+ dispatcher = ProcessorDispatcher()
+ # Check that the input file is truly a skipped-with-warning file.
+ self.assertTrue(dispatcher.should_skip_with_warning(file_path))
+
+ # Check the outcome.
+ self.call_check_file(file_path)
+ self.assert_attributes(None, None, None,
+ 'Ignoring "gtk2drawing.c": this file is exempt from the style guide.\n')
+
+ def test_check_file_on_non_skipped(self):
+
+ # We use a C++ file since by using a CppProcessor, we can check
+ # that all of the possible information is getting passed to
+ # process_file (in particular, the verbosity).
+ file_base = "foo"
+ file_extension = "cpp"
+ file_path = file_base + "." + file_extension
+
+ dispatcher = ProcessorDispatcher()
+ # Check that the input file is truly a C++ file.
+ self.assertEquals(dispatcher._file_type(file_path), style.FileType.CPP)
+
+ # Check the outcome.
+ self.call_check_file(file_path)
+
+ expected_processor = CppProcessor(file_path, file_extension, self.mock_handle_style_error, 3)
+
+ self.assert_attributes(file_path,
+ self.mock_handle_style_error,
+ expected_processor,
+ "")
+
+
+if __name__ == '__main__':
+ import sys
+
+ unittest.main()
+
diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers.py b/WebKitTools/Scripts/webkitpy/style/error_handlers.py
new file mode 100644
index 0000000..54b1d76
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/error_handlers.py
@@ -0,0 +1,154 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Defines style error handler classes.
+
+A style error handler is a function to call when a style error is
+found. Style error handlers can also have state. A class that represents
+a style error handler should implement the following methods.
+
+Methods:
+
+ __call__(self, line_number, category, confidence, message):
+
+ Handle the occurrence of a style error.
+
+ Check whether the error is reportable. If so, report the details.
+
+ Args:
+ line_number: The integer line number of the line containing the error.
+ category: The name of the category of the error, for example
+ "whitespace/newline".
+        confidence: An integer between 1 and 5 that represents the level of
+ confidence in the error. The value 5 means that we are
+ certain of the problem, and the value 1 means that it
+ could be a legitimate construct.
+ message: The error message to report.
+
+"""
+
+
+import sys
+
+
+class DefaultStyleErrorHandler(object):
+
+ """The default style error handler."""
+
+ def __init__(self, file_path, options, increment_error_count,
+ stderr_write=None):
+ """Create a default style error handler.
+
+ Args:
+ file_path: The path to the file containing the error. This
+ is used for reporting to the user.
+ options: A ProcessorOptions instance.
+ increment_error_count: A function that takes no arguments and
+ increments the total count of reportable
+ errors.
+ stderr_write: A function that takes a string as a parameter
+ and that is called when a style error occurs.
+ Defaults to sys.stderr.write. This should be
+ used only for unit tests.
+
+ """
+ if stderr_write is None:
+ stderr_write = sys.stderr.write
+
+ self._file_path = file_path
+ self._increment_error_count = increment_error_count
+ self._options = options
+ self._stderr_write = stderr_write
+
+ def __call__(self, line_number, category, confidence, message):
+ """Handle the occurrence of a style error.
+
+ See the docstring of this module for more information.
+
+ """
+ if not self._options.is_reportable(category, confidence):
+ return
+
+ self._increment_error_count()
+
+ if self._options.output_format == 'vs7':
+ format_string = "%s(%s): %s [%s] [%d]\n"
+ else:
+ format_string = "%s:%s: %s [%s] [%d]\n"
+
+ self._stderr_write(format_string % (self._file_path,
+ line_number,
+ message,
+ category,
+ confidence))
+
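+# For illustration (exposition only): a reportable error for foo.h, line 100,
+# category "whitespace/tab", confidence 5 is written as
+#
+#     emacs format:  foo.h:100: message [whitespace/tab] [5]
+#     vs7 format:    foo.h(100): message [whitespace/tab] [5]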
+
+class PatchStyleErrorHandler(object):
+
+ """The style error function for patch files."""
+
+ def __init__(self, diff, file_path, options, increment_error_count,
+ stderr_write):
+ """Create a patch style error handler for the given path.
+
+ Args:
+ diff: A DiffFile instance.
+ Other arguments: see the DefaultStyleErrorHandler.__init__()
+ documentation for the other arguments.
+
+ """
+ self._diff = diff
+ self._default_error_handler = DefaultStyleErrorHandler(file_path,
+ options,
+ increment_error_count,
+ stderr_write)
+
+ # The line numbers of the modified lines. This is set lazily.
+ self._line_numbers = set()
+
+ def _get_line_numbers(self):
+ """Return the line numbers of the modified lines."""
+ if not self._line_numbers:
+ for line in self._diff.lines:
+                # When the deleted line number is not set, it means
+                # that the line is newly added.
+ if not line[0]:
+ self._line_numbers.add(line[1])
+
+ return self._line_numbers
+
+ def __call__(self, line_number, category, confidence, message):
+ """Handle the occurrence of a style error.
+
+ This function does not report errors occurring in lines not
+ modified or added.
+
+ Args: see the DefaultStyleErrorHandler.__call__() documentation.
+
+ """
+ if line_number not in self._get_line_numbers():
+ # Then the error is not reportable.
+ return
+
+ self._default_error_handler(line_number, category, confidence,
+ message)
+
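+# Illustrative sketch (exposition only; hypothetical helper, not called by the
+# tool): with a diff whose only added line is line 2 of "__init__.py", and the
+# remaining arguments set up as in the unit tests, only the error on the added
+# line reaches the wrapped DefaultStyleErrorHandler.
+def _example_patch_error_handler(diff, options, increment_error_count):
+    handle_error = PatchStyleErrorHandler(diff, "__init__.py", options,
+                                          increment_error_count,
+                                          sys.stderr.write)
+    handle_error(1, "whitespace/tab", 5, "message")  # Unmodified line: dropped.
+    handle_error(2, "whitespace/tab", 5, "message")  # Added line: reported.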
diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py
new file mode 100644
index 0000000..6a91ff2
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py
@@ -0,0 +1,163 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit tests for error_handlers.py."""
+
+
+import unittest
+
+from .. style_references import parse_patch
+from checker import ProcessorOptions
+from error_handlers import DefaultStyleErrorHandler
+from error_handlers import PatchStyleErrorHandler
+
+
+class StyleErrorHandlerTestBase(unittest.TestCase):
+
+ def setUp(self):
+ self._error_messages = ""
+ self._error_count = 0
+
+ def _mock_increment_error_count(self):
+ self._error_count += 1
+
+ def _mock_stderr_write(self, message):
+ self._error_messages += message
+
+
+class DefaultStyleErrorHandlerTest(StyleErrorHandlerTestBase):
+
+ """Tests DefaultStyleErrorHandler class."""
+
+ _category = "whitespace/tab"
+
+ def _options(self, output_format):
+ return ProcessorOptions(verbosity=3, output_format=output_format)
+
+ def _error_handler(self, options):
+ file_path = "foo.h"
+ return DefaultStyleErrorHandler(file_path,
+ options,
+ self._mock_increment_error_count,
+ self._mock_stderr_write)
+
+ def _prepare_call(self, output_format="emacs"):
+ """Return options after initializing."""
+ options = self._options(output_format)
+
+ # Test that count is initialized to zero.
+ self.assertEquals(0, self._error_count)
+ self.assertEquals("", self._error_messages)
+
+ return options
+
+ def _call_error_handler(self, options, confidence):
+ """Handle an error with given confidence."""
+ handle_error = self._error_handler(options)
+
+ line_number = 100
+ message = "message"
+
+ handle_error(line_number, self._category, confidence, message)
+
+ def test_call_non_reportable(self):
+ """Test __call__() method with a non-reportable error."""
+ confidence = 1
+ options = self._prepare_call()
+
+ # Confirm the error is not reportable.
+ self.assertFalse(options.is_reportable(self._category, confidence))
+
+ self._call_error_handler(options, confidence)
+
+ self.assertEquals(0, self._error_count)
+ self.assertEquals("", self._error_messages)
+
+ def test_call_reportable_emacs(self):
+ """Test __call__() method with a reportable error and emacs format."""
+ confidence = 5
+ options = self._prepare_call("emacs")
+
+ self._call_error_handler(options, confidence)
+
+ self.assertEquals(1, self._error_count)
+ self.assertEquals(self._error_messages,
+ "foo.h:100: message [whitespace/tab] [5]\n")
+
+ def test_call_reportable_vs7(self):
+ """Test __call__() method with a reportable error and vs7 format."""
+ confidence = 5
+ options = self._prepare_call("vs7")
+
+ self._call_error_handler(options, confidence)
+
+ self.assertEquals(1, self._error_count)
+ self.assertEquals(self._error_messages,
+ "foo.h(100): message [whitespace/tab] [5]\n")
+
+
+class PatchStyleErrorHandlerTest(StyleErrorHandlerTestBase):
+
+ """Tests PatchStyleErrorHandler class."""
+
+ file_path = "__init__.py"
+
+ patch_string = """diff --git a/__init__.py b/__init__.py
+index ef65bee..e3db70e 100644
+--- a/__init__.py
++++ b/__init__.py
+@@ -1 +1,2 @@
+ # Required for Python to search this directory for module files
++# New line
+
+"""
+
+ def test_call(self):
+ patch_files = parse_patch(self.patch_string)
+ diff = patch_files[self.file_path]
+
+ options = ProcessorOptions(verbosity=3)
+
+ handle_error = PatchStyleErrorHandler(diff,
+ self.file_path,
+ options,
+ self._mock_increment_error_count,
+ self._mock_stderr_write)
+
+ category = "whitespace/tab"
+ confidence = 5
+ message = "message"
+
+ # Confirm error is reportable.
+ self.assertTrue(options.is_reportable(category, confidence))
+
+ # Confirm error count initialized to zero.
+ self.assertEquals(0, self._error_count)
+
+ # Test error in unmodified line (error count does not increment).
+ handle_error(1, category, confidence, message)
+ self.assertEquals(0, self._error_count)
+
+ # Test error in modified line (error count increments).
+ handle_error(2, category, confidence, message)
+ self.assertEquals(1, self._error_count)
+
diff --git a/WebKitTools/Scripts/webkitpy/style/processors/__init__.py b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py
new file mode 100644
index 0000000..ef65bee
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/WebKitTools/Scripts/webkitpy/style/processors/cpp.py b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py
new file mode 100644
index 0000000..e1f41a4
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py
@@ -0,0 +1,3007 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is the modified version of Google's cpplint. The original code is
+# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
+
+"""Support for check-webkit-style."""
+
+import codecs
+import math # for log
+import os
+import os.path
+import re
+import sre_compile
+import string
+import sys
+import unicodedata
+
+
+# Headers that we consider STL headers.
+_STL_HEADERS = frozenset([
+ 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
+ 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
+ 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h',
+ 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
+ 'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
+ 'utility', 'vector', 'vector.h',
+ ])
+
+
+# Non-STL C++ system headers.
+_CPP_HEADERS = frozenset([
+ 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
+ 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
+ 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
+ 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
+ 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
+ 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
+ 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h',
+ 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
+ 'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h',
+ 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h',
+ 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
+ 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
+ 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
+ ])
+
+
+# Assertion macros. These are defined in base/logging.h and
+# testing/base/gunit.h. Note that the _M versions need to come first
+# for substring matching to work.
+_CHECK_MACROS = [
+ 'DCHECK', 'CHECK',
+ 'EXPECT_TRUE_M', 'EXPECT_TRUE',
+ 'ASSERT_TRUE_M', 'ASSERT_TRUE',
+ 'EXPECT_FALSE_M', 'EXPECT_FALSE',
+ 'ASSERT_FALSE_M', 'ASSERT_FALSE',
+ ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+ ('>=', 'GE'), ('>', 'GT'),
+ ('<=', 'LE'), ('<', 'LT')]:
+ _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+ _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+ _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+ _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+ ('>=', 'LT'), ('>', 'LE'),
+ ('<=', 'GT'), ('<', 'GE')]:
+ _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+ _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+ _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
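+# For illustration (exposition only): after the loops above, the tables map
+# each comparison operator to the macro the checker will suggest instead,
+# for example:
+#
+#     _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
+#     _CHECK_REPLACEMENT['EXPECT_FALSE']['<']  -> 'EXPECT_GE'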
+
+# These constants define types of headers for use with
+# _IncludeState.check_next_include_order().
+_CONFIG_HEADER = 0
+_PRIMARY_HEADER = 1
+_OTHER_HEADER = 2
+_MOC_HEADER = 3
+
+
+# The regexp compilation caching is inlined in all regexp functions for
+# performance reasons; factoring it out into a separate function turns out
+# to be noticeably expensive.
+_regexp_compile_cache = {}
+
+
+def match(pattern, s):
+ """Matches the string with the pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].match(s)
+
+
+def search(pattern, s):
+ """Searches the string for the pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].search(s)
+
+
+def sub(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].sub(replacement, s)
+
+
+def subn(pattern, replacement, s):
+ """Substitutes occurrences of a pattern, caching the compiled regexp."""
+ if not pattern in _regexp_compile_cache:
+ _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
+ return _regexp_compile_cache[pattern].subn(replacement, s)
+
+
+def up_to_unmatched_closing_paren(s):
+ """Splits a string into two parts up to first unmatched ')'.
+
+ Args:
+ s: a string which is a substring of line after '('
+ (e.g., "a == (b + c))").
+
+ Returns:
+ A pair of strings (prefix before first unmatched ')',
+      remainder of s after first unmatched ')'), e.g.,
+ up_to_unmatched_closing_paren("a == (b + c)) { ")
+ returns "a == (b + c)", " {".
+ Returns None, None if there is no unmatched ')'
+
+ """
+ i = 1
+ for pos, c in enumerate(s):
+ if c == '(':
+ i += 1
+ elif c == ')':
+ i -= 1
+ if i == 0:
+ return s[:pos], s[pos + 1:]
+ return None, None
+
+class _IncludeState(dict):
+ """Tracks line numbers for includes, and the order in which includes appear.
+
+ As a dict, an _IncludeState object serves as a mapping between include
+ filename and line number on which that file was included.
+
+ Call check_next_include_order() once for each header in the file, passing
+ in the type constants defined above. Calls made in an illegal order will
+ return a non-empty error message describing the problem.
+
+ """
+ # self._section will move monotonically through this set. If it ever
+ # needs to move backwards, check_next_include_order will raise an error.
+ _INITIAL_SECTION = 0
+ _CONFIG_SECTION = 1
+ _PRIMARY_SECTION = 2
+ _OTHER_SECTION = 3
+
+ _TYPE_NAMES = {
+ _CONFIG_HEADER: 'WebCore config.h',
+ _PRIMARY_HEADER: 'header this file implements',
+ _OTHER_HEADER: 'other header',
+ _MOC_HEADER: 'moc file',
+ }
+ _SECTION_NAMES = {
+ _INITIAL_SECTION: "... nothing.",
+ _CONFIG_SECTION: "WebCore config.h.",
+ _PRIMARY_SECTION: 'a header this file implements.',
+ _OTHER_SECTION: 'other header.',
+ }
+
+ def __init__(self):
+ dict.__init__(self)
+ self._section = self._INITIAL_SECTION
+ self._visited_primary_section = False
+ self.header_types = dict()
+
+ def visited_primary_section(self):
+ return self._visited_primary_section
+
+ def check_next_include_order(self, header_type, file_is_header):
+ """Returns a non-empty error message if the next header is out of order.
+
+ This function also updates the internal state to be ready to check
+ the next include.
+
+ Args:
+ header_type: One of the _XXX_HEADER constants defined above.
+ file_is_header: Whether the file that owns this _IncludeState is itself a header
+
+ Returns:
+ The empty string if the header is in the right order, or an
+ error message describing what's wrong.
+
+ """
+ if header_type == _CONFIG_HEADER and file_is_header:
+ return 'Header file should not contain WebCore config.h.'
+ if header_type == _PRIMARY_HEADER and file_is_header:
+ return 'Header file should not contain itself.'
+ if header_type == _MOC_HEADER:
+ return ''
+
+ error_message = ''
+ if self._section != self._OTHER_SECTION:
+ before_error_message = ('Found %s before %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section + 1]))
+ after_error_message = ('Found %s after %s' %
+ (self._TYPE_NAMES[header_type],
+ self._SECTION_NAMES[self._section]))
+
+ if header_type == _CONFIG_HEADER:
+ if self._section >= self._CONFIG_SECTION:
+ error_message = after_error_message
+ self._section = self._CONFIG_SECTION
+ elif header_type == _PRIMARY_HEADER:
+ if self._section >= self._PRIMARY_SECTION:
+ error_message = after_error_message
+ elif self._section < self._CONFIG_SECTION:
+ error_message = before_error_message
+ self._section = self._PRIMARY_SECTION
+ self._visited_primary_section = True
+ else:
+ assert header_type == _OTHER_HEADER
+ if not file_is_header and self._section < self._PRIMARY_SECTION:
+ error_message = before_error_message
+ self._section = self._OTHER_SECTION
+
+ return error_message
+
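+# A minimal sketch of how this state is meant to be driven (the real caller is
+# the #include checking code further down in this file):
+#     state = _IncludeState()
+#     state.check_next_include_order(_CONFIG_HEADER, file_is_header=False)  # ''
+#     state.check_next_include_order(_OTHER_HEADER, file_is_header=False)
+#     # The second call returns
+#     # 'Found other header before a header this file implements.'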
+
+class _FunctionState(object):
+ """Tracks current function name and the number of lines in its body.
+
+ Attributes:
+ verbosity: The verbosity level to use while checking style.
+
+ """
+
+ _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
+ _TEST_TRIGGER = 400 # 60% more than _NORMAL_TRIGGER.
+
+ def __init__(self, verbosity):
+ self.verbosity = verbosity
+ self.in_a_function = False
+ self.lines_in_function = 0
+ self.current_function = ''
+
+ def begin(self, function_name):
+ """Start analyzing function body.
+
+ Args:
+ function_name: The name of the function being tracked.
+ """
+ self.in_a_function = True
+ self.lines_in_function = 0
+ self.current_function = function_name
+
+ def count(self):
+ """Count line in current function body."""
+ if self.in_a_function:
+ self.lines_in_function += 1
+
+ def check(self, error, line_number):
+ """Report if too many lines in function body.
+
+ Args:
+ error: The function to call with any errors found.
+ line_number: The number of the line to check.
+ """
+ if match(r'T(EST|est)', self.current_function):
+ base_trigger = self._TEST_TRIGGER
+ else:
+ base_trigger = self._NORMAL_TRIGGER
+ trigger = base_trigger * 2 ** self.verbosity
+
+ if self.lines_in_function > trigger:
+ error_level = int(math.log(self.lines_in_function / base_trigger, 2))
+ # 1x base_trigger => 0, 2x => 1, 4x => 2, 8x => 3, 16x => 4, capped at 5 below.
+ if error_level > 5:
+ error_level = 5
+ error(line_number, 'readability/fn_size', error_level,
+ 'Small and focused functions are preferred:'
+ ' %s has %d non-comment lines'
+ ' (error triggered by exceeding %d lines).' % (
+ self.current_function, self.lines_in_function, trigger))
+
+ def end(self):
+ """Stop analizing function body."""
+ self.in_a_function = False
+
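+# Rough arithmetic for the thresholds above: the trigger is
+# base_trigger * 2 ** verbosity, so at verbosity 0 a normal function is flagged
+# once it exceeds 250 non-comment lines (400 for TEST functions), at
+# verbosity 1 once it exceeds 500 (800), and so on; the reported error level
+# grows roughly as int(log2(lines / base_trigger)) and is capped at 5.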
+
+class _IncludeError(Exception):
+ """Indicates a problem with the include order in a file."""
+ pass
+
+
+def is_c_or_objective_c(file_extension):
+ """Return whether the file extension corresponds to C or Objective-C.
+
+ Args:
+ file_extension: The file extension without the leading dot.
+
+ """
+ return file_extension in ['c', 'm']
+
+
+class FileInfo:
+ """Provides utility functions for filenames.
+
+ FileInfo provides easy access to the components of a file's path
+ relative to the project root.
+ """
+
+ def __init__(self, filename):
+ self._filename = filename
+
+ def full_name(self):
+ """Make Windows paths like Unix."""
+ return os.path.abspath(self._filename).replace('\\', '/')
+
+ def repository_name(self):
+ """Full name after removing the local path to the repository.
+
+ If we have a real absolute path name here, we can try to do something smart:
+ detect the root of the checkout and truncate /path/to/checkout from the name,
+ so that header guards don't include things like
+ "C:\Documents and Settings\..." or "/home/username/..." and people who have
+ checked the source out to different locations won't see bogus errors.
+ """
+ fullname = self.full_name()
+
+ if os.path.exists(fullname):
+ project_dir = os.path.dirname(fullname)
+
+ if os.path.exists(os.path.join(project_dir, ".svn")):
+ # If there's a .svn entry in the current directory, we
+ # walk up the directory tree to find the top of the
+ # SVN checkout.
+ root_dir = project_dir
+ one_up_dir = os.path.dirname(root_dir)
+ while os.path.exists(os.path.join(one_up_dir, ".svn")):
+ root_dir = os.path.dirname(root_dir)
+ one_up_dir = os.path.dirname(one_up_dir)
+
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Not SVN? Try to find a git top level directory by
+ # searching up from the current path.
+ root_dir = os.path.dirname(fullname)
+ while (root_dir != os.path.dirname(root_dir)
+ and not os.path.exists(os.path.join(root_dir, ".git"))):
+ root_dir = os.path.dirname(root_dir)
+ if os.path.exists(os.path.join(root_dir, ".git")):
+ prefix = os.path.commonprefix([root_dir, project_dir])
+ return fullname[len(prefix) + 1:]
+
+ # Don't know what to do; header guard warnings may be wrong...
+ return fullname
+
+ def split(self):
+ """Splits the file into the directory, basename, and extension.
+
+ For 'chrome/browser/browser.cpp', split() would
+ return ('chrome/browser', 'browser', '.cpp')
+
+ Returns:
+ A tuple of (directory, basename, extension).
+ """
+
+ googlename = self.repository_name()
+ project, rest = os.path.split(googlename)
+ return (project,) + os.path.splitext(rest)
+
+ def base_name(self):
+ """File base name - text after the final slash, before the final period."""
+ return self.split()[1]
+
+ def extension(self):
+ """File extension - text following the final period."""
+ return self.split()[2]
+
+ def no_extension(self):
+ """File has no source file extension."""
+ return '/'.join(self.split()[0:2])
+
+ def is_source(self):
+ """File has a source file extension."""
+ return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
+
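+# Illustrative FileInfo usage with a hypothetical path, assuming the checkout
+# root is detected so repository_name() strips the local prefix:
+#     info = FileInfo('/home/user/WebKit/WebCore/dom/Document.cpp')
+#     info.split()      # -> ('WebCore/dom', 'Document', '.cpp')
+#     info.base_name()  # -> 'Document'
+#     info.extension()  # -> '.cpp'
+#     info.is_source()  # -> True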
+
+# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
+_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
+ r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
+# Matches strings. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
+# Matches characters. Escape codes should already be removed by ESCAPES.
+_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
+# Matches multi-line C++ comments.
+# This RE is a little bit more complicated than one might expect, because we
+# have to be careful about how surrounding spaces are removed so that we can
+# handle comments inside statements better.
+# The current rule is: We only clear spaces from both sides when we're at the
+# end of the line. Otherwise, we try to remove spaces from the right side,
+# if this doesn't work we try on left side but only if there's a non-character
+# on the right.
+_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
+ r"""(\s*/\*.*\*/\s*$|
+ /\*.*\*/\s+|
+ \s+/\*.*\*/(?=\W)|
+ /\*.*\*/)""", re.VERBOSE)
+
+
+def is_cpp_string(line):
+ """Does line terminate so, that the next symbol is in string constant.
+
+ This function does not consider single-line nor multi-line comments.
+
+ Args:
+ line: A partial line of code, from the start of the line up to some position.
+
+ Returns:
+ True, if next character appended to 'line' is inside a
+ string constant.
+ """
+
+ line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
+ return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
+
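+# For example: an odd number of unescaped double quotes means the line ends
+# inside a string, so (hypothetical inputs)
+#     is_cpp_string('printf("unterminated')  # -> True
+#     is_cpp_string('printf("terminated")')  # -> False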
+
+def find_next_multi_line_comment_start(lines, line_index):
+ """Find the beginning marker for a multiline comment."""
+ while line_index < len(lines):
+ if lines[line_index].strip().startswith('/*'):
+ # Only return this marker if the comment goes beyond this line
+ if lines[line_index].strip().find('*/', 2) < 0:
+ return line_index
+ line_index += 1
+ return len(lines)
+
+
+def find_next_multi_line_comment_end(lines, line_index):
+ """We are inside a comment, find the end marker."""
+ while line_index < len(lines):
+ if lines[line_index].strip().endswith('*/'):
+ return line_index
+ line_index += 1
+ return len(lines)
+
+
+def remove_multi_line_comments_from_range(lines, begin, end):
+ """Clears a range of lines for multi-line comments."""
+ # Having // dummy comments makes the lines non-empty, so we will not get
+ # unnecessary blank line warnings later in the code.
+ for i in range(begin, end):
+ lines[i] = '// dummy'
+
+
+def remove_multi_line_comments(lines, error):
+ """Removes multiline (c-style) comments from lines."""
+ line_index = 0
+ while line_index < len(lines):
+ line_index_begin = find_next_multi_line_comment_start(lines, line_index)
+ if line_index_begin >= len(lines):
+ return
+ line_index_end = find_next_multi_line_comment_end(lines, line_index_begin)
+ if line_index_end >= len(lines):
+ error(line_index_begin + 1, 'readability/multiline_comment', 5,
+ 'Could not find end of multi-line comment')
+ return
+ remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1)
+ line_index = line_index_end + 1
+
+
+def cleanse_comments(line):
+ """Removes //-comments and single-line C-style /* */ comments.
+
+ Args:
+ line: A line of C++ source.
+
+ Returns:
+ The line with single-line comments removed.
+ """
+ comment_position = line.find('//')
+ if comment_position != -1 and not is_cpp_string(line[:comment_position]):
+ line = line[:comment_position]
+ # get rid of /* ... */
+ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
+
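+# Illustrative results (hypothetical inputs):
+#     cleanse_comments('int a; // counter')    # -> 'int a; '
+#     cleanse_comments('f(a, /* flag */ b);')  # -> 'f(a, b);'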
+
+class CleansedLines(object):
+ """Holds 3 copies of all lines with different preprocessing applied to them.
+
+ 1) elided member contains lines without strings and comments,
+ 2) lines member contains lines without comments, and
+ 3) raw_lines member contains all the lines without processing.
+ All three members are lists of the same length.
+ """
+
+ def __init__(self, lines):
+ self.elided = []
+ self.lines = []
+ self.raw_lines = lines
+ self._num_lines = len(lines)
+ for line_number in range(len(lines)):
+ self.lines.append(cleanse_comments(lines[line_number]))
+ elided = self.collapse_strings(lines[line_number])
+ self.elided.append(cleanse_comments(elided))
+
+ def num_lines(self):
+ """Returns the number of lines represented."""
+ return self._num_lines
+
+ @staticmethod
+ def collapse_strings(elided):
+ """Collapses strings and chars on a line to simple "" or '' blocks.
+
+ We nix strings first so we're not fooled by text like '"http://"'
+
+ Args:
+ elided: The line being processed.
+
+ Returns:
+ The line with collapsed strings.
+ """
+ if not _RE_PATTERN_INCLUDE.match(elided):
+ # Remove escaped characters first to make quote/single quote collapsing
+ # basic. Things that look like escaped characters shouldn't occur
+ # outside of strings and chars.
+ elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
+ elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
+ return elided
+
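+# Illustrative results of CleansedLines.collapse_strings() (hypothetical inputs):
+#     CleansedLines.collapse_strings('addFile("path/to/file.cpp");')  # -> 'addFile("");'
+#     CleansedLines.collapse_strings("c = 'x';")                      # -> "c = '';"
+# Lines that match _RE_PATTERN_INCLUDE are returned unchanged.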
+
+def close_expression(clean_lines, line_number, pos):
+ """If input points to ( or { or [, finds the position that closes it.
+
+ If lines[line_number][pos] points to a '(' or '{' or '[', finds the
+ line_number/pos that correspond to the closing of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, line_number, pos) pointer *past* the closing brace, or
+ (line, len(lines), -1) if we never find a close. Note we ignore
+ strings and comments when matching; and the line we return is the
+ 'cleansed' line at line_number.
+ """
+
+ line = clean_lines.elided[line_number]
+ start_character = line[pos]
+ if start_character not in '({[':
+ return (line, clean_lines.num_lines(), -1)
+ if start_character == '(':
+ end_character = ')'
+ if start_character == '[':
+ end_character = ']'
+ if start_character == '{':
+ end_character = '}'
+
+ num_open = line.count(start_character) - line.count(end_character)
+ while line_number < clean_lines.num_lines() and num_open > 0:
+ line_number += 1
+ line = clean_lines.elided[line_number]
+ num_open += line.count(start_character) - line.count(end_character)
+ # OK, now find the end_character that actually got us back to even
+ endpos = len(line)
+ while num_open >= 0:
+ endpos = line.rfind(end_character, 0, endpos)
+ num_open -= 1 # chopped off another closing character
+ return (line, line_number, endpos + 1)
+
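+# A small sketch: for the cleansed line 'if (foo(bar)) {' (hypothetical) with
+# pos pointing at the first '(', close_expression() returns
+# (line, line_number, 13), i.e. the index just past the ')' that closes it.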
+
+def check_for_copyright(lines, error):
+ """Logs an error if no Copyright message appears at the top of the file."""
+
+ # We'll say it should occur by line 10. Don't forget there's a
+ # dummy line at the front.
+ for line in xrange(1, min(len(lines), 11)):
+ if re.search(r'Copyright', lines[line], re.I):
+ break
+ else: # means no copyright line was found
+ error(0, 'legal/copyright', 5,
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"')
+
+
+def get_header_guard_cpp_variable(filename):
+ """Returns the CPP variable that should be used as a header guard.
+
+ Args:
+ filename: The name of a C++ header file.
+
+ Returns:
+ The CPP variable that should be used as a header guard in the
+ named file.
+
+ """
+
+ return sub(r'[-.\s]', '_', os.path.basename(filename))
+
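+# For example (hypothetical file names):
+#     get_header_guard_cpp_variable('WebCore/dom/Document.h')  # -> 'Document_h'
+#     get_header_guard_cpp_variable('String Utils-inl.h')      # -> 'String_Utils_inl_h'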
+
+def check_for_header_guard(filename, lines, error):
+ """Checks that the file contains a header guard.
+
+ Logs an error if no #ifndef header guard is present, or if the
+ guard does not follow the expected <filename>_h naming style.
+
+ Args:
+ filename: The name of the C++ header file.
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ cppvar = get_header_guard_cpp_variable(filename)
+
+ ifndef = None
+ ifndef_line_number = 0
+ define = None
+ for line_number, line in enumerate(lines):
+ line_split = line.split()
+ if len(line_split) >= 2:
+ # find the first occurrence of #ifndef and #define, save arg
+ if not ifndef and line_split[0] == '#ifndef':
+ # set ifndef to the header guard presented on the #ifndef line.
+ ifndef = line_split[1]
+ ifndef_line_number = line_number
+ if not define and line_split[0] == '#define':
+ define = line_split[1]
+ if define and ifndef:
+ break
+
+ if not ifndef or not define or ifndef != define:
+ error(0, 'build/header_guard', 5,
+ 'No #ifndef header guard found, suggested CPP variable is: %s' %
+ cppvar)
+ return
+
+ # The guard should be File_h.
+ if ifndef != cppvar:
+ error(ifndef_line_number, 'build/header_guard', 5,
+ '#ifndef header guard has wrong style, please use: %s' % cppvar)
+
+
+def check_for_unicode_replacement_characters(lines, error):
+ """Logs an error for each line containing Unicode replacement characters.
+
+ These indicate that either the file contained invalid UTF-8 (likely)
+ or Unicode replacement characters (which it shouldn't). Note that
+ it's possible for this to throw off line numbering if the invalid
+ UTF-8 occurred adjacent to a newline.
+
+ Args:
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+ for line_number, line in enumerate(lines):
+ if u'\ufffd' in line:
+ error(line_number, 'readability/utf8', 5,
+ 'Line contains invalid UTF-8 (or Unicode replacement character).')
+
+
+def check_for_new_line_at_eof(lines, error):
+ """Logs an error if there is no newline char at the end of the file.
+
+ Args:
+ lines: An array of strings, each representing a line of the file.
+ error: The function to call with any errors found.
+ """
+
+ # The array lines() was created by adding two newlines to the
+ # original file (go figure), then splitting on \n.
+ # To verify that the file ends in \n, we just have to make sure the
+ # last-but-two element of lines() exists and is empty.
+ if len(lines) < 3 or lines[-2]:
+ error(len(lines) - 2, 'whitespace/ending_newline', 5,
+ 'Could not find a newline character at the end of the file.')
+
+
+def check_for_multiline_comments_and_strings(clean_lines, line_number, error):
+ """Logs an error if we see /* ... */ or "..." that extend past one line.
+
+ /* ... */ comments are legit inside macros, for one line.
+ Otherwise, we prefer // comments, so it's ok to warn about the
+ other. Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(line_number, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(line_number, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+ 'ugly and unnecessary, and you should use concatenation instead."')
+
+
+_THREADING_LIST = (
+ ('asctime(', 'asctime_r('),
+ ('ctime(', 'ctime_r('),
+ ('getgrgid(', 'getgrgid_r('),
+ ('getgrnam(', 'getgrnam_r('),
+ ('getlogin(', 'getlogin_r('),
+ ('getpwnam(', 'getpwnam_r('),
+ ('getpwuid(', 'getpwuid_r('),
+ ('gmtime(', 'gmtime_r('),
+ ('localtime(', 'localtime_r('),
+ ('rand(', 'rand_r('),
+ ('readdir(', 'readdir_r('),
+ ('strtok(', 'strtok_r('),
+ ('ttyname(', 'ttyname_r('),
+ )
+
+
+def check_posix_threading(clean_lines, line_number, error):
+ """Checks for calls to thread-unsafe functions.
+
+ Much code was originally written without consideration for
+ multi-threading, and many engineers learned POSIX before its threading
+ extensions were added. These checks guide them toward the thread-safe
+ variants (when using POSIX directly).
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+ for single_thread_function, multithread_safe_function in _THREADING_LIST:
+ index = line.find(single_thread_function)
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if index >= 0 and (index == 0 or (not line[index - 1].isalnum()
+ and line[index - 1] not in ('_', '.', '>'))):
+ error(line_number, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...) for improved thread safety.')
+
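+# For example, a hypothetical line 'struct tm* now = localtime(&timestamp);'
+# would be reported as:
+#     'Consider using localtime_r(...) instead of localtime(...) for improved
+#     thread safety.'
+# Calls reached through '.', '->', '_' or an identifier character (e.g.
+# 'generator.rand(') are deliberately skipped.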
+
+# Matches invalid increment: *count++, which moves pointer instead of
+# incrementing a value.
+_RE_PATTERN_INVALID_INCREMENT = re.compile(
+ r'^\s*\*\w+(\+\+|--);')
+
+
+def check_invalid_increment(clean_lines, line_number, error):
+ """Checks for invalid increment *count++.
+
+ For example, the following function:
+ void increment_counter(int* count) {
+ *count++;
+ }
+ is invalid, because it effectively does count++ (moving the pointer) and
+ should be replaced with ++*count, (*count)++ or *count += 1.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[line_number]
+ if _RE_PATTERN_INVALID_INCREMENT.match(line):
+ error(line_number, 'runtime/invalid_increment', 5,
+ 'Changing pointer instead of value (or unused value of operator*).')
+
+
+class _ClassInfo(object):
+ """Stores information about a class."""
+
+ def __init__(self, name, line_number):
+ self.name = name
+ self.line_number = line_number
+ self.seen_open_brace = False
+ self.is_derived = False
+ self.virtual_method_line_number = None
+ self.has_virtual_destructor = False
+ self.brace_depth = 0
+
+
+class _ClassState(object):
+ """Holds the current state of the parse relating to class declarations.
+
+ It maintains a stack of _ClassInfos representing the parser's guess
+ as to the current nesting of class declarations. The innermost class
+ is at the top (back) of the stack. Typically, the stack will either
+ be empty or have exactly one entry.
+ """
+
+ def __init__(self):
+ self.classinfo_stack = []
+
+ def check_finished(self, error):
+ """Checks that all classes have been completely parsed.
+
+ Call this when all lines in a file have been processed.
+ Args:
+ error: The function to call with any errors found.
+ """
+ if self.classinfo_stack:
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpp_style_unittest.py for an example of this.
+ error(self.classinfo_stack[0].line_number, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ self.classinfo_stack[0].name)
+
+
+class _FileState(object):
+ def __init__(self):
+ self._did_inside_namespace_indent_warning = False
+
+ def set_did_inside_namespace_indent_warning(self):
+ self._did_inside_namespace_indent_warning = True
+
+ def did_inside_namespace_indent_warning(self):
+ return self._did_inside_namespace_indent_warning
+
+def check_for_non_standard_constructs(clean_lines, line_number,
+ class_state, error):
+ """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+ - "%lld" instead of %qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+ - classes with virtual methods need virtual destructors (compiler warning
+ available, but not turned on yet.)
+
+ Additionally, check for constructor/destructor style violations as it
+ is very convenient to do so while checking for gcc-2 compliance.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ class_state: A _ClassState instance which maintains information about
+ the current stack of nested class declarations being parsed.
+ error: A callable to which errors are reported, which takes parameters:
+ line number, error level, and message
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[line_number]
+
+ if search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(line_number, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
+
+ if search(r'printf\s*\(.*".*%\d+\$', line):
+ error(line_number, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escapes.
+ line = line.replace('\\\\', '')
+
+ if search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(line_number, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[line_number]
+
+ if search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|schar|u?int8|u?int16|u?int32|u?int64)'
+ r'\s+(auto|register|static|extern|typedef)\b',
+ line):
+ error(line_number, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(line_number, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(line_number, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
+
+ if search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line):
+ error(line_number, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are non-standard and deprecated.')
+
+ # Track class entry and exit, and attempt to find cases within the
+ # class declaration that don't meet the C++ style
+ # guidelines. Tracking is very dependent on the code matching Google
+ # style guidelines, but it seems to perform well enough in testing
+ # to be a worthwhile addition to the checks.
+ classinfo_stack = class_state.classinfo_stack
+ # Look for a class declaration
+ class_decl_match = match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line)
+ if class_decl_match:
+ classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number))
+
+ # Everything else in this function uses the top of the stack if it's
+ # not empty.
+ if not classinfo_stack:
+ return
+
+ classinfo = classinfo_stack[-1]
+
+ # If the opening brace hasn't been seen look for it and also
+ # parent class declarations.
+ if not classinfo.seen_open_brace:
+ # If the line has a ';' in it, assume it's a forward declaration or
+ # a single-line class declaration, which we won't process.
+ if line.find(';') != -1:
+ classinfo_stack.pop()
+ return
+ classinfo.seen_open_brace = (line.find('{') != -1)
+ # Look for a bare ':'
+ if search('(^|[^:]):($|[^:])', line):
+ classinfo.is_derived = True
+ if not classinfo.seen_open_brace:
+ return # Everything else in this function is for after open brace
+
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
+
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ args = match(r'(?<!explicit)\s+%s\s*\(([^,()]+)\)'
+ % re.escape(base_classname),
+ line)
+ if (args
+ and args.group(1) != 'void'
+ and not match(r'(const\s+)?%s\s*&' % re.escape(base_classname),
+ args.group(1).strip())):
+ error(line_number, 'runtime/explicit', 5,
+ 'Single-argument constructors should be marked explicit.')
+
+ # Look for methods declared virtual.
+ if search(r'\bvirtual\b', line):
+ classinfo.virtual_method_line_number = line_number
+ # Only look for a destructor declaration on the same line. It would
+ # be extremely unlikely for the destructor declaration to occupy
+ # more than one line.
+ if search(r'~%s\s*\(' % base_classname, line):
+ classinfo.has_virtual_destructor = True
+
+ # Look for class end.
+ brace_depth = classinfo.brace_depth
+ brace_depth = brace_depth + line.count('{') - line.count('}')
+ if brace_depth <= 0:
+ classinfo = classinfo_stack.pop()
+ # Try to detect missing virtual destructor declarations.
+ # For now, only warn if a non-derived class with virtual methods lacks
+ # a virtual destructor. This is to make it less likely that people will
+ # declare derived virtual destructors without declaring the base
+ # destructor virtual.
+ if ((classinfo.virtual_method_line_number is not None)
+ and (not classinfo.has_virtual_destructor)
+ and (not classinfo.is_derived)): # Only warn for base classes
+ error(classinfo.line_number, 'runtime/virtual', 4,
+ 'The class %s probably needs a virtual destructor due to '
+ 'having virtual method(s), one declared at line %d.'
+ % (classinfo.name, classinfo.virtual_method_line_number))
+ else:
+ classinfo.brace_depth = brace_depth
+
+
+def check_spacing_for_function_call(line, line_number, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ line: The text of the line to check.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/foreach/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ function_call = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bforeach\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ matched = search(pattern, line)
+ if matched:
+ function_call = matched.group(1) # look inside the parens for function calls
+ break
+
+ # Except in if/for/foreach/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+ # pointers and references to arrays and functions coz they're too tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not search(r'\b(if|for|foreach|while|switch|return|new|delete)\b', function_call)
+ # Ignore pointers/references to functions.
+ and not search(r' \([^)]+\)\([^)]*(\)|,$)', function_call)
+ # Ignore pointers/references to arrays.
+ and not search(r' \([^)]+\)\[[^\]]+\]', function_call)):
+ if search(r'\w\s*\([ \t](?!\s*\\$)', function_call): # a ( used for a fn call
+ error(line_number, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif search(r'\([ \t]+(?!(\s*\\)|\()', function_call):
+ error(line_number, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (search(r'\w\s+\(', function_call)
+ and not search(r'#\s*define|typedef', function_call)):
+ error(line_number, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if search(r'[^)\s]\s+\)(?!\s*$|{\s*$)', function_call):
+ error(line_number, 'whitespace/parens', 2,
+ 'Extra space before )')
+
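+# Illustrative diagnostics, using hypothetical one-line inputs:
+#     'foo( bar);'  -> 'Extra space after ( in function call'
+#     'foo (bar);'  -> 'Extra space before ( in function call'
+#     'foo(bar );'  -> 'Extra space before )'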
+
+def is_blank_line(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if the line is empty or consists of
+ only white spaces.
+
+ Args:
+ line: A line of a string.
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def check_for_function_lengths(clean_lines, line_number, function_state, error):
+ """Reports for long function bodies.
+
+ For an overview why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Only checks unindented functions, so class members are unchecked.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+ of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[line_number]
+ raw = clean_lines.raw_lines
+ raw_line = raw[line_number]
+ joined_line = ''
+
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
+ function_name = match_result.group(1).split()[-1]
+ if function_name == 'TEST' or function_name == 'TEST_F' or (not match(r'[A-Z_]+$', function_name)):
+ starting_func = True
+
+ if starting_func:
+ body_found = False
+ for start_line_number in xrange(line_number, clean_lines.num_lines()):
+ start_line = lines[start_line_number]
+ joined_line += ' ' + start_line.lstrip()
+ if search(r'(;|})', start_line): # Declarations and trivial functions
+ body_found = True
+ break # ... ignore
+ if search(r'{', start_line):
+ body_found = True
+ function = search(r'((\w|:)*)\(', line).group(1)
+ if match(r'TEST', function): # Handle TEST... macros
+ parameter_regexp = search(r'(\(.*\))', joined_line)
+ if parameter_regexp: # Ignore bad syntax
+ function += parameter_regexp.group(1)
+ else:
+ function += '()'
+ function_state.begin(function)
+ break
+ if not body_found:
+ # No body for the function (or evidence of a non-function) was found.
+ error(line_number, 'readability/fn_size', 5,
+ 'Lint failed to find start of function body.')
+ elif match(r'^\}\s*$', line): # function end
+ if not search(r'\bNOLINT\b', raw_line):
+ function_state.check(error, line_number)
+ function_state.end()
+ elif not match(r'^\s*$', line):
+ function_state.count() # Count non-blank/non-comment lines.
+
+
+def check_spacing(file_extension, clean_lines, line_number, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, one
+ space between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't have too many
+ blank lines in a row.
+
+ Args:
+ file_extension: The current file extension, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ raw = clean_lines.raw_lines
+ line = raw[line_number]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+ # blank lines at the end of a function (ie, right before a line like '}').
+ if is_blank_line(line):
+ elided = clean_lines.elided
+ previous_line = elided[line_number - 1]
+ previous_brace = previous_line.rfind('{')
+ # FIXME: Don't complain if line before blank line, and line after,
+ # both start with alnums and are indented the same amount.
+ # This ignores whitespace at the start of a namespace block
+ # because those are not usually indented.
+ if (previous_brace != -1 and previous_line[previous_brace:].find('}') == -1
+ and previous_line[:previous_brace].find('namespace') == -1):
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are indented
+ # 4 spaces (because they did not fit in an 80-column line when placed on
+ # the same line as the function name). We also check for the case where
+ # the previous line is indented 6 spaces, which may happen when the
+ # initializers of a constructor do not fit into an 80-column line.
+ exception = False
+ if match(r' {6}\w', previous_line): # Initializer list?
+ # We are looking for the opening column of initializer list, which
+ # should be indented 4 spaces to cause 6 space indentation afterwards.
+ search_position = line_number - 2
+ while (search_position >= 0
+ and match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We use a
+ # simple heuristic here: If the line is indented 4 spaces; and we have a
+ # closing paren, without the opening paren, followed by an opening brace
+ # or colon (for initializer lists) we assume that it is the last line of
+ # a function header. If we have a colon indented 4 spaces, it is an
+ # initializer list.
+ exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ previous_line)
+ or match(r' {4}:', previous_line))
+
+ if not exception:
+ error(line_number, 'whitespace/blank_line', 2,
+ 'Blank line at the start of a code block. Is this needed?')
+ # This doesn't ignore whitespace at the end of a namespace block
+ # because that is too hard without pairing open/close braces;
+ # however, a special exception is made for namespace closing
+ # brackets which have a comment containing "namespace".
+ #
+ # Also, ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if line_number + 1 < clean_lines.num_lines():
+ next_line = raw[line_number + 1]
+ if (next_line
+ and match(r'\s*}', next_line)
+ and next_line.find('namespace') == -1
+ and next_line.find('} else ') == -1):
+ error(line_number, 'whitespace/blank_line', 3,
+ 'Blank line at the end of a code block. Is this needed?')
+
+ # Next, we complain if there's a comment too near the text
+ comment_position = line.find('//')
+ if comment_position != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
+ if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes
+ # Allow one space before end of line comment.
+ if (not match(r'^\s*$', line[:comment_position])
+ and (comment_position >= 1
+ and ((line[comment_position - 1] not in string.whitespace)
+ or (comment_position >= 2
+ and line[comment_position - 2] in string.whitespace)))):
+ error(line_number, 'whitespace/comments', 5,
+ 'One space before end of line comments')
+ # There should always be a space between the // and the comment
+ commentend = comment_position + 2
+ if commentend < len(line) and not line[commentend] == ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ matched = (search(r'[=/-]{4,}\s*$', line[commentend:])
+ or search(r'^/+ ', line[commentend:]))
+ if not matched:
+ error(line_number, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+
+ line = clean_lines.elided[line_number] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+ # Don't try to do spacing checks for #include or #import statements at
+ # minimum because it messes up checks for spacing around /
+ if match(r'\s*#\s*(?:include|import)', line):
+ return
+ if search(r'[\w.]=[\w.]', line):
+ error(line_number, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # FIXME: It's not ok to have spaces around binary operators like .
+
+ # You should always have whitespace around binary operators.
+ # Alas, we can't test < or > because they're legitimately used sans spaces
+ # (a->b, vector<int> a). The only time we can tell is a < with no >, and
+ # only if it's not template params list spilling into the next line.
+ matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line)
+ if not matched:
+ # Note that while it seems that the '<[^<]*' term in the following
+ # regexp could be simplified to '<.*', which would indeed match
+ # the same class of strings, the [^<] means that searching for the
+ # regexp takes linear rather than quadratic time.
+ if not search(r'<[^<]*,\s*$', line): # template params spill
+ matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
+ if matched:
+ error(line_number, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % matched.group(1))
+
+ # There shouldn't be space around unary operators
+ matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if matched:
+ error(line_number, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % matched.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line)
+ if matched:
+ error(line_number, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % matched.group(1))
+
+ # For if/for/foreach/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<reminder>.*)$', line)
+ if matched:
+ statement = matched.group('statement')
+ condition, rest = up_to_unmatched_closing_paren(matched.group('reminder'))
+ if condition is not None:
+ condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition)
+ if condition_match:
+ n_leading = len(condition_match.group('leading'))
+ n_trailing = len(condition_match.group('trailing'))
+ if n_leading != n_trailing:
+ for_exception = statement == 'for' and (
+ (condition.startswith(' ;') and n_trailing == 0) or
+ (condition.endswith('; ') and n_leading == 0))
+ if not for_exception:
+ error(line_number, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % statement)
+ if n_leading > 1:
+ error(line_number, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ statement)
+
+ # Do not check for more than one command in macros
+ in_macro = match(r'\s*#define', line)
+ if not in_macro and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest):
+ error(line_number, 'whitespace/parens', 4,
+ 'More than one command on the same line in %s' % statement)
+
+ # You should always have a space after a comma (either as fn arg or operator)
+ if search(r',[^\s]', line):
+ error(line_number, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ if file_extension == 'cpp':
+ # C++ should have the & or * beside the type not the variable name.
+ matched = match(r'\s*\w+(?<!\breturn)\s+(?P<pointer_operator>\*|\&)\w+', line)
+ if matched:
+ error(line_number, 'whitespace/declaration', 3,
+ 'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip()))
+
+ elif file_extension == 'c':
+ # C Pointer declaration should have the * beside the variable not the type name.
+ matched = search(r'^\s*\w+\*\s+\w+', line)
+ if matched:
+ error(line_number, 'whitespace/declaration', 3,
+ 'Declaration has space between * and variable name in %s' % matched.group(0).strip())
+
+ # Next we will look for issues with function calls.
+ check_spacing_for_function_call(line, line_number, error)
+
+ # Except after an opening paren, you should have spaces before your braces.
+ # And since you should never have braces at the beginning of a line, this is
+ # an easy test.
+ if search(r'[^ ({]{', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if search(r'}else', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line):
+ error(line_number, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if search(r':\s*;\s*$', line):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use { } instead.')
+ elif search(r'^\s*;\s*$', line):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use { } instead.')
+ elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use { } instead.')
+ elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line)
+ and line.count('(') == line.count(')')
+ # Allow do {} while();
+ and not search(r'}\s*while', line)):
+ error(line_number, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement for this loop. Use { } instead.')
+
+
+def get_previous_non_blank_line(clean_lines, line_number):
+ """Return the most recent non-blank line and its line number.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file contents.
+ line_number: The number of the line to check.
+
+ Returns:
+ A tuple with two elements. The first element is the contents of the last
+ non-blank line before the current line, or the empty string if this is the
+ first non-blank line. The second is the line number of that line, or -1
+ if this is the first non-blank line.
+ """
+
+ previous_line_number = line_number - 1
+ while previous_line_number >= 0:
+ previous_line = clean_lines.elided[previous_line_number]
+ if not is_blank_line(previous_line): # if not a blank line...
+ return (previous_line, previous_line_number)
+ previous_line_number -= 1
+ return ('', -1)
+
+
+def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error):
+ """Looks for indentation errors inside of namespaces.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (dot not included) of the file.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line)
+ if not namespace_match:
+ return
+
+ current_indentation_level = len(namespace_match.group('namespace_indentation'))
+ if current_indentation_level > 0:
+ # Don't warn about an indented namespace if we already warned about indented code.
+ if not file_state.did_inside_namespace_indent_warning():
+ error(line_number, 'whitespace/indent', 4,
+ 'namespace should never be indented.')
+ return
+ looking_for_semicolon = False
+ line_offset = 0
+ in_preprocessor_directive = False
+ for current_line in clean_lines.elided[line_number + 1:]:
+ line_offset += 1
+ if not current_line.strip():
+ continue
+ if not current_indentation_level:
+ if not (in_preprocessor_directive or looking_for_semicolon):
+ if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning():
+ file_state.set_did_inside_namespace_indent_warning()
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'Code inside a namespace should not be indented.')
+ if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax.
+ in_preprocessor_directive = current_line[-1] == '\\'
+ else:
+ looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\')
+ else:
+ looking_for_semicolon = False # If we have a brace we may not need a semicolon.
+ current_indentation_level += current_line.count('{') - current_line.count('}')
+ if current_indentation_level < 0:
+ break
+
+def check_using_std(file_extension, clean_lines, line_number, error):
+ """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'.
+
+ Args:
+ file_extension: The extension of the current file, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if is_c_or_objective_c(file_extension):
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line)
+ if not using_std_match:
+ return
+
+ method_name = using_std_match.group('method_name')
+ error(line_number, 'build/using_std', 4,
+ "Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+
+
+def check_max_min_macros(file_extension, clean_lines, line_number, error):
+ """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
+
+ Args:
+ file_extension: The extension of the current file, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # This check doesn't apply to C or Objective-C implementation files.
+ if is_c_or_objective_c(file_extension):
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line)
+ if not max_min_macros_search:
+ return
+
+ max_min_macro = max_min_macros_search.group('max_min_macro')
+ max_min_macro_lower = max_min_macro.lower()
+ error(line_number, 'runtime/max_min_macros', 4,
+ 'Use std::%s() or std::%s<type>() instead of the %s() macro.'
+ % (max_min_macro_lower, max_min_macro_lower, max_min_macro))
+
+
+def check_switch_indentation(clean_lines, line_number, error):
+ """Looks for indentation errors inside of switch statements.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line)
+ if not switch_match:
+ return
+
+ switch_indentation = switch_match.group('switch_indentation')
+ inner_indentation = switch_indentation + ' ' * 4
+ line_offset = 0
+ encountered_nested_switch = False
+
+ for current_line in clean_lines.elided[line_number + 1:]:
+ line_offset += 1
+
+ # Skip not only empty lines but also those with preprocessor directives.
+ if current_line.strip() == '' or current_line.startswith('#'):
+ continue
+
+ if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line):
+ # Complexity alarm - another switch statement nested inside the one
+ # that we're currently testing. We'll need to track the extent of
+ # that inner switch if the upcoming label tests are still supposed
+ # to work correctly. Let's not do that; instead, we'll finish
+ # checking this line, and then leave it like that. Assuming the
+ # indentation is done consistently (even if incorrectly), this will
+ # still catch all indentation issues in practice.
+ encountered_nested_switch = True
+
+ current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+ current_indentation = current_indentation_match.group('indentation')
+ remaining_line = current_indentation_match.group('remaining_line')
+
+ # End the check at the end of the switch statement.
+ if remaining_line.startswith('}') and current_indentation == switch_indentation:
+ break
+ # Case and default branches should not be indented. The regexp also
+ # catches single-line cases like "default: break;" but does not trigger
+ # on stuff like "Document::Foo();".
+ elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line):
+ if current_indentation != switch_indentation:
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'A case label should not be indented, but should line up with its switch statement.')
+ # Don't throw an error for multiple badly indented labels,
+ # one should be enough to figure out the problem.
+ break
+ # We ignore goto labels at the very beginning of a line.
+ elif match(r'\w+\s*:\s*$', remaining_line):
+ continue
+ # It's not a goto label, so check if it's indented at least as far as
+ # the switch statement plus one more level of indentation.
+ elif not current_indentation.startswith(inner_indentation):
+ error(line_number + line_offset, 'whitespace/indent', 4,
+ 'Non-label code inside switch statements should be indented.')
+ # Don't throw an error for multiple badly indented statements,
+ # one should be enough to figure out the problem.
+ break
+
+ if encountered_nested_switch:
+ break
+
+
+def check_braces(clean_lines, line_number, error):
+ """Looks for misplaced braces (e.g. at the end of line).
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ if match(r'\s*{\s*$', line):
+ # We allow an open brace to start a line in the case where someone
+ # is using braces for function definition or in a block to
+ # explicitly create a new scope, which is commonly used to control
+ # the lifetime of stack-allocated variables. We don't detect this
+ # perfectly: we just don't complain if the last non-whitespace
+ # character on the previous non-blank line is ';', ':', '{', '}',
+ # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'.
+ # We also allow '#' for #endif and '=' for array initialization.
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+ if ((not search(r'[;:}{)=]\s*$|\)\s*const\s*$', previous_line)
+ or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line))
+ and previous_line.find('#') < 0):
+ error(line_number, 'whitespace/braces', 4,
+ 'This { should be at the end of the previous line')
+ elif (search(r'\)\s*(const\s*)?{\s*$', line)
+ and line.count('(') == line.count(')')
+ and not search(r'\b(if|for|foreach|while|switch)\b', line)
+ and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)):
+ error(line_number, 'whitespace/braces', 4,
+ 'Place brace on its own line for function definitions.')
+
+ if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1):
+ # We check if a closed brace has started a line to see if a
+ # one line control statement was previous.
+ previous_line = clean_lines.elided[line_number - 2]
+ if (previous_line.find('{') > 0
+ and search(r'\b(if|for|foreach|while|else)\b', previous_line)):
+ error(line_number, 'whitespace/braces', 4,
+ 'One line control clauses should not use braces.')
+
+ # An else clause should be on the same line as the preceding closing brace.
+ if match(r'\s*else\s*', line):
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0]
+ if match(r'\s*}\s*$', previous_line):
+ error(line_number, 'whitespace/newline', 4,
+ 'An else should appear on the same line as the preceding }')
+
+ # Likewise, an else should never have the else clause on the same line
+ if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line):
+ error(line_number, 'whitespace/newline', 4,
+ 'Else clause should never be on same line as else (use 2 lines)')
+
+ # In the same way, a do/while should never be on one line
+ if match(r'\s*do [^\s{]', line):
+ error(line_number, 'whitespace/newline', 4,
+ 'do/while clauses should not be on a single line')
+
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array.
+ # We can't tell in general, but we can for some common cases.
+ previous_line_number = line_number
+ while True:
+ (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number)
+ if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'):
+ line = previous_line + line
+ else:
+ break
+ if (search(r'{.*}\s*;', line)
+ and line.count('{') == line.count('}')
+ and not search(r'struct|class|enum|\s*=\s*{', line)):
+ error(line_number, 'readability/braces', 4,
+ "You don't need a ; after a }")
+
+
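+# The shape of code this check targets, roughly (illustrative):
+#     if (error)
+#         return false;
+#     else                 -> "An else statement can be removed ..."
+#         recover();
+# and the analogous "else if (...)" form, which should become a plain "if".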
+def check_exit_statement_simplifications(clean_lines, line_number, error):
+ """Looks for else or else-if statements that should be written as an
+ if statement when the prior if concludes with a return, break, continue or
+ goto statement.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line)
+ if not else_match:
+ return
+
+ else_indentation = else_match.group('else_indentation')
+ inner_indentation = else_indentation + ' ' * 4
+
+ previous_lines = clean_lines.elided[:line_number]
+ previous_lines.reverse()
+ line_offset = 0
+ encountered_exit_statement = False
+
+ for current_line in previous_lines:
+ line_offset -= 1
+
+ # Skip not only empty lines but also those with preprocessor directives
+ # and goto labels.
+ if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line):
+ continue
+
+ # Skip lines with closing braces on the original indentation level.
+ # Even though the styleguide says they should be on the same line as
+ # the "else if" statement, we also want to check for instances where
+ # the current code does not comply with the coding style. Thus, ignore
+ # these lines and proceed to the line before that.
+ if current_line == else_indentation + '}':
+ continue
+
+ current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line)
+ current_indentation = current_indentation_match.group('indentation')
+ remaining_line = current_indentation_match.group('remaining_line')
+
+ # As we're going up the lines, the first real statement to encounter
+ # has to be an exit statement (return, break, continue or goto) -
+ # otherwise, this check doesn't apply.
+ if not encountered_exit_statement:
+ # We only want to find exit statements if they are on exactly
+ # the same level of indentation as expected from the code inside
+ # the block. If the indentation doesn't strictly match then we
+ # might have a nested if or something, which must be ignored.
+ if current_indentation != inner_indentation:
+ break
+ if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line):
+ encountered_exit_statement = True
+ continue
+ break
+
+ # When code execution reaches this point, we've found an exit statement
+ # as last statement of the previous block. Now we only need to make
+ # sure that the block belongs to an "if", then we can throw an error.
+
+ # Skip lines with opening braces on the original indentation level,
+ # similar to the closing braces check above. ("if (condition)\n{")
+ if current_line == else_indentation + '{':
+ continue
+
+ # Skip everything that's further indented than our "else" or "else if".
+ if current_indentation.startswith(else_indentation) and current_indentation != else_indentation:
+ continue
+
+ # So we've got a line with same (or less) indentation. Is it an "if"?
+ # If yes: throw an error. If no: don't throw an error.
+ # Whatever the outcome, this is the end of our loop.
+ if match(r'if\s*\(', remaining_line):
+ if else_match.start('else') != -1:
+ error(line_number + line_offset, 'readability/control_flow', 4,
+ 'An else statement can be removed when the prior "if" '
+ 'concludes with a return, break, continue or goto statement.')
+ else:
+ error(line_number + line_offset, 'readability/control_flow', 4,
+ 'An else if statement should be written as an if statement '
+ 'when the prior "if" concludes with a return, break, '
+ 'continue or goto statement.')
+ break
+
+
+def replaceable_check(operator, macro, line):
+ """Determine whether a basic CHECK can be replaced with a more specific one.
+
+ For example suggest using CHECK_EQ instead of CHECK(a == b) and
+ similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.
+
+ Args:
+ operator: The C++ operator used in the CHECK.
+ macro: The CHECK or EXPECT macro being called.
+ line: The current source line.
+
+ Returns:
+ True if the CHECK can be replaced with a more specific one.
+ """
+
+ # This matches decimal and hex integers, strings, and chars (in that order).
+ match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'
+
+ # Expression to match two sides of the operator with something that
+ # looks like a literal, since CHECK(x == iterator) won't compile.
+ # This means we can't catch all the cases where a more specific
+ # CHECK is possible, but it's less annoying than dealing with
+ # extraneous warnings.
+ match_this = (r'\s*' + macro + r'\((\s*' +
+ match_constant + r'\s*' + operator + r'[^<>].*|'
+ r'.*[^<>]' + operator + r'\s*' + match_constant +
+ r'\s*\))')
+
+ # Don't complain about CHECK(x == NULL) or similar because
+ # CHECK_EQ(x, NULL) won't compile (requires a cast).
+ # Also, don't complain about more complex boolean expressions
+ # involving && or || such as CHECK(a == b || c == d).
+ return match(match_this, line) and not search(r'NULL|&&|\|\|', line)
+
+
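+# For example, a line such as "CHECK(itemCount == 42);" is reported here with a
+# suggestion to use CHECK_EQ, since one side of the comparison is a literal
+# (this assumes _CHECK_MACROS and _CHECK_REPLACEMENT, referenced below and
+# defined earlier in this file, map CHECK and '==' to CHECK_EQ, as the
+# replaceable_check() docstring implies).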
+def check_check(clean_lines, line_number, error):
+ """Checks the use of CHECK and EXPECT macros.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Decide the set of replacement macros that should be suggested
+ raw_lines = clean_lines.raw_lines
+ current_macro = ''
+ for macro in _CHECK_MACROS:
+ if raw_lines[line_number].find(macro) >= 0:
+ current_macro = macro
+ break
+ if not current_macro:
+ # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
+ return
+
+ line = clean_lines.elided[line_number] # get rid of comments and strings
+
+ # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc.
+ for operator in ['==', '!=', '>=', '>', '<=', '<']:
+ if replaceable_check(operator, current_macro, line):
+ error(line_number, 'readability/check', 2,
+ 'Consider using %s instead of %s(a %s b)' % (
+ _CHECK_REPLACEMENT[current_macro][operator],
+ current_macro, operator))
+ break
+
+
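+# Illustrative lines this check flags: "if (count == 0)", "if (ptr != NULL)"
+# and "if (isValid == true)"; the preferred forms are "if (!count)",
+# "if (ptr)" and "if (isValid)".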
+def check_for_comparisons_to_zero(clean_lines, line_number, error):
+ # Get the line without comments and strings.
+ line = clean_lines.elided[line_number]
+
+ # Include NULL here so that users don't have to convert NULL to 0 first and then get this error.
+ if search(r'[=!]=\s*(NULL|0|true|false)\W', line) or search(r'\W(NULL|0|true|false)\s*[=!]=', line):
+ error(line_number, 'readability/comparison_to_zero', 5,
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.')
+
+
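+# For example, "Node* node = NULL;" in a C++ file draws "Use 0 instead of NULL.",
+# while the same line in a C or Objective-C file, or a NULL inside a
+# g_object_get()/g_object_set() call, is left alone.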
+def check_for_null(file_extension, clean_lines, line_number, error):
+ # This check doesn't apply to C or Objective-C implementation files.
+ if is_c_or_objective_c(file_extension):
+ return
+
+ line = clean_lines.elided[line_number]
+
+ # Don't warn about NULL usage in g_object_{get,set}(). See Bug 32858
+ if search(r'\bg_object_[sg]et\b', line):
+ return
+
+ if search(r'\bNULL\b', line):
+ error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.')
+ return
+
+ line = clean_lines.raw_lines[line_number]
+ # See if NULL occurs in any comments in the line. If the search for NULL using the raw line
+ # matches, then do the check with strings collapsed to avoid giving errors for
+ # NULLs occurring in strings.
+ if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)):
+ error(line_number, 'readability/null', 4, 'Use 0 instead of NULL.')
+
+
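+# For example (assuming Python 2 unicode semantics, as used elsewhere in this
+# module): get_line_width(u'a\u0301') is 1 because the combining accent adds no
+# column, while a fullwidth character such as u'\u3042' counts as 2 columns.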
+def get_line_width(line):
+ """Determines the width of the line in column positions.
+
+ Args:
+ line: A string, which may be a Unicode string.
+
+ Returns:
+ The width of the line in column positions, accounting for Unicode
+ combining characters and wide characters.
+ """
+ if isinstance(line, unicode):
+ width = 0
+ for c in unicodedata.normalize('NFC', line):
+ if unicodedata.east_asian_width(c) in ('W', 'F'):
+ width += 2
+ elif not unicodedata.combining(c):
+ width += 1
+ return width
+ return len(line)
+
+
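+# A few lines check_style() would flag (illustrative):
+#     "\tint i;"                  -> whitespace/tab
+#     "doThis(); doThat();"       -> "More than one command on the same line"
+#     "if (veryLongCondition &&"  -> whitespace/operators (operator belongs on
+#                                    the left side of the next line)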
+def check_style(clean_lines, line_number, file_extension, file_state, error):
+ """Checks rules from the 'C++ style rules' section of cppguide.html.
+
+ Most of these rules are hard to test (naming, comment style), but we
+ do what we can. In particular we check for 4-space indents, line lengths,
+ tab usage, spaces inside code, etc.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: The function to call with any errors found.
+ """
+
+ raw_lines = clean_lines.raw_lines
+ line = raw_lines[line_number]
+
+ if line.find('\t') != -1:
+ error(line_number, 'whitespace/tab', 1,
+ 'Tab found; better to use spaces')
+
+ # One or three blank spaces at the beginning of the line is weird; it's
+ # hard to reconcile that with 4-space indents.
+ # NOTE: here are the conditions rob pike used for his tests. Mine aren't
+ # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
+ # if(RLENGTH > 20) complain = 0;
+ # if(match($0, " +(error|private|public|protected):")) complain = 0;
+ # if(match(prev, "&& *$")) complain = 0;
+ # if(match(prev, "\\|\\| *$")) complain = 0;
+ # if(match(prev, "[\",=><] *$")) complain = 0;
+ # if(match($0, " <<")) complain = 0;
+ # if(match(prev, " +for \\(")) complain = 0;
+ # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
+ initial_spaces = 0
+ cleansed_line = clean_lines.elided[line_number]
+ while initial_spaces < len(line) and line[initial_spaces] == ' ':
+ initial_spaces += 1
+ if line and line[-1].isspace():
+ error(line_number, 'whitespace/end_of_line', 4,
+ 'Line ends in whitespace. Consider deleting these extra spaces.')
+ # There are certain situations we allow one space, notably for labels
+ elif ((initial_spaces >= 1 and initial_spaces <= 3)
+ and not match(r'\s*\w+\s*:\s*$', cleansed_line)):
+ error(line_number, 'whitespace/indent', 3,
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent?')
+ # Labels should always be indented at least one space.
+ elif not initial_spaces and line[:2] != '//':
+ label_match = match(r'(?P<label>[^:]+):\s*$', line)
+
+ if label_match:
+ label = label_match.group('label')
+ # Only throw errors for stuff that is definitely not a goto label,
+ # because goto labels can in fact occur at the start of the line.
+ if label in ['public', 'private', 'protected'] or label.find(' ') != -1:
+ error(line_number, 'whitespace/labels', 4,
+ 'Labels should always be indented at least one space. '
+ 'If this is a member-initializer list in a constructor, '
+ 'the colon should be on the line after the definition header.')
+
+ if (cleansed_line.count(';') > 1
+ # for loops are allowed two ;'s (and may run over two lines).
+ and cleansed_line.find('for') == -1
+ and (get_previous_non_blank_line(clean_lines, line_number)[0].find('for') == -1
+ or get_previous_non_blank_line(clean_lines, line_number)[0].find(';') != -1)
+ # It's ok to have many commands in a switch case that fits in 1 line
+ and not ((cleansed_line.find('case ') != -1
+ or cleansed_line.find('default:') != -1)
+ and cleansed_line.find('break;') != -1)
+ and not cleansed_line.startswith('#define ')):
+ error(line_number, 'whitespace/newline', 4,
+ 'More than one command on the same line')
+
+ if cleansed_line.strip().endswith('||') or cleansed_line.strip().endswith('&&'):
+ error(line_number, 'whitespace/operators', 4,
+ 'Boolean expressions that span multiple lines should have their '
+ 'operators on the left side of the line instead of the right side.')
+
+ # Some more style checks
+ check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
+ check_using_std(file_extension, clean_lines, line_number, error)
+ check_max_min_macros(file_extension, clean_lines, line_number, error)
+ check_switch_indentation(clean_lines, line_number, error)
+ check_braces(clean_lines, line_number, error)
+ check_exit_statement_simplifications(clean_lines, line_number, error)
+ check_spacing(file_extension, clean_lines, line_number, error)
+ check_check(clean_lines, line_number, error)
+ check_for_comparisons_to_zero(clean_lines, line_number, error)
+ check_for_null(file_extension, clean_lines, line_number, error)
+
+
+_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
+_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
+# Matches the first component of a filename delimited by -s and _s. That is:
+# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo.cpp').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo-bar_baz.cpp').group(0) == 'foo'
+# _RE_FIRST_COMPONENT.match('foo_bar-baz.cpp').group(0) == 'foo'
+_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
+
+
+def _drop_common_suffixes(filename):
+ """Drops common suffixes like _test.cpp or -inl.h from filename.
+
+ For example:
+ >>> _drop_common_suffixes('foo/foo-inl.h')
+ 'foo/foo'
+ >>> _drop_common_suffixes('foo/bar/foo.cpp')
+ 'foo/bar/foo'
+ >>> _drop_common_suffixes('foo/foo_internal.h')
+ 'foo/foo'
+ >>> _drop_common_suffixes('foo/foo_unusualinternal.h')
+ 'foo/foo_unusualinternal'
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ The filename with the common suffix removed.
+ """
+ for suffix in ('test.cpp', 'regtest.cpp', 'unittest.cpp',
+ 'inl.h', 'impl.h', 'internal.h'):
+ if (filename.endswith(suffix) and len(filename) > len(suffix)
+ and filename[-len(suffix) - 1] in ('-', '_')):
+ return filename[:-len(suffix) - 1]
+ return os.path.splitext(filename)[0]
+
+
+def _is_test_filename(filename):
+ """Determines if the given filename has a suffix that identifies it as a test.
+
+ Args:
+ filename: The input filename.
+
+ Returns:
+ True if 'filename' looks like a test, False otherwise.
+ """
+ if (filename.endswith('_test.cpp')
+ or filename.endswith('_unittest.cpp')
+ or filename.endswith('_regtest.cpp')):
+ return True
+ return False
+
+
+def _classify_include(filename, include, is_system, include_state):
+ """Figures out what kind of header 'include' is.
+
+ Args:
+ filename: The current file cpp_style is running over.
+ include: The path to a #included file.
+ is_system: True if the #include used <> rather than "".
+ include_state: An _IncludeState instance in which the headers are inserted.
+
+ Returns:
+ One of the _XXX_HEADER constants.
+
+ For example:
+ >>> _classify_include('foo.cpp', 'config.h', False, _IncludeState())
+ _CONFIG_HEADER
+ >>> _classify_include('foo.cpp', 'foo.h', False, _IncludeState())
+ _PRIMARY_HEADER
+ >>> _classify_include('foo.cpp', 'bar.h', False, _IncludeState())
+ _OTHER_HEADER
+ """
+
+ # If it is a system header we know it is classified as _OTHER_HEADER.
+ if is_system:
+ return _OTHER_HEADER
+
+ # If the include is named config.h then this is WebCore/config.h.
+ if include == "config.h":
+ return _CONFIG_HEADER
+
+ # There cannot be primary includes in header files themselves. Only an
+ # include that exactly matches the header filename will be flagged as
+ # primary, so that it triggers the "don't include yourself" check.
+ if filename.endswith('.h') and filename != include:
+ return _OTHER_HEADER
+
+ # Qt's moc files do not follow the naming and ordering rules, so they should be skipped
+ if include.startswith('moc_') and include.endswith('.cpp'):
+ return _MOC_HEADER
+
+ if include.endswith('.moc'):
+ return _MOC_HEADER
+
+ # If the target file basename starts with the include we're checking
+ # then we consider it the primary header.
+ target_base = FileInfo(filename).base_name()
+ include_base = FileInfo(include).base_name()
+
+ # If we haven't encountered a primary header, then be lenient in checking.
+ if not include_state.visited_primary_section() and target_base.find(include_base) != -1:
+ return _PRIMARY_HEADER
+ # If we already encountered a primary header, perform a strict comparison.
+ # In case the two filename bases are the same then the above lenient check
+ # probably was a false positive.
+ elif include_state.visited_primary_section() and target_base == include_base:
+ if include == "ResourceHandleWin.h":
+ # FIXME: Thus far, we've only seen one example of these, but if we
+ # start to see more, please consider generalizing this check
+ # somehow.
+ return _OTHER_HEADER
+ return _PRIMARY_HEADER
+
+ return _OTHER_HEADER
+
+
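+# The header ordering this check enforces in an implementation file, sketched
+# with made-up header names:
+#     #include "config.h"
+#     #include "Foo.h"            // primary header
+#                                  (blank line required here)
+#     #include "AnotherClass.h"   // then the rest, alphabetically sorted
+#     #include <wtf/Vector.h>     // wtf headers use angle brackets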
+def check_include_line(filename, file_extension, clean_lines, line_number, include_state, error):
+ """Check rules that are applicable to #include lines.
+
+ Strings on #include lines are NOT removed from the elided lines, to make
+ certain tasks easier. However, to prevent false positives, checks
+ applicable to #include lines in check_language must be put here.
+
+ Args:
+ filename: The name of the current file.
+ file_extension: The current file extension, without the leading dot.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+
+ if (filename.find('WebKitTools/WebKitAPITest/') >= 0
+ or filename.find('WebKit/qt/QGVLauncher/') >= 0):
+ # Files in this directory are consumers of the WebKit API and
+ # therefore do not follow the same header including discipline as
+ # WebCore.
+ return
+
+ line = clean_lines.lines[line_number]
+
+ matched = _RE_PATTERN_INCLUDE.search(line)
+ if not matched:
+ return
+
+ include = matched.group(2)
+ is_system = (matched.group(1) == '<')
+
+ # Look for any of the stream classes that are part of standard C++.
+ if match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+ # Many unit tests use cout, so we exempt them.
+ if not _is_test_filename(filename):
+ error(line_number, 'readability/streams', 3,
+ 'Streams are highly discouraged.')
+
+ # Look for specific includes to fix.
+ if include.startswith('wtf/') and not is_system:
+ error(line_number, 'build/include', 4,
+ 'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')
+
+ duplicate_header = include in include_state
+ if duplicate_header:
+ error(line_number, 'build/include', 4,
+ '"%s" already included at %s:%s' %
+ (include, filename, include_state[include]))
+ else:
+ include_state[include] = line_number
+
+ header_type = _classify_include(filename, include, is_system, include_state)
+ include_state.header_types[line_number] = header_type
+
+ # Only proceed if this isn't a duplicate header.
+ if duplicate_header:
+ return
+
+ # We want to ensure that headers appear in the right order:
+ # 1) for implementation files: config.h, primary header, blank line, alphabetically sorted
+ # 2) for header files: alphabetically sorted
+ # The include_state object keeps track of the last type seen
+ # and complains if the header types are out of order or missing.
+ error_message = include_state.check_next_include_order(header_type, file_extension == "h")
+
+ # Check to make sure we have a blank line after primary header.
+ if not error_message and header_type == _PRIMARY_HEADER:
+ next_line = clean_lines.raw_lines[line_number + 1]
+ if not is_blank_line(next_line):
+ error(line_number, 'build/include_order', 4,
+ 'You should add a blank line after implementation file\'s own header.')
+
+ # Check to make sure all headers besides config.h and the primary header are
+ # alphabetically sorted. Skip Qt's moc files.
+ if not error_message and header_type == _OTHER_HEADER:
+ previous_line_number = line_number - 1
+ previous_line = clean_lines.lines[previous_line_number]
+ previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+ while (not previous_match and previous_line_number > 0
+ and not search(r'\A(#if|#ifdef|#ifndef|#else|#elif|#endif)', previous_line)):
+ previous_line_number -= 1
+ previous_line = clean_lines.lines[previous_line_number]
+ previous_match = _RE_PATTERN_INCLUDE.search(previous_line)
+ if previous_match:
+ previous_header_type = include_state.header_types[previous_line_number]
+ if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
+ error(line_number, 'build/include_order', 4,
+ 'Alphabetical sorting problem.')
+
+ if error_message:
+ if file_extension == 'h':
+ error(line_number, 'build/include_order', 4,
+ '%s Should be: alphabetically sorted.' %
+ error_message)
+ else:
+ error(line_number, 'build/include_order', 4,
+ '%s Should be: config.h, primary header, blank line, and then alphabetically sorted.' %
+ error_message)
+
+
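+# A few illustrative lines and the categories they trip in this check:
+#     int height = int(floatHeight);   -> readability/casting (use static_cast<int>)
+#     sprintf(buffer, "%d", size);     -> runtime/printf (use snprintf)
+#     char path[pathLength];           -> runtime/arrays (variable-length array)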
+def check_language(filename, clean_lines, line_number, file_extension, include_state,
+ error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists entirely of a comment, no need to
+ # check it.
+ line = clean_lines.elided[line_number]
+ if not line:
+ return
+
+ matched = _RE_PATTERN_INCLUDE.search(line)
+ if matched:
+ check_include_line(filename, file_extension, clean_lines, line_number, include_state, error)
+ return
+
+ # FIXME: figure out if they're using default arguments in fn proto.
+
+ # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ matched = search(
+ r'\b(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
+ if matched:
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts.
+ if not match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line):
+ error(line_number, 'readability/casting', 4,
+ 'Using deprecated casting style. '
+ 'Use static_cast<%s>(...) instead' %
+ matched.group(1))
+
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'static_cast',
+ r'\((int|float|double|bool|char|u?int(16|32|64))\)',
+ error)
+ # This doesn't catch all cases. Consider (const char * const)"hello".
+ check_c_style_cast(line_number, line, clean_lines.raw_lines[line_number],
+ 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
+
+ # In addition, we look for people taking the address of a cast. This
+ # is dangerous -- casts can assign to temporaries, so the pointer doesn't
+ # point where you think.
+ if search(
+ r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
+ error(line_number, 'runtime/casting', 4,
+ ('Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'))
+
+ # Check for people declaring static/global STL strings at the top level.
+ # This is dangerous because the C++ language does not guarantee that
+ # globals with constructors are initialized before the first access.
+ matched = match(
+ r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
+ line)
+ # Make sure it's not a function.
+ # Function template specialization looks like: "string foo<Type>(...".
+ # Class template definitions look like: "string Foo<Type>::Method(...".
+ if matched and not match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
+ matched.group(3)):
+ error(line_number, 'runtime/string', 4,
+ 'For a static/global string constant, use a C style string instead: '
+ '"%schar %s[]".' %
+ (matched.group(1), matched.group(2)))
+
+ # Check that we're not using RTTI outside of testing code.
+ if search(r'\bdynamic_cast<', line) and not _is_test_filename(filename):
+ error(line_number, 'runtime/rtti', 5,
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ "hierarchy, use static_cast<> to upcast. Google doesn't support "
+ 'RTTI.')
+
+ if search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
+ error(line_number, 'runtime/init', 4,
+ 'You seem to be initializing a member variable with itself.')
+
+ if file_extension == 'h':
+ # FIXME: check that 1-arg constructors are explicit.
+ # How to tell it's a constructor?
+ # (handled in check_for_non_standard_constructs for now)
+ pass
+
+ # Check if people are using the verboten C basic types. The only exception
+ # we regularly allow is "unsigned short port" for port.
+ if search(r'\bshort port\b', line):
+ if not search(r'\bunsigned short port\b', line):
+ error(line_number, 'runtime/int', 4,
+ 'Use "unsigned short" for ports, not "short"')
+
+ # When snprintf is used, the second argument shouldn't be a literal.
+ matched = search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
+ if matched:
+ error(line_number, 'runtime/printf', 3,
+ 'If you can, use sizeof(%s) instead of %s as the 2nd arg '
+ 'to snprintf.' % (matched.group(1), matched.group(2)))
+
+ # Check if some verboten C functions are being used.
+ if search(r'\bsprintf\b', line):
+ error(line_number, 'runtime/printf', 5,
+ 'Never use sprintf. Use snprintf instead.')
+ matched = search(r'\b(strcpy|strcat)\b', line)
+ if matched:
+ error(line_number, 'runtime/printf', 4,
+ 'Almost always, snprintf is better than %s' % matched.group(1))
+
+ if search(r'\bsscanf\b', line):
+ error(line_number, 'runtime/printf', 1,
+ 'sscanf can be ok, but is slow and can overflow buffers.')
+
+ # Check for suspicious usage of "if" like
+ # } if (a == b) {
+ if search(r'\}\s*if\s*\(', line):
+ error(line_number, 'readability/braces', 4,
+ 'Did you mean "else if"? If not, start a new line for "if".')
+
+ # Check for potential format string bugs like printf(foo).
+ # We constrain the pattern not to pick things like DocidForPrintf(foo).
+ # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
+ matched = re.search(r'\b((?:string)?printf)\s*\(([\w.\->()]+)\)', line, re.I)
+ if matched:
+ error(line_number, 'runtime/printf', 4,
+ 'Potential format string bug. Do %s("%%s", %s) instead.'
+ % (matched.group(1), matched.group(2)))
+
+ # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
+ matched = search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
+ if matched and not match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]+$", matched.group(2)):
+ error(line_number, 'runtime/memset', 4,
+ 'Did you mean "memset(%s, 0, %s)"?'
+ % (matched.group(1), matched.group(2)))
+
+ # Detect variable-length arrays.
+ matched = match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
+ if (matched and matched.group(2) != 'return' and matched.group(2) != 'delete' and
+ matched.group(3).find(']') == -1):
+ # Split the size using space and arithmetic operators as delimiters.
+ # If any of the resulting tokens are not compile time constants then
+ # report the error.
+ tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', matched.group(3))
+ is_const = True
+ skip_next = False
+ for tok in tokens:
+ if skip_next:
+ skip_next = False
+ continue
+
+ if search(r'sizeof\(.+\)', tok):
+ continue
+ if search(r'arraysize\(\w+\)', tok):
+ continue
+
+ tok = tok.lstrip('(')
+ tok = tok.rstrip(')')
+ if not tok:
+ continue
+ if match(r'\d+', tok):
+ continue
+ if match(r'0[xX][0-9a-fA-F]+', tok):
+ continue
+ if match(r'k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?k[A-Z0-9]\w*', tok):
+ continue
+ if match(r'(.+::)?[A-Z][A-Z0-9_]*', tok):
+ continue
+ # A catch all for tricky sizeof cases, including 'sizeof expression',
+ # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
+ # requires skipping the next token because we split on ' ' and '*'.
+ if tok.startswith('sizeof'):
+ skip_next = True
+ continue
+ is_const = False
+ break
+ if not is_const:
+ error(line_number, 'runtime/arrays', 1,
+ 'Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size.")
+
+ # Check for use of unnamed namespaces in header files. Registration
+ # macros are typically OK, so we allow use of "namespace {" on lines
+ # that end with backslashes.
+ if (file_extension == 'h'
+ and search(r'\bnamespace\s*{', line)
+ and line[-1] != '\\'):
+ error(line_number, 'build/namespaces', 4,
+ 'Do not use unnamed namespaces in header files. See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
+ check_identifier_name_in_declaration(filename, line_number, line, error)
+
+
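+# For instance, a declaration like "int image_width = 0;" is reported as
+# incorrectly named, while "int m_imageWidth = 0;" is fine because the m_
+# (and s_) prefixes are stripped before the underscore test.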
+def check_identifier_name_in_declaration(filename, line_number, line, error):
+ """Checks if identifier names contain any underscores.
+
+ Because identifiers in the libraries we use contain many underscores,
+ we only warn about declarations of identifiers and do not check uses
+ of identifiers.
+
+ Args:
+ filename: The name of the current file.
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ error: The function to call with any errors found.
+ """
+ # We don't check a return statement.
+ if match(r'\s*(return|delete)\b', line):
+ return
+
+ # Basically, a declaration is a type name followed by whitespace
+ # followed by an identifier. The type name can be complicated
+ # due to type adjectives and templates. We remove them first to
+ # simplify the process to find declarations of identifiers.
+
+ # Convert "long long", "long double", and "long long int" to
+ # simple types, but don't remove simple "long".
+ line = sub(r'long (long )?(?=long|double|int)', '', line)
+ line = sub(r'\b(unsigned|signed|inline|using|static|const|volatile|auto|register|extern|typedef|restrict|struct|class|virtual)(?=\W)', '', line)
+
+ # Remove all template parameters by removing matching < and >.
+ # Loop until no templates are removed to remove nested templates.
+ while True:
+ line, number_of_replacements = subn(r'<([\w\s:]|::)+\s*[*&]*\s*>', '', line)
+ if not number_of_replacements:
+ break
+
+ # Declarations of local variables can be in condition expressions
+ # of control flow statements (e.g., "if (RenderObject* p = o->parent())").
+ # We remove the keywords and the first parenthesis.
+ #
+ # Declarations in "while", "if", and "switch" are different from
+ # other declarations in two aspects:
+ #
+ # - There can be only one declaration between the parentheses.
+ # (i.e., you cannot write "if (int i = 0, j = 1) {}")
+ # - The variable must be initialized.
+ # (i.e., you cannot write "if (int i) {}")
+ #
+ # and we will need different treatments for them.
+ line = sub(r'^\s*for\s*\(', '', line)
+ line, control_statement = subn(r'^\s*(while|else if|if|switch)\s*\(', '', line)
+
+ # Detect variable and functions.
+ type_regexp = r'\w([\w]|\s*[*&]\s*|::)+'
+ identifier_regexp = r'(?P<identifier>[\w:]+)'
+ character_after_identifier_regexp = r'(?P<character_after_identifier>[[;()=,])(?!=)'
+ declaration_without_type_regexp = r'\s*' + identifier_regexp + r'\s*' + character_after_identifier_regexp
+ declaration_with_type_regexp = r'\s*' + type_regexp + r'\s' + declaration_without_type_regexp
+ is_function_arguments = False
+ number_of_identifiers = 0
+ while True:
+ # If we are seeing the first identifier or arguments of a
+ # function, there should be a type name before an identifier.
+ if not number_of_identifiers or is_function_arguments:
+ declaration_regexp = declaration_with_type_regexp
+ else:
+ declaration_regexp = declaration_without_type_regexp
+
+ matched = match(declaration_regexp, line)
+ if not matched:
+ return
+ identifier = matched.group('identifier')
+ character_after_identifier = matched.group('character_after_identifier')
+
+ # If we removed a non-for-control statement, the character after
+ # the identifier should be '='. With this rule, we can avoid
+ # warning for cases like "if (val & INT_MAX) {".
+ if control_statement and character_after_identifier != '=':
+ return
+
+ is_function_arguments = is_function_arguments or character_after_identifier == '('
+
+ # Remove "m_" and "s_" to allow them.
+ modified_identifier = sub(r'(^|(?<=::))[ms]_', '', identifier)
+ if modified_identifier.find('_') >= 0:
+ # Various exceptions to the rule: JavaScript op codes functions, const_iterator.
+ if (not (filename.find('JavaScriptCore') >= 0 and modified_identifier.find('_op_') >= 0)
+ and not filename.find('WebKit/gtk/webkit/') >= 0
+ and not modified_identifier.startswith('tst_')
+ and not modified_identifier.startswith('webkit_dom_object_')
+ and not modified_identifier.startswith('qt_')
+ and not modified_identifier.find('::qt_') >= 0
+ and not modified_identifier == "const_iterator"):
+ error(line_number, 'readability/naming', 4, identifier + " is incorrectly named. Don't use underscores in your identifier names.")
+
+ # There can be only one declaration in non-for-control statements.
+ if control_statement:
+ return
+ # We should continue checking if this is a function
+ # declaration because we need to check its arguments.
+ # Also, we need to check multiple declarations.
+ if character_after_identifier != '(' and character_after_identifier != ',':
+ return
+
+ number_of_identifiers += 1
+ line = line[matched.end():]
+
+
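+# Sketch: for the line "int i = (int)f;" the pattern handed in from
+# check_language() matches "(int)"; since what follows is neither a bare
+# function declaration nor a sizeof, the C-style cast error is reported,
+# suggesting static_cast<int>(...).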
+def check_c_style_cast(line_number, line, raw_line, cast_type, pattern,
+ error):
+ """Checks for a C-style cast by looking for the pattern.
+
+ This also handles sizeof(type) warnings, due to similarity of content.
+
+ Args:
+ line_number: The number of the line to check.
+ line: The line of code to check.
+ raw_line: The raw line of code to check, with comments.
+ cast_type: The string for the C++ cast to recommend. This is either
+ reinterpret_cast or static_cast, depending.
+ pattern: The regular expression used to find C-style casts.
+ error: The function to call with any errors found.
+ """
+ matched = search(pattern, line)
+ if not matched:
+ return
+
+ # e.g., sizeof(int)
+ sizeof_match = match(r'.*sizeof\s*$', line[0:matched.start(1) - 1])
+ if sizeof_match:
+ error(line_number, 'runtime/sizeof', 1,
+ 'Using sizeof(type). Use sizeof(varname) instead if possible')
+ return
+
+ remainder = line[matched.end(0):]
+
+ # The close paren is for function pointers as arguments to a function.
+ # eg, void foo(void (*bar)(int));
+ # The semicolon check is a more basic function check; also possibly a
+ # function pointer typedef.
+ # eg, void foo(int); or void foo(int) const;
+ # The equals check is for function pointer assignment.
+ # eg, void *(*foo)(int) = ...
+ #
+ # Right now, this will only catch cases where there's a single argument, and
+ # it's unnamed. It should probably be expanded to check for multiple
+ # arguments with some unnamed.
+ function_match = match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)))', remainder)
+ if function_match:
+ if (not function_match.group(3)
+ or function_match.group(3) == ';'
+ or raw_line.find('/*') < 0):
+ error(line_number, 'readability/function', 3,
+ 'All parameters should be named in a function')
+ return
+
+ # At this point, all that should be left is actual casts.
+ error(line_number, 'readability/casting', 4,
+ 'Using C-style cast. Use %s<%s>(...) instead' %
+ (cast_type, matched.group(1)))
+
+
+_HEADERS_CONTAINING_TEMPLATES = (
+ ('<deque>', ('deque',)),
+ ('<functional>', ('unary_function', 'binary_function',
+ 'plus', 'minus', 'multiplies', 'divides', 'modulus',
+ 'negate',
+ 'equal_to', 'not_equal_to', 'greater', 'less',
+ 'greater_equal', 'less_equal',
+ 'logical_and', 'logical_or', 'logical_not',
+ 'unary_negate', 'not1', 'binary_negate', 'not2',
+ 'bind1st', 'bind2nd',
+ 'pointer_to_unary_function',
+ 'pointer_to_binary_function',
+ 'ptr_fun',
+ 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+ 'mem_fun_ref_t',
+ 'const_mem_fun_t', 'const_mem_fun1_t',
+ 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+ 'mem_fun_ref',
+ )),
+ ('<limits>', ('numeric_limits',)),
+ ('<list>', ('list',)),
+ ('<map>', ('map', 'multimap',)),
+ ('<memory>', ('allocator',)),
+ ('<queue>', ('queue', 'priority_queue',)),
+ ('<set>', ('set', 'multiset',)),
+ ('<stack>', ('stack',)),
+ ('<string>', ('char_traits', 'basic_string',)),
+ ('<utility>', ('pair',)),
+ ('<vector>', ('vector',)),
+
+ # gcc extensions.
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_HEADERS_ACCEPTED_BUT_NOT_PROMOTED = {
+ # We can trust with reasonable confidence that map gives us pair<>, too.
+ 'pair<>': ('map', 'multimap', 'hash_map', 'hash_multimap')
+}
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
+
+
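+# For example (the paths are illustrative):
+#     files_belong_to_same_module('/absolute/path/to/base/sysinfo.cpp', 'base/sysinfo.h')
+# returns (True, '/absolute/path/to/'), the second element being the prefix a
+# caller can prepend to open the header.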
+def files_belong_to_same_module(filename_cpp, filename_h):
+ """Check if these two filenames belong to the same module.
+
+ The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cpp, foo_test.cpp and foo_unittest.cpp belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cpp contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cpp', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cpp and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives. This should be sufficiently rare in practice.
+
+ Args:
+ filename_cpp: is the path for the .cpp file
+ filename_h: is the path for the header path
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cpp and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
+ if not filename_cpp.endswith('.cpp'):
+ return (False, '')
+ filename_cpp = filename_cpp[:-len('.cpp')]
+ if filename_cpp.endswith('_unittest'):
+ filename_cpp = filename_cpp[:-len('_unittest')]
+ elif filename_cpp.endswith('_test'):
+ filename_cpp = filename_cpp[:-len('_test')]
+ filename_cpp = filename_cpp.replace('/public/', '/')
+ filename_cpp = filename_cpp.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cpp.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cpp[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
+
+def update_include_state(filename, include_state, io=codecs):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+ io: The io factory to use to read the file. Provided for testability.
+
+ Returns:
+ True if a header was successfully added. False otherwise.
+ """
+ header_file = None
+ try:
+ header_file = io.open(filename, 'r', 'utf8', 'replace')
+ except IOError:
+ return False
+ line_number = 0
+ for line in header_file:
+ line_number += 1
+ clean_line = cleanse_comments(line)
+ matched = _RE_PATTERN_INCLUDE.search(clean_line)
+ if matched:
+ include = matched.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, line_number))
+ return True
+
+
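+# For example, code that uses std::map but whose file (and its own header)
+# never includes <map> would get:
+#     "Add #include <map> for map<>"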
+def check_for_include_what_you_use(filename, clean_lines, include_state, error,
+ io=codecs):
+ """Reports for missing stl includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the stl containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+ less<> in a .h file, only one (the latter in the file) of these will be
+ reported as a reason to include <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ io: The IO factory to use to read the header file. Provided for unittest
+ injection.
+ """
+ required = {} # A map of header name to line_number and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for line_number in xrange(clean_lines.num_lines()):
+ line = clean_lines.elided[line_number]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ if _RE_PATTERN_STRING.search(line):
+ required['<string>'] = (line_number, 'string')
+
+ for pattern, template, header in _re_pattern_algorithm_header:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+ # The following function is just a speed up, no semantics are changed.
+ if '<' not in line: # Reduces the cpu time usage by skipping lines.
+ continue
+
+ for pattern, template, header in _re_pattern_templates:
+ if pattern.search(line):
+ required[header] = (line_number, template)
+
+ # The policy is that if you #include something in foo.h you don't need to
+ # include it again in foo.cpp. Here, we will look at possible includes.
+ # Let's copy the include_state so it is only messed up within this function.
+ include_state = include_state.copy()
+
+ # Did we find the header for this file (if any) and successfully load it?
+ header_found = False
+
+ # Use the absolute path so that matching works properly.
+ abs_filename = os.path.abspath(filename)
+
+ # For Emacs's flymake.
+ # If cpp_style is invoked from Emacs's flymake, a temporary file is generated
+ # by flymake and that file name might end with '_flymake.cpp'. In that case,
+ # restore original file name here so that the corresponding header file can be
+ # found.
+ # e.g. If the file name is 'foo_flymake.cpp', we should search for 'foo.h'
+ # instead of 'foo_flymake.h'
+ emacs_flymake_suffix = '_flymake.cpp'
+ if abs_filename.endswith(emacs_flymake_suffix):
+ abs_filename = abs_filename[:-len(emacs_flymake_suffix)] + '.cpp'
+
+ # include_state is modified during iteration, so we iterate over a copy of
+ # the keys.
+ for header in include_state.keys(): #NOLINT
+ (same_module, common_path) = files_belong_to_same_module(abs_filename, header)
+ fullpath = common_path + header
+ if same_module and update_include_state(fullpath, include_state, io):
+ header_found = True
+
+ # If we can't find the header file for a .cpp, assume it's because we don't
+ # know where to look. In that case we'll give up as we're not sure they
+ # didn't include it in the .h file.
+ # FIXME: Do a better job of finding .h files so we are confident that
+ # not having the .h file means there isn't one.
+ if filename.endswith('.cpp') and not header_found:
+ return
+
+ # All the lines have been processed, report the errors found.
+ for required_header_unstripped in required:
+ template = required[required_header_unstripped][1]
+ if template in _HEADERS_ACCEPTED_BUT_NOT_PROMOTED:
+ headers = _HEADERS_ACCEPTED_BUT_NOT_PROMOTED[template]
+ if [True for header in headers if header in include_state]:
+ continue
+ if required_header_unstripped.strip('<>"') not in include_state:
+ error(required[required_header_unstripped][0],
+ 'build/include_what_you_use', 4,
+ 'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+def process_line(filename, file_extension,
+ clean_lines, line, include_state, function_state,
+ class_state, file_state, error):
+ """Processes a single line in the file.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ clean_lines: An array of strings, each representing a line of the file,
+ with comments stripped.
+ line: Number of line being processed.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ function_state: A _FunctionState instance which counts function lines, etc.
+ class_state: A _ClassState instance which maintains information about
+ the current stack of nested class declarations being parsed.
+ file_state: A _FileState instance which maintains information about
+ the state of things in the file.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ line number, category, confidence, and message.
+
+ """
+ raw_lines = clean_lines.raw_lines
+ check_for_function_lengths(clean_lines, line, function_state, error)
+ if search(r'\bNOLINT\b', raw_lines[line]): # ignore nolint lines
+ return
+ check_for_multiline_comments_and_strings(clean_lines, line, error)
+ check_style(clean_lines, line, file_extension, file_state, error)
+ check_language(filename, clean_lines, line, file_extension, include_state,
+ error)
+ check_for_non_standard_constructs(clean_lines, line, class_state, error)
+ check_posix_threading(clean_lines, line, error)
+ check_invalid_increment(clean_lines, line, error)
+
+
+def _process_lines(filename, file_extension, lines, error, verbosity):
+ """Performs lint checks and reports any errors to the given error function.
+
+ Args:
+ filename: Filename of the file that is being processed.
+ file_extension: The extension (dot not included) of the file.
+ lines: An array of strings, each representing a line of the file, with the
+ last element being empty if the file is terminated with a newline.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ line number, category, confidence, and message.
+ """
+ lines = (['// marker so line numbers and indices both start at 1'] + lines +
+ ['// marker so line numbers end in a known way'])
+
+ include_state = _IncludeState()
+ function_state = _FunctionState(verbosity)
+ class_state = _ClassState()
+ file_state = _FileState()
+
+ check_for_copyright(lines, error)
+
+ if file_extension == 'h':
+ check_for_header_guard(filename, lines, error)
+
+ remove_multi_line_comments(lines, error)
+ clean_lines = CleansedLines(lines)
+ for line in xrange(clean_lines.num_lines()):
+ process_line(filename, file_extension, clean_lines, line,
+ include_state, function_state, class_state, file_state, error)
+ class_state.check_finished(error)
+
+ check_for_include_what_you_use(filename, clean_lines, include_state, error)
+
+ # We check here rather than inside process_line so that we see raw
+ # lines rather than "cleaned" lines.
+ check_for_unicode_replacement_characters(lines, error)
+
+ check_for_new_line_at_eof(lines, error)
+
+
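+# Typical use, sketched with an illustrative path (handle_style_error is any
+# callable accepting line number, category, confidence and message):
+#     processor = CppProcessor('WebCore/dom/Document.cpp', 'cpp', handle_style_error, 1)
+#     processor.process(contents.split('\n'))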
+class CppProcessor(object):
+
+ """Processes C++ lines for checking style."""
+
+ # This list is used to--
+ #
+ # (1) generate an explicit list of all possible categories,
+ # (2) unit test that all checked categories have valid names, and
+ # (3) unit test that all categories are getting unit tested.
+ #
+ categories = set([
+ 'build/class',
+ 'build/deprecated',
+ 'build/endif_comment',
+ 'build/forward_decl',
+ 'build/header_guard',
+ 'build/include',
+ 'build/include_order',
+ 'build/include_what_you_use',
+ 'build/namespaces',
+ 'build/printf_format',
+ 'build/storage_class',
+ 'build/using_std',
+ 'legal/copyright',
+ 'readability/braces',
+ 'readability/casting',
+ 'readability/check',
+ 'readability/comparison_to_zero',
+ 'readability/constructors',
+ 'readability/control_flow',
+ 'readability/fn_size',
+ 'readability/function',
+ 'readability/multiline_comment',
+ 'readability/multiline_string',
+ 'readability/naming',
+ 'readability/null',
+ 'readability/streams',
+ 'readability/todo',
+ 'readability/utf8',
+ 'runtime/arrays',
+ 'runtime/casting',
+ 'runtime/explicit',
+ 'runtime/init',
+ 'runtime/int',
+ 'runtime/invalid_increment',
+ 'runtime/max_min_macros',
+ 'runtime/memset',
+ 'runtime/printf',
+ 'runtime/printf_format',
+ 'runtime/references',
+ 'runtime/rtti',
+ 'runtime/sizeof',
+ 'runtime/string',
+ 'runtime/threadsafe_fn',
+ 'runtime/virtual',
+ 'whitespace/blank_line',
+ 'whitespace/braces',
+ 'whitespace/comma',
+ 'whitespace/comments',
+ 'whitespace/declaration',
+ 'whitespace/end_of_line',
+ 'whitespace/ending_newline',
+ 'whitespace/indent',
+ 'whitespace/labels',
+ 'whitespace/line_length',
+ 'whitespace/newline',
+ 'whitespace/operators',
+ 'whitespace/parens',
+ 'whitespace/semicolon',
+ 'whitespace/tab',
+ 'whitespace/todo',
+ ])
+
+ def __init__(self, file_path, file_extension, handle_style_error, verbosity):
+ """Create a CppProcessor instance.
+
+ Args:
+ file_extension: A string that is the file extension, without
+ the leading dot.
+
+ """
+ self.file_extension = file_extension
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+ self.verbosity = verbosity
+
+ # Useful for unit testing.
+ def __eq__(self, other):
+ """Return whether this CppProcessor instance is equal to another."""
+ if self.file_extension != other.file_extension:
+ return False
+ if self.file_path != other.file_path:
+ return False
+ if self.handle_style_error != other.handle_style_error:
+ return False
+ if self.verbosity != other.verbosity:
+ return False
+
+ return True
+
+ # Useful for unit testing.
+ def __ne__(self, other):
+ # Python does not automatically deduce __ne__() from __eq__().
+ return not self.__eq__(other)
+
+ def process(self, lines):
+ _process_lines(self.file_path, self.file_extension, lines,
+ self.handle_style_error, self.verbosity)
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, file_extension, lines, error, verbosity):
+ processor = CppProcessor(filename, file_extension, error, verbosity)
+ processor.process(lines)
+
diff --git a/WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py b/WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py
new file mode 100644
index 0000000..e556cd3
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py
@@ -0,0 +1,3706 @@
+#!/usr/bin/python
+# -*- coding: utf-8; -*-
+#
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2009 Torch Mobile Inc.
+# Copyright (C) 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for cpp_style.py."""
+
+# FIXME: Add a good test that tests UpdateIncludeState.
+
+import codecs
+import os
+import random
+import re
+import unittest
+import cpp as cpp_style
+from cpp import CppProcessor
+
+# This class works as an error collector and replaces cpp_style.Error
+# function for the unit tests. We also verify each category we see
+# is in STYLE_CATEGORIES, to help keep that list up to date.
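+# Each collected entry is formatted as "<message> [<category>] [<confidence>]",
+# e.g. "Tab found; better to use spaces [whitespace/tab] [1]".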
+class ErrorCollector:
+ _all_style_categories = CppProcessor.categories
+ # This is a list including all categories seen in any unit test.
+ _seen_style_categories = {}
+
+ def __init__(self, assert_fn):
+ """assert_fn: a function to call when we notice a problem."""
+ self._assert_fn = assert_fn
+ self._errors = []
+
+ def __call__(self, unused_linenum, category, confidence, message):
+ self._assert_fn(category in self._all_style_categories,
+ 'Message "%s" has category "%s",'
+ ' which is not in STYLE_CATEGORIES' % (message, category))
+ self._seen_style_categories[category] = 1
+ self._errors.append('%s [%s] [%d]' % (message, category, confidence))
+
+ def results(self):
+ if len(self._errors) < 2:
+ return ''.join(self._errors) # Most tests expect to have a string.
+ else:
+ return self._errors # Let's give a list if there is more than one.
+
+ def result_list(self):
+ return self._errors
+
+ def verify_all_categories_are_seen(self):
+ """Fails if there's a category in _all_style_categories - _seen_style_categories.
+
+ This should only be called after all tests are run, so
+ _seen_style_categories has had a chance to fully populate. Since
+ this isn't called from within the normal unittest framework, we
+ can't use the normal unittest assert macros. Instead we just exit
+ when we see an error. Good thing this test is always run last!
+ """
+ for category in self._all_style_categories:
+ if category not in self._seen_style_categories:
+ import sys
+ sys.exit('FATAL ERROR: There are no tests for category "%s"' % category)
+
+ def remove_if_present(self, substr):
+ for (index, error) in enumerate(self._errors):
+ if error.find(substr) != -1:
+ self._errors = self._errors[0:index] + self._errors[(index + 1):]
+ break
+
+
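+# A minimal usage sketch (illustrative only, not exercised by the suite):
+# inside a test, the collector is handed to cpp_style wherever an error
+# handler is expected and is then queried for the accumulated messages.
+# The category below is assumed to be one of CppProcessor.categories; it is
+# the same one the whitespace/tab tests further down rely on.
+#
+#     collector = ErrorCollector(self.assert_)
+#     collector(1, 'whitespace/tab', 1, 'Tab found; better to use spaces')
+#     collector.results()
+#     # -> 'Tab found; better to use spaces [whitespace/tab] [1]'
+
+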
+# This class is a lame mock of the codecs module. It does not verify the
+# filename, mode, or encoding, but the current use case does not need that.
+class MockIo:
+ def __init__(self, mock_file):
+ self.mock_file = mock_file
+
+ def open(self, unused_filename, unused_mode, unused_encoding, _): # NOLINT
+ # (lint doesn't like open as a method name)
+ return self.mock_file
+
+
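+# Illustrative only: MockIo stands in for the codecs module so that the
+# include-what-you-use check "reads" a fake header instead of touching the
+# filesystem. A composite of the calls made in test_include_what_you_use
+# below (arguments are illustrative):
+#
+#     self.perform_include_what_you_use(
+#         '#include "config.h"\n#include "blah/a.h"\n',
+#         filename='blah/a.cpp',
+#         io=MockIo(['#include <set>']))
+
+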
+class CppFunctionsTest(unittest.TestCase):
+
+ """Supports testing functions that do not need CppStyleTestBase."""
+
+ def test_is_c_or_objective_c(self):
+ self.assertTrue(cpp_style.is_c_or_objective_c("c"))
+ self.assertTrue(cpp_style.is_c_or_objective_c("m"))
+ self.assertFalse(cpp_style.is_c_or_objective_c("cpp"))
+
+
+class CppStyleTestBase(unittest.TestCase):
+ """Provides some useful helper functions for cpp_style tests.
+
+ Attributes:
+ verbosity: An integer that is the current verbosity level for
+ the tests.
+
+ """
+
+ # FIXME: Refactor the unit tests so the verbosity level is passed
+ # explicitly, just like it is in the real code.
+ verbosity = 1
+
+ # Helper function to avoid needing to explicitly pass verbosity
+ # in all the unit test calls to cpp_style.process_file_data().
+ def process_file_data(self, filename, file_extension, lines, error):
+ """Call cpp_style.process_file_data() with the current verbosity."""
+ return cpp_style.process_file_data(filename, file_extension, lines, error, self.verbosity)
+
+ # Perform lint on single line of input and return the error message.
+ def perform_single_line_lint(self, code, file_name):
+ error_collector = ErrorCollector(self.assert_)
+ lines = code.split('\n')
+ cpp_style.remove_multi_line_comments(lines, error_collector)
+ clean_lines = cpp_style.CleansedLines(lines)
+ include_state = cpp_style._IncludeState()
+ function_state = cpp_style._FunctionState(self.verbosity)
+ ext = file_name[file_name.rfind('.') + 1:]
+ class_state = cpp_style._ClassState()
+ file_state = cpp_style._FileState()
+ cpp_style.process_line(file_name, ext, clean_lines, 0,
+ include_state, function_state,
+ class_state, file_state, error_collector)
+ # Single-line lint tests are allowed to fail the 'unlintable function'
+ # check.
+ error_collector.remove_if_present(
+ 'Lint failed to find start of function body.')
+ return error_collector.results()
+
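+ # Illustrative only: a typical round trip through this helper, taken from
+ # the trailing-whitespace test further down (message text assumed to match
+ # what cpp_style emits at confidence 4):
+ #
+ #     self.perform_single_line_lint('// Hello there ', 'foo.cpp')
+ #     # -> 'Line ends in whitespace. Consider deleting these extra
+ #     #     spaces. [whitespace/end_of_line] [4]'
+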
+ # Perform lint over multiple lines and return the error message.
+ def perform_multi_line_lint(self, code, file_extension):
+ error_collector = ErrorCollector(self.assert_)
+ lines = code.split('\n')
+ cpp_style.remove_multi_line_comments(lines, error_collector)
+ lines = cpp_style.CleansedLines(lines)
+ class_state = cpp_style._ClassState()
+ file_state = cpp_style._FileState()
+ for i in xrange(lines.num_lines()):
+ cpp_style.check_style(lines, i, file_extension, file_state, error_collector)
+ cpp_style.check_for_non_standard_constructs(lines, i, class_state,
+ error_collector)
+ class_state.check_finished(error_collector)
+ return error_collector.results()
+
+ # Similar to perform_multi_line_lint, but calls check_language instead of
+ # check_for_non_standard_constructs
+ def perform_language_rules_check(self, file_name, code):
+ error_collector = ErrorCollector(self.assert_)
+ include_state = cpp_style._IncludeState()
+ lines = code.split('\n')
+ cpp_style.remove_multi_line_comments(lines, error_collector)
+ lines = cpp_style.CleansedLines(lines)
+ ext = file_name[file_name.rfind('.') + 1:]
+ for i in xrange(lines.num_lines()):
+ cpp_style.check_language(file_name, lines, i, ext, include_state,
+ error_collector)
+ return error_collector.results()
+
+ def perform_function_lengths_check(self, code):
+ """Perform Lint function length check on block of code and return warnings.
+
+ Builds up an array of lines corresponding to the code and strips comments
+ using cpp_style functions.
+
+ Establishes an error collector and invokes the function length checking
+ function following cpp_style's pattern.
+
+ Args:
+ code: C++ source code expected to generate a warning message.
+
+ Returns:
+ The accumulated errors.
+ """
+ error_collector = ErrorCollector(self.assert_)
+ function_state = cpp_style._FunctionState(self.verbosity)
+ lines = code.split('\n')
+ cpp_style.remove_multi_line_comments(lines, error_collector)
+ lines = cpp_style.CleansedLines(lines)
+ for i in xrange(lines.num_lines()):
+ cpp_style.check_for_function_lengths(lines, i,
+ function_state, error_collector)
+ return error_collector.results()
+
+ def perform_include_what_you_use(self, code, filename='foo.h', io=codecs):
+ # First, build up the include state.
+ error_collector = ErrorCollector(self.assert_)
+ include_state = cpp_style._IncludeState()
+ lines = code.split('\n')
+ cpp_style.remove_multi_line_comments(lines, error_collector)
+ lines = cpp_style.CleansedLines(lines)
+ file_extension = filename[filename.rfind('.') + 1:]
+ for i in xrange(lines.num_lines()):
+ cpp_style.check_language(filename, lines, i, file_extension, include_state,
+ error_collector)
+ # We could clear the error_collector here, but this should
+ # also be fine, since our IncludeWhatYouUse unittests do not
+ # have language problems.
+
+ # Second, look for missing includes.
+ cpp_style.check_for_include_what_you_use(filename, lines, include_state,
+ error_collector, io)
+ return error_collector.results()
+
+ # Perform lint and compare the error message with "expected_message".
+ def assert_lint(self, code, expected_message, file_name='foo.cpp'):
+ self.assertEquals(expected_message, self.perform_single_line_lint(code, file_name))
+
+ def assert_lint_one_of_many_errors_re(self, code, expected_message_re, file_name='foo.cpp'):
+ messages = self.perform_single_line_lint(code, file_name)
+ for message in messages:
+ if re.search(expected_message_re, message):
+ return
+
+ self.assertEquals(expected_message_re, messages)
+
+ def assert_multi_line_lint(self, code, expected_message, file_name='foo.h'):
+ file_extension = file_name[file_name.rfind('.') + 1:]
+ self.assertEquals(expected_message, self.perform_multi_line_lint(code, file_extension))
+
+ def assert_multi_line_lint_re(self, code, expected_message_re, file_name='foo.h'):
+ file_extension = file_name[file_name.rfind('.') + 1:]
+ message = self.perform_multi_line_lint(code, file_extension)
+ if not re.search(expected_message_re, message):
+ self.fail('Message was:\n' + message + '\nExpected match to "' + expected_message_re + '"')
+
+ def assert_language_rules_check(self, file_name, code, expected_message):
+ self.assertEquals(expected_message,
+ self.perform_language_rules_check(file_name, code))
+
+ def assert_include_what_you_use(self, code, expected_message):
+ self.assertEquals(expected_message,
+ self.perform_include_what_you_use(code))
+
+ def assert_blank_lines_check(self, lines, start_errors, end_errors):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp', lines, error_collector)
+ self.assertEquals(
+ start_errors,
+ error_collector.results().count(
+ 'Blank line at the start of a code block. Is this needed?'
+ ' [whitespace/blank_line] [2]'))
+ self.assertEquals(
+ end_errors,
+ error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+
+class CppStyleTest(CppStyleTestBase):
+
+ # Test get line width.
+ def test_get_line_width(self):
+ self.assertEquals(0, cpp_style.get_line_width(''))
+ self.assertEquals(10, cpp_style.get_line_width(u'x' * 10))
+ self.assertEquals(16, cpp_style.get_line_width(u'都|道|府|県|支庁'))
+
+ def test_find_next_multi_line_comment_start(self):
+ self.assertEquals(1, cpp_style.find_next_multi_line_comment_start([''], 0))
+
+ lines = ['a', 'b', '/* c']
+ self.assertEquals(2, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+ lines = ['char a[] = "/*";'] # not recognized as comment.
+ self.assertEquals(1, cpp_style.find_next_multi_line_comment_start(lines, 0))
+
+ def test_find_next_multi_line_comment_end(self):
+ self.assertEquals(1, cpp_style.find_next_multi_line_comment_end([''], 0))
+ lines = ['a', 'b', ' c */']
+ self.assertEquals(2, cpp_style.find_next_multi_line_comment_end(lines, 0))
+
+ def test_remove_multi_line_comments_from_range(self):
+ lines = ['a', ' /* comment ', ' * still comment', ' comment */ ', 'b']
+ cpp_style.remove_multi_line_comments_from_range(lines, 1, 4)
+ self.assertEquals(['a', '// dummy', '// dummy', '// dummy', 'b'], lines)
+
+ def test_spaces_at_end_of_line(self):
+ self.assert_lint(
+ '// Hello there ',
+ 'Line ends in whitespace. Consider deleting these extra spaces.'
+ ' [whitespace/end_of_line] [4]')
+
+ # Test C-style cast cases.
+ def test_cstyle_cast(self):
+ self.assert_lint(
+ 'int a = (int)1.0;',
+ 'Using C-style cast. Use static_cast<int>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'int *a = (int *)DEFINED_VALUE;',
+ 'Using C-style cast. Use reinterpret_cast<int *>(...) instead'
+ ' [readability/casting] [4]', 'foo.c')
+ self.assert_lint(
+ 'uint16 a = (uint16)1.0;',
+ 'Using C-style cast. Use static_cast<uint16>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'int32 a = (int32)1.0;',
+ 'Using C-style cast. Use static_cast<int32>(...) instead'
+ ' [readability/casting] [4]')
+ self.assert_lint(
+ 'uint64 a = (uint64)1.0;',
+ 'Using C-style cast. Use static_cast<uint64>(...) instead'
+ ' [readability/casting] [4]')
+
+ # Test taking address of casts (runtime/casting)
+ def test_runtime_casting(self):
+ self.assert_lint(
+ 'int* x = &static_cast<int*>(foo);',
+ 'Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]')
+
+ self.assert_lint(
+ 'int* x = &dynamic_cast<int *>(foo);',
+ ['Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]',
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ 'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
+ 'RTTI. [runtime/rtti] [5]'])
+
+ self.assert_lint(
+ 'int* x = &reinterpret_cast<int *>(foo);',
+ 'Are you taking an address of a cast? '
+ 'This is dangerous: could be a temp var. '
+ 'Take the address before doing the cast, rather than after'
+ ' [runtime/casting] [4]')
+
+ # It's OK to cast an address.
+ self.assert_lint(
+ 'int* x = reinterpret_cast<int *>(&foo);',
+ '')
+
+ def test_runtime_selfinit(self):
+ self.assert_lint(
+ 'Foo::Foo(Bar r, Bel l) : r_(r_), l_(l_) { }',
+ 'You seem to be initializing a member variable with itself.'
+ ' [runtime/init] [4]')
+ self.assert_lint(
+ 'Foo::Foo(Bar r, Bel l) : r_(r), l_(l) { }',
+ '')
+ self.assert_lint(
+ 'Foo::Foo(Bar r) : r_(r), l_(r_), ll_(l_) { }',
+ '')
+
+ def test_runtime_rtti(self):
+ statement = 'int* x = dynamic_cast<int*>(&foo);'
+ error_message = (
+ 'Do not use dynamic_cast<>. If you need to cast within a class '
+ 'hierarchy, use static_cast<> to upcast. Google doesn\'t support '
+ 'RTTI. [runtime/rtti] [5]')
+ # dynamic_cast is disallowed in most files.
+ self.assert_language_rules_check('foo.cpp', statement, error_message)
+ self.assert_language_rules_check('foo.h', statement, error_message)
+ # It is explicitly allowed in tests, however.
+ self.assert_language_rules_check('foo_test.cpp', statement, '')
+ self.assert_language_rules_check('foo_unittest.cpp', statement, '')
+ self.assert_language_rules_check('foo_regtest.cpp', statement, '')
+
+ # We cannot test this functionality because of differences in the
+ # function definitions. In any case, we may never enable this.
+ #
+ # # Test for unnamed arguments in a method.
+ # def test_check_for_unnamed_params(self):
+ # message = ('All parameters should be named in a function'
+ # ' [readability/function] [3]')
+ # self.assert_lint('virtual void A(int*) const;', message)
+ # self.assert_lint('virtual void B(void (*fn)(int*));', message)
+ # self.assert_lint('virtual void C(int*);', message)
+ # self.assert_lint('void *(*f)(void *) = x;', message)
+ # self.assert_lint('void Method(char*) {', message)
+ # self.assert_lint('void Method(char*);', message)
+ # self.assert_lint('void Method(char* /*x*/);', message)
+ # self.assert_lint('typedef void (*Method)(int32);', message)
+ # self.assert_lint('static void operator delete[](void*) throw();', message)
+ #
+ # self.assert_lint('virtual void D(int* p);', '')
+ # self.assert_lint('void operator delete(void* x) throw();', '')
+ # self.assert_lint('void Method(char* x)\n{', '')
+ # self.assert_lint('void Method(char* /*x*/)\n{', '')
+ # self.assert_lint('void Method(char* x);', '')
+ # self.assert_lint('typedef void (*Method)(int32 x);', '')
+ # self.assert_lint('static void operator delete[](void* x) throw();', '')
+ # self.assert_lint('static void operator delete[](void* /*x*/) throw();', '')
+ #
+ # # This one should technically warn, but doesn't because the function
+ # # pointer is confusing.
+ # self.assert_lint('virtual void E(void (*fn)(int* p));', '')
+
+ # Test deprecated casts such as int(d)
+ def test_deprecated_cast(self):
+ self.assert_lint(
+ 'int a = int(2.2);',
+ 'Using deprecated casting style. '
+ 'Use static_cast<int>(...) instead'
+ ' [readability/casting] [4]')
+ # Checks for false positives...
+ self.assert_lint(
+ 'int a = int(); // Constructor, o.k.',
+ '')
+ self.assert_lint(
+ 'X::X() : a(int()) {} // default Constructor, o.k.',
+ '')
+ self.assert_lint(
+ 'operator bool(); // Conversion operator, o.k.',
+ '')
+
+ # The second parameter to a gMock method definition is a function signature
+ # that often looks like a bad cast but should not be picked up by lint.
+ def test_mock_method(self):
+ self.assert_lint(
+ 'MOCK_METHOD0(method, int());',
+ '')
+ self.assert_lint(
+ 'MOCK_CONST_METHOD1(method, float(string));',
+ '')
+ self.assert_lint(
+ 'MOCK_CONST_METHOD2_T(method, double(float, float));',
+ '')
+
+ # Test sizeof(type) cases.
+ def test_sizeof_type(self):
+ self.assert_lint(
+ 'sizeof(int);',
+ 'Using sizeof(type). Use sizeof(varname) instead if possible'
+ ' [runtime/sizeof] [1]')
+ self.assert_lint(
+ 'sizeof(int *);',
+ 'Using sizeof(type). Use sizeof(varname) instead if possible'
+ ' [runtime/sizeof] [1]')
+
+ # Test typedef cases. There was a bug in which cpp_style misidentified a
+ # typedef for a pointer to a function as a C-style cast and produced
+ # false-positive error messages.
+ def test_typedef_for_pointer_to_function(self):
+ self.assert_lint(
+ 'typedef void (*Func)(int x);',
+ '')
+ self.assert_lint(
+ 'typedef void (*Func)(int *x);',
+ '')
+ self.assert_lint(
+ 'typedef void Func(int x);',
+ '')
+ self.assert_lint(
+ 'typedef void Func(int *x);',
+ '')
+
+ def test_include_what_you_use_no_implementation_files(self):
+ code = 'std::vector<int> foo;'
+ self.assertEquals('Add #include <vector> for vector<>'
+ ' [build/include_what_you_use] [4]',
+ self.perform_include_what_you_use(code, 'foo.h'))
+ self.assertEquals('',
+ self.perform_include_what_you_use(code, 'foo.cpp'))
+
+ def test_include_what_you_use(self):
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::vector<int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <map>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <multimap>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <hash_map>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <utility>
+ std::pair<int,int> foo;
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ DECLARE_string(foobar);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ DEFINE_string(foobar, "", "");
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::pair<int,int> foo;
+ ''',
+ 'Add #include <utility> for pair<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ std::vector<int> foo;
+ ''',
+ 'Add #include <vector> for vector<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <vector>
+ std::set<int> foo;
+ ''',
+ 'Add #include <set> for set<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ hash_map<int, int> foobar;
+ ''',
+ 'Add #include <hash_map> for hash_map<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = std::less<int>(0,1);
+ ''',
+ 'Add #include <functional> for less<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = min<int>(0,1);
+ ''',
+ 'Add #include <algorithm> for min [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ 'void a(const string &foobar);',
+ 'Add #include <string> for string [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = swap(0,1);
+ ''',
+ 'Add #include <algorithm> for swap [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = transform(a.begin(), a.end(), b.start(), Foo);
+ ''',
+ 'Add #include <algorithm> for transform '
+ '[build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include "base/foobar.h"
+ bool foobar = min_element(a.begin(), a.end());
+ ''',
+ 'Add #include <algorithm> for min_element '
+ '[build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''foo->swap(0,1);
+ foo.swap(0,1);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include <string>
+ void a(const std::multimap<int,string> &foobar);
+ ''',
+ 'Add #include <map> for multimap<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <queue>
+ void a(const std::priority_queue<int> &foobar);
+ ''',
+ '')
+ self.assert_include_what_you_use(
+ '''#include "base/basictypes.h"
+ #include "base/port.h"
+ #include <assert.h>
+ #include <string>
+ #include <vector>
+ vector<string> hajoa;''', '')
+ self.assert_include_what_you_use(
+ '''#include <string>
+ int i = numeric_limits<int>::max()
+ ''',
+ 'Add #include <limits> for numeric_limits<>'
+ ' [build/include_what_you_use] [4]')
+ self.assert_include_what_you_use(
+ '''#include <limits>
+ int i = numeric_limits<int>::max()
+ ''',
+ '')
+
+ # Test the UpdateIncludeState code path.
+ mock_header_contents = ['#include "blah/foo.h"', '#include "blah/bar.h"']
+ message = self.perform_include_what_you_use(
+ '#include "config.h"\n'
+ '#include "blah/a.h"\n',
+ filename='blah/a.cpp',
+ io=MockIo(mock_header_contents))
+ self.assertEquals(message, '')
+
+ mock_header_contents = ['#include <set>']
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "blah/a.h"
+
+ std::set<int> foo;''',
+ filename='blah/a.cpp',
+ io=MockIo(mock_header_contents))
+ self.assertEquals(message, '')
+
+ # If there's just a .cpp and the header can't be found then it's ok.
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "blah/a.h"
+
+ std::set<int> foo;''',
+ filename='blah/a.cpp')
+ self.assertEquals(message, '')
+
+ # Make sure we find the headers with relative paths.
+ mock_header_contents = ['']
+ message = self.perform_include_what_you_use(
+ '''#include "config.h"
+ #include "%s/a.h"
+
+ std::set<int> foo;''' % os.path.basename(os.getcwd()),
+ filename='a.cpp',
+ io=MockIo(mock_header_contents))
+ self.assertEquals(message, 'Add #include <set> for set<> '
+ '[build/include_what_you_use] [4]')
+
+ def test_files_belong_to_same_module(self):
+ f = cpp_style.files_belong_to_same_module
+ self.assertEquals((True, ''), f('a.cpp', 'a.h'))
+ self.assertEquals((True, ''), f('base/google.cpp', 'base/google.h'))
+ self.assertEquals((True, ''), f('base/google_test.cpp', 'base/google.h'))
+ self.assertEquals((True, ''),
+ f('base/google_unittest.cpp', 'base/google.h'))
+ self.assertEquals((True, ''),
+ f('base/internal/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEquals((True, 'xxx/yyy/'),
+ f('xxx/yyy/base/internal/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEquals((True, 'xxx/yyy/'),
+ f('xxx/yyy/base/google_unittest.cpp',
+ 'base/public/google.h'))
+ self.assertEquals((True, ''),
+ f('base/google_unittest.cpp', 'base/google-inl.h'))
+ self.assertEquals((True, '/home/build/google3/'),
+ f('/home/build/google3/base/google.cpp', 'base/google.h'))
+
+ self.assertEquals((False, ''),
+ f('/home/build/google3/base/google.cpp', 'basu/google.h'))
+ self.assertEquals((False, ''), f('a.cpp', 'b.h'))
+
+ def test_cleanse_line(self):
+ self.assertEquals('int foo = 0; ',
+ cpp_style.cleanse_comments('int foo = 0; // danger!'))
+ self.assertEquals('int o = 0;',
+ cpp_style.cleanse_comments('int /* foo */ o = 0;'))
+ self.assertEquals('foo(int a, int b);',
+ cpp_style.cleanse_comments('foo(int a /* abc */, int b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a, /* name */ b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a /* name */, b);'))
+ self.assertEqual('f(a, b);',
+ cpp_style.cleanse_comments('f(a, /* name */b);'))
+
+ def test_multi_line_comments(self):
+ # missing explicit is bad
+ self.assert_multi_line_lint(
+ r'''int a = 0;
+ /* multi-liner
+ class Foo {
+ Foo(int f); // should cause a lint warning in code
+ }
+ */ ''',
+ '')
+ self.assert_multi_line_lint(
+ r'''/* int a = 0; multi-liner
+ static const int b = 0;''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+ self.assert_multi_line_lint(r''' /* multi-line comment''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+ self.assert_multi_line_lint(r''' // /* comment, but not multi-line''', '')
+
+ def test_multiline_strings(self):
+ multiline_string_error_message = (
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. They\'re '
+ 'ugly and unnecessary, and you should use concatenation instead".'
+ ' [readability/multiline_string] [5]')
+
+ file_path = 'mydir/foo.cpp'
+
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'cpp',
+ ['const char* str = "This is a\\',
+ ' multiline string.";'],
+ error_collector)
+ self.assertEquals(
+ 2, # One per line.
+ error_collector.result_list().count(multiline_string_error_message))
+
+ # Test non-explicit single-argument constructors
+ def test_explicit_single_argument_constructors(self):
+ # missing explicit is bad
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # missing explicit is bad, even with whitespace
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo (int f);
+ };''',
+ ['Extra space before ( in function call [whitespace/parens] [4]',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]'])
+ # missing explicit, with distracting comment, is still bad
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(int f); // simpler than Foo(blargh, blarg)
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # missing explicit, with qualified classname
+ self.assert_multi_line_lint(
+ '''class Qualifier::AnotherOne::Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # structs are caught as well.
+ self.assert_multi_line_lint(
+ '''struct Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # Templatized classes are caught as well.
+ self.assert_multi_line_lint(
+ '''template<typename T> class Foo {
+ Foo(int f);
+ };''',
+ 'Single-argument constructors should be marked explicit.'
+ ' [runtime/explicit] [5]')
+ # proper style is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ explicit Foo(int f);
+ };''',
+ '')
+ # two argument constructor is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(int f, int b);
+ };''',
+ '')
+ # two argument constructor, across two lines, is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(int f,
+ int b);
+ };''',
+ '')
+ # non-constructor (but similar name), is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ aFoo(int f);
+ };''',
+ '')
+ # constructor with void argument is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(void);
+ };''',
+ '')
+ # single argument method is okay
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Bar(int b);
+ };''',
+ '')
+ # comments should be ignored
+ self.assert_multi_line_lint(
+ '''class Foo {
+ // Foo(int f);
+ };''',
+ '')
+ # single argument function following class definition is okay
+ # (okay, it's not actually valid, but we don't want a false positive)
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(int f, int b);
+ };
+ Foo(int f);''',
+ '')
+ # single argument function is okay
+ self.assert_multi_line_lint(
+ '''static Foo(int f);''',
+ '')
+ # single argument copy constructor is okay.
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(const Foo&);
+ };''',
+ '')
+ self.assert_multi_line_lint(
+ '''class Foo {
+ Foo(Foo&);
+ };''',
+ '')
+
+ def test_slash_star_comment_on_single_line(self):
+ self.assert_multi_line_lint(
+ '''/* static */ Foo(int f);''',
+ '')
+ self.assert_multi_line_lint(
+ '''/*/ static */ Foo(int f);''',
+ '')
+ self.assert_multi_line_lint(
+ '''/*/ static Foo(int f);''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+ self.assert_multi_line_lint(
+ ''' /*/ static Foo(int f);''',
+ 'Could not find end of multi-line comment'
+ ' [readability/multiline_comment] [5]')
+ self.assert_multi_line_lint(
+ ''' /**/ static Foo(int f);''',
+ '')
+
+ # Test suspicious usage of "if" like this:
+ # if (a == b) {
+ # DoSomething();
+ # } if (a == c) { // Should be "else if".
+ # DoSomething(); // This gets called twice if a == b && a == c.
+ # }
+ def test_suspicious_usage_of_if(self):
+ self.assert_lint(
+ ' if (a == b) {',
+ '')
+ self.assert_lint(
+ ' } if (a == b) {',
+ 'Did you mean "else if"? If not, start a new line for "if".'
+ ' [readability/braces] [4]')
+
+ # Test suspicious usage of memset. Specifically, a 0
+ # as the final argument is almost certainly an error.
+ def test_suspicious_usage_of_memset(self):
+ # Normal use is okay.
+ self.assert_lint(
+ ' memset(buf, 0, sizeof(buf))',
+ '')
+
+ # A 0 as the final argument is almost certainly an error.
+ self.assert_lint(
+ ' memset(buf, sizeof(buf), 0)',
+ 'Did you mean "memset(buf, 0, sizeof(buf))"?'
+ ' [runtime/memset] [4]')
+ self.assert_lint(
+ ' memset(buf, xsize * ysize, 0)',
+ 'Did you mean "memset(buf, 0, xsize * ysize)"?'
+ ' [runtime/memset] [4]')
+
+ # There is legitimate test code that uses this form.
+ # This is okay since the second argument is a literal.
+ self.assert_lint(
+ " memset(buf, 'y', 0)",
+ '')
+ self.assert_lint(
+ ' memset(buf, 4, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, -1, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, 0xF1, 0)',
+ '')
+ self.assert_lint(
+ ' memset(buf, 0xcd, 0)',
+ '')
+
+ def test_check_posix_threading(self):
+ self.assert_lint('sctime_r()', '')
+ self.assert_lint('strtok_r()', '')
+ self.assert_lint(' strtok_r(foo, ba, r)', '')
+ self.assert_lint('brand()', '')
+ self.assert_lint('_rand()', '')
+ self.assert_lint('.rand()', '')
+ self.assert_lint('>rand()', '')
+ self.assert_lint('rand()',
+ 'Consider using rand_r(...) instead of rand(...)'
+ ' for improved thread safety.'
+ ' [runtime/threadsafe_fn] [2]')
+ self.assert_lint('strtok()',
+ 'Consider using strtok_r(...) '
+ 'instead of strtok(...)'
+ ' for improved thread safety.'
+ ' [runtime/threadsafe_fn] [2]')
+
+ # Test potential format string bugs like printf(foo).
+ def test_format_strings(self):
+ self.assert_lint('printf("foo")', '')
+ self.assert_lint('printf("foo: %s", foo)', '')
+ self.assert_lint('DocidForPrintf(docid)', '') # Should not trigger.
+ self.assert_lint(
+ 'printf(foo)',
+ 'Potential format string bug. Do printf("%s", foo) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'printf(foo.c_str())',
+ 'Potential format string bug. '
+ 'Do printf("%s", foo.c_str()) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'printf(foo->c_str())',
+ 'Potential format string bug. '
+ 'Do printf("%s", foo->c_str()) instead.'
+ ' [runtime/printf] [4]')
+ self.assert_lint(
+ 'StringPrintf(foo)',
+ 'Potential format string bug. Do StringPrintf("%s", foo) instead.'
+ ' [runtime/printf] [4]')
+
+ # Variable-length arrays are not permitted.
+ def test_variable_length_array_detection(self):
+ errmsg = ('Do not use variable-length arrays. Use an appropriately named '
+ "('k' followed by CamelCase) compile-time constant for the size."
+ ' [runtime/arrays] [1]')
+
+ self.assert_lint('int a[any_old_variable];', errmsg)
+ self.assert_lint('int doublesize[some_var * 2];', errmsg)
+ self.assert_lint('int a[afunction()];', errmsg)
+ self.assert_lint('int a[function(kMaxFooBars)];', errmsg)
+ self.assert_lint('bool aList[items_->size()];', errmsg)
+ self.assert_lint('namespace::Type buffer[len+1];', errmsg)
+
+ self.assert_lint('int a[64];', '')
+ self.assert_lint('int a[0xFF];', '')
+ self.assert_lint('int first[256], second[256];', '')
+ self.assert_lint('int arrayName[kCompileTimeConstant];', '')
+ self.assert_lint('char buf[somenamespace::kBufSize];', '')
+ self.assert_lint('int arrayName[ALL_CAPS];', '')
+ self.assert_lint('AClass array1[foo::bar::ALL_CAPS];', '')
+ self.assert_lint('int a[kMaxStrLen + 1];', '')
+ self.assert_lint('int a[sizeof(foo)];', '')
+ self.assert_lint('int a[sizeof(*foo)];', '')
+ self.assert_lint('int a[sizeof foo];', '')
+ self.assert_lint('int a[sizeof(struct Foo)];', '')
+ self.assert_lint('int a[128 - sizeof(const bar)];', '')
+ self.assert_lint('int a[(sizeof(foo) * 4)];', '')
+ self.assert_lint('int a[(arraysize(fixed_size_array)/2) << 1];', 'Missing spaces around / [whitespace/operators] [3]')
+ self.assert_lint('delete a[some_var];', '')
+ self.assert_lint('return a[some_var];', '')
+
+ # Brace usage
+ def test_braces(self):
+ # Braces shouldn't be followed by a ; unless they're defining a struct
+ # or initializing an array
+ self.assert_lint('int a[3] = { 1, 2, 3 };', '')
+ self.assert_lint(
+ '''const int foo[] =
+ {1, 2, 3 };''',
+ '')
+ # For single line, unmatched '}' with a ';' is ignored (not enough context)
+ self.assert_multi_line_lint(
+ '''int a[3] = { 1,
+ 2,
+ 3 };''',
+ '')
+ self.assert_multi_line_lint(
+ '''int a[2][3] = { { 1, 2 },
+ { 3, 4 } };''',
+ '')
+ self.assert_multi_line_lint(
+ '''int a[2][3] =
+ { { 1, 2 },
+ { 3, 4 } };''',
+ '')
+
+ # CHECK/EXPECT_TRUE/EXPECT_FALSE replacements
+ def test_check_check(self):
+ self.assert_lint('CHECK(x == 42)',
+ 'Consider using CHECK_EQ instead of CHECK(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x != 42)',
+ 'Consider using CHECK_NE instead of CHECK(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x >= 42)',
+ 'Consider using CHECK_GE instead of CHECK(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x > 42)',
+ 'Consider using CHECK_GT instead of CHECK(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x <= 42)',
+ 'Consider using CHECK_LE instead of CHECK(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('CHECK(x < 42)',
+ 'Consider using CHECK_LT instead of CHECK(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('DCHECK(x == 42)',
+ 'Consider using DCHECK_EQ instead of DCHECK(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x != 42)',
+ 'Consider using DCHECK_NE instead of DCHECK(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x >= 42)',
+ 'Consider using DCHECK_GE instead of DCHECK(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x > 42)',
+ 'Consider using DCHECK_GT instead of DCHECK(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x <= 42)',
+ 'Consider using DCHECK_LE instead of DCHECK(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint('DCHECK(x < 42)',
+ 'Consider using DCHECK_LT instead of DCHECK(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ 'EXPECT_TRUE("42" == x)',
+ 'Consider using EXPECT_EQ instead of EXPECT_TRUE(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE("42" != x)',
+ 'Consider using EXPECT_NE instead of EXPECT_TRUE(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE(+42 >= x)',
+ 'Consider using EXPECT_GE instead of EXPECT_TRUE(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(-42 > x)',
+ 'Consider using EXPECT_GT_M instead of EXPECT_TRUE_M(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(42U <= x)',
+ 'Consider using EXPECT_LE_M instead of EXPECT_TRUE_M(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE_M(42L < x)',
+ 'Consider using EXPECT_LT_M instead of EXPECT_TRUE_M(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ 'EXPECT_FALSE(x == 42)',
+ 'Consider using EXPECT_NE instead of EXPECT_FALSE(a == b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_FALSE(x != 42)',
+ 'Consider using EXPECT_EQ instead of EXPECT_FALSE(a != b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_FALSE(x >= 42)',
+ 'Consider using EXPECT_LT instead of EXPECT_FALSE(a >= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE(x > 42)',
+ 'Consider using ASSERT_LE instead of ASSERT_FALSE(a > b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE(x <= 42)',
+ 'Consider using ASSERT_GT instead of ASSERT_FALSE(a <= b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'ASSERT_FALSE_M(x < 42)',
+ 'Consider using ASSERT_GE_M instead of ASSERT_FALSE_M(a < b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('CHECK(some_iterator == obj.end())', '')
+ self.assert_lint('EXPECT_TRUE(some_iterator == obj.end())', '')
+ self.assert_lint('EXPECT_FALSE(some_iterator == obj.end())', '')
+
+ self.assert_lint('CHECK(CreateTestFile(dir, (1 << 20)));', '')
+ self.assert_lint('CHECK(CreateTestFile(dir, (1 >> 20)));', '')
+
+ self.assert_lint('CHECK(x<42)',
+ ['Missing spaces around <'
+ ' [whitespace/operators] [3]',
+ 'Consider using CHECK_LT instead of CHECK(a < b)'
+ ' [readability/check] [2]'])
+ self.assert_lint('CHECK(x>42)',
+ 'Consider using CHECK_GT instead of CHECK(a > b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint(
+ ' EXPECT_TRUE(42 < x) // Random comment.',
+ 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+ ' [readability/check] [2]')
+ self.assert_lint(
+ 'EXPECT_TRUE( 42 < x )',
+ ['Extra space after ( in function call'
+ ' [whitespace/parens] [4]',
+ 'Consider using EXPECT_LT instead of EXPECT_TRUE(a < b)'
+ ' [readability/check] [2]'])
+ self.assert_lint(
+ 'CHECK("foo" == "foo")',
+ 'Consider using CHECK_EQ instead of CHECK(a == b)'
+ ' [readability/check] [2]')
+
+ self.assert_lint('CHECK_EQ("foo", "foo")', '')
+
+ def test_brace_at_begin_of_line(self):
+ self.assert_lint('{',
+ 'This { should be at the end of the previous line'
+ ' [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ '#endif\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition) {',
+ '')
+ self.assert_multi_line_lint(
+ ' MACRO1(macroArg) {',
+ '')
+ self.assert_multi_line_lint(
+ 'ACCESSOR_GETTER(MessageEventPorts) {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() const {',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'int foo() const\n'
+ '{\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition\n'
+ ' && condition2\n'
+ ' && condition3) {\n'
+ '}\n',
+ '')
+
+ def test_mismatching_spaces_in_parens(self):
+ self.assert_lint('if (foo ) {', 'Mismatching spaces inside () in if'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('switch ( foo) {', 'Mismatching spaces inside () in switch'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for (foo; ba; bar ) {', 'Mismatching spaces inside () in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for ((foo); (ba); (bar) ) {', 'Mismatching spaces inside () in for'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('for (; foo; bar) {', '')
+ self.assert_lint('for (; (foo); (bar)) {', '')
+ self.assert_lint('for ( ; foo; bar) {', '')
+ self.assert_lint('for ( ; (foo); (bar)) {', '')
+ self.assert_lint('for ( ; foo; bar ) {', '')
+ self.assert_lint('for ( ; (foo); (bar) ) {', '')
+ self.assert_lint('for (foo; bar; ) {', '')
+ self.assert_lint('for ((foo); (bar); ) {', '')
+ self.assert_lint('foreach (foo, foos ) {', 'Mismatching spaces inside () in foreach'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('foreach ( foo, foos) {', 'Mismatching spaces inside () in foreach'
+ ' [whitespace/parens] [5]')
+ self.assert_lint('while ( foo ) {', 'Should have zero or one spaces inside'
+ ' ( and ) in while [whitespace/parens] [5]')
+
+ def test_spacing_for_fncall(self):
+ self.assert_lint('if (foo) {', '')
+ self.assert_lint('for (foo;bar;baz) {', '')
+ self.assert_lint('foreach (foo, foos) {', '')
+ self.assert_lint('while (foo) {', '')
+ self.assert_lint('switch (foo) {', '')
+ self.assert_lint('new (RenderArena()) RenderInline(document())', '')
+ self.assert_lint('foo( bar)', 'Extra space after ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('foobar( \\', '')
+ self.assert_lint('foobar( \\', '')
+ self.assert_lint('( a + b)', 'Extra space after ('
+ ' [whitespace/parens] [2]')
+ self.assert_lint('((a+b))', '')
+ self.assert_lint('foo (foo)', 'Extra space before ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('typedef foo (*foo)(foo)', '')
+ self.assert_lint('typedef foo (*foo12bar_)(foo)', '')
+ self.assert_lint('typedef foo (Foo::*bar)(foo)', '')
+ self.assert_lint('foo (Foo::*bar)(',
+ 'Extra space before ( in function call'
+ ' [whitespace/parens] [4]')
+ self.assert_lint('typedef foo (Foo::*bar)(', '')
+ self.assert_lint('(foo)(bar)', '')
+ self.assert_lint('Foo (*foo)(bar)', '')
+ self.assert_lint('Foo (*foo)(Bar bar,', '')
+ self.assert_lint('char (*p)[sizeof(foo)] = &foo', '')
+ self.assert_lint('char (&ref)[sizeof(foo)] = &foo', '')
+ self.assert_lint('const char32 (*table[])[6];', '')
+
+ def test_spacing_before_braces(self):
+ self.assert_lint('if (foo){', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('for{', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('for {', '')
+ self.assert_lint('EXPECT_DEBUG_DEATH({', '')
+
+ def test_spacing_around_else(self):
+ self.assert_lint('}else {', 'Missing space before else'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('} else{', 'Missing space before {'
+ ' [whitespace/braces] [5]')
+ self.assert_lint('} else {', '')
+ self.assert_lint('} else if', '')
+
+ def test_spacing_for_binary_ops(self):
+ self.assert_lint('if (foo<=bar) {', 'Missing spaces around <='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar->baz) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('if (foo<bar->bar) {', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('typedef hash_map<Foo, Bar', 'Missing spaces around <'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('typedef hash_map<FoooooType, BaaaaarType,', '')
+ self.assert_lint('a<Foo> t+=b;', 'Missing spaces around +='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t-=b;', 'Missing spaces around -='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t*=b;', 'Missing spaces around *='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t/=b;', 'Missing spaces around /='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t|=b;', 'Missing spaces around |='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t&=b;', 'Missing spaces around &='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=b;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=b;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t>>=&b|c;', 'Missing spaces around >>='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t<<=*b/c;', 'Missing spaces around <<='
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo> t -= b;', '')
+ self.assert_lint('a<Foo> t += b;', '')
+ self.assert_lint('a<Foo*> t *= b;', '')
+ self.assert_lint('a<Foo*> t /= b;', '')
+ self.assert_lint('a<Foo*> t |= b;', '')
+ self.assert_lint('a<Foo*> t &= b;', '')
+ self.assert_lint('a<Foo*> t <<= b;', '')
+ self.assert_lint('a<Foo*> t >>= b;', '')
+ self.assert_lint('a<Foo*> t >>= &b|c;', 'Missing spaces around |'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= *b/c;', 'Missing spaces around /'
+ ' [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b/c; //Test', [
+ 'Should have a space between // and comment '
+ '[whitespace/comments] [4]', 'Missing'
+ ' spaces around / [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b||c; //Test', ['One space before end'
+ ' of line comments [whitespace/comments] [5]',
+ 'Should have a space between // and comment '
+ '[whitespace/comments] [4]',
+ 'Missing spaces around || [whitespace/operators] [3]'])
+ self.assert_lint('a<Foo*> t <<= b&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&&c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b&&*c; // Test', 'Missing spaces around'
+ ' && [whitespace/operators] [3]')
+ self.assert_lint('a<Foo*> t <<= b && *c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b && &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= b || &c; /*Test', 'Complex multi-line '
+ '/*...*/-style comment found. Lint may give bogus '
+ 'warnings. Consider replacing these with //-style'
+ ' comments, with #if 0...#endif, or with more clearly'
+ ' structured multi-line comments. [readability/multiline_comment] [5]')
+ self.assert_lint('a<Foo&> t <<= &b | &c;', '')
+ self.assert_lint('a<Foo*> t <<= &b & &c; // Test', '')
+ self.assert_lint('a<Foo*> t <<= *b / &c; // Test', '')
+ self.assert_lint('if (a=b == 1)', 'Missing spaces around = [whitespace/operators] [4]')
+ self.assert_lint('a = 1<<20', 'Missing spaces around << [whitespace/operators] [3]')
+ self.assert_lint('if (a = b == 1)', '')
+ self.assert_lint('a = 1 << 20', '')
+ self.assert_multi_line_lint('#include "config.h"\n#include <sys/io.h>\n',
+ '')
+ self.assert_multi_line_lint('#include "config.h"\n#import <foo/bar.h>\n',
+ '')
+
+ def test_spacing_before_last_semicolon(self):
+ self.assert_lint('call_function() ;',
+ 'Extra space before last semicolon. If this should be an '
+ 'empty statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('while (true) ;',
+ 'Extra space before last semicolon. If this should be an '
+ 'empty statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('default:;',
+ 'Semicolon defining empty statement. Use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint(' ;',
+ 'Line contains only semicolon. If this should be an empty '
+ 'statement, use { } instead.'
+ ' [whitespace/semicolon] [5]')
+ self.assert_lint('for (int i = 0; ;', '')
+
+ # Static or global STL strings.
+ def test_static_or_global_stlstrings(self):
+ self.assert_lint('string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('string kFoo = "hello"; // English',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char kFoo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('static string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "static char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('static const string foo;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "static const char foo[]".'
+ ' [runtime/string] [4]')
+ self.assert_lint('string Foo::bar;',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char Foo::bar[]".'
+ ' [runtime/string] [4]')
+ # Rare case.
+ self.assert_lint('string foo("foobar");',
+ 'For a static/global string constant, use a C style '
+ 'string instead: "char foo[]".'
+ ' [runtime/string] [4]')
+ # Should not catch local or member variables.
+ self.assert_lint(' string foo', '')
+ # Should not catch functions.
+ self.assert_lint('string EmptyString() { return ""; }', '')
+ self.assert_lint('string EmptyString () { return ""; }', '')
+ self.assert_lint('string VeryLongNameFunctionSometimesEndsWith(\n'
+ ' VeryLongNameType very_long_name_variable) {}', '')
+ self.assert_lint('template<>\n'
+ 'string FunctionTemplateSpecialization<SomeType>(\n'
+ ' int x) { return ""; }', '')
+ self.assert_lint('template<>\n'
+ 'string FunctionTemplateSpecialization<vector<A::B>* >(\n'
+ ' int x) { return ""; }', '')
+
+ # should not catch methods of template classes.
+ self.assert_lint('string Class<Type>::Method() const\n'
+ '{\n'
+ ' return "";\n'
+ '}\n', '')
+ self.assert_lint('string Class<Type>::Method(\n'
+ ' int arg) const\n'
+ '{\n'
+ ' return "";\n'
+ '}\n', '')
+
+ def test_no_spaces_in_function_calls(self):
+ self.assert_lint('TellStory(1, 3);',
+ '')
+ self.assert_lint('TellStory(1, 3 );',
+ 'Extra space before )'
+ ' [whitespace/parens] [2]')
+ self.assert_lint('TellStory(1 /* wolf */, 3 /* pigs */);',
+ '')
+ self.assert_multi_line_lint('#endif\n );',
+ '')
+
+ def test_two_spaces_between_code_and_comments(self):
+ self.assert_lint('} // namespace foo',
+ '')
+ self.assert_lint('}// namespace foo',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('printf("foo"); // Outside quotes.',
+ '')
+ self.assert_lint('int i = 0; // Having one space is fine.','')
+ self.assert_lint('int i = 0; // Having two spaces is bad.',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('int i = 0; // Having three spaces is bad.',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+ self.assert_lint('// Top level comment', '')
+ self.assert_lint(' // Line starts with four spaces.', '')
+ self.assert_lint('foo();\n'
+ '{ // A scope is opening.', '')
+ self.assert_lint(' foo();\n'
+ ' { // An indented scope is opening.', '')
+ self.assert_lint('if (foo) { // not a pure scope',
+ '')
+ self.assert_lint('printf("// In quotes.")', '')
+ self.assert_lint('printf("\\"%s // In quotes.")', '')
+ self.assert_lint('printf("%s", "// In quotes.")', '')
+
+ def test_space_after_comment_marker(self):
+ self.assert_lint('//', '')
+ self.assert_lint('//x', 'Should have a space between // and comment'
+ ' [whitespace/comments] [4]')
+ self.assert_lint('// x', '')
+ self.assert_lint('//----', '')
+ self.assert_lint('//====', '')
+ self.assert_lint('//////', '')
+ self.assert_lint('////// x', '')
+ self.assert_lint('/// x', '')
+ self.assert_lint('////x', 'Should have a space between // and comment'
+ ' [whitespace/comments] [4]')
+
+ def test_newline_at_eof(self):
+ def do_test(self, data, is_missing_eof):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp', data.split('\n'),
+ error_collector)
+ # The warning appears only once.
+ self.assertEquals(
+ int(is_missing_eof),
+ error_collector.results().count(
+ 'Could not find a newline character at the end of the file.'
+ ' [whitespace/ending_newline] [5]'))
+
+ do_test(self, '// Newline\n// at EOF\n', False)
+ do_test(self, '// No newline\n// at EOF', True)
+
+ def test_invalid_utf8(self):
+ def do_test(self, raw_bytes, has_invalid_utf8):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp',
+ unicode(raw_bytes, 'utf8', 'replace').split('\n'),
+ error_collector)
+ # The warning appears only once.
+ self.assertEquals(
+ int(has_invalid_utf8),
+ error_collector.results().count(
+ 'Line contains invalid UTF-8'
+ ' (or Unicode replacement character).'
+ ' [readability/utf8] [5]'))
+
+ do_test(self, 'Hello world\n', False)
+ do_test(self, '\xe9\x8e\xbd\n', False)
+ do_test(self, '\xe9x\x8e\xbd\n', True)
+ # This is the encoding of the replacement character itself (which
+ # you can see by evaluating codecs.getencoder('utf8')(u'\ufffd')).
+ do_test(self, '\xef\xbf\xbd\n', True)
+
+ def test_is_blank_line(self):
+ self.assert_(cpp_style.is_blank_line(''))
+ self.assert_(cpp_style.is_blank_line(' '))
+ self.assert_(cpp_style.is_blank_line(' \t\r\n'))
+ self.assert_(not cpp_style.is_blank_line('int a;'))
+ self.assert_(not cpp_style.is_blank_line('{'))
+
+ def test_blank_lines_check(self):
+ self.assert_blank_lines_check(['{\n', '\n', '\n', '}\n'], 1, 1)
+ self.assert_blank_lines_check([' if (foo) {\n', '\n', ' }\n'], 1, 1)
+ self.assert_blank_lines_check(
+ ['\n', '// {\n', '\n', '\n', '// Comment\n', '{\n', '}\n'], 0, 0)
+ self.assert_blank_lines_check(['\n', 'run("{");\n', '\n'], 0, 0)
+ self.assert_blank_lines_check(['\n', ' if (foo) { return 0; }\n', '\n'], 0, 0)
+
+ def test_allow_blank_line_before_closing_namespace(self):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['namespace {', '', '} // namespace'],
+ error_collector)
+ self.assertEquals(0, error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+ def test_allow_blank_line_before_if_else_chain(self):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['if (hoge) {',
+ '', # No warning
+ '} else if (piyo) {',
+ '', # No warning
+ '} else if (piyopiyo) {',
+ ' hoge = true;', # No warning
+ '} else {',
+ '', # Warning on this line
+ '}'],
+ error_collector)
+ self.assertEquals(1, error_collector.results().count(
+ 'Blank line at the end of a code block. Is this needed?'
+ ' [whitespace/blank_line] [3]'))
+
+ def test_else_on_same_line_as_closing_braces(self):
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data('foo.cpp', 'cpp',
+ ['if (hoge) {',
+ '',
+ '}',
+ ' else {' # Warning on this line
+ '',
+ '}'],
+ error_collector)
+ self.assertEquals(1, error_collector.results().count(
+ 'An else should appear on the same line as the preceding }'
+ ' [whitespace/newline] [4]'))
+
+ def test_else_clause_not_on_same_line_as_else(self):
+ self.assert_lint(' else DoSomethingElse();',
+ 'Else clause should never be on same line as else '
+ '(use 2 lines) [whitespace/newline] [4]')
+ self.assert_lint(' else ifDoSomethingElse();',
+ 'Else clause should never be on same line as else '
+ '(use 2 lines) [whitespace/newline] [4]')
+ self.assert_lint(' else if (blah) {', '')
+ self.assert_lint(' variable_ends_in_else = true;', '')
+
+ def test_comma(self):
+ self.assert_lint('a = f(1,2);',
+ 'Missing space after , [whitespace/comma] [3]')
+ self.assert_lint('int tmp=a,a=b,b=tmp;',
+ ['Missing spaces around = [whitespace/operators] [4]',
+ 'Missing space after , [whitespace/comma] [3]'])
+ self.assert_lint('f(a, /* name */ b);', '')
+ self.assert_lint('f(a, /* name */b);', '')
+
+ def test_pointer_reference_marker_location(self):
+ self.assert_lint('int* b;', '', 'foo.cpp')
+ self.assert_lint('int *b;',
+ 'Declaration has space between type name and * in int *b [whitespace/declaration] [3]',
+ 'foo.cpp')
+ self.assert_lint('return *b;', '', 'foo.cpp')
+ self.assert_lint('int *b;', '', 'foo.c')
+ self.assert_lint('int* b;',
+ 'Declaration has space between * and variable name in int* b [whitespace/declaration] [3]',
+ 'foo.c')
+ self.assert_lint('int& b;', '', 'foo.cpp')
+ self.assert_lint('int &b;',
+ 'Declaration has space between type name and & in int &b [whitespace/declaration] [3]',
+ 'foo.cpp')
+ self.assert_lint('return &b;', '', 'foo.cpp')
+
+ def test_indent(self):
+ self.assert_lint('static int noindent;', '')
+ self.assert_lint(' int fourSpaceIndent;', '')
+ self.assert_lint(' int oneSpaceIndent;',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint(' int threeSpaceIndent;',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint(' char* oneSpaceIndent = "public:";',
+ 'Weird number of spaces at line-start. '
+ 'Are you using a 4-space indent? [whitespace/indent] [3]')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+
+ def test_label(self):
+ self.assert_lint('public:',
+ 'Labels should always be indented at least one space. '
+ 'If this is a member-initializer list in a constructor, '
+ 'the colon should be on the line after the definition '
+ 'header. [whitespace/labels] [4]')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+ self.assert_lint(' public:', '')
+
+ def test_not_alabel(self):
+ self.assert_lint('MyVeryLongNamespace::MyVeryLongClassName::', '')
+
+ def test_tab(self):
+ self.assert_lint('\tint a;',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+ self.assert_lint('int a = 5;\t// set a to 5',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+
+ def test_unnamed_namespaces_in_headers(self):
+ self.assert_language_rules_check(
+ 'foo.h', 'namespace {',
+ 'Do not use unnamed namespaces in header files. See'
+ ' http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information. [build/namespaces] [4]')
+ # namespace registration macros are OK.
+ self.assert_language_rules_check('foo.h', 'namespace { \\', '')
+ # named namespaces are OK.
+ self.assert_language_rules_check('foo.h', 'namespace foo {', '')
+ self.assert_language_rules_check('foo.h', 'namespace foonamespace {', '')
+ self.assert_language_rules_check('foo.cpp', 'namespace {', '')
+ self.assert_language_rules_check('foo.cpp', 'namespace foo {', '')
+
+ def test_build_class(self):
+ # Test that the linter can parse to the end of class definitions,
+ # and that it will report when it can't.
+ # Use multi-line linter because it performs the ClassState check.
+ self.assert_multi_line_lint(
+ 'class Foo {',
+ 'Failed to find complete declaration of class Foo'
+ ' [build/class] [5]')
+ # Don't warn on forward declarations of various types.
+ self.assert_multi_line_lint(
+ 'class Foo;',
+ '')
+ self.assert_multi_line_lint(
+ '''struct Foo*
+ foo = NewFoo();''',
+ '')
+ # Here is an example where the linter gets confused, even though
+ # the code doesn't violate the style guide.
+ self.assert_multi_line_lint(
+ '''class Foo
+ #ifdef DERIVE_FROM_GOO
+ : public Goo {
+ #else
+ : public Hoo {
+ #endif
+ };''',
+ 'Failed to find complete declaration of class Foo'
+ ' [build/class] [5]')
+
+ def test_build_end_comment(self):
+ # The crosstool compiler we currently use will fail to compile the
+ # code in this test, so we might consider removing the lint check.
+ self.assert_lint('#endif Not a comment',
+ 'Uncommented text after #endif is non-standard.'
+ ' Use a comment.'
+ ' [build/endif_comment] [5]')
+
+ def test_build_forward_decl(self):
+ # The crosstool compiler we currently use will fail to compile the
+ # code in this test, so we might consider removing the lint check.
+ self.assert_lint('class Foo::Goo;',
+ 'Inner-style forward declarations are invalid.'
+ ' Remove this line.'
+ ' [build/forward_decl] [5]')
+
+ def test_build_header_guard(self):
+ file_path = 'mydir/Foo.h'
+
+ # We can't rely on our internal stuff to get a sane path on the open source
+ # side of things, so just parse out the suggested header guard. This
+ # doesn't let us test the suggested header guard itself, but it does let us
+ # exercise all the other header guard checks.
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h', [], error_collector)
+ expected_guard = ''
+ matcher = re.compile(
+ 'No \#ifndef header guard found\, suggested CPP variable is\: ([A-Za-z_0-9]+) ')
+ for error in error_collector.result_list():
+ matches = matcher.match(error)
+ if matches:
+ expected_guard = matches.group(1)
+ break
+
+ # Make sure we extracted something for our header guard.
+ self.assertNotEqual(expected_guard, '')
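+        # The exact guard depends on cpp_style's path handling (it may be
+        # something like 'Foo_h' or 'MYDIR_FOO_H_'; this is only an assumption
+        # for illustration), which is why it is parsed out above rather than
+        # hard-coded.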
+
+ # Wrong guard
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef FOO_H', '#define FOO_H'], error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(
+ '#ifndef header guard has wrong style, please use: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # No define
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard], error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(
+ 'No #ifndef header guard found, suggested CPP variable is: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # Mismatched define
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard,
+ '#define FOO_H'],
+ error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(
+ 'No #ifndef header guard found, suggested CPP variable is: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ # No header guard errors
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef %s' % expected_guard,
+ '#define %s' % expected_guard,
+ '#endif // %s' % expected_guard],
+ error_collector)
+ for line in error_collector.result_list():
+ if line.find('build/header_guard') != -1:
+ self.fail('Unexpected error: %s' % line)
+
+ # Completely incorrect header guard
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'h',
+ ['#ifndef FOO',
+ '#define FOO',
+ '#endif // FOO'],
+ error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(
+ '#ifndef header guard has wrong style, please use: %s'
+ ' [build/header_guard] [5]' % expected_guard),
+ error_collector.result_list())
+
+ def test_build_printf_format(self):
+ self.assert_lint(
+ r'printf("\%%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'snprintf(buffer, sizeof(buffer), "\[%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'fprintf(file, "\(%d", value);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ self.assert_lint(
+ r'vsnprintf(buffer, sizeof(buffer), "\\\{%d", ap);',
+ '%, [, (, and { are undefined character escapes. Unescape them.'
+ ' [build/printf_format] [3]')
+
+ # Don't warn if double-slash precedes the symbol
+ self.assert_lint(r'printf("\\%%%d", value);',
+ '')
+
+ def test_runtime_printf_format(self):
+ self.assert_lint(
+ r'fprintf(file, "%q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'aprintf(file, "The number is %12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "%-12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "%+12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+ self.assert_lint(
+ r'printf(file, "The number is" "% 12q", value);',
+ '%q in format strings is deprecated. Use %ll instead.'
+ ' [runtime/printf_format] [3]')
+
+        self.assert_lint(
+            r'snprintf(file, "Never mix %d and %1$d parameters!", value);',
+ '%N$ formats are unconventional. Try rewriting to avoid them.'
+ ' [runtime/printf_format] [2]')
+
+ def assert_lintLogCodeOnError(self, code, expected_message):
+ # Special assert_lint which logs the input code on error.
+ result = self.perform_single_line_lint(code, 'foo.cpp')
+ if result != expected_message:
+ self.fail('For code: "%s"\nGot: "%s"\nExpected: "%s"'
+ % (code, result, expected_message))
+
+ def test_build_storage_class(self):
+ qualifiers = [None, 'const', 'volatile']
+ signs = [None, 'signed', 'unsigned']
+ types = ['void', 'char', 'int', 'float', 'double',
+ 'schar', 'int8', 'uint8', 'int16', 'uint16',
+ 'int32', 'uint32', 'int64', 'uint64']
+ storage_classes = ['auto', 'extern', 'register', 'static', 'typedef']
+
+ build_storage_class_error_message = (
+ 'Storage class (static, extern, typedef, etc) should be first.'
+ ' [build/storage_class] [5]')
+
+ # Some explicit cases. Legal in C++, deprecated in C99.
+ self.assert_lint('const int static foo = 5;',
+ build_storage_class_error_message)
+
+ self.assert_lint('char static foo;',
+ build_storage_class_error_message)
+
+ self.assert_lint('double const static foo = 2.0;',
+ build_storage_class_error_message)
+
+ self.assert_lint('uint64 typedef unsignedLongLong;',
+ build_storage_class_error_message)
+
+ self.assert_lint('int register foo = 0;',
+ build_storage_class_error_message)
+
+ # Since there are a very large number of possibilities, randomly
+ # construct declarations.
+ # Make sure that the declaration is logged if there's an error.
+ # Seed generator with an integer for absolute reproducibility.
+ random.seed(25)
+ for unused_i in range(10):
+ # Build up random list of non-storage-class declaration specs.
+ other_decl_specs = [random.choice(qualifiers), random.choice(signs),
+ random.choice(types)]
+ # remove None
+ other_decl_specs = filter(lambda x: x is not None, other_decl_specs)
+
+ # shuffle
+ random.shuffle(other_decl_specs)
+
+ # insert storage class after the first
+ storage_class = random.choice(storage_classes)
+ insertion_point = random.randint(1, len(other_decl_specs))
+ decl_specs = (other_decl_specs[0:insertion_point]
+ + [storage_class]
+ + other_decl_specs[insertion_point:])
+
+ self.assert_lintLogCodeOnError(
+ ' '.join(decl_specs) + ';',
+ build_storage_class_error_message)
+
+ # but no error if storage class is first
+ self.assert_lintLogCodeOnError(
+ storage_class + ' ' + ' '.join(other_decl_specs),
+ '')
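+        # Each iteration above lints a declaration whose storage class follows
+        # at least one other specifier (for example 'const static int;') and
+        # expects the warning, then the storage-class-first form and expects no
+        # warning. The concrete declarations depend on the seeded PRNG; the
+        # example is illustrative only.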
+
+ def test_legal_copyright(self):
+ legal_copyright_message = (
+ 'No copyright message found. '
+ 'You should have a line: "Copyright [year] <Copyright Owner>"'
+ ' [legal/copyright] [5]')
+
+ copyright_line = '// Copyright 2008 Google Inc. All Rights Reserved.'
+
+ file_path = 'mydir/googleclient/foo.cpp'
+
+ # There should be a copyright message in the first 10 lines
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'cpp', [], error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(legal_copyright_message))
+
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(
+ file_path, 'cpp',
+ ['' for unused_i in range(10)] + [copyright_line],
+ error_collector)
+ self.assertEquals(
+ 1,
+ error_collector.result_list().count(legal_copyright_message))
+
+ # Test that warning isn't issued if Copyright line appears early enough.
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(file_path, 'cpp', [copyright_line], error_collector)
+ for message in error_collector.result_list():
+ if message.find('legal/copyright') != -1:
+ self.fail('Unexpected error: %s' % message)
+
+ error_collector = ErrorCollector(self.assert_)
+ self.process_file_data(
+ file_path, 'cpp',
+ ['' for unused_i in range(9)] + [copyright_line],
+ error_collector)
+ for message in error_collector.result_list():
+ if message.find('legal/copyright') != -1:
+ self.fail('Unexpected error: %s' % message)
+
+ def test_invalid_increment(self):
+ self.assert_lint('*count++;',
+ 'Changing pointer instead of value (or unused value of '
+ 'operator*). [runtime/invalid_increment] [5]')
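+        # Background (illustrative, not original test data): '*count++;'
+        # increments the pointer and discards the dereferenced value, because
+        # postfix '++' binds tighter than unary '*'; '(*count)++;' is the
+        # likely intent.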
+
+
+class CleansedLinesTest(unittest.TestCase):
+ def test_init(self):
+ lines = ['Line 1',
+ 'Line 2',
+ 'Line 3 // Comment test',
+ 'Line 4 "foo"']
+
+ clean_lines = cpp_style.CleansedLines(lines)
+ self.assertEquals(lines, clean_lines.raw_lines)
+ self.assertEquals(4, clean_lines.num_lines())
+
+ self.assertEquals(['Line 1',
+ 'Line 2',
+ 'Line 3 ',
+ 'Line 4 "foo"'],
+ clean_lines.lines)
+
+ self.assertEquals(['Line 1',
+ 'Line 2',
+ 'Line 3 ',
+ 'Line 4 ""'],
+ clean_lines.elided)
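+        # As the data above shows, 'lines' only has comments stripped, while
+        # 'elided' additionally collapses string contents ('"foo"' becomes
+        # '""'); presumably the elided form is what most checks operate on.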
+
+ def test_init_empty(self):
+ clean_lines = cpp_style.CleansedLines([])
+ self.assertEquals([], clean_lines.raw_lines)
+ self.assertEquals(0, clean_lines.num_lines())
+
+ def test_collapse_strings(self):
+ collapse = cpp_style.CleansedLines.collapse_strings
+ self.assertEquals('""', collapse('""')) # "" (empty)
+ self.assertEquals('"""', collapse('"""')) # """ (bad)
+ self.assertEquals('""', collapse('"xyz"')) # "xyz" (string)
+ self.assertEquals('""', collapse('"\\\""')) # "\"" (string)
+ self.assertEquals('""', collapse('"\'"')) # "'" (string)
+ self.assertEquals('"\"', collapse('"\"')) # "\" (bad)
+ self.assertEquals('""', collapse('"\\\\"')) # "\\" (string)
+ self.assertEquals('"', collapse('"\\\\\\"')) # "\\\" (bad)
+ self.assertEquals('""', collapse('"\\\\\\\\"')) # "\\\\" (string)
+
+ self.assertEquals('\'\'', collapse('\'\'')) # '' (empty)
+ self.assertEquals('\'\'', collapse('\'a\'')) # 'a' (char)
+ self.assertEquals('\'\'', collapse('\'\\\'\'')) # '\'' (char)
+ self.assertEquals('\'', collapse('\'\\\'')) # '\' (bad)
+ self.assertEquals('', collapse('\\012')) # '\012' (char)
+ self.assertEquals('', collapse('\\xfF0')) # '\xfF0' (char)
+ self.assertEquals('', collapse('\\n')) # '\n' (char)
+ self.assertEquals('\#', collapse('\\#')) # '\#' (bad)
+
+ self.assertEquals('StringReplace(body, "", "");',
+ collapse('StringReplace(body, "\\\\", "\\\\\\\\");'))
+ self.assertEquals('\'\' ""',
+ collapse('\'"\' "foo"'))
+
+
+class OrderOfIncludesTest(CppStyleTestBase):
+ def setUp(self):
+ self.include_state = cpp_style._IncludeState()
+
+        # Stub out os.path.abspath, which the FileInfo class calls.
+ self.os_path_abspath_orig = os.path.abspath
+ os.path.abspath = lambda value: value
+
+ def tearDown(self):
+ os.path.abspath = self.os_path_abspath_orig
+
+ def test_check_next_include_order__no_config(self):
+ self.assertEqual('Header file should not contain WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, True))
+
+ def test_check_next_include_order__no_self(self):
+ self.assertEqual('Header file should not contain itself.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, True))
+ # Test actual code to make sure that header types are correctly assigned.
+ self.assert_language_rules_check('Foo.h',
+ '#include "Foo.h"\n',
+ 'Header file should not contain itself. Should be: alphabetically sorted.'
+ ' [build/include_order] [4]')
+ self.assert_language_rules_check('FooBar.h',
+ '#include "Foo.h"\n',
+ '')
+
+ def test_check_next_include_order__likely_then_config(self):
+ self.assertEqual('Found header this file implements before WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False))
+ self.assertEqual('Found WebCore config.h after a header this file implements.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False))
+
+ def test_check_next_include_order__other_then_config(self):
+ self.assertEqual('Found other header before WebCore config.h.',
+ self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False))
+ self.assertEqual('Found WebCore config.h after other header.',
+ self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False))
+
+ def test_check_next_include_order__config_then_other_then_likely(self):
+ self.assertEqual('', self.include_state.check_next_include_order(cpp_style._CONFIG_HEADER, False))
+ self.assertEqual('Found other header before a header this file implements.',
+ self.include_state.check_next_include_order(cpp_style._OTHER_HEADER, False))
+ self.assertEqual('Found header this file implements after other header.',
+ self.include_state.check_next_include_order(cpp_style._PRIMARY_HEADER, False))
+
+ def test_check_alphabetical_include_order(self):
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "b.h"\n'
+ '#include "c.h"\n',
+ '')
+
+ self.assert_language_rules_check('foo.h',
+ '#include <assert.h>\n'
+ '#include "bar.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.h',
+ '#include "bar.h"\n'
+ '#include <assert.h>\n',
+ '')
+
+ def test_webkit_api_test_excluded(self):
+ self.assert_language_rules_check('WebKitTools/WebKitAPITest/Test.h',
+ '#include "foo.h"\n',
+ '')
+
+    def test_qt_launcher_excluded(self):
+ self.assert_language_rules_check('WebKit/qt/QGVLauncher/main.cpp',
+ '#include "foo.h"\n',
+ '')
+
+ def test_check_line_break_after_own_header(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '#include "bar.h"\n',
+ 'You should add a blank line after implementation file\'s own header. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "bar.h"\n',
+ '')
+
+ def test_check_preprocessor_in_include_section(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#else\n'
+ '#include "foobar.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n', # No flag because previous is in preprocessor section
+ '')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n'
+ '#include "a.h"\n', # Should still flag this.
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+                                         '#include "bar.h"\n' # Should still flag this.
+ '#endif"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#ifdef BAZ\n'
+ '#include "baz.h"\n'
+ '#endif"\n'
+ '#ifdef FOOBAR\n'
+ '#include "foobar.h"\n'
+ '#endif"\n'
+ '#include "bar.h"\n'
+ '#include "a.h"\n', # Should still flag this.
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
+ # Check that after an already included error, the sorting rules still work.
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "foo.h"\n'
+ '#include "g.h"\n',
+ '"foo.h" already included at foo.cpp:1 [build/include] [4]')
+
+ def test_check_wtf_includes(self):
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include <wtf/Assertions.h>\n',
+ '')
+ self.assert_language_rules_check('foo.cpp',
+ '#include "config.h"\n'
+ '#include "foo.h"\n'
+ '\n'
+ '#include "wtf/Assertions.h"\n',
+ 'wtf includes should be <wtf/file.h> instead of "wtf/file.h".'
+ ' [build/include] [4]')
+
+ def test_classify_include(self):
+ classify_include = cpp_style._classify_include
+ include_state = cpp_style._IncludeState()
+ self.assertEqual(cpp_style._CONFIG_HEADER,
+ classify_include('foo/foo.cpp',
+ 'config.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/public/foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/other/public/foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo/internal/foo.cpp',
+ 'foo/other/public/foop.h',
+ False, include_state))
+ self.assertEqual(cpp_style._OTHER_HEADER,
+ classify_include('foo/foo.cpp',
+ 'string',
+ True, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('fooCustom.cpp',
+ 'foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._PRIMARY_HEADER,
+ classify_include('PrefixFooCustom.cpp',
+ 'Foo.h',
+ False, include_state))
+ self.assertEqual(cpp_style._MOC_HEADER,
+ classify_include('foo.cpp',
+ 'foo.moc',
+ False, include_state))
+ self.assertEqual(cpp_style._MOC_HEADER,
+ classify_include('foo.cpp',
+ 'moc_foo.cpp',
+ False, include_state))
+ # Tricky example where both includes might be classified as primary.
+ self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+ '#include "config.h"\n'
+ '#include "ScrollbarThemeWince.h"\n'
+ '\n'
+ '#include "Scrollbar.h"\n',
+ '')
+ self.assert_language_rules_check('ScrollbarThemeWince.cpp',
+ '#include "config.h"\n'
+ '#include "Scrollbar.h"\n'
+ '\n'
+ '#include "ScrollbarThemeWince.h"\n',
+ 'Found header this file implements after a header this file implements.'
+ ' Should be: config.h, primary header, blank line, and then alphabetically sorted.'
+ ' [build/include_order] [4]')
+ self.assert_language_rules_check('ResourceHandleWin.cpp',
+ '#include "config.h"\n'
+ '#include "ResourceHandle.h"\n'
+ '\n'
+ '#include "ResourceHandleWin.h"\n',
+ '')
+
+ def test_try_drop_common_suffixes(self):
+ self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo-inl.h'))
+ self.assertEqual('foo/bar/foo',
+ cpp_style._drop_common_suffixes('foo/bar/foo_inl.h'))
+ self.assertEqual('foo/foo', cpp_style._drop_common_suffixes('foo/foo.cpp'))
+ self.assertEqual('foo/foo_unusualinternal',
+ cpp_style._drop_common_suffixes('foo/foo_unusualinternal.h'))
+ self.assertEqual('',
+ cpp_style._drop_common_suffixes('_test.cpp'))
+ self.assertEqual('test',
+ cpp_style._drop_common_suffixes('test.cpp'))
+
+
+class CheckForFunctionLengthsTest(CppStyleTestBase):
+ def setUp(self):
+ # Reducing these thresholds for the tests speeds up tests significantly.
+ self.old_normal_trigger = cpp_style._FunctionState._NORMAL_TRIGGER
+ self.old_test_trigger = cpp_style._FunctionState._TEST_TRIGGER
+
+ cpp_style._FunctionState._NORMAL_TRIGGER = 10
+ cpp_style._FunctionState._TEST_TRIGGER = 25
+
+ def tearDown(self):
+ cpp_style._FunctionState._NORMAL_TRIGGER = self.old_normal_trigger
+ cpp_style._FunctionState._TEST_TRIGGER = self.old_test_trigger
+
+ # FIXME: Eliminate the need for this function.
+ def set_verbosity(self, verbosity):
+ """Set new test verbosity and return old test verbosity."""
+ old_verbosity = self.verbosity
+ self.verbosity = verbosity
+ return old_verbosity
+
+ def assert_function_lengths_check(self, code, expected_message):
+ """Check warnings for long function bodies are as expected.
+
+ Args:
+ code: C++ source code expected to generate a warning message.
+ expected_message: Message expected to be generated by the C++ code.
+ """
+ self.assertEquals(expected_message,
+ self.perform_function_lengths_check(code))
+
+ def trigger_lines(self, error_level):
+ """Return number of lines needed to trigger a function length warning.
+
+ Args:
+ error_level: --v setting for cpp_style.
+
+ Returns:
+ Number of lines needed to trigger a function length warning.
+ """
+ return cpp_style._FunctionState._NORMAL_TRIGGER * 2 ** error_level
+
+ def trigger_test_lines(self, error_level):
+ """Return number of lines needed to trigger a test function length warning.
+
+ Args:
+ error_level: --v setting for cpp_style.
+
+ Returns:
+ Number of lines needed to trigger a test function length warning.
+ """
+ return cpp_style._FunctionState._TEST_TRIGGER * 2 ** error_level
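+
+    # Worked example (illustrative): with the reduced thresholds installed in
+    # setUp (_NORMAL_TRIGGER = 10, _TEST_TRIGGER = 25), trigger_lines(2)
+    # returns 10 * 2 ** 2 == 40 and trigger_test_lines(2) returns
+    # 25 * 2 ** 2 == 100.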
+
+ def assert_function_length_check_definition(self, lines, error_level):
+ """Generate long function definition and check warnings are as expected.
+
+ Args:
+ lines: Number of lines to generate.
+ error_level: --v setting for cpp_style.
+ """
+ trigger_level = self.trigger_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body(lines),
+ ('Small and focused functions are preferred: '
+ 'test() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]'
+ % (lines, trigger_level, error_level)))
+
+ def assert_function_length_check_definition_ok(self, lines):
+ """Generate shorter function definition and check no warning is produced.
+
+ Args:
+ lines: Number of lines to generate.
+ """
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body(lines),
+ '')
+
+ def assert_function_length_check_at_error_level(self, error_level):
+ """Generate and check function at the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level),
+ error_level)
+
+ def assert_function_length_check_below_error_level(self, error_level):
+ """Generate and check function just below the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level) - 1,
+ error_level - 1)
+
+ def assert_function_length_check_above_error_level(self, error_level):
+ """Generate and check function just above the trigger level for --v setting.
+
+ Args:
+ error_level: --v setting for cpp_style.
+ """
+ self.assert_function_length_check_definition(self.trigger_lines(error_level) + 1,
+ error_level)
+
+ def function_body(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test();\n' * number_of_lines + '}'
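+        # For example, function_body(2) returns
+        # ' {\n    this_is_just_a_test();\n    this_is_just_a_test();\n}'.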
+
+ def function_body_with_blank_lines(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test();\n\n' * number_of_lines + '}'
+
+ def function_body_with_no_lints(self, number_of_lines):
+ return ' {\n' + ' this_is_just_a_test(); // NOLINT\n' * number_of_lines + '}'
+
+ # Test line length checks.
+ def test_function_length_check_declaration(self):
+ self.assert_function_lengths_check(
+ 'void test();', # Not a function definition
+ '')
+
+ def test_function_length_check_declaration_with_block_following(self):
+ self.assert_function_lengths_check(
+ ('void test();\n'
+ + self.function_body(66)), # Not a function definition
+ '')
+
+ def test_function_length_check_class_definition(self):
+ self.assert_function_lengths_check( # Not a function definition
+ 'class Test' + self.function_body(66) + ';',
+ '')
+
+ def test_function_length_check_trivial(self):
+ self.assert_function_lengths_check(
+ 'void test() {}', # Not counted
+ '')
+
+ def test_function_length_check_empty(self):
+ self.assert_function_lengths_check(
+ 'void test() {\n}',
+ '')
+
+ def test_function_length_check_definition_below_severity0(self):
+ old_verbosity = self.set_verbosity(0)
+ self.assert_function_length_check_definition_ok(self.trigger_lines(0) - 1)
+ self.set_verbosity(old_verbosity)
+
+ def test_function_length_check_definition_at_severity0(self):
+ old_verbosity = self.set_verbosity(0)
+ self.assert_function_length_check_definition_ok(self.trigger_lines(0))
+ self.set_verbosity(old_verbosity)
+
+ def test_function_length_check_definition_above_severity0(self):
+ old_verbosity = self.set_verbosity(0)
+ self.assert_function_length_check_above_error_level(0)
+ self.set_verbosity(old_verbosity)
+
+ def test_function_length_check_definition_below_severity1v0(self):
+ old_verbosity = self.set_verbosity(0)
+ self.assert_function_length_check_below_error_level(1)
+ self.set_verbosity(old_verbosity)
+
+ def test_function_length_check_definition_at_severity1v0(self):
+ old_verbosity = self.set_verbosity(0)
+ self.assert_function_length_check_at_error_level(1)
+ self.set_verbosity(old_verbosity)
+
+ def test_function_length_check_definition_below_severity1(self):
+ self.assert_function_length_check_definition_ok(self.trigger_lines(1) - 1)
+
+ def test_function_length_check_definition_at_severity1(self):
+ self.assert_function_length_check_definition_ok(self.trigger_lines(1))
+
+ def test_function_length_check_definition_above_severity1(self):
+ self.assert_function_length_check_above_error_level(1)
+
+ def test_function_length_check_definition_severity1_plus_blanks(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ 'void test_blanks(int x)' + self.function_body(error_lines),
+ ('Small and focused functions are preferred: '
+ 'test_blanks() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_complex_definition_severity1(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ ('my_namespace::my_other_namespace::MyVeryLongTypeName*\n'
+ 'my_namespace::my_other_namespace::MyFunction(int arg1, char* arg2)'
+ + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'my_namespace::my_other_namespace::MyFunction()'
+ ' has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_test(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ 'TEST_F(Test, Mutator)' + self.function_body(error_lines),
+ ('Small and focused functions are preferred: '
+ 'TEST_F(Test, Mutator) has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_split_line_test(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ ('TEST_F(GoogleUpdateRecoveryRegistryProtectedTest,\n'
+ ' FixGoogleUpdate_AllValues_MachineApp)' # note: 4 spaces
+ + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'TEST_F(GoogleUpdateRecoveryRegistryProtectedTest, ' # 1 space
+ 'FixGoogleUpdate_AllValues_MachineApp) has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+             % (error_lines + 1, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_for_bad_test_doesnt_break(self):
+ error_level = 1
+ error_lines = self.trigger_test_lines(error_level) + 1
+ trigger_level = self.trigger_test_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ ('TEST_F('
+ + self.function_body(error_lines)),
+ ('Small and focused functions are preferred: '
+ 'TEST_F has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_with_embedded_no_lints(self):
+ error_level = 1
+ error_lines = self.trigger_lines(error_level) + 1
+ trigger_level = self.trigger_lines(self.verbosity)
+ self.assert_function_lengths_check(
+ 'void test(int x)' + self.function_body_with_no_lints(error_lines),
+ ('Small and focused functions are preferred: '
+ 'test() has %d non-comment lines '
+ '(error triggered by exceeding %d lines).'
+ ' [readability/fn_size] [%d]')
+ % (error_lines, trigger_level, error_level))
+
+ def test_function_length_check_definition_severity1_with_no_lint(self):
+ self.assert_function_lengths_check(
+ ('void test(int x)' + self.function_body(self.trigger_lines(1))
+ + ' // NOLINT -- long function'),
+ '')
+
+ def test_function_length_check_definition_below_severity2(self):
+ self.assert_function_length_check_below_error_level(2)
+
+ def test_function_length_check_definition_severity2(self):
+ self.assert_function_length_check_at_error_level(2)
+
+ def test_function_length_check_definition_above_severity2(self):
+ self.assert_function_length_check_above_error_level(2)
+
+ def test_function_length_check_definition_below_severity3(self):
+ self.assert_function_length_check_below_error_level(3)
+
+ def test_function_length_check_definition_severity3(self):
+ self.assert_function_length_check_at_error_level(3)
+
+ def test_function_length_check_definition_above_severity3(self):
+ self.assert_function_length_check_above_error_level(3)
+
+ def test_function_length_check_definition_below_severity4(self):
+ self.assert_function_length_check_below_error_level(4)
+
+ def test_function_length_check_definition_severity4(self):
+ self.assert_function_length_check_at_error_level(4)
+
+ def test_function_length_check_definition_above_severity4(self):
+ self.assert_function_length_check_above_error_level(4)
+
+ def test_function_length_check_definition_below_severity5(self):
+ self.assert_function_length_check_below_error_level(5)
+
+ def test_function_length_check_definition_at_severity5(self):
+ self.assert_function_length_check_at_error_level(5)
+
+ def test_function_length_check_definition_above_severity5(self):
+ self.assert_function_length_check_above_error_level(5)
+
+ def test_function_length_check_definition_huge_lines(self):
+ # 5 is the limit
+ self.assert_function_length_check_definition(self.trigger_lines(10), 5)
+
+ def test_function_length_not_determinable(self):
+ # Macro invocation without terminating semicolon.
+ self.assert_function_lengths_check(
+ 'MACRO(arg)',
+ '')
+
+ # Macro with underscores
+ self.assert_function_lengths_check(
+ 'MACRO_WITH_UNDERSCORES(arg1, arg2, arg3)',
+ '')
+
+ self.assert_function_lengths_check(
+ 'NonMacro(arg)',
+ 'Lint failed to find start of function body.'
+ ' [readability/fn_size] [5]')
+
+
+class NoNonVirtualDestructorsTest(CppStyleTestBase):
+
+ def test_no_error(self):
+ self.assert_multi_line_lint(
+ '''class Foo {
+ virtual ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''class Foo {
+ virtual inline ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''class Foo {
+ inline virtual ~Foo();
+ virtual void foo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''class Foo::Goo {
+ virtual ~Goo();
+ virtual void goo();
+ };''',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo { void foo(); };',
+ 'More than one command on the same line [whitespace/newline] [4]')
+
+ self.assert_multi_line_lint(
+ '''class Qualified::Goo : public Foo {
+ virtual void goo();
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ # Line-ending :
+ '''class Goo :
+ public Foo {
+ virtual void goo();
+ };''',
+ 'Labels should always be indented at least one space. If this is a '
+ 'member-initializer list in a constructor, the colon should be on the '
+ 'line after the definition header. [whitespace/labels] [4]')
+
+ def test_no_destructor_when_virtual_needed(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo {
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_destructor_non_virtual_when_virtual_needed(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo {
+ ~Foo();
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_no_warn_when_derived(self):
+ self.assert_multi_line_lint(
+ '''class Foo : public Goo {
+ virtual void foo();
+ };''',
+ '')
+
+ def test_internal_braces(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo {
+ enum Goo {
+ GOO
+ };
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_inner_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo {
+ class Goo {
+ virtual void goo();
+ };
+ };''',
+ 'The class Goo probably needs a virtual destructor')
+
+ def test_outer_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo {
+ class Goo {
+ };
+ virtual void foo();
+ };''',
+ 'The class Foo probably needs a virtual destructor')
+
+ def test_qualified_class_needs_virtual_destructor(self):
+ self.assert_multi_line_lint_re(
+ '''class Qualified::Foo {
+ virtual void foo();
+ };''',
+ 'The class Qualified::Foo probably needs a virtual destructor')
+
+ def test_multi_line_declaration_no_error(self):
+ self.assert_multi_line_lint_re(
+ '''class Foo
+ : public Goo {
+ virtual void foo();
+ };''',
+ '')
+
+ def test_multi_line_declaration_with_error(self):
+ self.assert_multi_line_lint(
+ '''class Foo
+ {
+ virtual void foo();
+ };''',
+ ['This { should be at the end of the previous line '
+ '[whitespace/braces] [4]',
+ 'The class Foo probably needs a virtual destructor due to having '
+ 'virtual method(s), one declared at line 2. [runtime/virtual] [4]'])
+
+
+class WebKitStyleTest(CppStyleTestBase):
+
+ # for http://webkit.org/coding/coding-style.html
+ def test_indentation(self):
+ # 1. Use spaces, not tabs. Tabs should only appear in files that
+ # require them for semantic meaning, like Makefiles.
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ '\tint goo;\n'
+ '};',
+ 'Tab found; better to use spaces [whitespace/tab] [1]')
+
+ # 2. The indent size is 4 spaces.
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ '')
+ self.assert_multi_line_lint(
+ 'class Foo {\n'
+ ' int goo;\n'
+ '};',
+ 'Weird number of spaces at line-start. Are you using a 4-space indent? [whitespace/indent] [3]')
+ # FIXME: No tests for 8-spaces.
+
+ # 3. In a header, code inside a namespace should not be indented.
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'class Document {\n'
+ ' int myVariable;\n'
+ '};\n'
+ '}',
+ '',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ ' namespace InnerNamespace {\n'
+ ' class Document {\n'
+ '};\n'
+ '};\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ ' class Document {\n'
+ ' namespace InnerNamespace {\n'
+ '};\n'
+ '};\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#if 0\n'
+ ' class Document {\n'
+ '};\n'
+ '#endif\n'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.h')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ 'class Document {\n'
+ '};\n'
+ '}',
+ '',
+ 'foo.h')
+
+ # 4. In an implementation file (files with the extension .cpp, .c
+ # or .mm), code inside a namespace should not be indented.
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'Document::Foo()\n'
+ ' : foo(bar)\n'
+ ' , boo(far)\n'
+ '{\n'
+ ' stuff();\n'
+ '}',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ 'namespace InnerNamespace {\n'
+ 'Document::Foo() { }\n'
+ ' void* p;\n'
+ '}\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace OuterNamespace {\n'
+ 'namespace InnerNamespace {\n'
+ 'Document::Foo() { }\n'
+ '}\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ ' const char* foo = "start:;"\n'
+ ' "dfsfsfs";\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo(void* a = ";", // ;\n'
+ ' void* b);\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo[] = {\n'
+ ' "void* b);", // ;\n'
+ ' "asfdf",\n'
+ ' }\n'
+ ' void* p;\n'
+ '}\n',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n\n'
+ 'const char* foo[] = {\n'
+ ' "void* b);", // }\n'
+ ' "asfdf",\n'
+ ' }\n'
+ '}\n',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ ' namespace WebCore {\n\n'
+ ' void Document::Foo()\n'
+ ' {\n'
+ 'start: // infinite loops are fun!\n'
+ ' goto start;\n'
+ ' }',
+ 'namespace should never be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ ' Document::Foo() { }\n'
+ '}',
+ 'Code inside a namespace should not be indented.'
+ ' [whitespace/indent] [4]',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#define abc(x) x; \\\n'
+ ' x\n'
+ '}',
+ '',
+ 'foo.cpp')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ '#define abc(x) x; \\\n'
+ ' x\n'
+ ' void* x;'
+ '}',
+ 'Code inside a namespace should not be indented. [whitespace/indent] [4]',
+ 'foo.cpp')
+
+ # 5. A case label should line up with its switch statement. The
+ # case statement is indented.
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition: break;\n'
+ ' default: return;\n'
+ ' }\n',
+ '')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' break;\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'A case label should not be indented, but line up with its switch statement.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' i++;\n'
+ ' break;\n\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'Non-label code inside switch statements should be indented.'
+ ' [whitespace/indent] [4]')
+ self.assert_multi_line_lint(
+ ' switch (condition) {\n'
+ ' case fooCondition:\n'
+ ' case barCondition:\n'
+ ' switch (otherCondition) {\n'
+ ' default:\n'
+ ' return;\n'
+ ' }\n'
+ ' default:\n'
+ ' i--;\n'
+ ' }\n',
+ 'Non-label code inside switch statements should be indented.'
+ ' [whitespace/indent] [4]')
+
+ # 6. Boolean expressions at the same nesting level that span
+ # multiple lines should have their operators on the left side of
+ # the line instead of the right side.
+ self.assert_multi_line_lint(
+ ' return attr->name() == srcAttr\n'
+ ' || attr->name() == lowsrcAttr;\n',
+ '')
+ self.assert_multi_line_lint(
+ ' return attr->name() == srcAttr ||\n'
+ ' attr->name() == lowsrcAttr;\n',
+ 'Boolean expressions that span multiple lines should have their '
+ 'operators on the left side of the line instead of the right side.'
+ ' [whitespace/operators] [4]')
+
+ def test_spacing(self):
+ # 1. Do not place spaces around unary operators.
+ self.assert_multi_line_lint(
+ 'i++;',
+ '')
+ self.assert_multi_line_lint(
+ 'i ++;',
+ 'Extra space for operator ++; [whitespace/operators] [4]')
+
+ # 2. Do place spaces around binary and ternary operators.
+ self.assert_multi_line_lint(
+ 'y = m * x + b;',
+ '')
+ self.assert_multi_line_lint(
+ 'f(a, b);',
+ '')
+ self.assert_multi_line_lint(
+ 'c = a | b;',
+ '')
+ self.assert_multi_line_lint(
+ 'return condition ? 1 : 0;',
+ '')
+ self.assert_multi_line_lint(
+ 'y=m*x+b;',
+ 'Missing spaces around = [whitespace/operators] [4]')
+ self.assert_multi_line_lint(
+ 'f(a,b);',
+ 'Missing space after , [whitespace/comma] [3]')
+ self.assert_multi_line_lint(
+ 'c = a|b;',
+ 'Missing spaces around | [whitespace/operators] [3]')
+ # FIXME: We cannot catch this lint error.
+ # self.assert_multi_line_lint(
+ # 'return condition ? 1:0;',
+ # '')
+
+ # 3. Place spaces between control statements and their parentheses.
+ self.assert_multi_line_lint(
+ ' if (condition)\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' if(condition)\n'
+ ' doIt();\n',
+ 'Missing space before ( in if( [whitespace/parens] [5]')
+
+ # 4. Do not place spaces between a function and its parentheses,
+ # or between a parenthesis and its content.
+ self.assert_multi_line_lint(
+ 'f(a, b);',
+ '')
+ self.assert_multi_line_lint(
+ 'f (a, b);',
+ 'Extra space before ( in function call [whitespace/parens] [4]')
+ self.assert_multi_line_lint(
+ 'f( a, b );',
+ ['Extra space after ( in function call [whitespace/parens] [4]',
+ 'Extra space before ) [whitespace/parens] [2]'])
+
+ def test_line_breaking(self):
+ # 1. Each statement should get its own line.
+ self.assert_multi_line_lint(
+ ' x++;\n'
+ ' y++;\n'
+ ' if (condition);\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' if (condition) \\\n'
+ ' doIt();\n',
+ '')
+ self.assert_multi_line_lint(
+ ' x++; y++;',
+ 'More than one command on the same line [whitespace/newline] [4]')
+ self.assert_multi_line_lint(
+ ' if (condition) doIt();\n',
+ 'More than one command on the same line in if [whitespace/parens] [4]')
+
+ # 2. An else statement should go on the same line as a preceding
+ # close brace if one is present, else it should line up with the
+ # if statement.
+ self.assert_multi_line_lint(
+ 'if (condition) {\n'
+ ' doSomething();\n'
+ ' doSomethingAgain();\n'
+ '} else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+ '')
+ self.assert_multi_line_lint(
+ '#define TEST_ASSERT(expression) do { if ( !(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0)\n',
+ 'Mismatching spaces inside () in if [whitespace/parens] [5]')
+ # FIXME: currently we only check first conditional, so we cannot detect errors in next ones.
+ # self.assert_multi_line_lint(
+ # '#define TEST_ASSERT(expression) do { if (!(expression)) { TestsController::shared().testFailed(__FILE__, __LINE__, #expression); return; } } while (0 )\n',
+ # 'Mismatching spaces inside () in if [whitespace/parens] [5]')
+ self.assert_multi_line_lint(
+ 'if (condition) {\n'
+ ' doSomething();\n'
+ ' doSomethingAgain();\n'
+ '}\n'
+ 'else {\n'
+ ' doSomethingElse();\n'
+ ' doSomethingElseAgain();\n'
+ '}\n',
+ 'An else should appear on the same line as the preceding } [whitespace/newline] [4]')
+ self.assert_multi_line_lint(
+ 'if (condition) doSomething(); else doSomethingElse();\n',
+ ['More than one command on the same line [whitespace/newline] [4]',
+ 'Else clause should never be on same line as else (use 2 lines) [whitespace/newline] [4]',
+ 'More than one command on the same line in if [whitespace/parens] [4]'])
+ self.assert_multi_line_lint(
+ 'if (condition) doSomething(); else {\n'
+ ' doSomethingElse();\n'
+ '}\n',
+ ['More than one command on the same line in if [whitespace/parens] [4]',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]'])
+
+ # 3. An else if statement should be written as an if statement
+ # when the prior if concludes with a return statement.
+ self.assert_multi_line_lint(
+ 'if (motivated) {\n'
+ ' if (liquid)\n'
+ ' return money;\n'
+ '} else if (tired)\n'
+ ' break;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else if (otherCondition)\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else\n'
+ ' doSomethingElse();\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' returnValue = foo;\n'
+ 'else if (otherCondition)\n'
+ ' returnValue = bar;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' returnValue = foo;\n'
+ 'else\n'
+ ' returnValue = bar;\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ ' doSomething();\n'
+ 'else if (liquid)\n'
+ ' return money;\n'
+ 'else if (broke)\n'
+ ' return favor;\n'
+ 'else\n'
+ ' sleep(28800);\n',
+ '')
+ self.assert_multi_line_lint(
+ 'if (liquid) {\n'
+ ' prepare();\n'
+ ' return money;\n'
+ '} else if (greedy) {\n'
+ ' keep();\n'
+ ' return nothing;\n'
+ '}\n',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ ' if (stupid) {\n'
+ 'infiniteLoop:\n'
+ ' goto infiniteLoop;\n'
+ ' } else if (evil)\n'
+ ' goto hell;\n',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (liquid)\n'
+ '{\n'
+ ' prepare();\n'
+ ' return money;\n'
+ '}\n'
+ 'else if (greedy)\n'
+ ' keep();\n',
+ ['This { should be at the end of the previous line [whitespace/braces] [4]',
+ 'An else should appear on the same line as the preceding } [whitespace/newline] [4]',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]'])
+ self.assert_multi_line_lint(
+ 'if (gone)\n'
+ ' return;\n'
+ 'else if (here)\n'
+ ' go();\n',
+ 'An else if statement should be written as an if statement when the '
+ 'prior "if" concludes with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (gone)\n'
+ ' return;\n'
+ 'else\n'
+ ' go();\n',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (motivated) {\n'
+ ' prepare();\n'
+ ' continue;\n'
+ '} else {\n'
+ ' cleanUp();\n'
+ ' break;\n'
+ '}\n',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+ self.assert_multi_line_lint(
+ 'if (tired)\n'
+ ' break;\n'
+ 'else {\n'
+ ' prepare();\n'
+ ' continue;\n'
+ '}\n',
+ 'An else statement can be removed when the prior "if" concludes '
+ 'with a return, break, continue or goto statement.'
+ ' [readability/control_flow] [4]')
+
+ def test_braces(self):
+ # 1. Function definitions: place each brace on its own line.
+ self.assert_multi_line_lint(
+ 'int main()\n'
+ '{\n'
+ ' doSomething();\n'
+ '}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'int main() {\n'
+ ' doSomething();\n'
+ '}\n',
+ 'Place brace on its own line for function definitions. [whitespace/braces] [4]')
+
+ # 2. Other braces: place the open brace on the line preceding the
+ # code block; place the close brace on its own line.
+ self.assert_multi_line_lint(
+ 'class MyClass {\n'
+ ' int foo;\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'namespace WebCore {\n'
+ 'int foo;\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'for (int i = 0; i < 10; i++) {\n'
+ ' DoSomething();\n'
+ '};\n',
+ '')
+ self.assert_multi_line_lint(
+ 'class MyClass\n'
+ '{\n'
+ ' int foo;\n'
+ '};\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'if (condition)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'for (int i = 0; i < 10; i++)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'while (true)\n'
+ '{\n'
+ ' int foo;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'foreach (Foo* foo, foos)\n'
+ '{\n'
+ ' int bar;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'switch (type)\n'
+ '{\n'
+ 'case foo: return;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+ self.assert_multi_line_lint(
+ 'else if (type)\n'
+ '{\n'
+ 'case foo: return;\n'
+ '}\n',
+ 'This { should be at the end of the previous line [whitespace/braces] [4]')
+
+ # 3. One-line control clauses should not use braces unless
+ # comments are included or a single statement spans multiple
+ # lines.
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ ' int foo;\n'
+ '}\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'for (; foo; bar) {\n'
+ ' int foo;\n'
+ '}\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'foreach (foo, foos) {\n'
+ ' int bar;\n'
+ '}\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'while (true) {\n'
+ ' int foo;\n'
+ '}\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (true)\n'
+ ' int foo;\n'
+ 'else {\n'
+ ' int foo;\n'
+ '}\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ ' int foo;\n'
+ '} else\n'
+ ' int foo;\n',
+ 'One line control clauses should not use braces. [whitespace/braces] [4]')
+
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ ' // Some comment\n'
+ ' int foo;\n'
+ '}\n',
+ '')
+
+ self.assert_multi_line_lint(
+ 'if (true) {\n'
+ ' myFunction(reallyLongParam1, reallyLongParam2,\n'
+ ' reallyLongParam3);\n'
+ '}\n',
+ '')
+
+ # 4. Control clauses without a body should use empty braces.
+ self.assert_multi_line_lint(
+ 'for ( ; current; current = current->next) { }\n',
+ '')
+ self.assert_multi_line_lint(
+ 'for ( ; current;\n'
+ ' current = current->next) {}\n',
+ '')
+ self.assert_multi_line_lint(
+ 'for ( ; current; current = current->next);\n',
+ 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
+ self.assert_multi_line_lint(
+ 'while (true);\n',
+ 'Semicolon defining empty statement for this loop. Use { } instead. [whitespace/semicolon] [5]')
+ self.assert_multi_line_lint(
+ '} while (true);\n',
+ '')
+
+ def test_null_false_zero(self):
+ # 1. In C++, the null pointer value should be written as 0. In C,
+ # it should be written as NULL. In Objective-C and Objective-C++,
+ # follow the guideline for C or C++, respectively, but use nil to
+ # represent a null Objective-C object.
+ self.assert_lint(
+ 'functionCall(NULL)',
+ 'Use 0 instead of NULL.'
+ ' [readability/null] [5]',
+ 'foo.cpp')
+ self.assert_lint(
+ "// Don't use NULL in comments since it isn't in code.",
+ 'Use 0 instead of NULL.'
+ ' [readability/null] [4]',
+ 'foo.cpp')
+ self.assert_lint(
+ '"A string with NULL" // and a comment with NULL is tricky to flag correctly in cpp_style.',
+ 'Use 0 instead of NULL.'
+ ' [readability/null] [4]',
+ 'foo.cpp')
+ self.assert_lint(
+ '"A string containing NULL is ok"',
+ '',
+ 'foo.cpp')
+ self.assert_lint(
+ 'if (aboutNULL)',
+ '',
+ 'foo.cpp')
+ self.assert_lint(
+ 'myVariable = NULLify',
+ '',
+ 'foo.cpp')
+ # Make sure that the NULL check does not apply to C and Objective-C files.
+ self.assert_lint(
+ 'functionCall(NULL)',
+ '',
+ 'foo.c')
+ self.assert_lint(
+ 'functionCall(NULL)',
+ '',
+ 'foo.m')
+
+ # Make sure that the NULL check does not apply to g_object_{set,get}
+ self.assert_lint(
+ 'g_object_get(foo, "prop", &bar, NULL);',
+ '')
+ self.assert_lint(
+ 'g_object_set(foo, "prop", bar, NULL);',
+ '')
+
+ # 2. C++ and C bool values should be written as true and
+ # false. Objective-C BOOL values should be written as YES and NO.
+ # FIXME: Implement this.
+
+ # 3. Tests for true/false, null/non-null, and zero/non-zero should
+ # all be done without equality comparisons.
+ self.assert_lint(
+ 'if (count == 0)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+ self.assert_lint_one_of_many_errors_re(
+ 'if (string != NULL)',
+ r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.')
+ self.assert_lint(
+ 'if (condition == true)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+ self.assert_lint(
+ 'if (myVariable != /* Why would anyone put a comment here? */ false)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+
+ self.assert_lint(
+ 'if (0 /* This comment also looks odd to me. */ != aLongerVariableName)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+ self.assert_lint_one_of_many_errors_re(
+ 'if (NULL == thisMayBeNull)',
+ r'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons\.')
+ self.assert_lint(
+ 'if (true != anotherCondition)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+ self.assert_lint(
+ 'if (false == myBoolValue)',
+ 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.'
+ ' [readability/comparison_to_zero] [5]')
+
+ self.assert_lint(
+ 'if (fontType == trueType)',
+ '')
+ self.assert_lint(
+ 'if (othertrue == fontType)',
+ '')
+
+ def test_using_std(self):
+ self.assert_lint(
+ 'using std::min;',
+ "Use 'using namespace std;' instead of 'using std::min;'."
+ " [build/using_std] [4]",
+ 'foo.cpp')
+
+ def test_max_macro(self):
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MAX(0, 1);',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MAX(0, 1); }',
+ 'Use std::max() or std::max<type>() instead of the MAX() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
+ def test_min_macro(self):
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ '',
+ 'foo.c')
+
+ self.assert_lint(
+ 'int i = MIN(0, 1);',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.cpp')
+
+ self.assert_lint(
+ 'inline int foo() { return MIN(0, 1); }',
+ 'Use std::min() or std::min<type>() instead of the MIN() macro.'
+ ' [runtime/max_min_macros] [4]',
+ 'foo.h')
+
+ def test_names(self):
+ name_error_message = " is incorrectly named. Don't use underscores in your identifier names. [readability/naming] [4]"
+
+ # Basic cases from WebKit style guide.
+ self.assert_lint('struct Data;', '')
+ self.assert_lint('size_t bufferSize;', '')
+ self.assert_lint('class HTMLDocument;', '')
+ self.assert_lint('String mimeType();', '')
+ self.assert_lint('size_t buffer_size;',
+ 'buffer_size' + name_error_message)
+ self.assert_lint('short m_length;', '')
+ self.assert_lint('short _length;',
+ '_length' + name_error_message)
+ self.assert_lint('short length_;',
+ 'length_' + name_error_message)
+
+ # Pointers, references, functions, templates, and adjectives.
+ self.assert_lint('char* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('const int UNDER_SCORE;',
+ 'UNDER_SCORE' + name_error_message)
+ self.assert_lint('static inline const char const& const under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('WebCore::RenderObject* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('int func_name();',
+ 'func_name' + name_error_message)
+ self.assert_lint('RefPtr<RenderObject*> under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('WTF::Vector<WTF::RefPtr<const RenderObject* const> > under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('int under_score[];',
+ 'under_score' + name_error_message)
+ self.assert_lint('struct dirent* under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long long under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long double under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('long long int under_score;',
+ 'under_score' + name_error_message)
+
+        # Declarations in control statements.
+ self.assert_lint('if (int under_score = 42) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('else if (int under_score = 42) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (int under_score = 42; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('while (foo & under_score = bar) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (foo * under_score = p; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('for (foo * under_score; cond; i++) {',
+ 'under_score' + name_error_message)
+ self.assert_lint('while (foo & value_in_thirdparty_library) {', '')
+ self.assert_lint('while (foo * value_in_thirdparty_library) {', '')
+ self.assert_lint('if (mli && S_OK == mli->foo()) {', '')
+
+ # More member variables and functions.
+ self.assert_lint('int SomeClass::s_validName', '')
+ self.assert_lint('int m_under_score;',
+ 'm_under_score' + name_error_message)
+ self.assert_lint('int SomeClass::s_under_score = 0;',
+ 'SomeClass::s_under_score' + name_error_message)
+ self.assert_lint('int SomeClass::under_score = 0;',
+ 'SomeClass::under_score' + name_error_message)
+
+ # Other statements.
+ self.assert_lint('return INT_MAX;', '')
+ self.assert_lint('return_t under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('goto under_score;',
+ 'under_score' + name_error_message)
+ self.assert_lint('delete static_cast<Foo*>(p);', '')
+
+ # Multiple variables in one line.
+ self.assert_lint('void myFunction(int variable1, int another_variable);',
+ 'another_variable' + name_error_message)
+ self.assert_lint('int variable1, another_variable;',
+ 'another_variable' + name_error_message)
+ self.assert_lint('int first_variable, secondVariable;',
+ 'first_variable' + name_error_message)
+ self.assert_lint('void my_function(int variable_1, int variable_2);',
+ ['my_function' + name_error_message,
+ 'variable_1' + name_error_message,
+ 'variable_2' + name_error_message])
+ self.assert_lint('for (int variable_1, variable_2;;) {',
+ ['variable_1' + name_error_message,
+ 'variable_2' + name_error_message])
+
+ # There is an exception for op code functions but only in the JavaScriptCore directory.
+ self.assert_lint('void this_op_code(int var1, int var2)', '', 'JavaScriptCore/foo.cpp')
+ self.assert_lint('void this_op_code(int var1, int var2)', 'this_op_code' + name_error_message)
+
+ # GObject requires certain magical names in class declarations.
+ self.assert_lint('void webkit_dom_object_init();', '')
+ self.assert_lint('void webkit_dom_object_class_init();', '')
+
+ # The GTK+ APIs use GTK+ naming style, which includes lower-cased, _-separated values.
+ self.assert_lint('void this_is_a_gtk_style_name(int var1, int var2)', '', 'WebKit/gtk/webkit/foo.cpp')
+
+ # There is an exception for some unit tests that begin with "tst_".
+ self.assert_lint('void tst_QWebFrame::arrayObjectEnumerable(int var1, int var2)', '')
+
+ # The Qt API uses names that begin with "qt_".
+ self.assert_lint('void QTFrame::qt_drt_is_awesome(int var1, int var2)', '')
+ self.assert_lint('void qt_drt_is_awesome(int var1, int var2);', '')
+
+ # const_iterator is allowed as well.
+ self.assert_lint('typedef VectorType::const_iterator const_iterator;', '')
+
+
+ def test_comments(self):
+ # A comment at the beginning of a line is ok.
+ self.assert_lint('// comment', '')
+ self.assert_lint(' // comment', '')
+
+ self.assert_lint('} // namespace WebCore',
+ 'One space before end of line comments'
+ ' [whitespace/comments] [5]')
+
+ def test_other(self):
+ # FIXME: Implement this.
+ pass
+
+
+class CppProcessorTest(unittest.TestCase):
+
+ """Tests CppProcessor class."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ def _processor(self):
+ return CppProcessor("foo", "h", self.mock_handle_style_error, 3)
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ processor = self._processor()
+ self.assertEquals(processor.file_extension, "h")
+ self.assertEquals(processor.file_path, "foo")
+ self.assertEquals(processor.handle_style_error, self.mock_handle_style_error)
+ self.assertEquals(processor.verbosity, 3)
+
+ def test_eq(self):
+ """Test __eq__ equality function."""
+ processor1 = self._processor()
+ processor2 = self._processor()
+
+ # == calls __eq__.
+ self.assertTrue(processor1 == processor2)
+
+ def mock_handle_style_error2(self):
+ pass
+
+        # Verify that a difference in any argument causes equality to fail.
+ processor = CppProcessor("foo", "h", self.mock_handle_style_error, 3)
+ self.assertFalse(processor == CppProcessor("bar", "h", self.mock_handle_style_error, 3))
+ self.assertFalse(processor == CppProcessor("foo", "c", self.mock_handle_style_error, 3))
+ self.assertFalse(processor == CppProcessor("foo", "h", mock_handle_style_error2, 3))
+ self.assertFalse(processor == CppProcessor("foo", "h", self.mock_handle_style_error, 4))
+
+ def test_ne(self):
+ """Test __ne__ inequality function."""
+ processor1 = self._processor()
+ processor2 = self._processor()
+
+ # != calls __ne__.
+ # By default, __ne__ always returns true on different objects.
+ # Thus, just check the distinguishing case to verify that the
+ # code defines __ne__.
+ self.assertFalse(processor1 != processor2)
+
+
+def tearDown():
+ """A global check to make sure all error-categories have been tested.
+
+ The main tearDown() routine is the only code we can guarantee will be
+ run after all other tests have been executed.
+ """
+ try:
+ if _run_verifyallcategoriesseen:
+ ErrorCollector(None).verify_all_categories_are_seen()
+ except NameError:
+ # If nobody set the global _run_verifyallcategoriesseen, then
+        # we assume we shouldn't run the test.
+ pass
+
+if __name__ == '__main__':
+ import sys
+ # We don't want to run the verify_all_categories_are_seen() test unless
+ # we're running the full test suite: if we only run one test,
+ # obviously we're not going to see all the error categories. So we
+ # only run verify_all_categories_are_seen() when no commandline flags
+ # are passed in.
+ global _run_verifyallcategoriesseen
+ _run_verifyallcategoriesseen = (len(sys.argv) == 1)
+
+ unittest.main()
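
A note on the test harness used above: assert_lint runs the checker over a single line with an error-handler callback, collects what the callback reports, and compares it against an expected string of the form "message [category] [confidence]". The following is a minimal, self-contained sketch of that collect-and-compare pattern; the toy checker and every name in it are illustrative only and are not cpp_style's real API.

    import unittest

    def check_line_for_tabs(line, handle_error):
        # Toy checker: reports a 'whitespace/tab' style error for tab characters.
        if "\t" in line:
            handle_error("whitespace/tab", 5, "Line contains tab character.")

    class AssertLintStyleExample(unittest.TestCase):
        """Illustrates the collect-errors-via-callback pattern behind assert_lint."""

        def _lint(self, line):
            errors = []
            def handle_error(category, confidence, message):
                errors.append("%s [%s] [%d]" % (message, category, confidence))
            check_line_for_tabs(line, handle_error)
            return errors

        def assert_lint(self, line, expected_message):
            errors = self._lint(line)
            expected = [] if expected_message == "" else [expected_message]
            self.assertEqual(errors, expected)

        def test_tab(self):
            self.assert_lint("int i = 0;", "")
            self.assert_lint("int\ti = 0;",
                             "Line contains tab character. [whitespace/tab] [5]")

    if __name__ == '__main__':
        unittest.main()
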
diff --git a/WebKitTools/Scripts/webkitpy/style/processors/text.py b/WebKitTools/Scripts/webkitpy/style/processors/text.py
new file mode 100644
index 0000000..307e5b8
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/processors/text.py
@@ -0,0 +1,56 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Checks WebKit style for text files."""
+
+
+class TextProcessor(object):
+
+ """Processes text lines for checking style."""
+
+ def __init__(self, file_path, handle_style_error):
+ self.file_path = file_path
+ self.handle_style_error = handle_style_error
+
+ def process(self, lines):
+ lines = (["// adjust line numbers to make the first line 1."] + lines)
+
+ # FIXME: share with cpp_style.
+ for line_number, line in enumerate(lines):
+ if "\t" in line:
+ self.handle_style_error(line_number,
+ "whitespace/tab", 5,
+ "Line contains tab character.")
+
+
+# FIXME: Remove this function (requires refactoring unit tests).
+def process_file_data(filename, lines, error):
+ processor = TextProcessor(filename, error)
+ processor.process(lines)
+
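
TextProcessor.process above prepends a dummy element so that enumerate() yields 1-based indices for the real lines, then reports a 'whitespace/tab' error for any line containing a tab. A small usage sketch, assuming the module is importable as webkitpy.style.processors.text and using a stand-in error handler that is not part of the patch:

    from webkitpy.style.processors.text import TextProcessor

    def print_error(line_number, category, confidence, message):
        # Stand-in for the real handle_style_error callback.
        print "%d: %s [%s] [%d]" % (line_number, message, category, confidence)

    processor = TextProcessor("ChangeLog", print_error)
    processor.process(["2009-12-16  Kent Tamura  <tkent@chromium.org>",
                       "",
                       "\tReviewed by NOBODY."])
    # Prints: 3: Line contains tab character. [whitespace/tab] [5]
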
diff --git a/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py b/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py
new file mode 100644
index 0000000..62f825b
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for text_style.py."""
+
+import unittest
+
+import text as text_style
+from text import TextProcessor
+
+class TextStyleTestCase(unittest.TestCase):
+ """TestCase for text_style.py"""
+
+ def assertNoError(self, lines):
+ """Asserts that the specified lines has no errors."""
+ self.had_error = False
+
+ def error_for_test(line_number, category, confidence, message):
+ """Records if an error occurs."""
+ self.had_error = True
+
+ text_style.process_file_data('', lines, error_for_test)
+ self.assert_(not self.had_error, '%s should not have any errors.' % lines)
+
+ def assertError(self, lines, expected_line_number):
+ """Asserts that the specified lines has an error."""
+ self.had_error = False
+
+ def error_for_test(line_number, category, confidence, message):
+ """Checks if the expected error occurs."""
+ self.assertEquals(expected_line_number, line_number)
+ self.assertEquals('whitespace/tab', category)
+ self.had_error = True
+
+ text_style.process_file_data('', lines, error_for_test)
+ self.assert_(self.had_error, '%s should have an error [whitespace/tab].' % lines)
+
+
+ def test_no_error(self):
+ """Tests for no error cases."""
+ self.assertNoError([''])
+ self.assertNoError(['abc def', 'ggg'])
+
+
+ def test_error(self):
+ """Tests for error cases."""
+ self.assertError(['2009-12-16\tKent Tamura\t<tkent@chromium.org>'], 1)
+ self.assertError(['2009-12-16 Kent Tamura <tkent@chromium.org>',
+ '',
+ '\tReviewed by NOBODY.'], 3)
+
+
+class TextProcessorTest(unittest.TestCase):
+
+ """Tests TextProcessor class."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ def test_init(self):
+ """Test __init__ constructor."""
+ processor = TextProcessor("foo.txt", self.mock_handle_style_error)
+ self.assertEquals(processor.file_path, "foo.txt")
+ self.assertEquals(processor.handle_style_error, self.mock_handle_style_error)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/WebKitTools/Scripts/webkitpy/style/unittests.py b/WebKitTools/Scripts/webkitpy/style/unittests.py
new file mode 100644
index 0000000..11c10e7
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style/unittests.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+# its contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Runs style package unit tests."""
+
+# This module is imported by test-webkitpy.
+
+import sys
+import unittest
+
+from checker_unittest import *
+from error_handlers_unittest import *
+from processors.cpp_unittest import *
+from processors.text_unittest import *
diff --git a/WebKitTools/Scripts/webkitpy/style_references.py b/WebKitTools/Scripts/webkitpy/style_references.py
new file mode 100644
index 0000000..2528c4d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/style_references.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""References to non-style modules used by the style package."""
+
+# This module is a simple facade to the functionality used by the
+# style package that comes from WebKit modules outside the style
+# package.
+#
+# With this module, the only intra-package references (i.e.
+# references to webkitpy modules outside the style folder) that
+# the style package needs to make are relative references to
+# this module. For example--
+#
+# > from .. style_references import parse_patch
+#
+# Similarly, people maintaining non-style code are not beholden
+# to the contents of the style package when refactoring or
+# otherwise changing non-style code. They only have to be aware
+# of this module.
+
+import os
+
+from diff_parser import DiffParser
+from scm import detect_scm_system
+
+
+def parse_patch(patch_string):
+
+ """Parse a patch string and return the affected files."""
+
+ patch = DiffParser(patch_string.splitlines())
+ return patch.files
+
+
+class SimpleScm(object):
+
+ """Simple facade to SCM for use by style package."""
+
+ def __init__(self):
+ cwd = os.path.abspath('.')
+ self._scm = detect_scm_system(cwd)
+
+ def checkout_root(self):
+ """Return the source control root as an absolute path."""
+ return self._scm.checkout_root
+
+ def create_patch(self):
+ return self._scm.create_patch()
+
+ def create_patch_since_local_commit(self, commit):
+ return self._scm.create_patch_since_local_commit(commit)
+
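
A sketch of how code inside the style package might consume this facade, so that all knowledge of non-style webkitpy modules stays behind a single import. The calling function is hypothetical, and the snippet assumes it runs from inside a WebKit checkout (detect_scm_system needs one) with webkitpy on sys.path:

    # Hypothetical caller; the absolute import is equivalent to the relative
    # form "from .. style_references import ..." shown in the module comment.
    from webkitpy.style_references import parse_patch, SimpleScm

    def changed_files_in_working_copy():
        """Return the files affected by the current working-copy diff."""
        scm = SimpleScm()
        patch_string = scm.create_patch()
        # parse_patch() hands back DiffParser's collection of affected files.
        return parse_patch(patch_string)
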
diff --git a/WebKitTools/Scripts/webkitpy/user.py b/WebKitTools/Scripts/webkitpy/user.py
new file mode 100644
index 0000000..8dbf74c
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/user.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import shlex
+import subprocess
+import webbrowser
+
+class User(object):
+ def prompt(self, message):
+ return raw_input(message)
+
+ def edit(self, files):
+ editor = os.environ.get("EDITOR") or "vi"
+ args = shlex.split(editor)
+ subprocess.call(args + files)
+
+ def page(self, message):
+ pager = os.environ.get("PAGER") or "less"
+ try:
+ child_process = subprocess.Popen([pager], stdin=subprocess.PIPE)
+ child_process.communicate(input=message)
+        except IOError:
+ pass
+
+ def confirm(self, message=None):
+ if not message:
+ message = "Continue?"
+ response = raw_input("%s [Y/n]: " % message)
+ return not response or response.lower() == "y"
+
+ def open_url(self, url):
+ webbrowser.open(url)
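
A brief usage sketch for the User helper above; the prompts and URL are purely illustrative, and the import assumes webkitpy is on sys.path:

    from webkitpy.user import User

    user = User()
    bug_title = user.prompt("Bug title: ")        # thin raw_input() wrapper
    if user.confirm("Open the bug tracker?"):     # empty input counts as yes
        user.open_url("https://bugs.webkit.org/")
    user.page("A long message shown through $PAGER (or less).")
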
diff --git a/WebKitTools/Scripts/webkitpy/user.pyc b/WebKitTools/Scripts/webkitpy/user.pyc
new file mode 100644
index 0000000..7d6b687
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/user.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/webkit_logging.py b/WebKitTools/Scripts/webkitpy/webkit_logging.py
new file mode 100644
index 0000000..ba1c5eb
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkit_logging.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2009, Google Inc. All rights reserved.
+# Copyright (c) 2009 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for logging
+# This module is now deprecated in favor of Python's built-in logging module.
+
+import os
+import sys
+
+def log(string):
+ print >> sys.stderr, string
+
+def error(string):
+ log("ERROR: %s" % string)
+ exit(1)
+
+# Simple class to split output between multiple destinations
+class tee:
+ def __init__(self, *files):
+ self.files = files
+
+ def write(self, string):
+ for file in self.files:
+ file.write(string)
+
+class OutputTee:
+ def __init__(self):
+ self._original_stdout = None
+ self._original_stderr = None
+ self._files_for_output = []
+
+ def add_log(self, path):
+ log_file = self._open_log_file(path)
+ self._files_for_output.append(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ return log_file
+
+ def remove_log(self, log_file):
+ self._files_for_output.remove(log_file)
+ self._tee_outputs_to_files(self._files_for_output)
+ log_file.close()
+
+ @staticmethod
+ def _open_log_file(log_path):
+ (log_directory, log_name) = os.path.split(log_path)
+ if log_directory and not os.path.exists(log_directory):
+ os.makedirs(log_directory)
+ return open(log_path, 'a+')
+
+ def _tee_outputs_to_files(self, files):
+ if not self._original_stdout:
+ self._original_stdout = sys.stdout
+ self._original_stderr = sys.stderr
+        if files:
+ sys.stdout = tee(self._original_stdout, *files)
+ sys.stderr = tee(self._original_stderr, *files)
+ else:
+ sys.stdout = self._original_stdout
+ sys.stderr = self._original_stderr
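
OutputTee above swaps sys.stdout and sys.stderr for a tee whenever at least one log file is registered, and restores the originals once the last log is removed. A small usage sketch with a hypothetical log path, assuming webkitpy is importable:

    from webkitpy.webkit_logging import OutputTee, log

    output_tee = OutputTee()
    log_file = output_tee.add_log("/tmp/webkitpy-logs/queue.log")
    try:
        log("Goes to stderr and to the log file.")
        print "Goes to stdout and to the log file."
    finally:
        # remove_log() restores the original streams and closes the file.
        output_tee.remove_log(log_file)
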
diff --git a/WebKitTools/Scripts/webkitpy/webkit_logging.pyc b/WebKitTools/Scripts/webkitpy/webkit_logging.pyc
new file mode 100644
index 0000000..137f042
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkit_logging.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py b/WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py
new file mode 100644
index 0000000..b940a4d
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkit_logging_unittest.py
@@ -0,0 +1,61 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import subprocess
+import StringIO
+import tempfile
+import unittest
+
+from webkitpy.executive import ScriptError
+from webkitpy.webkit_logging import *
+
+class LoggingTest(unittest.TestCase):
+
+ def assert_log_equals(self, log_input, expected_output):
+ original_stderr = sys.stderr
+ test_stderr = StringIO.StringIO()
+ sys.stderr = test_stderr
+
+ try:
+ log(log_input)
+ actual_output = test_stderr.getvalue()
+ finally:
+            sys.stderr = original_stderr
+
+ self.assertEquals(actual_output, expected_output, "log(\"%s\") expected: %s actual: %s" % (log_input, expected_output, actual_output))
+
+ def test_log(self):
+ self.assert_log_equals("test", "test\n")
+
+ # Test that log() does not throw an exception when passed an object instead of a string.
+ self.assert_log_equals(ScriptError(message="ScriptError"), "ScriptError\n")
+
+
+if __name__ == '__main__':
+ unittest.main()
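
assert_log_equals above captures stderr with a StringIO and restores sys.stderr in a finally block. The same capture-and-restore pattern in isolation, as a self-contained sketch (the helper name is illustrative; the final import assumes webkitpy is on sys.path):

    import sys
    import StringIO

    def capture_stderr(thunk):
        """Run a zero-argument callable and return what it wrote to stderr."""
        capture = StringIO.StringIO()
        original_stderr = sys.stderr
        sys.stderr = capture
        try:
            thunk()
            return capture.getvalue()
        finally:
            sys.stderr = original_stderr

    from webkitpy.webkit_logging import log
    assert capture_stderr(lambda: log("hello")) == "hello\n"
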
diff --git a/WebKitTools/Scripts/webkitpy/webkitport.py b/WebKitTools/Scripts/webkitpy/webkitport.py
new file mode 100644
index 0000000..cd60a54
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkitport.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# WebKit's Python module for understanding the various ports
+
+import os
+
+from optparse import make_option
+from webkitpy.executive import Executive
+
+
+class WebKitPort(object):
+
+ # We might need to pass scm into this function for scm.checkout_root
+ @classmethod
+ def script_path(cls, script_name):
+ return os.path.join("WebKitTools", "Scripts", script_name)
+
+ @staticmethod
+ def port(port_name):
+ ports = {
+ "chromium": ChromiumPort,
+ "gtk": GtkPort,
+ "mac": MacPort,
+ "qt": QtPort,
+ }
+ # FIXME: We should default to WinPort on Windows.
+ return ports.get(port_name, MacPort)
+
+ @classmethod
+ def name(cls):
+ raise NotImplementedError("subclasses must implement")
+
+ @classmethod
+ def flag(cls):
+ raise NotImplementedError("subclasses must implement")
+
+ @classmethod
+ def update_webkit_command(cls):
+ return [cls.script_path("update-webkit")]
+
+ @classmethod
+ def build_webkit_command(cls, build_style=None):
+ command = [cls.script_path("build-webkit")]
+ if build_style == "debug":
+ command.append("--debug")
+ if build_style == "release":
+ command.append("--release")
+ return command
+
+ @classmethod
+ def run_javascriptcore_tests_command(cls):
+ return [cls.script_path("run-javascriptcore-tests")]
+
+ @classmethod
+ def run_webkit_tests_command(cls):
+ return [cls.script_path("run-webkit-tests")]
+
+ @classmethod
+ def run_python_unittests_command(cls):
+ return [cls.script_path("test-webkitpy")]
+
+ @classmethod
+ def run_perl_unittests_command(cls):
+ return [cls.script_path("test-webkitperl")]
+
+
+class MacPort(WebKitPort):
+
+ @classmethod
+ def name(cls):
+ return "Mac"
+
+ @classmethod
+ def flag(cls):
+ return "--port=mac"
+
+
+class GtkPort(WebKitPort):
+
+ @classmethod
+ def name(cls):
+ return "Gtk"
+
+ @classmethod
+ def flag(cls):
+ return "--port=gtk"
+
+ @classmethod
+ def build_webkit_command(cls, build_style=None):
+ command = WebKitPort.build_webkit_command(build_style=build_style)
+ command.append("--gtk")
+ command.append('--makeargs="-j%s"' % Executive.cpu_count())
+ return command
+
+ @classmethod
+ def run_webkit_tests_command(cls):
+ command = WebKitPort.run_webkit_tests_command()
+ command.append("--gtk")
+ return command
+
+
+class QtPort(WebKitPort):
+
+ @classmethod
+ def name(cls):
+ return "Qt"
+
+ @classmethod
+ def flag(cls):
+ return "--port=qt"
+
+ @classmethod
+ def build_webkit_command(cls, build_style=None):
+ command = WebKitPort.build_webkit_command(build_style=build_style)
+ command.append("--qt")
+ command.append('--makeargs="-j%s"' % Executive.cpu_count())
+ return command
+
+
+class ChromiumPort(WebKitPort):
+
+ @classmethod
+ def name(cls):
+ return "Chromium"
+
+ @classmethod
+ def flag(cls):
+ return "--port=chromium"
+
+ @classmethod
+ def update_webkit_command(cls):
+ command = WebKitPort.update_webkit_command()
+ command.append("--chromium")
+ return command
+
+ @classmethod
+ def build_webkit_command(cls, build_style=None):
+ command = WebKitPort.build_webkit_command(build_style=build_style)
+ command.append("--chromium")
+ return command
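
The port classes above compose build and test commands from a shared script path plus per-port flags, and WebKitPort.port() maps a port name to a class, falling back to MacPort for unknown names. A small usage sketch; the port name would normally come from a command-line option, and the printed list is indicative only:

    from webkitpy.webkitport import WebKitPort

    port = WebKitPort.port("gtk")
    print port.name()                                  # "Gtk"
    print port.flag()                                  # "--port=gtk"
    print port.build_webkit_command(build_style="debug")
    # e.g. ['WebKitTools/Scripts/build-webkit', '--debug', '--gtk',
    #       '--makeargs="-j<number of CPUs>"']
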
diff --git a/WebKitTools/Scripts/webkitpy/webkitport.pyc b/WebKitTools/Scripts/webkitpy/webkitport.pyc
new file mode 100644
index 0000000..e344aca
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkitport.pyc
Binary files differ
diff --git a/WebKitTools/Scripts/webkitpy/webkitport_unittest.py b/WebKitTools/Scripts/webkitpy/webkitport_unittest.py
new file mode 100644
index 0000000..202234f
--- /dev/null
+++ b/WebKitTools/Scripts/webkitpy/webkitport_unittest.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright (c) 2009, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.executive import Executive
+from webkitpy.webkitport import WebKitPort, MacPort, GtkPort, QtPort, ChromiumPort
+
+
+class WebKitPortTest(unittest.TestCase):
+ def test_mac_port(self):
+ self.assertEquals(MacPort.name(), "Mac")
+ self.assertEquals(MacPort.flag(), "--port=mac")
+ self.assertEquals(MacPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(MacPort.build_webkit_command(), [WebKitPort.script_path("build-webkit")])
+ self.assertEquals(MacPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug"])
+ self.assertEquals(MacPort.build_webkit_command(build_style="release"), [WebKitPort.script_path("build-webkit"), "--release"])
+
+ def test_gtk_port(self):
+ self.assertEquals(GtkPort.name(), "Gtk")
+ self.assertEquals(GtkPort.flag(), "--port=gtk")
+ self.assertEquals(GtkPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests"), "--gtk"])
+ self.assertEquals(GtkPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--gtk", '--makeargs="-j%s"' % Executive.cpu_count()])
+ self.assertEquals(GtkPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--gtk", '--makeargs="-j%s"' % Executive.cpu_count()])
+
+ def test_qt_port(self):
+ self.assertEquals(QtPort.name(), "Qt")
+ self.assertEquals(QtPort.flag(), "--port=qt")
+ self.assertEquals(QtPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(QtPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--qt", '--makeargs="-j%s"' % Executive.cpu_count()])
+ self.assertEquals(QtPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--qt", '--makeargs="-j%s"' % Executive.cpu_count()])
+
+ def test_chromium_port(self):
+ self.assertEquals(ChromiumPort.name(), "Chromium")
+ self.assertEquals(ChromiumPort.flag(), "--port=chromium")
+ self.assertEquals(ChromiumPort.run_webkit_tests_command(), [WebKitPort.script_path("run-webkit-tests")])
+ self.assertEquals(ChromiumPort.build_webkit_command(), [WebKitPort.script_path("build-webkit"), "--chromium"])
+ self.assertEquals(ChromiumPort.build_webkit_command(build_style="debug"), [WebKitPort.script_path("build-webkit"), "--debug", "--chromium"])
+ self.assertEquals(ChromiumPort.update_webkit_command(), [WebKitPort.script_path("update-webkit"), "--chromium"])
+
+
+if __name__ == '__main__':
+ unittest.main()