From d0825bca7fe65beaee391d30da42e937db621564 Mon Sep 17 00:00:00 2001 From: Steve Block Date: Tue, 2 Feb 2010 14:57:50 +0000 Subject: Merge webkit.org at r54127 : Initial merge by git Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82 --- WebKitTools/Scripts/webkitpy/style/__init__.py | 1 + WebKitTools/Scripts/webkitpy/style/checker.py | 809 +++++ .../Scripts/webkitpy/style/checker_unittest.py | 677 ++++ .../Scripts/webkitpy/style/error_handlers.py | 154 + .../webkitpy/style/error_handlers_unittest.py | 163 + .../Scripts/webkitpy/style/processors/__init__.py | 1 + .../Scripts/webkitpy/style/processors/cpp.py | 3007 ++++++++++++++++ .../webkitpy/style/processors/cpp_unittest.py | 3706 ++++++++++++++++++++ .../Scripts/webkitpy/style/processors/text.py | 56 + .../webkitpy/style/processors/text_unittest.py | 94 + WebKitTools/Scripts/webkitpy/style/unittests.py | 41 + 11 files changed, 8709 insertions(+) create mode 100644 WebKitTools/Scripts/webkitpy/style/__init__.py create mode 100644 WebKitTools/Scripts/webkitpy/style/checker.py create mode 100755 WebKitTools/Scripts/webkitpy/style/checker_unittest.py create mode 100644 WebKitTools/Scripts/webkitpy/style/error_handlers.py create mode 100644 WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py create mode 100644 WebKitTools/Scripts/webkitpy/style/processors/__init__.py create mode 100644 WebKitTools/Scripts/webkitpy/style/processors/cpp.py create mode 100644 WebKitTools/Scripts/webkitpy/style/processors/cpp_unittest.py create mode 100644 WebKitTools/Scripts/webkitpy/style/processors/text.py create mode 100644 WebKitTools/Scripts/webkitpy/style/processors/text_unittest.py create mode 100644 WebKitTools/Scripts/webkitpy/style/unittests.py (limited to 'WebKitTools/Scripts/webkitpy/style') diff --git a/WebKitTools/Scripts/webkitpy/style/__init__.py b/WebKitTools/Scripts/webkitpy/style/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/__init__.py @@ 
-0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/WebKitTools/Scripts/webkitpy/style/checker.py b/WebKitTools/Scripts/webkitpy/style/checker.py new file mode 100644 index 0000000..faf954f --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/checker.py @@ -0,0 +1,809 @@ +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Front end of some style-checker modules.""" + +import codecs +import getopt +import os.path +import sys + +from .. style_references import parse_patch +from error_handlers import DefaultStyleErrorHandler +from error_handlers import PatchStyleErrorHandler +from processors.cpp import CppProcessor +from processors.text import TextProcessor + + +# These defaults are used by check-webkit-style. +WEBKIT_DEFAULT_VERBOSITY = 1 +WEBKIT_DEFAULT_OUTPUT_FORMAT = 'emacs' + + +# FIXME: For style categories we will never want to have, remove them. +# For categories for which we want to have similar functionality, +# modify the implementation and enable them. +# +# Throughout this module, we use "filter rule" rather than "filter" +# for an individual boolean filter flag like "+foo". This allows us to +# reserve "filter" for what one gets by collectively applying all of +# the filter rules. +# +# The _WEBKIT_FILTER_RULES are prepended to any user-specified filter +# rules. Since by default all errors are on, only include rules that +# begin with a - sign. +WEBKIT_DEFAULT_FILTER_RULES = [ + '-build/endif_comment', + '-build/include_what_you_use', # for std::string + '-build/storage_class', # const static + '-legal/copyright', + '-readability/multiline_comment', + '-readability/braces', # int foo() {}; + '-readability/fn_size', + '-readability/casting', + '-readability/function', + '-runtime/arrays', # variable length array + '-runtime/casting', + '-runtime/sizeof', + '-runtime/explicit', # explicit + '-runtime/virtual', # virtual dtor + '-runtime/printf', + '-runtime/threadsafe_fn', + '-runtime/rtti', + '-whitespace/blank_line', + '-whitespace/end_of_line', + '-whitespace/labels', + ] + + +# Some files should be skipped when checking style. For example, +# WebKit maintains some files in Mozilla style on purpose to ease +# future merges. +# +# Include a warning for skipped files that are less obvious. 
+SKIPPED_FILES_WITH_WARNING = [ + # The Qt API and tests do not follow WebKit style. + # They follow Qt style. :) + "gtk2drawing.c", # WebCore/platform/gtk/gtk2drawing.c + "gtk2drawing.h", # WebCore/platform/gtk/gtk2drawing.h + "JavaScriptCore/qt/api/", + "WebKit/gtk/tests/", + "WebKit/qt/Api/", + "WebKit/qt/tests/", + ] + + +# Don't include a warning for skipped files that are more common +# and more obvious. +SKIPPED_FILES_WITHOUT_WARNING = [ + "LayoutTests/" + ] + + +def style_categories(): + """Return the set of all categories used by check-webkit-style.""" + # If other processors had categories, we would take their union here. + return CppProcessor.categories + + +def webkit_argument_defaults(): + """Return the DefaultArguments instance for use by check-webkit-style.""" + return ArgumentDefaults(WEBKIT_DEFAULT_OUTPUT_FORMAT, + WEBKIT_DEFAULT_VERBOSITY, + WEBKIT_DEFAULT_FILTER_RULES) + + +def _create_usage(defaults): + """Return the usage string to display for command help. + + Args: + defaults: An ArgumentDefaults instance. + + """ + usage = """ +Syntax: %(program_name)s [--verbose=#] [--git-commit=] [--output=vs7] + [--filter=-x,+y,...] [file] ... + + The style guidelines this tries to follow are here: + http://webkit.org/coding/coding-style.html + + Every style error is given a confidence score from 1-5, with 5 meaning + we are certain of the problem, and 1 meaning it could be a legitimate + construct. This can miss some errors and does not substitute for + code review. + + To prevent specific lines from being linted, add a '// NOLINT' comment to the + end of the line. + + Linted extensions are .cpp, .c and .h. Other file types are ignored. + + The file parameter is optional and accepts multiple files. Leaving + out the file parameter applies the check to all files considered changed + by your source control management system. + + Flags: + + verbose=# + A number 1-5 that restricts output to errors with a confidence + score at or above this value. 
In particular, the value 1 displays + all errors. The default is %(default_verbosity)s. + + git-commit= + Checks the style of everything from the given commit to the local tree. + + output=vs7 + The output format, which may be one of + emacs : to ease emacs parsing + vs7 : compatible with Visual Studio + Defaults to "%(default_output_format)s". Other formats are unsupported. + + filter=-x,+y,... + A comma-separated list of boolean filter rules used to filter + which categories of style guidelines to check. The script checks + a category if the category passes the filter rules, as follows. + + Any webkit category starts out passing. All filter rules are then + evaluated left to right, with later rules taking precedence. For + example, the rule "+foo" passes any category that starts with "foo", + and "-foo" fails any such category. The filter input "-whitespace, + +whitespace/braces" fails the category "whitespace/tab" and passes + "whitespace/braces". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=-whitespace,-runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + Category names appear in error messages in brackets, for example + [whitespace/indent]. To see a list of all categories available to + %(program_name)s, along with which are enabled by default, pass + the empty filter as follows: + --filter= +""" % {'program_name': os.path.basename(sys.argv[0]), + 'default_verbosity': defaults.verbosity, + 'default_output_format': defaults.output_format} + + return usage + + +class CategoryFilter(object): + + """Filters whether to check style categories.""" + + def __init__(self, filter_rules=None): + """Create a category filter. + + This method performs argument validation but does not strip + leading or trailing white space. + + Args: + filter_rules: A list of strings that are filter rules, which + are strings beginning with the plus or minus + symbol (+/-). The list should include any + default filter rules at the beginning. 
+ Defaults to the empty list. + + Raises: + ValueError: Invalid filter rule if a rule does not start with + plus ("+") or minus ("-"). + + """ + if filter_rules is None: + filter_rules = [] + + for rule in filter_rules: + if not (rule.startswith('+') or rule.startswith('-')): + raise ValueError('Invalid filter rule "%s": every rule ' + 'rule in the --filter flag must start ' + 'with + or -.' % rule) + + self._filter_rules = filter_rules + self._should_check_category = {} # Cached dictionary of category to True/False + + def __str__(self): + return ",".join(self._filter_rules) + + # Useful for unit testing. + def __eq__(self, other): + """Return whether this CategoryFilter instance is equal to another.""" + return self._filter_rules == other._filter_rules + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce from __eq__(). + return not (self == other) + + def should_check(self, category): + """Return whether the category should be checked. + + The rules for determining whether a category should be checked + are as follows. By default all categories should be checked. + Then apply the filter rules in order from first to last, with + later flags taking precedence. + + A filter rule applies to a category if the string after the + leading plus/minus (+/-) matches the beginning of the category + name. A plus (+) means the category should be checked, while a + minus (-) means the category should not be checked. + + """ + if category in self._should_check_category: + return self._should_check_category[category] + + should_check = True # All categories checked by default. + for rule in self._filter_rules: + if not category.startswith(rule[1:]): + continue + should_check = rule.startswith('+') + self._should_check_category[category] = should_check # Update cache. + return should_check + + +# This class should not have knowledge of the flag key names. 
+class ProcessorOptions(object): + + """A container to store options to use when checking style. + + Attributes: + output_format: A string that is the output format. The supported + output formats are "emacs" which emacs can parse + and "vs7" which Microsoft Visual Studio 7 can parse. + + verbosity: An integer between 1-5 inclusive that restricts output + to errors with a confidence score at or above this value. + The default is 1, which displays all errors. + + filter: A CategoryFilter instance. The default is the empty filter, + which means that all categories should be checked. + + git_commit: A string representing the git commit to check. + The default is None. + + extra_flag_values: A string-string dictionary of all flag key-value + pairs that are not otherwise represented by this + class. The default is the empty dictionary. + + """ + + def __init__(self, output_format="emacs", verbosity=1, filter=None, + git_commit=None, extra_flag_values=None): + if filter is None: + filter = CategoryFilter() + if extra_flag_values is None: + extra_flag_values = {} + + if output_format not in ("emacs", "vs7"): + raise ValueError('Invalid "output_format" parameter: ' + 'value must be "emacs" or "vs7". ' + 'Value given: "%s".' % output_format) + + if (verbosity < 1) or (verbosity > 5): + raise ValueError('Invalid "verbosity" parameter: ' + "value must be an integer between 1-5 inclusive. " + 'Value given: "%s".' % verbosity) + + self.output_format = output_format + self.verbosity = verbosity + self.filter = filter + self.git_commit = git_commit + self.extra_flag_values = extra_flag_values + + # Useful for unit testing. 
+ def __eq__(self, other): + """Return whether this ProcessorOptions instance is equal to another.""" + if self.output_format != other.output_format: + return False + if self.verbosity != other.verbosity: + return False + if self.filter != other.filter: + return False + if self.git_commit != other.git_commit: + return False + if self.extra_flag_values != other.extra_flag_values: + return False + + return True + + # Useful for unit testing. + def __ne__(self, other): + # Python does not automatically deduce from __eq__(). + return not (self == other) + + def is_reportable(self, category, confidence_in_error): + """Return whether an error is reportable. + + An error is reportable if the confidence in the error + is at least the current verbosity level, and if the current + filter says that the category should be checked. + + Args: + category: A string that is a style category. + confidence_in_error: An integer between 1 and 5, inclusive, that + represents the application's confidence in + the error. A higher number signifies greater + confidence. + + """ + if confidence_in_error < self.verbosity: + return False + + if self.filter is None: + return True # All categories should be checked by default. + + return self.filter.should_check(category) + + +# This class should not have knowledge of the flag key names. +class ArgumentDefaults(object): + + """A container to store default argument values. + + Attributes: + output_format: A string that is the default output format. + verbosity: An integer that is the default verbosity level. + filter_rules: A list of strings that are boolean filter rules + to prepend to any user-specified rules. 
+ + """ + + def __init__(self, default_output_format, default_verbosity, + default_filter_rules): + self.output_format = default_output_format + self.verbosity = default_verbosity + self.filter_rules = default_filter_rules + + +class ArgumentPrinter(object): + + """Supports the printing of check-webkit-style command arguments.""" + + def _flag_pair_to_string(self, flag_key, flag_value): + return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value } + + def to_flag_string(self, options): + """Return a flag string yielding the given ProcessorOptions instance. + + This method orders the flag values alphabetically by the flag key. + + Args: + options: A ProcessorOptions instance. + + """ + flags = options.extra_flag_values.copy() + + flags['output'] = options.output_format + flags['verbose'] = options.verbosity + if options.filter: + # Only include the filter flag if rules are present. + filter_string = str(options.filter) + if filter_string: + flags['filter'] = filter_string + if options.git_commit: + flags['git-commit'] = options.git_commit + + flag_string = '' + # Alphabetizing lets us unit test this method. + for key in sorted(flags.keys()): + flag_string += self._flag_pair_to_string(key, flags[key]) + ' ' + + return flag_string.strip() + + +class ArgumentParser(object): + + """Supports the parsing of check-webkit-style command arguments. + + Attributes: + defaults: An ArgumentDefaults instance. + create_usage: A function that accepts an ArgumentDefaults instance + and returns a string of usage instructions. + This defaults to the function used to generate the + usage string for check-webkit-style. + doc_print: A function that accepts a string parameter and that is + called to display help messages. This defaults to + sys.stderr.write(). 
+ + """ + + def __init__(self, argument_defaults, create_usage=None, doc_print=None): + if create_usage is None: + create_usage = _create_usage + if doc_print is None: + doc_print = sys.stderr.write + + self.defaults = argument_defaults + self.create_usage = create_usage + self.doc_print = doc_print + + def _exit_with_usage(self, error_message=''): + """Exit and print a usage string with an optional error message. + + Args: + error_message: A string that is an error message to print. + + """ + usage = self.create_usage(self.defaults) + self.doc_print(usage) + if error_message: + sys.exit('\nFATAL ERROR: ' + error_message) + else: + sys.exit(1) + + def _exit_with_categories(self): + """Exit and print the style categories and default filter rules.""" + self.doc_print('\nAll categories:\n') + categories = style_categories() + for category in sorted(categories): + self.doc_print(' ' + category + '\n') + + self.doc_print('\nDefault filter rules**:\n') + for filter_rule in sorted(self.defaults.filter_rules): + self.doc_print(' ' + filter_rule + '\n') + self.doc_print('\n**The command always evaluates the above rules, ' + 'and before any --filter flag.\n\n') + + sys.exit(0) + + def _parse_filter_flag(self, flag_value): + """Parse the value of the --filter flag. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + flag_value: A string of comma-separated filter rules, for + example "-whitespace,+whitespace/indent". + + """ + filters = [] + for uncleaned_filter in flag_value.split(','): + filter = uncleaned_filter.strip() + if not filter: + continue + filters.append(filter) + return filters + + def parse(self, args, extra_flags=None): + """Parse the command line arguments to check-webkit-style. + + Args: + args: A list of command-line arguments as returned by sys.argv[1:]. + extra_flags: A list of flags whose values we want to extract, but + are not supported by the ProcessorOptions class. + An example flag "new_flag=". 
This defaults to the + empty list. + + Returns: + A tuple of (filenames, options) + + filenames: The list of filenames to check. + options: A ProcessorOptions instance. + + """ + if extra_flags is None: + extra_flags = [] + + output_format = self.defaults.output_format + verbosity = self.defaults.verbosity + filter_rules = self.defaults.filter_rules + + # The flags already supported by the ProcessorOptions class. + flags = ['help', 'output=', 'verbose=', 'filter=', 'git-commit='] + + for extra_flag in extra_flags: + if extra_flag in flags: + raise ValueError('Flag \'%(extra_flag)s is duplicated ' + 'or already supported.' % + {'extra_flag': extra_flag}) + flags.append(extra_flag) + + try: + (opts, filenames) = getopt.getopt(args, '', flags) + except getopt.GetoptError: + # FIXME: Settle on an error handling approach: come up + # with a consistent guideline as to when and whether + # a ValueError should be raised versus calling + # sys.exit when needing to interrupt execution. + self._exit_with_usage('Invalid arguments.') + + extra_flag_values = {} + git_commit = None + + for (opt, val) in opts: + if opt == '--help': + self._exit_with_usage() + elif opt == '--output': + output_format = val + elif opt == '--verbose': + verbosity = val + elif opt == '--git-commit': + git_commit = val + elif opt == '--filter': + if not val: + self._exit_with_categories() + # Prepend the defaults. + filter_rules = filter_rules + self._parse_filter_flag(val) + else: + extra_flag_values[opt] = val + + # Check validity of resulting values. + if filenames and (git_commit != None): + self._exit_with_usage('It is not possible to check files and a ' + 'specific commit at the same time.') + + if output_format not in ('emacs', 'vs7'): + raise ValueError('Invalid --output value "%s": The only ' + 'allowed output formats are emacs and vs7.' 
% + output_format) + + verbosity = int(verbosity) + if (verbosity < 1) or (verbosity > 5): + raise ValueError('Invalid --verbose value %s: value must ' + 'be between 1-5.' % verbosity) + + filter = CategoryFilter(filter_rules) + + options = ProcessorOptions(output_format, verbosity, filter, + git_commit, extra_flag_values) + + return (filenames, options) + + +# Enum-like idiom +class FileType: + + NONE = 1 + # Alphabetize remaining types + CPP = 2 + TEXT = 3 + + +class ProcessorDispatcher(object): + + """Supports determining whether and how to check style, based on path.""" + + cpp_file_extensions = ( + 'c', + 'cpp', + 'h', + ) + + text_file_extensions = ( + 'css', + 'html', + 'idl', + 'js', + 'mm', + 'php', + 'pm', + 'py', + 'txt', + ) + + def _file_extension(self, file_path): + """Return the file extension without the leading dot.""" + return os.path.splitext(file_path)[1].lstrip(".") + + def should_skip_with_warning(self, file_path): + """Return whether the given file should be skipped with a warning.""" + for skipped_file in SKIPPED_FILES_WITH_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def should_skip_without_warning(self, file_path): + """Return whether the given file should be skipped without a warning.""" + for skipped_file in SKIPPED_FILES_WITHOUT_WARNING: + if file_path.find(skipped_file) >= 0: + return True + return False + + def _file_type(self, file_path): + """Return the file type corresponding to the given file.""" + file_extension = self._file_extension(file_path) + + if (file_extension in self.cpp_file_extensions) or (file_path == '-'): + # FIXME: Do something about the comment below and the issue it + # raises since cpp_style already relies on the extension. + # + # Treat stdin as C++. Since the extension is unknown when + # reading from stdin, cpp_style tests should not rely on + # the extension. 
+ return FileType.CPP + elif ("ChangeLog" in file_path + or "WebKitTools/Scripts/" in file_path + or file_extension in self.text_file_extensions): + return FileType.TEXT + else: + return FileType.NONE + + def _create_processor(self, file_type, file_path, handle_style_error, verbosity): + """Instantiate and return a style processor based on file type.""" + if file_type == FileType.NONE: + processor = None + elif file_type == FileType.CPP: + file_extension = self._file_extension(file_path) + processor = CppProcessor(file_path, file_extension, handle_style_error, verbosity) + elif file_type == FileType.TEXT: + processor = TextProcessor(file_path, handle_style_error) + else: + raise ValueError('Invalid file type "%(file_type)s": the only valid file types ' + "are %(NONE)s, %(CPP)s, and %(TEXT)s." + % {"file_type": file_type, + "NONE": FileType.NONE, + "CPP": FileType.CPP, + "TEXT": FileType.TEXT}) + + return processor + + def dispatch_processor(self, file_path, handle_style_error, verbosity): + """Instantiate and return a style processor based on file path.""" + file_type = self._file_type(file_path) + + processor = self._create_processor(file_type, + file_path, + handle_style_error, + verbosity) + return processor + + +class StyleChecker(object): + + """Supports checking style in files and patches. + + Attributes: + error_count: An integer that is the total number of reported + errors for the lifetime of this StyleChecker + instance. + options: A ProcessorOptions instance that controls the behavior + of style checking. + + """ + + def __init__(self, options, stderr_write=None): + """Create a StyleChecker instance. + + Args: + options: See options attribute. + stderr_write: A function that takes a string as a parameter + and that is called when a style error occurs. + Defaults to sys.stderr.write. This should be + used only for unit tests. 
+ + """ + if stderr_write is None: + stderr_write = sys.stderr.write + + self._stderr_write = stderr_write + self.error_count = 0 + self.options = options + + def _increment_error_count(self): + """Increment the total count of reported errors.""" + self.error_count += 1 + + def _process_file(self, processor, file_path, handle_style_error): + """Process the file using the given processor.""" + try: + # Support the UNIX convention of using "-" for stdin. Note that + # we are not opening the file with universal newline support + # (which codecs doesn't support anyway), so the resulting lines do + # contain trailing '\r' characters if we are reading a file that + # has CRLF endings. + # If after the split a trailing '\r' is present, it is removed + # below. If it is not expected to be present (i.e. os.linesep != + # '\r\n' as in Windows), a warning is issued below if this file + # is processed. + if file_path == '-': + lines = codecs.StreamReaderWriter(sys.stdin, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace').read().split('\n') + else: + lines = codecs.open(file_path, 'r', 'utf8', 'replace').read().split('\n') + + carriage_return_found = False + # Remove trailing '\r'. + for line_number in range(len(lines)): + if lines[line_number].endswith('\r'): + lines[line_number] = lines[line_number].rstrip('\r') + carriage_return_found = True + + except IOError: + self._stderr_write("Skipping input '%s': Can't open for reading\n" % file_path) + return + + processor.process(lines) + + if carriage_return_found and os.linesep != '\r\n': + # FIXME: Make sure this error also shows up when checking + # patches, if appropriate. + # + # Use 0 for line_number since outputting only one error for + # potentially several lines. 
+ handle_style_error(file_path, 0, 'whitespace/newline', 1, + 'One or more unexpected \\r (^M) found;' + 'better to use only a \\n') + + def check_file(self, file_path, handle_style_error=None, process_file=None): + """Check style in the given file. + + Args: + file_path: A string that is the path of the file to process. + handle_style_error: The function to call when a style error + occurs. This parameter is meant for internal + use within this class. Defaults to a + DefaultStyleErrorHandler instance. + process_file: The function to call to process the file. This + parameter should be used only for unit tests. + Defaults to the file processing method of this class. + + """ + if handle_style_error is None: + handle_style_error = DefaultStyleErrorHandler(file_path, + self.options, + self._increment_error_count, + self._stderr_write) + if process_file is None: + process_file = self._process_file + + dispatcher = ProcessorDispatcher() + + if dispatcher.should_skip_without_warning(file_path): + return + if dispatcher.should_skip_with_warning(file_path): + self._stderr_write('Ignoring "%s": this file is exempt from the ' + "style guide.\n" % file_path) + return + + verbosity = self.options.verbosity + processor = dispatcher.dispatch_processor(file_path, + handle_style_error, + verbosity) + if processor is None: + return + + process_file(processor, file_path, handle_style_error) + + def check_patch(self, patch_string): + """Check style in the given patch. + + Args: + patch_string: A string that is a patch string. 
+ + """ + patch_files = parse_patch(patch_string) + for file_path, diff in patch_files.iteritems(): + style_error_handler = PatchStyleErrorHandler(diff, + file_path, + self.options, + self._increment_error_count, + self._stderr_write) + + self.check_file(file_path, style_error_handler) + diff --git a/WebKitTools/Scripts/webkitpy/style/checker_unittest.py b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py new file mode 100755 index 0000000..4d6b2e7 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/checker_unittest.py @@ -0,0 +1,677 @@ +#!/usr/bin/python +# -*- coding: utf-8; -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (chris.jerdonek@gmail.com) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for style.py.""" + +import unittest + +import checker as style +from checker import CategoryFilter +from checker import ProcessorDispatcher +from checker import ProcessorOptions +from checker import StyleChecker +from processors.cpp import CppProcessor +from processors.text import TextProcessor + +class CategoryFilterTest(unittest.TestCase): + + """Tests CategoryFilter class.""" + + def test_init(self): + """Test __init__ constructor.""" + self.assertRaises(ValueError, CategoryFilter, ["no_prefix"]) + CategoryFilter() # No ValueError: works + CategoryFilter(["+"]) # No ValueError: works + CategoryFilter(["-"]) # No ValueError: works + + def test_str(self): + """Test __str__ "to string" operator.""" + filter = CategoryFilter(["+a", "-b"]) + self.assertEquals(str(filter), "+a,-b") + + def test_eq(self): + """Test __eq__ equality function.""" + filter1 = CategoryFilter(["+a", "+b"]) + filter2 = CategoryFilter(["+a", "+b"]) + filter3 = CategoryFilter(["+b", "+a"]) + + # == calls __eq__. + self.assertTrue(filter1 == filter2) + self.assertFalse(filter1 == filter3) # Cannot test with assertNotEqual. + + def test_ne(self): + """Test __ne__ inequality function.""" + # != calls __ne__. + # By default, __ne__ always returns true on different objects. + # Thus, just check the distinguishing case to verify that the + # code defines __ne__. 
+ self.assertFalse(CategoryFilter() != CategoryFilter()) + + def test_should_check(self): + """Test should_check() method.""" + filter = CategoryFilter() + self.assertTrue(filter.should_check("everything")) + # Check a second time to exercise cache. + self.assertTrue(filter.should_check("everything")) + + filter = CategoryFilter(["-"]) + self.assertFalse(filter.should_check("anything")) + # Check a second time to exercise cache. + self.assertFalse(filter.should_check("anything")) + + filter = CategoryFilter(["-", "+ab"]) + self.assertTrue(filter.should_check("abc")) + self.assertFalse(filter.should_check("a")) + + filter = CategoryFilter(["+", "-ab"]) + self.assertFalse(filter.should_check("abc")) + self.assertTrue(filter.should_check("a")) + + +class ProcessorOptionsTest(unittest.TestCase): + + """Tests ProcessorOptions class.""" + + def test_init(self): + """Test __init__ constructor.""" + # Check default parameters. + options = ProcessorOptions() + self.assertEquals(options.extra_flag_values, {}) + self.assertEquals(options.filter, CategoryFilter()) + self.assertEquals(options.git_commit, None) + self.assertEquals(options.output_format, "emacs") + self.assertEquals(options.verbosity, 1) + + # Check argument validation. + self.assertRaises(ValueError, ProcessorOptions, output_format="bad") + ProcessorOptions(output_format="emacs") # No ValueError: works + ProcessorOptions(output_format="vs7") # works + self.assertRaises(ValueError, ProcessorOptions, verbosity=0) + self.assertRaises(ValueError, ProcessorOptions, verbosity=6) + ProcessorOptions(verbosity=1) # works + ProcessorOptions(verbosity=5) # works + + # Check attributes. 
+        options = ProcessorOptions(extra_flag_values={"extra_value" : 2},
+                                   filter=CategoryFilter(["+"]),
+                                   git_commit="commit",
+                                   output_format="vs7",
+                                   verbosity=3)
+        self.assertEquals(options.extra_flag_values, {"extra_value" : 2})
+        self.assertEquals(options.filter, CategoryFilter(["+"]))
+        self.assertEquals(options.git_commit, "commit")
+        self.assertEquals(options.output_format, "vs7")
+        self.assertEquals(options.verbosity, 3)
+
+    def test_eq(self):
+        """Test __eq__ equality function."""
+        # == calls __eq__.
+        self.assertTrue(ProcessorOptions() == ProcessorOptions())
+
+        # Verify that a difference in any argument causes equality to fail.
+        options = ProcessorOptions(extra_flag_values={"extra_value" : 1},
+                                   filter=CategoryFilter(["+"]),
+                                   git_commit="commit",
+                                   output_format="vs7",
+                                   verbosity=1)
+        self.assertFalse(options == ProcessorOptions(extra_flag_values={"extra_value" : 2}))
+        self.assertFalse(options == ProcessorOptions(filter=CategoryFilter(["-"])))
+        self.assertFalse(options == ProcessorOptions(git_commit="commit2"))
+        self.assertFalse(options == ProcessorOptions(output_format="emacs"))
+        self.assertFalse(options == ProcessorOptions(verbosity=2))
+
+    def test_ne(self):
+        """Test __ne__ inequality function."""
+        # != calls __ne__.
+        # By default, __ne__ always returns true on different objects.
+        # Thus, just check the distinguishing case to verify that the
+        # code defines __ne__.
+ self.assertFalse(ProcessorOptions() != ProcessorOptions()) + + def test_is_reportable(self): + """Test is_reportable().""" + filter = CategoryFilter(["-xyz"]) + options = ProcessorOptions(filter=filter, verbosity=3) + + # Test verbosity + self.assertTrue(options.is_reportable("abc", 3)) + self.assertFalse(options.is_reportable("abc", 2)) + + # Test filter + self.assertTrue(options.is_reportable("xy", 3)) + self.assertFalse(options.is_reportable("xyz", 3)) + + +class WebKitArgumentDefaultsTest(unittest.TestCase): + + """Tests validity of default arguments used by check-webkit-style.""" + + def defaults(self): + return style.webkit_argument_defaults() + + def test_filter_rules(self): + defaults = self.defaults() + already_seen = [] + all_categories = style.style_categories() + for rule in defaults.filter_rules: + # Check no leading or trailing white space. + self.assertEquals(rule, rule.strip()) + # All categories are on by default, so defaults should + # begin with -. + self.assertTrue(rule.startswith('-')) + self.assertTrue(rule[1:] in all_categories) + # Check no rule occurs twice. + self.assertFalse(rule in already_seen) + already_seen.append(rule) + + def test_defaults(self): + """Check that default arguments are valid.""" + defaults = self.defaults() + + # FIXME: We should not need to call parse() to determine + # whether the default arguments are valid. + parser = style.ArgumentParser(defaults) + # No need to test the return value here since we test parse() + # on valid arguments elsewhere. 
+ parser.parse([]) # arguments valid: no error or SystemExit + + +class ArgumentPrinterTest(unittest.TestCase): + + """Tests the ArgumentPrinter class.""" + + _printer = style.ArgumentPrinter() + + def _create_options(self, output_format='emacs', verbosity=3, + filter_rules=[], git_commit=None, + extra_flag_values={}): + filter = CategoryFilter(filter_rules) + return style.ProcessorOptions(output_format, verbosity, filter, + git_commit, extra_flag_values) + + def test_to_flag_string(self): + options = self._create_options('vs7', 5, ['+foo', '-bar'], 'git', + {'a': 0, 'z': 1}) + self.assertEquals('--a=0 --filter=+foo,-bar --git-commit=git ' + '--output=vs7 --verbose=5 --z=1', + self._printer.to_flag_string(options)) + + # This is to check that --filter and --git-commit do not + # show up when not user-specified. + options = self._create_options() + self.assertEquals('--output=emacs --verbose=3', + self._printer.to_flag_string(options)) + + +class ArgumentParserTest(unittest.TestCase): + + """Test the ArgumentParser class.""" + + def _parse(self): + """Return a default parse() function for testing.""" + return self._create_parser().parse + + def _create_defaults(self, default_output_format='vs7', + default_verbosity=3, + default_filter_rules=['-', '+whitespace']): + """Return a default ArgumentDefaults instance for testing.""" + return style.ArgumentDefaults(default_output_format, + default_verbosity, + default_filter_rules) + + def _create_parser(self, defaults=None): + """Return an ArgumentParser instance for testing.""" + def create_usage(_defaults): + """Return a usage string for testing.""" + return "usage" + + def doc_print(message): + # We do not want the usage string or style categories + # to print during unit tests, so print nothing. 
+ return + + if defaults is None: + defaults = self._create_defaults() + + return style.ArgumentParser(defaults, create_usage, doc_print) + + def test_parse_documentation(self): + parse = self._parse() + + # FIXME: Test both the printing of the usage string and the + # filter categories help. + + # Request the usage string. + self.assertRaises(SystemExit, parse, ['--help']) + # Request default filter rules and available style categories. + self.assertRaises(SystemExit, parse, ['--filter=']) + + def test_parse_bad_values(self): + parse = self._parse() + + # Pass an unsupported argument. + self.assertRaises(SystemExit, parse, ['--bad']) + + self.assertRaises(ValueError, parse, ['--verbose=bad']) + self.assertRaises(ValueError, parse, ['--verbose=0']) + self.assertRaises(ValueError, parse, ['--verbose=6']) + parse(['--verbose=1']) # works + parse(['--verbose=5']) # works + + self.assertRaises(ValueError, parse, ['--output=bad']) + parse(['--output=vs7']) # works + + # Pass a filter rule not beginning with + or -. + self.assertRaises(ValueError, parse, ['--filter=foo']) + parse(['--filter=+foo']) # works + # Pass files and git-commit at the same time. + self.assertRaises(SystemExit, parse, ['--git-commit=', 'file.txt']) + # Pass an extra flag already supported. + self.assertRaises(ValueError, parse, [], ['filter=']) + parse([], ['extra=']) # works + # Pass an extra flag with typo. 
+ self.assertRaises(SystemExit, parse, ['--extratypo='], ['extra=']) + parse(['--extra='], ['extra=']) # works + self.assertRaises(ValueError, parse, [], ['extra=', 'extra=']) + + + def test_parse_default_arguments(self): + parse = self._parse() + + (files, options) = parse([]) + + self.assertEquals(files, []) + + self.assertEquals(options.output_format, 'vs7') + self.assertEquals(options.verbosity, 3) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace"])) + self.assertEquals(options.git_commit, None) + + def test_parse_explicit_arguments(self): + parse = self._parse() + + # Pass non-default explicit values. + (files, options) = parse(['--output=emacs']) + self.assertEquals(options.output_format, 'emacs') + (files, options) = parse(['--verbose=4']) + self.assertEquals(options.verbosity, 4) + (files, options) = parse(['--git-commit=commit']) + self.assertEquals(options.git_commit, 'commit') + (files, options) = parse(['--filter=+foo,-bar']) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace", "+foo", "-bar"])) + # Spurious white space in filter rules. + (files, options) = parse(['--filter=+foo ,-bar']) + self.assertEquals(options.filter, + CategoryFilter(["-", "+whitespace", "+foo", "-bar"])) + + # Pass extra flag values. + (files, options) = parse(['--extra'], ['extra']) + self.assertEquals(options.extra_flag_values, {'--extra': ''}) + (files, options) = parse(['--extra='], ['extra=']) + self.assertEquals(options.extra_flag_values, {'--extra': ''}) + (files, options) = parse(['--extra=x'], ['extra=']) + self.assertEquals(options.extra_flag_values, {'--extra': 'x'}) + + def test_parse_files(self): + parse = self._parse() + + (files, options) = parse(['foo.cpp']) + self.assertEquals(files, ['foo.cpp']) + + # Pass multiple files. 
+ (files, options) = parse(['--output=emacs', 'foo.cpp', 'bar.cpp']) + self.assertEquals(files, ['foo.cpp', 'bar.cpp']) + + +class ProcessorDispatcherSkipTest(unittest.TestCase): + + """Tests the "should skip" methods of the ProcessorDispatcher class.""" + + def test_should_skip_with_warning(self): + """Test should_skip_with_warning().""" + dispatcher = ProcessorDispatcher() + + # Check a non-skipped file. + self.assertFalse(dispatcher.should_skip_with_warning("foo.txt")) + + # Check skipped files. + paths_to_skip = [ + "gtk2drawing.c", + "gtk2drawing.h", + "JavaScriptCore/qt/api/qscriptengine_p.h", + "WebCore/platform/gtk/gtk2drawing.c", + "WebCore/platform/gtk/gtk2drawing.h", + "WebKit/gtk/tests/testatk.c", + "WebKit/qt/Api/qwebpage.h", + "WebKit/qt/tests/qwebsecurityorigin/tst_qwebsecurityorigin.cpp", + ] + + for path in paths_to_skip: + self.assertTrue(dispatcher.should_skip_with_warning(path), + "Checking: " + path) + + def test_should_skip_without_warning(self): + """Test should_skip_without_warning().""" + dispatcher = ProcessorDispatcher() + + # Check a non-skipped file. + self.assertFalse(dispatcher.should_skip_without_warning("foo.txt")) + + # Check skipped files. 
+ paths_to_skip = [ + # LayoutTests folder + "LayoutTests/foo.txt", + ] + + for path in paths_to_skip: + self.assertTrue(dispatcher.should_skip_without_warning(path), + "Checking: " + path) + + +class ProcessorDispatcherDispatchTest(unittest.TestCase): + + """Tests dispatch_processor() method of ProcessorDispatcher class.""" + + def mock_handle_style_error(self): + pass + + def dispatch_processor(self, file_path): + """Call dispatch_processor() with the given file path.""" + dispatcher = ProcessorDispatcher() + processor = dispatcher.dispatch_processor(file_path, + self.mock_handle_style_error, + verbosity=3) + return processor + + def assert_processor_none(self, file_path): + """Assert that the dispatched processor is None.""" + processor = self.dispatch_processor(file_path) + self.assertTrue(processor is None, 'Checking: "%s"' % file_path) + + def assert_processor(self, file_path, expected_class): + """Assert the type of the dispatched processor.""" + processor = self.dispatch_processor(file_path) + got_class = processor.__class__ + self.assertEquals(got_class, expected_class, + 'For path "%(file_path)s" got %(got_class)s when ' + "expecting %(expected_class)s." + % {"file_path": file_path, + "got_class": got_class, + "expected_class": expected_class}) + + def assert_processor_cpp(self, file_path): + """Assert that the dispatched processor is a CppProcessor.""" + self.assert_processor(file_path, CppProcessor) + + def assert_processor_text(self, file_path): + """Assert that the dispatched processor is a TextProcessor.""" + self.assert_processor(file_path, TextProcessor) + + def test_cpp_paths(self): + """Test paths that should be checked as C++.""" + paths = [ + "-", + "foo.c", + "foo.cpp", + "foo.h", + ] + + for path in paths: + self.assert_processor_cpp(path) + + # Check processor attributes on a typical input. + file_base = "foo" + file_extension = "c" + file_path = file_base + "." 
+ file_extension + self.assert_processor_cpp(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_extension, file_extension) + self.assertEquals(processor.file_path, file_path) + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + self.assertEquals(processor.verbosity, 3) + # Check "-" for good measure. + file_base = "-" + file_extension = "" + file_path = file_base + self.assert_processor_cpp(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_extension, file_extension) + self.assertEquals(processor.file_path, file_path) + + def test_text_paths(self): + """Test paths that should be checked as text.""" + paths = [ + "ChangeLog", + "foo.css", + "foo.html", + "foo.idl", + "foo.js", + "foo.mm", + "foo.php", + "foo.pm", + "foo.py", + "foo.txt", + "FooChangeLog.bak", + "WebCore/ChangeLog", + "WebCore/inspector/front-end/inspector.js", + "WebKitTools/Scripts/check-webkit=style", + "WebKitTools/Scripts/modules/text_style.py", + ] + + for path in paths: + self.assert_processor_text(path) + + # Check processor attributes on a typical input. + file_base = "foo" + file_extension = "css" + file_path = file_base + "." + file_extension + self.assert_processor_text(file_path) + processor = self.dispatch_processor(file_path) + self.assertEquals(processor.file_path, file_path) + self.assertEquals(processor.handle_style_error, self.mock_handle_style_error) + + def test_none_paths(self): + """Test paths that have no file type..""" + paths = [ + "Makefile", + "foo.png", + "foo.exe", + ] + + for path in paths: + self.assert_processor_none(path) + + +class StyleCheckerTest(unittest.TestCase): + + """Test the StyleChecker class. + + Attributes: + error_messages: A string containing all of the warning messages + written to the mock_stderr_write method of + this class. 
+ + """ + + def _mock_stderr_write(self, message): + pass + + def _style_checker(self, options): + return StyleChecker(options, self._mock_stderr_write) + + def test_init(self): + """Test __init__ constructor.""" + options = ProcessorOptions() + style_checker = self._style_checker(options) + + self.assertEquals(style_checker.error_count, 0) + self.assertEquals(style_checker.options, options) + + +class StyleCheckerCheckFileTest(unittest.TestCase): + + """Test the check_file() method of the StyleChecker class. + + The check_file() method calls its process_file parameter when + given a file that should not be skipped. + + The "got_*" attributes of this class are the parameters passed + to process_file by calls to check_file() made by this test + class. These attributes allow us to check the parameter values + passed internally to the process_file function. + + Attributes: + got_file_path: The file_path parameter passed by check_file() + to its process_file parameter. + got_handle_style_error: The handle_style_error parameter passed + by check_file() to its process_file + parameter. + got_processor: The processor parameter passed by check_file() to + its process_file parameter. + warning_messages: A string containing all of the warning messages + written to the mock_stderr_write method of + this class. + + """ + def setUp(self): + self.got_file_path = None + self.got_handle_style_error = None + self.got_processor = None + self.warning_messages = "" + + def mock_stderr_write(self, warning_message): + self.warning_messages += warning_message + + def mock_handle_style_error(self): + pass + + def mock_process_file(self, processor, file_path, handle_style_error): + """A mock _process_file(). + + See the documentation for this class for more information + on this function. 
+ + """ + self.got_file_path = file_path + self.got_handle_style_error = handle_style_error + self.got_processor = processor + + def assert_attributes(self, + expected_file_path, + expected_handle_style_error, + expected_processor, + expected_warning_messages): + """Assert that the attributes of this class equal the given values.""" + self.assertEquals(self.got_file_path, expected_file_path) + self.assertEquals(self.got_handle_style_error, expected_handle_style_error) + self.assertEquals(self.got_processor, expected_processor) + self.assertEquals(self.warning_messages, expected_warning_messages) + + def call_check_file(self, file_path): + """Call the check_file() method of a test StyleChecker instance.""" + # Confirm that the attributes are reset. + self.assert_attributes(None, None, None, "") + + # Create a test StyleChecker instance. + # + # The verbosity attribute is the only ProcessorOptions + # attribute that needs to be checked in this test. + # This is because it is the only option is directly + # passed to the constructor of a style processor. + options = ProcessorOptions(verbosity=3) + + style_checker = StyleChecker(options, self.mock_stderr_write) + + style_checker.check_file(file_path, + self.mock_handle_style_error, + self.mock_process_file) + + def test_check_file_on_skip_without_warning(self): + """Test check_file() for a skipped-without-warning file.""" + + file_path = "LayoutTests/foo.txt" + + dispatcher = ProcessorDispatcher() + # Confirm that the input file is truly a skipped-without-warning file. + self.assertTrue(dispatcher.should_skip_without_warning(file_path)) + + # Check the outcome. + self.call_check_file(file_path) + self.assert_attributes(None, None, None, "") + + def test_check_file_on_skip_with_warning(self): + """Test check_file() for a skipped-with-warning file.""" + + file_path = "gtk2drawing.c" + + dispatcher = ProcessorDispatcher() + # Check that the input file is truly a skipped-with-warning file. 
+ self.assertTrue(dispatcher.should_skip_with_warning(file_path)) + + # Check the outcome. + self.call_check_file(file_path) + self.assert_attributes(None, None, None, + 'Ignoring "gtk2drawing.c": this file is exempt from the style guide.\n') + + def test_check_file_on_non_skipped(self): + + # We use a C++ file since by using a CppProcessor, we can check + # that all of the possible information is getting passed to + # process_file (in particular, the verbosity). + file_base = "foo" + file_extension = "cpp" + file_path = file_base + "." + file_extension + + dispatcher = ProcessorDispatcher() + # Check that the input file is truly a C++ file. + self.assertEquals(dispatcher._file_type(file_path), style.FileType.CPP) + + # Check the outcome. + self.call_check_file(file_path) + + expected_processor = CppProcessor(file_path, file_extension, self.mock_handle_style_error, 3) + + self.assert_attributes(file_path, + self.mock_handle_style_error, + expected_processor, + "") + + +if __name__ == '__main__': + import sys + + unittest.main() + diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers.py b/WebKitTools/Scripts/webkitpy/style/error_handlers.py new file mode 100644 index 0000000..54b1d76 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/error_handlers.py @@ -0,0 +1,154 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Defines style error handler classes. + +A style error handler is a function to call when a style error is +found. Style error handlers can also have state. A class that represents +a style error handler should implement the following methods. + +Methods: + + __call__(self, line_number, category, confidence, message): + + Handle the occurrence of a style error. + + Check whether the error is reportable. If so, report the details. + + Args: + line_number: The integer line number of the line containing the error. + category: The name of the category of the error, for example + "whitespace/newline". + confidence: An integer between 1-5 that represents the level of + confidence in the error. The value 5 means that we are + certain of the problem, and the value 1 means that it + could be a legitimate construct. + message: The error message to report. + +""" + + +import sys + + +class DefaultStyleErrorHandler(object): + + """The default style error handler.""" + + def __init__(self, file_path, options, increment_error_count, + stderr_write=None): + """Create a default style error handler. + + Args: + file_path: The path to the file containing the error. This + is used for reporting to the user. + options: A ProcessorOptions instance. 
+ increment_error_count: A function that takes no arguments and + increments the total count of reportable + errors. + stderr_write: A function that takes a string as a parameter + and that is called when a style error occurs. + Defaults to sys.stderr.write. This should be + used only for unit tests. + + """ + if stderr_write is None: + stderr_write = sys.stderr.write + + self._file_path = file_path + self._increment_error_count = increment_error_count + self._options = options + self._stderr_write = stderr_write + + def __call__(self, line_number, category, confidence, message): + """Handle the occurrence of a style error. + + See the docstring of this module for more information. + + """ + if not self._options.is_reportable(category, confidence): + return + + self._increment_error_count() + + if self._options.output_format == 'vs7': + format_string = "%s(%s): %s [%s] [%d]\n" + else: + format_string = "%s:%s: %s [%s] [%d]\n" + + self._stderr_write(format_string % (self._file_path, + line_number, + message, + category, + confidence)) + + +class PatchStyleErrorHandler(object): + + """The style error function for patch files.""" + + def __init__(self, diff, file_path, options, increment_error_count, + stderr_write): + """Create a patch style error handler for the given path. + + Args: + diff: A DiffFile instance. + Other arguments: see the DefaultStyleErrorHandler.__init__() + documentation for the other arguments. + + """ + self._diff = diff + self._default_error_handler = DefaultStyleErrorHandler(file_path, + options, + increment_error_count, + stderr_write) + + # The line numbers of the modified lines. This is set lazily. + self._line_numbers = set() + + def _get_line_numbers(self): + """Return the line numbers of the modified lines.""" + if not self._line_numbers: + for line in self._diff.lines: + # When deleted line is not set, it means that + # the line is newly added. 
+ if not line[0]: + self._line_numbers.add(line[1]) + + return self._line_numbers + + def __call__(self, line_number, category, confidence, message): + """Handle the occurrence of a style error. + + This function does not report errors occurring in lines not + modified or added. + + Args: see the DefaultStyleErrorHandler.__call__() documentation. + + """ + if line_number not in self._get_line_numbers(): + # Then the error is not reportable. + return + + self._default_error_handler(line_number, category, confidence, + message) + diff --git a/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py new file mode 100644 index 0000000..6a91ff2 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/error_handlers_unittest.py @@ -0,0 +1,163 @@ +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY +# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit tests for error_handlers.py.""" + + +import unittest + +from .. style_references import parse_patch +from checker import ProcessorOptions +from error_handlers import DefaultStyleErrorHandler +from error_handlers import PatchStyleErrorHandler + + +class StyleErrorHandlerTestBase(unittest.TestCase): + + def setUp(self): + self._error_messages = "" + self._error_count = 0 + + def _mock_increment_error_count(self): + self._error_count += 1 + + def _mock_stderr_write(self, message): + self._error_messages += message + + +class DefaultStyleErrorHandlerTest(StyleErrorHandlerTestBase): + + """Tests DefaultStyleErrorHandler class.""" + + _category = "whitespace/tab" + + def _options(self, output_format): + return ProcessorOptions(verbosity=3, output_format=output_format) + + def _error_handler(self, options): + file_path = "foo.h" + return DefaultStyleErrorHandler(file_path, + options, + self._mock_increment_error_count, + self._mock_stderr_write) + + def _prepare_call(self, output_format="emacs"): + """Return options after initializing.""" + options = self._options(output_format) + + # Test that count is initialized to zero. 
+ self.assertEquals(0, self._error_count) + self.assertEquals("", self._error_messages) + + return options + + def _call_error_handler(self, options, confidence): + """Handle an error with given confidence.""" + handle_error = self._error_handler(options) + + line_number = 100 + message = "message" + + handle_error(line_number, self._category, confidence, message) + + def test_call_non_reportable(self): + """Test __call__() method with a non-reportable error.""" + confidence = 1 + options = self._prepare_call() + + # Confirm the error is not reportable. + self.assertFalse(options.is_reportable(self._category, confidence)) + + self._call_error_handler(options, confidence) + + self.assertEquals(0, self._error_count) + self.assertEquals("", self._error_messages) + + def test_call_reportable_emacs(self): + """Test __call__() method with a reportable error and emacs format.""" + confidence = 5 + options = self._prepare_call("emacs") + + self._call_error_handler(options, confidence) + + self.assertEquals(1, self._error_count) + self.assertEquals(self._error_messages, + "foo.h:100: message [whitespace/tab] [5]\n") + + def test_call_reportable_vs7(self): + """Test __call__() method with a reportable error and vs7 format.""" + confidence = 5 + options = self._prepare_call("vs7") + + self._call_error_handler(options, confidence) + + self.assertEquals(1, self._error_count) + self.assertEquals(self._error_messages, + "foo.h(100): message [whitespace/tab] [5]\n") + + +class PatchStyleErrorHandlerTest(StyleErrorHandlerTestBase): + + """Tests PatchStyleErrorHandler class.""" + + file_path = "__init__.py" + + patch_string = """diff --git a/__init__.py b/__init__.py +index ef65bee..e3db70e 100644 +--- a/__init__.py ++++ b/__init__.py +@@ -1 +1,2 @@ + # Required for Python to search this directory for module files ++# New line + +""" + + def test_call(self): + patch_files = parse_patch(self.patch_string) + diff = patch_files[self.file_path] + + options = 
ProcessorOptions(verbosity=3) + + handle_error = PatchStyleErrorHandler(diff, + self.file_path, + options, + self._mock_increment_error_count, + self._mock_stderr_write) + + category = "whitespace/tab" + confidence = 5 + message = "message" + + # Confirm error is reportable. + self.assertTrue(options.is_reportable(category, confidence)) + + # Confirm error count initialized to zero. + self.assertEquals(0, self._error_count) + + # Test error in unmodified line (error count does not increment). + handle_error(1, category, confidence, message) + self.assertEquals(0, self._error_count) + + # Test error in modified line (error count increments). + handle_error(2, category, confidence, message) + self.assertEquals(1, self._error_count) + diff --git a/WebKitTools/Scripts/webkitpy/style/processors/__init__.py b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py new file mode 100644 index 0000000..ef65bee --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/processors/__init__.py @@ -0,0 +1 @@ +# Required for Python to search this directory for module files diff --git a/WebKitTools/Scripts/webkitpy/style/processors/cpp.py b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py new file mode 100644 index 0000000..e1f41a4 --- /dev/null +++ b/WebKitTools/Scripts/webkitpy/style/processors/cpp.py @@ -0,0 +1,3007 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2009 Google Inc. All rights reserved. +# Copyright (C) 2009 Torch Mobile Inc. +# Copyright (C) 2009 Apple Inc. All rights reserved. +# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org) +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# This is the modified version of Google's cpplint. The original code is +# http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py + +"""Support for check-webkit-style.""" + +import codecs +import math # for log +import os +import os.path +import re +import sre_compile +import string +import sys +import unicodedata + + +# Headers that we consider STL headers. 
+_STL_HEADERS = frozenset([ + 'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception', + 'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set', + 'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'pair.h', + 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack', + 'stl_alloc.h', 'stl_relops.h', 'type_traits.h', + 'utility', 'vector', 'vector.h', + ]) + + +# Non-STL C++ system headers. +_CPP_HEADERS = frozenset([ + 'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype', + 'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath', + 'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef', + 'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype', + 'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream', + 'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip', + 'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream.h', + 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h', + 'numeric', 'ostream.h', 'parsestream.h', 'pfstream.h', 'PlotFile.h', + 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h', 'ropeimpl.h', + 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept', + 'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string', + 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray', + ]) + + +# Assertion macros. These are defined in base/logging.h and +# testing/base/gunit.h. Note that the _M versions need to come first +# for substring matching to work. 
+_CHECK_MACROS = [
+    # Per the note above: each _M ("message") variant precedes its plain
+    # counterpart so that substring matching tries the longer name first.
+    'DCHECK', 'CHECK',
+    'EXPECT_TRUE_M', 'EXPECT_TRUE',
+    'ASSERT_TRUE_M', 'ASSERT_TRUE',
+    'EXPECT_FALSE_M', 'EXPECT_FALSE',
+    'ASSERT_FALSE_M', 'ASSERT_FALSE',
+    ]
+
+# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE.
+# Maps macro name -> {comparison operator -> replacement macro name},
+# e.g. _CHECK_REPLACEMENT['DCHECK']['=='] is 'DCHECK_EQ'.
+_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
+
+# The "positive" macros keep the operator's sense in the replacement:
+# CHECK(a == b) suggests CHECK_EQ(a, b), EXPECT_TRUE(a < b) suggests
+# EXPECT_LT(a, b), and so on.
+for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
+                        ('>=', 'GE'), ('>', 'GT'),
+                        ('<=', 'LE'), ('<', 'LT')]:
+    _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
+    _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
+    _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
+    _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
+    _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
+    _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
+
+# The "negative" (FALSE) macros logically invert the comparison:
+# EXPECT_FALSE(a == b) suggests EXPECT_NE(a, b), and '>=' inverts to LT, etc.
+for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
+                            ('>=', 'LT'), ('>', 'LE'),
+                            ('<=', 'GT'), ('<', 'GE')]:
+    _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
+    _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
+    _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
+    _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
+
+
+# These constants define types of headers for use with
+# _IncludeState.check_next_include_order().
+# _MOC_HEADER marks Qt moc-generated includes, which are exempt from the
+# ordering checks (check_next_include_order() returns no error for them).
+_CONFIG_HEADER = 0
+_PRIMARY_HEADER = 1
+_OTHER_HEADER = 2
+_MOC_HEADER = 3
+
+
+# The regexp compilation caching is inlined in all regexp functions for
+# performance reasons; factoring it out into a separate function turns out
+# to be noticeably expensive.
+_regexp_compile_cache = {} + + +def match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) + + +def search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) + + +def sub(pattern, replacement, s): + """Substitutes occurrences of a pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].sub(replacement, s) + + +def subn(pattern, replacement, s): + """Substitutes occurrences of a pattern, caching the compiled regexp.""" + if not pattern in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].subn(replacement, s) + + +def up_to_unmatched_closing_paren(s): + """Splits a string into two parts up to first unmatched ')'. + + Args: + s: a string which is a substring of line after '(' + (e.g., "a == (b + c))"). + + Returns: + A pair of strings (prefix before first unmatched ')', + reminder of s after first unmatched ')'), e.g., + up_to_unmatched_closing_paren("a == (b + c)) { ") + returns "a == (b + c)", " {". + Returns None, None if there is no unmatched ')' + + """ + i = 1 + for pos, c in enumerate(s): + if c == '(': + i += 1 + elif c == ')': + i -= 1 + if i == 0: + return s[:pos], s[pos + 1:] + return None, None + +class _IncludeState(dict): + """Tracks line numbers for includes, and the order in which includes appear. + + As a dict, an _IncludeState object serves as a mapping between include + filename and line number on which that file was included. 
+ + Call check_next_include_order() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. + + """ + # self._section will move monotonically through this set. If it ever + # needs to move backwards, check_next_include_order will raise an error. + _INITIAL_SECTION = 0 + _CONFIG_SECTION = 1 + _PRIMARY_SECTION = 2 + _OTHER_SECTION = 3 + + _TYPE_NAMES = { + _CONFIG_HEADER: 'WebCore config.h', + _PRIMARY_HEADER: 'header this file implements', + _OTHER_HEADER: 'other header', + _MOC_HEADER: 'moc file', + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing.", + _CONFIG_SECTION: "WebCore config.h.", + _PRIMARY_SECTION: 'a header this file implements.', + _OTHER_SECTION: 'other header.', + } + + def __init__(self): + dict.__init__(self) + self._section = self._INITIAL_SECTION + self._visited_primary_section = False + self.header_types = dict(); + + def visited_primary_section(self): + return self._visited_primary_section + + def check_next_include_order(self, header_type, file_is_header): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. + file_is_header: Whether the file that owns this _IncludeState is itself a header + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. + + """ + if header_type == _CONFIG_HEADER and file_is_header: + return 'Header file should not contain WebCore config.h.' + if header_type == _PRIMARY_HEADER and file_is_header: + return 'Header file should not contain itself.' 
+ if header_type == _MOC_HEADER: + return '' + + error_message = '' + if self._section != self._OTHER_SECTION: + before_error_message = ('Found %s before %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section + 1])) + after_error_message = ('Found %s after %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section])) + + if header_type == _CONFIG_HEADER: + if self._section >= self._CONFIG_SECTION: + error_message = after_error_message + self._section = self._CONFIG_SECTION + elif header_type == _PRIMARY_HEADER: + if self._section >= self._PRIMARY_SECTION: + error_message = after_error_message + elif self._section < self._CONFIG_SECTION: + error_message = before_error_message + self._section = self._PRIMARY_SECTION + self._visited_primary_section = True + else: + assert header_type == _OTHER_HEADER + if not file_is_header and self._section < self._PRIMARY_SECTION: + error_message = before_error_message + self._section = self._OTHER_SECTION + + return error_message + + +class _FunctionState(object): + """Tracks current function name and the number of lines in its body. + + Attributes: + verbosity: The verbosity level to use while checking style. + + """ + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. + _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self, verbosity): + self.verbosity = verbosity + self.in_a_function = False + self.lines_in_function = 0 + self.current_function = '' + + def begin(self, function_name): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + """ + self.in_a_function = True + self.lines_in_function = 0 + self.current_function = function_name + + def count(self): + """Count line in current function body.""" + if self.in_a_function: + self.lines_in_function += 1 + + def check(self, error, line_number): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. 
+ line_number: The number of the line to check. + """ + if match(r'T(EST|est)', self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2 ** self.verbosity + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... + if error_level > 5: + error_level = 5 + error(line_number, 'readability/fn_size', error_level, + 'Small and focused functions are preferred:' + ' %s has %d non-comment lines' + ' (error triggered by exceeding %d lines).' % ( + self.current_function, self.lines_in_function, trigger)) + + def end(self): + """Stop analizing function body.""" + self.in_a_function = False + + +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" + pass + + +def is_c_or_objective_c(file_extension): + """Return whether the file extension corresponds to C or Objective-C. + + Args: + file_extension: The file extension without the leading dot. + + """ + return file_extension in ['c', 'm'] + + +class FileInfo: + """Provides utility functions for filenames. + + FileInfo provides easy access to the components of a file's path + relative to the project root. + """ + + def __init__(self, filename): + self._filename = filename + + def full_name(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace('\\', '/') + + def repository_name(self): + """Full name after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. 
+ """ + fullname = self.full_name() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we + # recursively look up the directory tree for the top + # of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Not SVN? Try to find a git top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while (root_dir != os.path.dirname(root_dir) + and not os.path.exists(os.path.join(root_dir, ".git"))): + root_dir = os.path.dirname(root_dir) + if os.path.exists(os.path.join(root_dir, ".git")): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def split(self): + """Splits the file into the directory, basename, and extension. + + For 'chrome/browser/browser.cpp', Split() would + return ('chrome/browser', 'browser', '.cpp') + + Returns: + A tuple of (directory, basename, extension). 
+ """ + + googlename = self.repository_name() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def base_name(self): + """File base name - text after the final slash, before the final period.""" + return self.split()[1] + + def extension(self): + """File extension - text following the final period.""" + return self.split()[2] + + def no_extension(self): + """File has no source file extension.""" + return '/'.join(self.split()[0:2]) + + def is_source(self): + """File has a source file extension.""" + return self.extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + + +# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard. +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( + r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +# Matches strings. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') +# Matches characters. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") +# Matches multi-line C++ comments. +# This RE is a little bit more complicated than one might expect, because we +# have to take care of space removals tools so we can handle comments inside +# statements better. +# The current rule is: We only clear spaces from both sides when we're at the +# end of the line. Otherwise, we try to remove spaces from the right side, +# if this doesn't work we try on left side but only if there's a non-character +# on the right. +_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( + r"""(\s*/\*.*\*/\s*$| + /\*.*\*/\s+| + \s+/\*.*\*/(?=\W)| + /\*.*\*/)""", re.VERBOSE) + + +def is_cpp_string(line): + """Does line terminate so, that the next symbol is in string constant. + + This function does not consider single-line nor multi-line comments. + + Args: + line: is a partial line of code starting from the 0..n. + + Returns: + True, if next character appended to 'line' is inside a + string constant. 
+ """ + + line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" + return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + + +def find_next_multi_line_comment_start(lines, line_index): + """Find the beginning marker for a multiline comment.""" + while line_index < len(lines): + if lines[line_index].strip().startswith('/*'): + # Only return this marker if the comment goes beyond this line + if lines[line_index].strip().find('*/', 2) < 0: + return line_index + line_index += 1 + return len(lines) + + +def find_next_multi_line_comment_end(lines, line_index): + """We are inside a comment, find the end marker.""" + while line_index < len(lines): + if lines[line_index].strip().endswith('*/'): + return line_index + line_index += 1 + return len(lines) + + +def remove_multi_line_comments_from_range(lines, begin, end): + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code. + for i in range(begin, end): + lines[i] = '// dummy' + + +def remove_multi_line_comments(lines, error): + """Removes multiline (c-style) comments from lines.""" + line_index = 0 + while line_index < len(lines): + line_index_begin = find_next_multi_line_comment_start(lines, line_index) + if line_index_begin >= len(lines): + return + line_index_end = find_next_multi_line_comment_end(lines, line_index_begin) + if line_index_end >= len(lines): + error(line_index_begin + 1, 'readability/multiline_comment', 5, + 'Could not find end of multi-line comment') + return + remove_multi_line_comments_from_range(lines, line_index_begin, line_index_end + 1) + line_index = line_index_end + 1 + + +def cleanse_comments(line): + """Removes //-comments and single-line C-style /* */ comments. + + Args: + line: A line of C++ source. + + Returns: + The line with single-line comments removed. 
+ """ + comment_position = line.find('//') + if comment_position != -1 and not is_cpp_string(line[:comment_position]): + line = line[:comment_position] + # get rid of /* ... */ + return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) + + +class CleansedLines(object): + """Holds 3 copies of all lines with different preprocessing applied to them. + + 1) elided member contains lines without strings and comments, + 2) lines member contains lines without comments, and + 3) raw member contains all the lines without processing. + All these three members are of , and of the same length. + """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self._num_lines = len(lines) + for line_number in range(len(lines)): + self.lines.append(cleanse_comments(lines[line_number])) + elided = self.collapse_strings(lines[line_number]) + self.elided.append(cleanse_comments(elided)) + + def num_lines(self): + """Returns the number of lines represented.""" + return self._num_lines + + @staticmethod + def collapse_strings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if not _RE_PATTERN_INCLUDE.match(elided): + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) + elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) + elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) + return elided + + +def close_expression(clean_lines, line_number, pos): + """If input points to ( or { or [, finds the position that closes it. 
+ + If lines[line_number][pos] points to a '(' or '{' or '[', finds the the + line_number/pos that correspond to the closing of the expression. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + pos: A position on the line. + + Returns: + A tuple (line, line_number, pos) pointer *past* the closing brace, or + (line, len(lines), -1) if we never find a close. Note we ignore + strings and comments when matching; and the line we return is the + 'cleansed' line at line_number. + """ + + line = clean_lines.elided[line_number] + start_character = line[pos] + if start_character not in '({[': + return (line, clean_lines.num_lines(), -1) + if start_character == '(': + end_character = ')' + if start_character == '[': + end_character = ']' + if start_character == '{': + end_character = '}' + + num_open = line.count(start_character) - line.count(end_character) + while line_number < clean_lines.num_lines() and num_open > 0: + line_number += 1 + line = clean_lines.elided[line_number] + num_open += line.count(start_character) - line.count(end_character) + # OK, now find the end_character that actually got us back to even + endpos = len(line) + while num_open >= 0: + endpos = line.rfind(')', 0, endpos) + num_open -= 1 # chopped off another ) + return (line, line_number, endpos + 1) + + +def check_for_copyright(lines, error): + """Logs an error if no Copyright message appears at the top of the file.""" + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in xrange(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): + break + else: # means no copyright line was found + error(0, 'legal/copyright', 5, + 'No copyright message found. ' + 'You should have a line: "Copyright [year] "') + + +def get_header_guard_cpp_variable(filename): + """Returns the CPP variable that should be used as a header guard. 
+ + Args: + filename: The name of a C++ header file. + + Returns: + The CPP variable that should be used as a header guard in the + named file. + + """ + + return sub(r'[-.\s]', '_', os.path.basename(filename)) + + +def check_for_header_guard(filename, lines, error): + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + cppvar = get_header_guard_cpp_variable(filename) + + ifndef = None + ifndef_line_number = 0 + define = None + for line_number, line in enumerate(lines): + line_split = line.split() + if len(line_split) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and line_split[0] == '#ifndef': + # set ifndef to the header guard presented on the #ifndef line. + ifndef = line_split[1] + ifndef_line_number = line_number + if not define and line_split[0] == '#define': + define = line_split[1] + if define and ifndef: + break + + if not ifndef or not define or ifndef != define: + error(0, 'build/header_guard', 5, + 'No #ifndef header guard found, suggested CPP variable is: %s' % + cppvar) + return + + # The guard should be File_h. + if ifndef != cppvar: + error(ifndef_line_number, 'build/header_guard', 5, + '#ifndef header guard has wrong style, please use: %s' % cppvar) + + +def check_for_unicode_replacement_characters(lines, error): + """Logs an error for each line containing Unicode replacement characters. + + These indicate that either the file contained invalid UTF-8 (likely) + or Unicode replacement characters (which it shouldn't). Note that + it's possible for this to throw off line numbering if the invalid + UTF-8 occurred adjacent to a newline. + + Args: + lines: An array of strings, each representing a line of the file. 
+ error: The function to call with any errors found. + """ + for line_number, line in enumerate(lines): + if u'\ufffd' in line: + error(line_number, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + + +def check_for_new_line_at_eof(lines, error): + """Logs an error if there is no newline char at the end of the file. + + Args: + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error(len(lines) - 2, 'whitespace/ending_newline', 5, + 'Could not find a newline character at the end of the file.') + + +def check_for_multiline_comments_and_strings(clean_lines, line_number, error): + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. Likewise, it's ok for strings to extend across multiple + lines, as long as a line continuation character (backslash) + terminates each line. Although not currently prohibited by the C++ + style guide, it's ugly and unnecessary. We don't do well with either + in this lint program, so we warn about both. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + + # Remove all \\ (escaped backslashes) from the line. They are OK, and the + # second (escaped) slash may trigger later \" detection erroneously. 
+ line = line.replace('\\\\', '') + + if line.count('/*') > line.count('*/'): + error(line_number, 'readability/multiline_comment', 5, + 'Complex multi-line /*...*/-style comment found. ' + 'Lint may give bogus warnings. ' + 'Consider replacing these with //-style comments, ' + 'with #if 0...#endif, ' + 'or with more clearly structured multi-line comments.') + + if (line.count('"') - line.count('\\"')) % 2: + error(line_number, 'readability/multiline_string', 5, + 'Multi-line string ("...") found. This lint script doesn\'t ' + 'do well with such strings, and may give bogus warnings. They\'re ' + 'ugly and unnecessary, and you should use concatenation instead".') + + +_THREADING_LIST = ( + ('asctime(', 'asctime_r('), + ('ctime(', 'ctime_r('), + ('getgrgid(', 'getgrgid_r('), + ('getgrnam(', 'getgrnam_r('), + ('getlogin(', 'getlogin_r('), + ('getpwnam(', 'getpwnam_r('), + ('getpwuid(', 'getpwuid_r('), + ('gmtime(', 'gmtime_r('), + ('localtime(', 'localtime_r('), + ('rand(', 'rand_r('), + ('readdir(', 'readdir_r('), + ('strtok(', 'strtok_r('), + ('ttyname(', 'ttyname_r('), + ) + + +def check_posix_threading(clean_lines, line_number, error): + """Checks for calls to thread-unsafe functions. + + Much code has been originally written without consideration of + multi-threading. Also, engineers are relying on their old experience; + they have learned posix before threading extensions were added. These + tests guide the engineers to use thread-safe functions (when using + posix directly). + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. 
+ """ + line = clean_lines.elided[line_number] + for single_thread_function, multithread_safe_function in _THREADING_LIST: + index = line.find(single_thread_function) + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if index >= 0 and (index == 0 or (not line[index - 1].isalnum() + and line[index - 1] not in ('_', '.', '>'))): + error(line_number, 'runtime/threadsafe_fn', 2, + 'Consider using ' + multithread_safe_function + + '...) instead of ' + single_thread_function + + '...) for improved thread safety.') + + +# Matches invalid increment: *count++, which moves pointer instead of +# incrementing a value. +_RE_PATTERN_INVALID_INCREMENT = re.compile( + r'^\s*\*\w+(\+\+|--);') + + +def check_invalid_increment(clean_lines, line_number, error): + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[line_number] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error(line_number, 'runtime/invalid_increment', 5, + 'Changing pointer instead of value (or unused value of operator*).') + + +class _ClassInfo(object): + """Stores information about a class.""" + + def __init__(self, name, line_number): + self.name = name + self.line_number = line_number + self.seen_open_brace = False + self.is_derived = False + self.virtual_method_line_number = None + self.has_virtual_destructor = False + self.brace_depth = 0 + + +class _ClassState(object): + """Holds the current state of the parse relating to class declarations. + + It maintains a stack of _ClassInfos representing the parser's guess + as to the current nesting of class declarations. 
The innermost class + is at the top (back) of the stack. Typically, the stack will either + be empty or have exactly one entry. + """ + + def __init__(self): + self.classinfo_stack = [] + + def check_finished(self, error): + """Checks that all classes have been completely parsed. + + Call this when all lines in a file have been processed. + Args: + error: The function to call with any errors found. + """ + if self.classinfo_stack: + # Note: This test can result in false positives if #ifdef constructs + # get in the way of brace matching. See the testBuildClass test in + # cpp_style_unittest.py for an example of this. + error(self.classinfo_stack[0].line_number, 'build/class', 5, + 'Failed to find complete declaration of class %s' % + self.classinfo_stack[0].name) + + +class _FileState(object): + def __init__(self): + self._did_inside_namespace_indent_warning = False + + def set_did_inside_namespace_indent_warning(self): + self._did_inside_namespace_indent_warning = True + + def did_inside_namespace_indent_warning(self): + return self._did_inside_namespace_indent_warning + +def check_for_non_standard_constructs(clean_lines, line_number, + class_state, error): + """Logs an error if we see certain non-ANSI constructs ignored by gcc-2. + + Complain about several constructs which gcc-2 accepts, but which are + not standard C++. Warning about these in lint is one way to ease the + transition to new compilers. + - put storage class first (e.g. "static const" instead of "const static"). + - "%lld" instead of %qd" in printf-type functions. + - "%1$d" is non-standard in printf-type functions. + - "\%" is an undefined character escape sequence. + - text after #endif is not allowed. + - invalid inner-style forward declaration. + - >? and ?= and )\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', line): + error(line_number, 'build/deprecated', 3, + '>? 
and ,:]*>\s*)?(class|struct)\s+(\w+(::\w+)*)', line) + if class_decl_match: + classinfo_stack.append(_ClassInfo(class_decl_match.group(3), line_number)) + + # Everything else in this function uses the top of the stack if it's + # not empty. + if not classinfo_stack: + return + + classinfo = classinfo_stack[-1] + + # If the opening brace hasn't been seen look for it and also + # parent class declarations. + if not classinfo.seen_open_brace: + # If the line has a ';' in it, assume it's a forward declaration or + # a single-line class declaration, which we won't process. + if line.find(';') != -1: + classinfo_stack.pop() + return + classinfo.seen_open_brace = (line.find('{') != -1) + # Look for a bare ':' + if search('(^|[^:]):($|[^:])', line): + classinfo.is_derived = True + if not classinfo.seen_open_brace: + return # Everything else in this function is for after open brace + + # The class may have been declared with namespace or classname qualifiers. + # The constructor and destructor will not have those qualifiers. + base_classname = classinfo.name.split('::')[-1] + + # Look for single-argument constructors that aren't marked explicit. + # Technically a valid construct, but against style. + args = match(r'(?= 0 + and match(r' {6}\w', elided[search_position])): + search_position -= 1 + exception = (search_position >= 0 + and elided[search_position][:5] == ' :') + else: + # Search for the function arguments or an initializer list. We use a + # simple heuristic here: If the line is indented 4 spaces; and we have a + # closing paren, without the opening paren, followed by an opening brace + # or colon (for initializer lists) we assume that it is the last line of + # a function header. If we have a colon indented 4 spaces, it is an + # initializer list. 
+ exception = (match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', + previous_line) + or match(r' {4}:', previous_line)) + + if not exception: + error(line_number, 'whitespace/blank_line', 2, + 'Blank line at the start of a code block. Is this needed?') + # This doesn't ignore whitespace at the end of a namespace block + # because that is too hard without pairing open/close braces; + # however, a special exception is made for namespace closing + # brackets which have a comment containing "namespace". + # + # Also, ignore blank lines at the end of a block in a long if-else + # chain, like this: + # if (condition1) { + # // Something followed by a blank line + # + # } else if (condition2) { + # // Something else + # } + if line_number + 1 < clean_lines.num_lines(): + next_line = raw[line_number + 1] + if (next_line + and match(r'\s*}', next_line) + and next_line.find('namespace') == -1 + and next_line.find('} else ') == -1): + error(line_number, 'whitespace/blank_line', 3, + 'Blank line at the end of a code block. Is this needed?') + + # Next, we complain if there's a comment too near the text + comment_position = line.find('//') + if comment_position != -1: + # Check if the // may be in quotes. If so, ignore it + # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 + if (line.count('"', 0, comment_position) - line.count('\\"', 0, comment_position)) % 2 == 0: # not in quotes + # Allow one space before end of line comment. + if (not match(r'^\s*$', line[:comment_position]) + and (comment_position >= 1 + and ((line[comment_position - 1] not in string.whitespace) + or (comment_position >= 2 + and line[comment_position - 2] in string.whitespace)))): + error(line_number, 'whitespace/comments', 5, + 'One space before end of line comments') + # There should always be a space between the // and the comment + commentend = comment_position + 2 + if commentend < len(line) and not line[commentend] == ' ': + # but some lines are exceptions -- e.g. 
if they're big + # comment delimiters like: + # //---------------------------------------------------------- + # or they begin with multiple slashes followed by a space: + # //////// Header comment + matched = (search(r'[=/-]{4,}\s*$', line[commentend:]) + or search(r'^/+ ', line[commentend:])) + if not matched: + error(line_number, 'whitespace/comments', 4, + 'Should have a space between // and comment') + + line = clean_lines.elided[line_number] # get rid of comments and strings + + # Don't try to do spacing checks for operator methods + line = sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) + # Don't try to do spacing checks for #include or #import statements at + # minimum because it messes up checks for spacing around / + if match(r'\s*#\s*(?:include|import)', line): + return + if search(r'[\w.]=[\w.]', line): + error(line_number, 'whitespace/operators', 4, + 'Missing spaces around =') + + # FIXME: It's not ok to have spaces around binary operators like . + + # You should always have whitespace around binary operators. + # Alas, we can't test < or > because they're legitimately used sans spaces + # (a->b, vector a). The only time we can tell is a < with no >, and + # only if it's not template params list spilling into the next line. + matched = search(r'[^<>=!\s](==|!=|\+=|-=|\*=|/=|/|\|=|&=|<<=|>>=|<=|>=|\|\||\||&&|>>|<<)[^<>=!\s]', line) + if not matched: + # Note that while it seems that the '<[^<]*' term in the following + # regexp could be simplified to '<.*', which would indeed match + # the same class of strings, the [^<] means that searching for the + # regexp takes linear rather than quadratic time. 
+ if not search(r'<[^<]*,\s*$', line): # template params spill + matched = search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line) + if matched: + error(line_number, 'whitespace/operators', 3, + 'Missing spaces around %s' % matched.group(1)) + + # There shouldn't be space around unary operators + matched = search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line) + if matched: + error(line_number, 'whitespace/operators', 4, + 'Extra space for operator %s' % matched.group(1)) + + # A pet peeve of mine: no spaces after an if, while, switch, or for + matched = search(r' (if\(|for\(|foreach\(|while\(|switch\()', line) + if matched: + error(line_number, 'whitespace/parens', 5, + 'Missing space before ( in %s' % matched.group(1)) + + # For if/for/foreach/while/switch, the left and right parens should be + # consistent about how many spaces are inside the parens, and + # there should either be zero or one spaces inside the parens. + # We don't want: "if ( foo)" or "if ( foo )". + # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed. 
+ matched = search(r'\b(?P<statement>if|for|foreach|while|switch)\s*\((?P<reminder>.*)$', line) + if matched: + statement = matched.group('statement') + condition, rest = up_to_unmatched_closing_paren(matched.group('reminder')) + if condition is not None: + condition_match = search(r'(?P<leading>[ ]*)(?P<separator>.).*[^ ]+(?P<trailing>[ ]*)', condition) + if condition_match: + n_leading = len(condition_match.group('leading')) + n_trailing = len(condition_match.group('trailing')) + if n_leading != n_trailing: + for_exception = statement == 'for' and ( + (condition.startswith(' ;') and n_trailing == 0) or + (condition.endswith('; ') and n_leading == 0)) + if not for_exception: + error(line_number, 'whitespace/parens', 5, + 'Mismatching spaces inside () in %s' % statement) + if n_leading > 1: + error(line_number, 'whitespace/parens', 5, + 'Should have zero or one spaces inside ( and ) in %s' % + statement) + + # Do not check for more than one command in macros + in_macro = match(r'\s*#define', line) + if not in_macro and not match(r'((\s*{\s*}?)|(\s*;?))\s*\\?$', rest): + error(line_number, 'whitespace/parens', 4, + 'More than one command on the same line in %s' % statement) + + # You should always have a space after a comma (either as fn arg or operator) + if search(r',[^\s]', line): + error(line_number, 'whitespace/comma', 3, + 'Missing space after ,') + + if file_extension == 'cpp': + # C++ should have the & or * beside the type not the variable name. + matched = match(r'\s*\w+(?<!\breturn|\bdelete)\s+(?P<pointer_operator>\*|\&)\w+', line) + if matched: + error(line_number, 'whitespace/declaration', 3, + 'Declaration has space between type name and %s in %s' % (matched.group('pointer_operator'), matched.group(0).strip())) + + elif file_extension == 'c': + # C Pointer declaration should have the * beside the variable not the type name.
+ matched = search(r'^\s*\w+\*\s+\w+', line) + if matched: + error(line_number, 'whitespace/declaration', 3, + 'Declaration has space between * and variable name in %s' % matched.group(0).strip()) + + # Next we will look for issues with function calls. + check_spacing_for_function_call(line, line_number, error) + + # Except after an opening paren, you should have spaces before your braces. + # And since you should never have braces at the beginning of a line, this is + # an easy test. + if search(r'[^ ({]{', line): + error(line_number, 'whitespace/braces', 5, + 'Missing space before {') + + # Make sure '} else {' has spaces. + if search(r'}else', line): + error(line_number, 'whitespace/braces', 5, + 'Missing space before else') + + # You shouldn't have spaces before your brackets, except maybe after + # 'delete []' or 'new char * []'. + if search(r'\w\s+\[', line) and not search(r'delete\s+\[', line): + error(line_number, 'whitespace/braces', 5, + 'Extra space before [') + + # You shouldn't have a space before a semicolon at the end of the line. + # There's a special case for "for" since the style guide allows space before + # the semicolon there. + if search(r':\s*;\s*$', line): + error(line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement. Use { } instead.') + elif search(r'^\s*;\s*$', line): + error(line_number, 'whitespace/semicolon', 5, + 'Line contains only semicolon. If this should be an empty statement, ' + 'use { } instead.') + elif (search(r'\s+;\s*$', line) and not search(r'\bfor\b', line)): + error(line_number, 'whitespace/semicolon', 5, + 'Extra space before last semicolon. If this should be an empty ' + 'statement, use { } instead.') + elif (search(r'\b(for|while)\s*\(.*\)\s*;\s*$', line) + and line.count('(') == line.count(')') + # Allow do {} while(); + and not search(r'}\s*while', line)): + error(line_number, 'whitespace/semicolon', 5, + 'Semicolon defining empty statement for this loop. 
Use { } instead.') + + +def get_previous_non_blank_line(clean_lines, line_number): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + line_number: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. + """ + + previous_line_number = line_number - 1 + while previous_line_number >= 0: + previous_line = clean_lines.elided[previous_line_number] + if not is_blank_line(previous_line): # if not a blank line... + return (previous_line, previous_line_number) + previous_line_number -= 1 + return ('', -1) + + +def check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error): + """Looks for indentation errors inside of namespaces. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + file_extension: The extension (dot not included) of the file. + file_state: A _FileState instance which maintains information about + the state of things in the file. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + namespace_match = match(r'(?P<namespace_indentation>\s*)namespace\s+\S+\s*{\s*$', line) + if not namespace_match: + return + + current_indentation_level = len(namespace_match.group('namespace_indentation')) + if current_indentation_level > 0: + # Don't warn about an indented namespace if we already warned about indented code.
+ if not file_state.did_inside_namespace_indent_warning(): + error(line_number, 'whitespace/indent', 4, + 'namespace should never be indented.') + return + looking_for_semicolon = False; + line_offset = 0 + in_preprocessor_directive = False; + for current_line in clean_lines.elided[line_number + 1:]: + line_offset += 1 + if not current_line.strip(): + continue + if not current_indentation_level: + if not (in_preprocessor_directive or looking_for_semicolon): + if not match(r'\S', current_line) and not file_state.did_inside_namespace_indent_warning(): + file_state.set_did_inside_namespace_indent_warning() + error(line_number + line_offset, 'whitespace/indent', 4, + 'Code inside a namespace should not be indented.') + if in_preprocessor_directive or (current_line.strip()[0] == '#'): # This takes care of preprocessor directive syntax. + in_preprocessor_directive = current_line[-1] == '\\' + else: + looking_for_semicolon = ((current_line.find(';') == -1) and (current_line.strip()[-1] != '}')) or (current_line[-1] == '\\') + else: + looking_for_semicolon = False; # If we have a brace we may not need a semicolon. + current_indentation_level += current_line.count('{') - current_line.count('}') + if current_indentation_level < 0: + break; + +def check_using_std(file_extension, clean_lines, line_number, error): + """Looks for 'using std::foo;' statements which should be replaced with 'using namespace std;'. + + Args: + file_extension: The extension of the current file, without the leading dot. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + # This check doesn't apply to C or Objective-C implementation files. + if is_c_or_objective_c(file_extension): + return + + line = clean_lines.elided[line_number] # Get rid of comments and strings. 
+ + using_std_match = match(r'\s*using\s+std::(?P<method_name>\S+)\s*;\s*$', line) + if not using_std_match: + return + + method_name = using_std_match.group('method_name') + error(line_number, 'build/using_std', 4, + "Use 'using namespace std;' instead of 'using std::%s;'." % method_name) + + +def check_max_min_macros(file_extension, clean_lines, line_number, error): + """Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min(). + + Args: + file_extension: The extension of the current file, without the leading dot. + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + # This check doesn't apply to C or Objective-C implementation files. + if is_c_or_objective_c(file_extension): + return + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + max_min_macros_search = search(r'\b(?P<max_min_macro>(MAX|MIN))\s*\(', line) + if not max_min_macros_search: + return + + max_min_macro = max_min_macros_search.group('max_min_macro') + max_min_macro_lower = max_min_macro.lower() + error(line_number, 'runtime/max_min_macros', 4, + 'Use std::%s() or std::%s() instead of the %s() macro.' + % (max_min_macro_lower, max_min_macro_lower, max_min_macro)) + + +def check_switch_indentation(clean_lines, line_number, error): + """Looks for indentation errors inside of switch statements. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings.
+ + switch_match = match(r'(?P<switch_indentation>\s*)switch\s*\(.+\)\s*{\s*$', line) + if not switch_match: + return + + switch_indentation = switch_match.group('switch_indentation') + inner_indentation = switch_indentation + ' ' * 4 + line_offset = 0 + encountered_nested_switch = False + + for current_line in clean_lines.elided[line_number + 1:]: + line_offset += 1 + + # Skip not only empty lines but also those with preprocessor directives. + if current_line.strip() == '' or current_line.startswith('#'): + continue + + if match(r'\s*switch\s*\(.+\)\s*{\s*$', current_line): + # Complexity alarm - another switch statement nested inside the one + # that we're currently testing. We'll need to track the extent of + # that inner switch if the upcoming label tests are still supposed + # to work correctly. Let's not do that; instead, we'll finish + # checking this line, and then leave it like that. Assuming the + # indentation is done consistently (even if incorrectly), this will + # still catch all indentation issues in practice. + encountered_nested_switch = True + + current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line); + current_indentation = current_indentation_match.group('indentation') + remaining_line = current_indentation_match.group('remaining_line') + + # End the check at the end of the switch statement. + if remaining_line.startswith('}') and current_indentation == switch_indentation: + break + # Case and default branches should not be indented. The regexp also + # catches single-line cases like "default: break;" but does not trigger + # on stuff like "Document::Foo();". + elif match(r'(default|case\s+.*)\s*:([^:].*)?$', remaining_line): + if current_indentation != switch_indentation: + error(line_number + line_offset, 'whitespace/indent', 4, + 'A case label should not be indented, but line up with its switch statement.') + # Don't throw an error for multiple badly indented labels, + # one should be enough to figure out the problem.
+ break + # We ignore goto labels at the very beginning of a line. + elif match(r'\w+\s*:\s*$', remaining_line): + continue + # It's not a goto label, so check if it's indented at least as far as + # the switch statement plus one more level of indentation. + elif not current_indentation.startswith(inner_indentation): + error(line_number + line_offset, 'whitespace/indent', 4, + 'Non-label code inside switch statements should be indented.') + # Don't throw an error for multiple badly indented statements, + # one should be enough to figure out the problem. + break + + if encountered_nested_switch: + break + + +def check_braces(clean_lines, line_number, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + if match(r'\s*{\s*$', line): + # We allow an open brace to start a line in the case where someone + # is using braces for function definition or in a block to + # explicitly create a new scope, which is commonly used to control + # the lifetime of stack-allocated variables. We don't detect this + # perfectly: we just don't complain if the last non-whitespace + # character on the previous non-blank line is ';', ':', '{', '}', + # ')', or ') const' and doesn't begin with 'if|for|while|switch|else'. + # We also allow '#' for #endif and '=' for array initialization. 
+ previous_line = get_previous_non_blank_line(clean_lines, line_number)[0] + if ((not search(r'[;:}{)=]\s*$|\)\s*const\s*$', previous_line) + or search(r'\b(if|for|foreach|while|switch|else)\b', previous_line)) + and previous_line.find('#') < 0): + error(line_number, 'whitespace/braces', 4, + 'This { should be at the end of the previous line') + elif (search(r'\)\s*(const\s*)?{\s*$', line) + and line.count('(') == line.count(')') + and not search(r'\b(if|for|foreach|while|switch)\b', line) + and not match(r'\s+[A-Z_][A-Z_0-9]+\b', line)): + error(line_number, 'whitespace/braces', 4, + 'Place brace on its own line for function definitions.') + + if (match(r'\s*}\s*(else\s*({\s*)?)?$', line) and line_number > 1): + # We check if a closed brace has started a line to see if a + # one line control statement was previous. + previous_line = clean_lines.elided[line_number - 2] + if (previous_line.find('{') > 0 + and search(r'\b(if|for|foreach|while|else)\b', previous_line)): + error(line_number, 'whitespace/braces', 4, + 'One line control clauses should not use braces.') + + # An else clause should be on the same line as the preceding closing brace. + if match(r'\s*else\s*', line): + previous_line = get_previous_non_blank_line(clean_lines, line_number)[0] + if match(r'\s*}\s*$', previous_line): + error(line_number, 'whitespace/newline', 4, + 'An else should appear on the same line as the preceding }') + + # Likewise, an else should never have the else clause on the same line + if search(r'\belse [^\s{]', line) and not search(r'\belse if\b', line): + error(line_number, 'whitespace/newline', 4, + 'Else clause should never be on same line as else (use 2 lines)') + + # In the same way, a do/while should never be on one line + if match(r'\s*do [^\s{]', line): + error(line_number, 'whitespace/newline', 4, + 'do/while clauses should not be on a single line') + + # Braces shouldn't be followed by a ; unless they're defining a struct + # or initializing an array. 
+ # We can't tell in general, but we can for some common cases. + previous_line_number = line_number + while True: + (previous_line, previous_line_number) = get_previous_non_blank_line(clean_lines, previous_line_number) + if match(r'\s+{.*}\s*;', line) and not previous_line.count(';'): + line = previous_line + line + else: + break + if (search(r'{.*}\s*;', line) + and line.count('{') == line.count('}') + and not search(r'struct|class|enum|\s*=\s*{', line)): + error(line_number, 'readability/braces', 4, + "You don't need a ; after a }") + + +def check_exit_statement_simplifications(clean_lines, line_number, error): + """Looks for else or else-if statements that should be written as an + if statement when the prior if concludes with a return, break, continue or + goto statement. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[line_number] # Get rid of comments and strings. + + else_match = match(r'(?P<else_indentation>\s*)(\}\s*)?else(\s+if\s*\(|(?P<else>\s*(\{\s*)?\Z))', line) + if not else_match: + return + + else_indentation = else_match.group('else_indentation') + inner_indentation = else_indentation + ' ' * 4 + + previous_lines = clean_lines.elided[:line_number] + previous_lines.reverse() + line_offset = 0 + encountered_exit_statement = False + + for current_line in previous_lines: + line_offset -= 1 + + # Skip not only empty lines but also those with preprocessor directives + # and goto labels. + if current_line.strip() == '' or current_line.startswith('#') or match(r'\w+\s*:\s*$', current_line): + continue + + # Skip lines with closing braces on the original indentation level. + # Even though the styleguide says they should be on the same line as + # the "else if" statement, we also want to check for instances where + # the current code does not comply with the coding style.
Thus, ignore + # these lines and proceed to the line before that. + if current_line == else_indentation + '}': + continue + + current_indentation_match = match(r'(?P<indentation>\s*)(?P<remaining_line>.*)$', current_line); + current_indentation = current_indentation_match.group('indentation') + remaining_line = current_indentation_match.group('remaining_line') + + # As we're going up the lines, the first real statement to encounter + # has to be an exit statement (return, break, continue or goto) - + # otherwise, this check doesn't apply. + if not encountered_exit_statement: + # We only want to find exit statements if they are on exactly + # the same level of indentation as expected from the code inside + # the block. If the indentation doesn't strictly match then we + # might have a nested if or something, which must be ignored. + if current_indentation != inner_indentation: + break + if match(r'(return(\W+.*)|(break|continue)\s*;|goto\s*\w+;)$', remaining_line): + encountered_exit_statement = True + continue + break + + # When code execution reaches this point, we've found an exit statement + # as last statement of the previous block. Now we only need to make + # sure that the block belongs to an "if", then we can throw an error. + + # Skip lines with opening braces on the original indentation level, + # similar to the closing braces check above. ("if (condition)\n{") + if current_line == else_indentation + '{': + continue + + # Skip everything that's further indented than our "else" or "else if". + if current_indentation.startswith(else_indentation) and current_indentation != else_indentation: + continue + + # So we've got a line with same (or less) indentation. Is it an "if"? + # If yes: throw an error. If no: don't throw an error. + # Whatever the outcome, this is the end of our loop.
+ if match(r'if\s*\(', remaining_line): + if else_match.start('else') != -1: + error(line_number + line_offset, 'readability/control_flow', 4, + 'An else statement can be removed when the prior "if" ' + 'concludes with a return, break, continue or goto statement.') + else: + error(line_number + line_offset, 'readability/control_flow', 4, + 'An else if statement should be written as an if statement ' + 'when the prior "if" concludes with a return, break, ' + 'continue or goto statement.') + break + + +def replaceable_check(operator, macro, line): + """Determine whether a basic CHECK can be replaced with a more specific one. + + For example suggest using CHECK_EQ instead of CHECK(a == b) and + similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. + + Args: + operator: The C++ operator used in the CHECK. + macro: The CHECK or EXPECT macro being called. + line: The current source line. + + Returns: + True if the CHECK can be replaced with a more specific one. + """ + + # This matches decimal and hex integers, strings, and chars (in that order). + match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')' + + # Expression to match two sides of the operator with something that + # looks like a literal, since CHECK(x == iterator) won't compile. + # This means we can't catch all the cases where a more specific + # CHECK is possible, but it's less annoying than dealing with + # extraneous warnings. + match_this = (r'\s*' + macro + r'\((\s*' + + match_constant + r'\s*' + operator + r'[^<>].*|' + r'.*[^<>]' + operator + r'\s*' + match_constant + + r'\s*\))') + + # Don't complain about CHECK(x == NULL) or similar because + # CHECK_EQ(x, NULL) won't compile (requires a cast). + # Also, don't complain about more complex boolean expressions + # involving && or || such as CHECK(a == b || c == d). 
+ return match(match_this, line) and not search(r'NULL|&&|\|\|', line) + + +def check_check(clean_lines, line_number, error): + """Checks the use of CHECK and EXPECT macros. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + error: The function to call with any errors found. + """ + + # Decide the set of replacement macros that should be suggested + raw_lines = clean_lines.raw_lines + current_macro = '' + for macro in _CHECK_MACROS: + if raw_lines[line_number].find(macro) >= 0: + current_macro = macro + break + if not current_macro: + # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' + return + + line = clean_lines.elided[line_number] # get rid of comments and strings + + # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc. + for operator in ['==', '!=', '>=', '>', '<=', '<']: + if replaceable_check(operator, current_macro, line): + error(line_number, 'readability/check', 2, + 'Consider using %s instead of %s(a %s b)' % ( + _CHECK_REPLACEMENT[current_macro][operator], + current_macro, operator)) + break + + +def check_for_comparisons_to_zero(clean_lines, line_number, error): + # Get the line without comments and strings. + line = clean_lines.elided[line_number] + + # Include NULL here so that users don't have to convert NULL to 0 first and then get this error. + if search(r'[=!]=\s*(NULL|0|true|false)\W', line) or search(r'\W(NULL|0|true|false)\s*[=!]=', line): + error(line_number, 'readability/comparison_to_zero', 5, + 'Tests for true/false, null/non-null, and zero/non-zero should all be done without equality comparisons.') + + +def check_for_null(file_extension, clean_lines, line_number, error): + # This check doesn't apply to C or Objective-C implementation files. + if is_c_or_objective_c(file_extension): + return + + line = clean_lines.elided[line_number] + + # Don't warn about NULL usage in g_object_{get,set}(). 
See Bug 32858 + if search(r'\bg_object_[sg]et\b', line): + return + + if search(r'\bNULL\b', line): + error(line_number, 'readability/null', 5, 'Use 0 instead of NULL.') + return + + line = clean_lines.raw_lines[line_number] + # See if NULL occurs in any comments in the line. If the search for NULL using the raw line + # matches, then do the check with strings collapsed to avoid giving errors for + # NULLs occurring in strings. + if search(r'\bNULL\b', line) and search(r'\bNULL\b', CleansedLines.collapse_strings(line)): + error(line_number, 'readability/null', 4, 'Use 0 instead of NULL.') + +def get_line_width(line): + """Determines the width of the line in column positions. + + Args: + line: A string, which may be a Unicode string. + + Returns: + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ + if isinstance(line, unicode): + width = 0 + for c in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(c) in ('W', 'F'): + width += 2 + elif not unicodedata.combining(c): + width += 1 + return width + return len(line) + + +def check_style(clean_lines, line_number, file_extension, file_state, error): + """Checks rules from the 'C++ style rules' section of cppguide.html. + + Most of these rules are hard to test (naming, comment style), but we + do what we can. In particular we check for 4-space indents, line lengths, + tab usage, spaces inside code, etc. + + Args: + clean_lines: A CleansedLines instance containing the file. + line_number: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + file_state: A _FileState instance which maintains information about + the state of things in the file. + error: The function to call with any errors found. 
+ """ + + raw_lines = clean_lines.raw_lines + line = raw_lines[line_number] + + if line.find('\t') != -1: + error(line_number, 'whitespace/tab', 1, + 'Tab found; better to use spaces') + + # One or three blank spaces at the beginning of the line is weird; it's + # hard to reconcile that with 4-space indents. + # NOTE: here are the conditions rob pike used for his tests. Mine aren't + # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces + # if(RLENGTH > 20) complain = 0; + # if(match($0, " +(error|private|public|protected):")) complain = 0; + # if(match(prev, "&& *$")) complain = 0; + # if(match(prev, "\\|\\| *$")) complain = 0; + # if(match(prev, "[\",=><] *$")) complain = 0; + # if(match($0, " <<")) complain = 0; + # if(match(prev, " +for \\(")) complain = 0; + # if(prevodd && match(prevprev, " +for \\(")) complain = 0; + initial_spaces = 0 + cleansed_line = clean_lines.elided[line_number] + while initial_spaces < len(line) and line[initial_spaces] == ' ': + initial_spaces += 1 + if line and line[-1].isspace(): + error(line_number, 'whitespace/end_of_line', 4, + 'Line ends in whitespace. Consider deleting these extra spaces.') + # There are certain situations we allow one space, notably for labels + elif ((initial_spaces >= 1 and initial_spaces <= 3) + and not match(r'\s*\w+\s*:\s*$', cleansed_line)): + error(line_number, 'whitespace/indent', 3, + 'Weird number of spaces at line-start. ' + 'Are you using a 4-space indent?') + # Labels should always be indented at least one space. + elif not initial_spaces and line[:2] != '//': + label_match = match(r'(?P