Plugins/CheckerPlugins/Pep8/pep8.py

changeset:   2863:62171fa4a6a4
parent:      2862:a1448560d7dc
child:       2864:d973dab8b715
--- a/Plugins/CheckerPlugins/Pep8/pep8.py	Fri Aug 23 20:08:42 2013 +0200
+++ b/Plugins/CheckerPlugins/Pep8/pep8.py	Fri Aug 23 20:10:36 2013 +0200
@@ -712,8 +712,6 @@
             if start[1] not in indent_chances:
                 # allow to line up tokens
                 indent_chances[start[1]] = text
-##
-##        last_token_multiline = (start[0] != end[0])
 
     if indent_next and expand_indent(line) == indent_level + 4:
         yield last_indent, "E125"
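
The surviving E125 test above hinges on expand_indent(), which measures a line's
leading whitespace with tab stops of eight columns. A minimal re-implementation
sketch of that helper (pep8.py ships its own version; this one only illustrates
the semantics the check relies on):

    def expand_indent(line):
        # Width of the leading whitespace; a tab advances to the
        # next multiple of 8, matching pep8's indentation model.
        indent = 0
        for char in line:
            if char == '\t':
                indent = indent // 8 * 8 + 8
            elif char == ' ':
                indent += 1
            else:
                break
        return indent

    assert expand_indent('    pass') == 4
    assert expand_indent('\t pass') == 9
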
@@ -1889,448 +1887,6 @@
         return sorted(checks)
 
 
-##class Checker(object):
-##    """
-##    Load a Python source file, tokenize it, check coding style.
-##    """
-##
-##    def __init__(self, filename, lines=None):
-##        self.filename = filename
-##        if filename is None:
-##            self.filename = 'stdin'
-##            self.lines = lines or []
-##        elif lines is None:
-##            self.lines = readlines(filename)
-##        else:
-##            self.lines = lines
-##        options.counters['physical lines'] += len(self.lines)
-##
-##    def readline(self):
-##        """
-##        Get the next line from the input buffer.
-##        """
-##        self.line_number += 1
-##        if self.line_number > len(self.lines):
-##            return ''
-##        return self.lines[self.line_number - 1]
-##
-##    def readline_check_physical(self):
-##        """
-##        Check and return the next physical line. This method can be
-##        used to feed tokenize.generate_tokens.
-##        """
-##        line = self.readline()
-##        if line:
-##            self.check_physical(line)
-##        return line
-##
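
readline_check_physical() exists so that the physical checks run as a side
effect while the tokenizer pulls lines: tokenize.generate_tokens() accepts any
callable that returns one source line per call. A runnable illustration of that
hand-off (the checking hook is stubbed out here):

    import io
    import tokenize

    buffer = io.StringIO("x = (1 +\n     2)\n")

    def readline():
        line = buffer.readline()
        # ... the physical checks would run on `line` here ...
        return line

    for tok in tokenize.generate_tokens(readline):
        print(tok[2][0], tokenize.tok_name[tok[0]], repr(tok[1]))
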
-##    def run_check(self, check, argument_names):
-##        """
-##        Run a check plugin.
-##        """
-##        arguments = []
-##        for name in argument_names:
-##            arguments.append(getattr(self, name))
-##        return check(*arguments)
-##
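
run_check() binds each check's declared parameter names to attributes of the
same name on the Checker, so every check asks for exactly the state it needs.
In the real file the argument names are collected once when the checks are
registered; this sketch, with a hypothetical State class and demo check,
inlines the introspection for brevity:

    import inspect

    class State:
        logical_line = "x=1"
        indent_level = 0

    def run_check(state, check):
        # Bind each parameter of the check to the state attribute
        # of the same name.
        names = inspect.signature(check).parameters
        return check(*(getattr(state, name) for name in names))

    def demo_check(logical_line, indent_level):
        if "=" in logical_line:
            return 1, "E225", logical_line

    print(run_check(State(), demo_check))  # (1, 'E225', 'x=1')
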
-##    def check_physical(self, line):
-##        """
-##        Run all physical checks on a raw input line.
-##        """
-##        self.physical_line = line
-##        if self.indent_char is None and len(line) and line[0] in ' \t':
-##            self.indent_char = line[0]
-##        for name, check, argument_names in options.physical_checks:
-##            result = self.run_check(check, argument_names)
-##            if result is not None:
-##                offset, code, *args = result
-##                self.report_error_args(self.line_number, offset, code, check,
-##                    *args)
-##
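
A physical check in this file's convention receives named arguments such as
physical_line and returns (offset, code, *message_args) or None; note that the
eric fork unpacks the code separately from the message text, where upstream
pep8 embedded the code in a single string. A hypothetical check in that shape
(an illustrative sketch, not the shipped E501 check):

    def maximum_line_length(physical_line, max_line_length=79):
        # Return (column offset, message code, *arguments) or None,
        # the result shape unpacked by check_physical() above.
        stripped = physical_line.rstrip()
        if len(stripped) > max_line_length:
            return max_line_length, "E501", len(stripped)
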
-##    def build_tokens_line(self):
-##        """
-##        Build a logical line from tokens.
-##        """
-##        self.mapping = []
-##        logical = []
-##        length = 0
-##        previous = None
-##        for token in self.tokens:
-##            token_type, text = token[0:2]
-##            if token_type in SKIP_TOKENS:
-##                continue
-##            if token_type == tokenize.STRING:
-##                text = mute_string(text)
-##            if previous:
-##                end_line, end = previous[3]
-##                start_line, start = token[2]
-##                if end_line != start_line:  # different row
-##                    prev_text = self.lines[end_line - 1][end - 1]
-##                    if prev_text == ',' or (prev_text not in '{[('
-##                                            and text not in '}])'):
-##                        logical.append(' ')
-##                        length += 1
-##                elif end != start:  # different column
-##                    fill = self.lines[end_line - 1][end:start]
-##                    logical.append(fill)
-##                    length += len(fill)
-##            self.mapping.append((length, token))
-##            logical.append(text)
-##            length += len(text)
-##            previous = token
-##        self.logical_line = ''.join(logical)
-##        assert self.logical_line.lstrip() == self.logical_line
-##        assert self.logical_line.rstrip() == self.logical_line
-##
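
mute_string(), used when assembling the logical line above, blanks out a string
literal's payload so that quotes, brackets and commas inside literals cannot
confuse the logical checks. A sketch consistent with the behaviour relied on
here (pep8.py defines its own implementation):

    def mute_string(text):
        # '"abc"' -> '"xxx"', "r'''ab'''" -> "r'''xx'''": keep the
        # quotes and any prefix, overwrite the payload with 'x'.
        start = text.index(text[-1]) + 1
        end = len(text) - 1
        if text[-3:] in ('"""', "'''"):
            start += 2
            end -= 2
        return text[:start] + 'x' * (end - start) + text[end:]

    assert mute_string('"abc"') == '"xxx"'
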
-##    def check_logical(self):
-##        """
-##        Build a line from tokens and run all logical checks on it.
-##        """
-##        options.counters['logical lines'] += 1
-##        self.build_tokens_line()
-##        first_line = self.lines[self.mapping[0][1][2][0] - 1]
-##        indent = first_line[:self.mapping[0][1][2][1]]
-##        self.previous_indent_level = self.indent_level
-##        self.indent_level = expand_indent(indent)
-##        if options.verbose >= 2:
-##            print(self.logical_line[:80].rstrip())
-##        for name, check, argument_names in options.logical_checks:
-##            if options.verbose >= 4:
-##                print('   ' + name)
-##            result = self.run_check(check, argument_names)
-##            if result is not None:
-##                offset, code, *args = result
-##                if isinstance(offset, tuple):
-##                    original_number, original_offset = offset
-##                else:
-##                    for token_offset, token in self.mapping:
-##                        if offset >= token_offset:
-##                            original_number = token[2][0]
-##                            original_offset = (token[2][1]
-##                                               + offset - token_offset)
-##                self.report_error_args(original_number, original_offset,
-##                                       code, check, *args)
-##        self.previous_logical = self.logical_line
-##
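
The loop above translates an offset into the joined logical line back to a
physical (row, column) position via the (logical_offset, token) pairs recorded
in self.mapping: the last token starting at or before the offset anchors the
translation. The same idea as a standalone helper (hypothetical name):

    def map_offset(mapping, offset):
        # mapping: [(offset_in_logical_line, token), ...] in source
        # order; token[2] is the token's physical (row, col) start.
        row, col = mapping[0][1][2]
        for token_offset, token in mapping:
            if offset >= token_offset:
                row = token[2][0]
                col = token[2][1] + offset - token_offset
        return row, col
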
-##    def check_all(self, expected=None, line_offset=0):
-##        """
-##        Run all checks on the input file.
-##        """
-##        self.expected = expected or ()
-##        self.line_offset = line_offset
-##        self.line_number = 0
-##        self.file_errors = 0
-##        self.indent_char = None
-##        self.indent_level = 0
-##        self.previous_logical = ''
-##        self.blank_lines = 0
-##        self.blank_lines_before_comment = 0
-##        self.tokens = []
-##        parens = 0
-##        try:
-##            for token in tokenize.generate_tokens(self.readline_check_physical):
-##                if options.verbose >= 3:
-##                    if token[2][0] == token[3][0]:
-##                        pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
-##                    else:
-##                        pos = 'l.%s' % token[3][0]
-##                    print('l.%s\t%s\t%s\t%r' %
-##                        (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
-##                self.tokens.append(token)
-##                token_type, text = token[0:2]
-##                if token_type == tokenize.OP and text in '([{':
-##                    parens += 1
-##                if token_type == tokenize.OP and text in '}])':
-##                    parens -= 1
-##                if token_type == tokenize.NEWLINE and not parens:
-##                    self.check_logical()
-##                    self.blank_lines = 0
-##                    self.blank_lines_before_comment = 0
-##                    self.tokens = []
-##                if token_type == tokenize.NL and not parens:
-##                    if len(self.tokens) <= 1:
-##                        # The physical line contains only this token.
-##                        self.blank_lines += 1
-##                    self.tokens = []
-##                if token_type == tokenize.COMMENT:
-##                    source_line = token[4]
-##                    token_start = token[2][1]
-##                    if source_line[:token_start].strip() == '':
-##                        self.blank_lines_before_comment = max(self.blank_lines,
-##                            self.blank_lines_before_comment)
-##                        self.blank_lines = 0
-##                    if text.endswith('\n') and not parens:
-##                        # The comment also ends a physical line.  This works around
-##                        # Python < 2.6 behaviour, which does not generate NL after
-##                        # a comment which is on a line by itself.
-##                        self.tokens = []
-##        except tokenize.TokenError as err:
-##            msg, (lnum, pos) = err.args
-##            self.report_error_args(lnum, pos, "E901", "TokenError", msg)
-##        return self.file_errors
-##
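
check_all() leans on the tokenizer's distinction between NEWLINE, which
terminates a logical line, and NL, a newline with no logical meaning (a blank
line, or a line break inside open brackets); only a NEWLINE outside
parentheses flushes the token buffer into check_logical(). A quick
demonstration:

    import io
    import tokenize

    source = "x = 1\n\ny = (2,\n     3)\n"
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok[0] in (tokenize.NEWLINE, tokenize.NL):
            print("line %d: %s" % (tok[2][0], tokenize.tok_name[tok[0]]))
    # line 1: NEWLINE -- ends the logical line 'x = 1'
    # line 2: NL      -- blank line
    # line 3: NL      -- newline inside the open parenthesis
    # line 4: NEWLINE -- ends the logical line 'y = (2, 3)'
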
-##    def report_error(self, line_number, offset, text, check):
-##        """
-##        Report an error, according to options.
-##        """
-##        code = text[:4]
-##        if ignore_code(code):
-##            return
-##        if options.quiet == 1 and not self.file_errors:
-##            message(self.filename)
-##        if code in options.counters:
-##            options.counters[code] += 1
-##        else:
-##            options.counters[code] = 1
-##            options.messages[code] = text[5:]
-##        if options.quiet or code in self.expected:
-##            # Don't care about expected errors or warnings
-##            return
-##        self.file_errors += 1
-##        if options.counters[code] == 1 or options.repeat:
-##            message("%s:%s:%d: %s" %
-##                    (self.filename, self.line_offset + line_number,
-##                     offset + 1, text))
-##            if options.show_source:
-##                line = self.lines[line_number - 1]
-##                message(line.rstrip())
-##                message(' ' * offset + '^')
-##            if options.show_pep8:
-##                message(check.__doc__.lstrip('\n').rstrip())
-##
-##
-##def input_file(filename):
-##    """
-##    Run all checks on a Python source file.
-##    """
-##    if options.verbose:
-##        message('checking ' + filename)
-##    Checker(filename).check_all()
-##
-##
-##def input_dir(dirname, runner=None):
-##    """
-##    Check all Python source files in this directory and all subdirectories.
-##    """
-##    dirname = dirname.rstrip('/')
-##    if excluded(dirname):
-##        return
-##    if runner is None:
-##        runner = input_file
-##    for root, dirs, files in os.walk(dirname):
-##        if options.verbose:
-##            message('directory ' + root)
-##        options.counters['directories'] += 1
-##        dirs.sort()
-##        for subdir in dirs:
-##            if excluded(subdir):
-##                dirs.remove(subdir)
-##        files.sort()
-##        for filename in files:
-##            if filename_match(filename) and not excluded(filename):
-##                options.counters['files'] += 1
-##                runner(os.path.join(root, filename))
-##
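
One detail worth noting in the code being removed: the pruning loop calls
dirs.remove(subdir) while iterating over dirs, which skips the entry that
follows each removal, so consecutive excluded directories are not all pruned.
The usual idiom is a slice assignment, which keeps os.walk honouring the
pruned list without mutating it mid-iteration (excluded() is a stand-in
predicate in this sketch):

    import os

    def walk_python_files(top, excluded=lambda name: name.startswith('.')):
        for root, dirs, files in os.walk(top):
            # Slice-assign so os.walk still sees the pruned list,
            # without removing items during iteration.
            dirs[:] = sorted(d for d in dirs if not excluded(d))
            for filename in sorted(files):
                if filename.endswith('.py') and not excluded(filename):
                    yield os.path.join(root, filename)
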
-##
-##def excluded(filename):
-##    """
-##    Check if options.exclude contains a pattern that matches filename.
-##    """
-##    basename = os.path.basename(filename)
-##    for pattern in options.exclude:
-##        if fnmatch(basename, pattern):
-##            # print basename, 'excluded because it matches', pattern
-##            return True
-##
-##
-##def filename_match(filename):
-##    """
-##    Check if options.filename contains a pattern that matches filename.
-##    If options.filename is unspecified, this always returns True.
-##    """
-##    if not options.filename:
-##        return True
-##    for pattern in options.filename:
-##        if fnmatch(filename, pattern):
-##            return True
-##
-##
-##def ignore_code(code):
-##    """
-##    Check if options.ignore contains a prefix of the error code.
-##    If options.select contains a prefix of the error code, do not ignore it.
-##    """
-##    for select in options.select:
-##        if code.startswith(select):
-##            return False
-##    for ignore in options.ignore:
-##        if code.startswith(ignore):
-##            return True
-##
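
ignore_code() gives --select precedence over --ignore, and both lists match by
prefix, so 'E4' covers every E4xx code and 'E' every error. When neither list
matches, the function falls off the end and returns None, which callers treat
the same as False. A compact sketch of the same rule, returning an explicit
bool:

    def ignore_code(code, select=(), ignore=("E24",)):
        # Prefix matching; select wins over ignore.
        if any(code.startswith(s) for s in select):
            return False
        return any(code.startswith(i) for i in ignore)

    assert ignore_code("E241") is True
    assert ignore_code("E241", select=("E24",)) is False
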
-##
-##def reset_counters():
-##    for key in list(options.counters.keys()):
-##        if key not in BENCHMARK_KEYS:
-##            del options.counters[key]
-##    options.messages = {}
-##
-##
-##def get_error_statistics():
-##    """Get error statistics."""
-##    return get_statistics("E")
-##
-##
-##def get_warning_statistics():
-##    """Get warning statistics."""
-##    return get_statistics("W")
-##
-##
-##def get_statistics(prefix=''):
-##    """
-##    Get statistics for message codes that start with the prefix.
-##
-##    prefix='' matches all errors and warnings
-##    prefix='E' matches all errors
-##    prefix='W' matches all warnings
-##    prefix='E4' matches all errors that have to do with imports
-##    """
-##    stats = []
-##    keys = list(options.messages.keys())
-##    keys.sort()
-##    for key in keys:
-##        if key.startswith(prefix):
-##            stats.append('%-7s %s %s' %
-##                         (options.counters[key], key, options.messages[key]))
-##    return stats
-##
-##
-##def get_count(prefix=''):
-##    """Return the total count of errors and warnings."""
-##    keys = list(options.messages.keys())
-##    count = 0
-##    for key in keys:
-##        if key.startswith(prefix):
-##            count += options.counters[key]
-##    return count
-##
-##
-##def print_statistics(prefix=''):
-##    """Print overall statistics (number of errors and warnings)."""
-##    for line in get_statistics(prefix):
-##        print(line)
-##
-##
-##def print_benchmark(elapsed):
-##    """
-##    Print benchmark numbers.
-##    """
-##    print('%-7.2f %s' % (elapsed, 'seconds elapsed'))
-##    for key in BENCHMARK_KEYS:
-##        print('%-7d %s per second (%d total)' % (
-##            options.counters[key] / elapsed, key,
-##            options.counters[key]))
-##
-##
-##def run_tests(filename):
-##    """
-##    Run all the tests from a file.
-##
-##    A test file can provide many tests.  Each test starts with a declaration.
-##    This declaration is a single line starting with '#:'.
-##    It declares codes of expected failures, separated by spaces or 'Okay'
-##    if no failure is expected.
-##    If the file does not contain such declaration, it should pass all tests.
-##    If the declaration is empty, following lines are not checked, until next
-##    declaration.
-##
-##    Examples:
-##
-##     * Only E224 and W701 are expected:         #: E224 W701
-##     * Following example is conform:            #: Okay
-##     * Don't check these lines:                 #:
-##    """
-##    lines = readlines(filename) + ['#:\n']
-##    line_offset = 0
-##    codes = ['Okay']
-##    testcase = []
-##    for index, line in enumerate(lines):
-##        if not line.startswith('#:'):
-##            if codes:
-##                # Collect the lines of the test case
-##                testcase.append(line)
-##            continue
-##        if codes and index > 0:
-##            label = '%s:%s:1' % (filename, line_offset + 1)
-##            codes = [c for c in codes if c != 'Okay']
-##            # Run the checker
-##            errors = Checker(filename, testcase).check_all(codes, line_offset)
-##            # Check if the expected errors were found
-##            for code in codes:
-##                if not options.counters.get(code):
-##                    errors += 1
-##                    message('%s: error %s not found' % (label, code))
-##            if options.verbose and not errors:
-##                message('%s: passed (%s)' % (label, ' '.join(codes)))
-##            # Keep showing errors for multiple tests
-##            reset_counters()
-##        # output the real line numbers
-##        line_offset = index
-##        # configure the expected errors
-##        codes = line.split()[1:]
-##        # empty the test case buffer
-##        del testcase[:]
-##
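
Following the docstring above, a minimal hypothetical test file interleaves
'#:' declarations with the snippets they govern; lines after a bare '#:' are
skipped until the next declaration:

    #: E225
    i=i+1
    #: Okay
    i = i + 1
    #:
    this block is not checked at all
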
-##
-##def selftest():
-##    """
-##    Test all check functions with test cases in docstrings.
-##    """
-##    count_passed = 0
-##    count_failed = 0
-##    checks = options.physical_checks + options.logical_checks
-##    for name, check, argument_names in checks:
-##        for line in check.__doc__.splitlines():
-##            line = line.lstrip()
-##            match = SELFTEST_REGEX.match(line)
-##            if match is None:
-##                continue
-##            code, source = match.groups()
-##            checker = Checker(None)
-##            for part in source.split(r'\n'):
-##                part = part.replace(r'\t', '\t')
-##                part = part.replace(r'\s', ' ')
-##                checker.lines.append(part + '\n')
-##            options.quiet = 2
-##            checker.check_all()
-##            error = None
-##            if code == 'Okay':
-##                if len(options.counters) > len(BENCHMARK_KEYS):
-##                    codes = [key for key in options.counters.keys()
-##                             if key not in BENCHMARK_KEYS]
-##                    error = "incorrectly found %s" % ', '.join(codes)
-##            elif not options.counters.get(code):
-##                error = "failed to find %s" % code
-##            # Reset the counters
-##            reset_counters()
-##            if not error:
-##                count_passed += 1
-##            else:
-##                count_failed += 1
-##                if len(checker.lines) == 1:
-##                    print("pep8.py: %s: %s" %
-##                          (error, checker.lines[0].rstrip()))
-##                else:
-##                    print("pep8.py: %s:" % error)
-##                    for line in checker.lines:
-##                        print(line.rstrip())
-##    if options.verbose:
-##        print("%d passed and %d failed." % (count_passed, count_failed))
-##        if count_failed:
-##            print("Test failed.")
-##        else:
-##            print("Test passed.")
-##
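
selftest() harvests its cases from the checks' own docstrings: any line
matching SELFTEST_REGEX pairs an expected code (or 'Okay') with a one-line
snippet, in which the literal escapes \n, \t and \s are expanded to newline,
tab and space, as the replace calls above show. A docstring in that style,
modelled on the ones pep8's built-in checks carry:

    def extraneous_whitespace(logical_line):
        r"""
        Avoid extraneous whitespace immediately inside brackets.

        Okay: spam(ham[1], {eggs: 2})
        E201: spam( ham[1], {eggs: 2})
        E202: spam(ham[1], {eggs: 2} )
        """
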
-##
 def get_parser(prog='pep8', version=__version__):
     parser = OptionParser(prog=prog, version=version,
                           usage="%prog [options] input ...")
