Plugins/CheckerPlugins/Pep8/pep8.py

changeset 2862
a1448560d7dc
parent 2861
cdcbca0cea82
child 2863
62171fa4a6a4
equal deleted inserted replaced
2861:cdcbca0cea82 2862:a1448560d7dc
1 # -*- coding: utf-8 -*- 1 # -*- coding: utf-8 -*-
2 2
3 # 3 #
4 # pep8.py - Check Python source code formatting, according to PEP 8 4 # pep8.py - Check Python source code formatting, according to PEP 8
5 # Copyright (C) 2006 Johann C. Rocholl <johann@rocholl.net> 5 # Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
6 # Copyright (C) 2009-2013 Florent Xicluna <florent.xicluna@gmail.com>
6 # 7 #
7 # Permission is hereby granted, free of charge, to any person 8 # Permission is hereby granted, free of charge, to any person
8 # obtaining a copy of this software and associated documentation files 9 # obtaining a copy of this software and associated documentation files
9 # (the "Software"), to deal in the Software without restriction, 10 # (the "Software"), to deal in the Software without restriction,
10 # including without limitation the rights to use, copy, modify, merge, 11 # including without limitation the rights to use, copy, modify, merge,
42 300 blank lines 43 300 blank lines
43 400 imports 44 400 imports
44 500 line length 45 500 line length
45 600 deprecation 46 600 deprecation
46 700 statements 47 700 statements
47 900 processing errors 48 900 syntax error
48
49 You can add checks to this program by writing plugins. Each plugin is
50 a simple function that is called for each line of source code, either
51 physical or logical.
52
53 Physical line:
54 - Raw line of text from the input file.
55
56 Logical line:
57 - Multi-line statements converted to a single line.
58 - Stripped left and right.
59 - Contents of strings replaced with 'xxx' of same length.
60 - Comments removed.
61
62 The check function requests physical or logical lines by the name of
63 the first argument:
64
65 def maximum_line_length(physical_line)
66 def extraneous_whitespace(logical_line)
67 def blank_lines(logical_line, blank_lines, indent_level, line_number)
68
69 The last example above demonstrates how check plugins can request
70 additional information with extra arguments. All attributes of the
71 Checker object are available. Some examples:
72
73 lines: a list of the raw lines from the input file
74 tokens: the tokens that contribute to this logical line
75 line_number: line number in the input file
76 blank_lines: blank lines before this one
77 indent_char: first indentation character in this file (' ' or '\t')
78 indent_level: indentation (with tabs expanded to multiples of 8)
79 previous_indent_level: indentation on previous line
80 previous_logical: previous logical line
81
82 The docstring of each check function shall be the relevant part of
83 text from PEP 8. It is printed if the user enables --show-pep8.
84 Several docstrings contain examples directly from the PEP 8 document.
85
86 Okay: spam(ham[1], {eggs: 2})
87 E201: spam( ham[1], {eggs: 2})
88
89 These examples are verified automatically when pep8.py is run with the
90 --doctest option. You can add examples for your own check functions.
91 The format is simple: "Okay" or error/warning code followed by colon
92 and space, the rest of the line is example source code. If you put 'r'
93 before the docstring, you can use \n for newline, \t for tab and \s
94 for space.
95
96 """ 49 """
97 50
98 # 51 #
99 # This is a modified version to make the original pep8.py better suitable 52 # This is a modified version to make the original pep8.py better suitable
100 # for being called from within the eric5 IDE. The modifications are as 53 # for being called from within the eric5 IDE. The modifications are as
103 # - made messages translatable via Qt 56 # - made messages translatable via Qt
104 # 57 #
105 # Copyright (c) 2011 - 2013 Detlev Offenbach <detlev@die-offenbachs.de> 58 # Copyright (c) 2011 - 2013 Detlev Offenbach <detlev@die-offenbachs.de>
106 # 59 #
107 60
108 __version__ = '0.6.1' 61 __version__ = '1.4.6'
109 62
110 import os 63 import os
111 import sys 64 import sys
112 import re 65 import re
113 import time 66 import time
115 import keyword 68 import keyword
116 import tokenize 69 import tokenize
117 from optparse import OptionParser 70 from optparse import OptionParser
118 from fnmatch import fnmatch 71 from fnmatch import fnmatch
119 try: 72 try:
120 frozenset 73 from configparser import RawConfigParser
121 except NameError: 74 from io import TextIOWrapper
122 from sets import ImmutableSet as frozenset 75 except ImportError:
76 from ConfigParser import RawConfigParser # __IGNORE_WARNING__
123 77
124 from PyQt4.QtCore import QCoreApplication, QT_TRANSLATE_NOOP 78 from PyQt4.QtCore import QCoreApplication, QT_TRANSLATE_NOOP
125 79
126 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git' 80 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__'
127 DEFAULT_IGNORE = 'E24' 81 DEFAULT_IGNORE = 'E123,E226,E24'
82 if sys.platform == 'win32':
83 DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8')
84 else:
85 DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or
86 os.path.expanduser('~/.config'), 'pep8')
87 PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
88 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
128 MAX_LINE_LENGTH = 79 89 MAX_LINE_LENGTH = 79
90 REPORT_FORMAT = {
91 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
92 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
93 }
94
95 PyCF_ONLY_AST = 1024
96 SINGLETONS = frozenset(['False', 'None', 'True'])
97 KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
98 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
99 ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
100 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
101 WS_NEEDED_OPERATORS = frozenset([
102 '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
103 '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
104 WHITESPACE = frozenset(' \t')
105 SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE,
106 tokenize.INDENT, tokenize.DEDENT])
107 BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
129 108
130 INDENT_REGEX = re.compile(r'([ \t]*)') 109 INDENT_REGEX = re.compile(r'([ \t]*)')
131 RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)') 110 RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
132 SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)') 111 RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+')
133 ERRORCODE_REGEX = re.compile(r'[EW]\d{3}') 112 ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
134 DOCSTRING_REGEX = re.compile(r'u?r?["\']') 113 DOCSTRING_REGEX = re.compile(r'u?r?["\']')
135 WHITESPACE_AROUND_OPERATOR_REGEX = \
136 re.compile('([^\w\s]*)\s*(\t| )\s*([^\w\s]*)')
137 EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') 114 EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
138 WHITESPACE_AROUND_NAMED_PARAMETER_REGEX = \ 115 WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
139 re.compile(r'[()]|\s=[^=]|[^=!<>]=\s') 116 COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)')
140 117 COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
141 118 r'|\s*\(\s*([^)]*[^ )])\s*\))')
142 WHITESPACE = ' \t' 119 KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
143 120 OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
144 BINARY_OPERATORS = frozenset(['**=', '*=', '+=', '-=', '!=', '<>', 121 LAMBDA_REGEX = re.compile(r'\blambda\b')
145 '%=', '^=', '&=', '|=', '==', '/=', '//=', '<=', '>=', '<<=', '>>=', 122 HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
146 '%', '^', '&', '|', '=', '/', '//', '<', '>', '<<']) 123
147 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) 124 # Work around Python < 2.6 behaviour, which does not generate NL after
148 OPERATORS = BINARY_OPERATORS | UNARY_OPERATORS 125 # a comment which is on a line by itself.
149 SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.INDENT, 126 COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
150 tokenize.DEDENT, tokenize.NEWLINE])
151 E225NOT_KEYWORDS = (frozenset(keyword.kwlist + ['print']) -
152 frozenset(['False', 'None', 'True']))
153 BENCHMARK_KEYS = ('directories', 'files', 'logical lines', 'physical lines')
154
155 options = None
156 args = None
157 127
158 128
159 ############################################################################## 129 ##############################################################################
160 # Helper functions for translated and formatted messages 130 # Helper functions for translated and formatted messages
161 ############################################################################## 131 ##############################################################################
168 "indentation is not a multiple of four"), 138 "indentation is not a multiple of four"),
169 "E112": QT_TRANSLATE_NOOP("pep8", 139 "E112": QT_TRANSLATE_NOOP("pep8",
170 "expected an indented block"), 140 "expected an indented block"),
171 "E113": QT_TRANSLATE_NOOP("pep8", 141 "E113": QT_TRANSLATE_NOOP("pep8",
172 "unexpected indentation"), 142 "unexpected indentation"),
143 "E121": QT_TRANSLATE_NOOP("pep8",
144 "continuation line indentation is not a multiple of four"),
145 "E122": QT_TRANSLATE_NOOP("pep8",
146 "continuation line missing indentation or outdented"),
147 "E123": QT_TRANSLATE_NOOP("pep8",
148 "closing bracket does not match indentation of opening bracket's line"),
149 "E124": QT_TRANSLATE_NOOP("pep8",
150 "closing bracket does not match visual indentation"),
151 "E125": QT_TRANSLATE_NOOP("pep8",
152 "continuation line does not distinguish itself from next logical line"),
153 "E126": QT_TRANSLATE_NOOP("pep8",
154 "continuation line over-indented for hanging indent"),
155 "E127": QT_TRANSLATE_NOOP("pep8",
156 "continuation line over-indented for visual indent"),
157 "E128": QT_TRANSLATE_NOOP("pep8",
158 "continuation line under-indented for visual indent"),
159 "E133": QT_TRANSLATE_NOOP("pep8",
160 "closing bracket is missing indentation"),
173 "W191": QT_TRANSLATE_NOOP("pep8", 161 "W191": QT_TRANSLATE_NOOP("pep8",
174 "indentation contains tabs"), 162 "indentation contains tabs"),
175 "E201": QT_TRANSLATE_NOOP("pep8", 163 "E201": QT_TRANSLATE_NOOP("pep8",
176 "whitespace after '{0}'"), 164 "whitespace after '{0}'"),
177 "E202": QT_TRANSLATE_NOOP("pep8", 165 "E202": QT_TRANSLATE_NOOP("pep8",
188 "tab before operator"), 176 "tab before operator"),
189 "E224": QT_TRANSLATE_NOOP("pep8", 177 "E224": QT_TRANSLATE_NOOP("pep8",
190 "tab after operator"), 178 "tab after operator"),
191 "E225": QT_TRANSLATE_NOOP("pep8", 179 "E225": QT_TRANSLATE_NOOP("pep8",
192 "missing whitespace around operator"), 180 "missing whitespace around operator"),
181 "E226": QT_TRANSLATE_NOOP("pep8",
182 "missing whitespace around arithmetic operator"),
183 "E227": QT_TRANSLATE_NOOP("pep8",
184 "missing whitespace around bitwise or shift operator"),
185 "E228": QT_TRANSLATE_NOOP("pep8",
186 "missing whitespace around modulo operator"),
193 "E231": QT_TRANSLATE_NOOP("pep8", 187 "E231": QT_TRANSLATE_NOOP("pep8",
194 "missing whitespace after '{0}'"), 188 "missing whitespace after '{0}'"),
195 "E241": QT_TRANSLATE_NOOP("pep8", 189 "E241": QT_TRANSLATE_NOOP("pep8",
196 "multiple spaces after '{0}'"), 190 "multiple spaces after '{0}'"),
197 "E242": QT_TRANSLATE_NOOP("pep8", 191 "E242": QT_TRANSLATE_NOOP("pep8",
198 "tab after '{0}'"), 192 "tab after '{0}'"),
199 "E251": QT_TRANSLATE_NOOP("pep8", 193 "E251": QT_TRANSLATE_NOOP("pep8",
200 "no spaces around keyword / parameter equals"), 194 "unexpected spaces around keyword / parameter equals"),
201 "E261": QT_TRANSLATE_NOOP("pep8", 195 "E261": QT_TRANSLATE_NOOP("pep8",
202 "at least two spaces before inline comment"), 196 "at least two spaces before inline comment"),
203 "E262": QT_TRANSLATE_NOOP("pep8", 197 "E262": QT_TRANSLATE_NOOP("pep8",
204 "inline comment should start with '# '"), 198 "inline comment should start with '# '"),
199 "E271": QT_TRANSLATE_NOOP("pep8",
200 "multiple spaces after keyword"),
201 "E272": QT_TRANSLATE_NOOP("pep8",
202 "multiple spaces before keyword"),
203 "E273": QT_TRANSLATE_NOOP("pep8",
204 "tab after keyword"),
205 "E274": QT_TRANSLATE_NOOP("pep8",
206 "tab before keyword"),
205 "W291": QT_TRANSLATE_NOOP("pep8", 207 "W291": QT_TRANSLATE_NOOP("pep8",
206 "trailing whitespace"), 208 "trailing whitespace"),
207 "W292": QT_TRANSLATE_NOOP("pep8", 209 "W292": QT_TRANSLATE_NOOP("pep8",
208 "no newline at end of file"), 210 "no newline at end of file"),
209 "W293": QT_TRANSLATE_NOOP("pep8", 211 "W293": QT_TRANSLATE_NOOP("pep8",
219 "W391": QT_TRANSLATE_NOOP("pep8", 221 "W391": QT_TRANSLATE_NOOP("pep8",
220 "blank line at end of file"), 222 "blank line at end of file"),
221 "E401": QT_TRANSLATE_NOOP("pep8", 223 "E401": QT_TRANSLATE_NOOP("pep8",
222 "multiple imports on one line"), 224 "multiple imports on one line"),
223 "E501": QT_TRANSLATE_NOOP("pep8", 225 "E501": QT_TRANSLATE_NOOP("pep8",
224 "line too long ({0} characters)"), 226 "line too long ({0} > {1} characters)"),
227 "E502": QT_TRANSLATE_NOOP("pep8",
228 "the backslash is redundant between brackets"),
225 "W601": QT_TRANSLATE_NOOP("pep8", 229 "W601": QT_TRANSLATE_NOOP("pep8",
226 ".has_key() is deprecated, use 'in'"), 230 ".has_key() is deprecated, use 'in'"),
227 "W602": QT_TRANSLATE_NOOP("pep8", 231 "W602": QT_TRANSLATE_NOOP("pep8",
228 "deprecated form of raising exception"), 232 "deprecated form of raising exception"),
229 "W603": QT_TRANSLATE_NOOP("pep8", 233 "W603": QT_TRANSLATE_NOOP("pep8",
232 "backticks are deprecated, use 'repr()'"), 236 "backticks are deprecated, use 'repr()'"),
233 "E701": QT_TRANSLATE_NOOP("pep8", 237 "E701": QT_TRANSLATE_NOOP("pep8",
234 "multiple statements on one line (colon)"), 238 "multiple statements on one line (colon)"),
235 "E702": QT_TRANSLATE_NOOP("pep8", 239 "E702": QT_TRANSLATE_NOOP("pep8",
236 "multiple statements on one line (semicolon)"), 240 "multiple statements on one line (semicolon)"),
241 "E703": QT_TRANSLATE_NOOP("pep8",
242 "statement ends with a semicolon"),
243 "E711": QT_TRANSLATE_NOOP("pep8",
244 "comparison to {0} should be {1}"),
245 "E712": QT_TRANSLATE_NOOP("pep8",
246 "comparison to {0} should be {1}"),
247 "E721": QT_TRANSLATE_NOOP("pep8",
248 "do not compare types, use 'isinstance()'"),
237 "E901": QT_TRANSLATE_NOOP("pep8", 249 "E901": QT_TRANSLATE_NOOP("pep8",
238 "Token Error: {0}"), 250 "{0}: {1}"),
239 } 251 }
240 252
241 pep8_messages_sample_args = { 253 pep8_messages_sample_args = {
242 "E201": ["([{"], 254 "E201": ["([{"],
243 "E202": ["}])"], 255 "E202": ["}])"],
246 "E231": [",;:"], 258 "E231": [",;:"],
247 "E241": [",;:"], 259 "E241": [",;:"],
248 "E242": [",;:"], 260 "E242": [",;:"],
249 "E302": [1], 261 "E302": [1],
250 "E303": [3], 262 "E303": [3],
251 "E501": [85], 263 "E501": [85, 79],
264 "E711": ["None", "'if cond is None:'"],
265 "E712": ["True", "'if cond is True:' or 'if cond:'"],
266 "E901": ["SyntaxError", "Invalid Syntax"],
252 } 267 }
253 268
254 269
255 def getMessage(code, *args): 270 def getMessage(code, *args):
256 """ 271 """
299 314
300 Okay: if True:\n return 315 Okay: if True:\n return
301 W191: if True:\n\treturn 316 W191: if True:\n\treturn
302 """ 317 """
303 indent = INDENT_REGEX.match(physical_line).group(1) 318 indent = INDENT_REGEX.match(physical_line).group(1)
304 if indent.count('\t'): 319 if '\t' in indent:
305 return indent.index('\t'), "W191" 320 return indent.index('\t'), "W191"
306 321
307 322
308 def trailing_whitespace(physical_line): 323 def trailing_whitespace(physical_line):
309 r""" 324 r"""
318 [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines 333 [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines
319 334
320 The warning returned varies on whether the line itself is blank, for easier 335 The warning returned varies on whether the line itself is blank, for easier
321 filtering for those who want to indent their blank lines. 336 filtering for those who want to indent their blank lines.
322 337
323 Okay: spam(1) 338 Okay: spam(1)\n#
324 W291: spam(1)\s 339 W291: spam(1) \n#
325 W293: class Foo(object):\n \n bang = 12 340 W293: class Foo(object):\n \n bang = 12
326 """ 341 """
327 physical_line = physical_line.rstrip('\n') # chr(10), newline 342 physical_line = physical_line.rstrip('\n') # chr(10), newline
328 physical_line = physical_line.rstrip('\r') # chr(13), carriage return 343 physical_line = physical_line.rstrip('\r') # chr(13), carriage return
329 physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L 344 physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L
330 stripped = physical_line.rstrip() 345 stripped = physical_line.rstrip(' \t\v')
331 if physical_line != stripped: 346 if physical_line != stripped:
332 if stripped: 347 if stripped:
333 return len(stripped), "W291" 348 return len(stripped), "W291"
334 else: 349 else:
335 return 0, "W293" 350 return 0, "W293"
340 JCR: Trailing blank lines are superfluous. 355 JCR: Trailing blank lines are superfluous.
341 356
342 Okay: spam(1) 357 Okay: spam(1)
343 W391: spam(1)\n 358 W391: spam(1)\n
344 """ 359 """
345 if physical_line.strip() == '' and line_number == len(lines): 360 if not physical_line.rstrip() and line_number == len(lines):
346 return 0, "W391" 361 return 0, "W391"
347 362
348 363
349 def missing_newline(physical_line): 364 def missing_newline(physical_line):
350 """ 365 """
351 JCR: The last line should have a newline. 366 JCR: The last line should have a newline.
367
368 Reports warning W292.
352 """ 369 """
353 if physical_line.rstrip() == physical_line: 370 if physical_line.rstrip() == physical_line:
354 return len(physical_line), "W292" 371 return len(physical_line), "W292"
355 372
356 373
357 def maximum_line_length(physical_line): 374 def maximum_line_length(physical_line, max_line_length):
358 """ 375 """
359 Limit all lines to a maximum of 79 characters. 376 Limit all lines to a maximum of 79 characters.
360 377
361 There are still many devices around that are limited to 80 character 378 There are still many devices around that are limited to 80 character
362 lines; plus, limiting windows to 80 characters makes it possible to have 379 lines; plus, limiting windows to 80 characters makes it possible to have
363 several windows side-by-side. The default wrapping on such devices looks 380 several windows side-by-side. The default wrapping on such devices looks
364 ugly. Therefore, please limit all lines to a maximum of 79 characters. 381 ugly. Therefore, please limit all lines to a maximum of 79 characters.
365 For flowing long blocks of text (docstrings or comments), limiting the 382 For flowing long blocks of text (docstrings or comments), limiting the
366 length to 72 characters is recommended. 383 length to 72 characters is recommended.
384
385 Reports error E501.
367 """ 386 """
368 line = physical_line.rstrip() 387 line = physical_line.rstrip()
369 length = len(line) 388 length = len(line)
370 if length > MAX_LINE_LENGTH: 389 if length > max_line_length and not noqa(line):
371 try: 390 if hasattr(line, 'decode'): # Python 2
372 # The line could contain multi-byte characters 391 # The line could contain multi-byte characters
373 if hasattr(line, 'decode'): # Python 2 only 392 try:
374 length = len(line.decode('utf-8')) 393 length = len(line.decode('utf-8'))
375 except UnicodeDecodeError: 394 except UnicodeError:
376 pass 395 pass
377 if length > MAX_LINE_LENGTH: 396 if length > max_line_length:
378 return MAX_LINE_LENGTH, "E501", length 397 return max_line_length, "E501", length, max_line_length
379 398
380 399
381 ############################################################################## 400 ##############################################################################
382 # Plugins (check functions) for logical lines 401 # Plugins (check functions) for logical lines
383 ############################################################################## 402 ##############################################################################
384 403
385 404
386 def blank_lines(logical_line, blank_lines, indent_level, line_number, 405 def blank_lines(logical_line, blank_lines, indent_level, line_number,
387 previous_logical, previous_indent_level, 406 previous_logical, previous_indent_level):
388 blank_lines_before_comment):
389 r""" 407 r"""
390 Separate top-level function and class definitions with two blank lines. 408 Separate top-level function and class definitions with two blank lines.
391 409
392 Method definitions inside a class are separated by a single blank line. 410 Method definitions inside a class are separated by a single blank line.
393 411
404 E302: def a():\n pass\n\ndef b(n):\n pass 422 E302: def a():\n pass\n\ndef b(n):\n pass
405 E303: def a():\n pass\n\n\n\ndef b(n):\n pass 423 E303: def a():\n pass\n\n\n\ndef b(n):\n pass
406 E303: def a():\n\n\n\n pass 424 E303: def a():\n\n\n\n pass
407 E304: @decorator\n\ndef a():\n pass 425 E304: @decorator\n\ndef a():\n pass
408 """ 426 """
409 if line_number == 1: 427 if line_number < 3 and not previous_logical:
410 return # Don't expect blank lines before the first line 428 return # Don't expect blank lines before the first line
411 max_blank_lines = max(blank_lines, blank_lines_before_comment)
412 if previous_logical.startswith('@'): 429 if previous_logical.startswith('@'):
413 if max_blank_lines: 430 if blank_lines:
414 return 0, "E304" 431 yield 0, "E304"
415 elif (logical_line.startswith('def ') or 432 elif blank_lines > 2 or (indent_level and blank_lines == 2):
416 logical_line.startswith('class ') or 433 yield 0, "E303", blank_lines
417 logical_line.startswith('@')): 434 elif logical_line.startswith(('def ', 'class ', '@')):
418 if indent_level: 435 if indent_level:
419 if not (max_blank_lines or previous_indent_level < indent_level or 436 if not (blank_lines or previous_indent_level < indent_level or
420 DOCSTRING_REGEX.match(previous_logical)): 437 DOCSTRING_REGEX.match(previous_logical)):
421 return 0, "E301" 438 yield 0, "E301"
422 elif max_blank_lines != 2: 439 elif blank_lines != 2:
423 return 0, "E302", max_blank_lines 440 yield 0, "E302", blank_lines
424 elif max_blank_lines > 2 or (indent_level and max_blank_lines == 2):
425 return 0, "E303", max_blank_lines
426 441
427 442
428 def extraneous_whitespace(logical_line): 443 def extraneous_whitespace(logical_line):
429 """ 444 """
430 Avoid extraneous whitespace in the following situations: 445 Avoid extraneous whitespace in the following situations:
448 line = logical_line 463 line = logical_line
449 for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): 464 for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
450 text = match.group() 465 text = match.group()
451 char = text.strip() 466 char = text.strip()
452 found = match.start() 467 found = match.start()
453 if text == char + ' ' and char in '([{': 468 if text == char + ' ':
454 return found + 1, "E201", char 469 # assert char in '([{'
455 if text == ' ' + char and line[found - 1] != ',': 470 yield found + 1, "E201", char
456 if char in '}])': 471 elif line[found - 1] != ',':
457 return found, "E202", char 472 code = ('E202' if char in '}])' else 'E203') # if char in ',;:'
458 if char in ',;:': 473 yield found, code, char
459 return found, "E203", char 474
475
476 def whitespace_around_keywords(logical_line):
477 r"""
478 Avoid extraneous whitespace around keywords.
479
480 Okay: True and False
481 E271: True and False
482 E272: True and False
483 E273: True and\tFalse
484 E274: True\tand False
485 """
486 for match in KEYWORD_REGEX.finditer(logical_line):
487 before, after = match.groups()
488
489 if '\t' in before:
490 yield match.start(1), "E274"
491 elif len(before) > 1:
492 yield match.start(1), "E272"
493
494 if '\t' in after:
495 yield match.start(2), "E273"
496 elif len(after) > 1:
497 yield match.start(2), "E271"
460 498
461 499
462 def missing_whitespace(logical_line): 500 def missing_whitespace(logical_line):
463 """ 501 """
464 JCR: Each comma, semicolon or colon should be followed by whitespace. 502 JCR: Each comma, semicolon or colon should be followed by whitespace.
469 Okay: a[:4] 507 Okay: a[:4]
470 Okay: a[1:] 508 Okay: a[1:]
471 Okay: a[1:4:2] 509 Okay: a[1:4:2]
472 E231: ['a','b'] 510 E231: ['a','b']
473 E231: foo(bar,baz) 511 E231: foo(bar,baz)
512 E231: [{'a':'b'}]
474 """ 513 """
475 line = logical_line 514 line = logical_line
476 for index in range(len(line) - 1): 515 for index in range(len(line) - 1):
477 char = line[index] 516 char = line[index]
478 if char in ',;:' and line[index + 1] not in WHITESPACE: 517 if char in ',;:' and line[index + 1] not in WHITESPACE:
479 before = line[:index] 518 before = line[:index]
480 if char == ':' and before.count('[') > before.count(']'): 519 if char == ':' and before.count('[') > before.count(']') and \
520 before.rfind('{') < before.rfind('['):
481 continue # Slice syntax, no space required 521 continue # Slice syntax, no space required
482 if char == ',' and line[index + 1] == ')': 522 if char == ',' and line[index + 1] == ')':
483 continue # Allow tuple with only one element: (3,) 523 continue # Allow tuple with only one element: (3,)
484 return index, "E231", char 524 yield index, "E231", char
485 525
486 526
487 def indentation(logical_line, previous_logical, indent_char, 527 def indentation(logical_line, previous_logical, indent_char,
488 indent_level, previous_indent_level): 528 indent_level, previous_indent_level):
489 r""" 529 r"""
501 541
502 Okay: a = 1\nb = 2 542 Okay: a = 1\nb = 2
503 E113: a = 1\n b = 2 543 E113: a = 1\n b = 2
504 """ 544 """
505 if indent_char == ' ' and indent_level % 4: 545 if indent_char == ' ' and indent_level % 4:
506 return 0, "E111" 546 yield 0, "E111"
507 indent_expect = previous_logical.endswith(':') 547 indent_expect = previous_logical.endswith(':')
508 if indent_expect and indent_level <= previous_indent_level: 548 if indent_expect and indent_level <= previous_indent_level:
509 return 0, "E112" 549 yield 0, "E112"
510 if indent_level > previous_indent_level and not indent_expect: 550 if indent_level > previous_indent_level and not indent_expect:
511 return 0, "E113" 551 yield 0, "E113"
552
553
554 def continued_indentation(logical_line, tokens, indent_level, hang_closing,
555 noqa, verbose):
556 r"""
557 Continuation lines should align wrapped elements either vertically using
558 Python's implicit line joining inside parentheses, brackets and braces, or
559 using a hanging indent.
560
561 When using a hanging indent the following considerations should be applied:
562
563 - there should be no arguments on the first line, and
564
565 - further indentation should be used to clearly distinguish itself as a
566 continuation line.
567
568 Okay: a = (\n)
569 E123: a = (\n )
570
571 Okay: a = (\n 42)
572 E121: a = (\n 42)
573 E122: a = (\n42)
574 E123: a = (\n 42\n )
575 E124: a = (24,\n 42\n)
576 E125: if (a or\n b):\n pass
577 E126: a = (\n 42)
578 E127: a = (24,\n 42)
579 E128: a = (24,\n 42)
580 """
581 first_row = tokens[0][2][0]
582 nrows = 1 + tokens[-1][2][0] - first_row
583 if noqa or nrows == 1:
584 return
585
586 # indent_next tells us whether the next block is indented; assuming
587 # that it is indented by 4 spaces, then we should not allow 4-space
588 # indents on the final continuation line; in turn, some other
589 # indents are allowed to have an extra 4 spaces.
590 indent_next = logical_line.endswith(':')
591
592 row = depth = 0
593 # remember how many brackets were opened on each line
594 parens = [0] * nrows
595 # relative indents of physical lines
596 rel_indent = [0] * nrows
597 # visual indents
598 indent_chances = {}
599 last_indent = tokens[0][2]
600 indent = [last_indent[1]]
601 if verbose >= 3:
602 print(">>> " + tokens[0][4].rstrip())
603
604 for token_type, text, start, end, line in tokens:
605
606 last_token_multiline = (start[0] != end[0])
607 newline = row < start[0] - first_row
608 if newline:
609 row = start[0] - first_row
610 newline = (not last_token_multiline and
611 token_type not in (tokenize.NL, tokenize.NEWLINE))
612
613 if newline:
614 # this is the beginning of a continuation line.
615 last_indent = start
616 if verbose >= 3:
617 print("... " + line.rstrip())
618
619 # record the initial indent.
620 rel_indent[row] = expand_indent(line) - indent_level
621
622 if depth:
623 # a bracket expression in a continuation line.
624 # find the line that it was opened on
625 for open_row in range(row - 1, -1, -1):
626 if parens[open_row]:
627 break
628 else:
629 # an unbracketed continuation line (ie, backslash)
630 open_row = 0
631 hang = rel_indent[row] - rel_indent[open_row]
632 close_bracket = (token_type == tokenize.OP and text in ']})')
633 visual_indent = (not close_bracket and hang > 0 and
634 indent_chances.get(start[1]))
635
636 if close_bracket and indent[depth]:
637 # closing bracket for visual indent
638 if start[1] != indent[depth]:
639 yield start, "E124"
640 elif close_bracket and not hang:
641 # closing bracket matches indentation of opening bracket's line
642 if hang_closing:
643 yield start, "E133"
644 elif visual_indent is True:
645 # visual indent is verified
646 if not indent[depth]:
647 indent[depth] = start[1]
648 elif visual_indent in (text, str):
649 # ignore token lined up with matching one from a previous line
650 pass
651 elif indent[depth] and start[1] < indent[depth]:
652 # visual indent is broken
653 yield start, "E128"
654 elif hang == 4 or (indent_next and rel_indent[row] == 8):
655 # hanging indent is verified
656 if close_bracket and not hang_closing:
657 yield (start, "E123")
658 else:
659 # indent is broken
660 if hang <= 0:
661 error = "E122"
662 elif indent[depth]:
663 error = "E127"
664 elif hang % 4:
665 error = "E121"
666 else:
667 error = "E126"
668 yield start, error
669
670 # look for visual indenting
671 if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT)
672 and not indent[depth]):
673 indent[depth] = start[1]
674 indent_chances[start[1]] = True
675 if verbose >= 4:
676 print("bracket depth %s indent to %s" % (depth, start[1]))
677 # deal with implicit string concatenation
678 elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
679 text in ('u', 'ur', 'b', 'br')):
680 indent_chances[start[1]] = str
681 # special case for the "if" statement because len("if (") == 4
682 elif not indent_chances and not row and not depth and text == 'if':
683 indent_chances[end[1] + 1] = True
684
685 # keep track of bracket depth
686 if token_type == tokenize.OP:
687 if text in '([{':
688 depth += 1
689 indent.append(0)
690 parens[row] += 1
691 if verbose >= 4:
692 print("bracket depth %s seen, col %s, visual min = %s" %
693 (depth, start[1], indent[depth]))
694 elif text in ')]}' and depth > 0:
695 # parent indents should not be more than this one
696 prev_indent = indent.pop() or last_indent[1]
697 for d in range(depth):
698 if indent[d] > prev_indent:
699 indent[d] = 0
700 for ind in list(indent_chances):
701 if ind >= prev_indent:
702 del indent_chances[ind]
703 depth -= 1
704 if depth:
705 indent_chances[indent[depth]] = True
706 for idx in range(row, -1, -1):
707 if parens[idx]:
708 parens[idx] -= 1
709 rel_indent[row] = rel_indent[idx]
710 break
711 assert len(indent) == depth + 1
712 if start[1] not in indent_chances:
713 # allow to line up tokens
714 indent_chances[start[1]] = text
715 ##
716 ## last_token_multiline = (start[0] != end[0])
717
718 if indent_next and expand_indent(line) == indent_level + 4:
719 yield last_indent, "E125"
512 720
513 721
def whitespace_before_parameters(logical_line, tokens):
    """
    Avoid extraneous whitespace in the following situations:

    - before the open parenthesis that starts the argument list of a
      function call.
    - before the open parenthesis that starts an indexing or slicing.

    Okay: spam(1)
    E211: spam (1)

    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for idx in range(1, len(tokens)):
        tok_type, text, start, end, __ = tokens[idx]
        separated_bracket = (tok_type == tokenize.OP and text in '([' and
                             start != prev_end)
        after_value = (prev_type == tokenize.NAME or prev_text in '}])')
        if (separated_bracket and after_value and
                # "class A (B):" is legal syntax, so don't flag it
                (idx < 2 or tokens[idx - 2][1] != 'class') and
                # "return (a.foo for a in range(5))" is fine too
                not keyword.iskeyword(prev_text)):
            yield prev_end, "E211", text
        prev_type, prev_text, prev_end = tok_type, text, end
548 754
549 755
def whitespace_around_operator(logical_line):
    r"""
    Avoid extraneous whitespace around an operator.

    Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    # NOTE(review): the def line above was elided by the diff render and is
    # reconstructed from the OPERATOR_REGEX usage — confirm against the file.
    # OPERATOR_REGEX presumably captures the whitespace run before the
    # operator as group 1 and the run after it as group 2.
    for match in OPERATOR_REGEX.finditer(logical_line):
        before, after = match.groups()

        if '\t' in before:
            yield match.start(1), "E223"   # tab before operator
        elif len(before) > 1:
            yield match.start(1), "E221"   # multiple spaces before operator

        if '\t' in after:
            yield match.start(2), "E224"   # tab after operator
        elif len(after) > 1:
            yield match.start(2), "E222"   # multiple spaces after operator
def missing_whitespace_around_operator(logical_line, tokens):
    r"""
    - Always surround these binary operators with a single space on
      either side: assignment (=), augmented assignment (+=, -= etc.),
      comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not),
      Booleans (and, or, not).

    - Use spaces around arithmetic operators.

    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]

    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    # need_space is a tri-state marker for the token that follows an operator:
    #   False       -> nothing pending
    #   True        -> a space is mandatory (operator in WS_NEEDED_OPERATORS)
    #   (pos, bool) -> space is optional; bool records whether a space
    #                  preceded the operator found at position pos
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN):
            # ERRORTOKEN is triggered by backticks in Python 3
            continue
        if text in ('(', 'lambda'):
            # Track call/default-argument context, where '=' needs no spaces.
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    # Space after but not before the operator: E225.
                    yield need_space[0], "E225"
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225"
                else:
                    # Optional space missing on both sides: pick the code by
                    # operator class (E228 modulo, E227 bitwise/shift,
                    # E226 arithmetic).
                    code = 'E226'
                    if prev_text == '%':
                        code = 'E228'
                    elif prev_text not in ARITHMETIC_OP:
                        code = 'E227'
                    yield need_space[0], code
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if prev_type == tokenize.OP:
                    binary_usage = (prev_text in '}])')
                elif prev_type == tokenize.NAME:
                    binary_usage = (prev_text not in KEYWORDS)
                else:
                    binary_usage = (prev_type not in SKIP_TOKENS)

                if binary_usage:
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None

            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
645 875
646 876
def whitespace_around_comma(logical_line):
    r"""
    Avoid extraneous whitespace after a comma (or a semicolon or colon).

    Okay: a = (1, 2)
    E241: a = (1,  2)
    E242: a = (1,\t2)
    """
    # NOTE(review): the def line above was elided by the diff render and is
    # reconstructed — confirm the exact name/docstring against the file.
    line = logical_line
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line):
        # m.start() is the separator itself; the offending whitespace
        # starts one column later.
        found = m.start() + 1
        if '\t' in m.group():
            yield found, "E242", m.group()[0]
        else:
            yield found, "E241", m.group()[0]
def whitespace_around_named_parameter_equals(logical_line, tokens):
    """
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.

    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)

    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    depth = 0
    check_following = False
    prev_end = None
    message = "E251"
    for tok_type, text, start, end, line in tokens:
        if check_following:
            # Token right after a keyword '=': no space allowed before it.
            check_following = False
            if start != prev_end:
                yield prev_end, message
        elif tok_type == tokenize.OP:
            if text == '(':
                depth += 1
            elif text == ')':
                depth -= 1
            elif depth and text == '=':
                # '=' inside parentheses is a keyword/default assignment.
                check_following = True
                if start != prev_end:
                    yield prev_end, message
        prev_end = end
697 934
698 935
def whitespace_before_inline_comment(logical_line, tokens):
    """
    Separate inline comments by at least two spaces.

    An inline comment is a comment on the same line as a statement.
    Inline comments should start with a # and a single space.

    Okay: x = x + 1  # Increment x
    E261: x = x + 1 # Increment x
    E262: x = x + 1  #Increment x
    E262: x = x + 1  ##Increment x
    """
    prev_end = (0, 0)
    for tok_type, text, start, end, line in tokens:
        if tok_type == tokenize.COMMENT:
            if not line[:start[1]].strip():
                # Whole-line comment, not an inline one.
                continue
            on_same_row = (prev_end[0] == start[0])
            if on_same_row and start[1] < prev_end[1] + 2:
                yield prev_end, "E261"
            symbol, _, comment = text.partition(' ')
            if symbol not in ('#', '#:') or comment[:1].isspace():
                yield start, "E262"
        elif tok_type != tokenize.NL:
            prev_end = end
727 962
728 963
def imports_on_separate_lines(logical_line):
    r"""
    Imports should usually be on separate lines.

    Okay: import os\nimport sys
    E401: import sys, os

    Okay: from subprocess import Popen, PIPE
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    if logical_line.startswith('import '):
        comma = logical_line.find(',')
        # A ';' before the comma means the comma belongs to a following
        # statement, not to this import.
        if comma > -1 and ';' not in logical_line[:comma]:
            yield comma, "E401"
747 982
748 983
def compound_statements(logical_line):
    r"""
    Compound statements (multiple statements on the same line) are
    generally discouraged.

    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements.

    Okay: if x == 1:\n    do_something()

    E701: if x == 1: do_something()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()

    E702: do_one(); do_two(); do_three()
    E703: do_four();  # useless semicolon
    """
    line = logical_line
    last_char = len(line) - 1
    found = line.find(':')
    # Every ':' followed by more code starts a compound statement (E701),
    # unless it sits inside an open dict/slice/paren or after a lambda.
    while -1 < found < last_char:
        before = line[:found]
        if (before.count('{') <= before.count('}') and  # {'a': 1} (dict)
                before.count('[') <= before.count(']') and  # [1:2] (slice)
                before.count('(') <= before.count(')') and  # (Python 3 annotation)
                not LAMBDA_REGEX.search(before)):  # lambda x: x
            yield found, "E701"
        found = line.find(':', found + 1)
    # Semicolons separating statements are E702; a trailing one is E703.
    found = line.find(';')
    while -1 < found:
        if found < last_char:
            yield found, "E702"
        else:
            yield found, "E703"
        found = line.find(';', found + 1)
789 The {}.has_key() method will be removed in the future version of 1028
790 Python. Use the 'in' operation instead, like: 1029
791 d = {"a": 1, "b": 2} 1030 def explicit_line_join(logical_line, tokens):
792 if "b" in d: 1031 r"""
793 print d["b"] 1032 Avoid explicit line join between brackets.
1033
1034 The preferred way of wrapping long lines is by using Python's implied line
1035 continuation inside parentheses, brackets and braces. Long lines can be
1036 broken over multiple lines by wrapping expressions in parentheses. These
1037 should be used in preference to using a backslash for line continuation.
1038
1039 E502: aaa = [123, \\n 123]
1040 E502: aaa = ("bbb " \\n "ccc")
1041
1042 Okay: aaa = [123,\n 123]
1043 Okay: aaa = ("bbb "\n "ccc")
1044 Okay: aaa = "bbb " \\n "ccc"
1045 """
1046 prev_start = prev_end = parens = 0
1047 backslash = None
1048 for token_type, text, start, end, line in tokens:
1049 if start[0] != prev_start and parens and backslash:
1050 yield backslash, "E502"
1051 if end[0] != prev_end:
1052 if line.rstrip('\r\n').endswith('\\'):
1053 backslash = (end[0], len(line.splitlines()[-1]) - 1)
1054 else:
1055 backslash = None
1056 prev_start = prev_end = end[0]
1057 else:
1058 prev_start = start[0]
1059 if token_type == tokenize.OP:
1060 if text in '([{':
1061 parens += 1
1062 elif text in ')]}':
1063 parens -= 1
1064
1065
def comparison_to_singleton(logical_line, noqa):
    """
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.

    Okay: if arg is not None:
    E711: if arg != None:
    E712: if arg == True:

    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value.  The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    is_equality = (match.group(1) == '==')
    singleton = match.group(2)
    msg = "'if cond is %s:'" % (('' if is_equality else 'not ') + singleton)
    if singleton == 'None':
        code = 'E711'
    else:
        code = 'E712'
        # "x == True" and "x != False" both really test for truthiness.
        truthy = ((singleton == 'True') == is_equality)
        msg += " or 'if %scond:'" % ('' if truthy else 'not ')
    yield match.start(1), code, singleton, msg
1093
1094
def comparison_type(logical_line):
    """
    Object type comparisons should always use isinstance() instead of
    comparing types directly.

    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):

    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:

    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match:
        return
    inst = match.group(1)
    # A named, non-singleton operand suggests a deliberate comparison of
    # types which are not obvious; allow it.
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        return
    yield match.start(), "E721"
1116
1117
def python_3000_has_key(logical_line):
    r"""
    The {}.has_key() method is removed in the Python 3.
    Use the 'in' operation instead.

    Okay: if "alph" in d:\n    print d["alph"]
    W601: assert d.has_key('alph')
    """
    offset = logical_line.find('.has_key(')
    if offset >= 0:
        yield offset, "W601"
798 1128
799 1129
def python_3000_raise_comma(logical_line):
    """
    When raising an exception, use "raise ValueError('message')"
    instead of the older form "raise ValueError, 'message'".

    The paren-using form is preferred because when the exception arguments
    are long or include string formatting, you don't need to use line
    continuation characters thanks to the containing parentheses.  The older
    form is removed in Python 3.

    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    if RERAISE_COMMA_REGEX.match(logical_line):
        # Tolerated: re-raising with an explicit traceback argument.
        return
    yield match.end() - 1, "W602"
813 1146
814 1147
def python_3000_not_equal(logical_line):
    """
    != can also be written <>, but this is an obsolete usage kept for
    backwards compatibility only. New code should always use !=.
    The older syntax is removed in Python 3.

    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset >= 0:
        yield offset, "W603"
824 1160
825 1161
def python_3000_backticks(logical_line):
    """
    Backticks are removed in Python 3.
    Use repr() instead.

    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset >= 0:
        yield offset, "W604"
834 1173
835 1174
836 ############################################################################## 1175 ##############################################################################
837 # Helper functions 1176 # Helper functions
838 ############################################################################## 1177 ##############################################################################
839 1178
840 1179
# '' == ''.encode() is True only on Python 2, where str and bytes are the
# same type; it selects the 2.x or 3.x implementations below.
if '' == ''.encode():
    # Python 2: implicit encoding.
    def readlines(filename):
        f = open(filename)
        try:
            return f.readlines()
        finally:
            f.close()
    isidentifier = re.compile(r'[a-zA-Z_]\w*').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):  # __IGNORE_WARNING__
        f = open(filename, 'rb')
        try:
            # Honour the source file's coding cookie when decoding.
            coding, lines = tokenize.detect_encoding(f.readline)
            f = TextIOWrapper(f, coding, line_buffering=True)
            return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            f.close()
            # Fall back if files are improperly declared
            f = open(filename, encoding='latin-1')
            return f.readlines()
        finally:
            f.close()
    isidentifier = str.isidentifier

    def stdin_get_value():
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
readlines.__doc__ = " Read the source code."
# noqa(line) is truthy when the line carries a '# noqa' / '# nopep8' marker.
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
851 1211
852 1212
def expand_indent(line):
    r"""
    Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.

    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('    \t')
    8
    >>> expand_indent('        \t')
    16
    """
    if '\t' not in line:
        # Fast path: spaces only.
        return len(line) - len(line.lstrip())
    width = 0
    for ch in line:
        if ch == '\t':
            width = width // 8 * 8 + 8
        elif ch == ' ':
            width += 1
        else:
            break
    return width
def mute_string(text):
    """
    Replace the contents of a string literal with 'xxx', keeping the
    quotes and any prefix so offsets stay valid.

    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    # Skip string modifiers (e.g. u or r) by locating the opening quote.
    quote = text[-1]
    start = text.index(quote) + 1
    end = len(text) - 1
    if text[-3:] in ('"""', "'''"):
        # Triple quotes
        start += 2
        end -= 2
    return text[:start] + 'x' * (end - start) + text[end:]
903 1261
904 1262
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Still consuming a hunk: deleted lines ('-') don't exist in
            # the new file and must not decrement the row counter.
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            # Missing groups default to '1' per unified-diff conventions.
            row, nrows = [int(g or '1') for g in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            # New file section; strip the 'b/' prefix used by git diffs.
            path = line[4:].split('\t', 1)[0]
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    return dict([(os.path.join(parent, path), rows)
                 for (path, rows) in rv.items()
                 if rows and filename_match(path, patterns)])
1286
1287
def filename_match(filename, patterns, default=True):
    """
    Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    for pattern in patterns:
        if fnmatch(filename, pattern):
            return True
    return False
910 1296
911 1297
912 ############################################################################## 1298 ##############################################################################
913 # Framework to run all checks 1299 # Framework to run all checks
914 ############################################################################## 1300 ##############################################################################
915 1301
916 1302
# Global registry: maps each check kind to {check: (codes, argument names)}.
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}


def register_check(check, codes=None):
    """
    Register a new check object.

    A function check is classified by the name of its first argument
    ('physical_line' or 'logical_line'); a class check must accept
    (self, tree, ...) and is registered as an AST ('tree') check.
    When codes is None they are extracted from the check's docstring.
    """
    def _add_check(check, kind, codes, args):
        # A check registered twice only contributes additional codes.
        if check in _checks[kind]:
            _checks[kind][check][0].extend(codes or [])
        else:
            _checks[kind][check] = (codes or [''], args)
    if inspect.isfunction(check):
        args = inspect.getargspec(check)[0]
        if args and args[0] in ('physical_line', 'logical_line'):
            if codes is None:
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _add_check(check, args[0], codes, args)
    elif inspect.isclass(check):
        if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']:
            _add_check(check, 'tree', codes, None)


def init_checks_registry():
    """
    Register all globally visible functions where the first argument name
    is 'physical_line' or 'logical_line'.
    """
    mod = inspect.getmodule(register_check)
    for (name, function) in inspect.getmembers(mod, inspect.isfunction):
        register_check(function)
# Populate the registry as soon as the module is imported.
init_checks_registry()
935 1335
936 1336
937 class Checker(object): 1337 class Checker(object):
938 """ 1338 """
939 Load a Python source file, tokenize it, check coding style. 1339 Load a Python source file, tokenize it, check coding style.
940 """ 1340 """
941 1341
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        """
        Initialize the checker for one source file.

        filename may be a path, '-' for stdin, or None when the source is
        supplied via lines.  When options is omitted, a StyleGuide built
        from **kwargs provides the configuration and check registry.
        """
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            # An explicit options object and loose kwargs are exclusive.
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                # Remember the failure; checking proceeds with no lines.
                exc_type, exc = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        # NOTE(review): an empty first line ('') would make ord() raise
        # IndexError here — confirm callers never pass [''].
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff):  # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    # Decoded BOM character (text mode).
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    # Raw BOM bytes decoded as latin-1.
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
        self.report_error_args = self.report.error_args
1381
    def report_invalid_syntax(self):
        # Recover the (row, col) position from the active exception;
        # SyntaxError carries it as the second element of exc.args.
        exc_type, exc = sys.exc_info()[:2]
        if len(exc.args) > 1:
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
        else:
            # No position information available: report line 1, column 0.
            offset = (1, 0)
        self.report_error_args(offset[0], offset[1] or 0,
                               'E901', exc_type.__name__, exc.args[0],
                               self.report_invalid_syntax)
    report_invalid_syntax.__doc__ = " Check if the syntax is valid."
952 1394
953 def readline(self): 1395 def readline(self):
954 """ 1396 """
955 Get the next line from the input buffer. 1397 Get the next line from the input buffer.
956 """ 1398 """
    def check_physical(self, line):
        """
        Run all physical checks on a raw input line.
        """
        self.physical_line = line
        # The first indented line fixes whether the file indents with
        # spaces or tabs (used by later indentation checks).
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        for name, check, argument_names in self._physical_checks:
            result = self.run_check(check, argument_names)
            if result is not None:
                # A physical check returns a single (offset, code, *args)
                # tuple or None.
                offset, code, *args = result
                self.report_error_args(self.line_number, offset, code, check,
                                       *args)
    def build_tokens_line(self):
        """
        Build a logical line from tokens.
        """
        # NOTE(review): the def line above was elided by the diff render; the
        # name is grounded by the self.build_tokens_line() call in
        # check_logical.
        self.mapping = []
        logical = []
        comments = []
        length = 0
        previous = None
        for token in self.tokens:
            token_type, text = token[0:2]
            if token_type == tokenize.COMMENT:
                # Comments are collected separately for the noqa test below.
                comments.append(text)
                continue
            if token_type in SKIP_TOKENS:
                continue
            if token_type == tokenize.STRING:
                # Mask string contents so checks cannot match inside them.
                text = mute_string(text)
            if previous:
                end_row, end = previous[3]
                start_row, start = token[2]
                if end_row != start_row:    # different row
                    prev_text = self.lines[end_row - 1][end - 1]
                    if prev_text == ',' or (prev_text not in '{[('
                                            and text not in '}])'):
                        logical.append(' ')
                        length += 1
                elif end != start:  # different column
                    fill = self.lines[end_row - 1][end:start]
                    logical.append(fill)
                    length += len(fill)
            # mapping ties logical-line offsets back to physical tokens.
            self.mapping.append((length, token))
            logical.append(text)
            length += len(text)
            previous = token
        self.logical_line = ''.join(logical)
        self.noqa = comments and noqa(''.join(comments))
        # With Python 2, if the line ends with '\r\r\n' the assertion fails
        # assert self.logical_line.strip() == self.logical_line
1029 1476
    def check_logical(self):
        """
        Build a line from tokens and run all logical checks on it.
        """
        self.build_tokens_line()
        self.report.increment_logical_line()
        # Indentation is taken from the physical line of the first token.
        first_line = self.lines[self.mapping[0][1][2][0] - 1]
        indent = first_line[:self.mapping[0][1][2][1]]
        self.previous_indent_level = self.indent_level
        self.indent_level = expand_indent(indent)
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print('   ' + name)
            # Logical checks are generators yielding (offset, code, *args).
            for result in self.run_check(check, argument_names):
                offset, code, *args = result
                if isinstance(offset, tuple):
                    # Already a (row, col) position.
                    orig_number, orig_offset = offset
                else:
                    # Map the logical-line offset back to a physical position
                    # via the token mapping built in build_tokens_line().
                    for token_offset, token in self.mapping:
                        if offset >= token_offset:
                            orig_number = token[2][0]
                            orig_offset = (token[2][1] + offset - token_offset)
                self.report_error_args(orig_number, orig_offset, code, check,
                                       *args)
        self.previous_logical = self.logical_line
1504
1505 def check_ast(self):
1506 try:
1507 tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
1508 except (SyntaxError, TypeError):
1509 return self.report_invalid_syntax()
1510 for name, cls, _ in self._ast_checks:
1511 checker = cls(tree, self.filename)
1512 for lineno, offset, code, check, *args in checker.run():
1513 if not noqa(self.lines[lineno - 1]):
1514 self.report_error_args(lineno, offset, code, check, *args)
1515
1516 def generate_tokens(self):
1517 if self._io_error:
1518 self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
1519 tokengen = tokenize.generate_tokens(self.readline_check_physical)
1520 try:
1521 for token in tokengen:
1522 yield token
1523 except (SyntaxError, tokenize.TokenError):
1524 self.report_invalid_syntax()
1059 1525
1060 def check_all(self, expected=None, line_offset=0): 1526 def check_all(self, expected=None, line_offset=0):
1061 """ 1527 """
1062 Run all checks on the input file. 1528 Run all checks on the input file.
1063 """ 1529 """
1064 self.expected = expected or () 1530 self.report.init_file(self.filename, self.lines, expected, line_offset)
1065 self.line_offset = line_offset 1531 if self._ast_checks:
1532 self.check_ast()
1066 self.line_number = 0 1533 self.line_number = 0
1067 self.file_errors = 0
1068 self.indent_char = None 1534 self.indent_char = None
1069 self.indent_level = 0 1535 self.indent_level = 0
1070 self.previous_logical = '' 1536 self.previous_logical = ''
1071 self.blank_lines = 0
1072 self.blank_lines_before_comment = 0
1073 self.tokens = [] 1537 self.tokens = []
1538 self.blank_lines = blank_lines_before_comment = 0
1074 parens = 0 1539 parens = 0
1075 try: 1540 for token in self.generate_tokens():
1076 for token in tokenize.generate_tokens(self.readline_check_physical): 1541 self.tokens.append(token)
1077 if options.verbose >= 3: 1542 token_type, text = token[0:2]
1078 if token[2][0] == token[3][0]: 1543 if self.verbose >= 3:
1079 pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) 1544 if token[2][0] == token[3][0]:
1080 else: 1545 pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
1081 pos = 'l.%s' % token[3][0] 1546 else:
1082 print('l.%s\t%s\t%s\t%r' % 1547 pos = 'l.%s' % token[3][0]
1083 (token[2][0], pos, tokenize.tok_name[token[0]], token[1])) 1548 print('l.%s\t%s\t%s\t%r' %
1084 self.tokens.append(token) 1549 (token[2][0], pos, tokenize.tok_name[token[0]], text))
1085 token_type, text = token[0:2] 1550 if token_type == tokenize.OP:
1086 if token_type == tokenize.OP and text in '([{': 1551 if text in '([{':
1087 parens += 1 1552 parens += 1
1088 if token_type == tokenize.OP and text in '}])': 1553 elif text in '}])':
1089 parens -= 1 1554 parens -= 1
1090 if token_type == tokenize.NEWLINE and not parens: 1555 elif not parens:
1556 if token_type == tokenize.NEWLINE:
1557 if self.blank_lines < blank_lines_before_comment:
1558 self.blank_lines = blank_lines_before_comment
1091 self.check_logical() 1559 self.check_logical()
1092 self.blank_lines = 0
1093 self.blank_lines_before_comment = 0
1094 self.tokens = [] 1560 self.tokens = []
1095 if token_type == tokenize.NL and not parens: 1561 self.blank_lines = blank_lines_before_comment = 0
1096 if len(self.tokens) <= 1: 1562 elif token_type == tokenize.NL:
1563 if len(self.tokens) == 1:
1097 # The physical line contains only this token. 1564 # The physical line contains only this token.
1098 self.blank_lines += 1 1565 self.blank_lines += 1
1099 self.tokens = [] 1566 self.tokens = []
1100 if token_type == tokenize.COMMENT: 1567 elif token_type == tokenize.COMMENT and len(self.tokens) == 1:
1101 source_line = token[4] 1568 if blank_lines_before_comment < self.blank_lines:
1102 token_start = token[2][1] 1569 blank_lines_before_comment = self.blank_lines
1103 if source_line[:token_start].strip() == '': 1570 self.blank_lines = 0
1104 self.blank_lines_before_comment = max(self.blank_lines, 1571 if COMMENT_WITH_NL:
1105 self.blank_lines_before_comment) 1572 # The comment also ends a physical line
1106 self.blank_lines = 0
1107 if text.endswith('\n') and not parens:
1108 # The comment also ends a physical line. This works around
1109 # Python < 2.6 behaviour, which does not generate NL after
1110 # a comment which is on a line by itself.
1111 self.tokens = [] 1573 self.tokens = []
1112 except tokenize.TokenError as err: 1574 return self.report.get_file_results()
1113 msg, (lnum, pos) = err.args 1575
1114 self.report_error_args(lnum, pos, "E901", "TokenError", msg) 1576
1577 class BaseReport(object):
1578 """Collect the results of the checks."""
1579 print_filename = False
1580
1581 def __init__(self, options):
1582 self._benchmark_keys = options.benchmark_keys
1583 self._ignore_code = options.ignore_code
1584 # Results
1585 self.elapsed = 0
1586 self.total_errors = 0
1587 self.counters = dict.fromkeys(self._benchmark_keys, 0)
1588 self.messages = {}
1589
1590 def start(self):
1591 """Start the timer."""
1592 self._start_time = time.time()
1593
1594 def stop(self):
1595 """Stop the timer."""
1596 self.elapsed = time.time() - self._start_time
1597
1598 def init_file(self, filename, lines, expected, line_offset):
1599 """Signal a new file."""
1600 self.filename = filename
1601 self.lines = lines
1602 self.expected = expected or ()
1603 self.line_offset = line_offset
1604 self.file_errors = 0
1605 self.counters['files'] += 1
1606 self.counters['physical lines'] += len(lines)
1607
1608 def increment_logical_line(self):
1609 """Signal a new logical line."""
1610 self.counters['logical lines'] += 1
1611
1612 def error(self, line_number, offset, text, check):
1613 """Report an error, according to options."""
1614 code = text[:4]
1615 if self._ignore_code(code):
1616 return
1617 if code in self.counters:
1618 self.counters[code] += 1
1619 else:
1620 self.counters[code] = 1
1621 self.messages[code] = text[5:]
1622 # Don't care about expected errors or warnings
1623 if code in self.expected:
1624 return
1625 if self.print_filename and not self.file_errors:
1626 print(self.filename)
1627 self.file_errors += 1
1628 self.total_errors += 1
1629 return code
1630
1631 def error_args(self, line_number, offset, code, check, *args):
1632 """Report an error, according to options."""
1633 if self._ignore_code(code):
1634 return
1635 text = getMessage(code, *args)
1636 if code in self.counters:
1637 self.counters[code] += 1
1638 else:
1639 self.counters[code] = 1
1640 self.messages[code] = text[5:]
1641 # Don't care about expected errors or warnings
1642 if code in self.expected:
1643 return
1644 if self.print_filename and not self.file_errors:
1645 print(self.filename)
1646 self.file_errors += 1
1647 self.total_errors += 1
1648 return code
1649
1650 def get_file_results(self):
1651 """Return the count of errors and warnings for this file."""
1115 return self.file_errors 1652 return self.file_errors
1116 1653
1117 def report_error(self, line_number, offset, text, check): 1654 def get_count(self, prefix=''):
1655 """Return the total count of errors and warnings."""
1656 return sum([self.counters[key]
1657 for key in self.messages if key.startswith(prefix)])
1658
1659 def get_statistics(self, prefix=''):
1118 """ 1660 """
1119 Report an error, according to options. 1661 Get statistics for message codes that start with the prefix.
1662
1663 prefix='' matches all errors and warnings
1664 prefix='E' matches all errors
1665 prefix='W' matches all warnings
1666 prefix='E4' matches all errors that have to do with imports
1120 """ 1667 """
1121 code = text[:4] 1668 return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
1122 if ignore_code(code): 1669 for key in sorted(self.messages) if key.startswith(prefix)]
1670
1671 def print_statistics(self, prefix=''):
1672 """Print overall statistics (number of errors and warnings)."""
1673 for line in self.get_statistics(prefix):
1674 print(line)
1675
1676 def print_benchmark(self):
1677 """Print benchmark numbers."""
1678 print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
1679 if self.elapsed:
1680 for key in self._benchmark_keys:
1681 print('%-7d %s per second (%d total)' %
1682 (self.counters[key] / self.elapsed, key,
1683 self.counters[key]))
1684
1685
1686 class FileReport(BaseReport):
1687 """Collect the results of the checks and print only the filenames."""
1688 print_filename = True
1689
1690
1691 class StandardReport(BaseReport):
1692 """Collect and print the results of the checks."""
1693
1694 def __init__(self, options):
1695 super(StandardReport, self).__init__(options)
1696 self._fmt = REPORT_FORMAT.get(options.format.lower(),
1697 options.format)
1698 self._repeat = options.repeat
1699 self._show_source = options.show_source
1700 self._show_pep8 = options.show_pep8
1701
1702 def init_file(self, filename, lines, expected, line_offset):
1703 """Signal a new file."""
1704 self._deferred_print = []
1705 return super(StandardReport, self).init_file(
1706 filename, lines, expected, line_offset)
1707
1708 def error(self, line_number, offset, text, check):
1709 """Report an error, according to options."""
1710 code = super(StandardReport, self).error(line_number, offset,
1711 text, check)
1712 if code and (self.counters[code] == 1 or self._repeat):
1713 self._deferred_print.append(
1714 (line_number, offset, code, text[5:], check.__doc__))
1715 return code
1716
1717 def error_args(self, line_number, offset, code, check, *args):
1718 """Report an error, according to options."""
1719 code = super(StandardReport, self).error_args(line_number, offset,
1720 code, check, *args)
1721 if code and (self.counters[code] == 1 or self._repeat):
1722 text = getMessage(code, *args)
1723 self._deferred_print.append(
1724 (line_number, offset, code, text[5:], check.__doc__))
1725 return code
1726
1727 def get_file_results(self):
1728 """Print the result and return the overall count for this file."""
1729 self._deferred_print.sort()
1730 for line_number, offset, code, text, doc in self._deferred_print:
1731 print(self._fmt % {
1732 'path': self.filename,
1733 'row': self.line_offset + line_number, 'col': offset + 1,
1734 'code': code, 'text': text,
1735 })
1736 if self._show_source:
1737 if line_number > len(self.lines):
1738 line = ''
1739 else:
1740 line = self.lines[line_number - 1]
1741 print(line.rstrip())
1742 print(' ' * offset + '^')
1743 if self._show_pep8 and doc:
1744 print(doc.lstrip('\n').rstrip())
1745 return self.file_errors
1746
1747
1748 class DiffReport(StandardReport):
1749 """Collect and print the results for the changed lines only."""
1750
1751 def __init__(self, options):
1752 super(DiffReport, self).__init__(options)
1753 self._selected = options.selected_lines
1754
1755 def error(self, line_number, offset, text, check):
1756 if line_number not in self._selected[self.filename]:
1123 return 1757 return
1124 if options.quiet == 1 and not self.file_errors: 1758 return super(DiffReport, self).error(line_number, offset, text, check)
1125 message(self.filename) 1759
1126 if code in options.counters: 1760
1127 options.counters[code] += 1 1761 class StyleGuide(object):
1762 """Initialize a PEP-8 instance with few options."""
1763
1764 def __init__(self, *args, **kwargs):
1765 # build options from the command line
1766 self.checker_class = kwargs.pop('checker_class', Checker)
1767 parse_argv = kwargs.pop('parse_argv', False)
1768 config_file = kwargs.pop('config_file', None)
1769 parser = kwargs.pop('parser', None)
1770 options, self.paths = process_options(
1771 parse_argv=parse_argv, config_file=config_file, parser=parser)
1772 if args or kwargs:
1773 # build options from dict
1774 options_dict = dict(*args, **kwargs)
1775 options.__dict__.update(options_dict)
1776 if 'paths' in options_dict:
1777 self.paths = options_dict['paths']
1778
1779 self.runner = self.input_file
1780 self.options = options
1781
1782 if not options.reporter:
1783 options.reporter = BaseReport if options.quiet else StandardReport
1784
1785 for index, value in enumerate(options.exclude):
1786 options.exclude[index] = value.rstrip('/')
1787 options.select = tuple(options.select or ())
1788 if not (options.select or options.ignore or
1789 options.testsuite or options.doctest) and DEFAULT_IGNORE:
1790 # The default choice: ignore controversial checks
1791 options.ignore = tuple(DEFAULT_IGNORE.split(','))
1128 else: 1792 else:
1129 options.counters[code] = 1 1793 # Ignore all checks which are not explicitly selected
1130 options.messages[code] = text[5:] 1794 options.ignore = ('',) if options.select else tuple(options.ignore)
1131 if options.quiet or code in self.expected: 1795 options.benchmark_keys = BENCHMARK_KEYS[:]
1132 # Don't care about expected errors or warnings 1796 options.ignore_code = self.ignore_code
1133 return 1797 options.physical_checks = self.get_checks('physical_line')
1134 self.file_errors += 1 1798 options.logical_checks = self.get_checks('logical_line')
1135 if options.counters[code] == 1 or options.repeat: 1799 options.ast_checks = self.get_checks('tree')
1136 message("%s:%s:%d: %s" % 1800 self.init_report()
1137 (self.filename, self.line_offset + line_number, 1801
1138 offset + 1, text)) 1802 def init_report(self, reporter=None):
1139 if options.show_source: 1803 """Initialize the report instance."""
1140 line = self.lines[line_number - 1] 1804 self.options.report = (reporter or self.options.reporter)(self.options)
1141 message(line.rstrip()) 1805 return self.options.report
1142 message(' ' * offset + '^') 1806
1143 if options.show_pep8: 1807 def check_files(self, paths=None):
1144 message(check.__doc__.lstrip('\n').rstrip()) 1808 """Run all checks on the paths."""
1145 1809 if paths is None:
1146 1810 paths = self.paths
1147 def input_file(filename): 1811 report = self.options.report
1148 """ 1812 runner = self.runner
1149 Run all checks on a Python source file. 1813 report.start()
1150 """ 1814 try:
1151 if options.verbose: 1815 for path in paths:
1152 message('checking ' + filename) 1816 if os.path.isdir(path):
1153 Checker(filename).check_all() 1817 self.input_dir(path)
1154 1818 elif not self.excluded(path):
1155 1819 runner(path)
1156 def input_dir(dirname, runner=None): 1820 except KeyboardInterrupt:
1157 """ 1821 print('... stopped')
1158 Check all Python source files in this directory and all subdirectories. 1822 report.stop()
1159 """ 1823 return report
1160 dirname = dirname.rstrip('/') 1824
1161 if excluded(dirname): 1825 def input_file(self, filename, lines=None, expected=None, line_offset=0):
1162 return 1826 """Run all checks on a Python source file."""
1163 if runner is None: 1827 if self.options.verbose:
1164 runner = input_file 1828 print('checking %s' % filename)
1165 for root, dirs, files in os.walk(dirname): 1829 fchecker = self.checker_class(
1166 if options.verbose: 1830 filename, lines=lines, options=self.options)
1167 message('directory ' + root) 1831 return fchecker.check_all(expected=expected, line_offset=line_offset)
1168 options.counters['directories'] += 1 1832
1169 dirs.sort() 1833 def input_dir(self, dirname):
1170 for subdir in dirs: 1834 """Check all files in this directory and all subdirectories."""
1171 if excluded(subdir): 1835 dirname = dirname.rstrip('/')
1172 dirs.remove(subdir) 1836 if self.excluded(dirname):
1173 files.sort() 1837 return 0
1174 for filename in files: 1838 counters = self.options.report.counters
1175 if filename_match(filename) and not excluded(filename): 1839 verbose = self.options.verbose
1176 options.counters['files'] += 1 1840 filepatterns = self.options.filename
1177 runner(os.path.join(root, filename)) 1841 runner = self.runner
1178 1842 for root, dirs, files in os.walk(dirname):
1179 1843 if verbose:
1180 def excluded(filename): 1844 print('directory ' + root)
1181 """ 1845 counters['directories'] += 1
1182 Check if options.exclude contains a pattern that matches filename. 1846 for subdir in sorted(dirs):
1183 """ 1847 if self.excluded(subdir, root):
1184 basename = os.path.basename(filename) 1848 dirs.remove(subdir)
1185 for pattern in options.exclude: 1849 for filename in sorted(files):
1186 if fnmatch(basename, pattern): 1850 # contain a pattern that matches?
1187 # print basename, 'excluded because it matches', pattern 1851 if ((filename_match(filename, filepatterns) and
1852 not self.excluded(filename, root))):
1853 runner(os.path.join(root, filename))
1854
1855 def excluded(self, filename, parent=None):
1856 """
1857 Check if options.exclude contains a pattern that matches filename.
1858 """
1859 if not self.options.exclude:
1860 return False
1861 basename = os.path.basename(filename)
1862 if filename_match(basename, self.options.exclude):
1188 return True 1863 return True
1189 1864 if parent:
1190 1865 filename = os.path.join(parent, filename)
1191 def filename_match(filename): 1866 return filename_match(filename, self.options.exclude)
1192 """ 1867
1193 Check if options.filename contains a pattern that matches filename. 1868 def ignore_code(self, code):
1194 If options.filename is unspecified, this always returns True. 1869 """
1195 """ 1870 Check if the error code should be ignored.
1196 if not options.filename: 1871
1197 return True 1872 If 'options.select' contains a prefix of the error code,
1198 for pattern in options.filename: 1873 return False. Else, if 'options.ignore' contains a prefix of
1199 if fnmatch(filename, pattern): 1874 the error code, return True.
1200 return True 1875 """
1201 1876 return (code.startswith(self.options.ignore) and
1202 1877 not code.startswith(self.options.select))
1203 def ignore_code(code): 1878
1204 """ 1879 def get_checks(self, argument_name):
1205 Check if options.ignore contains a prefix of the error code. 1880 """
1206 If options.select contains a prefix of the error code, do not ignore it. 1881 Find all globally visible functions where the first argument name
1207 """ 1882 starts with argument_name and which contain selected tests.
1208 for select in options.select: 1883 """
1209 if code.startswith(select): 1884 checks = []
1210 return False 1885 for check, attrs in _checks[argument_name].items():
1211 for ignore in options.ignore: 1886 (codes, args) = attrs
1212 if code.startswith(ignore): 1887 if any(not (code and self.ignore_code(code)) for code in codes):
1213 return True 1888 checks.append((check.__name__, check, args))
1214 1889 return sorted(checks)
1215 1890
1216 def reset_counters(): 1891
1217 for key in list(options.counters.keys()): 1892 ##class Checker(object):
1218 if key not in BENCHMARK_KEYS: 1893 ## """
1219 del options.counters[key] 1894 ## Load a Python source file, tokenize it, check coding style.
1220 options.messages = {} 1895 ## """
1221 1896 ##
1222 1897 ## def __init__(self, filename, lines=None):
1223 def get_error_statistics(): 1898 ## self.filename = filename
1224 """Get error statistics.""" 1899 ## if filename is None:
1225 return get_statistics("E") 1900 ## self.filename = 'stdin'
1226 1901 ## self.lines = lines or []
1227 1902 ## elif lines is None:
1228 def get_warning_statistics(): 1903 ## self.lines = readlines(filename)
1229 """Get warning statistics.""" 1904 ## else:
1230 return get_statistics("W") 1905 ## self.lines = lines
1231 1906 ## options.counters['physical lines'] += len(self.lines)
1232 1907 ##
1233 def get_statistics(prefix=''): 1908 ## def readline(self):
1234 """ 1909 ## """
1235 Get statistics for message codes that start with the prefix. 1910 ## Get the next line from the input buffer.
1236 1911 ## """
1237 prefix='' matches all errors and warnings 1912 ## self.line_number += 1
1238 prefix='E' matches all errors 1913 ## if self.line_number > len(self.lines):
1239 prefix='W' matches all warnings 1914 ## return ''
1240 prefix='E4' matches all errors that have to do with imports 1915 ## return self.lines[self.line_number - 1]
1241 """ 1916 ##
1242 stats = [] 1917 ## def readline_check_physical(self):
1243 keys = list(options.messages.keys()) 1918 ## """
1244 keys.sort() 1919 ## Check and return the next physical line. This method can be
1245 for key in keys: 1920 ## used to feed tokenize.generate_tokens.
1246 if key.startswith(prefix): 1921 ## """
1247 stats.append('%-7s %s %s' % 1922 ## line = self.readline()
1248 (options.counters[key], key, options.messages[key])) 1923 ## if line:
1249 return stats 1924 ## self.check_physical(line)
1250 1925 ## return line
1251 1926 ##
1252 def get_count(prefix=''): 1927 ## def run_check(self, check, argument_names):
1253 """Return the total count of errors and warnings.""" 1928 ## """
1254 keys = list(options.messages.keys()) 1929 ## Run a check plugin.
1255 count = 0 1930 ## """
1256 for key in keys: 1931 ## arguments = []
1257 if key.startswith(prefix): 1932 ## for name in argument_names:
1258 count += options.counters[key] 1933 ## arguments.append(getattr(self, name))
1259 return count 1934 ## return check(*arguments)
1260 1935 ##
1261 1936 ## def check_physical(self, line):
1262 def print_statistics(prefix=''): 1937 ## """
1263 """Print overall statistics (number of errors and warnings).""" 1938 ## Run all physical checks on a raw input line.
1264 for line in get_statistics(prefix): 1939 ## """
1265 print(line) 1940 ## self.physical_line = line
1266 1941 ## if self.indent_char is None and len(line) and line[0] in ' \t':
1267 1942 ## self.indent_char = line[0]
1268 def print_benchmark(elapsed): 1943 ## for name, check, argument_names in options.physical_checks:
1269 """ 1944 ## result = self.run_check(check, argument_names)
1270 Print benchmark numbers. 1945 ## if result is not None:
1271 """ 1946 ## offset, code, *args = result
1272 print('%-7.2f %s' % (elapsed, 'seconds elapsed')) 1947 ## self.report_error_args(self.line_number, offset, code, check,
1273 for key in BENCHMARK_KEYS: 1948 ## *args)
1274 print('%-7d %s per second (%d total)' % ( 1949 ##
1275 options.counters[key] / elapsed, key, 1950 ## def build_tokens_line(self):
1276 options.counters[key])) 1951 ## """
1277 1952 ## Build a logical line from tokens.
1278 1953 ## """
1279 def run_tests(filename): 1954 ## self.mapping = []
1280 """ 1955 ## logical = []
1281 Run all the tests from a file. 1956 ## length = 0
1282 1957 ## previous = None
1283 A test file can provide many tests. Each test starts with a declaration. 1958 ## for token in self.tokens:
1284 This declaration is a single line starting with '#:'. 1959 ## token_type, text = token[0:2]
1285 It declares codes of expected failures, separated by spaces or 'Okay' 1960 ## if token_type in SKIP_TOKENS:
1286 if no failure is expected. 1961 ## continue
1287 If the file does not contain such declaration, it should pass all tests. 1962 ## if token_type == tokenize.STRING:
1288 If the declaration is empty, following lines are not checked, until next 1963 ## text = mute_string(text)
1289 declaration. 1964 ## if previous:
1290 1965 ## end_line, end = previous[3]
1291 Examples: 1966 ## start_line, start = token[2]
1292 1967 ## if end_line != start_line: # different row
1293 * Only E224 and W701 are expected: #: E224 W701 1968 ## prev_text = self.lines[end_line - 1][end - 1]
1294 * Following example is conform: #: Okay 1969 ## if prev_text == ',' or (prev_text not in '{[('
1295 * Don't check these lines: #: 1970 ## and text not in '}])'):
1296 """ 1971 ## logical.append(' ')
1297 lines = readlines(filename) + ['#:\n'] 1972 ## length += 1
1298 line_offset = 0 1973 ## elif end != start: # different column
1299 codes = ['Okay'] 1974 ## fill = self.lines[end_line - 1][end:start]
1300 testcase = [] 1975 ## logical.append(fill)
1301 for index, line in enumerate(lines): 1976 ## length += len(fill)
1302 if not line.startswith('#:'): 1977 ## self.mapping.append((length, token))
1303 if codes: 1978 ## logical.append(text)
1304 # Collect the lines of the test case 1979 ## length += len(text)
1305 testcase.append(line) 1980 ## previous = token
1306 continue 1981 ## self.logical_line = ''.join(logical)
1307 if codes and index > 0: 1982 ## assert self.logical_line.lstrip() == self.logical_line
1308 label = '%s:%s:1' % (filename, line_offset + 1) 1983 ## assert self.logical_line.rstrip() == self.logical_line
1309 codes = [c for c in codes if c != 'Okay'] 1984 ##
1310 # Run the checker 1985 ## def check_logical(self):
1311 errors = Checker(filename, testcase).check_all(codes, line_offset) 1986 ## """
1312 # Check if the expected errors were found 1987 ## Build a line from tokens and run all logical checks on it.
1313 for code in codes: 1988 ## """
1314 if not options.counters.get(code): 1989 ## options.counters['logical lines'] += 1
1315 errors += 1 1990 ## self.build_tokens_line()
1316 message('%s: error %s not found' % (label, code)) 1991 ## first_line = self.lines[self.mapping[0][1][2][0] - 1]
1317 if options.verbose and not errors: 1992 ## indent = first_line[:self.mapping[0][1][2][1]]
1318 message('%s: passed (%s)' % (label, ' '.join(codes))) 1993 ## self.previous_indent_level = self.indent_level
1319 # Keep showing errors for multiple tests 1994 ## self.indent_level = expand_indent(indent)
1320 reset_counters() 1995 ## if options.verbose >= 2:
1321 # output the real line numbers 1996 ## print(self.logical_line[:80].rstrip())
1322 line_offset = index 1997 ## for name, check, argument_names in options.logical_checks:
1323 # configure the expected errors 1998 ## if options.verbose >= 4:
1324 codes = line.split()[1:] 1999 ## print(' ' + name)
1325 # empty the test case buffer 2000 ## result = self.run_check(check, argument_names)
1326 del testcase[:] 2001 ## if result is not None:
1327 2002 ## offset, code, *args = result
1328 2003 ## if isinstance(offset, tuple):
1329 def selftest(): 2004 ## original_number, original_offset = offset
1330 """ 2005 ## else:
1331 Test all check functions with test cases in docstrings. 2006 ## for token_offset, token in self.mapping:
1332 """ 2007 ## if offset >= token_offset:
1333 count_passed = 0 2008 ## original_number = token[2][0]
1334 count_failed = 0 2009 ## original_offset = (token[2][1]
1335 checks = options.physical_checks + options.logical_checks 2010 ## + offset - token_offset)
1336 for name, check, argument_names in checks: 2011 ## self.report_error_args(original_number, original_offset,
1337 for line in check.__doc__.splitlines(): 2012 ## code, check, *args)
1338 line = line.lstrip() 2013 ## self.previous_logical = self.logical_line
1339 match = SELFTEST_REGEX.match(line) 2014 ##
1340 if match is None: 2015 ## def check_all(self, expected=None, line_offset=0):
1341 continue 2016 ## """
1342 code, source = match.groups() 2017 ## Run all checks on the input file.
1343 checker = Checker(None) 2018 ## """
1344 for part in source.split(r'\n'): 2019 ## self.expected = expected or ()
1345 part = part.replace(r'\t', '\t') 2020 ## self.line_offset = line_offset
1346 part = part.replace(r'\s', ' ') 2021 ## self.line_number = 0
1347 checker.lines.append(part + '\n') 2022 ## self.file_errors = 0
1348 options.quiet = 2 2023 ## self.indent_char = None
1349 checker.check_all() 2024 ## self.indent_level = 0
1350 error = None 2025 ## self.previous_logical = ''
1351 if code == 'Okay': 2026 ## self.blank_lines = 0
1352 if len(options.counters) > len(BENCHMARK_KEYS): 2027 ## self.blank_lines_before_comment = 0
1353 codes = [key for key in options.counters.keys() 2028 ## self.tokens = []
1354 if key not in BENCHMARK_KEYS] 2029 ## parens = 0
1355 error = "incorrectly found %s" % ', '.join(codes) 2030 ## try:
1356 elif not options.counters.get(code): 2031 ## for token in tokenize.generate_tokens(self.readline_check_physical):
1357 error = "failed to find %s" % code 2032 ## if options.verbose >= 3:
1358 # Reset the counters 2033 ## if token[2][0] == token[3][0]:
1359 reset_counters() 2034 ## pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
1360 if not error: 2035 ## else:
1361 count_passed += 1 2036 ## pos = 'l.%s' % token[3][0]
1362 else: 2037 ## print('l.%s\t%s\t%s\t%r' %
1363 count_failed += 1 2038 ## (token[2][0], pos, tokenize.tok_name[token[0]], token[1]))
1364 if len(checker.lines) == 1: 2039 ## self.tokens.append(token)
1365 print("pep8.py: %s: %s" % 2040 ## token_type, text = token[0:2]
1366 (error, checker.lines[0].rstrip())) 2041 ## if token_type == tokenize.OP and text in '([{':
1367 else: 2042 ## parens += 1
1368 print("pep8.py: %s:" % error) 2043 ## if token_type == tokenize.OP and text in '}])':
1369 for line in checker.lines: 2044 ## parens -= 1
1370 print(line.rstrip()) 2045 ## if token_type == tokenize.NEWLINE and not parens:
1371 if options.verbose: 2046 ## self.check_logical()
1372 print("%d passed and %d failed." % (count_passed, count_failed)) 2047 ## self.blank_lines = 0
1373 if count_failed: 2048 ## self.blank_lines_before_comment = 0
1374 print("Test failed.") 2049 ## self.tokens = []
1375 else: 2050 ## if token_type == tokenize.NL and not parens:
1376 print("Test passed.") 2051 ## if len(self.tokens) <= 1:
1377 2052 ## # The physical line contains only this token.
1378 2053 ## self.blank_lines += 1
1379 def process_options(arglist=None): 2054 ## self.tokens = []
1380 """ 2055 ## if token_type == tokenize.COMMENT:
1381 Process options passed either via arglist or via command line args. 2056 ## source_line = token[4]
1382 """ 2057 ## token_start = token[2][1]
1383 global options, args 2058 ## if source_line[:token_start].strip() == '':
1384 parser = OptionParser(version=__version__, 2059 ## self.blank_lines_before_comment = max(self.blank_lines,
2060 ## self.blank_lines_before_comment)
2061 ## self.blank_lines = 0
2062 ## if text.endswith('\n') and not parens:
2063 ## # The comment also ends a physical line. This works around
2064 ## # Python < 2.6 behaviour, which does not generate NL after
2065 ## # a comment which is on a line by itself.
2066 ## self.tokens = []
2067 ## except tokenize.TokenError as err:
2068 ## msg, (lnum, pos) = err.args
2069 ## self.report_error_args(lnum, pos, "E901", "TokenError", msg)
2070 ## return self.file_errors
2071 ##
2072 ## def report_error(self, line_number, offset, text, check):
2073 ## """
2074 ## Report an error, according to options.
2075 ## """
2076 ## code = text[:4]
2077 ## if ignore_code(code):
2078 ## return
2079 ## if options.quiet == 1 and not self.file_errors:
2080 ## message(self.filename)
2081 ## if code in options.counters:
2082 ## options.counters[code] += 1
2083 ## else:
2084 ## options.counters[code] = 1
2085 ## options.messages[code] = text[5:]
2086 ## if options.quiet or code in self.expected:
2087 ## # Don't care about expected errors or warnings
2088 ## return
2089 ## self.file_errors += 1
2090 ## if options.counters[code] == 1 or options.repeat:
2091 ## message("%s:%s:%d: %s" %
2092 ## (self.filename, self.line_offset + line_number,
2093 ## offset + 1, text))
2094 ## if options.show_source:
2095 ## line = self.lines[line_number - 1]
2096 ## message(line.rstrip())
2097 ## message(' ' * offset + '^')
2098 ## if options.show_pep8:
2099 ## message(check.__doc__.lstrip('\n').rstrip())
2100 ##
2101 ##
2102 ##def input_file(filename):
2103 ## """
2104 ## Run all checks on a Python source file.
2105 ## """
2106 ## if options.verbose:
2107 ## message('checking ' + filename)
2108 ## Checker(filename).check_all()
2109 ##
2110 ##
2111 ##def input_dir(dirname, runner=None):
2112 ## """
2113 ## Check all Python source files in this directory and all subdirectories.
2114 ## """
2115 ## dirname = dirname.rstrip('/')
2116 ## if excluded(dirname):
2117 ## return
2118 ## if runner is None:
2119 ## runner = input_file
2120 ## for root, dirs, files in os.walk(dirname):
2121 ## if options.verbose:
2122 ## message('directory ' + root)
2123 ## options.counters['directories'] += 1
2124 ## dirs.sort()
2125 ## for subdir in dirs:
2126 ## if excluded(subdir):
2127 ## dirs.remove(subdir)
2128 ## files.sort()
2129 ## for filename in files:
2130 ## if filename_match(filename) and not excluded(filename):
2131 ## options.counters['files'] += 1
2132 ## runner(os.path.join(root, filename))
2133 ##
2134 ##
2135 ##def excluded(filename):
2136 ## """
2137 ## Check if options.exclude contains a pattern that matches filename.
2138 ## """
2139 ## basename = os.path.basename(filename)
2140 ## for pattern in options.exclude:
2141 ## if fnmatch(basename, pattern):
2142 ## # print basename, 'excluded because it matches', pattern
2143 ## return True
2144 ##
2145 ##
2146 ##def filename_match(filename):
2147 ## """
2148 ## Check if options.filename contains a pattern that matches filename.
2149 ## If options.filename is unspecified, this always returns True.
2150 ## """
2151 ## if not options.filename:
2152 ## return True
2153 ## for pattern in options.filename:
2154 ## if fnmatch(filename, pattern):
2155 ## return True
2156 ##
2157 ##
2158 ##def ignore_code(code):
2159 ## """
2160 ## Check if options.ignore contains a prefix of the error code.
2161 ## If options.select contains a prefix of the error code, do not ignore it.
2162 ## """
2163 ## for select in options.select:
2164 ## if code.startswith(select):
2165 ## return False
2166 ## for ignore in options.ignore:
2167 ## if code.startswith(ignore):
2168 ## return True
2169 ##
2170 ##
2171 ##def reset_counters():
2172 ## for key in list(options.counters.keys()):
2173 ## if key not in BENCHMARK_KEYS:
2174 ## del options.counters[key]
2175 ## options.messages = {}
2176 ##
2177 ##
2178 ##def get_error_statistics():
2179 ## """Get error statistics."""
2180 ## return get_statistics("E")
2181 ##
2182 ##
2183 ##def get_warning_statistics():
2184 ## """Get warning statistics."""
2185 ## return get_statistics("W")
2186 ##
2187 ##
2188 ##def get_statistics(prefix=''):
2189 ## """
2190 ## Get statistics for message codes that start with the prefix.
2191 ##
2192 ## prefix='' matches all errors and warnings
2193 ## prefix='E' matches all errors
2194 ## prefix='W' matches all warnings
2195 ## prefix='E4' matches all errors that have to do with imports
2196 ## """
2197 ## stats = []
2198 ## keys = list(options.messages.keys())
2199 ## keys.sort()
2200 ## for key in keys:
2201 ## if key.startswith(prefix):
2202 ## stats.append('%-7s %s %s' %
2203 ## (options.counters[key], key, options.messages[key]))
2204 ## return stats
2205 ##
2206 ##
2207 ##def get_count(prefix=''):
2208 ## """Return the total count of errors and warnings."""
2209 ## keys = list(options.messages.keys())
2210 ## count = 0
2211 ## for key in keys:
2212 ## if key.startswith(prefix):
2213 ## count += options.counters[key]
2214 ## return count
2215 ##
2216 ##
2217 ##def print_statistics(prefix=''):
2218 ## """Print overall statistics (number of errors and warnings)."""
2219 ## for line in get_statistics(prefix):
2220 ## print(line)
2221 ##
2222 ##
2223 ##def print_benchmark(elapsed):
2224 ## """
2225 ## Print benchmark numbers.
2226 ## """
2227 ## print('%-7.2f %s' % (elapsed, 'seconds elapsed'))
2228 ## for key in BENCHMARK_KEYS:
2229 ## print('%-7d %s per second (%d total)' % (
2230 ## options.counters[key] / elapsed, key,
2231 ## options.counters[key]))
2232 ##
2233 ##
2234 ##def run_tests(filename):
2235 ## """
2236 ## Run all the tests from a file.
2237 ##
2238 ## A test file can provide many tests. Each test starts with a declaration.
2239 ## This declaration is a single line starting with '#:'.
2240 ## It declares codes of expected failures, separated by spaces or 'Okay'
2241 ## if no failure is expected.
2242 ## If the file does not contain such declaration, it should pass all tests.
2243 ## If the declaration is empty, following lines are not checked, until next
2244 ## declaration.
2245 ##
2246 ## Examples:
2247 ##
2248 ## * Only E224 and W701 are expected: #: E224 W701
2249 ## * Following example is conform: #: Okay
2250 ## * Don't check these lines: #:
2251 ## """
2252 ## lines = readlines(filename) + ['#:\n']
2253 ## line_offset = 0
2254 ## codes = ['Okay']
2255 ## testcase = []
2256 ## for index, line in enumerate(lines):
2257 ## if not line.startswith('#:'):
2258 ## if codes:
2259 ## # Collect the lines of the test case
2260 ## testcase.append(line)
2261 ## continue
2262 ## if codes and index > 0:
2263 ## label = '%s:%s:1' % (filename, line_offset + 1)
2264 ## codes = [c for c in codes if c != 'Okay']
2265 ## # Run the checker
2266 ## errors = Checker(filename, testcase).check_all(codes, line_offset)
2267 ## # Check if the expected errors were found
2268 ## for code in codes:
2269 ## if not options.counters.get(code):
2270 ## errors += 1
2271 ## message('%s: error %s not found' % (label, code))
2272 ## if options.verbose and not errors:
2273 ## message('%s: passed (%s)' % (label, ' '.join(codes)))
2274 ## # Keep showing errors for multiple tests
2275 ## reset_counters()
2276 ## # output the real line numbers
2277 ## line_offset = index
2278 ## # configure the expected errors
2279 ## codes = line.split()[1:]
2280 ## # empty the test case buffer
2281 ## del testcase[:]
2282 ##
2283 ##
2284 ##def selftest():
2285 ## """
2286 ## Test all check functions with test cases in docstrings.
2287 ## """
2288 ## count_passed = 0
2289 ## count_failed = 0
2290 ## checks = options.physical_checks + options.logical_checks
2291 ## for name, check, argument_names in checks:
2292 ## for line in check.__doc__.splitlines():
2293 ## line = line.lstrip()
2294 ## match = SELFTEST_REGEX.match(line)
2295 ## if match is None:
2296 ## continue
2297 ## code, source = match.groups()
2298 ## checker = Checker(None)
2299 ## for part in source.split(r'\n'):
2300 ## part = part.replace(r'\t', '\t')
2301 ## part = part.replace(r'\s', ' ')
2302 ## checker.lines.append(part + '\n')
2303 ## options.quiet = 2
2304 ## checker.check_all()
2305 ## error = None
2306 ## if code == 'Okay':
2307 ## if len(options.counters) > len(BENCHMARK_KEYS):
2308 ## codes = [key for key in options.counters.keys()
2309 ## if key not in BENCHMARK_KEYS]
2310 ## error = "incorrectly found %s" % ', '.join(codes)
2311 ## elif not options.counters.get(code):
2312 ## error = "failed to find %s" % code
2313 ## # Reset the counters
2314 ## reset_counters()
2315 ## if not error:
2316 ## count_passed += 1
2317 ## else:
2318 ## count_failed += 1
2319 ## if len(checker.lines) == 1:
2320 ## print("pep8.py: %s: %s" %
2321 ## (error, checker.lines[0].rstrip()))
2322 ## else:
2323 ## print("pep8.py: %s:" % error)
2324 ## for line in checker.lines:
2325 ## print(line.rstrip())
2326 ## if options.verbose:
2327 ## print("%d passed and %d failed." % (count_passed, count_failed))
2328 ## if count_failed:
2329 ## print("Test failed.")
2330 ## else:
2331 ## print("Test passed.")
2332 ##
2333 ##
2334 def get_parser(prog='pep8', version=__version__):
2335 parser = OptionParser(prog=prog, version=version,
1385 usage="%prog [options] input ...") 2336 usage="%prog [options] input ...")
2337 parser.config_options = [
2338 'exclude', 'filename', 'select', 'ignore', 'max-line-length',
2339 'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
2340 'show-source', 'statistics', 'verbose']
1386 parser.add_option('-v', '--verbose', default=0, action='count', 2341 parser.add_option('-v', '--verbose', default=0, action='count',
1387 help="print status messages, or debug with -vv") 2342 help="print status messages, or debug with -vv")
1388 parser.add_option('-q', '--quiet', default=0, action='count', 2343 parser.add_option('-q', '--quiet', default=0, action='count',
1389 help="report only file names, or nothing with -qq") 2344 help="report only file names, or nothing with -qq")
1390 parser.add_option('-r', '--repeat', action='store_true', 2345 parser.add_option('-r', '--repeat', default=True, action='store_true',
1391 help="show all occurrences of the same error") 2346 help="(obsolete) show all occurrences of the same error")
2347 parser.add_option('--first', action='store_false', dest='repeat',
2348 help="show first occurrence of each error")
1392 parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, 2349 parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
1393 help="exclude files or directories which match these " 2350 help="exclude files or directories which match these "
1394 "comma separated patterns (default: %s)" % 2351 "comma separated patterns (default: %default)")
1395 DEFAULT_EXCLUDE)
1396 parser.add_option('--filename', metavar='patterns', default='*.py', 2352 parser.add_option('--filename', metavar='patterns', default='*.py',
1397 help="when parsing directories, only check filenames " 2353 help="when parsing directories, only check filenames "
1398 "matching these comma separated patterns (default: " 2354 "matching these comma separated patterns "
1399 "*.py)") 2355 "(default: %default)")
1400 parser.add_option('--select', metavar='errors', default='', 2356 parser.add_option('--select', metavar='errors', default='',
1401 help="select errors and warnings (e.g. E,W6)") 2357 help="select errors and warnings (e.g. E,W6)")
1402 parser.add_option('--ignore', metavar='errors', default='', 2358 parser.add_option('--ignore', metavar='errors', default='',
1403 help="skip errors and warnings (e.g. E4,W)") 2359 help="skip errors and warnings (e.g. E4,W)")
1404 parser.add_option('--show-source', action='store_true', 2360 parser.add_option('--show-source', action='store_true',
1405 help="show source code for each error") 2361 help="show source code for each error")
1406 parser.add_option('--show-pep8', action='store_true', 2362 parser.add_option('--show-pep8', action='store_true',
1407 help="show text of PEP 8 for each error") 2363 help="show text of PEP 8 for each error "
2364 "(implies --first)")
1408 parser.add_option('--statistics', action='store_true', 2365 parser.add_option('--statistics', action='store_true',
1409 help="count errors and warnings") 2366 help="count errors and warnings")
1410 parser.add_option('--count', action='store_true', 2367 parser.add_option('--count', action='store_true',
1411 help="print total number of errors and warnings " 2368 help="print total number of errors and warnings "
1412 "to standard error and set exit code to 1 if " 2369 "to standard error and set exit code to 1 if "
1413 "total is not null") 2370 "total is not null")
1414 parser.add_option('--benchmark', action='store_true', 2371 parser.add_option('--max-line-length', type='int', metavar='n',
1415 help="measure processing speed") 2372 default=MAX_LINE_LENGTH,
1416 parser.add_option('--testsuite', metavar='dir', 2373 help="set maximum allowed line length "
1417 help="run regression tests from dir") 2374 "(default: %default)")
1418 parser.add_option('--doctest', action='store_true', 2375 parser.add_option('--hang-closing', action='store_true',
1419 help="run doctest on myself") 2376 help="hang closing bracket instead of matching "
2377 "indentation of opening bracket's line")
2378 parser.add_option('--format', metavar='format', default='default',
2379 help="set the error format [default|pylint|<custom>]")
2380 parser.add_option('--diff', action='store_true',
2381 help="report only lines changed according to the "
2382 "unified diff received on STDIN")
2383 group = parser.add_option_group("Testing Options")
2384 if os.path.exists(TESTSUITE_PATH):
2385 group.add_option('--testsuite', metavar='dir',
2386 help="run regression tests from dir")
2387 group.add_option('--doctest', action='store_true',
2388 help="run doctest on myself")
2389 group.add_option('--benchmark', action='store_true',
2390 help="measure processing speed")
2391 return parser
2392
2393
2394 def read_config(options, args, arglist, parser):
2395 """Read both user configuration and local configuration."""
2396 config = RawConfigParser()
2397
2398 user_conf = options.config
2399 if user_conf and os.path.isfile(user_conf):
2400 if options.verbose:
2401 print('user configuration: %s' % user_conf)
2402 config.read(user_conf)
2403
2404 parent = tail = args and os.path.abspath(os.path.commonprefix(args))
2405 while tail:
2406 if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]):
2407 if options.verbose:
2408 print('local configuration: in %s' % parent)
2409 break
2410 parent, tail = os.path.split(parent)
2411
2412 pep8_section = parser.prog
2413 if config.has_section(pep8_section):
2414 option_list = dict([(o.dest, o.type or o.action)
2415 for o in parser.option_list])
2416
2417 # First, read the default values
2418 new_options, _ = parser.parse_args([])
2419
2420 # Second, parse the configuration
2421 for opt in config.options(pep8_section):
2422 if options.verbose > 1:
2423 print(" %s = %s" % (opt, config.get(pep8_section, opt)))
2424 if opt.replace('_', '-') not in parser.config_options:
2425 print("Unknown option: '%s'\n not in [%s]" %
2426 (opt, ' '.join(parser.config_options)))
2427 sys.exit(1)
2428 normalized_opt = opt.replace('-', '_')
2429 opt_type = option_list[normalized_opt]
2430 if opt_type in ('int', 'count'):
2431 value = config.getint(pep8_section, opt)
2432 elif opt_type == 'string':
2433 value = config.get(pep8_section, opt)
2434 else:
2435 assert opt_type in ('store_true', 'store_false')
2436 value = config.getboolean(pep8_section, opt)
2437 setattr(new_options, normalized_opt, value)
2438
2439 # Third, overwrite with the command-line options
2440 options, _ = parser.parse_args(arglist, values=new_options)
2441 options.doctest = options.testsuite = False
2442 return options
2443
2444
2445 def process_options(arglist=None, parse_argv=False, config_file=None,
2446 parser=None):
2447 """Process options passed either via arglist or via command line args."""
2448 if not arglist and not parse_argv:
2449 # Don't read the command line if the module is used as a library.
2450 arglist = []
2451 if not parser:
2452 parser = get_parser()
2453 if not parser.has_option('--config'):
2454 if config_file is True:
2455 config_file = DEFAULT_CONFIG
2456 group = parser.add_option_group("Configuration", description=(
2457 "The project options are read from the [%s] section of the "
2458 "tox.ini file or the setup.cfg file located in any parent folder "
2459 "of the path(s) being processed. Allowed options are: %s." %
2460 (parser.prog, ', '.join(parser.config_options))))
2461 group.add_option('--config', metavar='path', default=config_file,
2462 help="user config file location (default: %default)")
1420 options, args = parser.parse_args(arglist) 2463 options, args = parser.parse_args(arglist)
1421 if options.testsuite: 2464 options.reporter = None
2465
2466 if options.ensure_value('testsuite', False):
1422 args.append(options.testsuite) 2467 args.append(options.testsuite)
1423 if not args and not options.doctest: 2468 elif not options.ensure_value('doctest', False):
1424 parser.error('input not specified') 2469 if parse_argv and not args:
1425 options.prog = os.path.basename(sys.argv[0]) 2470 if options.diff or any(os.path.exists(name)
2471 for name in PROJECT_CONFIG):
2472 args = ['.']
2473 else:
2474 parser.error('input not specified')
2475 options = read_config(options, args, arglist, parser)
2476 options.reporter = parse_argv and options.quiet == 1 and FileReport
2477
2478 options.filename = options.filename and options.filename.split(',')
1426 options.exclude = options.exclude.split(',') 2479 options.exclude = options.exclude.split(',')
1427 for index in range(len(options.exclude)): 2480 options.select = options.select and options.select.split(',')
1428 options.exclude[index] = options.exclude[index].rstrip('/') 2481 options.ignore = options.ignore and options.ignore.split(',')
1429 if options.filename: 2482
1430 options.filename = options.filename.split(',') 2483 if options.diff:
1431 if options.select: 2484 options.reporter = DiffReport
1432 options.select = options.select.split(',') 2485 stdin = stdin_get_value()
2486 options.selected_lines = parse_udiff(stdin, options.filename, args[0])
2487 args = sorted(options.selected_lines)
2488
2489 return options, args
2490
2491
2492 def _main():
2493 """Parse options and run checks on Python source."""
2494 pep8style = StyleGuide(parse_argv=True, config_file=True)
2495 options = pep8style.options
2496 if options.doctest or options.testsuite:
2497 from testsuite.support import run_tests
2498 report = run_tests(pep8style)
1433 else: 2499 else:
1434 options.select = [] 2500 report = pep8style.check_files()
1435 if options.ignore:
1436 options.ignore = options.ignore.split(',')
1437 elif options.select:
1438 # Ignore all checks which are not explicitly selected
1439 options.ignore = ['']
1440 elif options.testsuite or options.doctest:
1441 # For doctest and testsuite, all checks are required
1442 options.ignore = []
1443 else:
1444 # The default choice: ignore controversial checks
1445 options.ignore = DEFAULT_IGNORE.split(',')
1446 options.physical_checks = find_checks('physical_line')
1447 options.logical_checks = find_checks('logical_line')
1448 options.counters = dict.fromkeys(BENCHMARK_KEYS, 0)
1449 options.messages = {}
1450 return options, args
1451
1452
1453 def _main():
1454 """
1455 Parse options and run checks on Python source.
1456 """
1457 options, args = process_options()
1458 if options.doctest:
1459 import doctest
1460 doctest.testmod(verbose=options.verbose)
1461 selftest()
1462 if options.testsuite:
1463 runner = run_tests
1464 else:
1465 runner = input_file
1466 start_time = time.time()
1467 for path in args:
1468 if os.path.isdir(path):
1469 input_dir(path, runner=runner)
1470 elif not excluded(path):
1471 options.counters['files'] += 1
1472 runner(path)
1473 elapsed = time.time() - start_time
1474 if options.statistics: 2501 if options.statistics:
1475 print_statistics() 2502 report.print_statistics()
1476 if options.benchmark: 2503 if options.benchmark:
1477 print_benchmark(elapsed) 2504 report.print_benchmark()
1478 count = get_count() 2505 if options.testsuite and not options.quiet:
1479 if count: 2506 report.print_results()
2507 if report.total_errors:
1480 if options.count: 2508 if options.count:
1481 sys.stderr.write(str(count) + '\n') 2509 sys.stderr.write(str(report.total_errors) + '\n')
1482 sys.exit(1) 2510 sys.exit(1)
1483
1484 2511
1485 if __name__ == '__main__': 2512 if __name__ == '__main__':
1486 _main() 2513 _main()

eric ide

mercurial