--- pycodestyle.py  (2.8.0-eric)
+++ pycodestyle.py  (2.9.1-eric)
@@ -95,11 +86,11 @@
     sys.version_info < (3, 10) and
     callable(getattr(tokenize, '_compile', None))
 ):  # pragma: no cover (<py310)
     tokenize._compile = lru_cache()(tokenize._compile)  # type: ignore
 
-__version__ = '2.8.0-eric'
+__version__ = '2.9.1-eric'
 
 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504'
 try:
     if sys.platform == 'win32':
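The guarded monkey-patch above memoises tokenize._compile, a private CPython helper that recompiles the same token regexes over and over; Python 3.10 started caching it itself, hence the version gate. A minimal sketch of the same trick on a stand-in function (compile_pattern is illustrative, not part of the module):

    from functools import lru_cache
    import re

    def compile_pattern(pattern):
        # stands in for tokenize._compile: same pattern, recompiled repeatedly
        return re.compile(pattern)

    compile_pattern = lru_cache()(compile_pattern)
    assert compile_pattern(r'\d+') is compile_pattern(r'\d+')  # cached object reused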
@@ -133,20 +124,17 @@
 SINGLETONS = frozenset(['False', 'None', 'True'])
 KEYWORDS = frozenset(keyword.kwlist + ['print', 'async']) - SINGLETONS
 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
 ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@'])
 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
-# Warn for -> function annotation operator in py3.5+ (issue 803)
-FUNCTION_RETURN_ANNOTATION_OP = ['->'] if sys.version_info >= (3, 5) else []
 ASSIGNMENT_EXPRESSION_OP = [':='] if sys.version_info >= (3, 8) else []
 WS_NEEDED_OPERATORS = frozenset([
     '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
     '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=',
-    'and', 'in', 'is', 'or'] +
-    FUNCTION_RETURN_ANNOTATION_OP +
+    'and', 'in', 'is', 'or', '->'] +
     ASSIGNMENT_EXPRESSION_OP)
-WHITESPACE = frozenset(' \t')
+WHITESPACE = frozenset(' \t\xa0')
 NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
 SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
 # ERRORTOKEN is triggered by backticks in Python 3
 SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
 BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']
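Two behavioural changes hide in this hunk: '->' joins WS_NEEDED_OPERATORS unconditionally now that the minimum supported Python is well past 3.5, and WHITESPACE gains '\xa0' so a no-break space is treated as (bad) whitespace by the operator checks. One way to see the '->' rule fire through the public API (the filename is hypothetical; the lines are supplied in memory):

    import pycodestyle

    style = pycodestyle.StyleGuide()
    style.input_file('annot.py', lines=['def f(x)->int:\n', '    return x\n'])
    # reports E225 missing whitespace around operator for the '->'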
@@ -163,17 +151,17 @@
 COMPARE_NEGATIVE_REGEX = re.compile(r'\b(?<!is\s)(not)\s+[^][)(}{ ]+\s+'
                                     r'(in|is)\s')
 COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s+type(?:s.\w+Type'
                                 r'|\s*\(\s*([^)]*[^ )])\s*\))')
 KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
-OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
+OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+|:=)(\s*)')
 LAMBDA_REGEX = re.compile(r'\blambda\b')
 HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
 STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\b')
 STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)')
 STARTSWITH_INDENT_STATEMENT_REGEX = re.compile(
-    r'^\s*({0})\b'.format('|'.join(s.replace(' ', r'\s+') for s in (
+    r'^\s*({})\b'.format('|'.join(s.replace(' ', r'\s+') for s in (
         'def', 'async def',
         'for', 'async for',
         'if', 'elif', 'else',
         'try', 'except', 'finally',
         'with', 'async with',
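The only functional change among the regexes is the |:= alternative: ':' is not in the operator character class, so the E221-E224 whitespace checks previously never saw a walrus operator at all. What the new pattern captures:

    import re
    OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+|:=)(\s*)')
    before, after = OPERATOR_REGEX.search('if (n  := 10) > 5:').groups()
    # before == '  ' -- multiple spaces before ':=', the E221 pattern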
@@ -186,17 +174,14 @@
 
 _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
 
 
 def _get_parameters(function):
-    if sys.version_info >= (3, 3):
-        return [parameter.name
-                for parameter
-                in inspect.signature(function).parameters.values()
-                if parameter.kind == parameter.POSITIONAL_OR_KEYWORD]
-    else:
-        return inspect.getargspec(function)[0]
+    return [parameter.name
+            for parameter
+            in inspect.signature(function).parameters.values()
+            if parameter.kind == parameter.POSITIONAL_OR_KEYWORD]
 
 
 def register_check(check, codes=None):
     """Register a new check object."""
     def _add_check(check, kind, codes, args):
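With Python 2 gone, the inspect.getargspec branch is dead code and inspect.signature becomes the single path. The filter keeps only plain positional-or-keyword names, which is what the framework matches against each check's expected arguments. A small illustration (demo_check is made up):

    import inspect

    def demo_check(logical_line, tokens, verbose=False, *, noqa=False):
        pass

    [p.name for p in inspect.signature(demo_check).parameters.values()
     if p.kind == p.POSITIONAL_OR_KEYWORD]
    # ['logical_line', 'tokens', 'verbose'] -- keyword-only 'noqa' is dropped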
@@ -318,16 +303,10 @@
         chunks = line.split()
         if ((len(chunks) == 1 and multiline) or
             (len(chunks) == 2 and chunks[0] == '#')) and \
                 len(line) - len(chunks[-1]) < max_line_length - 7:
             return
-        if hasattr(line, 'decode'):  # Python 2
-            # The line could contain multi-byte characters
-            try:
-                length = len(line.decode('utf-8'))
-            except UnicodeError:
-                pass
         if length > max_line_length:
             return (max_line_length, "E501 line too long "
                     "(%d > %d characters)" % (length, max_line_length),
                     length, max_line_length)
 
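The deleted block only mattered on Python 2, where a physical line could be a byte string needing a UTF-8 decode before measuring. The surviving condition is the long-URL exemption: a line that is one unbreakable chunk (optionally behind a lone '#') escapes E501 when everything but that chunk fits with seven columns to spare. Roughly:

    max_line_length = 79
    line = '# https://example.com/' + 'v' * 100   # one unbreakable token
    chunks = line.split()                          # ['#', 'https://...vvv']
    exempt = (len(chunks) == 2 and chunks[0] == '#' and
              len(line) - len(chunks[-1]) < max_line_length - 7)
    # exempt is True, so no E501 despite len(line) > 79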
@@ -448,26 +427,26 @@
             ancestor_level = expand_indent(line)
             nested = STARTSWITH_DEF_REGEX.match(line.lstrip())
             if nested or ancestor_level == 0:
                 break
         if nested:
-            yield (0, "E306 expected %s blank lines before a "
-                   "nested definition, found %d", method_lines,
+            yield (0, "E306 expected {} blank lines before a "
+                   "nested definition, found {}", method_lines,
                    blank_before)
         else:
-            yield (0, "E301 expected %s blank lines, found %d",
+            yield (0, "E301 expected {} blank lines, found {}",
                    method_lines, blank_before)
     elif blank_before != top_level_lines:
-        yield (0, "E302 expected %s blank lines, found %d",
+        yield (0, "E302 expected {} blank lines, found {}",
                top_level_lines, blank_before)
     elif (logical_line and
             not indent_level and
             blank_before != top_level_lines and
             previous_unindented_logical_line.startswith(('def ', 'class '))
           ):
-        yield (0, "E305 expected %s blank lines after " \
-               "class or function definition, found %d",
+        yield (0, "E305 expected {} blank lines after " \
+               "class or function definition, found {}",
                top_level_lines, blank_before)
 
 
 @register_check
 def extraneous_whitespace(logical_line):
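The E301/E302/E305/E306 messages migrate from %-style to {} placeholders; in this variant the arguments travel alongside the message in the yield and are formatted later by the report, so the change is cosmetic. For orientation, a sketch of inputs that trigger two of them (default counts: one blank line between methods, two around top-level definitions):

    src = (
        'class C:\n'
        '    def a(self):\n'
        '        pass\n'
        '    def b(self):\n'   # no blank line after a(): E301 expects 1
        '        pass\n'
        'x = 1\n'              # no blank lines after the class: E305 expects 2
    )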
@@ -497,11 +476,11 @@
         if text[-1].isspace():
             # assert char in '([{'
             yield found + 1, "E201 whitespace after '%s'", char
         elif line[found - 1] != ',':
             code = ('E202' if char in '}])' else 'E203')  # if char in ',;:'
-            yield found, "%s whitespace before '%s'" % (code, char), char
+            yield found, f"{code} whitespace before '{char}'", char
 
 
 @register_check
 def whitespace_around_keywords(logical_line):
     r"""Avoid extraneous whitespace around keywords.
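The f-string rewrite of the E202/E203 message is behaviour-preserving; the yielded text is identical:

    code, char = 'E203', ':'
    assert ("%s whitespace before '%s'" % (code, char)
            == f"{code} whitespace before '{char}'")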
@@ -525,25 +504,30 @@
     elif len(after) > 1:
         yield match.start(2), "E271 multiple spaces after keyword"
 
 
 @register_check
-def missing_whitespace_after_import_keyword(logical_line):
-    r"""Multiple imports in form from x import (a, b, c) should have
-    space between import statement and parenthesised name list.
+def missing_whitespace_after_keyword(logical_line, tokens):
+    r"""Keywords should be followed by whitespace.
 
     Okay: from foo import (bar, baz)
     E275: from foo import(bar, baz)
     E275: from importable.module import(bar, baz)
+    E275: if(foo): bar
     """
-    line = logical_line
-    indicator = ' import('
-    if line.startswith('from '):
-        found = line.find(indicator)
-        if -1 < found:
-            pos = found + len(indicator) - 1
-            yield pos, "E275 missing whitespace after keyword"
+    for tok0, tok1 in zip(tokens, tokens[1:]):
+        # This must exclude the True/False/None singletons, which can
+        # appear e.g. as "if x is None:", and async/await, which were
+        # valid identifier names in old Python versions.
+        if (tok0.end == tok1.start and
+                keyword.iskeyword(tok0.string) and
+                tok0.string not in SINGLETONS and
+                tok0.string not in ('async', 'await') and
+                not (tok0.string == 'except' and tok1.string == '*') and
+                not (tok0.string == 'yield' and tok1.string == ')') and
+                tok1.string not in ':\n'):
+            yield tok0.end, "E275 missing whitespace after keyword"
 
 
 @register_check
 def missing_whitespace(logical_line):
     r"""Each comma, semicolon or colon should be followed by whitespace.
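This is the most substantive change in the section: E275 grows from a string search for ' import(' into a token-level scan over adjacent pairs, so any keyword glued to the next token is caught (if(foo), return(x), ...). A self-contained sketch of the same loop, minus the except*/yield special cases shown above:

    import io
    import keyword
    import tokenize

    SINGLETONS = frozenset(['False', 'None', 'True'])

    def e275_offsets(source):
        toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
        for tok0, tok1 in zip(toks, toks[1:]):
            if (tok0.end == tok1.start and            # the tokens touch
                    keyword.iskeyword(tok0.string) and
                    tok0.string not in SINGLETONS and
                    tok0.string not in ('async', 'await') and
                    tok1.string not in ':\n'):
                yield tok0.end

    list(e275_offsets('if(foo): bar\n'))   # [(1, 2)] -- 'if' touches '('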
@@ -758,11 +742,11 @@
                     token_type not in (tokenize.NL, tokenize.COMMENT) and
                     not indent[depth]):
                 indent[depth] = start[1]
                 indent_chances[start[1]] = True
                 if verbose >= 4:
-                    print("bracket depth %s indent to %s" % (depth, start[1]))
+                    print(f"bracket depth {depth} indent to {start[1]}")
             # deal with implicit string concatenation
             elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
                     text in ('u', 'ur', 'b', 'br')):
                 indent_chances[start[1]] = str
             # visual indent after assert/raise/with
@@ -1087,25 +1071,28 @@
         prev_end = end
 
 
 @register_check
 def whitespace_before_comment(logical_line, tokens):
-    r"""Separate inline comments by at least two spaces.
+    """Separate inline comments by at least two spaces.
 
     An inline comment is a comment on the same line as a statement.
     Inline comments should be separated by at least two spaces from the
     statement. They should start with a # and a single space.
 
-    Each line of a block comment starts with a # and a single space
-    (unless it is indented text inside the comment).
+    Each line of a block comment starts with a # and one or multiple
+    spaces as there can be indented text inside the comment.
 
     Okay: x = x + 1  # Increment x
     Okay: x = x + 1    # Increment x
-    Okay: # Block comment
+    Okay: # Block comments:
+    Okay: #  - Block comment list
+    Okay: # \xa0- Block comment list
     E261: x = x + 1 # Increment x
     E262: x = x + 1  #Increment x
     E262: x = x + 1  #  Increment x
+    E262: x = x + 1  # \xa0Increment x
     E265: #Block comment
     E266: ### Block comment
     """
     prev_end = (0, 0)
     for token_type, text, start, end, line in tokens:
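Dropping the docstring's r-prefix is deliberate, not cosmetic: pycodestyle's self-test executes the Okay:/Exxx: examples embedded in check docstrings, and the new cases need '\xa0' to denote a real no-break space rather than four literal characters:

    assert len('\xa0') == 1      # U+00A0 in a normal string
    assert len(r'\xa0') == 4     # backslash-x-a-0 in a raw string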
@@ -1251,11 +1238,11 @@
                 not (sys.version_info >= (3, 8) and
                      line[found + 1] == '=')):  # assignment expression
             lambda_kw = LAMBDA_REGEX.search(line, 0, found)
             if lambda_kw:
                 before = line[:lambda_kw.start()].rstrip()
-                if before[-1:] == '=' and isidentifier(before[:-1].strip()):
+                if before[-1:] == '=' and before[:-1].strip().isidentifier():
                     yield 0, ("E731 do not assign a lambda expression, use a "
                               "def")
                 break
             if STARTSWITH_DEF_REGEX.match(line):
                 yield 0, "E704 multiple statements on one line (def)"
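before[:-1].strip().isidentifier() inlines what the removed module-level isidentifier helper did (on Python 3 it was already bound to str.isidentifier, so behaviour is unchanged). The test decides whether the text left of '=' is a plain name being assigned a lambda:

    before = 'handler ='
    before[-1:] == '=' and before[:-1].strip().isidentifier()   # True -> E731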
@@ -1311,24 +1298,23 @@
             parens += 1
         elif text in ')]}':
             parens -= 1
 
 
+# The % character is strictly speaking a binary operator, but the
+# common usage seems to be to put it next to the format parameters,
+# after a line break.
 _SYMBOLIC_OPS = frozenset("()[]{},:.;@=%~") | frozenset(("...",))
 
 
 def _is_binary_operator(token_type, text):
-    is_op_token = token_type == tokenize.OP
-    is_conjunction = text in ['and', 'or']
-    # NOTE(sigmavirus24): Previously the not_a_symbol check was executed
-    # conditionally. Since it is now *always* executed, text may be
-    # None. In that case we get a TypeError for `text not in str`.
-    not_a_symbol = text and text not in _SYMBOLIC_OPS
-    # The % character is strictly speaking a binary operator, but the
-    # common usage seems to be to put it next to the format parameters,
-    # after a line break.
-    return ((is_op_token or is_conjunction) and not_a_symbol)
+    return (
+        token_type == tokenize.OP or
+        text in {'and', 'or'}
+    ) and (
+        text not in _SYMBOLIC_OPS
+    )
 
 
 def _break_around_binary_operators(tokens):
     """Private function to reduce duplication.
 
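The rewrite collapses the temporaries into one expression and drops the defensive 'text and ...' None-guard; for real tokens text is always a string, so the observable truth table is unchanged (and None not in a frozenset is simply True anyway). In practice:

    import tokenize

    _SYMBOLIC_OPS = frozenset("()[]{},:.;@=%~") | frozenset(("...",))

    def _is_binary_operator(token_type, text):
        return (
            token_type == tokenize.OP or
            text in {'and', 'or'}
        ) and (
            text not in _SYMBOLIC_OPS
        )

    _is_binary_operator(tokenize.OP, '+')      # True: a W503/W504 candidate
    _is_binary_operator(tokenize.OP, '=')      # False: '=' counts as symbolic
    _is_binary_operator(tokenize.NAME, 'and')  # True: keyword conjunctions too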
@@ -1814,16 +1800,10 @@
         if token_type in (tokenize.STRING, tokenize.COMMENT):
             # Only check comment-only lines
             if prev_token is None or prev_token in SKIP_TOKENS:
                 lines = line.splitlines()
                 for line_num, physical_line in enumerate(lines):
-                    if hasattr(physical_line, 'decode'):  # Python 2
-                        # The line could contain multi-byte characters
-                        try:
-                            physical_line = physical_line.decode('utf-8')
-                        except UnicodeError:
-                            pass
                     if start[0] + line_num == 1 and line.startswith('#!'):
                         return
                     length = len(physical_line)
                     chunks = physical_line.split()
                     if token_type == tokenize.COMMENT:
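As with E501 earlier, the deleted decode block was Python 2 ballast: a Python 3 physical line is already str, so len() counts characters, which is what the doc-length check (W505) should measure:

    line = '# naïve comment'
    len(line)                    # 15 characters
    len(line.encode('utf-8'))    # 16 bytes -- what Python 2's len() saw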
@@ -1845,34 +1825,23 @@
 ########################################################################
 # Helper functions
 ########################################################################
 
 
-if sys.version_info < (3,):
-    # Python 2: implicit encoding.
-    def readlines(filename):
-        """Read the source code."""
-        with open(filename, 'rU') as f:
-            return f.readlines()
-    isidentifier = re.compile(r'[a-zA-Z_]\w*$').match
-    stdin_get_value = sys.stdin.read
-else:
-    # Python 3
-    def readlines(filename):
-        """Read the source code."""
-        try:
-            with tokenize.open(filename) as f:
-                return f.readlines()
-        except (LookupError, SyntaxError, UnicodeError):
-            # Fall back if file encoding is improperly declared
-            with open(filename, encoding='latin-1') as f:
-                return f.readlines()
-    isidentifier = str.isidentifier
-
-    def stdin_get_value():
-        """Read the value from stdin."""
-        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
+def readlines(filename):
+    """Read the source code."""
+    try:
+        with tokenize.open(filename) as f:
+            return f.readlines()
+    except (LookupError, SyntaxError, UnicodeError):
+        # Fall back if file encoding is improperly declared
+        with open(filename, encoding='latin-1') as f:
+            return f.readlines()
+
+def stdin_get_value():
+    """Read the value from stdin."""
+    return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
 
 noqa = lru_cache(512)(re.compile(r'# no(?:qa|pep8)\b', re.I).search)
 
 
 def expand_indent(line):
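With the version branch gone, tokenize.open() is the single entry point: it honours PEP 263 coding cookies and BOMs, and the latin-1 fallback keeps the checker alive on files whose declared encoding is wrong, since latin-1 maps every byte to some character. Usage sketch (the path is illustrative):

    import tokenize

    with tokenize.open('some_module.py') as f:   # hypothetical file
        source_lines = f.readlines()             # decoded per its coding cookie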
@@ -1934,11 +1903,11 @@
             if line[:1] != '-':
                 nrows -= 1
             continue
         if line[:3] == '@@ ':
             hunk_match = HUNK_REGEX.match(line)
-            (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
+            (row, nrows) = (int(g or '1') for g in hunk_match.groups())
             rv[path].update(range(row, row + nrows))
         elif line[:3] == '+++':
             path = line[4:].split('\t', 1)[0]
             # Git diff will use (i)ndex, (w)ork tree, (c)ommit and
             # (o)bject instead of a/b/c/d as prefixes for patches
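Swapping the list comprehension for a generator expression is safe because the pair is unpacked immediately. What the parse yields for a typical hunk header:

    import re
    HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
    row, nrows = (int(g or '1') for g in
                  HUNK_REGEX.match('@@ -10,3 +42,7 @@').groups())
    # row == 42, nrows == 7: only these new-file lines get checked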
@@ -1995,11 +1964,11 @@
 ########################################################################
 # Framework to run all checks
 ########################################################################
 
 
-class Checker(object):
+class Checker:
     """Load a Python source file, tokenize it, check coding style."""
 
     def __init__(self, filename=None, lines=None,
                  options=None, report=None, **kwargs):
         if options is None:
@@ -2258,11 +2227,11 @@
         for token in self.generate_tokens():
             self.tokens.append(token)
             token_type, text = token[0:2]
             if self.verbose >= 3:
                 if token[2][0] == token[3][0]:
-                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
+                    pos = '[{}:{}]'.format(token[2][1] or '', token[3][1])
                 else:
                     pos = 'l.%s' % token[3][0]
                 print('l.%s\t%s\t%s\t%r' %
                       (token[2][0], pos, tokenize.tok_name[token[0]], text))
             if token_type == tokenize.OP:
@@ -2386,11 +2355,11 @@
         for line in self.get_statistics(prefix):
             print(line)
 
     def print_benchmark(self):
         """Print benchmark numbers."""
-        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
+        print('{:<7.2f} {}'.format(self.elapsed, 'seconds elapsed'))
         if self.elapsed:
             for key in self._benchmark_keys:
                 print('%-7d %s per second (%d total)' %
                       (self.counters[key] / self.elapsed, key,
                        self.counters[key]))
@@ -2419,12 +2388,11 @@
             return super().init_file(
                 filename, lines, expected, line_offset)
 
     def error(self, line_number, offset, text, check):
         """Report an error, according to options."""
-        code = super().error(line_number, offset,
-                             text, check)
+        code = super().error(line_number, offset, text, check)
         if code and (self.counters[code] == 1 or self._repeat):
             self._deferred_print.append(
                 (line_number, offset, code, text[5:], check.__doc__))
         return code
 
@@ -2477,11 +2445,11 @@
         if line_number not in self._selected[self.filename]:
             return
         return super().error(line_number, offset, text, check)
 
 
-class StyleGuide(object):
+class StyleGuide:
     """Initialize a PEP-8 instance with few options."""
 
     def __init__(self, *args, **kwargs):
         # build options from the command line
         self.checker_class = kwargs.pop('checker_class', Checker)
@@ -2570,12 +2538,14 @@
             for subdir in sorted(dirs):
                 if self.excluded(subdir, root):
                     dirs.remove(subdir)
             for filename in sorted(files):
                 # contain a pattern that matches?
-                if ((filename_match(filename, filepatterns) and
-                     not self.excluded(filename, root))):
+                if (
+                    filename_match(filename, filepatterns) and
+                    not self.excluded(filename, root)
+                ):
                     runner(os.path.join(root, filename))
 
     def excluded(self, filename, parent=None):
         """Check if the file should be excluded.
 
@@ -2741,12 +2711,12 @@
         for opt in config.options(pycodestyle_section):
             if opt.replace('_', '-') not in parser.config_options:
                 print("  unknown option '%s' ignored" % opt)
                 continue
             if options.verbose > 1:
-                print("  %s = %s" % (opt,
+                print("  {} = {}".format(opt,
                       config.get(pycodestyle_section, opt)))
             normalized_opt = opt.replace('-', '_')
             opt_type = option_list[normalized_opt]
             if opt_type in ('int', 'count'):
                 value = config.getint(pycodestyle_section, opt)
             elif opt_type in ('store_true', 'store_false'):
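The surrounding loop maps setup.cfg/tox.ini keys onto parser options and dispatches on each option's registered type; the 'int' branch shown above ends up in configparser's getint. A hypothetical in-memory equivalent:

    import configparser

    config = configparser.ConfigParser()
    config.read_string('[pycodestyle]\nmax-line-length = 100\n')
    config.getint('pycodestyle', 'max-line-length')   # 100, via the 'int' branch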