#!/usr/bin/env python
# -*- coding: utf-8 -*-

# pycodestyle.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
# Copyright (C) 2014-2016 Ian Lee <ianlee1521@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# - added code for eric6 integration
#
# Copyright (c) 2011 - 2016 Detlev Offenbach <detlev@die-offenbachs.de>
#
62 |
62 |
import ast
import inspect
import keyword
import os
import re
import sys
import time
import tokenize

from fnmatch import fnmatch
from optparse import OptionParser

try:
    from configparser import RawConfigParser
    from io import TextIOWrapper
except ImportError:
    from ConfigParser import RawConfigParser    # __IGNORE_WARNING__
78 |
78 |
79 __version__ = '1.7.0' |
79 __version__ = '2.1.0.dev0' |
80 |
80 |
81 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' |
81 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' |
82 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704' |
82 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503' |
83 try: |
83 try: |
84 if sys.platform == 'win32': |
84 if sys.platform == 'win32': |
85 USER_CONFIG = os.path.expanduser(r'~\.pep8') |
85 USER_CONFIG = os.path.expanduser(r'~\.pycodestyle') |
86 else: |
86 else: |
87 USER_CONFIG = os.path.join( |
87 USER_CONFIG = os.path.join( |
88 os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), |
88 os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), |
89 'pep8' |
89 'pycodestyle' |
90 ) |
90 ) |
91 except ImportError: |
91 except ImportError: |
92 USER_CONFIG = None |
92 USER_CONFIG = None |
93 |
93 |
94 PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') |
94 PROJECT_CONFIG = ('setup.cfg', 'tox.ini') |
95 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') |
95 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') |
96 MAX_LINE_LENGTH = 79 |
96 MAX_LINE_LENGTH = 79 |
97 REPORT_FORMAT = { |
97 REPORT_FORMAT = { |
98 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', |
98 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', |
99 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', |
99 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', |
100 } |
100 } |
101 |
101 |
|
102 PyCF_ONLY_AST = 1024 |
102 SINGLETONS = frozenset(['False', 'None', 'True']) |
103 SINGLETONS = frozenset(['False', 'None', 'True']) |
103 KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS |
104 KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS |
104 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) |
105 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) |
105 ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-']) |
106 ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-']) |
106 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%']) |
107 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%']) |
206 return 0, "W391 blank line at end of file" |
207 return 0, "W391 blank line at end of file" |
207 if stripped_last_line == physical_line: |
208 if stripped_last_line == physical_line: |
208 return len(physical_line), "W292 no newline at end of file" |
209 return len(physical_line), "W292 no newline at end of file" |
209 |
210 |
210 |
211 |
211 def maximum_line_length(physical_line, max_line_length, multiline): |
212 def maximum_line_length(physical_line, max_line_length, multiline, noqa): |
212 r"""Limit all lines to a maximum of 79 characters. |
213 r"""Limit all lines to a maximum of 79 characters. |
213 |
214 |
214 There are still many devices around that are limited to 80 character |
215 There are still many devices around that are limited to 80 character |
215 lines; plus, limiting windows to 80 characters makes it possible to have |
216 lines; plus, limiting windows to 80 characters makes it possible to have |
216 several windows side-by-side. The default wrapping on such devices looks |
217 several windows side-by-side. The default wrapping on such devices looks |
246 # Plugins (check functions) for logical lines |
247 # Plugins (check functions) for logical lines |
247 ############################################################################## |
248 ############################################################################## |
248 |
249 |
249 |
250 |
250 def blank_lines(logical_line, blank_lines, indent_level, line_number, |
251 def blank_lines(logical_line, blank_lines, indent_level, line_number, |
251 blank_before, previous_logical, previous_indent_level): |
252 blank_before, previous_logical, |
|
253 previous_unindented_logical_line, previous_indent_level, |
|
254 lines): |
252 r"""Separate top-level function and class definitions with two blank lines. |
255 r"""Separate top-level function and class definitions with two blank lines. |
253 |
256 |
254 Method definitions inside a class are separated by a single blank line. |
257 Method definitions inside a class are separated by a single blank line. |
255 |
258 |
256 Extra blank lines may be used (sparingly) to separate groups of related |
259 Extra blank lines may be used (sparingly) to separate groups of related |
258 one-liners (e.g. a set of dummy implementations). |
261 one-liners (e.g. a set of dummy implementations). |
259 |
262 |
260 Use blank lines in functions, sparingly, to indicate logical sections. |
263 Use blank lines in functions, sparingly, to indicate logical sections. |
261 |
264 |
262 Okay: def a():\n pass\n\n\ndef b():\n pass |
265 Okay: def a():\n pass\n\n\ndef b():\n pass |
|
266 Okay: def a():\n pass\n\n\nasync def b():\n pass |
263 Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass |
267 Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass |
264 |
268 |
265 E301: class Foo:\n b = 0\n def bar():\n pass |
269 E301: class Foo:\n b = 0\n def bar():\n pass |
266 E302: def a():\n pass\n\ndef b(n):\n pass |
270 E302: def a():\n pass\n\ndef b(n):\n pass |
|
271 E302: def a():\n pass\n\nasync def b(n):\n pass |
267 E303: def a():\n pass\n\n\n\ndef b(n):\n pass |
272 E303: def a():\n pass\n\n\n\ndef b(n):\n pass |
268 E303: def a():\n\n\n\n pass |
273 E303: def a():\n\n\n\n pass |
269 E304: @decorator\n\ndef a():\n pass |
274 E304: @decorator\n\ndef a():\n pass |
|
275 E305: def a():\n pass\na() |
270 """ |
276 """ |
271 if line_number < 3 and not previous_logical: |
277 if line_number < 3 and not previous_logical: |
272 return # Don't expect blank lines before the first line |
278 return # Don't expect blank lines before the first line |
273 if previous_logical.startswith('@'): |
279 if previous_logical.startswith('@'): |
274 if blank_lines: |
280 if blank_lines: |
275 yield 0, "E304 blank lines found after function decorator" |
281 yield 0, "E304 blank lines found after function decorator" |
276 elif blank_lines > 2 or (indent_level and blank_lines == 2): |
282 elif blank_lines > 2 or (indent_level and blank_lines == 2): |
277 yield 0, "E303 too many blank lines (%d)", blank_lines |
283 yield 0, "E303 too many blank lines (%d)", blank_lines |
278 elif logical_line.startswith(('def ', 'class ', '@')): |
284 elif logical_line.startswith(('def ', 'async def ', 'class ', '@')): |
279 if indent_level: |
285 if indent_level: |
280 if not (blank_before or previous_indent_level < indent_level or |
286 if not (blank_before or previous_indent_level < indent_level or |
281 DOCSTRING_REGEX.match(previous_logical)): |
287 DOCSTRING_REGEX.match(previous_logical)): |
282 yield 0, "E301 expected 1 blank line, found 0" |
288 ancestor_level = indent_level |
|
289 nested = False |
|
290 # Search backwards for a def ancestor or tree root (top level). |
|
291 for line in lines[line_number - 2::-1]: |
|
292 if line.strip() and expand_indent(line) < ancestor_level: |
|
293 ancestor_level = expand_indent(line) |
|
294 nested = line.lstrip().startswith('def ') |
|
295 if nested or ancestor_level == 0: |
|
296 break |
|
297 if nested: |
|
298 yield 0, "E306 expected 1 blank line before a " \ |
|
299 "nested definition, found 0" |
|
300 else: |
|
301 yield 0, "E301 expected 1 blank line, found 0" |
283 elif blank_before != 2: |
302 elif blank_before != 2: |
284 yield 0, "E302 expected 2 blank lines, found %d", blank_before |
303 yield 0, "E302 expected 2 blank lines, found %d", blank_before |
|
304 elif (logical_line and not indent_level and blank_before != 2 and |
|
305 previous_unindented_logical_line.startswith(('def', 'class'))): |
|
306 yield 0, "E305 expected 2 blank lines after " \ |
|
307 "class or function definition, found %d", blank_before |
285 |
308 |
286 |
309 |
287 def extraneous_whitespace(logical_line): |
310 def extraneous_whitespace(logical_line): |
288 r"""Avoid extraneous whitespace. |
311 r"""Avoid extraneous whitespace. |
289 |
312 |
335 |
358 |
336 if '\t' in after: |
359 if '\t' in after: |
337 yield match.start(2), "E273 tab after keyword" |
360 yield match.start(2), "E273 tab after keyword" |
338 elif len(after) > 1: |
361 elif len(after) > 1: |
339 yield match.start(2), "E271 multiple spaces after keyword" |
362 yield match.start(2), "E271 multiple spaces after keyword" |
|
363 |
|
364 |
|
365 def missing_whitespace_after_import_keyword(logical_line): |
|
366 r"""Multiple imports in form from x import (a, b, c) should have space |
|
367 between import statement and parenthesised name list. |
|
368 |
|
369 Okay: from foo import (bar, baz) |
|
370 E275: from foo import(bar, baz) |
|
371 E275: from importable.module import(bar, baz) |
|
372 """ |
|
373 line = logical_line |
|
374 indicator = ' import(' |
|
375 if line.startswith('from '): |
|
376 found = line.find(indicator) |
|
377 if -1 < found: |
|
378 pos = found + len(indicator) - 1 |
|
379 yield pos, "E275 missing whitespace after keyword" |
340 |
380 |
341 |
381 |
342 def missing_whitespace(logical_line): |
382 def missing_whitespace(logical_line): |
343 r"""Each comma, semicolon or colon should be followed by whitespace. |
383 r"""Each comma, semicolon or colon should be followed by whitespace. |
344 |
384 |
771 Okay: boolean(a == b) |
811 Okay: boolean(a == b) |
772 Okay: boolean(a != b) |
812 Okay: boolean(a != b) |
773 Okay: boolean(a <= b) |
813 Okay: boolean(a <= b) |
774 Okay: boolean(a >= b) |
814 Okay: boolean(a >= b) |
775 Okay: def foo(arg: int = 42): |
815 Okay: def foo(arg: int = 42): |
|
816 Okay: async def foo(arg: int = 42): |
776 |
817 |
777 E251: def complex(real, imag = 0.0): |
818 E251: def complex(real, imag = 0.0): |
778 E251: return magic(r = real, i = imag) |
819 E251: return magic(r = real, i = imag) |
779 """ |
820 """ |
780 parens = 0 |
821 parens = 0 |
781 no_space = False |
822 no_space = False |
782 prev_end = None |
823 prev_end = None |
783 annotated_func_arg = False |
824 annotated_func_arg = False |
784 in_def = logical_line.startswith('def') |
825 in_def = logical_line.startswith(('def', 'async def')) |
785 message = "E251 unexpected spaces around keyword / parameter equals" |
826 message = "E251 unexpected spaces around keyword / parameter equals" |
786 for token_type, text, start, end, line in tokens: |
827 for token_type, text, start, end, line in tokens: |
787 if token_type == tokenize.NL: |
828 if token_type == tokenize.NL: |
788 continue |
829 continue |
789 if no_space: |
830 if no_space: |
790 no_space = False |
831 no_space = False |
791 if start != prev_end: |
832 if start != prev_end: |
792 yield (prev_end, message) |
833 yield (prev_end, message) |
793 if token_type == tokenize.OP: |
834 if token_type == tokenize.OP: |
794 if text == '(': |
835 if text in '([': |
795 parens += 1 |
836 parens += 1 |
796 elif text == ')': |
837 elif text in ')]': |
797 parens -= 1 |
838 parens -= 1 |
798 elif in_def and text == ':' and parens == 1: |
839 elif in_def and text == ':' and parens == 1: |
799 annotated_func_arg = True |
840 annotated_func_arg = True |
800 elif parens and text == ',' and parens == 1: |
841 elif parens and text == ',' and parens == 1: |
801 annotated_func_arg = False |
842 annotated_func_arg = False |
869 yield found, "E401 multiple imports on one line" |
910 yield found, "E401 multiple imports on one line" |
870 |
911 |
871 |
912 |
872 def module_imports_on_top_of_file( |
913 def module_imports_on_top_of_file( |
873 logical_line, indent_level, checker_state, noqa): |
914 logical_line, indent_level, checker_state, noqa): |
874 r"""Imports are always put at the top of the file, just after any module |
915 r"""Place imports at the top of the file. |
875 comments and docstrings, and before module globals and constants. |
916 |
|
917 Always put imports at the top of the file, just after any module comments |
|
918 and docstrings, and before module globals and constants. |
876 |
919 |
877 Okay: import os |
920 Okay: import os |
878 Okay: # this is a comment\nimport os |
921 Okay: # this is a comment\nimport os |
879 Okay: '''this is a module docstring'''\nimport os |
922 Okay: '''this is a module docstring'''\nimport os |
880 Okay: r'''this is a module docstring'''\nimport os |
923 Okay: r'''this is a module docstring'''\nimport os |
943 E701: finally: cleanup() |
986 E701: finally: cleanup() |
944 E701: if foo == 'blah': one(); two(); three() |
987 E701: if foo == 'blah': one(); two(); three() |
945 E702: do_one(); do_two(); do_three() |
988 E702: do_one(); do_two(); do_three() |
946 E703: do_four(); # useless semicolon |
989 E703: do_four(); # useless semicolon |
947 E704: def f(x): return 2*x |
990 E704: def f(x): return 2*x |
|
991 E705: async def f(x): return 2*x |
948 E731: f = lambda x: 2*x |
992 E731: f = lambda x: 2*x |
949 """ |
993 """ |
950 line = logical_line |
994 line = logical_line |
951 last_char = len(line) - 1 |
995 last_char = len(line) - 1 |
952 found = line.find(':') |
996 found = line.find(':') |
|
997 prev_found = 0 |
|
998 counts = dict((char, 0) for char in '{}[]()') |
953 while -1 < found < last_char: |
999 while -1 < found < last_char: |
954 before = line[:found] |
1000 update_counts(line[prev_found:found], counts) |
955 if ((before.count('{') <= before.count('}') and # {'a': 1} (dict) |
1001 if ((counts['{'] <= counts['}'] and # {'a': 1} (dict) |
956 before.count('[') <= before.count(']') and # [1:2] (slice) |
1002 counts['['] <= counts[']'] and # [1:2] (slice) |
957 before.count('(') <= before.count(')'))): # (annotation) |
1003 counts['('] <= counts[')'])): # (annotation) |
958 lambda_kw = LAMBDA_REGEX.search(before) |
1004 lambda_kw = LAMBDA_REGEX.search(line, 0, found) |
959 if lambda_kw: |
1005 if lambda_kw: |
960 before = line[:lambda_kw.start()].rstrip() |
1006 before = line[:lambda_kw.start()].rstrip() |
961 if before[-1:] == '=' and isidentifier(before[:-1].strip()): |
1007 if before[-1:] == '=' and isidentifier(before[:-1].strip()): |
962 yield 0, ("E731 do not assign a lambda expression, use a " |
1008 yield 0, ("E731 do not assign a lambda expression, use a " |
963 "def") |
1009 "def") |
964 break |
1010 break |
965 if before.startswith('def '): |
1011 if line.startswith('def '): |
966 yield 0, "E704 multiple statements on one line (def)" |
1012 yield 0, "E704 multiple statements on one line (def)" |
|
1013 elif line.startswith('async def '): |
|
1014 yield 0, "E705 multiple statements on one line (async def)" |
967 else: |
1015 else: |
968 yield found, "E701 multiple statements on one line (colon)" |
1016 yield found, "E701 multiple statements on one line (colon)" |
|
1017 prev_found = found |
969 found = line.find(':', found + 1) |
1018 found = line.find(':', found + 1) |
970 found = line.find(';') |
1019 found = line.find(';') |
971 while -1 < found: |
1020 while -1 < found: |
972 if found < last_char: |
1021 if found < last_char: |
973 yield found, "E702 multiple statements on one line (semicolon)" |
1022 yield found, "E702 multiple statements on one line (semicolon)" |
1029 Okay: foo(\n -x) |
1078 Okay: foo(\n -x) |
1030 Okay: foo(x\n []) |
1079 Okay: foo(x\n []) |
1031 Okay: x = '''\n''' + '' |
1080 Okay: x = '''\n''' + '' |
1032 Okay: foo(x,\n -y) |
1081 Okay: foo(x,\n -y) |
1033 Okay: foo(x, # comment\n -y) |
1082 Okay: foo(x, # comment\n -y) |
|
1083 Okay: var = (1 &\n ~2) |
|
1084 Okay: var = (1 /\n -2) |
|
1085 Okay: var = (1 +\n -1 +\n -2) |
1034 """ |
1086 """ |
1035 def is_binary_operator(token_type, text): |
1087 def is_binary_operator(token_type, text): |
1036 # The % character is strictly speaking a binary operator, but the |
1088 # The % character is strictly speaking a binary operator, but the |
1037 # common usage seems to be to put it next to the format parameters, |
1089 # common usage seems to be to put it next to the format parameters, |
1038 # after a line break. |
1090 # after a line break. |
1039 return ((token_type == tokenize.OP or text in ['and', 'or']) and |
1091 return ((token_type == tokenize.OP or text in ['and', 'or']) and |
1040 text not in "()[]{},:.;@=%") |
1092 text not in "()[]{},:.;@=%~") |
1041 |
1093 |
1042 line_break = False |
1094 line_break = False |
1043 unary_context = True |
1095 unary_context = True |
|
1096 # Previous non-newline token types and text |
|
1097 previous_token_type = None |
|
1098 previous_text = None |
1044 for token_type, text, start, end, line in tokens: |
1099 for token_type, text, start, end, line in tokens: |
1045 if token_type == tokenize.COMMENT: |
1100 if token_type == tokenize.COMMENT: |
1046 continue |
1101 continue |
1047 if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: |
1102 if ('\n' in text or '\r' in text) and token_type != tokenize.STRING: |
1048 line_break = True |
1103 line_break = True |
1049 else: |
1104 else: |
1050 if (is_binary_operator(token_type, text) and line_break and |
1105 if (is_binary_operator(token_type, text) and line_break and |
1051 not unary_context): |
1106 not unary_context and |
|
1107 not is_binary_operator(previous_token_type, |
|
1108 previous_text)): |
1052 yield start, "W503 line break before binary operator" |
1109 yield start, "W503 line break before binary operator" |
1053 unary_context = text in '([{,;' |
1110 unary_context = text in '([{,;' |
1054 line_break = False |
1111 line_break = False |
|
1112 previous_token_type = token_type |
|
1113 previous_text = text |
1055 |
1114 |
1056 |
1115 |
1057 def comparison_to_singleton(logical_line, noqa): |
1116 def comparison_to_singleton(logical_line, noqa): |
1058 r"""Comparison to singletons should use "is" or "is not". |
1117 r"""Comparison to singletons should use "is" or "is not". |
1059 |
1118 |
1082 else: |
1141 else: |
1083 code = 'E712' |
1142 code = 'E712' |
1084 nonzero = ((singleton == 'True' and same) or |
1143 nonzero = ((singleton == 'True' and same) or |
1085 (singleton == 'False' and not same)) |
1144 (singleton == 'False' and not same)) |
1086 msg += " or 'if %scond:'" % ('' if nonzero else 'not ') |
1145 msg += " or 'if %scond:'" % ('' if nonzero else 'not ') |
1087 yield (match.start(2), "%s comparison to %s should be %s" % |
1146 yield (match.start(2), ("%s comparison to %s should be %s" % |
1088 (code, singleton, msg), singleton, msg) |
1147 (code, singleton, msg)), singleton, msg) |
1089 |
1148 |
1090 |
1149 |
1091 def comparison_negative(logical_line): |
1150 def comparison_negative(logical_line): |
1092 r"""Negative comparison should be done using "not in" and "is not". |
1151 r"""Negative comparison should be done using "not in" and "is not". |
1093 |
1152 |
1130 if inst and isidentifier(inst) and inst not in SINGLETONS: |
1189 if inst and isidentifier(inst) and inst not in SINGLETONS: |
1131 return # Allow comparison for types which are not obvious |
1190 return # Allow comparison for types which are not obvious |
1132 yield match.start(), "E721 do not compare types, use 'isinstance()'" |
1191 yield match.start(), "E721 do not compare types, use 'isinstance()'" |
1133 |
1192 |
1134 |
1193 |
|
1194 def ambiguous_identifier(logical_line, tokens): |
|
1195 r"""Never use the characters 'l', 'O', or 'I' as variable names. |
|
1196 |
|
1197 In some fonts, these characters are indistinguishable from the numerals |
|
1198 one and zero. When tempted to use 'l', use 'L' instead. |
|
1199 |
|
1200 Okay: L = 0 |
|
1201 Okay: o = 123 |
|
1202 Okay: i = 42 |
|
1203 E741: l = 0 |
|
1204 E741: O = 123 |
|
1205 E741: I = 42 |
|
1206 |
|
1207 Variables can be bound in several other contexts, including class and |
|
1208 function definitions, 'global' and 'nonlocal' statements, exception |
|
1209 handlers, and 'with' statements. |
|
1210 |
|
1211 Okay: except AttributeError as o: |
|
1212 Okay: with lock as L: |
|
1213 E741: except AttributeError as O: |
|
1214 E741: with lock as l: |
|
1215 E741: global I |
|
1216 E741: nonlocal l |
|
1217 E742: class I(object): |
|
1218 E743: def l(x): |
|
1219 """ |
|
1220 idents_to_avoid = ('l', 'O', 'I') |
|
1221 prev_type, prev_text, prev_start, prev_end, __ = tokens[0] |
|
1222 for token_type, text, start, end, line in tokens[1:]: |
|
1223 ident = pos = None |
|
1224 # identifiers on the lhs of an assignment operator |
|
1225 if token_type == tokenize.OP and '=' in text: |
|
1226 if prev_text in idents_to_avoid: |
|
1227 ident = prev_text |
|
1228 pos = prev_start |
|
1229 # identifiers bound to a value with 'as', 'global', or 'nonlocal' |
|
1230 if prev_text in ('as', 'global', 'nonlocal'): |
|
1231 if text in idents_to_avoid: |
|
1232 ident = text |
|
1233 pos = start |
|
1234 if prev_text == 'class': |
|
1235 if text in idents_to_avoid: |
|
1236 yield start, "E742 ambiguous class definition '%s'", text |
|
1237 if prev_text == 'def': |
|
1238 if text in idents_to_avoid: |
|
1239 yield start, "E743 ambiguous function definition '%s'", text |
|
1240 if ident: |
|
1241 yield pos, "E741 ambiguous variable name '%s'", ident |
|
1242 prev_text = text |
|
1243 prev_start = start |
|
1244 |
|
1245 |
1135 def python_3000_has_key(logical_line, noqa): |
1246 def python_3000_has_key(logical_line, noqa): |
1136 r"""The {}.has_key() method is removed in Python 3: use the 'in' operator. |
1247 r"""The {}.has_key() method is removed in Python 3: use the 'in' operator. |
1137 |
1248 |
1138 Okay: if "alph" in d:\n print d["alph"] |
1249 Okay: if "alph" in d:\n print d["alph"] |
1139 W601: assert d.has_key('alph') |
1250 W601: assert d.has_key('alph') |
1199 """Read the source code.""" |
1310 """Read the source code.""" |
1200 try: |
1311 try: |
1201 with open(filename, 'rb') as f: |
1312 with open(filename, 'rb') as f: |
1202 (coding, lines) = tokenize.detect_encoding(f.readline) |
1313 (coding, lines) = tokenize.detect_encoding(f.readline) |
1203 f = TextIOWrapper(f, coding, line_buffering=True) |
1314 f = TextIOWrapper(f, coding, line_buffering=True) |
1204 return [l.decode(coding) for l in lines] + f.readlines() |
1315 return [line.decode(coding) for line in lines] + f.readlines() |
1205 except (LookupError, SyntaxError, UnicodeError): |
1316 except (LookupError, SyntaxError, UnicodeError): |
1206 # Fall back if file encoding is improperly declared |
1317 # Fall back if file encoding is improperly declared |
1207 with open(filename, encoding='latin-1') as f: |
1318 with open(filename, encoding='latin-1') as f: |
1208 return f.readlines() |
1319 return f.readlines() |
1209 isidentifier = str.isidentifier |
1320 isidentifier = str.isidentifier |
1210 |
1321 |
1211 def stdin_get_value(): |
1322 def stdin_get_value(): |
|
1323 """Read the value from stdin.""" |
1212 return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() |
1324 return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() |
|
1325 |
1213 noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search |
1326 noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search |
1214 |
1327 |
1215 |
1328 |
1216 def expand_indent(line): |
1329 def expand_indent(line): |
1217 r"""Return the amount of indentation. |
1330 r"""Return the amount of indentation. |
1311 if not patterns: |
1424 if not patterns: |
1312 return default |
1425 return default |
1313 return any(fnmatch(filename, pattern) for pattern in patterns) |
1426 return any(fnmatch(filename, pattern) for pattern in patterns) |
1314 |
1427 |
1315 |
1428 |
|
1429 def update_counts(s, counts): |
|
1430 r"""Adds one to the counts of each appearance of characters in s, |
|
1431 for characters in counts""" |
|
1432 for char in s: |
|
1433 if char in counts: |
|
1434 counts[char] += 1 |
|
1435 |
|
1436 |
1316 def _is_eol_token(token): |
1437 def _is_eol_token(token): |
1317 return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' |
1438 return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n' |
|
1439 |
|
1440 |
1318 if COMMENT_WITH_NL: |
1441 if COMMENT_WITH_NL: |
1319 def _is_eol_token(token, _eol_token=_is_eol_token): |
1442 def _is_eol_token(token, _eol_token=_is_eol_token): |
1320 return _eol_token(token) or (token[0] == tokenize.COMMENT and |
1443 return _eol_token(token) or (token[0] == tokenize.COMMENT and |
1321 token[1] == token[4]) |
1444 token[1] == token[4]) |
1322 |
1445 |
1362 The first argument name is either 'physical_line' or 'logical_line'. |
1485 The first argument name is either 'physical_line' or 'logical_line'. |
1363 """ |
1486 """ |
1364 mod = inspect.getmodule(register_check) |
1487 mod = inspect.getmodule(register_check) |
1365 for (name, function) in inspect.getmembers(mod, inspect.isfunction): |
1488 for (name, function) in inspect.getmembers(mod, inspect.isfunction): |
1366 register_check(function) |
1489 register_check(function) |
|
1490 |
|
1491 |
1367 init_checks_registry() |
1492 init_checks_registry() |
1368 |
1493 |
1369 |
1494 |
1370 class Checker(object): |
1495 class Checker(object): |
1371 """Load a Python source file, tokenize it, check coding style.""" |
1496 """Load a Python source file, tokenize it, check coding style.""" |
1445 for name in argument_names: |
1571 for name in argument_names: |
1446 arguments.append(getattr(self, name)) |
1572 arguments.append(getattr(self, name)) |
1447 return check(*arguments) |
1573 return check(*arguments) |
1448 |
1574 |
1449 def init_checker_state(self, name, argument_names): |
1575 def init_checker_state(self, name, argument_names): |
1450 """ Prepares a custom state for the specific checker plugin.""" |
1576 """ Prepare custom state for the specific checker plugin.""" |
1451 if 'checker_state' in argument_names: |
1577 if 'checker_state' in argument_names: |
1452 self.checker_state = self._checker_states.setdefault(name, {}) |
1578 self.checker_state = self._checker_states.setdefault(name, {}) |
1453 |
1579 |
1454 def check_physical(self, line): |
1580 def check_physical(self, line): |
1455 """Run all physical checks on a raw input line.""" |
1581 """Run all physical checks on a raw input line.""" |
1528 self.report_error_args( |
1654 self.report_error_args( |
1529 offset[0], offset[1], text, check, *args) |
1655 offset[0], offset[1], text, check, *args) |
1530 if self.logical_line: |
1656 if self.logical_line: |
1531 self.previous_indent_level = self.indent_level |
1657 self.previous_indent_level = self.indent_level |
1532 self.previous_logical = self.logical_line |
1658 self.previous_logical = self.logical_line |
|
1659 if not self.indent_level: |
|
1660 self.previous_unindented_logical_line = self.logical_line |
1533 self.blank_lines = 0 |
1661 self.blank_lines = 0 |
1534 self.tokens = [] |
1662 self.tokens = [] |
1535 |
1663 |
1536 def check_ast(self): |
1664 def check_ast(self): |
1537 """Build the file's AST and run all AST checks.""" |
1665 """Build the file's AST and run all AST checks.""" |
1538 try: |
1666 try: |
1539 tree = compile(''.join(self.lines), '', 'exec', ast.PyCF_ONLY_AST) |
1667 tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) |
1540 except (ValueError, SyntaxError, TypeError): |
1668 except (ValueError, SyntaxError, TypeError): |
1541 return self.report_invalid_syntax() |
1669 return self.report_invalid_syntax() |
1542 for name, cls, __ in self._ast_checks: |
1670 for name, cls, __ in self._ast_checks: |
1543 # extended API for eric6 integration |
1671 # extended API for eric6 integration |
1544 checker = cls(tree, self.filename, self.options) |
1672 checker = cls(tree, self.filename, self.options) |
1864 # if not (options.select or options.ignore or |
1994 # if not (options.select or options.ignore or |
1865 # options.testsuite or options.doctest) and DEFAULT_IGNORE: |
1995 # options.testsuite or options.doctest) and DEFAULT_IGNORE: |
1866 # # The default choice: ignore controversial checks |
1996 # # The default choice: ignore controversial checks |
1867 # options.ignore = tuple(DEFAULT_IGNORE.split(',')) |
1997 # options.ignore = tuple(DEFAULT_IGNORE.split(',')) |
1868 # else: |
1998 # else: |
1869 # Ignore all checks which are not explicitly selected or all if no |
1999 # # Ignore all checks which are not explicitly selected or all if no |
|
2000 # options.ignore = ('',) if options.select else tuple(options.ignore) |
1870 |
2001 |
1871 # check is ignored or explicitly selected |
2002 # check is ignored or explicitly selected |
1872 options.ignore = ('',) if options.select else tuple(options.ignore) |
2003 options.ignore = ('',) if options.select else tuple(options.ignore) |
1873 options.benchmark_keys = BENCHMARK_KEYS[:] |
2004 options.benchmark_keys = BENCHMARK_KEYS[:] |
1874 options.ignore_code = self.ignore_code |
2005 options.ignore_code = self.ignore_code |
1970 if any(not (code and self.ignore_code(code)) for code in codes): |
2101 if any(not (code and self.ignore_code(code)) for code in codes): |
1971 checks.append((check.__name__, check, args)) |
2102 checks.append((check.__name__, check, args)) |
1972 return sorted(checks) |
2103 return sorted(checks) |
1973 |
2104 |
1974 |
2105 |
1975 def get_parser(prog='pep8', version=__version__): |
2106 def get_parser(prog='pycodestyle', version=__version__): |
|
2107 """Create the parser for the program.""" |
1976 parser = OptionParser(prog=prog, version=version, |
2108 parser = OptionParser(prog=prog, version=version, |
1977 usage="%prog [options] input ...") |
2109 usage="%prog [options] input ...") |
1978 parser.config_options = [ |
2110 parser.config_options = [ |
1979 'exclude', 'filename', 'select', 'ignore', 'max-line-length', |
2111 'exclude', 'filename', 'select', 'ignore', 'max-line-length', |
1980 'hang-closing', 'count', 'format', 'quiet', 'show-pep8', |
2112 'hang-closing', 'count', 'format', 'quiet', 'show-pep8', |
2037 """Read and parse configurations |
2169 """Read and parse configurations |
2038 |
2170 |
2039 If a config file is specified on the command line with the "--config" |
2171 If a config file is specified on the command line with the "--config" |
2040 option, then only it is used for configuration. |
2172 option, then only it is used for configuration. |
2041 |
2173 |
2042 Otherwise, the user configuration (~/.config/pep8) and any local |
2174 Otherwise, the user configuration (~/.config/pycodestyle) and any local |
2043 configurations in the current directory or above will be merged together |
2175 configurations in the current directory or above will be merged together |
2044 (in that order) using the read method of ConfigParser. |
2176 (in that order) using the read method of ConfigParser. |
2045 """ |
2177 """ |
2046 config = RawConfigParser() |
2178 config = RawConfigParser() |
2047 |
2179 |
2085 print(" %s = %s" % (opt, config.get(pep8_section, opt))) |
2217 print(" %s = %s" % (opt, config.get(pep8_section, opt))) |
2086 normalized_opt = opt.replace('-', '_') |
2218 normalized_opt = opt.replace('-', '_') |
2087 opt_type = option_list[normalized_opt] |
2219 opt_type = option_list[normalized_opt] |
2088 if opt_type in ('int', 'count'): |
2220 if opt_type in ('int', 'count'): |
2089 value = config.getint(pep8_section, opt) |
2221 value = config.getint(pep8_section, opt) |
2090 elif opt_type == 'string': |
2222 elif opt_type in ('store_true', 'store_false'): |
|
2223 value = config.getboolean(pep8_section, opt) |
|
2224 else: |
2091 value = config.get(pep8_section, opt) |
2225 value = config.get(pep8_section, opt) |
2092 if normalized_opt == 'exclude': |
2226 if normalized_opt == 'exclude': |
2093 value = normalize_paths(value, local_dir) |
2227 value = normalize_paths(value, local_dir) |
2094 else: |
|
2095 assert opt_type in ('store_true', 'store_false') |
|
2096 value = config.getboolean(pep8_section, opt) |
|
2097 setattr(new_options, normalized_opt, value) |
2228 setattr(new_options, normalized_opt, value) |
2098 |
2229 |
2099 # Third, overwrite with the command-line options |
2230 # Third, overwrite with the command-line options |
2100 (options, __) = parser.parse_args(arglist, values=new_options) |
2231 (options, __) = parser.parse_args(arglist, values=new_options) |
2101 options.doctest = options.testsuite = False |
2232 options.doctest = options.testsuite = False |
2105 def process_options(arglist=None, parse_argv=False, config_file=None, |
2236 def process_options(arglist=None, parse_argv=False, config_file=None, |
2106 parser=None): |
2237 parser=None): |
2107 """Process options passed either via arglist or via command line args. |
2238 """Process options passed either via arglist or via command line args. |
2108 |
2239 |
2109 Passing in the ``config_file`` parameter allows other tools, such as flake8 |
2240 Passing in the ``config_file`` parameter allows other tools, such as flake8 |
2110 to specify their own options to be processed in pep8. |
2241 to specify their own options to be processed in pycodestyle. |
2111 """ |
2242 """ |
2112 if not parser: |
2243 if not parser: |
2113 parser = get_parser() |
2244 parser = get_parser() |
2114 if not parser.has_option('--config'): |
2245 if not parser.has_option('--config'): |
2115 group = parser.add_option_group("Configuration", description=( |
2246 group = parser.add_option_group("Configuration", description=( |
2177 try: |
2308 try: |
2178 signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) |
2309 signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1)) |
2179 except AttributeError: |
2310 except AttributeError: |
2180 pass # not supported on Windows |
2311 pass # not supported on Windows |
2181 |
2312 |
2182 pep8style = StyleGuide(parse_argv=True) |
2313 style_guide = StyleGuide(parse_argv=True) |
2183 options = pep8style.options |
2314 options = style_guide.options |
2184 |
2315 |
2185 if options.doctest or options.testsuite: |
2316 if options.doctest or options.testsuite: |
2186 from testsuite.support import run_tests |
2317 from testsuite.support import run_tests |
2187 report = run_tests(pep8style) |
2318 report = run_tests(style_guide) |
2188 else: |
2319 else: |
2189 report = pep8style.check_files() |
2320 report = style_guide.check_files() |
2190 |
2321 |
2191 if options.statistics: |
2322 if options.statistics: |
2192 report.print_statistics() |
2323 report.print_statistics() |
2193 |
2324 |
2194 if options.benchmark: |
2325 if options.benchmark: |