2 # -*- coding: utf-8 -*- |
2 # -*- coding: utf-8 -*- |
3 |
3 |
4 # pep8.py - Check Python source code formatting, according to PEP 8 |
4 # pep8.py - Check Python source code formatting, according to PEP 8 |
5 # Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net> |
5 # Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net> |
6 # Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com> |
6 # Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com> |
|
7 # Copyright (C) 2014-2016 Ian Lee <ianlee1521@gmail.com> |
7 # |
8 # |
8 # Permission is hereby granted, free of charge, to any person |
9 # Permission is hereby granted, free of charge, to any person |
9 # obtaining a copy of this software and associated documentation files |
10 # obtaining a copy of this software and associated documentation files |
10 # (the "Software"), to deal in the Software without restriction, |
11 # (the "Software"), to deal in the Software without restriction, |
11 # including without limitation the rights to use, copy, modify, merge, |
12 # including without limitation the rights to use, copy, modify, merge, |
75 from configparser import RawConfigParser |
74 from configparser import RawConfigParser |
76 from io import TextIOWrapper |
75 from io import TextIOWrapper |
77 except ImportError: |
76 except ImportError: |
78 from ConfigParser import RawConfigParser # __IGNORE_WARNING__ |
77 from ConfigParser import RawConfigParser # __IGNORE_WARNING__ |
79 |
78 |
80 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__' |
79 __version__ = '1.7.0' |
81 DEFAULT_IGNORE = 'E123,E226,E24' |
80 |
82 if sys.platform == 'win32': |
81 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' |
83 DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') |
82 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704' |
84 else: |
83 try: |
85 DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or |
84 if sys.platform == 'win32': |
86 os.path.expanduser('~/.config'), 'pep8') |
85 USER_CONFIG = os.path.expanduser(r'~\.pep8') |
|
86 else: |
|
87 USER_CONFIG = os.path.join( |
|
88 os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), |
|
89 'pep8' |
|
90 ) |
|
91 except ImportError: |
|
92 USER_CONFIG = None |
|
93 |
87 PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') |
94 PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8') |
88 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') |
95 TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite') |
89 MAX_LINE_LENGTH = 79 |
96 MAX_LINE_LENGTH = 79 |
90 REPORT_FORMAT = { |
97 REPORT_FORMAT = { |
91 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', |
98 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', |
112 RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$') |
119 RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$') |
113 ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b') |
120 ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b') |
114 DOCSTRING_REGEX = re.compile(r'u?r?["\']') |
121 DOCSTRING_REGEX = re.compile(r'u?r?["\']') |
115 EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') |
122 EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') |
116 WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') |
123 WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') |
117 COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)') |
124 COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)' |
118 COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^[({ ]+\s+(in|is)\s') |
125 r'\s*(?(1)|(None|False|True))\b') |
|
126 COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s') |
119 COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type' |
127 COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type' |
120 r'|\s*\(\s*([^)]*[^ )])\s*\))') |
128 r'|\s*\(\s*([^)]*[^ )])\s*\))') |
121 KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS)) |
129 KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS)) |
122 OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') |
130 OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') |
123 LAMBDA_REGEX = re.compile(r'\blambda\b') |
131 LAMBDA_REGEX = re.compile(r'\blambda\b') |
227 try: |
235 try: |
228 length = len(line.decode('utf-8')) |
236 length = len(line.decode('utf-8')) |
229 except UnicodeError: |
237 except UnicodeError: |
230 pass |
238 pass |
231 if length > max_line_length: |
239 if length > max_line_length: |
232 return (max_line_length, "E501 line too long ", length, max_line_length) |
240 return (max_line_length, "E501 line too long " |
|
241 "(%d > %d characters)" % (length, max_line_length), |
|
242 length, max_line_length) |
233 |
243 |
234 |
244 |
235 ############################################################################## |
245 ############################################################################## |
236 # Plugins (check functions) for logical lines |
246 # Plugins (check functions) for logical lines |
237 ############################################################################## |
247 ############################################################################## |
363 use 8-space tabs. |
373 use 8-space tabs. |
364 |
374 |
365 Okay: a = 1 |
375 Okay: a = 1 |
366 Okay: if a == 0:\n a = 1 |
376 Okay: if a == 0:\n a = 1 |
367 E111: a = 1 |
377 E111: a = 1 |
|
378 E114: # a = 1 |
368 |
379 |
369 Okay: for item in items:\n pass |
380 Okay: for item in items:\n pass |
370 E112: for item in items:\npass |
381 E112: for item in items:\npass |
|
382 E115: for item in items:\n# Hi\n pass |
371 |
383 |
372 Okay: a = 1\nb = 2 |
384 Okay: a = 1\nb = 2 |
373 E113: a = 1\n b = 2 |
385 E113: a = 1\n b = 2 |
374 """ |
386 E116: a = 1\n # b = 2 |
375 if indent_char == ' ' and indent_level % 4: |
387 """ |
376 yield 0, "E111 indentation is not a multiple of four" |
388 c = 0 if logical_line else 3 |
|
389 tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)" |
|
390 if indent_level % 4: |
|
391 yield 0, tmpl % (1 + c, "indentation is not a multiple of four") |
377 indent_expect = previous_logical.endswith(':') |
392 indent_expect = previous_logical.endswith(':') |
378 if indent_expect and indent_level <= previous_indent_level: |
393 if indent_expect and indent_level <= previous_indent_level: |
379 yield 0, "E112 expected an indented block" |
394 yield 0, tmpl % (2 + c, "expected an indented block") |
380 if indent_level > previous_indent_level and not indent_expect: |
395 elif not indent_expect and indent_level > previous_indent_level: |
381 yield 0, "E113 unexpected indentation" |
396 yield 0, tmpl % (3 + c, "unexpected indentation") |
382 |
397 |
383 |
398 |
384 def continued_indentation(logical_line, tokens, indent_level, hang_closing, |
399 def continued_indentation(logical_line, tokens, indent_level, hang_closing, |
385 indent_char, noqa, verbose): |
400 indent_char, noqa, verbose): |
386 r"""Continuation lines indentation. |
401 r"""Continuation lines indentation. |
432 hangs = [None] |
447 hangs = [None] |
433 # visual indents |
448 # visual indents |
434 indent_chances = {} |
449 indent_chances = {} |
435 last_indent = tokens[0][2] |
450 last_indent = tokens[0][2] |
436 visual_indent = None |
451 visual_indent = None |
|
452 last_token_multiline = False |
437 # for each depth, memorize the visual indent column |
453 # for each depth, memorize the visual indent column |
438 indent = [last_indent[1]] |
454 indent = [last_indent[1]] |
439 if verbose >= 3: |
455 if verbose >= 3: |
440 print(">>> " + tokens[0][4].rstrip()) |
456 print(">>> " + tokens[0][4].rstrip()) |
441 |
457 |
442 for token_type, text, start, end, line in tokens: |
458 for token_type, text, start, end, line in tokens: |
443 |
459 |
444 last_token_multiline = (start[0] != end[0]) |
|
445 newline = row < start[0] - first_row |
460 newline = row < start[0] - first_row |
446 if newline: |
461 if newline: |
447 row = start[0] - first_row |
462 row = start[0] - first_row |
448 newline = not last_token_multiline and token_type not in NEWLINE |
463 newline = not last_token_multiline and token_type not in NEWLINE |
449 |
464 |
512 else: |
527 else: |
513 error = "E121", "under-indented for hanging indent" |
528 error = "E121", "under-indented for hanging indent" |
514 yield start, "%s continuation line %s" % error |
529 yield start, "%s continuation line %s" % error |
515 |
530 |
516 # look for visual indenting |
531 # look for visual indenting |
517 if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) |
532 if (parens[row] and |
518 and not indent[depth]): |
533 token_type not in (tokenize.NL, tokenize.COMMENT) and |
|
534 not indent[depth]): |
519 indent[depth] = start[1] |
535 indent[depth] = start[1] |
520 indent_chances[start[1]] = True |
536 indent_chances[start[1]] = True |
521 if verbose >= 4: |
537 if verbose >= 4: |
522 print("bracket depth %s indent to %s" % (depth, start[1])) |
538 print("bracket depth %s indent to %s" % (depth, start[1])) |
523 # deal with implicit string concatenation |
539 # deal with implicit string concatenation |
564 assert len(indent) == depth + 1 |
580 assert len(indent) == depth + 1 |
565 if start[1] not in indent_chances: |
581 if start[1] not in indent_chances: |
566 # allow to line up tokens |
582 # allow to line up tokens |
567 indent_chances[start[1]] = text |
583 indent_chances[start[1]] = text |
568 |
584 |
|
585 last_token_multiline = (start[0] != end[0]) |
569 if last_token_multiline: |
586 if last_token_multiline: |
570 rel_indent[end[0] - first_row] = rel_indent[row] |
587 rel_indent[end[0] - first_row] = rel_indent[row] |
571 |
588 |
572 if indent_next and expand_indent(line) == indent_level + 4: |
589 if indent_next and expand_indent(line) == indent_level + 4: |
573 pos = (start[0], indent[0] + 4) |
590 pos = (start[0], indent[0] + 4) |
685 pass |
702 pass |
686 else: |
703 else: |
687 if need_space is True or need_space[1]: |
704 if need_space is True or need_space[1]: |
688 # A needed trailing space was not found |
705 # A needed trailing space was not found |
689 yield prev_end, "E225 missing whitespace around operator" |
706 yield prev_end, "E225 missing whitespace around operator" |
690 else: |
707 elif prev_text != '**': |
691 code, optype = 'E226', 'arithmetic' |
708 code, optype = 'E226', 'arithmetic' |
692 if prev_text == '%': |
709 if prev_text == '%': |
693 code, optype = 'E228', 'modulo' |
710 code, optype = 'E228', 'modulo' |
694 elif prev_text not in ARITHMETIC_OP: |
711 elif prev_text not in ARITHMETIC_OP: |
695 code, optype = 'E227', 'bitwise or shift' |
712 code, optype = 'E227', 'bitwise or shift' |
753 Okay: return magic(r=real, i=imag) |
770 Okay: return magic(r=real, i=imag) |
754 Okay: boolean(a == b) |
771 Okay: boolean(a == b) |
755 Okay: boolean(a != b) |
772 Okay: boolean(a != b) |
756 Okay: boolean(a <= b) |
773 Okay: boolean(a <= b) |
757 Okay: boolean(a >= b) |
774 Okay: boolean(a >= b) |
|
775 Okay: def foo(arg: int = 42): |
758 |
776 |
759 E251: def complex(real, imag = 0.0): |
777 E251: def complex(real, imag = 0.0): |
760 E251: return magic(r = real, i = imag) |
778 E251: return magic(r = real, i = imag) |
761 """ |
779 """ |
762 parens = 0 |
780 parens = 0 |
763 no_space = False |
781 no_space = False |
764 prev_end = None |
782 prev_end = None |
|
783 annotated_func_arg = False |
|
784 in_def = logical_line.startswith('def') |
765 message = "E251 unexpected spaces around keyword / parameter equals" |
785 message = "E251 unexpected spaces around keyword / parameter equals" |
766 for token_type, text, start, end, line in tokens: |
786 for token_type, text, start, end, line in tokens: |
767 if token_type == tokenize.NL: |
787 if token_type == tokenize.NL: |
768 continue |
788 continue |
769 if no_space: |
789 if no_space: |
770 no_space = False |
790 no_space = False |
771 if start != prev_end: |
791 if start != prev_end: |
772 yield (prev_end, message) |
792 yield (prev_end, message) |
773 elif token_type == tokenize.OP: |
793 if token_type == tokenize.OP: |
774 if text == '(': |
794 if text == '(': |
775 parens += 1 |
795 parens += 1 |
776 elif text == ')': |
796 elif text == ')': |
777 parens -= 1 |
797 parens -= 1 |
778 elif parens and text == '=': |
798 elif in_def and text == ':' and parens == 1: |
|
799 annotated_func_arg = True |
|
800 elif parens and text == ',' and parens == 1: |
|
801 annotated_func_arg = False |
|
802 elif parens and text == '=' and not annotated_func_arg: |
779 no_space = True |
803 no_space = True |
780 if start != prev_end: |
804 if start != prev_end: |
781 yield (prev_end, message) |
805 yield (prev_end, message) |
|
806 if not parens: |
|
807 annotated_func_arg = False |
|
808 |
782 prev_end = end |
809 prev_end = end |
783 |
810 |
784 |
811 |
785 def whitespace_before_comment(logical_line, tokens): |
812 def whitespace_before_comment(logical_line, tokens): |
786 r"""Separate inline comments by at least two spaces. |
813 r"""Separate inline comments by at least two spaces. |
797 Okay: # Block comment |
824 Okay: # Block comment |
798 E261: x = x + 1 # Increment x |
825 E261: x = x + 1 # Increment x |
799 E262: x = x + 1 #Increment x |
826 E262: x = x + 1 #Increment x |
800 E262: x = x + 1 # Increment x |
827 E262: x = x + 1 # Increment x |
801 E265: #Block comment |
828 E265: #Block comment |
|
829 E266: ### Block comment |
802 """ |
830 """ |
803 prev_end = (0, 0) |
831 prev_end = (0, 0) |
804 for token_type, text, start, end, line in tokens: |
832 for token_type, text, start, end, line in tokens: |
805 if token_type == tokenize.COMMENT: |
833 if token_type == tokenize.COMMENT: |
806 inline_comment = line[:start[1]].strip() |
834 inline_comment = line[:start[1]].strip() |
807 if inline_comment: |
835 if inline_comment: |
808 if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: |
836 if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: |
809 yield (prev_end, |
837 yield (prev_end, |
810 "E261 at least two spaces before inline comment") |
838 "E261 at least two spaces before inline comment") |
811 symbol, sp, comment = text.partition(' ') |
839 symbol, sp, comment = text.partition(' ') |
812 bad_prefix = symbol not in ('#', '#:') |
840 bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#') |
813 if inline_comment: |
841 if inline_comment: |
814 if bad_prefix or comment[:1].isspace(): |
842 if bad_prefix or comment[:1] in WHITESPACE: |
815 yield start, "E262 inline comment should start with '# '" |
843 yield start, "E262 inline comment should start with '# '" |
816 elif bad_prefix: |
844 elif bad_prefix and (bad_prefix != '!' or start[0] > 1): |
817 if text.rstrip('#') and (start[0] > 1 or symbol[1] != '!'): |
845 if bad_prefix != '#': |
818 yield start, "E265 block comment should start with '# '" |
846 yield start, "E265 block comment should start with '# '" |
|
847 elif comment: |
|
848 yield start, "E266 too many leading '#' for block comment" |
819 elif token_type != tokenize.NL: |
849 elif token_type != tokenize.NL: |
820 prev_end = end |
850 prev_end = end |
821 |
851 |
822 |
852 |
823 def imports_on_separate_lines(logical_line): |
853 def imports_on_separate_lines(logical_line): |
837 found = line.find(',') |
867 found = line.find(',') |
838 if -1 < found and ';' not in line[:found]: |
868 if -1 < found and ';' not in line[:found]: |
839 yield found, "E401 multiple imports on one line" |
869 yield found, "E401 multiple imports on one line" |
840 |
870 |
841 |
871 |
|
def module_imports_on_top_of_file(
        logical_line, indent_level, checker_state, noqa):
    r"""Imports are always put at the top of the file, just after any module
    comments and docstrings, and before module globals and constants.

    Okay: import os
    Okay: # this is a comment\nimport os
    Okay: '''this is a module docstring'''\nimport os
    Okay: r'''this is a module docstring'''\nimport os
    Okay: try:\n    import x\nexcept:\n    pass\nelse:\n    pass\nimport y
    Okay: try:\n    import x\nexcept:\n    pass\nfinally:\n    pass\nimport y
    E402: a=1\nimport os
    E402: 'One string'\n"Two string"\nimport os
    E402: a=1\nfrom sys import x

    Okay: if x:\n    import os
    """
    def _starts_string_literal(text):
        # Strip an optional u/U/b/B prefix, then an optional r/R prefix,
        # and check whether a quote character follows.
        if text[0] in 'uUbB':
            text = text[1:]
        if text and text[0] in 'rR':
            text = text[1:]
        return bool(text) and text[0] in '"\''

    if indent_level:
        # Imports inside conditionals or function bodies are allowed.
        return
    if not logical_line:
        # Blank lines and comments never affect the state machine.
        return
    if noqa:
        return

    line = logical_line
    if line.startswith(('import ', 'from ')):
        if checker_state.get('seen_non_imports', False):
            yield 0, "E402 module level import not at top of file"
    elif line.startswith(('try', 'except', 'else', 'finally')):
        # try/except/else/finally may wrap conditional imports, so these
        # keywords are tolerated between import statements.
        return
    elif _starts_string_literal(line):
        # The first string literal is the module docstring and is allowed;
        # any later bare string counts as real code.
        if checker_state.get('seen_docstring', False):
            checker_state['seen_non_imports'] = True
        else:
            checker_state['seen_docstring'] = True
    else:
        checker_state['seen_non_imports'] = True
842 def compound_statements(logical_line): |
922 def compound_statements(logical_line): |
843 r"""Compound statements (on the same line) are generally discouraged. |
923 r"""Compound statements (on the same line) are generally discouraged. |
844 |
924 |
845 While sometimes it's okay to put an if/for/while with a small body |
925 While sometimes it's okay to put an if/for/while with a small body |
846 on the same line, never do this for multi-clause statements. |
926 on the same line, never do this for multi-clause statements. |
847 Also avoid folding such long lines! |
927 Also avoid folding such long lines! |
|
928 |
|
929 Always use a def statement instead of an assignment statement that |
|
930 binds a lambda expression directly to a name. |
848 |
931 |
849 Okay: if foo == 'blah':\n do_blah_thing() |
932 Okay: if foo == 'blah':\n do_blah_thing() |
850 Okay: do_one() |
933 Okay: do_one() |
851 Okay: do_two() |
934 Okay: do_two() |
852 Okay: do_three() |
935 Okay: do_three() |
857 E701: if foo == 'blah': do_blah_thing() |
940 E701: if foo == 'blah': do_blah_thing() |
858 E701: else: do_non_blah_thing() |
941 E701: else: do_non_blah_thing() |
859 E701: try: something() |
942 E701: try: something() |
860 E701: finally: cleanup() |
943 E701: finally: cleanup() |
861 E701: if foo == 'blah': one(); two(); three() |
944 E701: if foo == 'blah': one(); two(); three() |
862 |
|
863 E702: do_one(); do_two(); do_three() |
945 E702: do_one(); do_two(); do_three() |
864 E703: do_four(); # useless semicolon |
946 E703: do_four(); # useless semicolon |
|
947 E704: def f(x): return 2*x |
|
948 E731: f = lambda x: 2*x |
865 """ |
949 """ |
866 line = logical_line |
950 line = logical_line |
867 last_char = len(line) - 1 |
951 last_char = len(line) - 1 |
868 found = line.find(':') |
952 found = line.find(':') |
869 while -1 < found < last_char: |
953 while -1 < found < last_char: |
870 before = line[:found] |
954 before = line[:found] |
871 if (before.count('{') <= before.count('}') and # {'a': 1} (dict) |
955 if ((before.count('{') <= before.count('}') and # {'a': 1} (dict) |
872 before.count('[') <= before.count(']') and # [1:2] (slice) |
956 before.count('[') <= before.count(']') and # [1:2] (slice) |
873 before.count('(') <= before.count(')') and # (Python 3 annotation) |
957 before.count('(') <= before.count(')'))): # (annotation) |
874 not LAMBDA_REGEX.search(before)): # lambda x: x |
958 lambda_kw = LAMBDA_REGEX.search(before) |
875 yield found, "E701 multiple statements on one line (colon)" |
959 if lambda_kw: |
|
960 before = line[:lambda_kw.start()].rstrip() |
|
961 if before[-1:] == '=' and isidentifier(before[:-1].strip()): |
|
962 yield 0, ("E731 do not assign a lambda expression, use a " |
|
963 "def") |
|
964 break |
|
965 if before.startswith('def '): |
|
966 yield 0, "E704 multiple statements on one line (def)" |
|
967 else: |
|
968 yield found, "E701 multiple statements on one line (colon)" |
876 found = line.find(':', found + 1) |
969 found = line.find(':', found + 1) |
877 found = line.find(';') |
970 found = line.find(';') |
878 while -1 < found: |
971 while -1 < found: |
879 if found < last_char: |
972 if found < last_char: |
880 yield found, "E702 multiple statements on one line (semicolon)" |
973 yield found, "E702 multiple statements on one line (semicolon)" |
895 E502: aaa = ("bbb " \\n "ccc") |
988 E502: aaa = ("bbb " \\n "ccc") |
896 |
989 |
897 Okay: aaa = [123,\n 123] |
990 Okay: aaa = [123,\n 123] |
898 Okay: aaa = ("bbb "\n "ccc") |
991 Okay: aaa = ("bbb "\n "ccc") |
899 Okay: aaa = "bbb " \\n "ccc" |
992 Okay: aaa = "bbb " \\n "ccc" |
|
993 Okay: aaa = 123 # \\ |
900 """ |
994 """ |
901 prev_start = prev_end = parens = 0 |
995 prev_start = prev_end = parens = 0 |
|
996 comment = False |
902 backslash = None |
997 backslash = None |
903 for token_type, text, start, end, line in tokens: |
998 for token_type, text, start, end, line in tokens: |
904 if start[0] != prev_start and parens and backslash: |
999 if token_type == tokenize.COMMENT: |
|
1000 comment = True |
|
1001 if start[0] != prev_start and parens and backslash and not comment: |
905 yield backslash, "E502 the backslash is redundant between brackets" |
1002 yield backslash, "E502 the backslash is redundant between brackets" |
906 if end[0] != prev_end: |
1003 if end[0] != prev_end: |
907 if line.rstrip('\r\n').endswith('\\'): |
1004 if line.rstrip('\r\n').endswith('\\'): |
908 backslash = (end[0], len(line.splitlines()[-1]) - 1) |
1005 backslash = (end[0], len(line.splitlines()[-1]) - 1) |
909 else: |
1006 else: |
916 parens += 1 |
1013 parens += 1 |
917 elif text in ')]}': |
1014 elif text in ')]}': |
918 parens -= 1 |
1015 parens -= 1 |
919 |
1016 |
920 |
1017 |
|
def break_around_binary_operator(logical_line, tokens):
    r"""
    Avoid breaks before binary operators.

    The preferred place to break around a binary operator is after the
    operator, not before it.

    W503: (width == 0\n + height == 0)
    W503: (width == 0\n and height == 0)

    Okay: (width == 0 +\n height == 0)
    Okay: foo(\n    -x)
    Okay: foo(x\n    [])
    Okay: x = '''\n''' + ''
    Okay: foo(x,\n    -y)
    Okay: foo(x,  # comment\n    -y)
    """
    def _is_binary(token_type, text):
        # '%' is strictly speaking a binary operator, but conventionally it
        # is written right next to the format parameters after a break.
        if token_type != tokenize.OP and text not in ('and', 'or'):
            return False
        return text not in "()[]{},:.;@=%"

    broke_line = False
    unary_context = True
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if token_type != tokenize.STRING and ('\n' in text or '\r' in text):
            # A physical line break inside the logical line; the next
            # operator token, if binary, is the offender.
            broke_line = True
            continue
        if broke_line and not unary_context and _is_binary(token_type, text):
            yield start, "W503 line break before binary operator"
        # An opener or separator means the following operator is unary.
        unary_context = text in '([{,;'
        broke_line = False
def comparison_to_singleton(logical_line, noqa):
    r"""Comparison to singletons should use "is" or "is not".

    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.

    Okay: if arg is not None:
    E711: if arg != None:
    E711: if None == arg:
    E712: if arg == True:
    E712: if False == arg:

    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value.  The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    if noqa:
        return
    match = COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    # The regex matches the singleton on either side of the comparison,
    # so exactly one of groups 1 and 3 is populated.
    singleton = match.group(1) or match.group(3)
    same = match.group(2) == '=='

    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    if singleton in ('None',):
        code = 'E711'
    else:
        code = 'E712'
        nonzero = ((singleton == 'True' and same) or
                   (singleton == 'False' and not same))
        msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield (match.start(2), "%s comparison to %s should be %s" %
           (code, singleton, msg), singleton, msg)
950 |
1089 |
951 |
1090 |
952 def comparison_negative(logical_line): |
1091 def comparison_negative(logical_line): |
953 r"""Negative comparison should be done using "not in" and "is not". |
1092 r"""Negative comparison should be done using "not in" and "is not". |
984 |
1123 |
985 Okay: if isinstance(obj, basestring): |
1124 Okay: if isinstance(obj, basestring): |
986 Okay: if type(a1) is type(b1): |
1125 Okay: if type(a1) is type(b1): |
987 """ |
1126 """ |
988 match = COMPARE_TYPE_REGEX.search(logical_line) |
1127 match = COMPARE_TYPE_REGEX.search(logical_line) |
989 if match: |
1128 if match and not noqa: |
990 inst = match.group(1) |
1129 inst = match.group(1) |
991 if inst and isidentifier(inst) and inst not in SINGLETONS: |
1130 if inst and isidentifier(inst) and inst not in SINGLETONS: |
992 return # Allow comparison for types which are not obvious |
1131 return # Allow comparison for types which are not obvious |
993 yield match.start(), "E721 do not compare types, use 'isinstance()'" |
1132 yield match.start(), "E721 do not compare types, use 'isinstance()'" |
994 |
1133 |
1044 ############################################################################## |
1183 ############################################################################## |
1045 # Helper functions |
1184 # Helper functions |
1046 ############################################################################## |
1185 ############################################################################## |
1047 |
1186 |
1048 |
1187 |
1049 if '' == ''.encode("utf-8"): |
1188 if sys.version_info < (3,): |
1050 # Python 2: implicit encoding. |
1189 # Python 2: implicit encoding. |
1051 def readlines(filename): |
1190 def readlines(filename): |
1052 """Read the source code.""" |
1191 """Read the source code.""" |
1053 with open(filename) as f: |
1192 with open(filename, 'rU') as f: |
1054 return f.readlines() |
1193 return f.readlines() |
1055 isidentifier = re.compile(r'[a-zA-Z_]\w*').match |
1194 isidentifier = re.compile(r'[a-zA-Z_]\w*$').match |
1056 stdin_get_value = sys.stdin.read |
1195 stdin_get_value = sys.stdin.read |
1057 else: |
1196 else: |
1058 # Python 3 |
1197 # Python 3 |
1059 def readlines(filename): |
1198 def readlines(filename): |
1060 """Read the source code.""" |
1199 """Read the source code.""" |
1139 elif line[:3] == '+++': |
1278 elif line[:3] == '+++': |
1140 path = line[4:].split('\t', 1)[0] |
1279 path = line[4:].split('\t', 1)[0] |
1141 if path[:2] == 'b/': |
1280 if path[:2] == 'b/': |
1142 path = path[2:] |
1281 path = path[2:] |
1143 rv[path] = set() |
1282 rv[path] = set() |
1144 return dict([(os.path.join(parent, path_), rows) |
1283 return dict([(os.path.join(parent, path), rows) |
1145 for (path_, rows) in rv.items() |
1284 for (path, rows) in rv.items() |
1146 if rows and filename_match(path_, patterns)]) |
1285 if rows and filename_match(path, patterns)]) |
1147 |
1286 |
1148 |
1287 |
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.

    Return a list of absolute paths.
    """
    if not value:
        return []
    if isinstance(value, list):
        # Already parsed; hand it back untouched.
        return value
    normalized = []
    for raw in value.split(','):
        candidate = raw.strip()
        if '/' in candidate:
            # Anchor relative paths at *parent* and absolutize them.
            candidate = os.path.abspath(os.path.join(parent, candidate))
        normalized.append(candidate.rstrip('/'))
    return normalized
1162 |
1304 |
1169 if not patterns: |
1311 if not patterns: |
1170 return default |
1312 return default |
1171 return any(fnmatch(filename, pattern) for pattern in patterns) |
1313 return any(fnmatch(filename, pattern) for pattern in patterns) |
1172 |
1314 |
1173 |
1315 |
|
def _is_eol_token(token):
    """Return True if *token* terminates a physical line."""
    if token[0] in NEWLINE:
        return True
    # A bare backslash continuation after the token also ends the line.
    return token[4][token[3][1]:].lstrip() == '\\\n'


if COMMENT_WITH_NL:
    def _is_eol_token(token, _eol_token=_is_eol_token):
        """Return True if *token* terminates a physical line.

        Variant for tokenizers that glue the trailing newline onto a
        comment token spanning the whole line.
        """
        if _eol_token(token):
            return True
        return token[0] == tokenize.COMMENT and token[1] == token[4]
1183 ############################################################################## |
1323 ############################################################################## |
1184 # Framework to run all checks |
1324 # Framework to run all checks |
1185 ############################################################################## |
1325 ############################################################################## |
1186 |
1326 |
1187 |
1327 |
1188 _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} |
1328 _checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} |
|
1329 |
|
1330 |
|
1331 def _get_parameters(function): |
|
1332 if sys.version_info >= (3, 3): |
|
1333 return [parameter.name |
|
1334 for parameter |
|
1335 in inspect.signature(function).parameters.values() |
|
1336 if parameter.kind == parameter.POSITIONAL_OR_KEYWORD] |
|
1337 else: |
|
1338 return inspect.getargspec(function)[0] |
1189 |
1339 |
1190 |
1340 |
def register_check(check, codes=None):
    """Register a new check object."""
    def _add_check(check, kind, codes, args):
        # Merge codes into an already-registered check, else add a new entry.
        if check in _checks[kind]:
            _checks[kind][check][0].extend(codes or [])
        else:
            _checks[kind][check] = (codes or [''], args)

    if inspect.isfunction(check):
        args = _get_parameters(check)
        if args and args[0] in ('physical_line', 'logical_line'):
            if codes is None:
                # Harvest the error codes from the check's docstring.
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _add_check(check, args[0], codes, args)
    elif inspect.isclass(check):
        # Tree (AST) checkers are classes constructed as cls(tree, ...).
        if _get_parameters(check.__init__)[:2] == ['self', 'tree']:
            _add_check(check, 'tree', codes, None)
1207 |
1357 |
1208 |
1358 |
1209 def init_checks_registry(): |
1359 def init_checks_registry(): |
1210 """Register all globally visible functions. |
1360 """Register all globally visible functions. |
1233 self.max_line_length = options.max_line_length |
1383 self.max_line_length = options.max_line_length |
1234 self.multiline = False # in a multiline string? |
1384 self.multiline = False # in a multiline string? |
1235 self.hang_closing = options.hang_closing |
1385 self.hang_closing = options.hang_closing |
1236 self.verbose = options.verbose |
1386 self.verbose = options.verbose |
1237 self.filename = filename |
1387 self.filename = filename |
|
1388 # Dictionary where a checker can store its custom state. |
|
1389 self._checker_states = {} |
1238 if filename is None: |
1390 if filename is None: |
1239 self.filename = 'stdin' |
1391 self.filename = 'stdin' |
1240 self.lines = lines or [] |
1392 self.lines = lines or [] |
1241 elif filename == '-': |
1393 elif filename == '-': |
1242 self.filename = 'stdin' |
1394 self.filename = 'stdin' |
1292 arguments = [] |
1444 arguments = [] |
1293 for name in argument_names: |
1445 for name in argument_names: |
1294 arguments.append(getattr(self, name)) |
1446 arguments.append(getattr(self, name)) |
1295 return check(*arguments) |
1447 return check(*arguments) |
1296 |
1448 |
|
def init_checker_state(self, name, argument_names):
    """Prepare the per-plugin mutable state for the checker *name*.

    Only plugins that declare a ``checker_state`` argument get a state
    dict; it is created once per checker and reused across calls.
    """
    if 'checker_state' not in argument_names:
        return
    self.checker_state = self._checker_states.setdefault(name, {})
|
1453 |
1297 def check_physical(self, line): |
1454 def check_physical(self, line): |
1298 """Run all physical checks on a raw input line.""" |
1455 """Run all physical checks on a raw input line.""" |
1299 self.physical_line = line |
1456 self.physical_line = line |
1300 for name, check, argument_names in self._physical_checks: |
1457 for name, check, argument_names in self._physical_checks: |
|
1458 self.init_checker_state(name, argument_names) |
1301 result = self.run_check(check, argument_names) |
1459 result = self.run_check(check, argument_names) |
1302 if result is not None: |
1460 if result is not None: |
1303 (offset, text) = result[:2] |
1461 (offset, text) = result[:2] |
1304 args = result[2:] |
1462 args = result[2:] |
1305 self.report_error_args( |
1463 self.report_error_args( |
1325 text = mute_string(text) |
1483 text = mute_string(text) |
1326 if prev_row: |
1484 if prev_row: |
1327 (start_row, start_col) = start |
1485 (start_row, start_col) = start |
1328 if prev_row != start_row: # different row |
1486 if prev_row != start_row: # different row |
1329 prev_text = self.lines[prev_row - 1][prev_col - 1] |
1487 prev_text = self.lines[prev_row - 1][prev_col - 1] |
1330 if prev_text == ',' or (prev_text not in '{[(' |
1488 if prev_text == ',' or (prev_text not in '{[(' and |
1331 and text not in '}])'): |
1489 text not in '}])'): |
1332 text = ' ' + text |
1490 text = ' ' + text |
1333 elif prev_col != start_col: # different column |
1491 elif prev_col != start_col: # different column |
1334 text = line[prev_col:start_col] + text |
1492 text = line[prev_col:start_col] + text |
1335 logical.append(text) |
1493 logical.append(text) |
1336 length += len(text) |
1494 length += len(text) |
1342 |
1500 |
1343 def check_logical(self): |
1501 def check_logical(self): |
1344 """Build a line from tokens and run all logical checks on it.""" |
1502 """Build a line from tokens and run all logical checks on it.""" |
1345 self.report.increment_logical_line() |
1503 self.report.increment_logical_line() |
1346 mapping = self.build_tokens_line() |
1504 mapping = self.build_tokens_line() |
|
1505 |
|
1506 if not mapping: |
|
1507 return |
|
1508 |
1347 (start_row, start_col) = mapping[0][1] |
1509 (start_row, start_col) = mapping[0][1] |
1348 start_line = self.lines[start_row - 1] |
1510 start_line = self.lines[start_row - 1] |
1349 self.indent_level = expand_indent(start_line[:start_col]) |
1511 self.indent_level = expand_indent(start_line[:start_col]) |
1350 if self.blank_before < self.blank_lines: |
1512 if self.blank_before < self.blank_lines: |
1351 self.blank_before = self.blank_lines |
1513 self.blank_before = self.blank_lines |
1352 if self.verbose >= 2: |
1514 if self.verbose >= 2: |
1353 print(self.logical_line[:80].rstrip()) |
1515 print(self.logical_line[:80].rstrip()) |
1354 for name, check, argument_names in self._logical_checks: |
1516 for name, check, argument_names in self._logical_checks: |
1355 if self.verbose >= 4: |
1517 if self.verbose >= 4: |
1356 print(' ' + name) |
1518 print(' ' + name) |
1357 for result in self.run_check(check, argument_names): |
1519 self.init_checker_state(name, argument_names) |
|
1520 for result in self.run_check(check, argument_names) or (): |
1358 offset, text = result[:2] |
1521 offset, text = result[:2] |
1359 args = result[2:] |
1522 args = result[2:] |
1360 if not isinstance(offset, tuple): |
1523 if not isinstance(offset, tuple): |
1361 for token_offset, pos in mapping: |
1524 for token_offset, pos in mapping: |
1362 if offset <= token_offset: |
1525 if offset <= token_offset: |
1372 |
1535 |
1373 def check_ast(self): |
1536 def check_ast(self): |
1374 """Build the file's AST and run all AST checks.""" |
1537 """Build the file's AST and run all AST checks.""" |
1375 try: |
1538 try: |
1376 tree = compile(''.join(self.lines), '', 'exec', ast.PyCF_ONLY_AST) |
1539 tree = compile(''.join(self.lines), '', 'exec', ast.PyCF_ONLY_AST) |
1377 except (SyntaxError, TypeError): |
1540 except (ValueError, SyntaxError, TypeError): |
1378 return self.report_invalid_syntax() |
1541 return self.report_invalid_syntax() |
1379 for name, cls, __ in self._ast_checks: |
1542 for name, cls, __ in self._ast_checks: |
1380 # extended API for eric6 integration |
1543 # extended API for eric6 integration |
1381 checker = cls(tree, self.filename, self.options) |
1544 checker = cls(tree, self.filename, self.options) |
1382 for args in checker.run(): |
1545 for args in checker.run(): |
1471 token = list(token) |
1636 token = list(token) |
1472 token[1] = text.rstrip('\r\n') |
1637 token[1] = text.rstrip('\r\n') |
1473 token[3] = (token[2][0], token[2][1] + len(token[1])) |
1638 token[3] = (token[2][0], token[2][1] + len(token[1])) |
1474 self.tokens = [tuple(token)] |
1639 self.tokens = [tuple(token)] |
1475 self.check_logical() |
1640 self.check_logical() |
1476 if len(self.tokens) > 1 and (token_type == tokenize.ENDMARKER and |
1641 if self.tokens: |
1477 self.tokens[-2][0] not in SKIP_TOKENS): |
1642 self.check_physical(self.lines[-1]) |
1478 self.tokens.pop() |
|
1479 self.check_physical(self.tokens[-1][4]) |
|
1480 self.check_logical() |
1643 self.check_logical() |
1481 return self.report.get_file_results() |
1644 return self.report.get_file_results() |
1482 |
1645 |
1483 |
1646 |
1484 class BaseReport(object): |
1647 class BaseReport(object): |
1646 line = self.lines[line_number - 1] |
1809 line = self.lines[line_number - 1] |
1647 print(line.rstrip()) |
1810 print(line.rstrip()) |
1648 print(re.sub(r'\S', ' ', line[:offset]) + '^') |
1811 print(re.sub(r'\S', ' ', line[:offset]) + '^') |
1649 if self._show_pep8 and doc: |
1812 if self._show_pep8 and doc: |
1650 print(' ' + doc.strip()) |
1813 print(' ' + doc.strip()) |
|
1814 |
|
1815 # stdout is block buffered when not stdout.isatty(). |
|
1816 # line can be broken where buffer boundary since other processes |
|
1817 # write to same file. |
|
1818 # flush() after print() to avoid buffer boundary. |
|
1819 # Typical buffer size is 8192. line written safely when |
|
1820 # len(line) < 8192. |
|
1821 sys.stdout.flush() |
1651 return self.file_errors |
1822 return self.file_errors |
1652 |
1823 |
1653 |
1824 |
1654 class DiffReport(StandardReport): |
1825 class DiffReport(StandardReport): |
1655 """Collect and print the results for the changed lines only.""" |
1826 """Collect and print the results for the changed lines only.""" |
1669 |
1840 |
1670 def __init__(self, *args, **kwargs): |
1841 def __init__(self, *args, **kwargs): |
1671 # build options from the command line |
1842 # build options from the command line |
1672 self.checker_class = kwargs.pop('checker_class', Checker) |
1843 self.checker_class = kwargs.pop('checker_class', Checker) |
1673 parse_argv = kwargs.pop('parse_argv', False) |
1844 parse_argv = kwargs.pop('parse_argv', False) |
1674 config_file = kwargs.pop('config_file', None) |
1845 config_file = kwargs.pop('config_file', False) |
1675 parser = kwargs.pop('parser', None) |
1846 parser = kwargs.pop('parser', None) |
1676 # build options from dict |
1847 # build options from dict |
1677 options_dict = dict(*args, **kwargs) |
1848 options_dict = dict(*args, **kwargs) |
1678 arglist = None if parse_argv else options_dict.get('paths', None) |
1849 arglist = None if parse_argv else options_dict.get('paths', None) |
1679 options, self.paths = process_options( |
1850 options, self.paths = process_options( |
1694 # options.testsuite or options.doctest) and DEFAULT_IGNORE: |
1865 # options.testsuite or options.doctest) and DEFAULT_IGNORE: |
1695 # # The default choice: ignore controversial checks |
1866 # # The default choice: ignore controversial checks |
1696 # options.ignore = tuple(DEFAULT_IGNORE.split(',')) |
1867 # options.ignore = tuple(DEFAULT_IGNORE.split(',')) |
1697 # else: |
1868 # else: |
1698 # Ignore all checks which are not explicitly selected or all if no |
1869 # Ignore all checks which are not explicitly selected or all if no |
|
1870 |
1699 # check is ignored or explicitly selected |
1871 # check is ignored or explicitly selected |
1700 options.ignore = ('',) if options.select else tuple(options.ignore) |
1872 options.ignore = ('',) if options.select else tuple(options.ignore) |
1701 options.benchmark_keys = BENCHMARK_KEYS[:] |
1873 options.benchmark_keys = BENCHMARK_KEYS[:] |
1702 options.ignore_code = self.ignore_code |
1874 options.ignore_code = self.ignore_code |
1703 options.physical_checks = self.get_checks('physical_line') |
1875 options.physical_checks = self.get_checks('physical_line') |
1823 "matching these comma separated patterns " |
1995 "matching these comma separated patterns " |
1824 "(default: %default)") |
1996 "(default: %default)") |
1825 parser.add_option('--select', metavar='errors', default='', |
1997 parser.add_option('--select', metavar='errors', default='', |
1826 help="select errors and warnings (e.g. E,W6)") |
1998 help="select errors and warnings (e.g. E,W6)") |
1827 parser.add_option('--ignore', metavar='errors', default='', |
1999 parser.add_option('--ignore', metavar='errors', default='', |
1828 help="skip errors and warnings (e.g. E4,W)") |
2000 help="skip errors and warnings (e.g. E4,W) " |
|
2001 "(default: %s)" % DEFAULT_IGNORE) |
1829 parser.add_option('--show-source', action='store_true', |
2002 parser.add_option('--show-source', action='store_true', |
1830 help="show source code for each error") |
2003 help="show source code for each error") |
1831 parser.add_option('--show-pep8', action='store_true', |
2004 parser.add_option('--show-pep8', action='store_true', |
1832 help="show text of PEP 8 for each error " |
2005 help="show text of PEP 8 for each error " |
1833 "(implies --first)") |
2006 "(implies --first)") |
1845 help="hang closing bracket instead of matching " |
2018 help="hang closing bracket instead of matching " |
1846 "indentation of opening bracket's line") |
2019 "indentation of opening bracket's line") |
1847 parser.add_option('--format', metavar='format', default='default', |
2020 parser.add_option('--format', metavar='format', default='default', |
1848 help="set the error format [default|pylint|<custom>]") |
2021 help="set the error format [default|pylint|<custom>]") |
1849 parser.add_option('--diff', action='store_true', |
2022 parser.add_option('--diff', action='store_true', |
1850 help="report only lines changed according to the " |
2023 help="report changes only within line number ranges in " |
1851 "unified diff received on STDIN") |
2024 "the unified diff received on STDIN") |
1852 group = parser.add_option_group("Testing Options") |
2025 group = parser.add_option_group("Testing Options") |
1853 if os.path.exists(TESTSUITE_PATH): |
2026 if os.path.exists(TESTSUITE_PATH): |
1854 group.add_option('--testsuite', metavar='dir', |
2027 group.add_option('--testsuite', metavar='dir', |
1855 help="run regression tests from dir") |
2028 help="run regression tests from dir") |
1856 group.add_option('--doctest', action='store_true', |
2029 group.add_option('--doctest', action='store_true', |
1859 help="measure processing speed") |
2032 help="measure processing speed") |
1860 return parser |
2033 return parser |
1861 |
2034 |
1862 |
2035 |
1863 def read_config(options, args, arglist, parser): |
2036 def read_config(options, args, arglist, parser): |
1864 """Read both user configuration and local configuration.""" |
2037 """Read and parse configurations |
|
2038 |
|
2039 If a config file is specified on the command line with the "--config" |
|
2040 option, then only it is used for configuration. |
|
2041 |
|
2042 Otherwise, the user configuration (~/.config/pep8) and any local |
|
2043 configurations in the current directory or above will be merged together |
|
2044 (in that order) using the read method of ConfigParser. |
|
2045 """ |
1865 config = RawConfigParser() |
2046 config = RawConfigParser() |
1866 |
2047 |
1867 user_conf = options.config |
2048 cli_conf = options.config |
1868 if user_conf and os.path.isfile(user_conf): |
2049 |
|
2050 local_dir = os.curdir |
|
2051 |
|
2052 if USER_CONFIG and os.path.isfile(USER_CONFIG): |
1869 if options.verbose: |
2053 if options.verbose: |
1870 print('user configuration: %s' % user_conf) |
2054 print('user configuration: %s' % USER_CONFIG) |
1871 config.read(user_conf) |
2055 config.read(USER_CONFIG) |
1872 |
2056 |
1873 local_dir = os.curdir |
|
1874 parent = tail = args and os.path.abspath(os.path.commonprefix(args)) |
2057 parent = tail = args and os.path.abspath(os.path.commonprefix(args)) |
1875 while tail: |
2058 while tail: |
1876 if config.read([os.path.join(parent, fn) for fn in PROJECT_CONFIG]): |
2059 if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG): |
1877 local_dir = parent |
2060 local_dir = parent |
1878 if options.verbose: |
2061 if options.verbose: |
1879 print('local configuration: in %s' % parent) |
2062 print('local configuration: in %s' % parent) |
1880 break |
2063 break |
1881 (parent, tail) = os.path.split(parent) |
2064 (parent, tail) = os.path.split(parent) |
1882 |
2065 |
|
2066 if cli_conf and os.path.isfile(cli_conf): |
|
2067 if options.verbose: |
|
2068 print('cli configuration: %s' % cli_conf) |
|
2069 config.read(cli_conf) |
|
2070 |
1883 pep8_section = parser.prog |
2071 pep8_section = parser.prog |
1884 if config.has_section(pep8_section): |
2072 if config.has_section(pep8_section): |
1885 option_list = dict([(o.dest, o.type or o.action) |
2073 option_list = dict([(o.dest, o.type or o.action) |
1886 for o in parser.option_list]) |
2074 for o in parser.option_list]) |
1887 |
2075 |
1888 # First, read the default values |
2076 # First, read the default values |
1889 (new_options, __) = parser.parse_args([]) |
2077 (new_options, __) = parser.parse_args([]) |
1890 |
2078 |
1891 # Second, parse the configuration |
2079 # Second, parse the configuration |
1892 for opt in config.options(pep8_section): |
2080 for opt in config.options(pep8_section): |
|
2081 if opt.replace('_', '-') not in parser.config_options: |
|
2082 print(" unknown option '%s' ignored" % opt) |
|
2083 continue |
1893 if options.verbose > 1: |
2084 if options.verbose > 1: |
1894 print(" %s = %s" % (opt, config.get(pep8_section, opt))) |
2085 print(" %s = %s" % (opt, config.get(pep8_section, opt))) |
1895 if opt.replace('_', '-') not in parser.config_options: |
|
1896 print("Unknown option: '%s'\n not in [%s]" % |
|
1897 (opt, ' '.join(parser.config_options))) |
|
1898 sys.exit(1) |
|
1899 normalized_opt = opt.replace('-', '_') |
2086 normalized_opt = opt.replace('-', '_') |
1900 opt_type = option_list[normalized_opt] |
2087 opt_type = option_list[normalized_opt] |
1901 if opt_type in ('int', 'count'): |
2088 if opt_type in ('int', 'count'): |
1902 value = config.getint(pep8_section, opt) |
2089 value = config.getint(pep8_section, opt) |
1903 elif opt_type == 'string': |
2090 elif opt_type == 'string': |
1915 return options |
2102 return options |
1916 |
2103 |
1917 |
2104 |
1918 def process_options(arglist=None, parse_argv=False, config_file=None, |
2105 def process_options(arglist=None, parse_argv=False, config_file=None, |
1919 parser=None): |
2106 parser=None): |
1920 """Process options passed either via arglist or via command line args.""" |
2107 """Process options passed either via arglist or via command line args. |
|
2108 |
|
2109 Passing in the ``config_file`` parameter allows other tools, such as flake8 |
|
2110 to specify their own options to be processed in pep8. |
|
2111 """ |
1921 if not parser: |
2112 if not parser: |
1922 parser = get_parser() |
2113 parser = get_parser() |
1923 if not parser.has_option('--config'): |
2114 if not parser.has_option('--config'): |
1924 if config_file is True: |
|
1925 config_file = DEFAULT_CONFIG |
|
1926 group = parser.add_option_group("Configuration", description=( |
2115 group = parser.add_option_group("Configuration", description=( |
1927 "The project options are read from the [%s] section of the " |
2116 "The project options are read from the [%s] section of the " |
1928 "tox.ini file or the setup.cfg file located in any parent folder " |
2117 "tox.ini file or the setup.cfg file located in any parent folder " |
1929 "of the path(s) being processed. Allowed options are: %s." % |
2118 "of the path(s) being processed. Allowed options are: %s." % |
1930 (parser.prog, ', '.join(parser.config_options)))) |
2119 (parser.prog, ', '.join(parser.config_options)))) |
1931 group.add_option('--config', metavar='path', default=config_file, |
2120 group.add_option('--config', metavar='path', default=config_file, |
1932 help="user config file location (default: %default)") |
2121 help="user config file location") |
1933 # Don't read the command line if the module is used as a library. |
2122 # Don't read the command line if the module is used as a library. |
1934 if not arglist and not parse_argv: |
2123 if not arglist and not parse_argv: |
1935 arglist = [] |
2124 arglist = [] |
1936 # If parse_argv is True and arglist is None, arguments are |
2125 # If parse_argv is True and arglist is None, arguments are |
1937 # parsed from the command line (sys.argv) |
2126 # parsed from the command line (sys.argv) |
1948 else: |
2137 else: |
1949 parser.error('input not specified') |
2138 parser.error('input not specified') |
1950 options = read_config(options, args, arglist, parser) |
2139 options = read_config(options, args, arglist, parser) |
1951 options.reporter = parse_argv and options.quiet == 1 and FileReport |
2140 options.reporter = parse_argv and options.quiet == 1 and FileReport |
1952 |
2141 |
1953 options.filename = options.filename and options.filename.split(',') |
2142 options.filename = _parse_multi_options(options.filename) |
1954 options.exclude = normalize_paths(options.exclude) |
2143 options.exclude = normalize_paths(options.exclude) |
1955 options.select = options.select and options.select.split(',') |
2144 options.select = _parse_multi_options(options.select) |
1956 options.ignore = options.ignore and options.ignore.split(',') |
2145 options.ignore = _parse_multi_options(options.ignore) |
1957 |
2146 |
1958 if options.diff: |
2147 if options.diff: |
1959 options.reporter = DiffReport |
2148 options.reporter = DiffReport |
1960 stdin = stdin_get_value() |
2149 stdin = stdin_get_value() |
1961 options.selected_lines = parse_udiff(stdin, options.filename, args[0]) |
2150 options.selected_lines = parse_udiff(stdin, options.filename, args[0]) |
1962 args = sorted(options.selected_lines) |
2151 args = sorted(options.selected_lines) |
1963 |
2152 |
1964 return options, args |
2153 return options, args |
1965 |
2154 |
1966 |
2155 |
|
2156 def _parse_multi_options(options, split_token=','): |
|
2157 r"""Split and strip and discard empties. |
|
2158 |
|
2159 Turns the following: |
|
2160 |
|
2161 A, |
|
2162 B, |
|
2163 |
|
2164 into ["A", "B"] |
|
2165 """ |
|
2166 if options: |
|
2167 return [o.strip() for o in options.split(split_token) if o.strip()] |
|
2168 else: |
|
2169 return options |
|
2170 |
|
2171 |
def _main():
    """Parse options and run checks on Python source."""
    import signal

    # Exit quietly on a broken pipe instead of raising an IOError
    # (e.g. when the output is piped into `head`).
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass    # SIGPIPE is not available on Windows

    style_guide = StyleGuide(parse_argv=True)
    options = style_guide.options

    if options.doctest or options.testsuite:
        from testsuite.support import run_tests
        report = run_tests(style_guide)
    else:
        report = style_guide.check_files()

    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()

    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)
1986 |
2204 |
# Script entry point: run the checker when invoked directly.
if __name__ == '__main__':
    _main()

#
# eflag: noqa = M702