eric6/Plugins/CheckerPlugins/CodeStyleChecker/pycodestyle.py

changeset 7620:52c2fe0308fd
parent    7360:9190402e4505
child     7639:422fd05e9c91
comparing 7619:ef2b5af23ce7 with 7620:52c2fe0308fd
@@ -89,11 +89,11 @@
     from configparser import RawConfigParser
     from io import TextIOWrapper
 except ImportError:
     from ConfigParser import RawConfigParser    # __IGNORE_WARNING__
 
-__version__ = '2.5.0-eric'
+__version__ = '2.6.0-eric'
 
 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504'
 try:
     if sys.platform == 'win32':
@@ -124,18 +124,21 @@
 
 PyCF_ONLY_AST = 1024
 SINGLETONS = frozenset(['False', 'None', 'True'])
 KEYWORDS = frozenset(keyword.kwlist + ['print', 'async']) - SINGLETONS
 UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
-ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
+ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@'])
 WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
 # Warn for -> function annotation operator in py3.5+ (issue 803)
 FUNCTION_RETURN_ANNOTATION_OP = ['->'] if sys.version_info >= (3, 5) else []
+ASSIGNMENT_EXPRESSION_OP = [':='] if sys.version_info >= (3, 8) else []
 WS_NEEDED_OPERATORS = frozenset([
     '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
-    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='] +
-    FUNCTION_RETURN_ANNOTATION_OP)
+    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '=',
+    'and', 'in', 'is', 'or'] +
+    FUNCTION_RETURN_ANNOTATION_OP +
+    ASSIGNMENT_EXPRESSION_OP)
 WHITESPACE = frozenset(' \t')
 NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
 SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
 # ERRORTOKEN is triggered by backticks in Python 3
 SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
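
For illustration only (not part of the changeset): with ':=' and the keywords 'and', 'in', 'is' and 'or' added to WS_NEEDED_OPERATORS, code like the lines below is reported as E225. A minimal sketch using the public pycodestyle API, assuming the upstream pycodestyle 2.6.0 package is installed and Python 3.8+ for the walrus operator; the file name is only a label for the report:

    import pycodestyle

    # Feed two logical lines directly to the checker; check_all() prints the
    # diagnostics (E225 for the tight ':=' and the tight 'and') and returns
    # the number of reported errors.
    checker = pycodestyle.Checker('walrus_example.py', lines=[
        'if (n:=10) > 5:\n',
        '    x = 1and 1\n',
    ])
    print(checker.check_all())
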
@@ -144,16 +147,17 @@
 INDENT_REGEX = re.compile(r'([ \t]*)')
 RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
 RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
 ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
 DOCSTRING_REGEX = re.compile(r'u?r?["\']')
-EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({] | [\]}),;:]')
+EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[\[({] | [\]}),;]| :(?!=)')
 WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)')
 COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)'
                                      r'\s*(?(1)|(None|False|True))\b')
-COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s')
+COMPARE_NEGATIVE_REGEX = re.compile(r'\b(?<!is\s)(not)\s+[^][)(}{ ]+\s+'
+                                    r'(in|is)\s')
-COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
+COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s+type(?:s.\w+Type'
                                 r'|\s*\(\s*([^)]*[^ )])\s*\))')
 KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
 OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
 LAMBDA_REGEX = re.compile(r'\blambda\b')
 HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
@@ -323,10 +327,45 @@
 ########################################################################
 # Plugins (check functions) for logical lines
 ########################################################################
 
 
+def _is_one_liner(logical_line, indent_level, lines, line_number):
+    if not STARTSWITH_TOP_LEVEL_REGEX.match(logical_line):
+        return False
+
+    line_idx = line_number - 1
+
+    if line_idx < 1:
+        prev_indent = 0
+    else:
+        prev_indent = expand_indent(lines[line_idx - 1])
+
+    if prev_indent > indent_level:
+        return False
+
+    while line_idx < len(lines):
+        line = lines[line_idx].strip()
+        if not line.startswith('@') and STARTSWITH_TOP_LEVEL_REGEX.match(line):
+            break
+        else:
+            line_idx += 1
+    else:
+        return False  # invalid syntax: EOF while searching for def/class
+
+    next_idx = line_idx + 1
+    while next_idx < len(lines):
+        if lines[next_idx].strip():
+            break
+        else:
+            next_idx += 1
+    else:
+        return True  # line is last in the file
+
+    return expand_indent(lines[next_idx]) <= indent_level
+
+
 @register_check
 def blank_lines(logical_line, blank_lines, indent_level, line_number,
                 blank_before, previous_logical,
                 previous_unindented_logical_line, previous_indent_level,
                 lines):
@@ -351,20 +390,21 @@
 
     E301: class Foo:\n b = 0\n def bar():\n pass
     E302: def a():\n pass\n\ndef b(n):\n pass
     E302: def a():\n pass\n\nasync def b(n):\n pass
     E303: def a():\n pass\n\n\n\ndef b(n):\n pass
+    E303: def a():\n\n\n\n pass
     E304: @decorator\n\ndef a():\n pass
     E305: def a():\n pass\na()
     E306: def a():\n def b():\n pass\n def c():\n pass
     E307: def a():\n def b():\n pass\n\n\n def c():\n pass
     E308: def a():\n\n\n\n pass
-    """
+    """  # noqa
     top_level_lines = BLANK_LINES_CONFIG['top_level']
     method_lines = BLANK_LINES_CONFIG['method']
 
-    if line_number < top_level_lines + 1 and not previous_logical:
+    if not previous_logical and blank_before < top_level_lines:
         return  # Don't expect blank lines before the first line
     if previous_logical.startswith('@'):
         if blank_lines:
             yield 0, "E304 blank lines found after function decorator"
     elif (blank_lines > top_level_lines or
@@ -373,24 +413,20 @@
         if indent_level:
             if previous_logical.strip().startswith(('def ', 'class ')):
                 yield (0, "E308 too many blank lines (%d)", blank_lines)
             else:
                 yield (0, "E307 too many blank lines (%d) in a nested "
                        "scope, expected %d", blank_lines, method_lines)
         else:
             yield (0, "E303 too many blank lines (%d), expected %d",
                    blank_lines, top_level_lines)
     elif STARTSWITH_TOP_LEVEL_REGEX.match(logical_line):
-        # If this is a one-liner (i.e. the next line is not more
-        # indented), and the previous line is also not deeper
-        # (it would be better to check if the previous line is part
-        # of another def/class at the same level), don't require blank
-        # lines around this.
-        prev_line = lines[line_number - 2] if line_number >= 2 else ''
-        next_line = lines[line_number] if line_number < len(lines) else ''
-        if (expand_indent(prev_line) <= indent_level and
-                expand_indent(next_line) <= indent_level):
+        # allow a group of one-liners
+        if (
+            _is_one_liner(logical_line, indent_level, lines, line_number) and
+            blank_before == 0
+        ):
             return
     if indent_level:
         if not (blank_before == method_lines or
                 previous_indent_level < indent_level or
                 DOCSTRING_REGEX.match(previous_logical)
@@ -400,11 +436,11 @@
             # Search backwards for a def ancestor or tree root
             # (top level).
             for line in lines[line_number - top_level_lines::-1]:
                 if line.strip() and expand_indent(line) < ancestor_level:
                     ancestor_level = expand_indent(line)
-                    nested = line.lstrip().startswith('def ')
+                    nested = STARTSWITH_DEF_REGEX.match(line.lstrip())
                     if nested or ancestor_level == 0:
                         break
             if nested:
                 yield (0, "E306 expected %s blank lines before a "
                        "nested definition, found %d", method_lines,
@@ -515,17 +551,20 @@
     E231: [{'a':'b'}]
     """
     line = logical_line
     for index in range(len(line) - 1):
         char = line[index]
-        if char in ',;:' and line[index + 1] not in WHITESPACE:
+        next_char = line[index + 1]
+        if char in ',;:' and next_char not in WHITESPACE:
             before = line[:index]
             if char == ':' and before.count('[') > before.count(']') and \
                     before.rfind('{') < before.rfind('['):
                 continue  # Slice syntax, no space required
-            if char == ',' and line[index + 1] == ')':
+            if char == ',' and next_char == ')':
                 continue  # Allow tuple with only one element: (3,)
+            if char == ':' and next_char == '=' and sys.version_info >= (3, 8):
+                continue  # Allow assignment expression
             yield index, "E231 missing whitespace after '%s'", char
 
 
 @register_check
 def indentation(logical_line, previous_logical, indent_char,
@@ -556,13 +595,15 @@
     if indent_expect and indent_level <= previous_indent_level:
         yield 0, tmpl % (2 + c, "expected an indented block")
     elif not indent_expect and indent_level > previous_indent_level:
         yield 0, tmpl % (3 + c, "unexpected indentation")
 
-    expected_indent_level = previous_indent_level + 4
-    if indent_expect and indent_level > expected_indent_level:
-        yield 0, tmpl % (7, 'over-indented')
+    if indent_expect:
+        expected_indent_amount = 8 if indent_char == '\t' else 4
+        expected_indent_level = previous_indent_level + expected_indent_amount
+        if indent_level > expected_indent_level:
+            yield 0, tmpl % (7, 'over-indented')
 
 
 @register_check
 def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                           indent_char, noqa, verbose):
@@ -708,10 +749,13 @@
                 print("bracket depth %s indent to %s" % (depth, start[1]))
         # deal with implicit string concatenation
         elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
               text in ('u', 'ur', 'b', 'br')):
             indent_chances[start[1]] = str
+        # visual indent after assert/raise/with
+        elif not row and not depth and text in ["assert", "raise", "with"]:
+            indent_chances[end[1] + 1] = True
         # special case for the "if" statement because len("if (") == 4
         elif not indent_chances and not row and not depth and text == 'if':
             indent_chances[end[1] + 1] = True
         elif text == ':' and line[end[1]:].isspace():
             open_rows[depth].append(row)
@@ -844,19 +888,21 @@
 
     E225: i=i+1
     E225: submitted +=1
     E225: x = x /2 - 1
     E225: z = x **y
+    E225: z = 1and 1
     E226: c = (a+b) * (a-b)
     E226: hypot2 = x*x + y*y
     E227: c = a|b
     E228: msg = fmt%(errno, errmsg)
     """
     parens = 0
     need_space = False
     prev_type = tokenize.OP
     prev_text = prev_end = None
+    operator_types = (tokenize.OP, tokenize.NAME)
     for token_type, text, start, end, line in tokens:
         if token_type in SKIP_COMMENTS:
             continue
         if text in ('(', 'lambda'):
             parens += 1
@@ -871,10 +917,23 @@
                 need_space = False
             elif text == '>' and prev_text in ('<', '-'):
                 # Tolerate the "<>" operator, even if running Python 3
                 # Deal with Python 3's annotated return value "->"
                 pass
+            elif (
+                # def f(a, /, b):
+                #            ^
+                # def f(a, b, /):
+                #               ^
+                prev_text == '/' and text in {',', ')'} or
+                # def f(a, b, /):
+                #                ^
+                prev_text == ')' and text == ':'
+            ):
+                # Tolerate the "/" operator in function definition
+                # For more info see PEP570
+                pass
             else:
                 if need_space is True or need_space[1]:
                     # A needed trailing space was not found
                     yield prev_end, "E225 missing whitespace around operator"
                 elif prev_text != '**':
@@ -884,11 +943,11 @@
                     elif prev_text not in ARITHMETIC_OP:
                         code, optype = 'E227', 'bitwise or shift'
                     yield (need_space[0], "%s missing whitespace "
                            "around %s operator" % (code, optype))
                 need_space = False
-        elif token_type == tokenize.OP and prev_end is not None:
+        elif token_type in operator_types and prev_end is not None:
             if text == '=' and parens:
                 # Allow keyword args or defaults: foo(bar=None).
                 pass
             elif text in WS_NEEDED_OPERATORS:
                 need_space = True
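
For context (illustrative only, not part of the changeset): the new branch is meant to keep the PEP 570 positional-only marker from being reported as a missing-whitespace error, so that on Python 3.8+ a signature like the following passes the operator check:

    # Positional-only parameters (PEP 570); the bare '/' marker in the
    # signature is now tolerated by missing_whitespace_around_operator().
    def pow_mod(base, exp, mod, /):
        return pow(base, exp) % mod
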
@@ -1072,15 +1131,14 @@
 
     Okay: import os
     Okay: # this is a comment\nimport os
     Okay: '''this is a module docstring'''\nimport os
     Okay: r'''this is a module docstring'''\nimport os
     Okay:
     try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y
     Okay:
     try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y
-
     E402: a=1\nimport os
     E402: 'One string'\n"Two string"\nimport os
     E402: a=1\nfrom sys import x
 
     Okay: if x:\n import os
@@ -1090,11 +1148,12 @@
             line = line[1:]
         if line and line[0] in 'rR':
             line = line[1:]
         return line and (line[0] == '"' or line[0] == "'")
 
-    allowed_try_keywords = ('try', 'except', 'else', 'finally')
+    allowed_keywords = (
+        'try', 'except', 'else', 'finally', 'with', 'if', 'elif')
 
     if indent_level:  # Allow imports in conditional statement/function
         return
     if not logical_line:  # Allow empty lines or comments
         return
@@ -1104,13 +1163,13 @@
     if line.startswith('import ') or line.startswith('from '):
         if checker_state.get('seen_non_imports', False):
             yield 0, "E402 module level import not at top of file"
     elif re.match(DUNDER_REGEX, line):
         return
-    elif any(line.startswith(kw) for kw in allowed_try_keywords):
-        # Allow try, except, else, finally keywords intermixed with
-        # imports in order to support conditional importing
+    elif any(line.startswith(kw) for kw in allowed_keywords):
+        # Allow certain keywords intermixed with imports in order to
+        # support conditional or filtered importing
         return
     elif is_string_literal(line):
         # The first literal is a docstring, allow it. Otherwise, report
         # error.
         if checker_state.get('seen_docstring', False):
@@ -1158,11 +1217,13 @@
     counts = {char: 0 for char in '{}[]()'}
     while -1 < found < last_char:
         update_counts(line[prev_found:found], counts)
         if ((counts['{'] <= counts['}'] and  # {'a': 1} (dict)
              counts['['] <= counts[']'] and  # [1:2] (slice)
-             counts['('] <= counts[')'])):  # (annotation)
+             counts['('] <= counts[')']) and  # (annotation)
+                not (sys.version_info >= (3, 8) and
+                     line[found + 1] == '=')):  # assignment expression
             lambda_kw = LAMBDA_REGEX.search(line, 0, found)
             if lambda_kw:
                 before = line[:lambda_kw.start()].rstrip()
                 if before[-1:] == '=' and isidentifier(before[:-1].strip()):
                     yield 0, ("E731 do not assign a lambda expression, use a "
@@ -1222,17 +1283,20 @@
             parens += 1
         elif text in ')]}':
             parens -= 1
 
 
+_SYMBOLIC_OPS = frozenset("()[]{},:.;@=%~") | frozenset(("...",))
+
+
 def _is_binary_operator(token_type, text):
     is_op_token = token_type == tokenize.OP
     is_conjunction = text in ['and', 'or']
     # NOTE(sigmavirus24): Previously the not_a_symbol check was executed
     # conditionally. Since it is now *always* executed, text may be
     # None. In that case we get a TypeError for `text not in str`.
-    not_a_symbol = text and text not in "()[]{},:.;@=%~"
+    not_a_symbol = text and text not in _SYMBOLIC_OPS
     # The % character is strictly speaking a binary operator, but the
     # common usage seems to be to put it next to the format parameters,
     # after a line break.
     return ((is_op_token or is_conjunction) and not_a_symbol)
 
@@ -1441,32 +1505,62 @@
     E741: O = 123
     E741: I = 42
 
     Variables can be bound in several other contexts, including class
     and function definitions, 'global' and 'nonlocal' statements,
-    exception handlers, and 'with' statements.
+    exception handlers, and 'with' and 'for' statements.
+    In addition, we have a special handling for function parameters.
 
     Okay: except AttributeError as o:
     Okay: with lock as L:
+    Okay: foo(l=12)
+    Okay: for a in foo(l=12):
     E741: except AttributeError as O:
     E741: with lock as l:
     E741: global I
     E741: nonlocal l
+    E741: def foo(l):
+    E741: def foo(l=12):
+    E741: l = foo(l=12)
+    E741: for l in range(10):
     E742: class I(object):
     E743: def l(x):
     """
+    is_func_def = False  # Set to true if 'def' is found
+    parameter_parentheses_level = 0
     idents_to_avoid = ('l', 'O', 'I')
     prev_type, prev_text, prev_start, prev_end, __ = tokens[0]
     for token_type, text, start, end, line in tokens[1:]:
         ident = pos = None
+        # find function definitions
+        if prev_text == 'def':
+            is_func_def = True
+        # update parameter parentheses level
+        if parameter_parentheses_level == 0 and \
+                prev_type == tokenize.NAME and \
+                token_type == tokenize.OP and text == '(':
+            parameter_parentheses_level = 1
+        elif parameter_parentheses_level > 0 and \
+                token_type == tokenize.OP:
+            if text == '(':
+                parameter_parentheses_level += 1
+            elif text == ')':
+                parameter_parentheses_level -= 1
         # identifiers on the lhs of an assignment operator
-        if token_type == tokenize.OP and '=' in text:
+        if token_type == tokenize.OP and '=' in text and \
+                parameter_parentheses_level == 0:
             if prev_text in idents_to_avoid:
                 ident = prev_text
                 pos = prev_start
-        # identifiers bound to values with 'as', 'global', or 'nonlocal'
-        if prev_text in ('as', 'global', 'nonlocal'):
+        # identifiers bound to values with 'as', 'for',
+        # 'global', or 'nonlocal'
+        if prev_text in ('as', 'for', 'global', 'nonlocal'):
+            if text in idents_to_avoid:
+                ident = text
+                pos = start
+        # function parameter definitions
+        if is_func_def:
             if text in idents_to_avoid:
                 ident = text
                 pos = start
         if prev_text == 'class':
             if text in idents_to_avoid:
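
A hypothetical snippet (not part of the changeset) showing what the extended E741 check now catches, per the new docstring examples above: ambiguous single-letter names used as function parameters and as 'for' targets, in addition to the previously handled assignments and 'as'/'global'/'nonlocal' bindings:

    def total(l):        # E741: ambiguous parameter name 'l'
        result = 0
        for I in l:      # E741: ambiguous loop variable 'I'
            result += I
        return result
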
@@ -1765,10 +1859,11 @@
     >>> expand_indent('       \t')
     8
     >>> expand_indent('        \t')
     16
     """
+    line = line.rstrip('\n\r')
     if '\t' not in line:
         return len(line) - len(line.lstrip())
     result = 0
     for char in line:
         if char == '\t':
