82 sys.version_info < (3, 10) and |
82 sys.version_info < (3, 10) and |
83 callable(getattr(tokenize, '_compile', None)) |
83 callable(getattr(tokenize, '_compile', None)) |
84 ): # pragma: no cover (<py310) |
84 ): # pragma: no cover (<py310) |
85 tokenize._compile = lru_cache(tokenize._compile) # type: ignore |
85 tokenize._compile = lru_cache(tokenize._compile) # type: ignore |
86 |
86 |
87 __version__ = '2.12.1-eric' |
87 __version__ = '2.13.0-eric' |
88 |
88 |
89 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' |
89 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox' |
90 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504' |
90 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504' |
91 try: |
91 try: |
92 if sys.platform == 'win32': # pragma: win32 cover |
92 if sys.platform == 'win32': # pragma: win32 cover |
115 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', |
115 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', |
116 } |
116 } |
117 |
117 |
# Flag passed to compile() to get an AST without executing code.
PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
# All Python keywords plus legacy 'print', minus the singleton constants
# (which the checks treat separately).  'async' is already in
# keyword.kwlist on every supported Python, so it is not added here.
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
# Operator classification sets used by the whitespace checks below.
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-', '@'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
124 WS_NEEDED_OPERATORS = frozenset([ |
124 WS_NEEDED_OPERATORS = frozenset([ |
125 '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<', '>', |
125 '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<', '>', |
# Regexes used by the logical-line checks.
# KEYWORD_REGEX captures the whitespace on either side of any keyword
# from KEYWORDS (defined earlier in this module).
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
# Captures whitespace around an operator (or ':=') that follows a
# non-comma, non-space character.
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+|:=)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
# Unified-diff hunk header "@@ -a,b +c,d @@"; groups are the new-file
# start line and (optional) line count.
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')
STARTSWITH_DEF_REGEX = re.compile(r'^(async\s+def|def)\b')
|
# PEP 695 generic syntax: "def f[T](...)", "class C[T]:", "type X[T] = ...".
STARTSWITH_GENERIC_REGEX = re.compile(r'^(async\s+def|def|class|type)\s+\w+\[')
# Lines that start a top-level definition or decorator.
STARTSWITH_TOP_LEVEL_REGEX = re.compile(r'^(async\s+def\s+|def\s+|class\s+|@)')
155 STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( |
156 STARTSWITH_INDENT_STATEMENT_REGEX = re.compile( |
156 r'^\s*({})\b'.format('|'.join(s.replace(' ', r'\s+') for s in ( |
157 r'^\s*({})\b'.format('|'.join(s.replace(' ', r'\s+') for s in ( |
157 'def', 'async def', |
158 'def', 'async def', |
158 'for', 'async for', |
159 'for', 'async for', |
247 |
248 |
248 Okay: spam(1)\n# |
249 Okay: spam(1)\n# |
249 W291: spam(1) \n# |
250 W291: spam(1) \n# |
250 W293: class Foo(object):\n \n bang = 12 |
251 W293: class Foo(object):\n \n bang = 12 |
251 """ |
252 """ |
252 physical_line = physical_line.rstrip('\n') # chr(10), newline |
253 # Strip these trailing characters: |
253 physical_line = physical_line.rstrip('\r') # chr(13), carriage return |
254 # - chr(10), newline |
254 physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L |
255 # - chr(13), carriage return |
|
256 # - chr(12), form feed, ^L |
|
257 physical_line = physical_line.rstrip('\n\r\x0c') |
255 stripped = physical_line.rstrip(' \t\v') |
258 stripped = physical_line.rstrip(' \t\v') |
256 if physical_line != stripped: |
259 if physical_line != stripped: |
257 if stripped: |
260 if stripped: |
258 return len(stripped), "W291 trailing whitespace" |
261 return len(stripped), "W291 trailing whitespace" |
259 else: |
262 else: |
808 # Syntax "class A (B):" is allowed, but avoid it |
811 # Syntax "class A (B):" is allowed, but avoid it |
809 (index < 2 or tokens[index - 2][1] != 'class') and |
812 (index < 2 or tokens[index - 2][1] != 'class') and |
810 # Allow "return (a.foo for a in range(5))" |
813 # Allow "return (a.foo for a in range(5))" |
811 not keyword.iskeyword(prev_text) and |
814 not keyword.iskeyword(prev_text) and |
812 ( |
815 ( |
813 sys.version_info < (3, 9) or |
|
814 # 3.12+: type is a soft keyword but no braces after |
816 # 3.12+: type is a soft keyword but no braces after |
815 prev_text == 'type' or |
817 prev_text == 'type' or |
816 not keyword.issoftkeyword(prev_text) |
818 not keyword.issoftkeyword(prev_text) |
817 ) |
819 ) |
818 ): |
820 ): |
983 # Check if the operator is used as a binary operator |
985 # Check if the operator is used as a binary operator |
984 # Allow unary operators: -123, -x, +1. |
986 # Allow unary operators: -123, -x, +1. |
985 # Allow argument unpacking: foo(*args, **kwargs). |
987 # Allow argument unpacking: foo(*args, **kwargs). |
986 if prev_type == tokenize.OP and prev_text in '}])' or ( |
988 if prev_type == tokenize.OP and prev_text in '}])' or ( |
987 prev_type != tokenize.OP and |
989 prev_type != tokenize.OP and |
988 prev_text not in KEYWORDS and ( |
990 prev_text not in KEYWORDS and |
989 sys.version_info < (3, 9) or |
991 not keyword.issoftkeyword(prev_text) |
990 not keyword.issoftkeyword(prev_text) |
|
991 ) |
|
992 ): |
992 ): |
993 need_space = None |
993 need_space = None |
994 elif text in WS_OPTIONAL_OPERATORS: |
994 elif text in WS_OPTIONAL_OPERATORS: |
995 need_space = None |
995 need_space = None |
996 |
996 |
1045 |
1045 |
1046 E251: def complex(real, imag = 0.0): |
1046 E251: def complex(real, imag = 0.0): |
1047 E251: return magic(r = real, i = imag) |
1047 E251: return magic(r = real, i = imag) |
1048 E252: def complex(real, image: float=0.0): |
1048 E252: def complex(real, image: float=0.0): |
1049 """ |
1049 """ |
1050 parens = 0 |
1050 paren_stack = [] |
1051 no_space = False |
1051 no_space = False |
1052 require_space = False |
1052 require_space = False |
1053 prev_end = None |
1053 prev_end = None |
1054 annotated_func_arg = False |
1054 annotated_func_arg = False |
1055 in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) |
1055 in_def = bool(STARTSWITH_DEF_REGEX.match(logical_line)) |
|
1056 in_generic = bool(STARTSWITH_GENERIC_REGEX.match(logical_line)) |
1056 |
1057 |
1057 message = "E251 unexpected spaces around keyword / parameter equals" |
1058 message = "E251 unexpected spaces around keyword / parameter equals" |
1058 missing_message = "E252 missing whitespace around parameter equals" |
1059 missing_message = "E252 missing whitespace around parameter equals" |
1059 |
1060 |
1060 for token_type, text, start, end, line in tokens: |
1061 for token_type, text, start, end, line in tokens: |
1068 require_space = False |
1069 require_space = False |
1069 if start == prev_end: |
1070 if start == prev_end: |
1070 yield (prev_end, missing_message) |
1071 yield (prev_end, missing_message) |
1071 if token_type == tokenize.OP: |
1072 if token_type == tokenize.OP: |
1072 if text in '([': |
1073 if text in '([': |
1073 parens += 1 |
1074 paren_stack.append(text) |
1074 elif text in ')]': |
1075 # PEP 696 defaults always use spaced-style `=` |
1075 parens -= 1 |
1076 # type A[T = default] = ... |
1076 elif in_def and text == ':' and parens == 1: |
1077 # def f[T = default](): ... |
|
1078 # class C[T = default](): ... |
|
1079 if in_generic and paren_stack == ['[']: |
|
1080 annotated_func_arg = True |
|
1081 elif text in ')]' and paren_stack: |
|
1082 paren_stack.pop() |
|
1083 # def f(arg: tp = default): ... |
|
1084 elif text == ':' and in_def and paren_stack == ['(']: |
1077 annotated_func_arg = True |
1085 annotated_func_arg = True |
1078 elif parens == 1 and text == ',': |
1086 elif len(paren_stack) == 1 and text == ',': |
1079 annotated_func_arg = False |
1087 annotated_func_arg = False |
1080 elif parens and text == '=': |
1088 elif paren_stack and text == '=': |
1081 if annotated_func_arg and parens == 1: |
1089 if annotated_func_arg and len(paren_stack) == 1: |
1082 require_space = True |
1090 require_space = True |
1083 if start == prev_end: |
1091 if start == prev_end: |
1084 yield (prev_end, missing_message) |
1092 yield (prev_end, missing_message) |
1085 else: |
1093 else: |
1086 no_space = True |
1094 no_space = True |
1087 if start != prev_end: |
1095 if start != prev_end: |
1088 yield (prev_end, message) |
1096 yield (prev_end, message) |
1089 if not parens: |
1097 if not paren_stack: |
1090 annotated_func_arg = False |
1098 annotated_func_arg = False |
1091 |
1099 |
1092 prev_end = end |
1100 prev_end = end |
1093 |
1101 |
1094 |
1102 |
1938 self.indent_char = line[0] |
1946 self.indent_char = line[0] |
1939 return line |
1947 return line |
1940 |
1948 |
def run_check(self, check, argument_names):
    """Run a check plugin.

    Resolve each name in *argument_names* to the attribute of the same
    name on this checker instance, then call *check* with those values
    as positional arguments and return whatever the plugin returns.
    """
    # Comprehension replaces the older explicit append loop; same order,
    # same attribute lookups.
    arguments = [getattr(self, name) for name in argument_names]
    return check(*arguments)
1947 |
1953 |
1948 def init_checker_state(self, name, argument_names): |
1954 def init_checker_state(self, name, argument_names): |
1949 """Prepare custom state for the specific checker plugin.""" |
1955 """Prepare custom state for the specific checker plugin.""" |
1950 if 'checker_state' in argument_names: |
1956 if 'checker_state' in argument_names: |