Updated pyflakes to 2.3.1 and pycodestyle to 2.7.0.

author      T.Rzepka <Tobias.Rzepka@gmail.com>
date        Fri, 09 Apr 2021 21:14:51 +0200
changeset   8208:37836fa8e4ea
parent      8207:d359172d11be
child       8209:14470a65a52e

Files changed:
  docs/changelog
  eric6/Plugins/CheckerPlugins/CodeStyleChecker/pycodestyle.py
  eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/__init__.py
  eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/checker.py

--- a/docs/changelog	Fri Apr 09 18:38:01 2021 +0200
+++ b/docs/changelog	Fri Apr 09 21:14:51 2021 +0200
@@ -6,9 +6,12 @@
   -- added a checker to find code that could be simplified
   -- added capability to the results page to filter the messages based on
      message code
+  -- updated pycodestyle to version 2.7.0
 - MicroPython
   -- added option to select the baud rate for flashing ESP32 and ESP8266 boards
   -- updated the BBC micro:bit API file to support micro:bit V2
+- Syntax Checker
+  -- updated pyflakes to version 2.3.1
 
 Version 21.4:
 - bug fixes
--- a/eric6/Plugins/CheckerPlugins/CodeStyleChecker/pycodestyle.py	Fri Apr 09 18:38:01 2021 +0200
+++ b/eric6/Plugins/CheckerPlugins/CodeStyleChecker/pycodestyle.py	Fri Apr 09 21:14:51 2021 +0200
@@ -90,7 +90,7 @@
 except ImportError:
     from ConfigParser import RawConfigParser            # __IGNORE_WARNING__
 
-__version__ = '2.6.0-eric'
+__version__ = '2.7.0-eric'
 
 DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
 DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704,W503,W504'
@@ -116,6 +116,7 @@
     'method': 1,
 }
 MAX_DOC_LENGTH = 72
+INDENT_SIZE = 4
 REPORT_FORMAT = {
     'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
     'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
@@ -276,7 +277,7 @@
     However the last line should end with a new line (warning W292).
     """
     if line_number == total_lines:
-        stripped_last_line = physical_line.rstrip()
+        stripped_last_line = physical_line.rstrip('\r\n')
         if physical_line and not stripped_last_line:
             return 0, "W391 blank line at end of file"
         if stripped_last_line == physical_line:
@@ -567,8 +568,9 @@
 
 @register_check
 def indentation(logical_line, previous_logical, indent_char,
-                indent_level, previous_indent_level):
-    r"""Use 4 spaces per indentation level.
+                indent_level, previous_indent_level,
+                indent_size, indent_size_str):
+    r"""Use indent_size (PEP8 says 4) spaces per indentation level.
 
     For really old code that you don't want to mess up, you can continue
     to use 8-space tabs.
@@ -588,8 +590,11 @@
     """
     c = 0 if logical_line else 3
     tmpl = "E11%d %s" if logical_line else "E11%d %s (comment)"
-    if indent_level % 4:
-        yield 0, tmpl % (1 + c, "indentation is not a multiple of four")
+    if indent_level % indent_size:
+        yield 0, tmpl % (
+            1 + c,
+            "indentation is not a multiple of " + indent_size_str,
+        )
     indent_expect = previous_logical.endswith(':')
     if indent_expect and indent_level <= previous_indent_level:
         yield 0, tmpl % (2 + c, "expected an indented block")
@@ -605,7 +610,8 @@
 
 @register_check
 def continued_indentation(logical_line, tokens, indent_level, hang_closing,
-                          indent_char, noqa, verbose):
+                          indent_char, indent_size, indent_size_str, noqa,
+                          verbose):
     r"""Continuation lines indentation.
 
     Continuation lines should align wrapped elements either vertically
@@ -644,7 +650,8 @@
     indent_next = logical_line.endswith(':')
 
     row = depth = 0
-    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
+    valid_hangs = (indent_size,) if indent_char != '\t' \
+        else (indent_size, indent_size * 2)
     # remember how many brackets were opened on each line
     parens = [0] * nrows
     # relative indents of physical lines
@@ -709,7 +716,8 @@
                     # visual indent is broken
                     yield (start, "E128 continuation line "
                            "under-indented for visual indent")
-            elif hanging_indent or (indent_next and rel_indent[row] == 8):
+            elif hanging_indent or (indent_next and
+                                    rel_indent[row] == 2 * indent_size):
                 # hanging indent is verified
                 if close_bracket and not hang_closing:
                     yield (start, "E123 closing bracket does not match "
@@ -732,7 +740,7 @@
                     error = "E131", "unaligned for hanging indent"
                 else:
                     hangs[depth] = hang
-                    if hang > 4:
+                    if hang > indent_size:
                         error = "E126", "over-indented for hanging indent"
                     else:
                         error = "E121", "under-indented for hanging indent"
@@ -799,8 +807,8 @@
         if last_token_multiline:
             rel_indent[end[0] - first_row] = rel_indent[row]
 
-    if indent_next and expand_indent(line) == indent_level + 4:
-        pos = (start[0], indent[0] + 4)
+    if indent_next and expand_indent(line) == indent_level + indent_size:
+        pos = (start[0], indent[0] + indent_size)
         if visual_indent:
             code = "E129 visually indented line"
         else:
@@ -1132,9 +1140,9 @@
     Okay: # this is a comment\nimport os
     Okay: '''this is a module docstring'''\nimport os
     Okay: r'''this is a module docstring'''\nimport os
-    Okay:
+    Okay:  
     try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y
-    Okay:
+    Okay:  
     try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y
     E402: a=1\nimport os
     E402: 'One string'\n"Two string"\nimport os
@@ -1569,6 +1577,7 @@
                 yield start, "E743 ambiguous function definition '%s'", text
         if ident:
             yield pos, "E741 ambiguous variable name '%s'", ident
+        prev_type = token_type
         prev_text = text
         prev_start = start
 
@@ -1829,10 +1838,8 @@
     def readlines(filename):
         """Read the source code."""
         try:
-            with open(filename, 'rb') as f:
-                (coding, lines) = tokenize.detect_encoding(f.readline)
-                f = TextIOWrapper(f, coding, line_buffering=True)
-                return [line.decode(coding) for line in lines] + f.readlines()
+            with tokenize.open(filename) as f:
+                return f.readlines()
         except (LookupError, SyntaxError, UnicodeError):
             # Fall back if file encoding is improperly declared
             with open(filename, encoding='latin-1') as f:
@@ -1983,8 +1990,12 @@
         self._ast_checks = options.ast_checks
         self.max_line_length = options.max_line_length
         self.max_doc_length = options.max_doc_length
+        self.indent_size = options.indent_size
         self.multiline = False  # in a multiline string?
         self.hang_closing = options.hang_closing
+        self.indent_size = options.indent_size
+        self.indent_size_str = ({2: 'two', 4: 'four', 8: 'eight'}
+                                .get(self.indent_size, str(self.indent_size)))
         self.verbose = options.verbose
         self.filename = filename
         # Dictionary where a checker can store its custom state.
@@ -2159,21 +2170,30 @@
             self.report_error_args(1, 0, 'E902', self._io_error, readlines)
         tokengen = tokenize.generate_tokens(self.readline)
         try:
+            prev_physical = ''
             for token in tokengen:
                 if token[2][0] > self.total_lines:
                     return
                 self.noqa = token[4] and noqa(token[4])
-                self.maybe_check_physical(token)
+                self.maybe_check_physical(token, prev_physical)
                 yield token
+                prev_physical = token[4]
         except (SyntaxError, tokenize.TokenError):
             self.report_invalid_syntax()
 
-    def maybe_check_physical(self, token):
+    def maybe_check_physical(self, token, prev_physical):
         """If appropriate for token, check current physical line(s)."""
         # Called after every token, but act only on end of line.
+
+        # a newline token ends a single physical line.
         if _is_eol_token(token):
-            # Obviously, a newline token ends a single physical line.
-            self.check_physical(token[4])
+            # if the file does not end with a newline, the NEWLINE
+            # token is inserted by the parser, but it does not contain
+            # the previous physical line in `token[4]`
+            if token[4] == '':
+                self.check_physical(prev_physical)
+            else:
+                self.check_physical(token[4])
         elif token[0] == tokenize.STRING and '\n' in token[1]:
             # Less obviously, a string that contains newlines is a
             # multiline string, either triple-quoted or with internal
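The prev_physical bookkeeping above covers files whose last line has no
trailing newline: the NEWLINE token the parser inserts carries an empty
physical line, so the previous physical line is checked instead. A rough
reproduction sketch, assuming the stock pycodestyle API (the file name is a
placeholder):

    import pycodestyle

    with open('no_final_newline.py', 'w') as f:
        f.write('x = 1\ny = 2   ')    # trailing blanks, no final newline

    style = pycodestyle.StyleGuide()
    report = style.check_files(['no_final_newline.py'])
    # With the fix, the physical-line checks still run on the unterminated
    # last line, so W291 (trailing whitespace) and W292 (no newline at end
    # of file) are expected to appear in report.counters.
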
@@ -2584,8 +2604,8 @@
                           usage="%prog [options] input ...")
     parser.config_options = [
         'exclude', 'filename', 'select', 'ignore', 'max-line-length',
-        'max-doc-length', 'hang-closing', 'count', 'format', 'quiet',
-        'show-pep8', 'show-source', 'statistics', 'verbose']
+        'max-doc-length', 'indent-size', 'hang-closing', 'count', 'format',
+        'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose']
     parser.add_option('-v', '--verbose', default=0, action='count',
                       help="print status messages, or debug with -vv")
     parser.add_option('-q', '--quiet', default=0, action='count',
@@ -2625,6 +2645,10 @@
                       default=None,
                       help="set maximum allowed doc line length and perform "
                            "these checks (unchecked if not set)")
+    parser.add_option('--indent-size', type='int', metavar='n',
+                      default=INDENT_SIZE,
+                      help="set how many spaces make up an indent "
+                           "(default: %default)")
     parser.add_option('--hang-closing', action='store_true',
                       help="hang closing bracket instead of matching "
                            "indentation of opening bracket's line")
--- a/eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/__init__.py	Fri Apr 09 18:38:01 2021 +0200
+++ b/eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/__init__.py	Fri Apr 09 21:14:51 2021 +0200
@@ -31,13 +31,30 @@
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 """
 
-__version__ = '2.2.0'
+__version__ = '2.3.1'
 
 """
-pyflakes repository date: 2020-02-03.
+pyflakes repository date: 2021-03-24.
 """
 
 """ Changes
+2.3.1 (2021-03-24)
+
+- Fix regression in 2.3.0: type annotations no longer redefine imports
+
+2.3.0 (2021-03-14)
+
+- Recognize tuple concatenation in ``__all__`` export definitions
+- Better support use of annotation-only assignments when using
+  ``from __future__ import annotations``
+- Recognize special-case typing for ``Annotated``
+- Fix undefined name ``__qualname__`` in class scope
+- Recognize special-cased typing for ``TypeVar``
+- Errors for undefined exports in ``__all__`` are shown in a deterministic
+  order
+- Fix false positives in certain typing constructs (``TypeVar``,
+  ``NamedTuple``, ``TypedDict``, ``cast``)
+
 2.2.0 (2020-04-08)
 
 - Include column information in error messages
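As a small illustration of the first 2.3.0 item above, tuple concatenation in
an __all__ definition is now parsed, so undefined exports get reported (the
module below is hypothetical):

    def spam(): ...
    def eggs(): ...

    # recognised as an export definition; 'ham' is never defined, so pyflakes
    # now reports something along the lines of
    # "undefined name 'ham' in __all__"
    __all__ = ('spam',) + ('eggs', 'ham')
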
--- a/eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/checker.py	Fri Apr 09 18:38:01 2021 +0200
+++ b/eric6/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/checker.py	Fri Apr 09 21:14:51 2021 +0200
@@ -86,6 +86,10 @@
     LOOP_TYPES = (ast.While, ast.For)
     FUNCTION_TYPES = (ast.FunctionDef,)
 
+if PY36_PLUS:
+    ANNASSIGN_TYPES = (ast.AnnAssign,)
+else:
+    ANNASSIGN_TYPES = ()
 
 if PY38_PLUS:
     def _is_singleton(node):  # type: (ast.AST) -> bool
@@ -131,6 +135,13 @@
     return _is_constant(node) and not _is_singleton(node)
 
 
+def _is_name_or_attr(node, name):  # type: (ast.AST, str) -> bool
+    return (
+        (isinstance(node, ast.Name) and node.id == name) or
+        (isinstance(node, ast.Attribute) and node.attr == name)
+    )
+
+
 # https://github.com/python/typed_ast/blob/1.4.0/ast27/Parser/tokenizer.c#L102-L104
 TYPE_COMMENT_RE = re.compile(r'^#\s*type:\s*')
 # https://github.com/python/typed_ast/blob/1.4.0/ast27/Parser/tokenizer.c#L1408-L1413
@@ -535,6 +546,20 @@
     """
 
 
+class Annotation(Binding):
+    """
+    Represents binding a name to a type without an associated value.
+
+    As long as this name is not assigned a value in another binding, it is considered
+    undefined for most purposes. One notable exception is using the name as a type
+    annotation.
+    """
+
+    def redefines(self, other):
+        """An Annotation doesn't define any name, so it cannot redefine one."""
+        return False
+
+
 class FunctionDefinition(Definition):
     pass
 
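A short illustration of the semantics described in the docstring above (the
snippet is hypothetical):

    timeout: int              # annotation only: bound as an Annotation

    print(timeout)            # reported as undefined - no value was assigned

    def wait(t: "timeout"):   # but referencing the name inside a string
        ...                   # annotation (or any annotation once
                              # `from __future__ import annotations` is in
                              # effect) is accepted
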
@@ -549,7 +574,7 @@
     can be determined statically, they will be treated as names for export and
     additional checking applied to them.
 
-    The only recognized C{__all__} assignment via list concatenation is in the
+    The only recognized C{__all__} assignment via list/tuple concatenation is in the
     following format:
 
         __all__ = ['a'] + ['b'] + ['c']
@@ -571,10 +596,10 @@
 
         if isinstance(source.value, (ast.List, ast.Tuple)):
             _add_to_names(source.value)
-        # If concatenating lists
+        # If concatenating lists or tuples
         elif isinstance(source.value, ast.BinOp):
             currentValue = source.value
-            while isinstance(currentValue.right, ast.List):
+            while isinstance(currentValue.right, (ast.List, ast.Tuple)):
                 left = currentValue.left
                 right = currentValue.right
                 _add_to_names(right)
@@ -582,7 +607,7 @@
                 if isinstance(left, ast.BinOp):
                     currentValue = left
                 # If just two lists are being added
-                elif isinstance(left, ast.List):
+                elif isinstance(left, (ast.List, ast.Tuple)):
                     _add_to_names(left)
                     # All lists accounted for - done
                     break
@@ -655,6 +680,10 @@
         self.col_offset = col_offset
 
 
+class DetectClassScopedMagic:
+    names = dir()
+
+
 # Globally defined names which are not attributes of the builtins module, or
 # are only present on some platforms.
 _MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']
@@ -739,6 +768,12 @@
     )
 
 
+class AnnotationState:
+    NONE = 0
+    STRING = 1
+    BARE = 2
+
+
 def in_annotation(func):
     @functools.wraps(func)
     def in_annotation_func(self, *args, **kwargs):
@@ -747,6 +782,14 @@
     return in_annotation_func
 
 
+def in_string_annotation(func):
+    @functools.wraps(func)
+    def in_annotation_func(self, *args, **kwargs):
+        with self._enter_annotation(AnnotationState.STRING):
+            return func(self, *args, **kwargs)
+    return in_annotation_func
+
+
 def make_tokens(code):
     # PY3: tokenize.tokenize requires readline of bytes
     if not isinstance(code, bytes):
@@ -833,8 +876,7 @@
     nodeDepth = 0
     offset = None
     traceTree = False
-    _in_annotation = False
-    _in_typing_literal = False
+    _in_annotation = AnnotationState.NONE
     _in_deferred = False
 
     builtIns = set(builtin_vars).union(_MAGIC_GLOBALS)
@@ -961,7 +1003,10 @@
 
             if all_binding:
                 all_names = set(all_binding.names)
-                undefined = all_names.difference(scope)
+                undefined = [
+                    name for name in all_binding.names
+                    if name not in scope
+                ]
             else:
                 all_names = undefined = []
 
@@ -1106,7 +1151,10 @@
             # then assume the rebound name is used as a global or within a loop
             value.used = self.scope[value.name].used
 
-        self.scope[value.name] = value
+        # don't treat annotations as assignments if there is an existing value
+        # in scope
+        if value.name not in self.scope or not isinstance(value, Annotation):
+            self.scope[value.name] = value
 
     def _unknown_handler(self, node):
         # this environment variable configures whether to error on unknown
@@ -1153,8 +1201,11 @@
                     # iteration
                     continue
 
-            if (name == 'print' and
-                    isinstance(scope.get(name, None), Builtin)):
+            binding = scope.get(name, None)
+            if isinstance(binding, Annotation) and not self._in_postponed_annotation:
+                continue
+
+            if name == 'print' and isinstance(binding, Builtin):
                 parent = self.getParent(node)
                 if (isinstance(parent, ast.BinOp) and
                         isinstance(parent.op, ast.RShift)):
@@ -1201,7 +1252,7 @@
             # the special name __path__ is valid only in packages
             return
 
-        if name == '__module__' and isinstance(self.scope, ClassScope):
+        if name in DetectClassScopedMagic.names and isinstance(self.scope, ClassScope):
             return
 
         # protected with a NameError handler?
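DetectClassScopedMagic.names is simply dir() evaluated inside a class body,
i.e. the names the interpreter defines implicitly in every class scope (such
as __module__ and __qualname__). That is what fixes the "undefined name
__qualname__" false positive listed in the changelog, e.g.:

    class Plugin:
        label = "checker: " + __qualname__   # no longer flagged by pyflakes
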
@@ -1229,7 +1280,9 @@
                     break
 
         parent_stmt = self.getParent(node)
-        if isinstance(parent_stmt, (FOR_TYPES, ast.comprehension)) or (
+        if isinstance(parent_stmt, ANNASSIGN_TYPES) and parent_stmt.value is None:
+            binding = Annotation(name, node)
+        elif isinstance(parent_stmt, (FOR_TYPES, ast.comprehension)) or (
                 parent_stmt != node._pyflakes_parent and
                 not self.isLiteralTupleUnpacking(parent_stmt)):
             binding = Binding(name, node)
@@ -1272,13 +1325,20 @@
                 self.report(messages.UndefinedName, node, name)
 
     @contextlib.contextmanager
-    def _enter_annotation(self):
-        orig, self._in_annotation = self._in_annotation, True
+    def _enter_annotation(self, ann_type=AnnotationState.BARE):
+        orig, self._in_annotation = self._in_annotation, ann_type
         try:
             yield
         finally:
             self._in_annotation = orig
 
+    @property
+    def _in_postponed_annotation(self):
+        return (
+            self._in_annotation == AnnotationState.STRING or
+            self.annotationsFutureEnabled
+        )
+
     def _handle_type_comments(self, node):
         for (lineno, col_offset), comment in self._type_comments.get(node, ()):
             comment = comment.split(':', 1)[1].strip()
@@ -1406,7 +1466,7 @@
         self.popScope()
         self.scopeStack = saved_stack
 
-    @in_annotation
+    @in_string_annotation
     def handleStringAnnotation(self, s, node, ref_lineno, ref_col_offset, err):
         try:
             tree = ast.parse(s)
@@ -1464,20 +1524,36 @@
         STARRED = NAMECONSTANT = NAMEDEXPR = handleChildren
 
     def SUBSCRIPT(self, node):
-        if (
-                (
-                    isinstance(node.value, ast.Name) and
-                    node.value.id == 'Literal'
-                ) or (
-                    isinstance(node.value, ast.Attribute) and
-                    node.value.attr == 'Literal'
-                )
-        ):
-            orig, self._in_typing_literal = self._in_typing_literal, True
-            try:
+        if _is_name_or_attr(node.value, 'Literal'):
+            with self._enter_annotation(AnnotationState.NONE):
                 self.handleChildren(node)
-            finally:
-                self._in_typing_literal = orig
+        elif _is_name_or_attr(node.value, 'Annotated'):
+            self.handleNode(node.value, node)
+
+            # py39+
+            if isinstance(node.slice, ast.Tuple):
+                slice_tuple = node.slice
+            # <py39
+            elif (
+                    isinstance(node.slice, ast.Index) and
+                    isinstance(node.slice.value, ast.Tuple)
+            ):
+                slice_tuple = node.slice.value
+            else:
+                slice_tuple = None
+
+            # not a multi-arg `Annotated`
+            if slice_tuple is None or len(slice_tuple.elts) < 2:
+                self.handleNode(node.slice, node)
+            else:
+                # the first argument is the type
+                self.handleNode(slice_tuple.elts[0], node)
+                # the rest of the arguments are not
+                with self._enter_annotation(AnnotationState.NONE):
+                    for arg in slice_tuple.elts[1:]:
+                        self.handleNode(arg, node)
+
+            self.handleNode(node.ctx, node)
         else:
             if _is_any_typing_member(node.value, self.scopeStack):
                 with self._enter_annotation():
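The effect of the new Annotated branch: only the first subscript argument is
treated as a type, while the remaining metadata arguments are handled outside
annotation context, so strings there are no longer parsed as forward
references. An illustrative snippet (typing.Annotated needs Python 3.9+, or
typing_extensions on older versions):

    from typing import Annotated

    def greet(name: Annotated[str, "display name"]) -> str:
        # "display name" is metadata, not a forward reference to some
        # undefined type, so it is left alone
        return "hello " + name
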
@@ -1613,15 +1689,79 @@
         ):
             self._handle_string_dot_format(node)
 
+        omit = []
+        annotated = []
+        not_annotated = []
+
         if (
             _is_typing(node.func, 'cast', self.scopeStack) and
-            len(node.args) >= 1 and
-            isinstance(node.args[0], ast.Str)
+            len(node.args) >= 1
         ):
             with self._enter_annotation():
                 self.handleNode(node.args[0], node)
 
-        self.handleChildren(node)
+        elif _is_typing(node.func, 'TypeVar', self.scopeStack):
+
+            # TypeVar("T", "int", "str")
+            omit += ["args"]
+            annotated += [arg for arg in node.args[1:]]
+
+            # TypeVar("T", bound="str")
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords if k.arg == "bound"]
+            not_annotated += [
+                (k, ["value"] if k.arg == "bound" else None)
+                for k in node.keywords
+            ]
+
+        elif _is_typing(node.func, "TypedDict", self.scopeStack):
+            # TypedDict("a", {"a": int})
+            if len(node.args) > 1 and isinstance(node.args[1], ast.Dict):
+                omit += ["args"]
+                annotated += node.args[1].values
+                not_annotated += [
+                    (arg, ["values"] if i == 1 else None)
+                    for i, arg in enumerate(node.args)
+                ]
+
+            # TypedDict("a", a=int)
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords]
+            not_annotated += [(k, ["value"]) for k in node.keywords]
+
+        elif _is_typing(node.func, "NamedTuple", self.scopeStack):
+            # NamedTuple("a", [("a", int)])
+            if (
+                len(node.args) > 1 and
+                isinstance(node.args[1], (ast.Tuple, ast.List)) and
+                all(isinstance(x, (ast.Tuple, ast.List)) and
+                    len(x.elts) == 2 for x in node.args[1].elts)
+            ):
+                omit += ["args"]
+                annotated += [elt.elts[1] for elt in node.args[1].elts]
+                not_annotated += [(elt.elts[0], None) for elt in node.args[1].elts]
+                not_annotated += [
+                    (arg, ["elts"] if i == 1 else None)
+                    for i, arg in enumerate(node.args)
+                ]
+                not_annotated += [(elt, "elts") for elt in node.args[1].elts]
+
+            # NamedTuple("a", a=int)
+            omit += ["keywords"]
+            annotated += [k.value for k in node.keywords]
+            not_annotated += [(k, ["value"]) for k in node.keywords]
+
+        if omit:
+            with self._enter_annotation(AnnotationState.NONE):
+                for na_node, na_omit in not_annotated:
+                    self.handleChildren(na_node, omit=na_omit)
+                self.handleChildren(node, omit=omit)
+
+            with self._enter_annotation():
+                for annotated_node in annotated:
+                    self.handleNode(annotated_node, node)
+        else:
+            self.handleChildren(node)
 
     def _handle_percent_format(self, node):
         try:
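The new branches mirror the call forms shown in the comments above: the type
positions of TypeVar, TypedDict and NamedTuple calls are visited in annotation
context (so quoted names act as deferred forward references), while field and
keyword names are deliberately not. An illustrative module (names are
hypothetical; typing.TypedDict needs Python 3.8+):

    from typing import NamedTuple, TypeVar, TypedDict

    T = TypeVar("T", "int", "str")           # constraints given as strings
    U = TypeVar("U", bound="Config")         # bound as a forward reference
    Row = TypedDict("Row", {"id": "int", "name": "str"})
    Point = NamedTuple("Point", [("x", "int"), ("y", "int")])

    class Config: ...

None of the quoted type names above should be reported, which is the "false
positives in certain typing constructs" fix from the 2.3.0 changelog.
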
@@ -1735,7 +1875,7 @@
         self.handleChildren(node)
 
     def STR(self, node):
-        if self._in_annotation and not self._in_typing_literal:
+        if self._in_annotation:
             fn = functools.partial(
                 self.handleStringAnnotation,
                 node.s,
@@ -2231,11 +2371,7 @@
             self.scope[node.name] = prev_definition
 
     def ANNASSIGN(self, node):
-        if node.value:
-            # Only bind the *targets* if the assignment has a value.
-            # Otherwise it's not really ast.Store and shouldn't silence
-            # UndefinedLocal warnings.
-            self.handleNode(node.target, node)
+        self.handleNode(node.target, node)
         self.handleAnnotation(node.annotation, node)
         if node.value:
             # If the assignment has value, handle the *value* now.
