Tue, 16 Jun 2020 17:44:28 +0200
Third Party packages: updated asttokens to version 2.0.4.
--- a/docs/changelog Mon Jun 15 19:48:51 2020 +0200 +++ b/docs/changelog Tue Jun 16 17:44:28 2020 +0200 @@ -7,6 +7,8 @@ -- updated pycodestyle to version 2.6.0 - Syntax Checker -- updated pyflakes to version 2.2.0 +- Third Party packages: + -- updated asttoken to version 2.0.4 Version 20.6: - bug fixes
--- a/eric6/ThirdParty/asttokens/__init__.py Mon Jun 15 19:48:51 2020 +0200 +++ b/eric6/ThirdParty/asttokens/__init__.py Tue Jun 16 17:44:28 2020 +0200 @@ -10,4 +10,4 @@ is modified to not depend on 'six'. """ -__version__ = "1.1.13" +__version__ = "2.0.4"
--- a/eric6/ThirdParty/asttokens/asttokens/asttokens.py Mon Jun 15 19:48:51 2020 +0200 +++ b/eric6/ThirdParty/asttokens/asttokens/asttokens.py Tue Jun 16 17:44:28 2020 +0200 @@ -17,6 +17,9 @@ import sys if sys.version_info[0] == 3: xrange = range + binary_type = bytes +else: + binary_type = str import ast import bisect @@ -51,6 +54,12 @@ self._filename = filename self._tree = ast.parse(source_text, filename) if parse else tree + # Decode source after parsing to let Python 2 handle coding declarations. + # (If the encoding was not utf-8 compatible, then even if it parses correctly, + # we'll fail with a unicode error here.) + if isinstance(source_text, binary_type): + source_text = source_text.decode('utf8') + self._text = source_text self._line_numbers = LineNumbers(source_text)
--- a/eric6/ThirdParty/asttokens/asttokens/mark_tokens.py Mon Jun 15 19:48:51 2020 +0200 +++ b/eric6/ThirdParty/asttokens/asttokens/mark_tokens.py Tue Jun 16 17:44:28 2020 +0200 @@ -15,16 +15,18 @@ # limitations under the License. try: - string_types = basestring, # Python 2 + text_type = unicode # Python 2 + binary_type = str except NameError: - string_types = str, # Python 3 + text_type = str # Python 3 + binary_type = bytes +import numbers import sys -import numbers import token + from . import util - # Mapping of matching braces. To find a token here, look up token[:2]. _matching_pairs_left = { (token.OP, '('): (token.OP, ')'), @@ -88,7 +90,7 @@ # Statements continue to before NEWLINE. This helps cover a few different cases at once. if util.is_stmt(node): - last = self._find_last_in_line(last) + last = self._find_last_in_stmt(last) # Capture any unmatched brackets. first, last = self._expand_to_matching_pairs(first, last, node) @@ -103,28 +105,13 @@ node.first_token = nfirst node.last_token = nlast - def _find_last_in_line(self, start_token): - try: - newline = self._code.find_token(start_token, token.NEWLINE) - except IndexError: - newline = self._code.find_token(start_token, token.ENDMARKER) - return self._code.prev_token(newline) - - def _iter_non_child_tokens(self, first_token, last_token, node): - """ - Generates all tokens in [first_token, last_token] range that do not belong to any children of - node. E.g. `foo(bar)` has children `foo` and `bar`, but we would yield the `(`. 
- """ - tok = first_token - for n in self._iter_children(node): - for t in self._code.token_range(tok, self._code.prev_token(n.first_token)): - yield t - if n.last_token.index >= last_token.index: - return - tok = self._code.next_token(n.last_token) - - for t in self._code.token_range(tok, last_token): - yield t + def _find_last_in_stmt(self, start_token): + t = start_token + while (not util.match_token(t, token.NEWLINE) and + not util.match_token(t, token.OP, ';') and + not token.ISEOF(t.type)): + t = self._code.next_token(t, include_extra=True) + return self._code.prev_token(t) def _expand_to_matching_pairs(self, first_token, last_token, node): """ @@ -135,7 +122,7 @@ # child nodes). If we find any closing ones, we match them to the opens. to_match_right = [] to_match_left = [] - for tok in self._iter_non_child_tokens(first_token, last_token, node): + for tok in self._code.token_range(first_token, last_token): tok_info = tok[:2] if to_match_right and tok_info == to_match_right[-1]: to_match_right.pop() @@ -147,8 +134,8 @@ # Once done, extend `last_token` to match any unclosed parens/braces. for match in reversed(to_match_right): last = self._code.next_token(last_token) - # Allow for a trailing comma before the closing delimiter. - if util.match_token(last, token.OP, ','): + # Allow for trailing commas or colons (allowed in subscripts) before the closing delimiter + while any(util.match_token(last, token.OP, x) for x in (',', ':')): last = self._code.next_token(last) # Now check for the actual closing delimiter. 
if util.match_token(last, *match): @@ -178,8 +165,11 @@ util.expect_token(before, token.OP, open_brace) return (before, last_token) - def visit_listcomp(self, node, first_token, last_token): - return self.handle_comp('[', node, first_token, last_token) + # Python 3.8 fixed the starting position of list comprehensions: + # https://bugs.python.org/issue31241 + if sys.version_info < (3, 8): + def visit_listcomp(self, node, first_token, last_token): + return self.handle_comp('[', node, first_token, last_token) if sys.version_info[0] == 2: # We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start. @@ -195,6 +185,11 @@ first = self._code.find_token(first_token, token.NAME, 'for', reverse=True) return (first, last_token) + def visit_if(self, node, first_token, last_token): + while first_token.string not in ('if', 'elif'): + first_token = self._code.prev_token(first_token) + return first_token, last_token + def handle_attr(self, node, first_token, last_token): # Attribute node has ".attr" (2 tokens) after the last child. dot = self._code.find_token(last_token, token.OP, '.') @@ -206,38 +201,88 @@ visit_assignattr = handle_attr visit_delattr = handle_attr - def handle_doc(self, node, first_token, last_token): + def handle_def(self, node, first_token, last_token): # With astroid, nodes that start with a doc-string can have an empty body, in which case we # need to adjust the last token to include the doc string. 
if not node.body and getattr(node, 'doc', None): last_token = self._code.find_token(last_token, token.STRING) + + # Include @ from decorator + if first_token.index > 0: + prev = self._code.prev_token(first_token) + if util.match_token(prev, token.OP, '@'): + first_token = prev return (first_token, last_token) - visit_classdef = handle_doc - visit_funcdef = handle_doc + visit_classdef = handle_def + visit_functiondef = handle_def + + def handle_following_brackets(self, node, last_token, opening_bracket): + # This is for calls and subscripts, which have a pair of brackets + # at the end which may contain no nodes, e.g. foo() or bar[:]. + # We look for the opening bracket and then let the matching pair be found automatically + # Remember that last_token is at the end of all children, + # so we are not worried about encountering a bracket that belongs to a child. + first_child = next(self._iter_children(node)) + call_start = self._code.find_token(first_child.last_token, token.OP, opening_bracket) + if call_start.index > last_token.index: + last_token = call_start + return last_token def visit_call(self, node, first_token, last_token): - # A function call isn't over until we see a closing paren. Remember that last_token is at the - # end of all children, so we are not worried about encountering a paren that belongs to a - # child. - first_child = next(self._iter_children(node)) - call_start = self._code.find_token(first_child.last_token, token.OP, '(') - if call_start.index > last_token.index: - last_token = call_start + last_token = self.handle_following_brackets(node, last_token, '(') + + # Handling a python bug with decorators with empty parens, e.g. + # @deco() + # def ... + if util.match_token(first_token, token.OP, '@'): + first_token = self._code.next_token(first_token) return (first_token, last_token) def visit_subscript(self, node, first_token, last_token): - # A subscript operations isn't over until we see a closing bracket. Similar to function calls. 
- return (first_token, self._code.find_token(last_token, token.OP, ']')) + last_token = self.handle_following_brackets(node, last_token, '[') + return (first_token, last_token) + + def handle_bare_tuple(self, node, first_token, last_token): + # A bare tuple doesn't include parens; if there is a trailing comma, make it part of the tuple. + maybe_comma = self._code.next_token(last_token) + if util.match_token(maybe_comma, token.OP, ','): + last_token = maybe_comma + return (first_token, last_token) + + if sys.version_info >= (3, 8): + # In Python3.8 parsed tuples include parentheses when present. + def handle_tuple_nonempty(self, node, first_token, last_token): + # It's a bare tuple if the first token belongs to the first child. The first child may + # include extraneous parentheses (which don't create new nodes), so account for those too. + child = node.elts[0] + child_first, child_last = self._gobble_parens(child.first_token, child.last_token, True) + if first_token == child_first: + return self.handle_bare_tuple(node, first_token, last_token) + return (first_token, last_token) + else: + # Before python 3.8, parsed tuples do not include parens. + def handle_tuple_nonempty(self, node, first_token, last_token): + (first_token, last_token) = self.handle_bare_tuple(node, first_token, last_token) + return self._gobble_parens(first_token, last_token, False) def visit_tuple(self, node, first_token, last_token): - # A tuple doesn't include parens; if there is a trailing comma, make it part of the tuple. - try: - maybe_comma = self._code.next_token(last_token) - if util.match_token(maybe_comma, token.OP, ','): - last_token = maybe_comma - except IndexError: - pass + if not node.elts: + # An empty tuple is just "()", and we need no further info. 
+ return (first_token, last_token) + return self.handle_tuple_nonempty(node, first_token, last_token) + + def _gobble_parens(self, first_token, last_token, include_all=False): + # Expands a range of tokens to include one or all pairs of surrounding parentheses, and + # returns (first, last) tokens that include these parens. + while first_token.index > 0: + prev = self._code.prev_token(first_token) + next = self._code.next_token(last_token) + if util.match_token(prev, token.OP, '(') and util.match_token(next, token.OP, ')'): + first_token, last_token = prev, next + if include_all: + continue + break return (first_token, last_token) def visit_str(self, node, first_token, last_token): @@ -246,6 +291,9 @@ def visit_joinedstr(self, node, first_token, last_token): return self.handle_str(first_token, last_token) + def visit_bytes(self, node, first_token, last_token): + return self.handle_str(first_token, last_token) + def handle_str(self, first_token, last_token): # Multiple adjacent STRING tokens form a single string. last = self._code.next_token(last_token) @@ -254,22 +302,42 @@ last = self._code.next_token(last_token) return (first_token, last_token) - def visit_num(self, node, first_token, last_token): + def handle_num(self, node, value, first_token, last_token): # A constant like '-1' gets turned into two tokens; this will skip the '-'. 
while util.match_token(last_token, token.OP): last_token = self._code.next_token(last_token) + + if isinstance(value, complex): + # A complex number like -2j cannot be compared directly to 0 + # A complex number like 1-2j is expressed as a binary operation + # so we don't need to worry about it + value = value.imag + + # This makes sure that the - is included + if value < 0 and first_token.type == token.NUMBER: + first_token = self._code.prev_token(first_token) return (first_token, last_token) + def visit_num(self, node, first_token, last_token): + return self.handle_num(node, node.n, first_token, last_token) + # In Astroid, the Num and Str nodes are replaced by Const. def visit_const(self, node, first_token, last_token): if isinstance(node.value, numbers.Number): - return self.visit_num(node, first_token, last_token) - elif isinstance(node.value, string_types): + return self.handle_num(node, node.value, first_token, last_token) + elif isinstance(node.value, (text_type, binary_type)): return self.visit_str(node, first_token, last_token) return (first_token, last_token) + # In Python >= 3.6, there is a similar class 'Constant' for literals + # In 3.8 it became the type produced by ast.parse + # https://bugs.python.org/issue32892 + visit_constant = visit_const + def visit_keyword(self, node, first_token, last_token): - if node.arg is not None: + # Until python 3.9 (https://bugs.python.org/issue40141), + # ast.keyword nodes didn't have line info. Astroid has lineno None. 
+ if node.arg is not None and getattr(node, 'lineno', None) is None: equals = self._code.find_token(first_token, token.OP, '=', reverse=True) name = self._code.prev_token(equals) util.expect_token(name, token.NAME, node.arg) @@ -296,3 +364,21 @@ def visit_with(self, node, first_token, last_token): first = self._code.find_token(first_token, token.NAME, 'with', reverse=True) return (first, last_token) + + # Async nodes should typically start with the word 'async' + # but Python < 3.7 doesn't put the col_offset there + # AsyncFunctionDef is slightly different because it might have + # decorators before that, which visit_functiondef handles + def handle_async(self, node, first_token, last_token): + if not first_token.string == 'async': + first_token = self._code.prev_token(first_token) + return (first_token, last_token) + + visit_asyncfor = handle_async + visit_asyncwith = handle_async + + def visit_asyncfunctiondef(self, node, first_token, last_token): + if util.match_token(first_token, token.NAME, 'def'): + # Include the 'async' token + first_token = self._code.prev_token(first_token) + return self.visit_functiondef(node, first_token, last_token)
--- a/eric6/ThirdParty/asttokens/asttokens/util.py Mon Jun 15 19:48:51 2020 +0200 +++ b/eric6/ThirdParty/asttokens/asttokens/util.py Tue Jun 16 17:44:28 2020 +0200 @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright 2016 Grist Labs, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -69,7 +71,7 @@ # These were previously defined in tokenize.py and distinguishable by being greater than # token.N_TOKEN. As of python3.7, they are in token.py, and we check for them explicitly. -if hasattr(token, 'COMMENT'): +if hasattr(token, 'ENCODING'): def is_non_coding_token(token_type): """ These are considered non-coding tokens, as they don't affect the syntax tree. @@ -82,17 +84,12 @@ """ return token_type >= token.N_TOKENS -def iter_children(node): - """ - Yields all direct children of a AST node, skipping children that are singleton nodes. - """ - return iter_children_astroid(node) if hasattr(node, 'get_children') else iter_children_ast(node) - def iter_children_func(node): """ - Returns a slightly more optimized function to use in place of ``iter_children``, depending on - whether ``node`` is from ``ast`` or from the ``astroid`` module. + Returns a function which yields all direct children of a AST node, + skipping children that are singleton nodes. + The function depends on whether ``node`` is from ``ast`` or from the ``astroid`` module. """ return iter_children_astroid if hasattr(node, 'get_children') else iter_children_ast @@ -113,6 +110,15 @@ if is_joined_str(node): return + if isinstance(node, ast.Dict): + # override the iteration order: instead of <all keys>, <all values>, + # yield keys and values in source order (key1, value1, key2, value2, ...) 
+ for (key, value) in zip(node.keys, node.values): + if key is not None: + yield key + yield value + return + for child in ast.iter_child_nodes(node): # Skip singleton children; they don't reflect particular positions in the code and break the # assumptions about the tree consisting of distinct nodes. Note that collecting classes @@ -148,6 +154,19 @@ return node.__class__.__name__ == 'JoinedStr' +def is_slice(node): + """Returns whether node represents a slice, e.g. `1:2` in `x[1:2]`""" + # Before 3.9, a tuple containing a slice is an ExtSlice, + # but this was removed in https://bugs.python.org/issue34822 + return ( + node.__class__.__name__ in ('Slice', 'ExtSlice') + or ( + node.__class__.__name__ == 'Tuple' + and any(map(is_slice, node.elts)) + ) + ) + + # Sentinel value used by visit_tree(). _PREVISIT = object() @@ -166,10 +185,8 @@ returned from ``previsit()`` of this node itself. The return ``value`` is ignored except the one for the root node, which is returned from the overall ``visit_tree()`` call. - For the initial node, ``par_value`` is None. Either ``previsit`` and ``postvisit`` may be None. + For the initial node, ``par_value`` is None. ``postvisit`` may be None. """ - if not previsit: - previsit = lambda node, pvalue: (None, None) if not postvisit: postvisit = lambda node, pvalue, value: None @@ -227,7 +244,7 @@ modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``. Replacements is an iterable of ``(start, end, new_text)`` tuples. - For example, ``replace("this is a test", [(0, 4, "X"), (8, 1, "THE")])`` produces + For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces ``"X is THE test"``. """ p = 0