Updated jasy to 1.5-beta5 (latest release available).

Fri, 11 Aug 2017 14:40:54 +0200

author
Detlev Offenbach <detlev@die-offenbachs.de>
date
Fri, 11 Aug 2017 14:40:54 +0200
changeset 5843
76eee727ccd9
parent 5842
c3f41b959a65
child 5844
1294772ac8e6

Updated jasy to 1.5-beta5 (latest release available).

ThirdParty/Jasy/jasy/__init__.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/api/Text.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/parse/Node.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/parse/Parser.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/tokenize/Lang.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/license.md file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/parse/AbstractNode.py file | annotate | diff | comparison | revisions
ThirdParty/Jasy/jasy/parse/__init__.py file | annotate | diff | comparison | revisions
changelog file | annotate | diff | comparison | revisions
eric6.e4p file | annotate | diff | comparison | revisions
--- a/ThirdParty/Jasy/jasy/__init__.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/__init__.py	Fri Aug 11 14:40:54 2017 +0200
@@ -1,7 +1,7 @@
 #
 # Jasy - Web Tooling Framework
 # Copyright 2010-2012 Zynga Inc.
-# Copyright 2013 Sebastian Werner
+# Copyright 2013-2014 Sebastian Werner
 #
 
 """
@@ -9,11 +9,11 @@
 
 Jasy is a powerful Python3-based tooling framework. 
 It makes it easy to manage heavy web projects. 
-Its main goal is to offer an API which could be used by developers to write their
-custom build/deployment scripts.
+Its main goal is to offer an API which could be used by developers to write
+their custom build/deployment scripts.
 """
 
 from __future__ import unicode_literals
 
-__version__ = "1.1.0"
+__version__ = "1.5-beta5"
 __author__ = "Sebastian Werner <info@sebastian-werner.net>"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/api/Text.py	Fri Aug 11 14:40:54 2017 +0200
@@ -0,0 +1,38 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+from __future__ import unicode_literals
+
+import re
+import jasy.core.Console as Console
+
+__all__ = ["extractSummary"]
+
+# Used to filter first paragraph from HTML
+paragraphExtract = re.compile(r"^(.*?)(\. |\? |\! |$)")
+newlineMatcher = re.compile(r"\n")
+
+# Used to remove markup sequences after doc processing of comment text
+stripMarkup = re.compile(r"<.*?>")
+
+def extractSummary(text):
+    try:
+        text = stripMarkup.sub("", newlineMatcher.sub(" ", text))
+        matched = paragraphExtract.match(text)
+    except TypeError:
+        matched = None
+        
+    if matched:
+        summary = matched.group(1)
+        if summary is not None:
+            if not summary.endswith((".", "!", "?")):
+                summary = summary.strip() + "."
+            return summary
+            
+    else:
+        Console.warn("Unable to extract summary for: %s", text)
+    
+    return None
+    
--- a/ThirdParty/Jasy/jasy/js/parse/Node.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/js/parse/Node.py	Fri Aug 11 14:40:54 2017 +0200
@@ -12,6 +12,7 @@
 
 from __future__ import unicode_literals
 
+import json
 import copy
 
 class Node(list):
@@ -24,14 +25,16 @@
         "comments", "scope", 
         
         # node type specific
-        "value", "expression", "body", "functionForm", "parenthesized", "fileId",
-        "params", "name", "readOnly", "initializer", "condition", "isLoop", "isEach",
-        "object", "assignOp", "iterator", "thenPart", "exception", "elsePart", "setup",
-        "postfix", "update", "tryBlock", "block", "defaultIndex", "discriminant", "label",
-        "statements", "finallyBlock", "statement", "variables", "names", "guard", "for",
+        "value", "expression", "body", "functionForm", "parenthesized",
+        "fileId", "params", "name", "readOnly", "initializer", "condition",
+        "isLoop", "isEach", "object", "assignOp", "iterator", "thenPart",
+        "exception", "elsePart", "setup", "postfix", "update", "tryBlock",
+        "block", "defaultIndex", "discriminant", "label", "statements",
+        "finallyBlock", "statement", "variables", "names", "guard", "for",
         "tail", "expressionClosure"
     ]
     
+    
     def __init__(self, tokenizer=None, type=None, args=[]):
         list.__init__(self)
         
@@ -42,8 +45,8 @@
         if tokenizer:
             token = getattr(tokenizer, "token", None)
             if token:
-                # We may define a custom type but use the same positioning as another
-                # token, e.g. transform curlys in block nodes, etc.
+                # We may define a custom type but use the same positioning as another token
+                # e.g. transform curlys in block nodes, etc.
                 self.type = type if type else getattr(token, "type", None)
                 self.line = token.line
                 
@@ -64,7 +67,8 @@
 
         for arg in args:
             self.append(arg)
-    
+            
+            
     def getUnrelatedChildren(self):
         """Collects all unrelated children"""
         
@@ -74,7 +78,8 @@
                 collection.append(child)
             
         return collection
-    
+        
+
     def getChildrenLength(self, filter=True):
         """Number of (per default unrelated) children"""
         
@@ -83,6 +88,7 @@
             if not filter or not hasattr(child, "rel"):
                 count += 1
         return count
+            
     
     def remove(self, kid):
         """Removes the given kid"""
@@ -96,7 +102,8 @@
             del kid.parent
             
         list.remove(self, kid)
-    
+        
+        
     def insert(self, index, kid):
         """Inserts the given kid at the given index"""
         
@@ -109,7 +116,8 @@
         kid.parent = self
 
         return list.insert(self, index, kid)
-    
+            
+
     def append(self, kid, rel=None):
         """Appends the given kid with an optional relation hint"""
         
@@ -134,7 +142,7 @@
                        self.end == None or \
                        self.end < kid.end:
                         self.end = kid.end
-            
+                
             kid.parent = self
             
             # alias for function
@@ -145,8 +153,9 @@
         # Block None kids when they should be related
         if not kid and rel:
             return
-        
+            
         return list.append(self, kid)
+
     
     def replace(self, kid, repl):
         """Replaces the given kid with a replacement kid"""
@@ -162,7 +171,8 @@
             
             # cleanup old kid
             delattr(kid, "rel")
-        
+            
+            
         elif hasattr(repl, "rel"):
             # delete old relation on new child
             delattr(repl, "rel")
@@ -171,7 +181,90 @@
         repl.parent = self
         
         return kid
-    
+        
+
+    def toXml(self, format=True, indent=0, tab="  "):
+        """Converts the node to XML"""
+
+        lead = tab * indent if format else ""
+        innerLead = tab * (indent+1) if format else ""
+        lineBreak = "\n" if format else ""
+
+        relatedChildren = []
+        attrsCollection = []
+        
+        for name in self.__slots__:
+            # "type" is used as node name - no need to repeat it as an attribute
+            # "parent" is a relation to the parent node - for serialization we ignore these at the moment
+            # "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children
+            # "start" and "end" are for debugging only
+            if hasattr(self, name) and name not in ("type", "parent", "comments", "rel", "start", "end") and name[0] != "_":
+                value = getattr(self, name)
+                if isinstance(value, Node):
+                    if hasattr(value, "rel"):
+                        relatedChildren.append(value)
+
+                elif type(value) in (bool, int, float, str, list, set, dict):
+                    if type(value) == bool:
+                        value = "true" if value else "false" 
+                    elif type(value) in (int, float):
+                        value = str(value)
+                    elif type(value) in (list, set, dict):
+                        if type(value) == dict:
+                            value = value.keys()
+                        if len(value) == 0:
+                            continue
+                        try:
+                            value = ",".join(value)
+                        except TypeError:
+                            raise Exception("Invalid attribute list child at: %s" % name)
+                            
+                    attrsCollection.append('%s=%s' % (name, json.dumps(value)))
+
+        attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else ""
+        
+        comments = getattr(self, "comments", None)
+        scope = getattr(self, "scope", None)
+        
+        if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope:
+            result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak)
+
+        else:
+            result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak)
+            
+            if comments:
+                for comment in comments:
+                    result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak)
+                    
+            if scope:
+                for statKey in scope:
+                    statValue = scope[statKey]
+                    if statValue != None and len(statValue) > 0:
+                        if type(statValue) is set:
+                            statValue = ",".join(statValue)
+                        elif type(statValue) is dict:
+                            statValue = ",".join(statValue.keys())
+                        
+                        result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak)
+
+            for child in self:
+                if not child:
+                    result += "%s<none/>%s" % (innerLead, lineBreak)
+                elif not hasattr(child, "rel"):
+                    result += child.toXml(format, indent+1)
+                elif not child in relatedChildren:
+                    raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel))
+
+            for child in relatedChildren:
+                result += "%s<%s>%s" % (innerLead, child.rel, lineBreak)
+                result += child.toXml(format, indent+2)
+                result += "%s</%s>%s" % (innerLead, child.rel, lineBreak)
+
+            result += "%s</%s>%s" % (lead, self.type, lineBreak)
+
+        return result
+        
+        
     def __deepcopy__(self, memo):
         """Used by deepcopy function to clone Node instances"""
         
@@ -202,12 +295,34 @@
                     setattr(result, name, value)
                 elif type(value) in (list, set, dict, Node):
                     setattr(result, name, copy.deepcopy(value, memo))
-                # Scope can be assigned (will be re-created when needed for the
-                # copied node)
+                # Scope can be assigned (will be re-created when needed for the copied node)
                 elif name == "scope":
                     result.scope = self.scope
 
         return result
+        
+        
+    def getSource(self):
+        """Returns the source code of the node"""
+
+        if not self.tokenizer:
+            raise Exception("Could not find source for node '%s'" % node.type)
+            
+        if getattr(self, "start", None) is not None:
+            if getattr(self, "end", None) is not None:
+                return self.tokenizer.source[self.start:self.end]
+            return self.tokenizer.source[self.start:]
+    
+        if getattr(self, "end", None) is not None:
+            return self.tokenizer.source[:self.end]
+    
+        return self.tokenizer.source[:]
+
+
+    # Map Python built-ins
+    __repr__ = toXml
+    __str__ = toXml
+    
     
     def __eq__(self, other):
         return self is other
--- a/ThirdParty/Jasy/jasy/js/parse/Parser.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/js/parse/Parser.py	Fri Aug 11 14:40:54 2017 +0200
@@ -1,11 +1,12 @@
 #
 # Jasy - Web Tooling Framework
 # Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
 #
 
 #
 # License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors: 
+# Authors:
 #   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
 #   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010-2012)
 #
@@ -21,28 +22,29 @@
 def parseExpression(source, fileId=None, line=1, builder=None):
     if builder == None:
         builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
-    
+
     # Convert source into expression statement to be friendly to the Tokenizer
     if not source.endswith(";"):
         source = source + ";"
-    
+
     tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
     staticContext = StaticContext(False, builder)
-    
+
     return Expression(tokenizer, staticContext)
 
 
+
 def parse(source, fileId=None, line=1, builder=None):
     if builder == None:
         builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
-    
+
     tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
     staticContext = StaticContext(False, builder)
     node = Script(tokenizer, staticContext)
-    
+
     # store fileId on top-level node
     node.fileId = tokenizer.fileId
-    
+
     # add missing comments e.g. empty file with only a comment etc.
     # if there is something non-attached by an inner node it is attached to
     # the top level node, which is not correct, but might be better than
@@ -51,7 +53,7 @@
         builder.COMMENTS_add(node[-1], None, tokenizer.getComments())
     else:
         builder.COMMENTS_add(node, None, tokenizer.getComments())
-    
+
     if not tokenizer.done():
         raise SyntaxError("Unexpected end of file", tokenizer)
 
@@ -61,39 +63,37 @@
 
 class SyntaxError(Exception):
     def __init__(self, message, tokenizer):
-        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (
-            message, tokenizer.fileId, tokenizer.line))
+        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, tokenizer.fileId, tokenizer.line))
 
 
 # Used as a status container during tree-building for every def body and the global body
 class StaticContext(object):
     # inFunction is used to check if a return stm appears in a valid context.
     def __init__(self, inFunction, builder):
-        # Whether this is inside a function, mostly True, only for top-level scope
-        # it's False
+        # Whether this is inside a function, mostly True, only for top-level scope it's False
         self.inFunction = inFunction
-        
+
         self.hasEmptyReturn = False
         self.hasReturnWithValue = False
         self.isGenerator = False
         self.blockId = 0
         self.builder = builder
         self.statementStack = []
-        
+
         # Sets to store variable uses
         # self.functions = set()
         # self.variables = set()
-        
+
         # Status
         # self.needsHoisting = False
         self.bracketLevel = 0
         self.curlyLevel = 0
         self.parenLevel = 0
         self.hookLevel = 0
-        
+
         # Configure strict ecmascript 3 mode
         self.ecma3OnlyMode = False
-        
+
         # Status flag during parsing
         self.inForLoopInit = False
 
@@ -101,16 +101,16 @@
 def Script(tokenizer, staticContext):
     """Parses the toplevel and def bodies."""
     node = Statements(tokenizer, staticContext)
-    
+
     # change type from "block" to "script" for script root
     node.type = "script"
-    
+
     # copy over data from compiler context
     # node.functions = staticContext.functions
     # node.variables = staticContext.variables
 
     return node
-    
+
 
 def nest(tokenizer, staticContext, node, func, end=None):
     """Statement stack and nested statement handler."""
@@ -118,7 +118,7 @@
     node = func(tokenizer, staticContext)
     staticContext.statementStack.pop()
     end and tokenizer.mustMatch(end)
-    
+
     return node
 
 
@@ -157,7 +157,7 @@
     tokenizer.mustMatch("left_curly")
     node = Statements(tokenizer, staticContext)
     tokenizer.mustMatch("right_curly")
-    
+
     return node
 
 
@@ -169,25 +169,24 @@
 
     # Cases for statements ending in a right curly return early, avoiding the
     # common semicolon insertion magic after this switch.
-    
+
     if tokenType == "function":
-        # "declared_form" extends functions of staticContext,
-        # "statement_form" doesn'tokenizer.
+        # "declared_form" extends functions of staticContext, "statement_form" doesn'tokenizer.
         if len(staticContext.statementStack) > 1:
             kind = "statement_form"
         else:
             kind = "declared_form"
-        
+
         return FunctionDefinition(tokenizer, staticContext, True, kind)
-        
-        
+
+
     elif tokenType == "left_curly":
         node = Statements(tokenizer, staticContext)
         tokenizer.mustMatch("right_curly")
-        
+
         return node
-        
-        
+
+
     elif tokenType == "if":
         node = builder.IF_build(tokenizer)
         builder.IF_setCondition(node, ParenExpression(tokenizer, staticContext))
@@ -202,10 +201,10 @@
 
         staticContext.statementStack.pop()
         builder.IF_finish(node)
-        
+
         return node
-        
-        
+
+
     elif tokenType == "switch":
         # This allows CASEs after a "default", which is in the standard.
         node = builder.SWITCH_build(tokenizer)
@@ -214,23 +213,23 @@
 
         tokenizer.mustMatch("left_curly")
         tokenType = tokenizer.get()
-        
+
         while tokenType != "right_curly":
             if tokenType == "default":
                 if node.defaultIndex >= 0:
                     raise SyntaxError("More than one switch default", tokenizer)
-                    
+
                 childNode = builder.DEFAULT_build(tokenizer)
                 builder.SWITCH_setDefaultIndex(node, len(node)-1)
                 tokenizer.mustMatch("colon")
                 builder.DEFAULT_initializeStatements(childNode, tokenizer)
-                
+
                 while True:
                     tokenType=tokenizer.peek(True)
                     if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
                         break
                     builder.DEFAULT_addStatement(childNode, Statement(tokenizer, staticContext))
-                
+
                 builder.DEFAULT_finish(childNode)
 
             elif tokenType == "case":
@@ -244,7 +243,7 @@
                     if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
                         break
                     builder.CASE_addStatement(childNode, Statement(tokenizer, staticContext))
-                
+
                 builder.CASE_finish(childNode)
 
             else:
@@ -257,32 +256,32 @@
         builder.SWITCH_finish(node)
 
         return node
-        
+
 
     elif tokenType == "for":
         node = builder.FOR_build(tokenizer)
         forBlock = None
-        
+
         if tokenizer.match("identifier") and tokenizer.token.value == "each":
             builder.FOR_rebuildForEach(node)
-            
+
         tokenizer.mustMatch("left_paren")
         tokenType = tokenizer.peek()
         childNode = None
-        
+
         if tokenType != "semicolon":
             staticContext.inForLoopInit = True
-            
+
             if tokenType == "var" or tokenType == "const":
                 tokenizer.get()
                 childNode = Variables(tokenizer, staticContext)
-            
+
             elif tokenType == "let":
                 tokenizer.get()
 
                 if tokenizer.peek() == "left_paren":
                     childNode = LetBlock(tokenizer, staticContext, False)
-                    
+
                 else:
                     # Let in for head, we need to add an implicit block
                     # around the rest of the for.
@@ -290,72 +289,72 @@
                     staticContext.blockId += 1
                     staticContext.statementStack.append(forBlock)
                     childNode = Variables(tokenizer, staticContext, forBlock)
-                
+
             else:
                 childNode = Expression(tokenizer, staticContext)
-            
+
             staticContext.inForLoopInit = False
 
         if childNode and tokenizer.match("in"):
             builder.FOR_rebuildForIn(node)
             builder.FOR_setObject(node, Expression(tokenizer, staticContext), forBlock)
-            
+
             if childNode.type == "var" or childNode.type == "let":
                 if len(childNode) != 1:
                     raise SyntaxError("Invalid for..in left-hand side", tokenizer)
 
                 builder.FOR_setIterator(node, childNode, forBlock)
-                
+
             else:
                 builder.FOR_setIterator(node, childNode, forBlock)
 
         else:
             builder.FOR_setSetup(node, childNode)
             tokenizer.mustMatch("semicolon")
-            
+
             if node.isEach:
                 raise SyntaxError("Invalid for each..in loop", tokenizer)
-                
+
             if tokenizer.peek() == "semicolon":
                 builder.FOR_setCondition(node, None)
             else:
                 builder.FOR_setCondition(node, Expression(tokenizer, staticContext))
-            
+
             tokenizer.mustMatch("semicolon")
-            
+
             if tokenizer.peek() == "right_paren":
                 builder.FOR_setUpdate(node, None)
-            else:    
+            else:
                 builder.FOR_setUpdate(node, Expression(tokenizer, staticContext))
-        
+
         tokenizer.mustMatch("right_paren")
         builder.FOR_setBody(node, nest(tokenizer, staticContext, node, Statement))
-        
+
         if forBlock:
             builder.BLOCK_finish(forBlock)
             staticContext.statementStack.pop()
-    
+
         builder.FOR_finish(node)
         return node
-        
-        
+
+
     elif tokenType == "while":
         node = builder.WHILE_build(tokenizer)
-        
+
         builder.WHILE_setCondition(node, ParenExpression(tokenizer, staticContext))
         builder.WHILE_setBody(node, nest(tokenizer, staticContext, node, Statement))
         builder.WHILE_finish(node)
-        
-        return node                                    
-        
-        
+
+        return node
+
+
     elif tokenType == "do":
         node = builder.DO_build(tokenizer)
-        
+
         builder.DO_setBody(node, nest(tokenizer, staticContext, node, Statement, "while"))
         builder.DO_setCondition(node, ParenExpression(tokenizer, staticContext))
         builder.DO_finish(node)
-        
+
         if not staticContext.ecma3OnlyMode:
             # <script language="JavaScript"> (without version hints) may need
             # automatic semicolon insertion without a newline after do-while.
@@ -364,17 +363,17 @@
             return node
 
         # NO RETURN
-      
-      
+
+
     elif tokenType == "break" or tokenType == "continue":
         if tokenType == "break":
-            node = builder.BREAK_build(tokenizer) 
+            node = builder.BREAK_build(tokenizer)
         else:
             node = builder.CONTINUE_build(tokenizer)
 
         if tokenizer.peekOnSameLine() == "identifier":
             tokenizer.get()
-            
+
             if tokenType == "break":
                 builder.BREAK_setLabel(node, tokenizer.token.value)
             else:
@@ -392,21 +391,21 @@
                 if getattr(statementStack[i], "label", None) == label:
                     break
 
-            # 
+            #
             # Both break and continue to label need to be handled specially
             # within a labeled loop, so that they target that loop. If not in
             # a loop, then break targets its labeled statement. Labels can be
             # nested so we skip all labels immediately enclosing the nearest
             # non-label statement.
-            # 
+            #
             while i < len(statementStack) - 1 and statementStack[i+1].type == "label":
                 i += 1
-                
+
             if i < len(statementStack) - 1 and getattr(statementStack[i+1], "isLoop", False):
                 i += 1
             elif tokenType == "continue":
                 raise SyntaxError("Invalid continue", tokenizer)
-                
+
         else:
             while True:
                 i -= 1
@@ -418,84 +417,84 @@
 
                 if getattr(statementStack[i], "isLoop", False) or (tokenType == "break" and statementStack[i].type == "switch"):
                     break
-        
+
         if tokenType == "break":
             builder.BREAK_finish(node)
         else:
             builder.CONTINUE_finish(node)
-        
+
         # NO RETURN
 
 
     elif tokenType == "try":
         node = builder.TRY_build(tokenizer)
         builder.TRY_setTryBlock(node, Block(tokenizer, staticContext))
-        
+
         while tokenizer.match("catch"):
             childNode = builder.CATCH_build(tokenizer)
             tokenizer.mustMatch("left_paren")
             nextTokenType = tokenizer.get()
-            
+
             if nextTokenType == "left_bracket" or nextTokenType == "left_curly":
                 # Destructured catch identifiers.
                 tokenizer.unget()
                 exception = DestructuringExpression(tokenizer, staticContext, True)
-            
+
             elif nextTokenType == "identifier":
                 exception = builder.CATCH_wrapException(tokenizer)
-            
+
             else:
                 raise SyntaxError("Missing identifier in catch", tokenizer)
-                
+
             builder.CATCH_setException(childNode, exception)
-            
+
             if tokenizer.match("if"):
                 if staticContext.ecma3OnlyMode:
                     raise SyntaxError("Illegal catch guard", tokenizer)
-                    
+
                 if node.getChildrenLength() > 0 and not node.getUnrelatedChildren()[0].guard:
                     raise SyntaxError("Guarded catch after unguarded", tokenizer)
-                    
+
                 builder.CATCH_setGuard(childNode, Expression(tokenizer, staticContext))
-                
+
             else:
                 builder.CATCH_setGuard(childNode, None)
-            
+
             tokenizer.mustMatch("right_paren")
-            
+
             builder.CATCH_setBlock(childNode, Block(tokenizer, staticContext))
             builder.CATCH_finish(childNode)
-            
+
             builder.TRY_addCatch(node, childNode)
-        
+
         builder.TRY_finishCatches(node)
-        
+
         if tokenizer.match("finally"):
             builder.TRY_setFinallyBlock(node, Block(tokenizer, staticContext))
-            
+
         if node.getChildrenLength() == 0 and not hasattr(node, "finallyBlock"):
             raise SyntaxError("Invalid try statement", tokenizer)
-            
+
         builder.TRY_finish(node)
         return node
-        
+
 
     elif tokenType == "catch" or tokenType == "finally":
-        raise SyntaxError(tokenizer.tokens[tokenType] + " without preceding try", tokenizer)
+        raise SyntaxError(tokens[tokenType] + " without preceding try", tokenizer)
 
 
     elif tokenType == "throw":
         node = builder.THROW_build(tokenizer)
-        
+
         builder.THROW_setException(node, Expression(tokenizer, staticContext))
         builder.THROW_finish(node)
-        
+
         # NO RETURN
 
 
     elif tokenType == "return":
         node = returnOrYield(tokenizer, staticContext)
-        
+
         # NO RETURN
 
 
@@ -511,31 +510,31 @@
 
     elif tokenType == "var" or tokenType == "const":
         node = Variables(tokenizer, staticContext)
-        
+
         # NO RETURN
-        
+
 
     elif tokenType == "let":
         if tokenizer.peek() == "left_paren":
             node = LetBlock(tokenizer, staticContext, True)
         else:
             node = Variables(tokenizer, staticContext)
-        
+
         # NO RETURN
-        
+
 
     elif tokenType == "debugger":
         node = builder.DEBUGGER_build(tokenizer)
-        
+
         # NO RETURN
-        
+
 
     elif tokenType == "newline" or tokenType == "semicolon":
         node = builder.SEMICOLON_build(tokenizer)
 
         builder.SEMICOLON_setExpression(node, None)
         builder.SEMICOLON_finish(tokenizer)
-        
+
         return node
 
 
@@ -547,21 +546,21 @@
             if tokenType == "colon":
                 label = tokenizer.token.value
                 statementStack = staticContext.statementStack
-               
+
                 i = len(statementStack)-1
                 while i >= 0:
                     if getattr(statementStack[i], "label", None) == label:
                         raise SyntaxError("Duplicate label", tokenizer)
-                    
+
                     i -= 1
-               
+
                 tokenizer.get()
                 node = builder.LABEL_build(tokenizer)
-                
+
                 builder.LABEL_setLabel(node, label)
                 builder.LABEL_setStatement(node, nest(tokenizer, staticContext, node, Statement))
                 builder.LABEL_finish(node)
-                
+
                 return node
 
         # Expression statement.
@@ -571,9 +570,9 @@
         builder.SEMICOLON_setExpression(node, Expression(tokenizer, staticContext))
         node.end = node.expression.end
         builder.SEMICOLON_finish(node)
-        
+
         # NO RETURN
-        
+
 
     MagicalSemicolon(tokenizer)
     return node
@@ -583,13 +582,13 @@
 def MagicalSemicolon(tokenizer):
     if tokenizer.line == tokenizer.token.line:
         tokenType = tokenizer.peekOnSameLine()
-    
+
         if tokenType != "end" and tokenType != "newline" and tokenType != "semicolon" and tokenType != "right_curly":
             raise SyntaxError("Missing ; before statement", tokenizer)
-    
+
     tokenizer.match("semicolon")
 
-    
+
 
 def returnOrYield(tokenizer, staticContext):
     builder = staticContext.builder
@@ -598,13 +597,13 @@
     if tokenType == "return":
         if not staticContext.inFunction:
             raise SyntaxError("Return not in function", tokenizer)
-            
+
         node = builder.RETURN_build(tokenizer)
-        
+
     else:
         if not staticContext.inFunction:
             raise SyntaxError("Yield not in function", tokenizer)
-            
+
         staticContext.isGenerator = True
         node = builder.YIELD_build(tokenizer)
 
@@ -615,7 +614,7 @@
             staticContext.hasReturnWithValue = True
         else:
             builder.YIELD_setValue(node, AssignExpression(tokenizer, staticContext))
-        
+
     elif tokenType == "return":
         staticContext.hasEmptyReturn = True
 
@@ -635,14 +634,14 @@
 def FunctionDefinition(tokenizer, staticContext, requireName, functionForm):
     builder = staticContext.builder
     functionNode = builder.FUNCTION_build(tokenizer)
-    
+
     if tokenizer.match("identifier"):
         builder.FUNCTION_setName(functionNode, tokenizer.token.value)
     elif requireName:
         raise SyntaxError("Missing def identifier", tokenizer)
 
     tokenizer.mustMatch("left_paren")
-    
+
     if not tokenizer.match("right_paren"):
         builder.FUNCTION_initParams(functionNode, tokenizer)
         prevParamNode = None
@@ -652,21 +651,21 @@
                 # Destructured formal parameters.
                 tokenizer.unget()
                 paramNode = DestructuringExpression(tokenizer, staticContext)
-                
+
             elif tokenType == "identifier":
                 paramNode = builder.FUNCTION_wrapParam(tokenizer)
-                
+
             else:
                 raise SyntaxError("Missing formal parameter", tokenizer)
-                
+
             builder.FUNCTION_addParam(functionNode, tokenizer, paramNode)
             builder.COMMENTS_add(paramNode, prevParamNode, tokenizer.getComments())
-        
+
             if not tokenizer.match("comma"):
                 break
-                
+
             prevParamNode = paramNode
-        
+
         tokenizer.mustMatch("right_paren")
 
     # Do we have an expression closure or a normal body?
@@ -676,8 +675,7 @@
         tokenizer.unget()
 
     childContext = StaticContext(True, builder)
-    tokenizer.save()
-    
+
     if staticContext.inFunction:
         # Inner functions don't reset block numbering, only functions at
         # the top level of the program do.
@@ -687,128 +685,49 @@
         builder.FUNCTION_setBody(functionNode, AssignExpression(tokenizer, staticContext))
         if staticContext.isGenerator:
             raise SyntaxError("Generator returns a value", tokenizer)
-            
+
     else:
         builder.FUNCTION_hoistVars(childContext.blockId)
         builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
 
-    # 
-    # Hoisting makes parse-time binding analysis tricky. A taxonomy of hoists:
-    # 
-    # 1. vars hoist to the top of their function:
-    # 
-    #    var x = 'global';
-    #    function f() {
-    #      x = 'f';
-    #      if (false)
-    #        var x;
-    #    }
-    #    f();
-    #    print(x); // "global"
-    # 
-    # 2. lets hoist to the top of their block:
-    # 
-    #    function f() { // id: 0
-    #      var x = 'f';
-    #      {
-    #        {
-    #          print(x); // "undefined"
-    #        }
-    #        let x;
-    #      }
-    #    }
-    #    f();
-    # 
-    # 3. inner functions at function top-level hoist to the beginning
-    #    of the function.
-    # 
-    # If the builder used is doing parse-time analyses, hoisting may
-    # invalidate earlier conclusions it makes about variable scope.
-    # 
-    # The builder can opt to set the needsHoisting flag in a
-    # CompilerContext (in the case of var and function hoisting) or in a
-    # node of type BLOCK (in the case of let hoisting). This signals for
-    # the parser to reparse sections of code.
-    # 
-    # To avoid exponential blowup, if a function at the program top-level
-    # has any hoists in its child blocks or inner functions, we reparse
-    # the entire toplevel function. Each toplevel function is parsed at
-    # most twice.
-    # 
-    # The list of declarations can be tied to block ids to aid talking
-    # about declarations of blocks that have not yet been fully parsed.
-    # 
-    # Blocks are already uniquely numbered; see the comment in
-    # Statements.
-    # 
-    
-    #
-    # wpbasti: 
-    # Don't have the feeling that I need this functionality because the
-    # tree is often modified before the variables and names inside are 
-    # of any interest. So better doing this in a post-scan.
-    #
-    
-    #
-    # if childContext.needsHoisting:
-    #     # Order is important here! Builders expect functions to come after variables!
-    #     builder.setHoists(functionNode.body.id, childContext.variables.concat(childContext.functions))
-    # 
-    #     if staticContext.inFunction:
-    #         # If an inner function needs hoisting, we need to propagate
-    #         # this flag up to the parent function.
-    #         staticContext.needsHoisting = True
-    #     
-    #     else:
-    #         # Only re-parse functions at the top level of the program.
-    #         childContext = StaticContext(True, builder)
-    #         tokenizer.rewind(rp)
-    #         
-    #         # Set a flag in case the builder wants to have different behavior
-    #         # on the second pass.
-    #         builder.secondPass = True
-    #         builder.FUNCTION_hoistVars(functionNode.body.id, True)
-    #         builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
-    #         builder.secondPass = False
-
     if tokenType == "left_curly":
         tokenizer.mustMatch("right_curly")
 
     functionNode.end = tokenizer.token.end
     functionNode.functionForm = functionForm
-    
+
     builder.COMMENTS_add(functionNode.body, functionNode.body, tokenizer.getComments())
     builder.FUNCTION_finish(functionNode, staticContext)
-    
+
     return functionNode
 
 
 
 def Variables(tokenizer, staticContext, letBlock=None):
     """Parses a comma-separated list of var declarations (and maybe initializations)."""
-    
+
     builder = staticContext.builder
     if tokenizer.token.type == "var":
         build = builder.VAR_build
         addDecl = builder.VAR_addDecl
         finish = builder.VAR_finish
         childContext = staticContext
-            
+
     elif tokenizer.token.type == "const":
         build = builder.CONST_build
         addDecl = builder.CONST_addDecl
         finish = builder.CONST_finish
         childContext = staticContext
-        
+
     elif tokenizer.token.type == "let" or tokenizer.token.type == "left_paren":
         build = builder.LET_build
         addDecl = builder.LET_addDecl
         finish = builder.LET_finish
-        
+
         if not letBlock:
             statementStack = staticContext.statementStack
             i = len(statementStack) - 1
-            
+
             # a BLOCK *must* be found.
             while statementStack[i].type != "block":
                 i -= 1
@@ -822,12 +741,12 @@
 
             else:
                 childContext = statementStack[i]
-            
+
         else:
             childContext = letBlock
 
     node = build(tokenizer)
-    
+
     while True:
         tokenType = tokenizer.get()
 
@@ -836,20 +755,20 @@
         # IDENTIFIER to mean both identifier declarations and destructured
         # declarations.
         childNode = builder.DECL_build(tokenizer)
-        
+
         if tokenType == "left_bracket" or tokenType == "left_curly":
             # Pass in childContext if we need to add each pattern matched into
             # its variables, else pass in staticContext.
             # Need to unget to parse the full destructured expression.
             tokenizer.unget()
             builder.DECL_setNames(childNode, DestructuringExpression(tokenizer, staticContext, True, childContext))
-            
+
             if staticContext.inForLoopInit and tokenizer.peek() == "in":
                 addDecl(node, childNode, childContext)
-                if tokenizer.match("comma"): 
+                if tokenizer.match("comma"):
                     continue
-                else: 
-                    break            
+                else:
+                    break
 
             tokenizer.mustMatch("assign")
             if tokenizer.token.assignOp:
@@ -859,15 +778,15 @@
             builder.DECL_setInitializer(childNode, AssignExpression(tokenizer, staticContext))
             builder.DECL_finish(childNode)
             addDecl(node, childNode, childContext)
-            
+
             # Copy over names for variable list
             # for nameNode in childNode.names:
             #    childContext.variables.add(nameNode.value)
-                
-            if tokenizer.match("comma"): 
+
+            if tokenizer.match("comma"):
                 continue
-            else: 
-                break            
+            else:
+                break
 
         if tokenType != "identifier":
             raise SyntaxError("Missing variable name", tokenizer)
@@ -884,16 +803,16 @@
             builder.DECL_setInitializer(childNode, initializerNode)
 
         builder.DECL_finish(childNode)
-        
+
         # If we directly use the node in "let" constructs
         # if not hasattr(childContext, "variables"):
         #    childContext.variables = set()
-        
+
         # childContext.variables.add(childNode.name)
-        
+
         if not tokenizer.match("comma"):
             break
-        
+
     finish(node)
     return node
 
@@ -921,7 +840,7 @@
     if isStatement:
         childNode = Block(tokenizer, staticContext)
         builder.LETBLOCK_setBlock(node, childNode)
-        
+
     else:
         childNode = AssignExpression(tokenizer, staticContext)
         builder.LETBLOCK_setExpression(node, childNode)
@@ -933,7 +852,7 @@
 def checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly=None, data=None):
     if node.type == "array_comp":
         raise SyntaxError("Invalid array comprehension left-hand side", tokenizer)
-        
+
     if node.type != "array_init" and node.type != "object_init":
         return
 
@@ -942,23 +861,23 @@
     for child in node:
         if child == None:
             continue
-        
+
         if child.type == "property_init":
             lhs = child[0]
             rhs = child[1]
         else:
             lhs = None
             rhs = None
-            
-    
+
+
         if rhs and (rhs.type == "array_init" or rhs.type == "object_init"):
             checkDestructuring(tokenizer, staticContext, rhs, simpleNamesOnly, data)
-            
+
         if lhs and simpleNamesOnly:
             # In declarations, lhs must be simple names
             if lhs.type != "identifier":
                 raise SyntaxError("Missing name in pattern", tokenizer)
-                
+
             elif data:
                 childNode = builder.DECL_build(tokenizer)
                 builder.DECL_setName(childNode, lhs.value)
@@ -969,7 +888,7 @@
 
                 # Each pattern needs to be added to variables.
                 # data.variables.add(childNode.name)
-                
+
 
 # JavaScript 1.7
 def DestructuringExpression(tokenizer, staticContext, simpleNamesOnly=None, data=None):
@@ -987,20 +906,20 @@
     builder.GENERATOR_setExpression(node, expression)
     builder.GENERATOR_setTail(node, comprehensionTail(tokenizer, staticContext))
     builder.GENERATOR_finish(node)
-    
+
     return node
 
 
 # JavaScript 1.7 Comprehensions Tails (Generators / Arrays)
 def comprehensionTail(tokenizer, staticContext):
     builder = staticContext.builder
-    
+
     # tokenizer.token.type must be "for"
     body = builder.COMPTAIL_build(tokenizer)
-    
+
     while True:
         node = builder.FOR_build(tokenizer)
-        
+
         # Comprehension tails are always for..in loops.
         builder.FOR_rebuildForIn(node)
         if tokenizer.match("identifier"):
@@ -1011,7 +930,7 @@
                 tokenizer.unget()
 
         tokenizer.mustMatch("left_paren")
-        
+
         tokenType = tokenizer.get()
         if tokenType == "left_bracket" or tokenType == "left_curly":
             tokenizer.unget()
@@ -1021,7 +940,7 @@
         elif tokenType == "identifier":
             # Removed variable/declaration substructure in Python port.
             # Variable declarations are not allowed here. So why process them in such a way?
-            
+
             # declaration = builder.DECL_build(tokenizer)
             # builder.DECL_setName(declaration, tokenizer.token.value)
             # builder.DECL_finish(declaration)
@@ -1032,18 +951,18 @@
 
             # Don't add to variables since the semantics of comprehensions is
             # such that the variables are in their own def when desugared.
-            
+
             identifier = builder.PRIMARY_build(tokenizer, "identifier")
             builder.FOR_setIterator(node, identifier)
 
         else:
             raise SyntaxError("Missing identifier", tokenizer)
-        
+
         tokenizer.mustMatch("in")
         builder.FOR_setObject(node, Expression(tokenizer, staticContext))
         tokenizer.mustMatch("right_paren")
         builder.COMPTAIL_addFor(body, node)
-        
+
         if not tokenizer.match("for"):
             break
 
@@ -1071,10 +990,10 @@
     if tokenizer.match("for"):
         if node.type == "yield" and not node.parenthesized:
             raise SyntaxError("Yield " + err, tokenizer)
-            
+
         if node.type == "comma" and not node.parenthesized:
             raise SyntaxError("Generator " + err, tokenizer)
-            
+
         node = GeneratorExpression(tokenizer, staticContext, node)
 
     tokenizer.mustMatch("right_paren")
@@ -1096,10 +1015,10 @@
             if childNode.type == "yield" and not childNode.parenthesized:
                 raise SyntaxError("Yield expression must be parenthesized", tokenizer)
             builder.COMMA_addOperand(node, AssignExpression(tokenizer, staticContext))
-            
+
             if not tokenizer.match("comma"):
                 break
-                
+
         builder.COMMA_finish(node)
 
     return node
@@ -1128,7 +1047,7 @@
         pass
     else:
         raise SyntaxError("Bad left-hand side of assignment", tokenizer)
-        
+
     builder.ASSIGN_setAssignOp(node, tokenizer.token.assignOp)
     builder.ASSIGN_addOperand(node, lhs)
     builder.ASSIGN_addOperand(node, AssignExpression(tokenizer, staticContext))
@@ -1153,20 +1072,20 @@
         staticContext.inForLoopInit = False
         builder.HOOK_setThenPart(node, AssignExpression(tokenizer, staticContext))
         staticContext.inForLoopInit = oldLoopInit
-        
+
         if not tokenizer.match("colon"):
             raise SyntaxError("Missing : after ?", tokenizer)
-            
+
         builder.HOOK_setElsePart(node, AssignExpression(tokenizer, staticContext))
         builder.HOOK_finish(node)
 
     return node
-    
+
 
 def OrExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = AndExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("or"):
         childNode = builder.OR_build(tokenizer)
         builder.OR_addOperand(childNode, node)
@@ -1194,7 +1113,7 @@
 def BitwiseOrExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = BitwiseXorExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("bitwise_or"):
         childNode = builder.BITWISEOR_build(tokenizer)
         builder.BITWISEOR_addOperand(childNode, node)
@@ -1208,7 +1127,7 @@
 def BitwiseXorExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = BitwiseAndExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("bitwise_xor"):
         childNode = builder.BITWISEXOR_build(tokenizer)
         builder.BITWISEXOR_addOperand(childNode, node)
@@ -1236,7 +1155,7 @@
 def EqualityExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = RelationalExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("eq") or tokenizer.match("ne") or tokenizer.match("strict_eq") or tokenizer.match("strict_ne"):
         childNode = builder.EQUALITY_build(tokenizer)
         builder.EQUALITY_addOperand(childNode, node)
@@ -1262,7 +1181,7 @@
         builder.RELATIONAL_addOperand(childNode, ShiftExpression(tokenizer, staticContext))
         builder.RELATIONAL_finish(childNode)
         node = childNode
-    
+
     staticContext.inForLoopInit = oldLoopInit
 
     return node
@@ -1271,7 +1190,7 @@
 def ShiftExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = AddExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("lsh") or tokenizer.match("rsh") or tokenizer.match("ursh"):
         childNode = builder.SHIFT_build(tokenizer)
         builder.SHIFT_addOperand(childNode, node)
@@ -1285,7 +1204,7 @@
 def AddExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = MultiplyExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("plus") or tokenizer.match("minus"):
         childNode = builder.ADD_build(tokenizer)
         builder.ADD_addOperand(childNode, node)
@@ -1299,7 +1218,7 @@
 def MultiplyExpression(tokenizer, staticContext):
     builder = staticContext.builder
     node = UnaryExpression(tokenizer, staticContext)
-    
+
     while tokenizer.match("mul") or tokenizer.match("div") or tokenizer.match("mod"):
         childNode = builder.MULTIPLY_build(tokenizer)
         builder.MULTIPLY_addOperand(childNode, node)
@@ -1317,7 +1236,7 @@
     if tokenType in ["delete", "void", "typeof", "not", "bitwise_not", "plus", "minus"]:
         node = builder.UNARY_build(tokenizer)
         builder.UNARY_addOperand(node, UnaryExpression(tokenizer, staticContext))
-    
+
     elif tokenType == "increment" or tokenType == "decrement":
         # Prefix increment/decrement.
         node = builder.UNARY_build(tokenizer)
@@ -1346,13 +1265,13 @@
     if tokenizer.match("new"):
         node = builder.MEMBER_build(tokenizer)
         builder.MEMBER_addOperand(node, MemberExpression(tokenizer, staticContext, False))
-        
+
         if tokenizer.match("left_paren"):
             builder.MEMBER_rebuildNewWithArgs(node)
             builder.MEMBER_addOperand(node, ArgumentList(tokenizer, staticContext))
-        
+
         builder.MEMBER_finish(node)
-    
+
     else:
         node = PrimaryExpression(tokenizer, staticContext)
 
@@ -1360,7 +1279,7 @@
         tokenType = tokenizer.get()
         if tokenType == "end":
             break
-        
+
         if tokenType == "dot":
             childNode = builder.MEMBER_build(tokenizer)
             builder.MEMBER_addOperand(childNode, node)
@@ -1391,20 +1310,20 @@
 def ArgumentList(tokenizer, staticContext):
     builder = staticContext.builder
     node = builder.LIST_build(tokenizer)
-    
+
     if tokenizer.match("right_paren", True):
         return node
-    
-    while True:    
+
+    while True:
         childNode = AssignExpression(tokenizer, staticContext)
         if childNode.type == "yield" and not childNode.parenthesized and tokenizer.peek() == "comma":
             raise SyntaxError("Yield expression must be parenthesized", tokenizer)
-            
+
         if tokenizer.match("for"):
             childNode = GeneratorExpression(tokenizer, staticContext, childNode)
             if len(node) > 1 or tokenizer.peek(True) == "comma":
                 raise SyntaxError("Generator expression must be parenthesized", tokenizer)
-        
+
         builder.LIST_addOperand(node, childNode)
         if not tokenizer.match("comma"):
             break
@@ -1428,7 +1347,7 @@
             tokenType = tokenizer.peek(True)
             if tokenType == "right_bracket":
                 break
-        
+
             if tokenType == "comma":
                 tokenizer.get()
                 builder.ARRAYINIT_addElement(node, None)
@@ -1446,7 +1365,7 @@
             builder.ARRAYCOMP_setExpression(childNode, node[0])
             builder.ARRAYCOMP_setTail(childNode, comprehensionTail(tokenizer, staticContext))
             node = childNode
-        
+
         builder.COMMENTS_add(node, node, tokenizer.getComments())
         tokenizer.mustMatch("right_bracket")
         builder.PRIMARY_finish(node)
@@ -1459,26 +1378,26 @@
                 tokenType = tokenizer.get()
                 tokenValue = getattr(tokenizer.token, "value", None)
                 comments = tokenizer.getComments()
-                
+
                 if tokenValue in ("get", "set") and tokenizer.peek() == "identifier":
                     if staticContext.ecma3OnlyMode:
                         raise SyntaxError("Illegal property accessor", tokenizer)
-                        
+
                     fd = FunctionDefinition(tokenizer, staticContext, True, "expressed_form")
                     builder.OBJECTINIT_addProperty(node, fd)
-                    
+
                 else:
                     if tokenType == "identifier" or tokenType == "number" or tokenType == "string":
                         id = builder.PRIMARY_build(tokenizer, "identifier")
                         builder.PRIMARY_finish(id)
-                        
+
                     elif tokenType == "right_curly":
                         if staticContext.ecma3OnlyMode:
                             raise SyntaxError("Illegal trailing ,", tokenizer)
-                            
+
                         tokenizer.unget()
                         break
-                            
+
                     else:
                         if tokenValue in jasy.js.tokenize.Lang.keywords:
                             id = builder.PRIMARY_build(tokenizer, "identifier")
@@ -1486,7 +1405,7 @@
                         else:
                             print("Value is '%s'" % tokenValue)
                             raise SyntaxError("Invalid property name", tokenizer)
-                    
+
                     if tokenizer.match("colon"):
                         childNode = builder.PROPERTYINIT_build(tokenizer)
                         builder.COMMENTS_add(childNode, node, comments)
@@ -1494,14 +1413,14 @@
                         builder.PROPERTYINIT_addOperand(childNode, AssignExpression(tokenizer, staticContext))
                         builder.PROPERTYINIT_finish(childNode)
                         builder.OBJECTINIT_addProperty(node, childNode)
-                        
+
                     else:
                         # Support, e.g., |var {staticContext, y} = o| as destructuring shorthand
                         # for |var {staticContext: staticContext, y: y} = o|, per proposed JS2/ES4 for JS1.8.
                         if tokenizer.peek() != "comma" and tokenizer.peek() != "right_curly":
                             raise SyntaxError("Missing : after property", tokenizer)
                         builder.OBJECTINIT_addProperty(node, id)
-                    
+
                 if not tokenizer.match("comma"):
                     break
 
--- a/ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py	Fri Aug 11 14:40:54 2017 +0200
@@ -1,11 +1,12 @@
 #
 # Jasy - Web Tooling Framework
 # Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
 #
 
 #
 # License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors: 
+# Authors:
 #   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
 #   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
 #
@@ -16,38 +17,37 @@
 
 class VanillaBuilder:
     """The vanilla AST builder."""
-    
+
     def COMMENTS_add(self, currNode, prevNode, comments):
         if not comments:
             return
-            
+
         currComments = []
         prevComments = []
         for comment in comments:
             # post comments - for previous node
             if comment.context == "inline":
                 prevComments.append(comment)
-                
+
             # all other comment styles are attached to the current one
             else:
                 currComments.append(comment)
-        
+
         # Merge with previously added ones
         if hasattr(currNode, "comments"):
             currNode.comments.extend(currComments)
         else:
             currNode.comments = currComments
-        
+
         if prevNode:
             if hasattr(prevNode, "comments"):
                 prevNode.comments.extend(prevComments)
             else:
                 prevNode.comments = prevComments
         else:
-            # Don't loose the comment in tree (if not previous node is there, attach it
-            # to this node)
+            # Don't lose the comment in the tree (if no previous node is there, attach it to this node)
             currNode.comments.extend(prevComments)
-    
+
     def IF_build(self, tokenizer):
         return jasy.js.parse.Node.Node(tokenizer, "if")
 
@@ -221,7 +221,7 @@
     def CATCH_build(self, tokenizer):
         node = jasy.js.parse.Node.Node(tokenizer, "catch")
         return node
-        
+
     def CATCH_wrapException(self, tokenizer):
         node = jasy.js.parse.Node.Node(tokenizer, "exception")
         node.value = tokenizer.token.value
@@ -321,7 +321,7 @@
                 node.type = "getter"
             else:
                 node.type = "setter"
-                
+
         return node
 
     def FUNCTION_setName(self, node, identifier):
@@ -329,24 +329,24 @@
 
     def FUNCTION_initParams(self, node, tokenizer):
         node.append(jasy.js.parse.Node.Node(tokenizer, "list"), "params")
-        
+
     def FUNCTION_wrapParam(self, tokenizer):
         param = jasy.js.parse.Node.Node(tokenizer)
         param.value = tokenizer.token.value
         return param
-        
+
     def FUNCTION_addParam(self, node, tokenizer, expression):
         node.params.append(expression)
-        
+
     def FUNCTION_setExpressionClosure(self, node, expressionClosure):
         node.expressionClosure = expressionClosure
 
     def FUNCTION_setBody(self, node, statement):
         # copy over function parameters to function body
-        #params = getattr(node, "params", None)
+        params = getattr(node, "params", None)
         #if params:
         #    statement.params = [param.value for param in params]
-            
+
         node.append(statement, "body")
 
     def FUNCTION_hoistVars(self, x):
@@ -562,13 +562,13 @@
         pass
 
     def UNARY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not",
-        # "bitwise_not", "unary_plus", "unary_minus", "increment", or "decrement".
+        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not", "bitwise_not",
+        # "unary_plus", "unary_minus", "increment", or "decrement".
         if tokenizer.token.type == "plus":
             tokenizer.token.type = "unary_plus"
         elif tokenizer.token.type == "minus":
             tokenizer.token.type = "unary_minus"
-            
+
         return jasy.js.parse.Node.Node(tokenizer)
 
     def UNARY_addOperand(self, node, childNode):
@@ -596,12 +596,11 @@
         pass
 
     def PRIMARY_build(self, tokenizer, tokenType):
-        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier",
-        # "number", "string", or "regexp".
+        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier", "number", "string", or "regexp".
         node = jasy.js.parse.Node.Node(tokenizer, tokenType)
         if tokenType in ("identifier", "string", "regexp", "number"):
             node.value = tokenizer.token.value
-            
+
         return node
 
     def PRIMARY_finish(self, node):
@@ -618,13 +617,13 @@
 
     def ARRAYCOMP_build(self, tokenizer):
         return jasy.js.parse.Node.Node(tokenizer, "array_comp")
-    
+
     def ARRAYCOMP_setExpression(self, node, expression):
         node.append(expression, "expression")
-    
+
     def ARRAYCOMP_setTail(self, node, childNode):
         node.append(childNode, "tail")
-    
+
     def ARRAYCOMP_finish(self, node):
         pass
 
--- a/ThirdParty/Jasy/jasy/js/tokenize/Lang.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/js/tokenize/Lang.py	Fri Aug 11 14:40:54 2017 +0200
@@ -3,9 +3,9 @@
 # Copyright 2010-2012 Zynga Inc.
 #
 
-"""JavaScript 1.7 keywords"""
 from __future__ import unicode_literals
 
+"""JavaScript 1.7 keywords"""
 keywords = set([
     "break",
     "case", "catch", "const", "continue",
--- a/ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py	Fri Aug 11 14:40:54 2017 +0200
@@ -187,6 +187,7 @@
             elif ch == "/" and next == "*":
                 self.cursor += 1
                 text = "/*"
+                inline = startLine == self.line and startLine > 1
                 commentStartLine = self.line
                 if startLine == self.line and not startOfFile:
                     mode = "inline"
--- a/ThirdParty/Jasy/jasy/license.md	Thu Aug 10 13:58:50 2017 +0200
+++ b/ThirdParty/Jasy/jasy/license.md	Fri Aug 11 14:40:54 2017 +0200
@@ -1,4 +1,5 @@
 Copyright (c) 2011-2012 Zynga Inc. http://zynga.com/
+Copyright (c) 2013-2014 Sebastian Werner http://sebastian-werner.com
 
 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/parse/AbstractNode.py	Fri Aug 11 14:40:54 2017 +0200
@@ -0,0 +1,357 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import json, copy
+
+class AbstractNode(list):
+
+    __slots__ = [
+        # core data
+        "line", "type", "tokenizer", "start", "end", "rel", "parent",
+
+        # dynamic added data by other modules
+        "comments", "scope", "values",
+
+        # node type specific
+        "value", "parenthesized", "fileId", "params",
+        "name", "initializer", "condition", "assignOp",
+        "thenPart", "elsePart", "statements",
+        "statement", "variables", "names", "postfix"
+    ]
+
+
+    def __init__(self, tokenizer=None, type=None, args=[]):
+        list.__init__(self)
+
+        self.start = 0
+        self.end = 0
+        self.line = None
+
+        if tokenizer:
+            token = getattr(tokenizer, "token", None)
+            if token:
+                # We may define a custom type but use the same positioning as another token
+                # e.g. transform curlys in block nodes, etc.
+                self.type = type if type else getattr(token, "type", None)
+                self.line = token.line
+
+                # Start & end are file positions for error handling.
+                self.start = token.start
+                self.end = token.end
+
+            else:
+                self.type = type
+                self.line = tokenizer.line
+                self.start = None
+                self.end = None
+
+            self.tokenizer = tokenizer
+
+        elif type:
+            self.type = type
+
+        for arg in args:
+            self.append(arg)
+
+
+    def getFileName(self):
+        """
+        Traverses up the tree to find a node with a fileId and returns it
+        """
+
+        node = self
+        while node:
+            fileId = getattr(node, "fileId", None)
+            if fileId is not None:
+                return fileId
+
+            node = getattr(node, "parent", None)
+
+
+    def getUnrelatedChildren(self):
+        """Collects all unrelated children"""
+
+        collection = []
+        for child in self:
+            if not hasattr(child, "rel"):
+                collection.append(child)
+
+        return collection
+
+
+    def getChildrenLength(self, filter=True):
+        """Number of (per default unrelated) children"""
+
+        count = 0
+        for child in self:
+            if not filter or not hasattr(child, "rel"):
+                count += 1
+        return count
+
+
+    def remove(self, kid):
+        """Removes the given kid"""
+
+        if not kid in self:
+            raise Exception("Given node is no child!")
+
+        if hasattr(kid, "rel"):
+            delattr(self, kid.rel)
+            del kid.rel
+            del kid.parent
+
+        list.remove(self, kid)
+
+
+    def insert(self, index, kid):
+        """Inserts the given kid at the given index"""
+
+        if index is None:
+            return self.append(kid)
+
+        if hasattr(kid, "parent"):
+            kid.parent.remove(kid)
+
+        kid.parent = self
+
+        return list.insert(self, index, kid)
+
+
+    def insertAll(self, index, kids):
+        """Inserts all kids starting with the given index"""
+
+        if index is None:
+            for kid in list(kids):
+                self.append(kid)
+        else:
+            for pos, kid in enumerate(list(kids)):
+                self.insert(index+pos, kid)
+
+
+    def insertAllReplace(self, orig, kids):
+        """Inserts all kids at the same position as the original node (which is removed afterwards)"""
+
+        index = self.index(orig)
+        for pos, kid in enumerate(list(kids)):
+            self.insert(index+pos, kid)
+
+        self.remove(orig)
+
+
+    def append(self, kid, rel=None):
+        """Appends the given kid with an optional relation hint"""
+
+        # kid can be null e.g. [1, , 2].
+        if kid:
+            if hasattr(kid, "parent"):
+                kid.parent.remove(kid)
+
+            # Debug
+            if not isinstance(kid, AbstractNode):
+                raise Exception("Invalid kid: %s" % kid)
+
+            if hasattr(kid, "tokenizer"):
+                if hasattr(kid, "start"):
+                    if not hasattr(self, "start") or self.start == None or kid.start < self.start:
+                        self.start = kid.start
+
+                if hasattr(kid, "end"):
+                    if not hasattr(self, "end") or self.end == None or self.end < kid.end:
+                        self.end = kid.end
+
+            kid.parent = self
+
+            # alias for function
+            if rel != None:
+                setattr(self, rel, kid)
+                setattr(kid, "rel", rel)
+
+        # Block None kids when they should be related
+        if not kid and rel:
+            return
+
+        return list.append(self, kid)
+
+
+    def replace(self, kid, repl):
+        """Replaces the given kid with a replacement kid"""
+
+        if repl in self:
+            self.remove(repl)
+
+        self[self.index(kid)] = repl
+
+        if hasattr(kid, "rel"):
+            repl.rel = kid.rel
+            setattr(self, kid.rel, repl)
+
+            # cleanup old kid
+            delattr(kid, "rel")
+
+        elif hasattr(repl, "rel"):
+            # delete old relation on new child
+            delattr(repl, "rel")
+
+        delattr(kid, "parent")
+        repl.parent = self
+
+        return kid
+
+
+    def toXml(self, format=True, indent=0, tab="  "):
+        """Converts the node to XML"""
+
+        lead = tab * indent if format else ""
+        innerLead = tab * (indent+1) if format else ""
+        lineBreak = "\n" if format else ""
+
+        relatedChildren = []
+        attrsCollection = []
+
+        for name in self.__slots__:
+            # "type" is used as node name - no need to repeat it as an attribute
+            # "parent" is a relation to the parent node - for serialization we ignore these at the moment
+            # "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children
+            # "start" and "end" are for debugging only
+            if hasattr(self, name) and name not in ("type", "parent", "comments", "selector", "rel", "start", "end") and name[0] != "_":
+                value = getattr(self, name)
+                if isinstance(value, AbstractNode):
+                    if hasattr(value, "rel"):
+                        relatedChildren.append(value)
+
+                elif type(value) in (bool, int, float, str, list, set, dict):
+                    if type(value) == bool:
+                        value = "true" if value else "false"
+                    elif type(value) in (int, float):
+                        value = str(value)
+                    elif type(value) in (list, set, dict):
+                        if type(value) == dict:
+                            value = value.keys()
+                        if len(value) == 0:
+                            continue
+                        try:
+                            value = ",".join(value)
+                        except TypeError as ex:
+                            raise Exception("Invalid attribute list child at: %s: %s" % (name, ex))
+
+                    attrsCollection.append('%s=%s' % (name, json.dumps(value)))
+
+        attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else ""
+
+        comments = getattr(self, "comments", None)
+        scope = getattr(self, "scope", None)
+        selector = getattr(self, "selector", None)
+
+        if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope and not selector:
+            result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak)
+
+        else:
+            result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak)
+
+            if comments:
+                for comment in comments:
+                    result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak)
+
+            if scope:
+                for statKey in scope:
+                    statValue = scope[statKey]
+                    if statValue != None and len(statValue) > 0:
+                        if type(statValue) is set:
+                            statValue = ",".join(statValue)
+                        elif type(statValue) is dict:
+                            statValue = ",".join(statValue.keys())
+
+                        result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak)
+
+            if selector:
+                for entry in selector:
+                    result += '%s<selector>%s</selector>%s' % (innerLead, entry, lineBreak)
+
+            for child in self:
+                if not child:
+                    result += "%s<none/>%s" % (innerLead, lineBreak)
+                elif not hasattr(child, "rel"):
+                    result += child.toXml(format, indent+1)
+                elif not child in relatedChildren:
+                    raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel))
+
+            for child in relatedChildren:
+                result += "%s<%s>%s" % (innerLead, child.rel, lineBreak)
+                result += child.toXml(format, indent+2)
+                result += "%s</%s>%s" % (innerLead, child.rel, lineBreak)
+
+            result += "%s</%s>%s" % (lead, self.type, lineBreak)
+
+        return result
+
+
+    def __deepcopy__(self, memo):
+        """Used by deepcopy function to clone AbstractNode instances"""
+
+        CurrentClass = self.__class__
+
+        # Create copy
+        if hasattr(self, "tokenizer"):
+            result = CurrentClass(tokenizer=self.tokenizer)
+        else:
+            result = CurrentClass(type=self.type)
+
+        # Copy children
+        for child in self:
+            if child is None:
+                list.append(result, None)
+            else:
+                # Using simple list appends for better performance
+                childCopy = copy.deepcopy(child, memo)
+                childCopy.parent = result
+                list.append(result, childCopy)
+
+        # Sync attributes
+        # Note: "parent" attribute is handled by append() already
+        for name in self.__slots__:
+            if hasattr(self, name) and not name in ("parent", "tokenizer"):
+                value = getattr(self, name)
+                if value is None:
+                    pass
+                elif type(value) in (bool, int, float, str):
+                    setattr(result, name, value)
+                elif type(value) in (list, set, dict, CurrentClass):
+                    setattr(result, name, copy.deepcopy(value, memo))
+                # Scope can be assigned (will be re-created when needed for the copied node)
+                elif name == "scope":
+                    result.scope = self.scope
+
+        return result
+
+
+    def getSource(self):
+        """Returns the source code of the node"""
+
+        if not self.tokenizer:
+            raise Exception("Could not find source for node '%s'" % self.type)
+
+        if getattr(self, "start", None) is not None:
+            if getattr(self, "end", None) is not None:
+                return self.tokenizer.source[self.start:self.end]
+            return self.tokenizer.source[self.start:]
+
+        if getattr(self, "end", None) is not None:
+            return self.tokenizer.source[:self.end]
+
+        return self.tokenizer.source[:]
+
+
+    # Map Python built-ins
+    __repr__ = toXml
+    __str__ = toXml
+
+
+    def __eq__(self, other):
+        return self is other
+
+    def __bool__(self):
+        return True
--- a/changelog	Thu Aug 10 13:58:50 2017 +0200
+++ b/changelog	Fri Aug 11 14:40:54 2017 +0200
@@ -9,6 +9,8 @@
      docutils (ReST) previewers
 - Web Browser (NG)
   -- added support for Google Safe Browsing
+- Third Party packages
+  -- updated jasy to 1.5-beta5 (latest release available)
 
 Version 17.08:
 - bug fixes
--- a/eric6.e4p	Thu Aug 10 13:58:50 2017 +0200
+++ b/eric6.e4p	Fri Aug 11 14:40:54 2017 +0200
@@ -981,6 +981,7 @@
     <Source>ThirdParty/Jasy/jasy/core/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/api/Comment.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/api/Text.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/api/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/parse/Node.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/parse/Parser.py</Source>
@@ -990,6 +991,8 @@
     <Source>ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/tokenize/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/js/util/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/parse/AbstractNode.py</Source>
+    <Source>ThirdParty/Jasy/jasy/parse/__init__.py</Source>
     <Source>ThirdParty/Pygments/__init__.py</Source>
     <Source>ThirdParty/Pygments/pygments/__init__.py</Source>
     <Source>ThirdParty/Pygments/pygments/cmdline.py</Source>

eric ide

mercurial