jasy: updated jasy to 1.5-beta6 (latest release available).

author      Detlev Offenbach <detlev@die-offenbachs.de>
date        Sat, 12 Jan 2019 12:11:42 +0100
changeset   6650:1dd52aa8897c
parent      6649:f1b3a73831c9
child       6651:e8f3b5568b21

Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py
ThirdParty/Jasy/jasy/__init__.py
ThirdParty/Jasy/jasy/core/Console.py
ThirdParty/Jasy/jasy/core/Text.py
ThirdParty/Jasy/jasy/core/__init__.py
ThirdParty/Jasy/jasy/js/__init__.py
ThirdParty/Jasy/jasy/js/api/Comment.py
ThirdParty/Jasy/jasy/js/api/Text.py
ThirdParty/Jasy/jasy/js/api/__init__.py
ThirdParty/Jasy/jasy/js/parse/Node.py
ThirdParty/Jasy/jasy/js/parse/Parser.py
ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py
ThirdParty/Jasy/jasy/js/parse/__init__.py
ThirdParty/Jasy/jasy/js/tokenize/Lang.py
ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py
ThirdParty/Jasy/jasy/js/tokenize/__init__.py
ThirdParty/Jasy/jasy/js/util/__init__.py
ThirdParty/Jasy/jasy/parse/AbstractNode.py
ThirdParty/Jasy/jasy/script/__init__.py
ThirdParty/Jasy/jasy/script/api/Comment.py
ThirdParty/Jasy/jasy/script/api/Text.py
ThirdParty/Jasy/jasy/script/api/__init__.py
ThirdParty/Jasy/jasy/script/output/Compressor.py
ThirdParty/Jasy/jasy/script/output/__init__.py
ThirdParty/Jasy/jasy/script/parse/Lang.py
ThirdParty/Jasy/jasy/script/parse/Node.py
ThirdParty/Jasy/jasy/script/parse/Parser.py
ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py
ThirdParty/Jasy/jasy/script/parse/__init__.py
ThirdParty/Jasy/jasy/script/tokenize/Lang.py
ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py
ThirdParty/Jasy/jasy/script/tokenize/__init__.py
ThirdParty/Jasy/jasy/script/util/__init__.py
Utilities/ClassBrowsers/jsclbr.py
changelog
eric6.e4p
--- a/Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py	Sat Jan 12 12:11:42 2019 +0100
@@ -172,8 +172,8 @@
             (file name, line number, column, codestring (only at syntax
             errors), the message, a list with arguments for the message)
     """
-    import jasy.js.parse.Parser as jsParser
-    import jasy.js.tokenize.Tokenizer as jsTokenizer
+    import jasy.script.parse.Parser as jsParser
+    import jasy.script.tokenize.Tokenizer as jsTokenizer
     
     codestring = normalizeCode(codestring)
     
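
The only change in the checker is the switch from the dropped jasy.js namespace to the new jasy.script one. A hypothetical compatibility shim (not part of this commit) could tolerate both layouts during such a migration:

```python
# Hypothetical shim (not in this commit): prefer the jasy.script
# namespace introduced with 1.5-beta6, fall back to the old jasy.js layout.
try:
    import jasy.script.parse.Parser as jsParser
    import jasy.script.tokenize.Tokenizer as jsTokenizer
except ImportError:
    import jasy.js.parse.Parser as jsParser
    import jasy.js.tokenize.Tokenizer as jsTokenizer
```
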
--- a/ThirdParty/Jasy/jasy/__init__.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/ThirdParty/Jasy/jasy/__init__.py	Sat Jan 12 12:11:42 2019 +0100
@@ -7,13 +7,33 @@
 """
 **Jasy - Web Tooling Framework**
 
-Jasy is a powerful Python3-based tooling framework. 
-It makes it easy to manage heavy web projects. 
-Its main goal is to offer an API which could be used by developers to write
-their custom build/deployment scripts.
+Jasy is a powerful Python3-based tooling framework.
+It makes it easy to manage heavy web projects.
+Its main goal is to offer an API which could be used by developers to write their custom build/deployment scripts.
 """
 
 from __future__ import unicode_literals
 
-__version__ = "1.5-beta5"
+__version__ = "1.5-beta6"
 __author__ = "Sebastian Werner <info@sebastian-werner.net>"
+
+import os.path
+datadir = os.path.join(os.path.dirname(__file__), "data")
+
+def info():
+    """
+    Prints information about Jasy to the console.
+    """
+
+    import jasy.core.Console as Console
+
+    print("Jasy %s is a powerful web tooling framework" % __version__)
+    print("Visit %s for details." % Console.colorize("https://github.com/sebastian-software/jasy", "underline"))
+    print()
+
+
+class UserError(Exception):
+    """
+    Standard Jasy error class raised whenever something happens which the system understands (somehow expected)
+    """
+    pass
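
The new UserError class marks failures the framework anticipates, so callers can report them as plain messages rather than tracebacks. A minimal sketch of that pattern; the run_task function is illustrative only:

```python
import jasy

def run_task():
    # illustrative task that fails in an anticipated way
    raise jasy.UserError("Project configuration is missing")

try:
    run_task()
except jasy.UserError as err:
    # expected failure: print the message instead of a traceback
    print("Error: %s" % err)
```
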
--- a/ThirdParty/Jasy/jasy/core/Console.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/ThirdParty/Jasy/jasy/core/Console.py	Sat Jan 12 12:11:42 2019 +0100
@@ -2,27 +2,124 @@
 
 # Copyright (c) 2013 - 2019 Detlev Offenbach <detlev@die-offenbachs.de>
 #
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+"""
+Centralized logging for complete Jasy environment.
+"""
 
 from __future__ import unicode_literals
 
-import logging
+import logging, sys
+
+__all__ = ["colorize", "header", "error", "warn", "info", "debug", "indent", "outdent"]
+
+
+
+# ---------------------------------------------
+# Colorized Output
+# ---------------------------------------------
+
+__colors = {
+    'bold'      : ['\033[1m',  '\033[22m'],
+    'italic'    : ['\033[3m',  '\033[23m'],
+    'underline' : ['\033[4m',  '\033[24m'],
+    'inverse'   : ['\033[7m',  '\033[27m'],
+
+    'white'     : ['\033[37m', '\033[39m'],
+    'grey'      : ['\033[90m', '\033[39m'],
+    'black'     : ['\033[30m', '\033[39m'],
+
+    'blue'      : ['\033[34m', '\033[39m'],
+    'cyan'      : ['\033[36m', '\033[39m'],
+    'green'     : ['\033[32m', '\033[39m'],
+    'magenta'   : ['\033[35m', '\033[39m'],
+    'red'       : ['\033[31m', '\033[39m'],
+    'yellow'    : ['\033[33m', '\033[39m']
+}
+
+def colorize(text, color="red"):
+    """Used to colorize the given text for output on Unix terminals"""
+
+    # Not supported on console on Windows native
+    # Note: Cygwin has a different platform value
+    if sys.platform == "win32":
+        return text
+
+    entry = __colors[color]
+    return "%s%s%s" % (entry[0], text, entry[1])
+
+
+
+# ---------------------------------------------
+# Logging API
+# ---------------------------------------------
+
+__level = 0
+
+def __format(text):
+    global __level
+
+    if __level == 0 or text == "":
+        return text
+    elif __level == 1:
+        return "- %s" % text
+    else:
+        return "%s- %s" % ("  " * (__level-1), text)
+
+def indent():
+    """
+    Increments global indenting level. Prepends spaces to the next
+    logging messages until outdent() is called.
+
+    Should be called whenever entering a structural logging section.
+    """
+
+    global __level
+    __level += 1
+
+def outdent(all=False):
+    """
+    Decrements global indenting level.
+    Should be called whenever leaving a structural logging section.
+    """
+
+    global __level
+
+    if all:
+        __level = 0
+    else:
+        __level -= 1
 
 def error(text, *argv):
-    """Outputs an error message"""
+    """Outputs an error message (visible by default)"""
 
-    logging.error(text, *argv)
+    logging.warn(__format(colorize(colorize(text, "red"), "bold")), *argv)
 
 def warn(text, *argv):
-    """Outputs a warning"""
+    """Outputs a warning (visible by default)"""
 
-    logging.warn(text, *argv)
+    logging.warn(__format(colorize(text, "red")), *argv)
 
 def info(text, *argv):
-    """Outputs an info message"""
+    """Outputs an info message (visible by default, disable via --quiet option)"""
 
-    logging.info(text, *argv)
+    logging.info(__format(text), *argv)
 
 def debug(text, *argv):
-    """Outputs a debug message"""
+    """Outputs a debug message (hidden by default, enable via --verbose option)"""
+
+    logging.debug(__format(text), *argv)
+
+def header(title):
+    """Outputs the given title with prominent formatting"""
 
-    logging.debug(text, *argv)
+    global __level
+    __level = 0
+
+    logging.info("")
+    logging.info(colorize(colorize(">>> %s" % title.upper(), "blue"), "bold"))
+    logging.info(colorize("-------------------------------------------------------------------------------", "blue"))
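
A minimal usage sketch for the rewritten Console module, assuming the standard logging module is configured to emit bare messages; on native Windows consoles colorize() returns the text unchanged:

```python
import logging
import jasy.core.Console as Console

logging.basicConfig(level=logging.DEBUG, format="%(message)s")

Console.header("build")                 # resets indentation, prominent title
Console.info("Processing %s files", 3)  # plain info message
Console.indent()                        # following messages get a "- " prefix
Console.info("app.js")
Console.outdent(all=True)               # reset the indent level in one step
Console.error("failed to parse %s", "app.js")  # bold red on Unix terminals
```
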
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/core/Text.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,87 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import re
+
+
+#
+# MARKDOWN TO HTML
+#
+
+try:
+    # import hoedown
+    #
+    # hoedownExt = hoedown.EXT_AUTOLINK | hoedown.EXT_NO_INTRA_EMPHASIS | hoedown.EXT_FENCED_CODE | hoedown.EXT_TABLES | hoedown.EXT_FOOTNOTES | hoedown.EXT_QUOTE | hoedown.EXT_STRIKETHROUGH | hoedown.EXT_UNDERLINE | hoedown.EXT_HIGHLIGHT
+    # hoedownExt = hoedown.EXT_AUTOLINK
+    # hoedownRender = hoedown.HTML_SKIP_STYLE | hoedown.HTML_SMARTYPANTS
+
+    import misaka
+
+    hoedownExt = misaka.EXT_AUTOLINK | misaka.EXT_NO_INTRA_EMPHASIS | misaka.EXT_FENCED_CODE
+    hoedownRender = misaka.HTML_SKIP_STYLE | misaka.HTML_SMARTYPANTS
+    hoedown = misaka
+
+    supportsMarkdown = True
+
+except:
+    supportsMarkdown = False
+
+def markdownToHtml(markdownStr):
+    """
+    Converts Markdown to HTML. Supports GitHub's fenced code blocks,
+    auto linking and typographic features by SmartyPants.
+    """
+
+    return hoedown.html(markdownStr, hoedownExt, hoedownRender)
+
+
+#
+# HIGHLIGHT CODE BLOCKS
+#
+
+try:
+    from pygments import highlight
+    from pygments.formatters import HtmlFormatter
+    from pygments.lexers import get_lexer_by_name
+
+    # By http://misaka.61924.nl/#toc_3
+    codeblock = re.compile(r'<pre(?: lang="([a-z0-9]+)")?><code(?: class="([a-z0-9]+).*?")?>(.*?)</code></pre>', re.IGNORECASE | re.DOTALL)
+
+    supportsHighlighting = True
+
+except ImportError:
+
+    supportsHighlighting = False
+
+def highlightCodeBlocks(html, tabsize=2, defaultlang="javascript"):
+    """
+    Patches 'code' elements in HTML to apply HTML based syntax highlighting. Automatically
+    chooses the matching language detected via a CSS class of the 'code' element.
+    """
+
+    def unescape(html):
+        html = html.replace('&lt;', '<')
+        html = html.replace('&gt;', '>')
+        html = html.replace('&amp;', '&')
+        html = html.replace('&quot;', '"')
+        return html.replace('&#39;', "'")
+
+    def replace(match):
+        language, classname, code = match.groups()
+        if language is None:
+            language = classname if classname else defaultlang
+
+        lexer = get_lexer_by_name(language, tabsize=tabsize)
+        formatter = HtmlFormatter(linenos="table")
+
+        code = unescape(code)
+
+        # for some reason pygments escapes our code once again so we need to reverse it twice
+        return unescape(highlight(code, lexer, formatter))
+
+    return codeblock.sub(replace, html)
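
Both conversions degrade gracefully when the optional packages (misaka for Markdown, Pygments for highlighting) are missing. A short sketch of the intended pipeline, under the assumption that both packages are installed:

```python
import jasy.core.Text as Text

source = "Auto-link https://example.com and show `code` spans."
if Text.supportsMarkdown:
    html = Text.markdownToHtml(source)
    if Text.supportsHighlighting:
        html = Text.highlightCodeBlocks(html)  # Pygments-based, optional
    print(html)
else:
    print(source)  # fall back to the raw text
```
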
--- a/ThirdParty/Jasy/jasy/core/__init__.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/ThirdParty/Jasy/jasy/core/__init__.py	Sat Jan 12 12:11:42 2019 +0100
@@ -4,6 +4,6 @@
 #
 
 #
-# This is an eric6 dummy package to provide some specially variants of modules
+# This is an eric6 dummy package to provide some special variants of modules
 # found in the standard jasy package
 #
--- a/ThirdParty/Jasy/jasy/js/api/Comment.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,163 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-from __future__ import unicode_literals
-
-import re
-
-import jasy.core.Console as Console
-
-__all__ = ["CommentException", "Comment"]
-
-
-# Used to measure the doc indent size (with leading stars in front of content)
-docIndentReg = re.compile(r"^(\s*\*\s*)(\S*)")
-
-
-class CommentException(Exception):
-    """
-    Thrown when errors during comment processing are detected.
-    """
-    def __init__(self, message, lineNo=0):
-        Exception.__init__(self, "Comment error: %s (line: %s)" % (message, lineNo+1))
-
-
-class Comment():
-    """
-    Comment class is attached to parsed nodes and used to store all comment related
-    information.
-    
-    The class supports a new Markdown and TomDoc inspired dialect to make
-    developers' lives easier and their work less repetitive.
-    """
-    
-    # Relation to code
-    context = None
-    
-    # Collected text of the comment
-    text = None
-    
-    def __init__(self, text, context=None, lineNo=0, indent="", fileId=None):
-
-        # Store context (relation to code)
-        self.context = context
-        
-        # Store fileId
-        self.fileId = fileId
-        
-        # Figure out the type of the comment based on the starting characters
-
-        # Inline comments
-        if text.startswith("//"):
-            # "// hello" => "   hello"
-            text = "  " + text[2:]
-            self.variant = "single"
-            
-        # Doc comments
-        elif text.startswith("/**"):
-            # "/** hello */" => "    hello "
-            text = "   " + text[3:-2]
-            self.variant = "doc"
-
-        # Protected comments which should not be removed
-        # (e.g these are used for license blocks)
-        elif text.startswith("/*!"):
-            # "/*! hello */" => "    hello "
-            text = "   " + text[3:-2]
-            self.variant = "protected"
-            
-        # A normal multiline comment
-        elif text.startswith("/*"):
-            # "/* hello */" => "   hello "
-            text = "  " + text[2:-2]
-            self.variant = "multi"
-            
-        else:
-            raise CommentException("Invalid comment text: %s" % text, lineNo)
-
-        # Multi line comments need to have their indentation removed
-        if "\n" in text:
-            text = self.__outdent(text, indent, lineNo)
-
-        # For single line comments strip the surrounding whitespace
-        else:
-            # " hello " => "hello"
-            text = text.strip()
-
-        # The text of the comment
-        self.text = text
-    
-    def __outdent(self, text, indent, startLineNo):
-        """
-        Outdent multi-line comment text and filter empty lines.
-        """
-        
-        lines = []
-
-        # First, split up the comments lines and remove the leading indentation
-        for lineNo, line in enumerate((indent+text).split("\n")):
-
-            if line.startswith(indent):
-                lines.append(line[len(indent):].rstrip())
-
-            elif line.strip() == "":
-                lines.append("")
-
-            else:
-                # Only warn for doc comments, otherwise it might just be code commented
-                # out which is sometimes formatted pretty crazy when commented out
-                if self.variant == "doc":
-                    Console.warn("Could not outdent doc comment at line %s in %s",
-                        startLineNo+lineNo, self.fileId)
-                    
-                return text
-
-        # Find first line with real content, then grab the one after it to get the 
-        # characters which need 
-        outdentString = ""
-        for lineNo, line in enumerate(lines):
-
-            if line != "" and line.strip() != "":
-                matchedDocIndent = docIndentReg.match(line)
-                
-                if not matchedDocIndent:
-                    # As soon as we find a non doc indent like line we stop
-                    break
-                    
-                elif matchedDocIndent.group(2) != "":
-                    # otherwise we look for content behind the indent to get the 
-                    # correct real indent (with spaces)
-                    outdentString = matchedDocIndent.group(1)
-                    break
-                
-            lineNo += 1
-
-        # Process outdenting to all lines (remove the outdentString from the start
-        # of the lines)
-        if outdentString != "":
-
-            lineNo = 0
-            outdentStringLen = len(outdentString)
-
-            for lineNo, line in enumerate(lines):
-                if len(line) <= outdentStringLen:
-                    lines[lineNo] = ""
-
-                else:
-                    if not line.startswith(outdentString):
-                        
-                        # Only warn for doc comments, otherwise it might just be code
-                        # commented out which is sometimes formatted pretty crazy when
-                        # commented out
-                        if self.variant == "doc":
-                            Console.warn(
-                                "Invalid indentation in doc comment at line %s in %s",
-                                startLineNo+lineNo, self.fileId)
-                        
-                    else:
-                        lines[lineNo] = line[outdentStringLen:]
-
-        # Merge final lines and remove leading and trailing new lines
-        return "\n".join(lines).strip("\n")
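
This module is deleted because eric6 now uses the jasy.script tree (see jasy/script/api/Comment.py in the file list). Assuming the relocated class keeps this constructor, the comment variant is derived from the leading characters:

```python
import jasy.script.api.Comment as CommentModule

for text in ("// single", "/* multi */", "/** doc */", "/*! license */"):
    comment = CommentModule.Comment(text)
    print(comment.variant)  # single, multi, doc, protected
```
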
--- a/ThirdParty/Jasy/jasy/js/api/Text.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-from __future__ import unicode_literals
-
-import re
-import jasy.core.Console as Console
-
-__all__ = ["extractSummary"]
-
-# Used to filter first paragraph from HTML
-paragraphExtract = re.compile(r"^(.*?)(\. |\? |\! |$)")
-newlineMatcher = re.compile(r"\n")
-
-# Used to remove markup sequences after doc processing of comment text
-stripMarkup = re.compile(r"<.*?>")
-
-def extractSummary(text):
-    try:
-        text = stripMarkup.sub("", newlineMatcher.sub(" ", text))
-        matched = paragraphExtract.match(text)
-    except TypeError:
-        matched = None
-        
-    if matched:
-        summary = matched.group(1)
-        if summary is not None:
-            if not summary.endswith((".", "!", "?")):
-                summary = summary.strip() + "."
-            return summary
-            
-    else:
-        Console.warn("Unable to extract summary for: %s", text)
-    
-    return None
-    
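
The successor module is jasy/script/api/Text.py above. Assuming extractSummary keeps the behaviour shown here, it returns the first sentence of the doc text and normalises the trailing punctuation:

```python
import jasy.script.api.Text as ApiText

print(ApiText.extractSummary("Returns the sum. Supports ints and floats."))
# -> "Returns the sum."
print(ApiText.extractSummary("No trailing punctuation"))
# -> "No trailing punctuation."
```
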
--- a/ThirdParty/Jasy/jasy/js/parse/Node.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,331 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors: 
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004)
-#   - Sebastian Werner <info@sebastian-werner.net> (Refactoring Python) (2010)
-#
-
-from __future__ import unicode_literals
-
-import json
-import copy
-
-class Node(list):
-    
-    __slots__ = [
-        # core data
-        "line", "type", "tokenizer", "start", "end", "rel", "parent", 
-        
-        # dynamic added data by other modules
-        "comments", "scope", 
-        
-        # node type specific
-        "value", "expression", "body", "functionForm", "parenthesized",
-        "fileId", "params", "name", "readOnly", "initializer", "condition",
-        "isLoop", "isEach", "object", "assignOp", "iterator", "thenPart",
-        "exception", "elsePart", "setup", "postfix", "update", "tryBlock",
-        "block", "defaultIndex", "discriminant", "label", "statements",
-        "finallyBlock", "statement", "variables", "names", "guard", "for",
-        "tail", "expressionClosure"
-    ]
-    
-    
-    def __init__(self, tokenizer=None, type=None, args=[]):
-        list.__init__(self)
-        
-        self.start = 0
-        self.end = 0
-        self.line = None
-        
-        if tokenizer:
-            token = getattr(tokenizer, "token", None)
-            if token:
-                # We may define a custom type but use the same positioning as another token
-                # e.g. transform curlys in block nodes, etc.
-                self.type = type if type else getattr(token, "type", None)
-                self.line = token.line
-                
-                # Start & end are file positions for error handling.
-                self.start = token.start
-                self.end = token.end
-            
-            else:
-                self.type = type
-                self.line = tokenizer.line
-                self.start = None
-                self.end = None
-
-            self.tokenizer = tokenizer
-            
-        elif type:
-            self.type = type
-
-        for arg in args:
-            self.append(arg)
-            
-            
-    def getUnrelatedChildren(self):
-        """Collects all unrelated children"""
-        
-        collection = []
-        for child in self:
-            if not hasattr(child, "rel"):
-                collection.append(child)
-            
-        return collection
-        
-
-    def getChildrenLength(self, filter=True):
-        """Number of (per default unrelated) children"""
-        
-        count = 0
-        for child in self:
-            if not filter or not hasattr(child, "rel"):
-                count += 1
-        return count
-            
-    
-    def remove(self, kid):
-        """Removes the given kid"""
-        
-        if not kid in self:
-            raise Exception("Given node is no child!")
-        
-        if hasattr(kid, "rel"):
-            delattr(self, kid.rel)
-            del kid.rel
-            del kid.parent
-            
-        list.remove(self, kid)
-        
-        
-    def insert(self, index, kid):
-        """Inserts the given kid at the given index"""
-        
-        if index is None:
-            return self.append(kid)
-            
-        if hasattr(kid, "parent"):
-            kid.parent.remove(kid)
-            
-        kid.parent = self
-
-        return list.insert(self, index, kid)
-            
-
-    def append(self, kid, rel=None):
-        """Appends the given kid with an optional relation hint"""
-        
-        # kid can be null e.g. [1, , 2].
-        if kid:
-            if hasattr(kid, "parent"):
-                kid.parent.remove(kid)
-            
-            # Debug
-            if not isinstance(kid, Node):
-                raise Exception("Invalid kid: %s" % kid)
-            
-            if hasattr(kid, "tokenizer"):
-                if hasattr(kid, "start"):
-                    if not hasattr(self, "start") or \
-                       self.start == None or \
-                       kid.start < self.start:
-                        self.start = kid.start
-
-                if hasattr(kid, "end"):
-                    if not hasattr(self, "end") or \
-                       self.end == None or \
-                       self.end < kid.end:
-                        self.end = kid.end
-                
-            kid.parent = self
-            
-            # alias for function
-            if rel != None:
-                setattr(self, rel, kid)
-                setattr(kid, "rel", rel)
-
-        # Block None kids when they should be related
-        if not kid and rel:
-            return
-            
-        return list.append(self, kid)
-
-    
-    def replace(self, kid, repl):
-        """Replaces the given kid with a replacement kid"""
-        
-        if repl in self:
-            self.remove(repl)
-        
-        self[self.index(kid)] = repl
-        
-        if hasattr(kid, "rel"):
-            repl.rel = kid.rel
-            setattr(self, kid.rel, repl)
-            
-            # cleanup old kid
-            delattr(kid, "rel")
-            
-            
-        elif hasattr(repl, "rel"):
-            # delete old relation on new child
-            delattr(repl, "rel")
-
-        delattr(kid, "parent")
-        repl.parent = self
-        
-        return kid
-        
-
-    def toXml(self, format=True, indent=0, tab="  "):
-        """Converts the node to XML"""
-
-        lead = tab * indent if format else ""
-        innerLead = tab * (indent+1) if format else ""
-        lineBreak = "\n" if format else ""
-
-        relatedChildren = []
-        attrsCollection = []
-        
-        for name in self.__slots__:
-            # "type" is used as node name - no need to repeat it as an attribute
-            # "parent" is a relation to the parent node - for serialization we ignore these at the moment
-            # "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children
-            # "start" and "end" are for debugging only
-            if hasattr(self, name) and name not in ("type", "parent", "comments", "rel", "start", "end") and name[0] != "_":
-                value = getattr(self, name)
-                if isinstance(value, Node):
-                    if hasattr(value, "rel"):
-                        relatedChildren.append(value)
-
-                elif type(value) in (bool, int, float, str, list, set, dict):
-                    if type(value) == bool:
-                        value = "true" if value else "false" 
-                    elif type(value) in (int, float):
-                        value = str(value)
-                    elif type(value) in (list, set, dict):
-                        if type(value) == dict:
-                            value = value.keys()
-                        if len(value) == 0:
-                            continue
-                        try:
-                            value = ",".join(value)
-                        except TypeError:
-                            raise Exception("Invalid attribute list child at: %s" % name)
-                            
-                    attrsCollection.append('%s=%s' % (name, json.dumps(value)))
-
-        attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else ""
-        
-        comments = getattr(self, "comments", None)
-        scope = getattr(self, "scope", None)
-        
-        if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope:
-            result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak)
-
-        else:
-            result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak)
-            
-            if comments:
-                for comment in comments:
-                    result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak)
-                    
-            if scope:
-                for statKey in scope:
-                    statValue = scope[statKey]
-                    if statValue != None and len(statValue) > 0:
-                        if type(statValue) is set:
-                            statValue = ",".join(statValue)
-                        elif type(statValue) is dict:
-                            statValue = ",".join(statValue.keys())
-                        
-                        result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak)
-
-            for child in self:
-                if not child:
-                    result += "%s<none/>%s" % (innerLead, lineBreak)
-                elif not hasattr(child, "rel"):
-                    result += child.toXml(format, indent+1)
-                elif not child in relatedChildren:
-                    raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel))
-
-            for child in relatedChildren:
-                result += "%s<%s>%s" % (innerLead, child.rel, lineBreak)
-                result += child.toXml(format, indent+2)
-                result += "%s</%s>%s" % (innerLead, child.rel, lineBreak)
-
-            result += "%s</%s>%s" % (lead, self.type, lineBreak)
-
-        return result
-        
-        
-    def __deepcopy__(self, memo):
-        """Used by deepcopy function to clone Node instances"""
-        
-        # Create copy
-        if hasattr(self, "tokenizer"):
-            result = Node(tokenizer=self.tokenizer)
-        else:
-            result = Node(type=self.type)
-        
-        # Copy children
-        for child in self:
-            if child is None:
-                list.append(result, None)
-            else:
-                # Using simple list appends for better performance
-                childCopy = copy.deepcopy(child, memo)
-                childCopy.parent = result
-                list.append(result, childCopy)
-        
-        # Sync attributes
-        # Note: "parent" attribute is handled by append() already
-        for name in self.__slots__:
-            if hasattr(self, name) and not name in ("parent", "tokenizer"):
-                value = getattr(self, name)
-                if value is None:
-                    pass
-                elif type(value) in (bool, int, float, str):
-                    setattr(result, name, value)
-                elif type(value) in (list, set, dict, Node):
-                    setattr(result, name, copy.deepcopy(value, memo))
-                # Scope can be assigned (will be re-created when needed for the copied node)
-                elif name == "scope":
-                    result.scope = self.scope
-
-        return result
-        
-        
-    def getSource(self):
-        """Returns the source code of the node"""
-
-        if not self.tokenizer:
-            raise Exception("Could not find source for node '%s'" % self.type)
-            
-        if getattr(self, "start", None) is not None:
-            if getattr(self, "end", None) is not None:
-                return self.tokenizer.source[self.start:self.end]
-            return self.tokenizer.source[self.start:]
-    
-        if getattr(self, "end", None) is not None:
-            return self.tokenizer.source[:self.end]
-    
-        return self.tokenizer.source[:]
-
-
-    # Map Python built-ins
-    __repr__ = toXml
-    __str__ = toXml
-    
-    
-    def __eq__(self, other):
-        return self is other
-
-    def __bool__(self): 
-        return True
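
The node API lives on in jasy/script/parse/Node.py, now backed by the new jasy/parse/AbstractNode.py. A small sketch, assuming the relocated class keeps this interface:

```python
import jasy.script.parse.Node as NodeModule

root = NodeModule.Node(type="script")
child = NodeModule.Node(type="var")
root.append(child, rel="body")  # related children gain an attribute alias

assert root.body is child and child.rel == "body"
print(root.toXml())  # XML dump, also used for __repr__/__str__
```
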
--- a/ThirdParty/Jasy/jasy/js/parse/Parser.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1448 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors:
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010-2012)
-#
-
-from __future__ import unicode_literals
-
-import jasy.js.tokenize.Tokenizer
-import jasy.js.parse.VanillaBuilder
-import jasy.js.tokenize.Lang
-
-__all__ = [ "parse", "parseExpression" ]
-
-def parseExpression(source, fileId=None, line=1, builder=None):
-    if builder == None:
-        builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
-
-    # Convert source into expression statement to be friendly to the Tokenizer
-    if not source.endswith(";"):
-        source = source + ";"
-
-    tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
-    staticContext = StaticContext(False, builder)
-
-    return Expression(tokenizer, staticContext)
-
-
-
-def parse(source, fileId=None, line=1, builder=None):
-    if builder == None:
-        builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
-
-    tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
-    staticContext = StaticContext(False, builder)
-    node = Script(tokenizer, staticContext)
-
-    # store fileId on top-level node
-    node.fileId = tokenizer.fileId
-
-    # add missing comments e.g. empty file with only a comment etc.
-    # if there is something non-attached by an inner node it is attached to
-    # the top level node, which is not correct, but might be better than
-    # just ignoring the comment after all.
-    if len(node) > 0:
-        builder.COMMENTS_add(node[-1], None, tokenizer.getComments())
-    else:
-        builder.COMMENTS_add(node, None, tokenizer.getComments())
-
-    if not tokenizer.done():
-        raise SyntaxError("Unexpected end of file", tokenizer)
-
-    return node
-
-
-
-class SyntaxError(Exception):
-    def __init__(self, message, tokenizer):
-        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, tokenizer.fileId, tokenizer.line))
-
-
-# Used as a status container during tree-building for every def body and the global body
-class StaticContext(object):
-    # inFunction is used to check if a return stm appears in a valid context.
-    def __init__(self, inFunction, builder):
-        # Whether this is inside a function, mostly True, only for top-level scope it's False
-        self.inFunction = inFunction
-
-        self.hasEmptyReturn = False
-        self.hasReturnWithValue = False
-        self.isGenerator = False
-        self.blockId = 0
-        self.builder = builder
-        self.statementStack = []
-
-        # Sets to store variable uses
-        # self.functions = set()
-        # self.variables = set()
-
-        # Status
-        # self.needsHoisting = False
-        self.bracketLevel = 0
-        self.curlyLevel = 0
-        self.parenLevel = 0
-        self.hookLevel = 0
-
-        # Configure strict ecmascript 3 mode
-        self.ecma3OnlyMode = False
-
-        # Status flag during parsing
-        self.inForLoopInit = False
-
-
-def Script(tokenizer, staticContext):
-    """Parses the toplevel and def bodies."""
-    node = Statements(tokenizer, staticContext)
-
-    # change type from "block" to "script" for script root
-    node.type = "script"
-
-    # copy over data from compiler context
-    # node.functions = staticContext.functions
-    # node.variables = staticContext.variables
-
-    return node
-
-
-def nest(tokenizer, staticContext, node, func, end=None):
-    """Statement stack and nested statement handler."""
-    staticContext.statementStack.append(node)
-    node = func(tokenizer, staticContext)
-    staticContext.statementStack.pop()
-    end and tokenizer.mustMatch(end)
-
-    return node
-
-
-def Statements(tokenizer, staticContext):
-    """Parses a list of Statements."""
-
-    builder = staticContext.builder
-    node = builder.BLOCK_build(tokenizer, staticContext.blockId)
-    staticContext.blockId += 1
-
-    builder.BLOCK_hoistLets(node)
-    staticContext.statementStack.append(node)
-
-    prevNode = None
-    while not tokenizer.done() and tokenizer.peek(True) != "right_curly":
-        comments = tokenizer.getComments()
-        childNode = Statement(tokenizer, staticContext)
-        builder.COMMENTS_add(childNode, prevNode, comments)
-        builder.BLOCK_addStatement(node, childNode)
-        prevNode = childNode
-
-    staticContext.statementStack.pop()
-    builder.BLOCK_finish(node)
-
-    # if getattr(node, "needsHoisting", False):
-    #     # TODO
-    #     raise Exception("Needs hoisting went true!!!")
-    #     builder.setHoists(node.id, node.variables)
-    #     # Propagate up to the function.
-    #     staticContext.needsHoisting = True
-
-    return node
-
-
-def Block(tokenizer, staticContext):
-    tokenizer.mustMatch("left_curly")
-    node = Statements(tokenizer, staticContext)
-    tokenizer.mustMatch("right_curly")
-
-    return node
-
-
-def Statement(tokenizer, staticContext):
-    """Parses a Statement."""
-
-    tokenType = tokenizer.get(True)
-    builder = staticContext.builder
-
-    # Cases for statements ending in a right curly return early, avoiding the
-    # common semicolon insertion magic after this switch.
-
-    if tokenType == "function":
-        # "declared_form" extends functions of staticContext, "statement_form" doesn't.
-        if len(staticContext.statementStack) > 1:
-            kind = "statement_form"
-        else:
-            kind = "declared_form"
-
-        return FunctionDefinition(tokenizer, staticContext, True, kind)
-
-
-    elif tokenType == "left_curly":
-        node = Statements(tokenizer, staticContext)
-        tokenizer.mustMatch("right_curly")
-
-        return node
-
-
-    elif tokenType == "if":
-        node = builder.IF_build(tokenizer)
-        builder.IF_setCondition(node, ParenExpression(tokenizer, staticContext))
-        staticContext.statementStack.append(node)
-        builder.IF_setThenPart(node, Statement(tokenizer, staticContext))
-
-        if tokenizer.match("else"):
-            comments = tokenizer.getComments()
-            elsePart = Statement(tokenizer, staticContext)
-            builder.COMMENTS_add(elsePart, node, comments)
-            builder.IF_setElsePart(node, elsePart)
-
-        staticContext.statementStack.pop()
-        builder.IF_finish(node)
-
-        return node
-
-
-    elif tokenType == "switch":
-        # This allows CASEs after a "default", which is in the standard.
-        node = builder.SWITCH_build(tokenizer)
-        builder.SWITCH_setDiscriminant(node, ParenExpression(tokenizer, staticContext))
-        staticContext.statementStack.append(node)
-
-        tokenizer.mustMatch("left_curly")
-        tokenType = tokenizer.get()
-
-        while tokenType != "right_curly":
-            if tokenType == "default":
-                if node.defaultIndex >= 0:
-                    raise SyntaxError("More than one switch default", tokenizer)
-
-                childNode = builder.DEFAULT_build(tokenizer)
-                builder.SWITCH_setDefaultIndex(node, len(node)-1)
-                tokenizer.mustMatch("colon")
-                builder.DEFAULT_initializeStatements(childNode, tokenizer)
-
-                while True:
-                    tokenType=tokenizer.peek(True)
-                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
-                        break
-                    builder.DEFAULT_addStatement(childNode, Statement(tokenizer, staticContext))
-
-                builder.DEFAULT_finish(childNode)
-
-            elif tokenType == "case":
-                childNode = builder.CASE_build(tokenizer)
-                builder.CASE_setLabel(childNode, Expression(tokenizer, staticContext))
-                tokenizer.mustMatch("colon")
-                builder.CASE_initializeStatements(childNode, tokenizer)
-
-                while True:
-                    tokenType=tokenizer.peek(True)
-                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
-                        break
-                    builder.CASE_addStatement(childNode, Statement(tokenizer, staticContext))
-
-                builder.CASE_finish(childNode)
-
-            else:
-                raise SyntaxError("Invalid switch case", tokenizer)
-
-            builder.SWITCH_addCase(node, childNode)
-            tokenType = tokenizer.get()
-
-        staticContext.statementStack.pop()
-        builder.SWITCH_finish(node)
-
-        return node
-
-
-    elif tokenType == "for":
-        node = builder.FOR_build(tokenizer)
-        forBlock = None
-
-        if tokenizer.match("identifier") and tokenizer.token.value == "each":
-            builder.FOR_rebuildForEach(node)
-
-        tokenizer.mustMatch("left_paren")
-        tokenType = tokenizer.peek()
-        childNode = None
-
-        if tokenType != "semicolon":
-            staticContext.inForLoopInit = True
-
-            if tokenType == "var" or tokenType == "const":
-                tokenizer.get()
-                childNode = Variables(tokenizer, staticContext)
-
-            elif tokenType == "let":
-                tokenizer.get()
-
-                if tokenizer.peek() == "left_paren":
-                    childNode = LetBlock(tokenizer, staticContext, False)
-
-                else:
-                    # Let in for head, we need to add an implicit block
-                    # around the rest of the for.
-                    forBlock = builder.BLOCK_build(tokenizer, staticContext.blockId)
-                    staticContext.blockId += 1
-                    staticContext.statementStack.append(forBlock)
-                    childNode = Variables(tokenizer, staticContext, forBlock)
-
-            else:
-                childNode = Expression(tokenizer, staticContext)
-
-            staticContext.inForLoopInit = False
-
-        if childNode and tokenizer.match("in"):
-            builder.FOR_rebuildForIn(node)
-            builder.FOR_setObject(node, Expression(tokenizer, staticContext), forBlock)
-
-            if childNode.type == "var" or childNode.type == "let":
-                if len(childNode) != 1:
-                    raise SyntaxError("Invalid for..in left-hand side", tokenizer)
-
-                builder.FOR_setIterator(node, childNode, forBlock)
-
-            else:
-                builder.FOR_setIterator(node, childNode, forBlock)
-
-        else:
-            builder.FOR_setSetup(node, childNode)
-            tokenizer.mustMatch("semicolon")
-
-            if node.isEach:
-                raise SyntaxError("Invalid for each..in loop", tokenizer)
-
-            if tokenizer.peek() == "semicolon":
-                builder.FOR_setCondition(node, None)
-            else:
-                builder.FOR_setCondition(node, Expression(tokenizer, staticContext))
-
-            tokenizer.mustMatch("semicolon")
-
-            if tokenizer.peek() == "right_paren":
-                builder.FOR_setUpdate(node, None)
-            else:
-                builder.FOR_setUpdate(node, Expression(tokenizer, staticContext))
-
-        tokenizer.mustMatch("right_paren")
-        builder.FOR_setBody(node, nest(tokenizer, staticContext, node, Statement))
-
-        if forBlock:
-            builder.BLOCK_finish(forBlock)
-            staticContext.statementStack.pop()
-
-        builder.FOR_finish(node)
-        return node
-
-
-    elif tokenType == "while":
-        node = builder.WHILE_build(tokenizer)
-
-        builder.WHILE_setCondition(node, ParenExpression(tokenizer, staticContext))
-        builder.WHILE_setBody(node, nest(tokenizer, staticContext, node, Statement))
-        builder.WHILE_finish(node)
-
-        return node
-
-
-    elif tokenType == "do":
-        node = builder.DO_build(tokenizer)
-
-        builder.DO_setBody(node, nest(tokenizer, staticContext, node, Statement, "while"))
-        builder.DO_setCondition(node, ParenExpression(tokenizer, staticContext))
-        builder.DO_finish(node)
-
-        if not staticContext.ecma3OnlyMode:
-            # <script language="JavaScript"> (without version hints) may need
-            # automatic semicolon insertion without a newline after do-while.
-            # See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
-            tokenizer.match("semicolon")
-            return node
-
-        # NO RETURN
-
-
-    elif tokenType == "break" or tokenType == "continue":
-        if tokenType == "break":
-            node = builder.BREAK_build(tokenizer)
-        else:
-            node = builder.CONTINUE_build(tokenizer)
-
-        if tokenizer.peekOnSameLine() == "identifier":
-            tokenizer.get()
-
-            if tokenType == "break":
-                builder.BREAK_setLabel(node, tokenizer.token.value)
-            else:
-                builder.CONTINUE_setLabel(node, tokenizer.token.value)
-
-        statementStack = staticContext.statementStack
-        i = len(statementStack)
-        label = node.label if hasattr(node, "label") else None
-
-        if label:
-            while True:
-                i -= 1
-                if i < 0:
-                    raise SyntaxError("Label not found", tokenizer)
-                if getattr(statementStack[i], "label", None) == label:
-                    break
-
-            #
-            # Both break and continue to label need to be handled specially
-            # within a labeled loop, so that they target that loop. If not in
-            # a loop, then break targets its labeled statement. Labels can be
-            # nested so we skip all labels immediately enclosing the nearest
-            # non-label statement.
-            #
-            while i < len(statementStack) - 1 and statementStack[i+1].type == "label":
-                i += 1
-
-            if i < len(statementStack) - 1 and getattr(statementStack[i+1], "isLoop", False):
-                i += 1
-            elif tokenType == "continue":
-                raise SyntaxError("Invalid continue", tokenizer)
-
-        else:
-            while True:
-                i -= 1
-                if i < 0:
-                    if tokenType == "break":
-                        raise SyntaxError("Invalid break", tokenizer)
-                    else:
-                        raise SyntaxError("Invalid continue", tokenizer)
-
-                if getattr(statementStack[i], "isLoop", False) or (tokenType == "break" and statementStack[i].type == "switch"):
-                    break
-
-        if tokenType == "break":
-            builder.BREAK_finish(node)
-        else:
-            builder.CONTINUE_finish(node)
-
-        # NO RETURN
-
-
-    elif tokenType == "try":
-        node = builder.TRY_build(tokenizer)
-        builder.TRY_setTryBlock(node, Block(tokenizer, staticContext))
-
-        while tokenizer.match("catch"):
-            childNode = builder.CATCH_build(tokenizer)
-            tokenizer.mustMatch("left_paren")
-            nextTokenType = tokenizer.get()
-
-            if nextTokenType == "left_bracket" or nextTokenType == "left_curly":
-                # Destructured catch identifiers.
-                tokenizer.unget()
-                exception = DestructuringExpression(tokenizer, staticContext, True)
-
-            elif nextTokenType == "identifier":
-                exception = builder.CATCH_wrapException(tokenizer)
-
-            else:
-                raise SyntaxError("Missing identifier in catch", tokenizer)
-
-            builder.CATCH_setException(childNode, exception)
-
-            if tokenizer.match("if"):
-                if staticContext.ecma3OnlyMode:
-                    raise SyntaxError("Illegal catch guard", tokenizer)
-
-                if node.getChildrenLength() > 0 and not node.getUnrelatedChildren()[0].guard:
-                    raise SyntaxError("Guarded catch after unguarded", tokenizer)
-
-                builder.CATCH_setGuard(childNode, Expression(tokenizer, staticContext))
-
-            else:
-                builder.CATCH_setGuard(childNode, None)
-
-            tokenizer.mustMatch("right_paren")
-
-            builder.CATCH_setBlock(childNode, Block(tokenizer, staticContext))
-            builder.CATCH_finish(childNode)
-
-            builder.TRY_addCatch(node, childNode)
-
-        builder.TRY_finishCatches(node)
-
-        if tokenizer.match("finally"):
-            builder.TRY_setFinallyBlock(node, Block(tokenizer, staticContext))
-
-        if node.getChildrenLength() == 0 and not hasattr(node, "finallyBlock"):
-            raise SyntaxError("Invalid try statement", tokenizer)
-
-        builder.TRY_finish(node)
-        return node
-
-
-    elif tokenType == "catch" or tokenType == "finally":
-        raise SyntaxError(tokens[tokenType] + " without preceding try", tokenizer)
-
-
-    elif tokenType == "throw":
-        node = builder.THROW_build(tokenizer)
-
-        builder.THROW_setException(node, Expression(tokenizer, staticContext))
-        builder.THROW_finish(node)
-
-        # NO RETURN
-
-
-    elif tokenType == "return":
-        node = returnOrYield(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "with":
-        node = builder.WITH_build(tokenizer)
-
-        builder.WITH_setObject(node, ParenExpression(tokenizer, staticContext))
-        builder.WITH_setBody(node, nest(tokenizer, staticContext, node, Statement))
-        builder.WITH_finish(node)
-
-        return node
-
-
-    elif tokenType == "var" or tokenType == "const":
-        node = Variables(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "let":
-        if tokenizer.peek() == "left_paren":
-            node = LetBlock(tokenizer, staticContext, True)
-        else:
-            node = Variables(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "debugger":
-        node = builder.DEBUGGER_build(tokenizer)
-
-        # NO RETURN
-
-
-    elif tokenType == "newline" or tokenType == "semicolon":
-        node = builder.SEMICOLON_build(tokenizer)
-
-        builder.SEMICOLON_setExpression(node, None)
-        builder.SEMICOLON_finish(tokenizer)
-
-        return node
-
-
-    else:
-        if tokenType == "identifier":
-            tokenType = tokenizer.peek()
-
-            # Labeled statement.
-            if tokenType == "colon":
-                label = tokenizer.token.value
-                statementStack = staticContext.statementStack
-
-                i = len(statementStack)-1
-                while i >= 0:
-                    if getattr(statementStack[i], "label", None) == label:
-                        raise SyntaxError("Duplicate label", tokenizer)
-
-                    i -= 1
-
-                tokenizer.get()
-                node = builder.LABEL_build(tokenizer)
-
-                builder.LABEL_setLabel(node, label)
-                builder.LABEL_setStatement(node, nest(tokenizer, staticContext, node, Statement))
-                builder.LABEL_finish(node)
-
-                return node
-
-        # Expression statement.
-        # We unget the current token to parse the expression as a whole.
-        node = builder.SEMICOLON_build(tokenizer)
-        tokenizer.unget()
-        builder.SEMICOLON_setExpression(node, Expression(tokenizer, staticContext))
-        node.end = node.expression.end
-        builder.SEMICOLON_finish(node)
-
-        # NO RETURN
-
-
-    MagicalSemicolon(tokenizer)
-    return node
-
-
-
-def MagicalSemicolon(tokenizer):
-    if tokenizer.line == tokenizer.token.line:
-        tokenType = tokenizer.peekOnSameLine()
-
-        if tokenType != "end" and tokenType != "newline" and tokenType != "semicolon" and tokenType != "right_curly":
-            raise SyntaxError("Missing ; before statement", tokenizer)
-
-    tokenizer.match("semicolon")
-
-
-
-def returnOrYield(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.token.type
-
-    if tokenType == "return":
-        if not staticContext.inFunction:
-            raise SyntaxError("Return not in function", tokenizer)
-
-        node = builder.RETURN_build(tokenizer)
-
-    else:
-        if not staticContext.inFunction:
-            raise SyntaxError("Yield not in function", tokenizer)
-
-        staticContext.isGenerator = True
-        node = builder.YIELD_build(tokenizer)
-
-    nextTokenType = tokenizer.peek(True)
-    if nextTokenType != "end" and nextTokenType != "newline" and nextTokenType != "semicolon" and nextTokenType != "right_curly" and (tokenType != "yield" or (nextTokenType != tokenType and nextTokenType != "right_bracket" and nextTokenType != "right_paren" and nextTokenType != "colon" and nextTokenType != "comma")):
-        if tokenType == "return":
-            builder.RETURN_setValue(node, Expression(tokenizer, staticContext))
-            staticContext.hasReturnWithValue = True
-        else:
-            builder.YIELD_setValue(node, AssignExpression(tokenizer, staticContext))
-
-    elif tokenType == "return":
-        staticContext.hasEmptyReturn = True
-
-    # Disallow return v; in generator.
-    if staticContext.hasReturnWithValue and staticContext.isGenerator:
-        raise SyntaxError("Generator returns a value", tokenizer)
-
-    if tokenType == "return":
-        builder.RETURN_finish(node)
-    else:
-        builder.YIELD_finish(node)
-
-    return node
-
-
-
-def FunctionDefinition(tokenizer, staticContext, requireName, functionForm):
-    builder = staticContext.builder
-    functionNode = builder.FUNCTION_build(tokenizer)
-
-    if tokenizer.match("identifier"):
-        builder.FUNCTION_setName(functionNode, tokenizer.token.value)
-    elif requireName:
-        raise SyntaxError("Missing def identifier", tokenizer)
-
-    tokenizer.mustMatch("left_paren")
-
-    if not tokenizer.match("right_paren"):
-        builder.FUNCTION_initParams(functionNode, tokenizer)
-        prevParamNode = None
-        while True:
-            tokenType = tokenizer.get()
-            if tokenType == "left_bracket" or tokenType == "left_curly":
-                # Destructured formal parameters.
-                tokenizer.unget()
-                paramNode = DestructuringExpression(tokenizer, staticContext)
-
-            elif tokenType == "identifier":
-                paramNode = builder.FUNCTION_wrapParam(tokenizer)
-
-            else:
-                raise SyntaxError("Missing formal parameter", tokenizer)
-
-            builder.FUNCTION_addParam(functionNode, tokenizer, paramNode)
-            builder.COMMENTS_add(paramNode, prevParamNode, tokenizer.getComments())
-
-            if not tokenizer.match("comma"):
-                break
-
-            prevParamNode = paramNode
-
-        tokenizer.mustMatch("right_paren")
-
-    # Do we have an expression closure or a normal body?
-    tokenType = tokenizer.get()
-    if tokenType != "left_curly":
-        builder.FUNCTION_setExpressionClosure(functionNode, True)
-        tokenizer.unget()
-
-    childContext = StaticContext(True, builder)
-
-    if staticContext.inFunction:
-        # Inner functions don't reset block numbering, only functions at
-        # the top level of the program do.
-        childContext.blockId = staticContext.blockId
-
-    if tokenType != "left_curly":
-        builder.FUNCTION_setBody(functionNode, AssignExpression(tokenizer, staticContext))
-        if staticContext.isGenerator:
-            raise SyntaxError("Generator returns a value", tokenizer)
-
-    else:
-        builder.FUNCTION_hoistVars(childContext.blockId)
-        builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
-
-    if tokenType == "left_curly":
-        tokenizer.mustMatch("right_curly")
-
-    functionNode.end = tokenizer.token.end
-    functionNode.functionForm = functionForm
-
-    builder.COMMENTS_add(functionNode.body, functionNode.body, tokenizer.getComments())
-    builder.FUNCTION_finish(functionNode, staticContext)
-
-    return functionNode
-
-
-
-def Variables(tokenizer, staticContext, letBlock=None):
-    """Parses a comma-separated list of var declarations (and maybe initializations)."""
-
-    builder = staticContext.builder
-    if tokenizer.token.type == "var":
-        build = builder.VAR_build
-        addDecl = builder.VAR_addDecl
-        finish = builder.VAR_finish
-        childContext = staticContext
-
-    elif tokenizer.token.type == "const":
-        build = builder.CONST_build
-        addDecl = builder.CONST_addDecl
-        finish = builder.CONST_finish
-        childContext = staticContext
-
-    elif tokenizer.token.type == "let" or tokenizer.token.type == "left_paren":
-        build = builder.LET_build
-        addDecl = builder.LET_addDecl
-        finish = builder.LET_finish
-
-        if not letBlock:
-            statementStack = staticContext.statementStack
-            i = len(statementStack) - 1
-
-            # a BLOCK *must* be found.
-            while statementStack[i].type != "block":
-                i -= 1
-
-            # Lets at the def toplevel are just vars, at least in SpiderMonkey.
-            if i == 0:
-                build = builder.VAR_build
-                addDecl = builder.VAR_addDecl
-                finish = builder.VAR_finish
-                childContext = staticContext
-
-            else:
-                childContext = statementStack[i]
-
-        else:
-            childContext = letBlock
-
-    node = build(tokenizer)
-
-    while True:
-        tokenType = tokenizer.get()
-
-        # Done in the Python port: DECL_build creates a dedicated
-        # "declaration" node instead of overloading IDENTIFIER to mean both
-        # identifier declarations and destructured declarations (the FIXME
-        # from the JS original).
-        childNode = builder.DECL_build(tokenizer)
-
-        if tokenType == "left_bracket" or tokenType == "left_curly":
-            # Pass in childContext if we need to add each pattern matched into
-            # its variables, else pass in staticContext.
-            # Need to unget to parse the full destructured expression.
-            tokenizer.unget()
-            builder.DECL_setNames(childNode, DestructuringExpression(tokenizer, staticContext, True, childContext))
-
-            if staticContext.inForLoopInit and tokenizer.peek() == "in":
-                addDecl(node, childNode, childContext)
-                if tokenizer.match("comma"):
-                    continue
-                else:
-                    break
-
-            tokenizer.mustMatch("assign")
-            if tokenizer.token.assignOp:
-                raise SyntaxError("Invalid variable initialization", tokenizer)
-
-            # Parse the init as a normal assignment.
-            builder.DECL_setInitializer(childNode, AssignExpression(tokenizer, staticContext))
-            builder.DECL_finish(childNode)
-            addDecl(node, childNode, childContext)
-
-            # Copy over names for variable list
-            # for nameNode in childNode.names:
-            #    childContext.variables.add(nameNode.value)
-
-            if tokenizer.match("comma"):
-                continue
-            else:
-                break
-
-        if tokenType != "identifier":
-            raise SyntaxError("Missing variable name", tokenizer)
-
-        builder.DECL_setName(childNode, tokenizer.token.value)
-        builder.DECL_setReadOnly(childNode, node.type == "const")
-        addDecl(node, childNode, childContext)
-
-        if tokenizer.match("assign"):
-            if tokenizer.token.assignOp:
-                raise SyntaxError("Invalid variable initialization", tokenizer)
-
-            initializerNode = AssignExpression(tokenizer, staticContext)
-            builder.DECL_setInitializer(childNode, initializerNode)
-
-        builder.DECL_finish(childNode)
-
-        # If we directly use the node in "let" constructs
-        # if not hasattr(childContext, "variables"):
-        #    childContext.variables = set()
-
-        # childContext.variables.add(childNode.name)
-
-        if not tokenizer.match("comma"):
-            break
-
-    finish(node)
-    return node
-
-
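-# Illustrative inputs handled by Variables() above:
-#
-#     var a = 1, b;          // two declarations, one initializer
-#     const PI = 3.14;       // read-only declarations
-#     var [x, y] = pair;     // destructured declaration (JavaScript 1.7)
-#
-# For the destructured form the identifier check is skipped and the pattern
-# is parsed via DestructuringExpression() instead.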
-
-def LetBlock(tokenizer, staticContext, isStatement):
-    """Does not handle let inside of for loop init."""
-    builder = staticContext.builder
-
-    # tokenizer.token.type must be "let"
-    node = builder.LETBLOCK_build(tokenizer)
-    tokenizer.mustMatch("left_paren")
-    builder.LETBLOCK_setVariables(node, Variables(tokenizer, staticContext, node))
-    tokenizer.mustMatch("right_paren")
-
-    if isStatement and tokenizer.peek() != "left_curly":
-        # If this is really an expression in let statement guise, then we
-        # need to wrap the "let_block" node in a "semicolon" node so that we pop
-        # the return value of the expression.
-        childNode = builder.SEMICOLON_build(tokenizer)
-        builder.SEMICOLON_setExpression(childNode, node)
-        builder.SEMICOLON_finish(childNode)
-        isStatement = False
-
-    if isStatement:
-        childNode = Block(tokenizer, staticContext)
-        builder.LETBLOCK_setBlock(node, childNode)
-
-    else:
-        childNode = AssignExpression(tokenizer, staticContext)
-        builder.LETBLOCK_setExpression(node, childNode)
-
-    builder.LETBLOCK_finish(node)
-    return node
-
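-# LetBlock() covers both forms of let blocks (JavaScript 1.7):
-#
-#     let (x = 1) { doSomething(x); }    // statement form -> LETBLOCK_setBlock
-#     let (x = 1) x + 1                  // expression form -> LETBLOCK_setExpression
-#
-# In statement position the expression form is additionally wrapped in a
-# "semicolon" node so that the expression value is popped.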
-
-def checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly=None, data=None):
-    if node.type == "array_comp":
-        raise SyntaxError("Invalid array comprehension left-hand side", tokenizer)
-
-    if node.type != "array_init" and node.type != "object_init":
-        return
-
-    builder = staticContext.builder
-
-    for child in node:
-        if child is None:
-            continue
-
-        if child.type == "property_init":
-            lhs = child[0]
-            rhs = child[1]
-        else:
-            # For array patterns and shorthand properties the element itself
-            # is the left-hand side.
-            lhs = child
-            rhs = None
-
-        if rhs and (rhs.type == "array_init" or rhs.type == "object_init"):
-            checkDestructuring(tokenizer, staticContext, rhs, simpleNamesOnly, data)
-
-        if lhs and simpleNamesOnly:
-            # In declarations, lhs must be simple names
-            if lhs.type != "identifier":
-                raise SyntaxError("Missing name in pattern", tokenizer)
-
-            elif data:
-                childNode = builder.DECL_build(tokenizer)
-                builder.DECL_setName(childNode, lhs.value)
-
-                # No need to set an initializer; it is only used for
-                # hoisting anyway.
-                builder.DECL_finish(childNode)
-
-                # Each pattern needs to be added to variables.
-                # data.variables.add(childNode.name)
-
-
-# JavaScript 1.7
-def DestructuringExpression(tokenizer, staticContext, simpleNamesOnly=None, data=None):
-    node = PrimaryExpression(tokenizer, staticContext)
-    checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly, data)
-
-    return node
-
-
-# JavaScript 1.7
-def GeneratorExpression(tokenizer, staticContext, expression):
-    builder = staticContext.builder
-    node = builder.GENERATOR_build(tokenizer)
-
-    builder.GENERATOR_setExpression(node, expression)
-    builder.GENERATOR_setTail(node, comprehensionTail(tokenizer, staticContext))
-    builder.GENERATOR_finish(node)
-
-    return node
-
-
-# JavaScript 1.7 Comprehensions Tails (Generators / Arrays)
-def comprehensionTail(tokenizer, staticContext):
-    builder = staticContext.builder
-
-    # tokenizer.token.type must be "for"
-    body = builder.COMPTAIL_build(tokenizer)
-
-    while True:
-        node = builder.FOR_build(tokenizer)
-
-        # Comprehension tails are always for..in loops.
-        builder.FOR_rebuildForIn(node)
-        if tokenizer.match("identifier"):
-            # But sometimes they're for each..in.
-            if tokenizer.token.value == "each":
-                builder.FOR_rebuildForEach(node)
-            else:
-                tokenizer.unget()
-
-        tokenizer.mustMatch("left_paren")
-
-        tokenType = tokenizer.get()
-        if tokenType == "left_bracket" or tokenType == "left_curly":
-            tokenizer.unget()
-            # Destructured left side of for in comprehension tails.
-            builder.FOR_setIterator(node, DestructuringExpression(tokenizer, staticContext))
-
-        elif tokenType == "identifier":
-            # The Python port removed the variable/declaration substructure
-            # here: variable declarations are not allowed in comprehension
-            # tails, so there is no point in modelling them as declarations.
-
-            # declaration = builder.DECL_build(tokenizer)
-            # builder.DECL_setName(declaration, tokenizer.token.value)
-            # builder.DECL_finish(declaration)
-            # childNode = builder.VAR_build(tokenizer)
-            # builder.VAR_addDecl(childNode, declaration)
-            # builder.VAR_finish(childNode)
-            # builder.FOR_setIterator(node, declaration)
-
-            # Don't add to variables since the semantics of comprehensions
-            # are such that the variables live in their own function scope
-            # when desugared.
-
-            identifier = builder.PRIMARY_build(tokenizer, "identifier")
-            builder.FOR_setIterator(node, identifier)
-
-        else:
-            raise SyntaxError("Missing identifier", tokenizer)
-
-        tokenizer.mustMatch("in")
-        builder.FOR_setObject(node, Expression(tokenizer, staticContext))
-        tokenizer.mustMatch("right_paren")
-        builder.COMPTAIL_addFor(body, node)
-
-        if not tokenizer.match("for"):
-            break
-
-    # Optional guard.
-    if tokenizer.match("if"):
-        builder.COMPTAIL_setGuard(body, ParenExpression(tokenizer, staticContext))
-
-    builder.COMPTAIL_finish(body)
-
-    return body
-
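-# Illustrative comprehensions whose trailing part is parsed by
-# comprehensionTail() above, i.e. one or more for/for each..in clauses
-# followed by an optional if guard:
-#
-#     [x * x for (x in numbers) if (x > 0)]      // array comprehension
-#     (x * x for each (x in numbers))            // generator expression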
-
-def ParenExpression(tokenizer, staticContext):
-    tokenizer.mustMatch("left_paren")
-
-    # Always accept the 'in' operator in a parenthesized expression,
-    # where it's unambiguous, even if we might be parsing the init of a
-    # for statement.
-    oldLoopInit = staticContext.inForLoopInit
-    staticContext.inForLoopInit = False
-    node = Expression(tokenizer, staticContext)
-    staticContext.inForLoopInit = oldLoopInit
-
-    err = "expression must be parenthesized"
-    if tokenizer.match("for"):
-        if node.type == "yield" and not node.parenthesized:
-            raise SyntaxError("Yield " + err, tokenizer)
-
-        if node.type == "comma" and not node.parenthesized:
-            raise SyntaxError("Generator " + err, tokenizer)
-
-        node = GeneratorExpression(tokenizer, staticContext, node)
-
-    tokenizer.mustMatch("right_paren")
-
-    return node
-
-
-def Expression(tokenizer, staticContext):
-    """Top-down expression parser matched against SpiderMonkey."""
-    builder = staticContext.builder
-    node = AssignExpression(tokenizer, staticContext)
-
-    if tokenizer.match("comma"):
-        childNode = builder.COMMA_build(tokenizer)
-        builder.COMMA_addOperand(childNode, node)
-        node = childNode
-        while True:
-            childNode = node[-1]
-            if childNode.type == "yield" and not childNode.parenthesized:
-                raise SyntaxError("Yield expression must be parenthesized", tokenizer)
-            builder.COMMA_addOperand(node, AssignExpression(tokenizer, staticContext))
-
-            if not tokenizer.match("comma"):
-                break
-
-        builder.COMMA_finish(node)
-
-    return node
-
-
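-# The functions below form the usual recursive-descent precedence ladder:
-# AssignExpression -> ConditionalExpression -> OrExpression -> ... ->
-# MultiplyExpression -> UnaryExpression -> MemberExpression ->
-# PrimaryExpression. Each binary level parses the next-tighter level first
-# and folds its own operators left to right, while assignment recurses and
-# is therefore right-associative. For example
-#
-#     a = b || c + d * e
-#
-# nests as assign(a, or(b, plus(c, mul(d, e)))).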
-def AssignExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-
-    # Have to treat yield like an operand because it could be the leftmost
-    # operand of the expression.
-    if tokenizer.match("yield", True):
-        return returnOrYield(tokenizer, staticContext)
-
-    comments = tokenizer.getComments()
-    node = builder.ASSIGN_build(tokenizer)
-    lhs = ConditionalExpression(tokenizer, staticContext)
-    builder.COMMENTS_add(lhs, None, comments)
-
-    if not tokenizer.match("assign"):
-        builder.ASSIGN_finish(node)
-        return lhs
-
-    if lhs.type == "object_init" or lhs.type == "array_init":
-        checkDestructuring(tokenizer, staticContext, lhs)
-    elif lhs.type == "identifier" or lhs.type == "dot" or lhs.type == "index" or lhs.type == "call":
-        pass
-    else:
-        raise SyntaxError("Bad left-hand side of assignment", tokenizer)
-
-    builder.ASSIGN_setAssignOp(node, tokenizer.token.assignOp)
-    builder.ASSIGN_addOperand(node, lhs)
-    builder.ASSIGN_addOperand(node, AssignExpression(tokenizer, staticContext))
-    builder.ASSIGN_finish(node)
-
-    return node
-
-
-def ConditionalExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = OrExpression(tokenizer, staticContext)
-
-    if tokenizer.match("hook"):
-        childNode = node
-        node = builder.HOOK_build(tokenizer)
-        builder.HOOK_setCondition(node, childNode)
-
-        # Always accept the 'in' operator in the middle clause of a ternary,
-        # where it's unambiguous, even if we might be parsing the init of a
-        # for statement.
-        oldLoopInit = staticContext.inForLoopInit
-        staticContext.inForLoopInit = False
-        builder.HOOK_setThenPart(node, AssignExpression(tokenizer, staticContext))
-        staticContext.inForLoopInit = oldLoopInit
-
-        if not tokenizer.match("colon"):
-            raise SyntaxError("Missing : after ?", tokenizer)
-
-        builder.HOOK_setElsePart(node, AssignExpression(tokenizer, staticContext))
-        builder.HOOK_finish(node)
-
-    return node
-
-
-def OrExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = AndExpression(tokenizer, staticContext)
-
-    while tokenizer.match("or"):
-        childNode = builder.OR_build(tokenizer)
-        builder.OR_addOperand(childNode, node)
-        builder.OR_addOperand(childNode, AndExpression(tokenizer, staticContext))
-        builder.OR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def AndExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseOrExpression(tokenizer, staticContext)
-
-    while tokenizer.match("and"):
-        childNode = builder.AND_build(tokenizer)
-        builder.AND_addOperand(childNode, node)
-        builder.AND_addOperand(childNode, BitwiseOrExpression(tokenizer, staticContext))
-        builder.AND_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseOrExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseXorExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_or"):
-        childNode = builder.BITWISEOR_build(tokenizer)
-        builder.BITWISEOR_addOperand(childNode, node)
-        builder.BITWISEOR_addOperand(childNode, BitwiseXorExpression(tokenizer, staticContext))
-        builder.BITWISEOR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseXorExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseAndExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_xor"):
-        childNode = builder.BITWISEXOR_build(tokenizer)
-        builder.BITWISEXOR_addOperand(childNode, node)
-        builder.BITWISEXOR_addOperand(childNode, BitwiseAndExpression(tokenizer, staticContext))
-        builder.BITWISEXOR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseAndExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = EqualityExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_and"):
-        childNode = builder.BITWISEAND_build(tokenizer)
-        builder.BITWISEAND_addOperand(childNode, node)
-        builder.BITWISEAND_addOperand(childNode, EqualityExpression(tokenizer, staticContext))
-        builder.BITWISEAND_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def EqualityExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = RelationalExpression(tokenizer, staticContext)
-
-    while tokenizer.match("eq") or tokenizer.match("ne") or tokenizer.match("strict_eq") or tokenizer.match("strict_ne"):
-        childNode = builder.EQUALITY_build(tokenizer)
-        builder.EQUALITY_addOperand(childNode, node)
-        builder.EQUALITY_addOperand(childNode, RelationalExpression(tokenizer, staticContext))
-        builder.EQUALITY_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def RelationalExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    oldLoopInit = staticContext.inForLoopInit
-
-    # Uses of the in operator in shiftExprs are always unambiguous,
-    # so unset the flag that prohibits recognizing it.
-    staticContext.inForLoopInit = False
-    node = ShiftExpression(tokenizer, staticContext)
-
-    while tokenizer.match("lt") or tokenizer.match("le") or tokenizer.match("ge") or tokenizer.match("gt") or (oldLoopInit == False and tokenizer.match("in")) or tokenizer.match("instanceof"):
-        childNode = builder.RELATIONAL_build(tokenizer)
-        builder.RELATIONAL_addOperand(childNode, node)
-        builder.RELATIONAL_addOperand(childNode, ShiftExpression(tokenizer, staticContext))
-        builder.RELATIONAL_finish(childNode)
-        node = childNode
-
-    staticContext.inForLoopInit = oldLoopInit
-
-    return node
-
-
-def ShiftExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = AddExpression(tokenizer, staticContext)
-
-    while tokenizer.match("lsh") or tokenizer.match("rsh") or tokenizer.match("ursh"):
-        childNode = builder.SHIFT_build(tokenizer)
-        builder.SHIFT_addOperand(childNode, node)
-        builder.SHIFT_addOperand(childNode, AddExpression(tokenizer, staticContext))
-        builder.SHIFT_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def AddExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = MultiplyExpression(tokenizer, staticContext)
-
-    while tokenizer.match("plus") or tokenizer.match("minus"):
-        childNode = builder.ADD_build(tokenizer)
-        builder.ADD_addOperand(childNode, node)
-        builder.ADD_addOperand(childNode, MultiplyExpression(tokenizer, staticContext))
-        builder.ADD_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def MultiplyExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = UnaryExpression(tokenizer, staticContext)
-
-    while tokenizer.match("mul") or tokenizer.match("div") or tokenizer.match("mod"):
-        childNode = builder.MULTIPLY_build(tokenizer)
-        builder.MULTIPLY_addOperand(childNode, node)
-        builder.MULTIPLY_addOperand(childNode, UnaryExpression(tokenizer, staticContext))
-        builder.MULTIPLY_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def UnaryExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.get(True)
-
-    if tokenType in ["delete", "void", "typeof", "not", "bitwise_not", "plus", "minus"]:
-        node = builder.UNARY_build(tokenizer)
-        builder.UNARY_addOperand(node, UnaryExpression(tokenizer, staticContext))
-
-    elif tokenType == "increment" or tokenType == "decrement":
-        # Prefix increment/decrement.
-        node = builder.UNARY_build(tokenizer)
-        builder.UNARY_addOperand(node, MemberExpression(tokenizer, staticContext, True))
-
-    else:
-        tokenizer.unget()
-        node = MemberExpression(tokenizer, staticContext, True)
-
-        # Don't look across a newline boundary for a postfix {in,de}crement.
-        if tokenizer.tokens[(tokenizer.tokenIndex + tokenizer.lookahead - 1) & 3].line == tokenizer.line:
-            if tokenizer.match("increment") or tokenizer.match("decrement"):
-                childNode = builder.UNARY_build(tokenizer)
-                builder.UNARY_setPostfix(childNode)
-                builder.UNARY_finish(node)
-                builder.UNARY_addOperand(childNode, node)
-                node = childNode
-
-    builder.UNARY_finish(node)
-    return node
-
-
-def MemberExpression(tokenizer, staticContext, allowCallSyntax):
-    builder = staticContext.builder
-
-    if tokenizer.match("new"):
-        node = builder.MEMBER_build(tokenizer)
-        builder.MEMBER_addOperand(node, MemberExpression(tokenizer, staticContext, False))
-
-        if tokenizer.match("left_paren"):
-            builder.MEMBER_rebuildNewWithArgs(node)
-            builder.MEMBER_addOperand(node, ArgumentList(tokenizer, staticContext))
-
-        builder.MEMBER_finish(node)
-
-    else:
-        node = PrimaryExpression(tokenizer, staticContext)
-
-    while True:
-        tokenType = tokenizer.get()
-        if tokenType == "end":
-            break
-
-        if tokenType == "dot":
-            childNode = builder.MEMBER_build(tokenizer)
-            builder.MEMBER_addOperand(childNode, node)
-            tokenizer.mustMatch("identifier")
-            builder.MEMBER_addOperand(childNode, builder.MEMBER_build(tokenizer))
-
-        elif tokenType == "left_bracket":
-            childNode = builder.MEMBER_build(tokenizer, "index")
-            builder.MEMBER_addOperand(childNode, node)
-            builder.MEMBER_addOperand(childNode, Expression(tokenizer, staticContext))
-            tokenizer.mustMatch("right_bracket")
-
-        elif tokenType == "left_paren" and allowCallSyntax:
-            childNode = builder.MEMBER_build(tokenizer, "call")
-            builder.MEMBER_addOperand(childNode, node)
-            builder.MEMBER_addOperand(childNode, ArgumentList(tokenizer, staticContext))
-
-        else:
-            tokenizer.unget()
-            return node
-
-        builder.MEMBER_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def ArgumentList(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = builder.LIST_build(tokenizer)
-
-    if tokenizer.match("right_paren", True):
-        return node
-
-    while True:
-        childNode = AssignExpression(tokenizer, staticContext)
-        if childNode.type == "yield" and not childNode.parenthesized and tokenizer.peek() == "comma":
-            raise SyntaxError("Yield expression must be parenthesized", tokenizer)
-
-        if tokenizer.match("for"):
-            childNode = GeneratorExpression(tokenizer, staticContext, childNode)
-            if len(node) > 1 or tokenizer.peek(True) == "comma":
-                raise SyntaxError("Generator expression must be parenthesized", tokenizer)
-
-        builder.LIST_addOperand(node, childNode)
-        if not tokenizer.match("comma"):
-            break
-
-    tokenizer.mustMatch("right_paren")
-    builder.LIST_finish(node)
-
-    return node
-
-
-def PrimaryExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.get(True)
-
-    if tokenType == "function":
-        node = FunctionDefinition(tokenizer, staticContext, False, "expressed_form")
-
-    elif tokenType == "left_bracket":
-        node = builder.ARRAYINIT_build(tokenizer)
-        while True:
-            tokenType = tokenizer.peek(True)
-            if tokenType == "right_bracket":
-                break
-
-            if tokenType == "comma":
-                tokenizer.get()
-                builder.ARRAYINIT_addElement(node, None)
-                continue
-
-            builder.ARRAYINIT_addElement(node, AssignExpression(tokenizer, staticContext))
-
-            if tokenType != "comma" and not tokenizer.match("comma"):
-                break
-
-        # If we matched exactly one element and got a "for", we have an
-        # array comprehension.
-        if len(node) == 1 and tokenizer.match("for"):
-            childNode = builder.ARRAYCOMP_build(tokenizer)
-            builder.ARRAYCOMP_setExpression(childNode, node[0])
-            builder.ARRAYCOMP_setTail(childNode, comprehensionTail(tokenizer, staticContext))
-            node = childNode
-
-        builder.COMMENTS_add(node, node, tokenizer.getComments())
-        tokenizer.mustMatch("right_bracket")
-        builder.PRIMARY_finish(node)
-
-    elif tokenType == "left_curly":
-        node = builder.OBJECTINIT_build(tokenizer)
-
-        if not tokenizer.match("right_curly"):
-            while True:
-                tokenType = tokenizer.get()
-                tokenValue = getattr(tokenizer.token, "value", None)
-                comments = tokenizer.getComments()
-
-                if tokenValue in ("get", "set") and tokenizer.peek() == "identifier":
-                    if staticContext.ecma3OnlyMode:
-                        raise SyntaxError("Illegal property accessor", tokenizer)
-
-                    fd = FunctionDefinition(tokenizer, staticContext, True, "expressed_form")
-                    builder.OBJECTINIT_addProperty(node, fd)
-
-                else:
-                    if tokenType == "identifier" or tokenType == "number" or tokenType == "string":
-                        id = builder.PRIMARY_build(tokenizer, "identifier")
-                        builder.PRIMARY_finish(id)
-
-                    elif tokenType == "right_curly":
-                        if staticContext.ecma3OnlyMode:
-                            raise SyntaxError("Illegal trailing ,", tokenizer)
-
-                        tokenizer.unget()
-                        break
-
-                    else:
-                        if tokenValue in jasy.js.tokenize.Lang.keywords:
-                            id = builder.PRIMARY_build(tokenizer, "identifier")
-                            builder.PRIMARY_finish(id)
-                        else:
-                            raise SyntaxError("Invalid property name '%s'" % tokenValue, tokenizer)
-
-                    if tokenizer.match("colon"):
-                        childNode = builder.PROPERTYINIT_build(tokenizer)
-                        builder.COMMENTS_add(childNode, node, comments)
-                        builder.PROPERTYINIT_addOperand(childNode, id)
-                        builder.PROPERTYINIT_addOperand(childNode, AssignExpression(tokenizer, staticContext))
-                        builder.PROPERTYINIT_finish(childNode)
-                        builder.OBJECTINIT_addProperty(node, childNode)
-
-                    else:
-                        # Support, e.g., |var {x, y} = o| as destructuring shorthand
-                        # for |var {x: x, y: y} = o|, per proposed JS2/ES4 for JS1.8.
-                        if tokenizer.peek() != "comma" and tokenizer.peek() != "right_curly":
-                            raise SyntaxError("Missing : after property", tokenizer)
-                        builder.OBJECTINIT_addProperty(node, id)
-
-                if not tokenizer.match("comma"):
-                    break
-
-            builder.COMMENTS_add(node, node, tokenizer.getComments())
-            tokenizer.mustMatch("right_curly")
-
-        builder.OBJECTINIT_finish(node)
-
-    elif tokenType == "left_paren":
-        # ParenExpression does its own matching on parentheses, so we need to unget.
-        tokenizer.unget()
-        node = ParenExpression(tokenizer, staticContext)
-        node.parenthesized = True
-
-    elif tokenType == "let":
-        node = LetBlock(tokenizer, staticContext, False)
-
-    elif tokenType in ["null", "this", "true", "false", "identifier", "number", "string", "regexp"]:
-        node = builder.PRIMARY_build(tokenizer, tokenType)
-        builder.PRIMARY_finish(node)
-
-    else:
-        raise SyntaxError("Missing operand. Found type: %s" % tokenType, tokenizer)
-
-    return node
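-
-
-# Minimal usage sketch (illustrative; it assumes the module-level parse()
-# entry point defined earlier in this file):
-#
-#     import jasy.js.parse.Parser as Parser
-#
-#     root = Parser.parse("var answer = 42;", "demo.js")
-#     for child in root:
-#         print(child.type)       # e.g. "var"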
--- a/ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,679 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors:
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
-#
-
-from __future__ import unicode_literals
-
-import jasy.js.parse.Node
-
-class VanillaBuilder:
-    """The vanilla AST builder."""
-
-    def COMMENTS_add(self, currNode, prevNode, comments):
-        if not comments:
-            return
-
-        currComments = []
-        prevComments = []
-        for comment in comments:
-            # post comments - for previous node
-            if comment.context == "inline":
-                prevComments.append(comment)
-
-            # all other comment styles are attached to the current one
-            else:
-                currComments.append(comment)
-
-        # Merge with previously added ones
-        if hasattr(currNode, "comments"):
-            currNode.comments.extend(currComments)
-        else:
-            currNode.comments = currComments
-
-        if prevNode:
-            if hasattr(prevNode, "comments"):
-                prevNode.comments.extend(prevComments)
-            else:
-                prevNode.comments = prevComments
-        else:
-            # Don't lose the comment in the tree (if there is no previous node, attach it to the current node)
-            currNode.comments.extend(prevComments)
-
-    def IF_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "if")
-
-    def IF_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def IF_setThenPart(self, node, statement):
-        node.append(statement, "thenPart")
-
-    def IF_setElsePart(self, node, statement):
-        node.append(statement, "elsePart")
-
-    def IF_finish(self, node):
-        pass
-
-    def SWITCH_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "switch")
-        node.defaultIndex = -1
-        return node
-
-    def SWITCH_setDiscriminant(self, node, expression):
-        node.append(expression, "discriminant")
-
-    def SWITCH_setDefaultIndex(self, node, index):
-        node.defaultIndex = index
-
-    def SWITCH_addCase(self, node, childNode):
-        node.append(childNode)
-
-    def SWITCH_finish(self, node):
-        pass
-
-    def CASE_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "case")
-
-    def CASE_setLabel(self, node, expression):
-        node.append(expression, "label")
-
-    def CASE_initializeStatements(self, node, tokenizer):
-        node.append(jasy.js.parse.Node.Node(tokenizer, "block"), "statements")
-
-    def CASE_addStatement(self, node, statement):
-        node.statements.append(statement)
-
-    def CASE_finish(self, node):
-        pass
-
-    def DEFAULT_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "default")
-
-    def DEFAULT_initializeStatements(self, node, tokenizer):
-        node.append(jasy.js.parse.Node.Node(tokenizer, "block"), "statements")
-
-    def DEFAULT_addStatement(self, node, statement):
-        node.statements.append(statement)
-
-    def DEFAULT_finish(self, node):
-        pass
-
-    def FOR_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "for")
-        node.isLoop = True
-        node.isEach = False
-        return node
-
-    def FOR_rebuildForEach(self, node):
-        node.isEach = True
-
-    # NB: This function is called after rebuildForEach, if that is called at all.
-    def FOR_rebuildForIn(self, node):
-        node.type = "for_in"
-
-    def FOR_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def FOR_setSetup(self, node, expression):
-        node.append(expression, "setup")
-
-    def FOR_setUpdate(self, node, expression):
-        node.append(expression, "update")
-
-    def FOR_setObject(self, node, expression, forBlock=None):
-        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
-        # JS tolerates the optional unused parameter; Python does not.
-        node.append(expression, "object")
-
-    def FOR_setIterator(self, node, expression, forBlock=None):
-        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
-        # JS tolerates the optional unused parameter; Python does not.
-        node.append(expression, "iterator")
-
-    def FOR_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def FOR_finish(self, node):
-        pass
-
-    def WHILE_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "while")
-        node.isLoop = True
-        return node
-
-    def WHILE_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def WHILE_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def WHILE_finish(self, node):
-        pass
-
-    def DO_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "do")
-        node.isLoop = True
-        return node
-
-    def DO_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def DO_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def DO_finish(self, node):
-        pass
-
-    def BREAK_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "break")
-
-    def BREAK_setLabel(self, node, label):
-        node.label = label
-
-    def BREAK_setTarget(self, node, target):
-        # Note: intentionally no append() - the target is a reference, not a child node
-        node.target = target
-
-    def BREAK_finish(self, node):
-        pass
-
-    def CONTINUE_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "continue")
-
-    def CONTINUE_setLabel(self, node, label):
-        node.label = label
-
-    def CONTINUE_setTarget(self, node, target):
-        # Note: intentionally no append() - the target is a reference, not a child node
-        node.target = target
-
-    def CONTINUE_finish(self, node):
-        pass
-
-    def TRY_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "try")
-        return node
-
-    def TRY_setTryBlock(self, node, statement):
-        node.append(statement, "tryBlock")
-
-    def TRY_addCatch(self, node, childNode):
-        node.append(childNode)
-
-    def TRY_finishCatches(self, node):
-        pass
-
-    def TRY_setFinallyBlock(self, node, statement):
-        node.append(statement, "finallyBlock")
-
-    def TRY_finish(self, node):
-        pass
-
-    def CATCH_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "catch")
-        return node
-
-    def CATCH_wrapException(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "exception")
-        node.value = tokenizer.token.value
-        return node
-
-    def CATCH_setException(self, node, exception):
-        node.append(exception, "exception")
-
-    def CATCH_setGuard(self, node, expression):
-        node.append(expression, "guard")
-
-    def CATCH_setBlock(self, node, statement):
-        node.append(statement, "block")
-
-    def CATCH_finish(self, node):
-        pass
-
-    def THROW_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "throw")
-
-    def THROW_setException(self, node, expression):
-        node.append(expression, "exception")
-
-    def THROW_finish(self, node):
-        pass
-
-    def RETURN_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "return")
-
-    def RETURN_setValue(self, node, expression):
-        node.append(expression, "value")
-
-    def RETURN_finish(self, node):
-        pass
-
-    def YIELD_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "yield")
-
-    def YIELD_setValue(self, node, expression):
-        node.append(expression, "value")
-
-    def YIELD_finish(self, node):
-        pass
-
-    def GENERATOR_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "generator")
-
-    def GENERATOR_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def GENERATOR_setTail(self, node, childNode):
-        node.append(childNode, "tail")
-
-    def GENERATOR_finish(self, node):
-        pass
-
-    def WITH_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "with")
-
-    def WITH_setObject(self, node, expression):
-        node.append(expression, "object")
-
-    def WITH_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def WITH_finish(self, node):
-        pass
-
-    def DEBUGGER_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "debugger")
-
-    def SEMICOLON_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "semicolon")
-
-    def SEMICOLON_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def SEMICOLON_finish(self, node):
-        pass
-
-    def LABEL_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "label")
-
-    def LABEL_setLabel(self, node, label):
-        node.label = label
-
-    def LABEL_setStatement(self, node, statement):
-        node.append(statement, "statement")
-
-    def LABEL_finish(self, node):
-        pass
-
-    def FUNCTION_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer)
-        if node.type != "function":
-            if tokenizer.token.value == "get":
-                node.type = "getter"
-            else:
-                node.type = "setter"
-
-        return node
-
-    def FUNCTION_setName(self, node, identifier):
-        node.name = identifier
-
-    def FUNCTION_initParams(self, node, tokenizer):
-        node.append(jasy.js.parse.Node.Node(tokenizer, "list"), "params")
-
-    def FUNCTION_wrapParam(self, tokenizer):
-        param = jasy.js.parse.Node.Node(tokenizer)
-        param.value = tokenizer.token.value
-        return param
-
-    def FUNCTION_addParam(self, node, tokenizer, expression):
-        node.params.append(expression)
-
-    def FUNCTION_setExpressionClosure(self, node, expressionClosure):
-        node.expressionClosure = expressionClosure
-
-    def FUNCTION_setBody(self, node, statement):
-        # Copying the function parameters over to the function body is
-        # disabled in this port:
-        #params = getattr(node, "params", None)
-        #if params:
-        #    statement.params = [param.value for param in params]
-
-        node.append(statement, "body")
-
-    def FUNCTION_hoistVars(self, x):
-        pass
-
-    def FUNCTION_finish(self, node, x):
-        pass
-
-    def VAR_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "var")
-
-    def VAR_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def VAR_finish(self, node):
-        pass
-
-    def CONST_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "const")
-
-    def CONST_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def CONST_finish(self, node):
-        pass
-
-    def LET_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "let")
-
-    def LET_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def LET_finish(self, node):
-        pass
-
-    def DECL_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "declaration")
-
-    def DECL_setNames(self, node, expression):
-        node.append(expression, "names")
-
-    def DECL_setName(self, node, identifier):
-        node.name = identifier
-
-    def DECL_setInitializer(self, node, expression):
-        node.append(expression, "initializer")
-
-    def DECL_setReadOnly(self, node, readOnly):
-        node.readOnly = readOnly
-
-    def DECL_finish(self, node):
-        pass
-
-    def LETBLOCK_build(self, tokenizer):
-        node = jasy.js.parse.Node.Node(tokenizer, "let_block")
-        return node
-
-    def LETBLOCK_setVariables(self, node, childNode):
-        node.append(childNode, "variables")
-
-    def LETBLOCK_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def LETBLOCK_setBlock(self, node, statement):
-        node.append(statement, "block")
-
-    def LETBLOCK_finish(self, node):
-        pass
-
-    def BLOCK_build(self, tokenizer, id):
-        node = jasy.js.parse.Node.Node(tokenizer, "block")
-        # node.id = id
-        return node
-
-    def BLOCK_hoistLets(self, node):
-        pass
-
-    def BLOCK_addStatement(self, node, childNode):
-        node.append(childNode)
-
-    def BLOCK_finish(self, node):
-        pass
-
-    def EXPRESSION_build(self, tokenizer, tokenType):
-        return jasy.js.parse.Node.Node(tokenizer, tokenType)
-
-    def EXPRESSION_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def EXPRESSION_finish(self, node):
-        pass
-
-    def ASSIGN_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "assign")
-
-    def ASSIGN_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def ASSIGN_setAssignOp(self, node, operator):
-        node.assignOp = operator
-
-    def ASSIGN_finish(self, node):
-        pass
-
-    def HOOK_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "hook")
-
-    def HOOK_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def HOOK_setThenPart(self, node, childNode):
-        node.append(childNode, "thenPart")
-
-    def HOOK_setElsePart(self, node, childNode):
-        node.append(childNode, "elsePart")
-
-    def HOOK_finish(self, node):
-        pass
-
-    def OR_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "or")
-
-    def OR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def OR_finish(self, node):
-        pass
-
-    def AND_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "and")
-
-    def AND_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def AND_finish(self, node):
-        pass
-
-    def BITWISEOR_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "bitwise_or")
-
-    def BITWISEOR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEOR_finish(self, node):
-        pass
-
-    def BITWISEXOR_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "bitwise_xor")
-
-    def BITWISEXOR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEXOR_finish(self, node):
-        pass
-
-    def BITWISEAND_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "bitwise_and")
-
-    def BITWISEAND_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEAND_finish(self, node):
-        pass
-
-    def EQUALITY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "eq", "ne", "strict_eq", or "strict_ne".
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def EQUALITY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def EQUALITY_finish(self, node):
-        pass
-
-    def RELATIONAL_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "lt", "le", "ge", or "gt".
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def RELATIONAL_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def RELATIONAL_finish(self, node):
-        pass
-
-    def SHIFT_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "lsh", "rsh", or "ursh".
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def SHIFT_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def SHIFT_finish(self, node):
-        pass
-
-    def ADD_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "plus" or "minus".
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def ADD_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def ADD_finish(self, node):
-        pass
-
-    def MULTIPLY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "mul", "div", or "mod".
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def MULTIPLY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def MULTIPLY_finish(self, node):
-        pass
-
-    def UNARY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not", "bitwise_not",
-        # "unary_plus", "unary_minus", "increment", or "decrement".
-        if tokenizer.token.type == "plus":
-            tokenizer.token.type = "unary_plus"
-        elif tokenizer.token.type == "minus":
-            tokenizer.token.type = "unary_minus"
-
-        return jasy.js.parse.Node.Node(tokenizer)
-
-    def UNARY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def UNARY_setPostfix(self, node):
-        node.postfix = True
-
-    def UNARY_finish(self, node):
-        pass
-
-    def MEMBER_build(self, tokenizer, tokenType=None):
-        node = jasy.js.parse.Node.Node(tokenizer, tokenType)
-        if node.type == "identifier":
-            node.value = tokenizer.token.value
-        return node
-
-    def MEMBER_rebuildNewWithArgs(self, node):
-        node.type = "new_with_args"
-
-    def MEMBER_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def MEMBER_finish(self, node):
-        pass
-
-    def PRIMARY_build(self, tokenizer, tokenType):
-        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier", "number", "string", or "regexp".
-        node = jasy.js.parse.Node.Node(tokenizer, tokenType)
-        if tokenType in ("identifier", "string", "regexp", "number"):
-            node.value = tokenizer.token.value
-
-        return node
-
-    def PRIMARY_finish(self, node):
-        pass
-
-    def ARRAYINIT_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "array_init")
-
-    def ARRAYINIT_addElement(self, node, childNode):
-        node.append(childNode)
-
-    def ARRAYINIT_finish(self, node):
-        pass
-
-    def ARRAYCOMP_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "array_comp")
-
-    def ARRAYCOMP_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def ARRAYCOMP_setTail(self, node, childNode):
-        node.append(childNode, "tail")
-
-    def ARRAYCOMP_finish(self, node):
-        pass
-
-    def COMPTAIL_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "comp_tail")
-
-    def COMPTAIL_setGuard(self, node, expression):
-        node.append(expression, "guard")
-
-    def COMPTAIL_addFor(self, node, childNode):
-        node.append(childNode, "for")
-
-    def COMPTAIL_finish(self, node):
-        pass
-
-    def OBJECTINIT_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "object_init")
-
-    def OBJECTINIT_addProperty(self, node, childNode):
-        node.append(childNode)
-
-    def OBJECTINIT_finish(self, node):
-        pass
-
-    def PROPERTYINIT_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "property_init")
-
-    def PROPERTYINIT_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def PROPERTYINIT_finish(self, node):
-        pass
-
-    def COMMA_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "comma")
-
-    def COMMA_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def COMMA_finish(self, node):
-        pass
-
-    def LIST_build(self, tokenizer):
-        return jasy.js.parse.Node.Node(tokenizer, "list")
-
-    def LIST_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def LIST_finish(self, node):
-        pass
-
-    def setHoists(self, id, vds):
-        pass
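-
-
-# The builder follows a uniform protocol: for each construct FOO the parser
-# calls FOO_build() to create a node, FOO_add*/FOO_set*() to attach children
-# and attributes, and FOO_finish() once the construct is complete. A custom
-# builder only needs to subclass VanillaBuilder and override the hooks it is
-# interested in; a hypothetical sketch:
-#
-#     class CountingBuilder(VanillaBuilder):
-#         functionCount = 0
-#
-#         def FUNCTION_build(self, tokenizer):
-#             self.functionCount += 1
-#             return VanillaBuilder.FUNCTION_build(self, tokenizer)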
--- a/ThirdParty/Jasy/jasy/js/tokenize/Lang.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-from __future__ import unicode_literals
-
-"""JavaScript 1.7 keywords"""
-keywords = set([
-    "break",
-    "case", "catch", "const", "continue",
-    "debugger", "default", "delete", "do",
-    "else",
-    "false", "finally", "for", "function",
-    "if", "in", "instanceof",
-    "let",
-    "new", "null",
-    "return",
-    "switch",
-    "this", "throw", "true", "try", "typeof",
-    "var", "void",
-    "yield",
-    "while", "with"
-])
--- a/ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,609 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors: 
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
-#
-
-from __future__ import unicode_literals
-
-import copy
-
-import jasy.js.tokenize.Lang as Lang
-import jasy.js.api.Comment as Comment
-import jasy.core.Console as Console
-
-__all__ = [ "Tokenizer" ]
-
-
-# Operator and punctuator mapping from token to tree node type name.
-# NB: because the lexer doesn't backtrack, all token prefixes must themselves
-# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
-# tokens != and !).
-operatorNames = {
-    '<'   : 'lt', 
-    '>'   : 'gt', 
-    '<='  : 'le', 
-    '>='  : 'ge', 
-    '!='  : 'ne', 
-    '!'   : 'not', 
-    '=='  : 'eq', 
-    '===' : 'strict_eq', 
-    '!==' : 'strict_ne', 
-
-    '>>'  : 'rsh', 
-    '<<'  : 'lsh',
-    '>>>' : 'ursh', 
-     
-    '+'   : 'plus', 
-    '*'   : 'mul', 
-    '-'   : 'minus', 
-    '/'   : 'div', 
-    '%'   : 'mod', 
-
-    ','   : 'comma', 
-    ';'   : 'semicolon', 
-    ':'   : 'colon', 
-    '='   : 'assign', 
-    '?'   : 'hook', 
-
-    '&&'  : 'and', 
-    '||'  : 'or', 
-
-    '++'  : 'increment', 
-    '--'  : 'decrement', 
-
-    ')'   : 'right_paren', 
-    '('   : 'left_paren', 
-    '['   : 'left_bracket', 
-    ']'   : 'right_bracket', 
-    '{'   : 'left_curly', 
-    '}'   : 'right_curly', 
-
-    '&'   : 'bitwise_and', 
-    '^'   : 'bitwise_xor', 
-    '|'   : 'bitwise_or', 
-    '~'   : 'bitwise_not'
-}
-
-
-# Assignment operators
-assignOperators = ["|", "^", "&", "<<", ">>", ">>>", "+", "-", "*", "/", "%"]
-
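-# Compound assignments such as "+=" are tokenized as a single token of type
-# "assign" whose assignOp attribute names the wrapped operator (e.g. "plus");
-# for a plain "=" assignOp is None. The parser inspects token.assignOp, e.g.
-# in AssignExpression().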
-
-
-
-#
-# Classes
-#
-
-class Token: 
-    __slots__ = ["type", "start", "line", "assignOp", "end", "value"]
-
-
-class ParseError(Exception):
-    def __init__(self, message, fileId, line):
-        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, fileId, line))
-
-
-class Tokenizer(object):
-    def __init__(self, source, fileId="", line=1):
-        # source: JavaScript source
-        # fileId: Filename (for debugging purposes)
-        # line: Line number (for debugging purposes)
-        self.cursor = 0
-        self.source = str(source)
-        self.tokens = {}
-        self.tokenIndex = 0
-        self.lookahead = 0
-        self.scanNewlines = False
-        self.fileId = fileId
-        self.line = line
-        self.comments = []
-
-    input_ = property(lambda self: self.source[self.cursor:])
-    token = property(lambda self: self.tokens.get(self.tokenIndex))
-
-
-    def done(self):
-        # We need to set scanOperand to true here because the first thing
-        # might be a regexp.
-        return self.peek(True) == "end"
-        
-
-    def match(self, tokenType, scanOperand=False):
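-        # Returns True when the next token has the given type; otherwise
-        # unget() pushes the token back and its falsy return value (None)
-        # is propagated.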
-        return self.get(scanOperand) == tokenType or self.unget()
-
-
-    def mustMatch(self, tokenType):
-        if not self.match(tokenType):
-            raise ParseError("Missing " + tokenType, self.fileId, self.line)
-            
-        return self.token
-
-
-    def peek(self, scanOperand=False):
-        if self.lookahead:
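-            # self.tokens acts as a four-slot ring buffer, hence the "& 3"
-            # index arithmetic.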
-            next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
-            if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
-                tokenType = "newline"
-            else:
-                tokenType = getattr(next, "type", None)
-        else:
-            tokenType = self.get(scanOperand)
-            self.unget()
-            
-        return tokenType
-
-
-    def peekOnSameLine(self, scanOperand=False):
-        self.scanNewlines = True
-        tokenType = self.peek(scanOperand)
-        self.scanNewlines = False
-        return tokenType
-        
-
-    def getComments(self):
-        if self.comments:
-            comments = self.comments
-            self.comments = []
-            return comments
-            
-        return None
-
-
-    def skip(self):
-        """Eats comments and whitespace."""
-        input = self.source
-        startLine = self.line
-
-        # Whether this is the first call, as happens when starting to parse
-        # a file (eat leading comments/whitespace)
-        startOfFile = self.cursor == 0
-        
-        indent = ""
-        
-        while (True):
-            if len(input) > self.cursor:
-                ch = input[self.cursor]
-            else:
-                return
-                
-            self.cursor += 1
-            
-            if len(input) > self.cursor:
-                next = input[self.cursor]
-            else:
-                next = None
-
-            if ch == "\n" and not self.scanNewlines:
-                self.line += 1
-                indent = ""
-                
-            elif ch == "/" and next == "*":
-                self.cursor += 1
-                text = "/*"
-                inline = startLine == self.line and startLine > 1
-                commentStartLine = self.line
-                if startLine == self.line and not startOfFile:
-                    mode = "inline"
-                elif (self.line-1) > startLine:
-                    # A blank-line gap before this comment marks it as a section comment (covering multiple lines of code)
-                    mode = "section"
-                else:
-                    # Comment for possibly several following lines of code, but less prominent (no blank-line divider)
-                    mode = "block"
-                    
-                while (True):
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        raise ParseError("Unterminated comment", self.fileId, self.line)
-                        
-                    if ch == "*":
-                        next = input[self.cursor]
-                        if next == "/":
-                            text += "*/"
-                            self.cursor += 1
-                            break
-                            
-                    elif ch == "\n":
-                        self.line += 1
-                        
-                    text += ch
-                    
-                
-                # Unescape star-slash combinations in the comment text
-                text = text.replace("*\\/", "*/")
-                
-                try:
-                    self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
-                except Comment.CommentException as commentError:
-                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
-                    
-                    
-            elif ch == "/" and next == "/":
-                self.cursor += 1
-                text = "//"
-                if startLine == self.line and not startOfFile:
-                    mode = "inline"
-                elif (self.line-1) > startLine:
-                    # A blank-line gap before this comment marks it as a section comment (covering multiple lines of code)
-                    mode = "section"
-                else:
-                    # Comment for possibly several following lines of code, but less prominent (no blank-line divider)
-                    mode = "block"
-                    
-                while (True):
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        # end of file etc.
-                        break
-
-                    if ch == "\n":
-                        self.line += 1
-                        break
-                    
-                    text += ch
-                    
-                try:
-                    self.comments.append(Comment.Comment(text, mode, self.line-1, "", self.fileId))
-                except Comment.CommentException as commentError:
-                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
-
-            # check for whitespace, also for special cases like 0xA0
-            elif ch in "\xA0 \t":
-                indent += ch
-
-            else:
-                self.cursor -= 1
-                return
-
-
-    # Lexes the exponential part of a number, if present. Returns True if an
-    # exponential part was found.
-    def lexExponent(self):
-        input = self.source
-        next = input[self.cursor]
-        if next == "e" or next == "E":
-            self.cursor += 1
-            ch = input[self.cursor]
-            self.cursor += 1
-            if ch == "+" or ch == "-":
-                ch = input[self.cursor]
-                self.cursor += 1
-
-            if ch < "0" or ch > "9":
-                raise ParseError("Missing exponent", self.fileId, self.line)
-
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-                
-            self.cursor -= 1
-            return True
-
-        return False
-
-
-    def lexZeroNumber(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "number"
-
-        ch = input[self.cursor]
-        self.cursor += 1
-        if ch == ".":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-                
-            self.cursor -= 1
-            self.lexExponent()
-            token.value = input[token.start:self.cursor]
-            
-        elif ch == "x" or ch == "X":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
-                    break
-                    
-            self.cursor -= 1
-            token.value = input[token.start:self.cursor]
-
-        elif ch >= "0" and ch <= "7":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "7"):
-                    break
-                    
-            self.cursor -= 1
-            token.value = input[token.start:self.cursor]
-
-        else:
-            self.cursor -= 1
-            self.lexExponent()     # 0E1, &c.
-            token.value = 0
-    
-
-    def lexNumber(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "number"
-
-        floating = False
-        while(True):
-            ch = input[self.cursor]
-            self.cursor += 1
-            
-            if ch == "." and not floating:
-                floating = True
-                ch = input[self.cursor]
-                self.cursor += 1
-                
-            if not (ch >= "0" and ch <= "9"):
-                break
-
-        self.cursor -= 1
-
-        exponent = self.lexExponent()
-        segment = input[token.start:self.cursor]
-        
-        # Protect float or exponent numbers
-        if floating or exponent:
-            token.value = segment
-        else:
-            token.value = int(segment)
-
-
-    def lexDot(self, ch):
-        token = self.token
-        input = self.source
-        next = input[self.cursor]
-        
-        if next >= "0" and next <= "9":
-            while (True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-
-            self.cursor -= 1
-            self.lexExponent()
-
-            token.type = "number"
-            token.value = input[token.start:self.cursor]
-
-        else:
-            token.type = "dot"
-
-
-    def lexString(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "string"
-
-        hasEscapes = False
-        delim = ch
-        ch = input[self.cursor]
-        self.cursor += 1
-        while ch != delim:
-            if ch == "\\":
-                hasEscapes = True
-                self.cursor += 1
-
-            ch = input[self.cursor]
-            self.cursor += 1
-
-        if hasEscapes:
-            token.value = eval(input[token.start:self.cursor])
-        else:
-            token.value = input[token.start+1:self.cursor-1]
-
-
-    def lexRegExp(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "regexp"
-
-        while (True):
-            try:
-                ch = input[self.cursor]
-                self.cursor += 1
-            except IndexError:
-                raise ParseError("Unterminated regex", self.fileId, self.line)
-
-            if ch == "\\":
-                self.cursor += 1
-                
-            elif ch == "[":
-                while (True):
-                    if ch == "\\":
-                        self.cursor += 1
-
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        raise ParseError("Unterminated character class", self.fileId, self.line)
-                    
-                    if ch == "]":
-                        break
-                    
-            if ch == "/":
-                break
-
-        while(True):
-            ch = input[self.cursor]
-            self.cursor += 1
-            if not (ch >= "a" and ch <= "z"):
-                break
-
-        self.cursor -= 1
-        token.value = input[token.start:self.cursor]
-    
-
-    def lexOp(self, ch):
-        token = self.token
-        input = self.source
-
-        op = ch
-        while(True):
-            try:
-                next = input[self.cursor]
-            except IndexError:
-                break
-                
-            if (op + next) in operatorNames:
-                self.cursor += 1
-                op += next
-            else:
-                break
-        
-        try:
-            next = input[self.cursor]
-        except IndexError:
-            next = None
-
-        if next == "=" and op in assignOperators:
-            self.cursor += 1
-            token.type = "assign"
-            token.assignOp = operatorNames[op]
-            op += "="
-            
-        else:
-            token.type = operatorNames[op]
-            token.assignOp = None
-
-
-    # FIXME: Unicode escape sequences
-    # FIXME: Unicode identifiers
-    def lexIdent(self, ch):
-        token = self.token
-        input = self.source
-
-        try:
-            while True:
-                ch = input[self.cursor]
-                self.cursor += 1
-            
-                if not ((ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or (ch >= "0" and ch <= "9") or ch == "$" or ch == "_"):
-                    break
-                    
-        except IndexError:
-            self.cursor += 1
-            pass
-        
-        # Put the non-word character back.
-        self.cursor -= 1
-
-        identifier = input[token.start:self.cursor]
-        if identifier in Lang.keywords:
-            token.type = identifier
-        else:
-            token.type = "identifier"
-            token.value = identifier
-
-
-    def get(self, scanOperand=False):
-        """ 
-        It consumes input *only* if there is no lookahead.
-        Dispatches to the appropriate lexing function depending on the input.
-        """
-        while self.lookahead:
-            self.lookahead -= 1
-            self.tokenIndex = (self.tokenIndex + 1) & 3
-            token = self.tokens[self.tokenIndex]
-            if token.type != "newline" or self.scanNewlines:
-                return token.type
-
-        self.skip()
-
-        self.tokenIndex = (self.tokenIndex + 1) & 3
-        self.tokens[self.tokenIndex] = token = Token()
-
-        token.start = self.cursor
-        token.line = self.line
-
-        input = self.source
-        if self.cursor == len(input):
-            token.end = token.start
-            token.type = "end"
-            return token.type
-
-        ch = input[self.cursor]
-        self.cursor += 1
-        
-        if (ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or ch == "$" or ch == "_":
-            self.lexIdent(ch)
-        
-        elif scanOperand and ch == "/":
-            self.lexRegExp(ch)
-        
-        elif ch == ".":
-            self.lexDot(ch)
-
-        elif self.scanNewlines and ch == "\n":
-            token.type = "newline"
-            self.line += 1
-
-        elif ch in operatorNames:
-            self.lexOp(ch)
-        
-        elif ch >= "1" and ch <= "9":
-            self.lexNumber(ch)
-        
-        elif ch == "0":
-            self.lexZeroNumber(ch)
-        
-        elif ch == '"' or ch == "'":
-            self.lexString(ch)
-        
-        else:
-            raise ParseError("Illegal token: %s (Code: %s)" % (ch, ord(ch)), self.fileId, self.line)
-
-        token.end = self.cursor
-        return token.type
-        
-
-    def unget(self):
-        """ Match depends on unget returning undefined."""
-        self.lookahead += 1
-        
-        if self.lookahead == 4: 
-            raise ParseError("PANIC: too much lookahead!", self.fileId, self.line)
-        
-        self.tokenIndex = (self.tokenIndex - 1) & 3
-        
-    
-    def save(self):
-        return {
-            "cursor" : self.cursor,
-            "tokenIndex": self.tokenIndex,
-            "tokens": copy.copy(self.tokens),
-            "lookahead": self.lookahead,
-            "scanNewlines": self.scanNewlines,
-            "line": self.line
-        }
-
-    
-    def rewind(self, point):
-        self.cursor = point["cursor"]
-        self.tokenIndex = point["tokenIndex"]
-        self.tokens = copy.copy(point["tokens"])
-        self.lookahead = point["lookahead"]
-        self.scanNewline = point["scanNewline"]
-        self.line = point["line"]
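For orientation on the tokenizer deleted above: get() and unget() share a four-slot ring of tokens whose indices wrap via "& 3", so up to three tokens can be pushed back before the "too much lookahead" check fires. A minimal standalone sketch of that mechanism, with illustrative names that are not part of this diff:

    class RingLookahead:
        def __init__(self):
            self.tokens = [None] * 4   # four reusable token slots
            self.tokenIndex = 0        # slot holding the current token
            self.lookahead = 0         # tokens already scanned and pushed back

        def get(self, scan):
            # Replay a buffered token if one was ungotten ...
            if self.lookahead:
                self.lookahead -= 1
                self.tokenIndex = (self.tokenIndex + 1) & 3
                return self.tokens[self.tokenIndex]
            # ... otherwise scan a fresh token into the next slot.
            self.tokenIndex = (self.tokenIndex + 1) & 3
            self.tokens[self.tokenIndex] = token = scan()
            return token

        def unget(self):
            self.lookahead += 1
            if self.lookahead == 4:
                raise RuntimeError("too much lookahead")   # mirrors the PANIC above
            self.tokenIndex = (self.tokenIndex - 1) & 3

    stream = iter(["a", "+", "b"])
    t = RingLookahead()
    first = t.get(lambda: next(stream))
    t.unget()                                    # push the token back
    assert t.get(lambda: next(stream)) == first  # replayed, not rescanned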
--- a/ThirdParty/Jasy/jasy/js/util/__init__.py	Sat Jan 12 11:26:32 2019 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-#
-
-#
-# minimized for using just the parser within eric6
-# Copyright (c) 2013 - 2019 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-from __future__ import unicode_literals
-
-pseudoTypes = set(["any", "var", "undefined", "null", "true", "false", "this",
-                   "arguments"])
-builtinTypes = set(["Object", "String", "Number", "Boolean", "Array", "Function",
-                    "RegExp", "Date"])
--- a/ThirdParty/Jasy/jasy/parse/AbstractNode.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/ThirdParty/Jasy/jasy/parse/AbstractNode.py	Sat Jan 12 12:11:42 2019 +0100
@@ -3,8 +3,6 @@
 # Copyright 2013-2014 Sebastian Werner
 #
 
-from __future__ import unicode_literals
-
 import json, copy
 
 class AbstractNode(list):
@@ -332,7 +330,7 @@
         """Returns the source code of the node"""
 
         if not self.tokenizer:
-            raise Exception("Could not find source for node '%s'" % self.type)
+            raise Exception("Could not find source for node '%s'" % node.type)
 
         if getattr(self, "start", None) is not None:
             if getattr(self, "end", None) is not None:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/api/Comment.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,677 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import re
+
+import jasy.core.Text as Text
+import jasy.core.Console as Console
+
+from jasy import UserError
+from jasy.script.util import *
+
+
+# Used to measure the doc indent size (with leading stars in front of content)
+docIndentReg = re.compile(r"^(\s*\*\s*)(\S*)")
+
+# Used to split type lists as supported by throw, return and params
+listSplit = re.compile(r"\s*\|\s*")
+
+# Used to remove markup sequences after doc processing of comment text
+stripMarkup = re.compile(r"<.*?>")
+
+
+
+# Matches return blocks in comments
+returnMatcher = re.compile(r"^\s*\{([a-zA-Z0-9_ \.\|\[\]]+)\}")
+
+# Matches type definitions in comments
+typeMatcher = re.compile(r"^\s*\{=([a-zA-Z0-9_ \.]+)\}")
+
+# Matches tags
+tagMatcher = re.compile(r"#([a-zA-Z][a-zA-Z0-9]+)(\((\S+)\))?(\s|$)")
+
+# Matches param declarations in own dialect
+paramMatcher = re.compile(r"@([a-zA-Z0-9_][a-zA-Z0-9_\.]*[a-zA-Z0-9_]|[a-zA-Z0-9_]+)(\s*\{([a-zA-Z0-9_ \.\|\[\]]+?)(\s*\.{3}\s*)?((\s*\?\s*(\S+))|(\s*\?\s*))?\})?")
+
+# Matches links in own dialect
+linkMatcher = re.compile(r"(\{((static|member|property|event)\:)?([a-zA-Z0-9_\.]+)?(\#([a-zA-Z0-9_]+))?\})")
+
+# Matches backticks and has a built-in failsafe for backticks which do not terminate on the same line
+tickMatcher = re.compile(r"(`[^\n`]*?`)")
+
+
+class CommentException(Exception):
+    """
+    Thrown when errors during comment processing are detected.
+    """
+
+    def __init__(self, message, lineNo=0):
+        Exception.__init__(self, "Comment error: %s (line: %s)" % (message, lineNo+1))
+
+
+
+
+class Comment():
+    """
+    The Comment class is attached to parsed nodes and stores all comment-related information.
+
+    The class supports a new Markdown- and TomDoc-inspired dialect to make developers' lives easier and their work less repetitive.
+    """
+
+    # Relation to code
+    context = None
+
+    # Dictionary of tags
+    tags = None
+
+    # Dictionary of params
+    params = None
+
+    # List of return types
+    returns = None
+
+    # Static type
+    type = None
+
+    # Collected text of the comment (without the extracted doc relevant data)
+    text = None
+
+    # Text with extracted / parsed data
+    __processedText = None
+
+    # Text of the comment converted to HTML including highlighting (only for doc comment)
+    __highlightedText = None
+
+    # Text / Code Blocks in the comment
+    __blocks = None
+
+
+    def __init__(self, text, context=None, lineNo=0, indent="", fileId=None):
+
+        # Store context (relation to code)
+        self.context = context
+
+        # Store fileId
+        self.fileId = fileId
+
+        # Figure out the type of the comment based on the starting characters
+
+        # Inline comments
+        if text.startswith("//"):
+            # "// hello" => "   hello"
+            text = "  " + text[2:]
+            self.variant = "single"
+
+        # Doc comments
+        elif text.startswith("/**"):
+            # "/** hello */" => "    hello "
+            text = "   " + text[3:-2]
+            self.variant = "doc"
+
+        # Protected comments which should not be removed (e.g. these are used for license blocks)
+        elif text.startswith("/*!"):
+            # "/*! hello */" => "    hello "
+            text = "   " + text[3:-2]
+            self.variant = "protected"
+
+        # A normal multiline comment
+        elif text.startswith("/*"):
+            # "/* hello */" => "   hello "
+            text = "  " + text[2:-2]
+            self.variant = "multi"
+
+        else:
+            raise CommentException("Invalid comment text: %s" % text, lineNo)
+
+        # Multi line comments need to have their indentation removed
+        if "\n" in text:
+            text = self.__outdent(text, indent, lineNo)
+
+        # For single line comments strip the surrounding whitespace
+        else:
+            # " hello " => "hello"
+            text = text.strip()
+
+        # The text of the comment before any processing took place
+        self.text = text
+
+
+        # Perform annotation parsing, markdown conversion and code highlighting on doc blocks
+        if self.variant == "doc":
+
+            # Separate text and code blocks
+            self.__blocks = self.__splitBlocks(text)
+
+            # Re-combine everything and apply processing and formatting
+            plainText = '' # text without annotations but with markdown
+            for b in self.__blocks:
+
+                if b["type"] == "comment":
+
+                    processed = self.__processDoc(b["text"], lineNo)
+                    b["processed"] = processed
+
+                    if "<" in processed:
+                        plainText += stripMarkup.sub("", processed)
+
+                    else:
+                        plainText += processed
+
+                else:
+                    plainText += "\n\n" + b["text"] + "\n\n"
+
+            # The text without any annotations
+            self.text = plainText.strip()
+
+
+    def __splitBlocks(self, text):
+        """
+        Splits up text and code blocks in comments.
+
+        This will try to use hoedown for Markdown parsing if available and will
+        fallback to a simpler implementation in order to allow processing of
+        doc parameters and links without hoedown being installed.
+        """
+
+        if not Text.supportsMarkdown:
+            return self.__splitSimple(text)
+
+        marked = Text.markdownToHtml(text)
+
+        def unescape(html):
+            html = html.replace('&lt;', '<')
+            html = html.replace('&gt;', '>')
+            html = html.replace('&amp;', '&')
+            html = html.replace('&quot;', '"')
+            return html.replace('&#39;', "'")
+
+        parts = []
+
+        lineNo = 0
+        lines = text.split("\n")
+        markedLines = marked.split("\n")
+
+        i = 0
+        while i < len(markedLines):
+
+            l = markedLines[i]
+
+            # the original text of the line
+            parsed = unescape(stripMarkup.sub("", l))
+
+            # start of a code block, grab all text before it and move it into a block
+            if l.startswith('<pre><code>'):
+
+                # everything since the last code block and before this one must be text
+                comment = []
+                for s in range(lineNo, len(lines)):
+
+                    source = lines[s]
+                    if source.strip() == parsed.strip():
+                        lineNo = s
+                        break
+
+                    comment.append(source)
+
+                parts.append({
+                    "type": "comment",
+                    "text": "\n".join(comment)
+                })
+
+                # Find the end of the code block
+                e = i
+                while i < len(markedLines):
+                    l = markedLines[i]
+                    i += 1
+
+                    if l.startswith('</code></pre>'):
+                        break
+
+                lineCount = (i - e) - 1
+
+                # add the code block
+                parts.append({
+                    "type": "code",
+                    "text": "\n".join(lines[lineNo:lineNo + lineCount])
+                })
+
+                lineNo += lineCount
+
+            else:
+                i += 1
+
+        # append the rest of the comment as text
+        parts.append({
+            "type": "comment",
+            "text": "\n".join(lines[lineNo:])
+        })
+
+        return parts
+
+
+    def __splitSimple(self, text):
+        """Splits comment text and code blocks by manually parsing a subset of markdown"""
+
+        inCode = False
+        oldIndent = 0
+        parts = []
+        wasEmpty = False
+        wasList = False
+
+        lineNo = 0
+        lines = text.split("\n")
+
+        for s, l in enumerate(lines):
+
+            # ignore empty lines
+            if not l.strip() == "":
+
+                # get indentation value and change
+                indent = len(l) - len(l.lstrip())
+                change = indent - oldIndent
+
+                # detect code blocks
+                if change >= 4 and wasEmpty:
+                    if not wasList:
+                        oldIndent = indent
+                        inCode = True
+
+                        parts.append({
+                            "type": "comment",
+                            "text": "\n".join(lines[lineNo:s])
+                        })
+
+                        lineNo = s
+
+                # detect outdents
+                elif change < 0:
+                    inCode = False
+
+                    parts.append({
+                        "type": "code",
+                        "text": "\n".join(lines[lineNo:s - 1])
+                    })
+
+                    lineNo = s
+
+                # only keep track of old previous indentation outside of comments
+                if not inCode:
+                    oldIndent = indent
+
+                # remember whether this marked a list or not
+                wasList = l.strip().startswith('-') or l.strip().startswith('*')
+                wasEmpty = False
+
+            else:
+                wasEmpty = True
+
+        parts.append({
+            "type": "code" if inCode else "comment",
+            "text": "\n".join(lines[lineNo:])
+        })
+
+        return parts
+
+
+    def getHtml(self, highlight=True):
+        """
+        Returns the comment text converted to HTML
+
+        :param highlight: Whether to highlight the code
+        :type highlight: bool
+        """
+
+        if not Text.supportsMarkdown:
+            raise UserError("Markdown is not supported by the system. Documentation comments could converted to HTML.")
+
+        if highlight:
+
+            if self.__highlightedText is None:
+
+                highlightedText = ""
+
+                for block in self.__blocks:
+
+                    if block["type"] == "comment":
+                        highlightedText += Text.highlightCodeBlocks(Text.markdownToHtml(block["processed"]))
+                    else:
+                        highlightedText += "\n%s" % Text.highlightCodeBlocks(Text.markdownToHtml(block["text"]))
+
+                self.__highlightedText = highlightedText
+
+            return self.__highlightedText
+
+        else:
+
+            if self.__processedText is None:
+
+                processedText = ""
+
+                for block in self.__blocks:
+
+                    if block["type"] == "comment":
+                        processedText += Text.markdownToHtml(block["processed"])
+                    else:
+                        processedText += "\n%s\n\n" % block["text"]
+
+                self.__processedText = processedText.strip()
+
+            return self.__processedText
+
+
+    def hasContent(self):
+        return self.variant == "doc" and len(self.text) > 0
+
+
+    def getTags(self):
+        return self.tags
+
+
+    def hasTag(self, name):
+        if not self.tags:
+            return False
+
+        return name in self.tags
+
+
+    def __outdent(self, text, indent, startLineNo):
+        """
+        Outdents multi-line comment text and filters empty lines.
+        """
+
+        lines = []
+
+        # First, split up the comment's lines and remove the leading indentation
+        for lineNo, line in enumerate((indent+text).split("\n")):
+
+            if line.startswith(indent):
+                lines.append(line[len(indent):].rstrip())
+
+            elif line.strip() == "":
+                lines.append("")
+
+            else:
+                # Only warn for doc comments, otherwise it might just be code commented out
+                # which is sometimes formatted pretty crazy when commented out
+                if self.variant == "doc":
+                    Console.warn("Could not outdent doc comment at line %s in %s", startLineNo+lineNo, self.fileId)
+
+                return text
+
+        # Find the first line with real content behind the doc indent and take
+        # its leading "*" prefix (with surrounding whitespace) as the characters
+        # which need to be removed from all lines
+        outdentString = ""
+        for lineNo, line in enumerate(lines):
+
+            if line != "" and line.strip() != "":
+                matchedDocIndent = docIndentReg.match(line)
+
+                if not matchedDocIndent:
+                    # As soon as we find a non doc indent like line we stop
+                    break
+
+                elif matchedDocIndent.group(2) != "":
+                    # otherwise we look for content behind the indent to get the
+                    # correct real indent (with spaces)
+                    outdentString = matchedDocIndent.group(1)
+                    break
+
+            lineNo += 1
+
+        # Process outdenting to all lines (remove the outdentString from the start of the lines)
+        if outdentString != "":
+
+            lineNo = 0
+            outdentStringLen = len(outdentString)
+
+            for lineNo, line in enumerate(lines):
+                if len(line) <= outdentStringLen:
+                    lines[lineNo] = ""
+
+                else:
+                    if not line.startswith(outdentString):
+
+                        # Only warn for doc comments, otherwise it might just be code commented out
+                        # which is sometimes formatted pretty crazy when commented out
+                        if self.variant == "doc":
+                            Console.warn("Invalid indentation in doc comment at line %s in %s", startLineNo+lineNo, self.fileId)
+
+                    else:
+                        lines[lineNo] = line[outdentStringLen:]
+
+        # Merge final lines and remove leading and trailing new lines
+        return "\n".join(lines).strip("\n")
+
+
+    def __processDoc(self, text, startLineNo):
+
+        text = self.__extractStaticType(text)
+        text = self.__extractReturns(text)
+        text = self.__extractTags(text)
+
+        # Collapse new empty lines at start/end
+        text = text.strip("\n\t ")
+
+        parsed = ''
+
+        # Now parse only the text outside of backticks
+        last = 0
+        def split(match):
+
+            # Grab the text before the back tick and process any parameters in it
+            nonlocal parsed
+            nonlocal last
+
+            start, end = match.span()
+            before = text[last:start]
+            parsed += self.__processParams(before) + match.group(1)
+            last = end
+
+        tickMatcher.sub(split, text)
+
+        # add the rest of the text
+        parsed += self.__processParams(text[last:])
+
+        text = self.__processLinks(parsed)
+
+        return text
+
+
+    def __splitTypeList(self, decl):
+
+        if decl is None:
+            return decl
+
+        splitted = listSplit.split(decl.strip())
+
+        result = []
+        for entry in splitted:
+
+            # Figure out if it is marked as array
+            isArray = False
+            if entry.endswith("[]"):
+                isArray = True
+                entry = entry[:-2]
+
+            store = {
+                "name" : entry
+            }
+
+            if isArray:
+                store["array"] = True
+
+            if entry in builtinTypes:
+                store["builtin"] = True
+
+            if entry in pseudoTypes:
+                store["pseudo"] = True
+
+            result.append(store)
+
+        return result
+
+
+
+    def __extractReturns(self, text):
+        """
+        Extracts leading return definition (when type is function)
+        """
+
+        def collectReturn(match):
+            self.returns = self.__splitTypeList(match.group(1))
+            return ""
+
+        return returnMatcher.sub(collectReturn, text)
+
+
+
+    def __extractStaticType(self, text):
+        """
+        Extracts leading type definition (when value is a static type)
+        """
+
+        def collectType(match):
+            self.type = match.group(1).strip()
+            return ""
+
+        return typeMatcher.sub(collectType, text)
+
+
+
+    def __extractTags(self, text):
+        """
+        Extracts all tags inside the given doc comment. They are removed from
+        the text and collected in the "tags" dictionary.
+        """
+
+        def collectTags(match):
+            if not self.tags:
+                self.tags = {}
+
+            name = match.group(1)
+            param = match.group(3)
+
+            if name in self.tags:
+                self.tags[name].add(param)
+            elif param:
+                self.tags[name] = set([param])
+            else:
+                self.tags[name] = True
+
+            return ""
+
+        return tagMatcher.sub(collectTags, text)
+
+
+    def __processParams(self, text):
+
+        def collectParams(match):
+
+            paramName = match.group(1)
+            paramTypes = match.group(3)
+            paramDynamic = match.group(4) is not None
+            paramOptional = match.group(5) is not None
+            paramDefault = match.group(7)
+
+            if paramTypes:
+                paramTypes = self.__splitTypeList(paramTypes)
+
+            if self.params is None:
+                self.params = {}
+
+            params = self.params
+            fullName = match.group(1).strip()
+            names = fullName.split('.')
+
+            for i, mapName in enumerate(names):
+
+                # Ensure we have the map object in the params
+                if mapName not in params:
+                    params[mapName] = {}
+
+                # Add new entries and overwrite if a type is defined in this entry
+                if mapName not in params or paramTypes is not None:
+
+                    # Make sure to not overwrite something like @options {Object} with the type of @options.x {Number}
+                    if i == len(names) - 1:
+
+                        paramEntry = params[mapName] = {}
+
+                        if paramTypes is not None:
+                            paramEntry["type"] = paramTypes
+
+                        if paramDynamic:
+                            paramEntry["dynamic"] = paramDynamic
+
+                        if paramOptional:
+                            paramEntry["optional"] = paramOptional
+
+                        if paramDefault is not None:
+                            paramEntry["default"] = paramDefault
+
+                    else:
+                        paramEntry = params[mapName]
+
+
+                else:
+                    paramEntry = params[mapName]
+
+                # create fields for new map level
+                if i + 1 < len(names):
+                    if not "fields" in paramEntry:
+                        paramEntry["fields"] = {}
+
+                    params = paramEntry["fields"]
+
+            return '<code class="param">%s</code>' % fullName
+
+        return paramMatcher.sub(collectParams, text)
+
+
+    def __processLinks(self, text):
+
+        def formatTypes(match):
+
+            parsedSection = match.group(3)
+            parsedFile = match.group(4)
+            parsedItem = match.group(6)
+
+            # Do not match {}
+            if parsedSection is None and parsedFile is None and parsedItem is None:
+                return match.group(1)
+
+            # Minor corrections
+            if parsedSection and not parsedItem:
+                parsedSection = ""
+
+            attr = ""
+            link = ""
+            label = ""
+
+            if parsedSection:
+                link += '%s:' % parsedSection
+
+            if parsedFile:
+                link += parsedFile
+                label += parsedFile
+
+            if parsedItem:
+                link += "~%s" % parsedItem
+                if label == "":
+                    label = parsedItem
+                else:
+                    label += "#%s" % parsedItem
+
+            # add link to attributes list
+            attr += ' href="#%s"' % link
+
+            # build final HTML
+            return '<a%s><code>%s</code></a>' % (attr, label)
+
+        return linkMatcher.sub(formatTypes, text)
+
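A minimal usage sketch for the class added above (import path as introduced by this changeset, assuming the bundled jasy package is importable; the exact text rendering depends on whether Text.supportsMarkdown is available):

    from jasy.script.api.Comment import Comment

    c = Comment("/** Computes the sum. @first {Number} @second {Number} #internal */")
    print(c.variant)             # doc
    print(c.hasTag("internal"))  # True
    print(sorted(c.params))      # ['first', 'second']
    print(c.text)                # the prose with annotation markup stripped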
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/api/Text.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,38 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import re
+import jasy.core.Console as Console
+
+
+# Used to filter first paragraph from HTML
+paragraphExtract = re.compile(r"^(.*?)(\. |\? |\! |$)")
+newlineMatcher = re.compile(r"\n")
+
+# Used to remove markup sequences after doc processing of comment text
+stripMarkup = re.compile(r"<.*?>")
+
+def extractSummary(text):
+    try:
+        text = stripMarkup.sub("", newlineMatcher.sub(" ", text))
+        matched = paragraphExtract.match(text)
+    except TypeError:
+        matched = None
+
+    if matched:
+        summary = matched.group(1)
+        if summary is not None:
+            if not summary.endswith((".", "!", "?")):
+                summary = summary.strip() + "."
+            return summary
+
+    else:
+        Console.warn("Unable to extract summary for: %s", text)
+
+    return None
+
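A quick sketch of the helper above (same importability assumption as before): newlines are collapsed, markup is stripped, and the first sentence is returned with a trailing period added when missing.

    from jasy.script.api.Text import extractSummary

    print(extractSummary("Splits the text.\nMore detail follows."))  # Splits the text.
    print(extractSummary("No terminator here"))                      # No terminator here.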
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/output/Compressor.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,564 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import re, sys, json
+
+from jasy.script.tokenize.Lang import keywords
+from jasy.script.parse.Lang import expressions, futureReserved
+
+high_unicode = re.compile(r"\\u[2-9A-Fa-f][0-9A-Fa-f]{3}")
+ascii_encoder = json.JSONEncoder(ensure_ascii=True)
+unicode_encoder = json.JSONEncoder(ensure_ascii=False)
+
+#
+# Class
+#
+
+class Compressor:
+    __semicolonSymbol = ";"
+    __commaSymbol = ","
+
+
+    def __init__(self, format=None):
+        if format:
+            if format.has("semicolon"):
+                self.__semicolonSymbol = ";\n"
+
+            if format.has("comma"):
+                self.__commaSymbol = ",\n"
+
+        self.__forcedSemicolon = False
+
+
+
+    #
+    # Main
+    #
+
+    def compress(self, node):
+        type = node.type
+        result = None
+
+        if type in self.__simple:
+            result = type
+        elif type in self.__prefixes:
+            if getattr(node, "postfix", False):
+                result = self.compress(node[0]) + self.__prefixes[node.type]
+            else:
+                result = self.__prefixes[node.type] + self.compress(node[0])
+
+        elif type in self.__dividers:
+            first = self.compress(node[0])
+            second = self.compress(node[1])
+            divider = self.__dividers[node.type]
+
+            # Fast path
+            if node.type not in ("plus", "minus"):
+                result = "%s%s%s" % (first, divider, second)
+
+            # Special code for dealing with situations like x + ++y and y-- - x
+            else:
+                result = first
+                if first.endswith(divider):
+                    result += " "
+
+                result += divider
+
+                if second.startswith(divider):
+                    result += " "
+
+                result += second
+
+        else:
+            try:
+                result = getattr(self, "type_%s" % type)(node)
+            except AttributeError:
+                raise Exception("Script compressor does not support type '%s' from line %s in file %s" % (type, node.line, node.getFileName()))
+
+        if getattr(node, "parenthesized", None):
+            return "(%s)" % result
+        else:
+            return result
+
+
+
+    #
+    # Helpers
+    #
+
+    def __statements(self, node):
+        result = []
+        for child in node:
+            result.append(self.compress(child))
+
+        return "".join(result)
+
+    def __handleForcedSemicolon(self, node):
+        if node.type == "semicolon" and not hasattr(node, "expression"):
+            self.__forcedSemicolon = True
+
+    def __addSemicolon(self, result):
+        if not result.endswith(self.__semicolonSymbol):
+            if self.__forcedSemicolon:
+                self.__forcedSemicolon = False
+
+            return result + self.__semicolonSymbol
+
+        else:
+            return result
+
+    def __removeSemicolon(self, result):
+        if self.__forcedSemicolon:
+            self.__forcedSemicolon = False
+            return result
+
+        if result.endswith(self.__semicolonSymbol):
+            return result[:-len(self.__semicolonSymbol)]
+        else:
+            return result
+
+
+    #
+    # Data
+    #
+
+    __simple_property = re.compile(r"^[a-zA-Z_$][a-zA-Z0-9_$]*$")
+    __number_property = re.compile(r"^[0-9]+$")
+
+    __simple = ["true", "false", "null", "this", "debugger"]
+
+    __dividers = {
+        "plus"        : '+',
+        "minus"       : '-',
+        "mul"         : '*',
+        "div"         : '/',
+        "mod"         : '%',
+        "dot"         : '.',
+        "or"          : "||",
+        "and"         : "&&",
+        "strict_eq"   : '===',
+        "eq"          : '==',
+        "strict_ne"   : '!==',
+        "ne"          : '!=',
+        "lsh"         : '<<',
+        "le"          : '<=',
+        "lt"          : '<',
+        "ursh"        : '>>>',
+        "rsh"         : '>>',
+        "ge"          : '>=',
+        "gt"          : '>',
+        "bitwise_or"  : '|',
+        "bitwise_xor" : '^',
+        "bitwise_and" : '&'
+    }
+
+    __prefixes = {
+        "increment"   : "++",
+        "decrement"   : "--",
+        "bitwise_not" : '~',
+        "not"         : "!",
+        "unary_plus"  : "+",
+        "unary_minus" : "-",
+        "delete"      : "delete ",
+        "new"         : "new ",
+        "typeof"      : "typeof ",
+        "void"        : "void "
+    }
+
+
+
+    #
+    # Script Scope
+    #
+
+    def type_script(self, node):
+        return self.__statements(node)
+
+
+
+    #
+    # Expressions
+    #
+
+    def type_comma(self, node):
+        return self.__commaSymbol.join(map(self.compress, node))
+
+    def type_object_init(self, node):
+        return "{%s}" % self.__commaSymbol.join(map(self.compress, node))
+
+    def type_property_init(self, node):
+        key = self.compress(node[0])
+        value = self.compress(node[1])
+
+        if type(key) in (int, float):
+            pass
+
+        elif self.__number_property.match(key):
+            pass
+
+        # Protect keywords and special characters
+        elif key in keywords or key in futureReserved or not self.__simple_property.match(key):
+            key = self.type_string(node[0])
+
+        return "%s:%s" % (key, value)
+
+    def type_array_init(self, node):
+        def helper(child):
+            return self.compress(child) if child is not None else ""
+
+        return "[%s]" % ",".join(map(helper, node))
+
+    def type_array_comp(self, node):
+        return "[%s %s]" % (self.compress(node.expression), self.compress(node.tail))
+
+    def type_string(self, node):
+        # Omit writing real high unicode characters, which are not well supported by browsers
+        ascii = ascii_encoder.encode(node.value)
+
+        if high_unicode.search(ascii):
+            return ascii
+        else:
+            return unicode_encoder.encode(node.value)
+
+    def type_number(self, node):
+        value = node.value
+
+        # Special handling for protected float/exponential
+        if type(value) == str:
+            # Convert zero-prefix
+            if value.startswith("0.") and len(value) > 2:
+                value = value[1:]
+
+            # Convert zero postfix
+            elif value.endswith(".0"):
+                value = value[:-2]
+
+        elif int(value) == value and node.parent.type != "dot":
+            value = int(value)
+
+        return "%s" % value
+
+    def type_regexp(self, node):
+        return node.value
+
+    def type_identifier(self, node):
+        return node.value
+
+    def type_list(self, node):
+        return ",".join(map(self.compress, node))
+
+    def type_index(self, node):
+        return "%s[%s]" % (self.compress(node[0]), self.compress(node[1]))
+
+    def type_declaration(self, node):
+        names = getattr(node, "names", None)
+        if names:
+            result = self.compress(names)
+        else:
+            result = node.name
+
+        initializer = getattr(node, "initializer", None)
+        if initializer:
+            result += "=%s" % self.compress(node.initializer)
+
+        return result
+
+    def type_assign(self, node):
+        assignOp = getattr(node, "assignOp", None)
+        operator = "=" if not assignOp else self.__dividers[assignOp] + "="
+
+        return self.compress(node[0]) + operator + self.compress(node[1])
+
+    def type_call(self, node):
+        return "%s(%s)" % (self.compress(node[0]), self.compress(node[1]))
+
+    def type_new_with_args(self, node):
+        result = "new %s" % self.compress(node[0])
+
+        # Compress new Object(); => new Object;
+        if len(node[1]) > 0:
+            result += "(%s)" % self.compress(node[1])
+        else:
+            parent = getattr(node, "parent", None)
+            if parent and parent.type == "dot":
+                result += "()"
+
+        return result
+
+    def type_exception(self, node):
+        return node.value
+
+    def type_generator(self, node):
+        """ Generator Expression """
+        result = self.compress(getattr(node, "expression"))
+        tail = getattr(node, "tail", None)
+        if tail:
+            result += " %s" % self.compress(tail)
+
+        return result
+
+    def type_comp_tail(self, node):
+        """  Comprehensions Tails """
+        result = self.compress(getattr(node, "for"))
+        guard = getattr(node, "guard", None)
+        if guard:
+            result += "if(%s)" % self.compress(guard)
+
+        return result
+
+    def type_in(self, node):
+        first = self.compress(node[0])
+        second = self.compress(node[1])
+
+        if first.endswith("'") or first.endswith('"'):
+            pattern = "%sin %s"
+        else:
+            pattern = "%s in %s"
+
+        return pattern % (first, second)
+
+    def type_instanceof(self, node):
+        first = self.compress(node[0])
+        second = self.compress(node[1])
+
+        return "%s instanceof %s" % (first, second)
+
+
+
+    #
+    # Statements :: Core
+    #
+
+    def type_block(self, node):
+        return "{%s}" % self.__removeSemicolon(self.__statements(node))
+
+    def type_let_block(self, node):
+        begin = "let(%s)" % ",".join(map(self.compress, node.variables))
+        if hasattr(node, "block"):
+            end = self.compress(node.block)
+        elif hasattr(node, "expression"):
+            end = self.compress(node.expression)
+
+        return begin + end
+
+    def type_const(self, node):
+        return self.__addSemicolon("const %s" % self.type_list(node))
+
+    def type_var(self, node):
+        return self.__addSemicolon("var %s" % self.type_list(node))
+
+    def type_let(self, node):
+        return self.__addSemicolon("let %s" % self.type_list(node))
+
+    def type_semicolon(self, node):
+        expression = getattr(node, "expression", None)
+        return self.__addSemicolon(self.compress(expression) if expression else "")
+
+    def type_label(self, node):
+        return self.__addSemicolon("%s:%s" % (node.label, self.compress(node.statement)))
+
+    def type_break(self, node):
+        return self.__addSemicolon("break" if not hasattr(node, "label") else "break %s" % node.label)
+
+    def type_continue(self, node):
+        return self.__addSemicolon("continue" if not hasattr(node, "label") else "continue %s" % node.label)
+
+
+    #
+    # Statements :: Functions
+    #
+
+    def type_function(self, node):
+        if node.type == "setter":
+            result = "set"
+        elif node.type == "getter":
+            result = "get"
+        else:
+            result = "function"
+
+        name = getattr(node, "name", None)
+        if name:
+            result += " %s" % name
+
+        params = getattr(node, "params", None)
+        result += "(%s)" % self.compress(params) if params else "()"
+
+        # keep expression closure format (may be micro-optimized for other code, too)
+        if getattr(node, "expressionClosure", False):
+            result += self.compress(node.body)
+        else:
+            result += "{%s}" % self.__removeSemicolon(self.compress(node.body))
+
+        return result
+
+    def type_getter(self, node):
+        return self.type_function(node)
+
+    def type_setter(self, node):
+        return self.type_function(node)
+
+    def type_return(self, node):
+        result = "return"
+        if hasattr(node, "value"):
+            valueCode = self.compress(node.value)
+
+            # Micro optimization: Don't need a space when a block/map/array/group/strings are returned
+            if not valueCode.startswith(("(","[","{","'",'"',"!","-","/")):
+                result += " "
+
+            result += valueCode
+
+        return self.__addSemicolon(result)
+
+
+
+    #
+    # Statements :: Exception Handling
+    #
+
+    def type_throw(self, node):
+        return self.__addSemicolon("throw %s" % self.compress(node.exception))
+
+    def type_try(self, node):
+        result = "try%s" % self.compress(node.tryBlock)
+
+        for catch in node:
+            if catch.type == "catch":
+                if hasattr(catch, "guard"):
+                    result += "catch(%s if %s)%s" % (self.compress(catch.exception), self.compress(catch.guard), self.compress(catch.block))
+                else:
+                    result += "catch(%s)%s" % (self.compress(catch.exception), self.compress(catch.block))
+
+        if hasattr(node, "finallyBlock"):
+            result += "finally%s" % self.compress(node.finallyBlock)
+
+        return result
+
+
+
+    #
+    # Statements :: Loops
+    #
+
+    def type_while(self, node):
+        result = "while(%s)%s" % (self.compress(node.condition), self.compress(node.body))
+        self.__handleForcedSemicolon(node.body)
+        return result
+
+
+    def type_do(self, node):
+        # Block unwrapping doesn't help to reduce size on this loop type,
+        # but if it happens (we don't want to modify a global function to fix
+        # a local issue) we need to fix the body and re-add braces around the statement
+        body = self.compress(node.body)
+        if not body.startswith("{"):
+            body = "{%s}" % body
+
+        return self.__addSemicolon("do%swhile(%s)" % (body, self.compress(node.condition)))
+
+
+    def type_for_in(self, node):
+        # Optional variable declarations
+        varDecl = getattr(node, "varDecl", None)
+
+        # Body is optional - at least in comprehensions tails
+        body = getattr(node, "body", None)
+        if body:
+            body = self.compress(body)
+        else:
+            body = ""
+
+        result = "for"
+        if node.isEach:
+            result += " each"
+
+        result += "(%s in %s)%s" % (self.__removeSemicolon(self.compress(node.iterator)), self.compress(node.object), body)
+
+        if body:
+            self.__handleForcedSemicolon(node.body)
+
+        return result
+
+
+    def type_for(self, node):
+        setup = getattr(node, "setup", None)
+        condition = getattr(node, "condition", None)
+        update = getattr(node, "update", None)
+
+        result = "for("
+        result += self.__addSemicolon(self.compress(setup) if setup else "")
+        result += self.__addSemicolon(self.compress(condition) if condition else "")
+        result += self.compress(update) if update else ""
+        result += ")%s" % self.compress(node.body)
+
+        self.__handleForcedSemicolon(node.body)
+        return result
+
+
+
+    #
+    # Statements :: Conditionals
+    #
+
+    def type_hook(self, node):
+        """aka ternary operator"""
+        condition = node.condition
+        thenPart = node.thenPart
+        elsePart = node.elsePart
+
+        if condition.type == "not":
+            [thenPart,elsePart] = [elsePart,thenPart]
+            condition = condition[0]
+
+        return "%s?%s:%s" % (self.compress(condition), self.compress(thenPart), self.compress(elsePart))
+
+
+    def type_if(self, node):
+        result = "if(%s)%s" % (self.compress(node.condition), self.compress(node.thenPart))
+
+        elsePart = getattr(node, "elsePart", None)
+        if elsePart:
+            result += "else"
+
+            elseCode = self.compress(elsePart)
+
+            # Micro optimization: Don't need a space when the child is a block
+            # At this time the brace could not be part of a map declaration (would be a syntax error)
+            if not elseCode.startswith(("{", "(", ";")):
+                result += " "
+
+            result += elseCode
+
+            self.__handleForcedSemicolon(elsePart)
+
+        return result
+
+
+    def type_switch(self, node):
+        result = "switch(%s){" % self.compress(node.discriminant)
+        for case in node:
+            if case.type == "case":
+                labelCode = self.compress(case.label)
+                if labelCode.startswith('"'):
+                    result += "case%s:" % labelCode
+                else:
+                    result += "case %s:" % labelCode
+            elif case.type == "default":
+                result += "default:"
+            else:
+                continue
+
+            for statement in case.statements:
+                temp = self.compress(statement)
+                if len(temp) > 0:
+                    result += self.__addSemicolon(temp)
+
+        return "%s}" % self.__removeSemicolon(result)
+
+
+
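A short round trip through the modules added in this changeset (a sketch under the same importability assumption): parse a script with the Parser added further below, then re-emit it compressed.

    import jasy.script.parse.Parser as Parser
    from jasy.script.output.Compressor import Compressor

    node = Parser.parse("var answer = 6 * 7;\nif (answer > 10) { answer = 10; }")
    print(Compressor().compress(node))
    # roughly: var answer=6*7;if(answer>10){answer=10}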
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/parse/Lang.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,211 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+futureReserved = set([
+    "abstract",
+    "boolean",
+    "byte",
+    "char",
+    "class",
+    "const",
+    "debugger",
+    "double",
+    "enum",
+    "export",
+    "extends",
+    "final",
+    "float",
+    "goto",
+    "implements",
+    "import",
+    "int",
+    "interface",
+    "long",
+    "native",
+    "package",
+    "private",
+    "protected",
+    "public",
+    "short",
+    "static",
+    "super",
+    "synchronized",
+    "throws",
+    "transient",
+    "volatile"
+])
+
+
+statements = [
+    # With semicolon at end
+    "semicolon",
+    "return",
+    "throw",
+    "label",
+    "break",
+    "continue",
+    "var",
+    "const",
+    "debugger",
+
+    # Only semicolon when no-block braces are created
+    "block",
+    "let_block",
+    "while",
+    "do",
+    "for",
+    "for_in",
+    "if",
+    "switch",
+    "hook",
+    "with",
+
+    # no semicolons
+    # function, setter and getter as statement_form or declared_form
+    "function",
+    "setter",
+    "getter",
+    "try",
+    "label"
+]
+
+
+# All allowed expression types of JavaScript 1.7
+# They may be separated by "comma", which is quite special
+# and not allowed everywhere, e.g. in conditional statements
+expressions = [
+    # Primary Expression - Part 1 (expressed form)
+    "function",
+
+    # Primary Expression - Part 2
+    "object_init",
+    "array_init",
+    "array_comp",
+
+    # Primary Expression - Part 3
+    "let",
+
+    # Primary Expression - Part 4
+    "null",
+    "this",
+    "true",
+    "false",
+    "identifier",
+    "number",
+    "string",
+    "regexp",
+
+    # Member Expression - Part 1
+    "new_with_args",
+    "new",
+
+    # Member Expression - Part 2
+    "dot",
+    "call",
+    "index",
+
+    # Unary Expression
+    "unary_plus",
+    "unary_minus",
+    "delete",
+    "void",
+    "typeof",
+    "not",
+    "bitwise_not",
+    "increment",
+    "decrement",
+
+    # Multiply Expression
+    "mul",
+    "div",
+    "mod",
+
+    # Add Expression
+    "plus",
+    "minus",
+
+    # Shift Expression
+    "lsh",
+    "rsh",
+    "ursh",
+
+    # Relational Expression
+    "lt",
+    "le",
+    "ge",
+    "gt",
+    "in",
+    "instanceof",
+
+    # Equality Expression
+    "eq",
+    "ne",
+    "strict_eq",
+    "strict_ne",
+
+    # BitwiseAnd Expression
+    "bitwise_and",
+
+    # BitwiseXor Expression
+    "bitwise_xor",
+
+    # BitwiseOr Expression
+    "bitwise_or",
+
+    # And Expression
+    "and",
+
+    # Or Expression
+    "or",
+
+    # Conditional Expression
+    "hook",
+
+    # Assign Expression
+    "assign",
+
+    # Expression
+    "comma"
+]
+
+
+
+
+def __createOrder():
+    expressions = [
+        ["comma"],
+        ["assign"],
+        ["hook"],
+        ["or"],
+        ["and"],
+        ["bitwise_or"],
+        ["bitwise_xor",],
+        ["bitwise_and"],
+        ["eq","ne","strict_eq","strict_ne"],
+        ["lt","le","ge","gt","in","instanceof"],
+        ["lsh","rsh","ursh"],
+        ["plus","minus"],
+        ["mul","div","mod"],
+        ["unary_plus","unary_minus","delete","void","typeof","not","bitwise_not","increment","decrement"],
+        ["dot","call","index"],
+        ["new_with_args","new"],
+        ["null","this","true","false","identifier","number","string","regexp"],
+        ["let"],
+        ["object_init","array_init","array_comp"],
+        ["function"]
+    ]
+
+    result = {}
+    for priority, itemList in enumerate(expressions):
+        for item in itemList:
+            result[item] = priority
+
+    return result
+
+expressionOrder = __createOrder()
+
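expressionOrder maps each node type to a precedence rank (0 is the weakest binding, "comma"). A sketch of how a consumer can compare operators:

    from jasy.script.parse.Lang import expressionOrder

    print(expressionOrder["comma"])                          # 0
    print(expressionOrder["mul"] > expressionOrder["plus"])  # True: * binds tighter than +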
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/parse/Node.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,26 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+import jasy.parse.AbstractNode as AbstractNode
+
+class Node(AbstractNode.AbstractNode):
+
+    __slots__ = [
+        # core data
+        "line", "type", "tokenizer", "start", "end", "rel", "parent",
+
+        # dynamic added data by other modules
+        "comments", "scope",
+
+        # node type specific
+        "value", "expression", "body", "functionForm", "parenthesized", "fileId", "params",
+        "name", "readOnly", "initializer", "condition", "isLoop", "isEach", "object", "assignOp",
+        "iterator", "thenPart", "exception", "elsePart", "setup", "postfix", "update", "tryBlock",
+        "block", "defaultIndex", "discriminant", "label", "statements", "finallyBlock",
+        "statement", "variables", "names", "guard", "for", "tail", "expressionClosure"
+    ]
+
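Why __slots__ here: a parsed script produces many small Node instances, and slots avoid allocating a per-instance __dict__ (provided the whole class hierarchy uses them) while rejecting unknown attributes. A generic illustration, not jasy code:

    class Slotted(list):
        __slots__ = ["line", "type"]

    n = Slotted()
    n.type = "number"      # fine: a declared slot
    try:
        n.anything = 1     # undeclared -> AttributeError
    except AttributeError as exc:
        print(exc)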
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/parse/Parser.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,1448 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors:
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010-2012)
+#
+
+from __future__ import unicode_literals
+
+import jasy.script.tokenize.Tokenizer
+import jasy.script.parse.VanillaBuilder
+import jasy.script.tokenize.Lang
+
+__all__ = [ "parse", "parseExpression" ]
+
+def parseExpression(source, fileId=None, line=1, builder=None):
+    if builder == None:
+        builder = jasy.script.parse.VanillaBuilder.VanillaBuilder()
+
+    # Convert source into expression statement to be friendly to the Tokenizer
+    if not source.endswith(";"):
+        source = source + ";"
+
+    tokenizer = jasy.script.tokenize.Tokenizer.Tokenizer(source, fileId, line)
+    staticContext = StaticContext(False, builder)
+
+    return Expression(tokenizer, staticContext)
+
+
+
+def parse(source, fileId=None, line=1, builder=None):
+    if builder == None:
+        builder = jasy.script.parse.VanillaBuilder.VanillaBuilder()
+
+    tokenizer = jasy.script.tokenize.Tokenizer.Tokenizer(source, fileId, line)
+    staticContext = StaticContext(False, builder)
+    node = Script(tokenizer, staticContext)
+
+    # store fileId on top-level node
+    node.fileId = tokenizer.fileId
+
+    # add missing comments e.g. empty file with only a comment etc.
+    # if there is something non-attached by an inner node it is attached to
+    # the top level node, which is not correct, but might be better than
+    # just ignoring the comment after all.
+    if len(node) > 0:
+        builder.COMMENTS_add(node[-1], None, tokenizer.getComments())
+    else:
+        builder.COMMENTS_add(node, None, tokenizer.getComments())
+
+    if not tokenizer.done():
+        raise SyntaxError("Unexpected end of file", tokenizer)
+
+    return node
+
+
+
+class SyntaxError(Exception):
+    def __init__(self, message, tokenizer):
+        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, tokenizer.fileId, tokenizer.line))
+
+
+# Used as a status container during tree-building for every def body and the global body
+class StaticContext(object):
+    # inFunction is used to check if a return stm appears in a valid context.
+    def __init__(self, inFunction, builder):
+        # Whether this is inside a function, mostly True, only for top-level scope it's False
+        self.inFunction = inFunction
+
+        self.hasEmptyReturn = False
+        self.hasReturnWithValue = False
+        self.isGenerator = False
+        self.blockId = 0
+        self.builder = builder
+        self.statementStack = []
+
+        # Sets to store variable uses
+        # self.functions = set()
+        # self.variables = set()
+
+        # Status
+        # self.needsHoisting = False
+        self.bracketLevel = 0
+        self.curlyLevel = 0
+        self.parenLevel = 0
+        self.hookLevel = 0
+
+        # Configure strict ecmascript 3 mode
+        self.ecma3OnlyMode = False
+
+        # Status flag during parsing
+        self.inForLoopInit = False
+
+
+def Script(tokenizer, staticContext):
+    """Parses the toplevel and def bodies."""
+    node = Statements(tokenizer, staticContext)
+
+    # change type from "block" to "script" for script root
+    node.type = "script"
+
+    # copy over data from compiler context
+    # node.functions = staticContext.functions
+    # node.variables = staticContext.variables
+
+    return node
+
+
+def nest(tokenizer, staticContext, node, func, end=None):
+    """Statement stack and nested statement handler."""
+    staticContext.statementStack.append(node)
+    node = func(tokenizer, staticContext)
+    staticContext.statementStack.pop()
+    if end:
+        tokenizer.mustMatch(end)
+
+    return node
+
+
+def Statements(tokenizer, staticContext):
+    """Parses a list of Statements."""
+
+    builder = staticContext.builder
+    node = builder.BLOCK_build(tokenizer, staticContext.blockId)
+    staticContext.blockId += 1
+
+    builder.BLOCK_hoistLets(node)
+    staticContext.statementStack.append(node)
+
+    prevNode = None
+    while not tokenizer.done() and tokenizer.peek(True) != "right_curly":
+        comments = tokenizer.getComments()
+        childNode = Statement(tokenizer, staticContext)
+        builder.COMMENTS_add(childNode, prevNode, comments)
+        builder.BLOCK_addStatement(node, childNode)
+        prevNode = childNode
+
+    staticContext.statementStack.pop()
+    builder.BLOCK_finish(node)
+
+    # if getattr(node, "needsHoisting", False):
+    #     # TODO
+    #     raise Exception("Needs hoisting went true!!!")
+    #     builder.setHoists(node.id, node.variables)
+    #     # Propagate up to the function.
+    #     staticContext.needsHoisting = True
+
+    return node
+
+
+def Block(tokenizer, staticContext):
+    tokenizer.mustMatch("left_curly")
+    node = Statements(tokenizer, staticContext)
+    tokenizer.mustMatch("right_curly")
+
+    return node
+
+
+def Statement(tokenizer, staticContext):
+    """Parses a Statement."""
+
+    tokenType = tokenizer.get(True)
+    builder = staticContext.builder
+
+    # Cases for statements ending in a right curly return early, avoiding the
+    # common semicolon insertion magic after this switch.
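+    # Branches marked "# NO RETURN" below fall through deliberately to the
+    # MagicalSemicolon() call at the end of this function.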
+
+    if tokenType == "function":
+        # "declared_form" extends functions of staticContext, "statement_form" doesn'tokenizer.
+        if len(staticContext.statementStack) > 1:
+            kind = "statement_form"
+        else:
+            kind = "declared_form"
+
+        return FunctionDefinition(tokenizer, staticContext, True, kind)
+
+
+    elif tokenType == "left_curly":
+        node = Statements(tokenizer, staticContext)
+        tokenizer.mustMatch("right_curly")
+
+        return node
+
+
+    elif tokenType == "if":
+        node = builder.IF_build(tokenizer)
+        builder.IF_setCondition(node, ParenExpression(tokenizer, staticContext))
+        staticContext.statementStack.append(node)
+        builder.IF_setThenPart(node, Statement(tokenizer, staticContext))
+
+        if tokenizer.match("else"):
+            comments = tokenizer.getComments()
+            elsePart = Statement(tokenizer, staticContext)
+            builder.COMMENTS_add(elsePart, node, comments)
+            builder.IF_setElsePart(node, elsePart)
+
+        staticContext.statementStack.pop()
+        builder.IF_finish(node)
+
+        return node
+
+
+    elif tokenType == "switch":
+        # This allows CASEs after a "default", which is in the standard.
+        node = builder.SWITCH_build(tokenizer)
+        builder.SWITCH_setDiscriminant(node, ParenExpression(tokenizer, staticContext))
+        staticContext.statementStack.append(node)
+
+        tokenizer.mustMatch("left_curly")
+        tokenType = tokenizer.get()
+
+        while tokenType != "right_curly":
+            if tokenType == "default":
+                if node.defaultIndex >= 0:
+                    raise SyntaxError("More than one switch default", tokenizer)
+
+                childNode = builder.DEFAULT_build(tokenizer)
+                builder.SWITCH_setDefaultIndex(node, len(node)-1)
+                tokenizer.mustMatch("colon")
+                builder.DEFAULT_initializeStatements(childNode, tokenizer)
+
+                while True:
+                    tokenType = tokenizer.peek(True)
+                    if tokenType in ("case", "default", "right_curly"):
+                        break
+                    builder.DEFAULT_addStatement(childNode, Statement(tokenizer, staticContext))
+
+                builder.DEFAULT_finish(childNode)
+
+            elif tokenType == "case":
+                childNode = builder.CASE_build(tokenizer)
+                builder.CASE_setLabel(childNode, Expression(tokenizer, staticContext))
+                tokenizer.mustMatch("colon")
+                builder.CASE_initializeStatements(childNode, tokenizer)
+
+                while True:
+                    tokenType = tokenizer.peek(True)
+                    if tokenType in ("case", "default", "right_curly"):
+                        break
+                    builder.CASE_addStatement(childNode, Statement(tokenizer, staticContext))
+
+                builder.CASE_finish(childNode)
+
+            else:
+                raise SyntaxError("Invalid switch case", tokenizer)
+
+            builder.SWITCH_addCase(node, childNode)
+            tokenType = tokenizer.get()
+
+        staticContext.statementStack.pop()
+        builder.SWITCH_finish(node)
+
+        return node
+
+
+    elif tokenType == "for":
+        node = builder.FOR_build(tokenizer)
+        forBlock = None
+
+        if tokenizer.match("identifier") and tokenizer.token.value == "each":
+            builder.FOR_rebuildForEach(node)
+
+        tokenizer.mustMatch("left_paren")
+        tokenType = tokenizer.peek()
+        childNode = None
+
+        if tokenType != "semicolon":
+            staticContext.inForLoopInit = True
+
+            if tokenType == "var" or tokenType == "const":
+                tokenizer.get()
+                childNode = Variables(tokenizer, staticContext)
+
+            elif tokenType == "let":
+                tokenizer.get()
+
+                if tokenizer.peek() == "left_paren":
+                    childNode = LetBlock(tokenizer, staticContext, False)
+
+                else:
+                    # Let in for head, we need to add an implicit block
+                    # around the rest of the for.
+                    forBlock = builder.BLOCK_build(tokenizer, staticContext.blockId)
+                    staticContext.blockId += 1
+                    staticContext.statementStack.append(forBlock)
+                    childNode = Variables(tokenizer, staticContext, forBlock)
+
+            else:
+                childNode = Expression(tokenizer, staticContext)
+
+            staticContext.inForLoopInit = False
+
+        if childNode and tokenizer.match("in"):
+            builder.FOR_rebuildForIn(node)
+            builder.FOR_setObject(node, Expression(tokenizer, staticContext), forBlock)
+
+            if childNode.type == "var" or childNode.type == "let":
+                if len(childNode) != 1:
+                    raise SyntaxError("Invalid for..in left-hand side", tokenizer)
+
+                builder.FOR_setIterator(node, childNode, forBlock)
+
+            else:
+                builder.FOR_setIterator(node, childNode, forBlock)
+
+        else:
+            builder.FOR_setSetup(node, childNode)
+            tokenizer.mustMatch("semicolon")
+
+            if node.isEach:
+                raise SyntaxError("Invalid for each..in loop", tokenizer)
+
+            if tokenizer.peek() == "semicolon":
+                builder.FOR_setCondition(node, None)
+            else:
+                builder.FOR_setCondition(node, Expression(tokenizer, staticContext))
+
+            tokenizer.mustMatch("semicolon")
+
+            if tokenizer.peek() == "right_paren":
+                builder.FOR_setUpdate(node, None)
+            else:
+                builder.FOR_setUpdate(node, Expression(tokenizer, staticContext))
+
+        tokenizer.mustMatch("right_paren")
+        builder.FOR_setBody(node, nest(tokenizer, staticContext, node, Statement))
+
+        if forBlock:
+            builder.BLOCK_finish(forBlock)
+            staticContext.statementStack.pop()
+
+        builder.FOR_finish(node)
+        return node
+
+
+    elif tokenType == "while":
+        node = builder.WHILE_build(tokenizer)
+
+        builder.WHILE_setCondition(node, ParenExpression(tokenizer, staticContext))
+        builder.WHILE_setBody(node, nest(tokenizer, staticContext, node, Statement))
+        builder.WHILE_finish(node)
+
+        return node
+
+
+    elif tokenType == "do":
+        node = builder.DO_build(tokenizer)
+
+        builder.DO_setBody(node, nest(tokenizer, staticContext, node, Statement, "while"))
+        builder.DO_setCondition(node, ParenExpression(tokenizer, staticContext))
+        builder.DO_finish(node)
+
+        if not staticContext.ecma3OnlyMode:
+            # <script language="JavaScript"> (without version hints) may need
+            # automatic semicolon insertion without a newline after do-while.
+            # See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
+            tokenizer.match("semicolon")
+            return node
+
+        # NO RETURN
+
+
+    elif tokenType == "break" or tokenType == "continue":
+        if tokenType == "break":
+            node = builder.BREAK_build(tokenizer)
+        else:
+            node = builder.CONTINUE_build(tokenizer)
+
+        if tokenizer.peekOnSameLine() == "identifier":
+            tokenizer.get()
+
+            if tokenType == "break":
+                builder.BREAK_setLabel(node, tokenizer.token.value)
+            else:
+                builder.CONTINUE_setLabel(node, tokenizer.token.value)
+
+        statementStack = staticContext.statementStack
+        i = len(statementStack)
+        label = node.label if hasattr(node, "label") else None
+
+        if label:
+            while True:
+                i -= 1
+                if i < 0:
+                    raise SyntaxError("Label not found", tokenizer)
+                if getattr(statementStack[i], "label", None) == label:
+                    break
+
+            #
+            # Both break and continue to label need to be handled specially
+            # within a labeled loop, so that they target that loop. If not in
+            # a loop, then break targets its labeled statement. Labels can be
+            # nested so we skip all labels immediately enclosing the nearest
+            # non-label statement.
+            #
+            while i < len(statementStack) - 1 and statementStack[i+1].type == "label":
+                i += 1
+
+            if i < len(statementStack) - 1 and getattr(statementStack[i+1], "isLoop", False):
+                i += 1
+            elif tokenType == "continue":
+                raise SyntaxError("Invalid continue", tokenizer)
+
+        else:
+            while True:
+                i -= 1
+                if i < 0:
+                    if tokenType == "break":
+                        raise SyntaxError("Invalid break", tokenizer)
+                    else:
+                        raise SyntaxError("Invalid continue", tokenizer)
+
+                if getattr(statementStack[i], "isLoop", False) or (tokenType == "break" and statementStack[i].type == "switch"):
+                    break
+
+        if tokenType == "break":
+            builder.BREAK_finish(node)
+        else:
+            builder.CONTINUE_finish(node)
+
+        # NO RETURN
+
+
+    elif tokenType == "try":
+        node = builder.TRY_build(tokenizer)
+        builder.TRY_setTryBlock(node, Block(tokenizer, staticContext))
+
+        while tokenizer.match("catch"):
+            childNode = builder.CATCH_build(tokenizer)
+            tokenizer.mustMatch("left_paren")
+            nextTokenType = tokenizer.get()
+
+            if nextTokenType == "left_bracket" or nextTokenType == "left_curly":
+                # Destructured catch identifiers.
+                tokenizer.unget()
+                exception = DestructuringExpression(tokenizer, staticContext, True)
+
+            elif nextTokenType == "identifier":
+                exception = builder.CATCH_wrapException(tokenizer)
+
+            else:
+                raise SyntaxError("Missing identifier in catch", tokenizer)
+
+            builder.CATCH_setException(childNode, exception)
+
+            if tokenizer.match("if"):
+                if staticContext.ecma3OnlyMode:
+                    raise SyntaxError("Illegal catch guard", tokenizer)
+
+                if node.getChildrenLength() > 0 and not node.getUnrelatedChildren()[0].guard:
+                    raise SyntaxError("Guarded catch after unguarded", tokenizer)
+
+                builder.CATCH_setGuard(childNode, Expression(tokenizer, staticContext))
+
+            else:
+                builder.CATCH_setGuard(childNode, None)
+
+            tokenizer.mustMatch("right_paren")
+
+            builder.CATCH_setBlock(childNode, Block(tokenizer, staticContext))
+            builder.CATCH_finish(childNode)
+
+            builder.TRY_addCatch(node, childNode)
+
+        builder.TRY_finishCatches(node)
+
+        if tokenizer.match("finally"):
+            builder.TRY_setFinallyBlock(node, Block(tokenizer, staticContext))
+
+        if node.getChildrenLength() == 0 and not hasattr(node, "finallyBlock"):
+            raise SyntaxError("Invalid try statement", tokenizer)
+
+        builder.TRY_finish(node)
+        return node
+
+
+    elif tokenType == "catch" or tokenType == "finally":
+        raise SyntaxError(tokenType + " without preceding try", tokenizer)
+
+
+    elif tokenType == "throw":
+        node = builder.THROW_build(tokenizer)
+
+        builder.THROW_setException(node, Expression(tokenizer, staticContext))
+        builder.THROW_finish(node)
+
+        # NO RETURN
+
+
+    elif tokenType == "return":
+        node = returnOrYield(tokenizer, staticContext)
+
+        # NO RETURN
+
+
+    elif tokenType == "with":
+        node = builder.WITH_build(tokenizer)
+
+        builder.WITH_setObject(node, ParenExpression(tokenizer, staticContext))
+        builder.WITH_setBody(node, nest(tokenizer, staticContext, node, Statement))
+        builder.WITH_finish(node)
+
+        return node
+
+
+    elif tokenType == "var" or tokenType == "const":
+        node = Variables(tokenizer, staticContext)
+
+        # NO RETURN
+
+
+    elif tokenType == "let":
+        if tokenizer.peek() == "left_paren":
+            node = LetBlock(tokenizer, staticContext, True)
+        else:
+            node = Variables(tokenizer, staticContext)
+
+        # NO RETURN
+
+
+    elif tokenType == "debugger":
+        node = builder.DEBUGGER_build(tokenizer)
+
+        # NO RETURN
+
+
+    elif tokenType == "newline" or tokenType == "semicolon":
+        node = builder.SEMICOLON_build(tokenizer)
+
+        builder.SEMICOLON_setExpression(node, None)
+        builder.SEMICOLON_finish(tokenizer)
+
+        return node
+
+
+    else:
+        if tokenType == "identifier":
+            tokenType = tokenizer.peek()
+
+            # Labeled statement.
+            if tokenType == "colon":
+                label = tokenizer.token.value
+                statementStack = staticContext.statementStack
+
+                i = len(statementStack)-1
+                while i >= 0:
+                    if getattr(statementStack[i], "label", None) == label:
+                        raise SyntaxError("Duplicate label", tokenizer)
+
+                    i -= 1
+
+                tokenizer.get()
+                node = builder.LABEL_build(tokenizer)
+
+                builder.LABEL_setLabel(node, label)
+                builder.LABEL_setStatement(node, nest(tokenizer, staticContext, node, Statement))
+                builder.LABEL_finish(node)
+
+                return node
+
+        # Expression statement.
+        # We unget the current token to parse the expression as a whole.
+        node = builder.SEMICOLON_build(tokenizer)
+        tokenizer.unget()
+        builder.SEMICOLON_setExpression(node, Expression(tokenizer, staticContext))
+        node.end = node.expression.end
+        builder.SEMICOLON_finish(node)
+
+        # NO RETURN
+
+
+    MagicalSemicolon(tokenizer)
+    return node
+
+
+
+def MagicalSemicolon(tokenizer):
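+    # Automatic semicolon insertion: if the next token is on the same line as
+    # the statement just parsed, it must be one that may legally follow a
+    # statement end; afterwards an optional explicit semicolon is consumed.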
+    if tokenizer.line == tokenizer.token.line:
+        tokenType = tokenizer.peekOnSameLine()
+
+        if tokenType != "end" and tokenType != "newline" and tokenType != "semicolon" and tokenType != "right_curly":
+            raise SyntaxError("Missing ; before statement", tokenizer)
+
+    tokenizer.match("semicolon")
+
+
+
+def returnOrYield(tokenizer, staticContext):
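+    # Shared by return statements and yield expressions. A yield marks the
+    # enclosing function as a generator, and a generator must not also
+    # return a value (checked below).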
+    builder = staticContext.builder
+    tokenType = tokenizer.token.type
+
+    if tokenType == "return":
+        if not staticContext.inFunction:
+            raise SyntaxError("Return not in function", tokenizer)
+
+        node = builder.RETURN_build(tokenizer)
+
+    else:
+        if not staticContext.inFunction:
+            raise SyntaxError("Yield not in function", tokenizer)
+
+        staticContext.isGenerator = True
+        node = builder.YIELD_build(tokenizer)
+
+    nextTokenType = tokenizer.peek(True)
+    if nextTokenType != "end" and nextTokenType != "newline" and nextTokenType != "semicolon" and nextTokenType != "right_curly" and (tokenType != "yield" or (nextTokenType != tokenType and nextTokenType != "right_bracket" and nextTokenType != "right_paren" and nextTokenType != "colon" and nextTokenType != "comma")):
+        if tokenType == "return":
+            builder.RETURN_setValue(node, Expression(tokenizer, staticContext))
+            staticContext.hasReturnWithValue = True
+        else:
+            builder.YIELD_setValue(node, AssignExpression(tokenizer, staticContext))
+
+    elif tokenType == "return":
+        staticContext.hasEmptyReturn = True
+
+    # Disallow return v; in generator.
+    if staticContext.hasReturnWithValue and staticContext.isGenerator:
+        raise SyntaxError("Generator returns a value", tokenizer)
+
+    if tokenType == "return":
+        builder.RETURN_finish(node)
+    else:
+        builder.YIELD_finish(node)
+
+    return node
+
+
+
+def FunctionDefinition(tokenizer, staticContext, requireName, functionForm):
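+    # functionForm is "declared_form", "statement_form" or "expressed_form";
+    # a body that does not start with "{" is parsed as an expression closure.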
+    builder = staticContext.builder
+    functionNode = builder.FUNCTION_build(tokenizer)
+
+    if tokenizer.match("identifier"):
+        builder.FUNCTION_setName(functionNode, tokenizer.token.value)
+    elif requireName:
+        raise SyntaxError("Missing def identifier", tokenizer)
+
+    tokenizer.mustMatch("left_paren")
+
+    if not tokenizer.match("right_paren"):
+        builder.FUNCTION_initParams(functionNode, tokenizer)
+        prevParamNode = None
+        while True:
+            tokenType = tokenizer.get()
+            if tokenType == "left_bracket" or tokenType == "left_curly":
+                # Destructured formal parameters.
+                tokenizer.unget()
+                paramNode = DestructuringExpression(tokenizer, staticContext)
+
+            elif tokenType == "identifier":
+                paramNode = builder.FUNCTION_wrapParam(tokenizer)
+
+            else:
+                raise SyntaxError("Missing formal parameter", tokenizer)
+
+            builder.FUNCTION_addParam(functionNode, tokenizer, paramNode)
+            builder.COMMENTS_add(paramNode, prevParamNode, tokenizer.getComments())
+
+            if not tokenizer.match("comma"):
+                break
+
+            prevParamNode = paramNode
+
+        tokenizer.mustMatch("right_paren")
+
+    # Do we have an expression closure or a normal body?
+    tokenType = tokenizer.get()
+    if tokenType != "left_curly":
+        builder.FUNCTION_setExpressionClosure(functionNode, True)
+        tokenizer.unget()
+
+    childContext = StaticContext(True, builder)
+
+    if staticContext.inFunction:
+        # Inner functions don't reset block numbering, only functions at
+        # the top level of the program do.
+        childContext.blockId = staticContext.blockId
+
+    if tokenType != "left_curly":
+        builder.FUNCTION_setBody(functionNode, AssignExpression(tokenizer, staticContext))
+        if staticContext.isGenerator:
+            raise SyntaxError("Generator returns a value", tokenizer)
+
+    else:
+        builder.FUNCTION_hoistVars(childContext.blockId)
+        builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
+
+    if tokenType == "left_curly":
+        tokenizer.mustMatch("right_curly")
+
+    functionNode.end = tokenizer.token.end
+    functionNode.functionForm = functionForm
+
+    builder.COMMENTS_add(functionNode.body, functionNode.body, tokenizer.getComments())
+    builder.FUNCTION_finish(functionNode, staticContext)
+
+    return functionNode
+
+
+
+def Variables(tokenizer, staticContext, letBlock=None):
+    """Parses a comma-separated list of var declarations (and maybe initializations)."""
+
+    builder = staticContext.builder
+    if tokenizer.token.type == "var":
+        build = builder.VAR_build
+        addDecl = builder.VAR_addDecl
+        finish = builder.VAR_finish
+        childContext = staticContext
+
+    elif tokenizer.token.type == "const":
+        build = builder.CONST_build
+        addDecl = builder.CONST_addDecl
+        finish = builder.CONST_finish
+        childContext = staticContext
+
+    elif tokenizer.token.type == "let" or tokenizer.token.type == "left_paren":
+        build = builder.LET_build
+        addDecl = builder.LET_addDecl
+        finish = builder.LET_finish
+
+        if not letBlock:
+            statementStack = staticContext.statementStack
+            i = len(statementStack) - 1
+
+            # a BLOCK *must* be found.
+            while statementStack[i].type != "block":
+                i -= 1
+
+            # Lets at the function toplevel are just vars, at least in SpiderMonkey.
+            if i == 0:
+                build = builder.VAR_build
+                addDecl = builder.VAR_addDecl
+                finish = builder.VAR_finish
+                childContext = staticContext
+
+            else:
+                childContext = statementStack[i]
+
+        else:
+            childContext = letBlock
+
+    node = build(tokenizer)
+
+    while True:
+        tokenType = tokenizer.get()
+
+        # Resolved in the Python port: a dedicated "declaration" node is used
+        # instead of overloading IDENTIFIER to mean both identifier
+        # declarations and destructured declarations.
+        childNode = builder.DECL_build(tokenizer)
+
+        if tokenType == "left_bracket" or tokenType == "left_curly":
+            # Pass in childContext if we need to add each pattern matched into
+            # its variables, else pass in staticContext.
+            # Need to unget to parse the full destructured expression.
+            tokenizer.unget()
+            builder.DECL_setNames(childNode, DestructuringExpression(tokenizer, staticContext, True, childContext))
+
+            if staticContext.inForLoopInit and tokenizer.peek() == "in":
+                addDecl(node, childNode, childContext)
+                if tokenizer.match("comma"):
+                    continue
+                else:
+                    break
+
+            tokenizer.mustMatch("assign")
+            if tokenizer.token.assignOp:
+                raise SyntaxError("Invalid variable initialization", tokenizer)
+
+            # Parse the init as a normal assignment.
+            builder.DECL_setInitializer(childNode, AssignExpression(tokenizer, staticContext))
+            builder.DECL_finish(childNode)
+            addDecl(node, childNode, childContext)
+
+            # Copy over names for variable list
+            # for nameNode in childNode.names:
+            #    childContext.variables.add(nameNode.value)
+
+            if tokenizer.match("comma"):
+                continue
+            else:
+                break
+
+        if tokenType != "identifier":
+            raise SyntaxError("Missing variable name", tokenizer)
+
+        builder.DECL_setName(childNode, tokenizer.token.value)
+        builder.DECL_setReadOnly(childNode, node.type == "const")
+        addDecl(node, childNode, childContext)
+
+        if tokenizer.match("assign"):
+            if tokenizer.token.assignOp:
+                raise SyntaxError("Invalid variable initialization", tokenizer)
+
+            initializerNode = AssignExpression(tokenizer, staticContext)
+            builder.DECL_setInitializer(childNode, initializerNode)
+
+        builder.DECL_finish(childNode)
+
+        # If we directly use the node in "let" constructs
+        # if not hasattr(childContext, "variables"):
+        #    childContext.variables = set()
+
+        # childContext.variables.add(childNode.name)
+
+        if not tokenizer.match("comma"):
+            break
+
+    finish(node)
+    return node
+
+
+
+def LetBlock(tokenizer, staticContext, isStatement):
+    """Does not handle let inside of for loop init."""
+    builder = staticContext.builder
+
+    # tokenizer.token.type must be "let"
+    node = builder.LETBLOCK_build(tokenizer)
+    tokenizer.mustMatch("left_paren")
+    builder.LETBLOCK_setVariables(node, Variables(tokenizer, staticContext, node))
+    tokenizer.mustMatch("right_paren")
+
+    if isStatement and tokenizer.peek() != "left_curly":
+        # If this is really an expression in let statement guise, then we
+        # need to wrap the "let_block" node in a "semicolon" node so that we pop
+        # the return value of the expression.
+        childNode = builder.SEMICOLON_build(tokenizer)
+        builder.SEMICOLON_setExpression(childNode, node)
+        builder.SEMICOLON_finish(childNode)
+        isStatement = False
+
+    if isStatement:
+        childNode = Block(tokenizer, staticContext)
+        builder.LETBLOCK_setBlock(node, childNode)
+
+    else:
+        childNode = AssignExpression(tokenizer, staticContext)
+        builder.LETBLOCK_setExpression(node, childNode)
+
+    builder.LETBLOCK_finish(node)
+    return node
+
+
+def checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly=None, data=None):
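+    # Validates a destructuring pattern ("array_init"/"object_init" node);
+    # nested patterns are checked recursively, and in declarations
+    # (simpleNamesOnly) every left-hand side must be a plain identifier.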
+    if node.type == "array_comp":
+        raise SyntaxError("Invalid array comprehension left-hand side", tokenizer)
+
+    if node.type != "array_init" and node.type != "object_init":
+        return
+
+    builder = staticContext.builder
+
+    for child in node:
+        if child is None:
+            continue
+
+        if child.type == "property_init":
+            lhs = child[0]
+            rhs = child[1]
+        else:
+            lhs = None
+            rhs = None
+
+
+        if rhs and (rhs.type == "array_init" or rhs.type == "object_init"):
+            checkDestructuring(tokenizer, staticContext, rhs, simpleNamesOnly, data)
+
+        if lhs and simpleNamesOnly:
+            # In declarations, lhs must be simple names
+            if lhs.type != "identifier":
+                raise SyntaxError("Missing name in pattern", tokenizer)
+
+            elif data:
+                childNode = builder.DECL_build(tokenizer)
+                builder.DECL_setName(childNode, lhs.value)
+
+                # Don't need to set initializer because it's just for
+                # hoisting anyways.
+                builder.DECL_finish(childNode)
+
+                # Each pattern needs to be added to variables.
+                # data.variables.add(childNode.name)
+
+
+# JavaScript 1.7
+def DestructuringExpression(tokenizer, staticContext, simpleNamesOnly=None, data=None):
+    node = PrimaryExpression(tokenizer, staticContext)
+    checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly, data)
+
+    return node
+
+
+# JavaScript 1.7
+def GeneratorExpression(tokenizer, staticContext, expression):
+    builder = staticContext.builder
+    node = builder.GENERATOR_build(tokenizer)
+
+    builder.GENERATOR_setExpression(node, expression)
+    builder.GENERATOR_setTail(node, comprehensionTail(tokenizer, staticContext))
+    builder.GENERATOR_finish(node)
+
+    return node
+
+
+# JavaScript 1.7 Comprehensions Tails (Generators / Arrays)
+def comprehensionTail(tokenizer, staticContext):
+    builder = staticContext.builder
+
+    # tokenizer.token.type must be "for"
+    body = builder.COMPTAIL_build(tokenizer)
+
+    while True:
+        node = builder.FOR_build(tokenizer)
+
+        # Comprehension tails are always for..in loops.
+        builder.FOR_rebuildForIn(node)
+        if tokenizer.match("identifier"):
+            # But sometimes they're for each..in.
+            if tokenizer.token.value == "each":
+                builder.FOR_rebuildForEach(node)
+            else:
+                tokenizer.unget()
+
+        tokenizer.mustMatch("left_paren")
+
+        tokenType = tokenizer.get()
+        if tokenType == "left_bracket" or tokenType == "left_curly":
+            tokenizer.unget()
+            # Destructured left side of for in comprehension tails.
+            builder.FOR_setIterator(node, DestructuringExpression(tokenizer, staticContext))
+
+        elif tokenType == "identifier":
+            # Removed variable/declaration substructure in Python port.
+            # Variable declarations are not allowed here. So why process them in such a way?
+
+            # declaration = builder.DECL_build(tokenizer)
+            # builder.DECL_setName(declaration, tokenizer.token.value)
+            # builder.DECL_finish(declaration)
+            # childNode = builder.VAR_build(tokenizer)
+            # builder.VAR_addDecl(childNode, declaration)
+            # builder.VAR_finish(childNode)
+            # builder.FOR_setIterator(node, declaration)
+
+            # Don't add to variables, since the semantics of comprehensions
+            # are such that the variables end up in their own function when
+            # desugared.
+
+            identifier = builder.PRIMARY_build(tokenizer, "identifier")
+            builder.FOR_setIterator(node, identifier)
+
+        else:
+            raise SyntaxError("Missing identifier", tokenizer)
+
+        tokenizer.mustMatch("in")
+        builder.FOR_setObject(node, Expression(tokenizer, staticContext))
+        tokenizer.mustMatch("right_paren")
+        builder.COMPTAIL_addFor(body, node)
+
+        if not tokenizer.match("for"):
+            break
+
+    # Optional guard.
+    if tokenizer.match("if"):
+        builder.COMPTAIL_setGuard(body, ParenExpression(tokenizer, staticContext))
+
+    builder.COMPTAIL_finish(body)
+
+    return body
+
+
+def ParenExpression(tokenizer, staticContext):
+    tokenizer.mustMatch("left_paren")
+
+    # Always accept the 'in' operator in a parenthesized expression,
+    # where it's unambiguous, even if we might be parsing the init of a
+    # for statement.
+    oldLoopInit = staticContext.inForLoopInit
+    staticContext.inForLoopInit = False
+    node = Expression(tokenizer, staticContext)
+    staticContext.inForLoopInit = oldLoopInit
+
+    err = "expression must be parenthesized"
+    if tokenizer.match("for"):
+        if node.type == "yield" and not node.parenthesized:
+            raise SyntaxError("Yield " + err, tokenizer)
+
+        if node.type == "comma" and not node.parenthesized:
+            raise SyntaxError("Generator " + err, tokenizer)
+
+        node = GeneratorExpression(tokenizer, staticContext, node)
+
+    tokenizer.mustMatch("right_paren")
+
+    return node
+
+
+def Expression(tokenizer, staticContext):
+    """Top-down expression parser matched against SpiderMonkey."""
+    builder = staticContext.builder
+    node = AssignExpression(tokenizer, staticContext)
+
+    if tokenizer.match("comma"):
+        childNode = builder.COMMA_build(tokenizer)
+        builder.COMMA_addOperand(childNode, node)
+        node = childNode
+        while True:
+            childNode = node[-1]
+            if childNode.type == "yield" and not childNode.parenthesized:
+                raise SyntaxError("Yield expression must be parenthesized", tokenizer)
+            builder.COMMA_addOperand(node, AssignExpression(tokenizer, staticContext))
+
+            if not tokenizer.match("comma"):
+                break
+
+        builder.COMMA_finish(node)
+
+    return node
+
+
+def AssignExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+
+    # Have to treat yield like an operand because it could be the leftmost
+    # operand of the expression.
+    if tokenizer.match("yield", True):
+        return returnOrYield(tokenizer, staticContext)
+
+    comments = tokenizer.getComments()
+    node = builder.ASSIGN_build(tokenizer)
+    lhs = ConditionalExpression(tokenizer, staticContext)
+    builder.COMMENTS_add(lhs, None, comments)
+
+    if not tokenizer.match("assign"):
+        builder.ASSIGN_finish(node)
+        return lhs
+
+    if lhs.type == "object_init" or lhs.type == "array_init":
+        checkDestructuring(tokenizer, staticContext, lhs)
+    elif lhs.type == "identifier" or lhs.type == "dot" or lhs.type == "index" or lhs.type == "call":
+        pass
+    else:
+        raise SyntaxError("Bad left-hand side of assignment", tokenizer)
+
+    builder.ASSIGN_setAssignOp(node, tokenizer.token.assignOp)
+    builder.ASSIGN_addOperand(node, lhs)
+    builder.ASSIGN_addOperand(node, AssignExpression(tokenizer, staticContext))
+    builder.ASSIGN_finish(node)
+
+    return node
+
+
+def ConditionalExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = OrExpression(tokenizer, staticContext)
+
+    if tokenizer.match("hook"):
+        childNode = node
+        node = builder.HOOK_build(tokenizer)
+        builder.HOOK_setCondition(node, childNode)
+
+        # Always accept the 'in' operator in the middle clause of a ternary,
+        # where it's unambiguous, even if we might be parsing the init of a
+        # for statement.
+        oldLoopInit = staticContext.inForLoopInit
+        staticContext.inForLoopInit = False
+        builder.HOOK_setThenPart(node, AssignExpression(tokenizer, staticContext))
+        staticContext.inForLoopInit = oldLoopInit
+
+        if not tokenizer.match("colon"):
+            raise SyntaxError("Missing : after ?", tokenizer)
+
+        builder.HOOK_setElsePart(node, AssignExpression(tokenizer, staticContext))
+        builder.HOOK_finish(node)
+
+    return node
+
+
+def OrExpression(tokenizer, staticContext):
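+    # Binary operators are parsed as a precedence cascade: each level (or ->
+    # and -> bitwise_or -> ... -> unary) takes its operands from the next
+    # tighter-binding level and folds repeated operators left-associatively.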
+    builder = staticContext.builder
+    node = AndExpression(tokenizer, staticContext)
+
+    while tokenizer.match("or"):
+        childNode = builder.OR_build(tokenizer)
+        builder.OR_addOperand(childNode, node)
+        builder.OR_addOperand(childNode, AndExpression(tokenizer, staticContext))
+        builder.OR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def AndExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseOrExpression(tokenizer, staticContext)
+
+    while tokenizer.match("and"):
+        childNode = builder.AND_build(tokenizer)
+        builder.AND_addOperand(childNode, node)
+        builder.AND_addOperand(childNode, BitwiseOrExpression(tokenizer, staticContext))
+        builder.AND_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseOrExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseXorExpression(tokenizer, staticContext)
+
+    while tokenizer.match("bitwise_or"):
+        childNode = builder.BITWISEOR_build(tokenizer)
+        builder.BITWISEOR_addOperand(childNode, node)
+        builder.BITWISEOR_addOperand(childNode, BitwiseXorExpression(tokenizer, staticContext))
+        builder.BITWISEOR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseXorExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseAndExpression(tokenizer, staticContext)
+
+    while tokenizer.match("bitwise_xor"):
+        childNode = builder.BITWISEXOR_build(tokenizer)
+        builder.BITWISEXOR_addOperand(childNode, node)
+        builder.BITWISEXOR_addOperand(childNode, BitwiseAndExpression(tokenizer, staticContext))
+        builder.BITWISEXOR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseAndExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = EqualityExpression(tokenizer, staticContext)
+
+    while tokenizer.match("bitwise_and"):
+        childNode = builder.BITWISEAND_build(tokenizer)
+        builder.BITWISEAND_addOperand(childNode, node)
+        builder.BITWISEAND_addOperand(childNode, EqualityExpression(tokenizer, staticContext))
+        builder.BITWISEAND_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def EqualityExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = RelationalExpression(tokenizer, staticContext)
+
+    while tokenizer.match("eq") or tokenizer.match("ne") or tokenizer.match("strict_eq") or tokenizer.match("strict_ne"):
+        childNode = builder.EQUALITY_build(tokenizer)
+        builder.EQUALITY_addOperand(childNode, node)
+        builder.EQUALITY_addOperand(childNode, RelationalExpression(tokenizer, staticContext))
+        builder.EQUALITY_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def RelationalExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    oldLoopInit = staticContext.inForLoopInit
+
+    # Uses of the in operator in shiftExprs are always unambiguous,
+    # so unset the flag that prohibits recognizing it.
+    staticContext.inForLoopInit = False
+    node = ShiftExpression(tokenizer, staticContext)
+
+    while tokenizer.match("lt") or tokenizer.match("le") or tokenizer.match("ge") or tokenizer.match("gt") or (oldLoopInit == False and tokenizer.match("in")) or tokenizer.match("instanceof"):
+        childNode = builder.RELATIONAL_build(tokenizer)
+        builder.RELATIONAL_addOperand(childNode, node)
+        builder.RELATIONAL_addOperand(childNode, ShiftExpression(tokenizer, staticContext))
+        builder.RELATIONAL_finish(childNode)
+        node = childNode
+
+    staticContext.inForLoopInit = oldLoopInit
+
+    return node
+
+
+def ShiftExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = AddExpression(tokenizer, staticContext)
+
+    while tokenizer.match("lsh") or tokenizer.match("rsh") or tokenizer.match("ursh"):
+        childNode = builder.SHIFT_build(tokenizer)
+        builder.SHIFT_addOperand(childNode, node)
+        builder.SHIFT_addOperand(childNode, AddExpression(tokenizer, staticContext))
+        builder.SHIFT_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def AddExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = MultiplyExpression(tokenizer, staticContext)
+
+    while tokenizer.match("plus") or tokenizer.match("minus"):
+        childNode = builder.ADD_build(tokenizer)
+        builder.ADD_addOperand(childNode, node)
+        builder.ADD_addOperand(childNode, MultiplyExpression(tokenizer, staticContext))
+        builder.ADD_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def MultiplyExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = UnaryExpression(tokenizer, staticContext)
+
+    while tokenizer.match("mul") or tokenizer.match("div") or tokenizer.match("mod"):
+        childNode = builder.MULTIPLY_build(tokenizer)
+        builder.MULTIPLY_addOperand(childNode, node)
+        builder.MULTIPLY_addOperand(childNode, UnaryExpression(tokenizer, staticContext))
+        builder.MULTIPLY_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def UnaryExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    tokenType = tokenizer.get(True)
+
+    if tokenType in ["delete", "void", "typeof", "not", "bitwise_not", "plus", "minus"]:
+        node = builder.UNARY_build(tokenizer)
+        builder.UNARY_addOperand(node, UnaryExpression(tokenizer, staticContext))
+
+    elif tokenType == "increment" or tokenType == "decrement":
+        # Prefix increment/decrement.
+        node = builder.UNARY_build(tokenizer)
+        builder.UNARY_addOperand(node, MemberExpression(tokenizer, staticContext, True))
+
+    else:
+        tokenizer.unget()
+        node = MemberExpression(tokenizer, staticContext, True)
+
+        # Don't look across a newline boundary for a postfix {in,de}crement.
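+        # (The "& 3" wraps the index into the tokenizer's four-slot token
+        # ring buffer.)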
+        if tokenizer.tokens[(tokenizer.tokenIndex + tokenizer.lookahead - 1) & 3].line == tokenizer.line:
+            if tokenizer.match("increment") or tokenizer.match("decrement"):
+                childNode = builder.UNARY_build(tokenizer)
+                builder.UNARY_setPostfix(childNode)
+                builder.UNARY_finish(node)
+                builder.UNARY_addOperand(childNode, node)
+                node = childNode
+
+    builder.UNARY_finish(node)
+    return node
+
+
+def MemberExpression(tokenizer, staticContext, allowCallSyntax):
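+    # Parses "new" expressions, property access (dot), indexing (brackets)
+    # and, when allowCallSyntax is set, call expressions, folding them
+    # left-to-right onto the primary expression.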
+    builder = staticContext.builder
+
+    if tokenizer.match("new"):
+        node = builder.MEMBER_build(tokenizer)
+        builder.MEMBER_addOperand(node, MemberExpression(tokenizer, staticContext, False))
+
+        if tokenizer.match("left_paren"):
+            builder.MEMBER_rebuildNewWithArgs(node)
+            builder.MEMBER_addOperand(node, ArgumentList(tokenizer, staticContext))
+
+        builder.MEMBER_finish(node)
+
+    else:
+        node = PrimaryExpression(tokenizer, staticContext)
+
+    while True:
+        tokenType = tokenizer.get()
+        if tokenType == "end":
+            break
+
+        if tokenType == "dot":
+            childNode = builder.MEMBER_build(tokenizer)
+            builder.MEMBER_addOperand(childNode, node)
+            tokenizer.mustMatch("identifier")
+            builder.MEMBER_addOperand(childNode, builder.MEMBER_build(tokenizer))
+
+        elif tokenType == "left_bracket":
+            childNode = builder.MEMBER_build(tokenizer, "index")
+            builder.MEMBER_addOperand(childNode, node)
+            builder.MEMBER_addOperand(childNode, Expression(tokenizer, staticContext))
+            tokenizer.mustMatch("right_bracket")
+
+        elif tokenType == "left_paren" and allowCallSyntax:
+            childNode = builder.MEMBER_build(tokenizer, "call")
+            builder.MEMBER_addOperand(childNode, node)
+            builder.MEMBER_addOperand(childNode, ArgumentList(tokenizer, staticContext))
+
+        else:
+            tokenizer.unget()
+            return node
+
+        builder.MEMBER_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def ArgumentList(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = builder.LIST_build(tokenizer)
+
+    if tokenizer.match("right_paren", True):
+        return node
+
+    while True:
+        childNode = AssignExpression(tokenizer, staticContext)
+        if childNode.type == "yield" and not childNode.parenthesized and tokenizer.peek() == "comma":
+            raise SyntaxError("Yield expression must be parenthesized", tokenizer)
+
+        if tokenizer.match("for"):
+            childNode = GeneratorExpression(tokenizer, staticContext, childNode)
+            if len(node) > 1 or tokenizer.peek(True) == "comma":
+                raise SyntaxError("Generator expression must be parenthesized", tokenizer)
+
+        builder.LIST_addOperand(node, childNode)
+        if not tokenizer.match("comma"):
+            break
+
+    tokenizer.mustMatch("right_paren")
+    builder.LIST_finish(node)
+
+    return node
+
+
+def PrimaryExpression(tokenizer, staticContext):
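+    # Parses the leaf forms: function expressions, array/object initialisers
+    # (including array comprehensions and getters/setters), parenthesized
+    # expressions, let blocks and simple literals/identifiers.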
+    builder = staticContext.builder
+    tokenType = tokenizer.get(True)
+
+    if tokenType == "function":
+        node = FunctionDefinition(tokenizer, staticContext, False, "expressed_form")
+
+    elif tokenType == "left_bracket":
+        node = builder.ARRAYINIT_build(tokenizer)
+        while True:
+            tokenType = tokenizer.peek(True)
+            if tokenType == "right_bracket":
+                break
+
+            if tokenType == "comma":
+                tokenizer.get()
+                builder.ARRAYINIT_addElement(node, None)
+                continue
+
+            builder.ARRAYINIT_addElement(node, AssignExpression(tokenizer, staticContext))
+
+            if tokenType != "comma" and not tokenizer.match("comma"):
+                break
+
+        # If we matched exactly one element and got a "for", we have an
+        # array comprehension.
+        if len(node) == 1 and tokenizer.match("for"):
+            childNode = builder.ARRAYCOMP_build(tokenizer)
+            builder.ARRAYCOMP_setExpression(childNode, node[0])
+            builder.ARRAYCOMP_setTail(childNode, comprehensionTail(tokenizer, staticContext))
+            node = childNode
+
+        builder.COMMENTS_add(node, node, tokenizer.getComments())
+        tokenizer.mustMatch("right_bracket")
+        builder.PRIMARY_finish(node)
+
+    elif tokenType == "left_curly":
+        node = builder.OBJECTINIT_build(tokenizer)
+
+        if not tokenizer.match("right_curly"):
+            while True:
+                tokenType = tokenizer.get()
+                tokenValue = getattr(tokenizer.token, "value", None)
+                comments = tokenizer.getComments()
+
+                if tokenValue in ("get", "set") and tokenizer.peek() == "identifier":
+                    if staticContext.ecma3OnlyMode:
+                        raise SyntaxError("Illegal property accessor", tokenizer)
+
+                    fd = FunctionDefinition(tokenizer, staticContext, True, "expressed_form")
+                    builder.OBJECTINIT_addProperty(node, fd)
+
+                else:
+                    if tokenType == "identifier" or tokenType == "number" or tokenType == "string":
+                        id = builder.PRIMARY_build(tokenizer, "identifier")
+                        builder.PRIMARY_finish(id)
+
+                    elif tokenType == "right_curly":
+                        if staticContext.ecma3OnlyMode:
+                            raise SyntaxError("Illegal trailing ,", tokenizer)
+
+                        tokenizer.unget()
+                        break
+
+                    else:
+                        if tokenValue in jasy.script.tokenize.Lang.keywords:
+                            id = builder.PRIMARY_build(tokenizer, "identifier")
+                            builder.PRIMARY_finish(id)
+                        else:
+                            print("Value is '%s'" % tokenValue)
+                            raise SyntaxError("Invalid property name", tokenizer)
+
+                    if tokenizer.match("colon"):
+                        childNode = builder.PROPERTYINIT_build(tokenizer)
+                        builder.COMMENTS_add(childNode, node, comments)
+                        builder.PROPERTYINIT_addOperand(childNode, id)
+                        builder.PROPERTYINIT_addOperand(childNode, AssignExpression(tokenizer, staticContext))
+                        builder.PROPERTYINIT_finish(childNode)
+                        builder.OBJECTINIT_addProperty(node, childNode)
+
+                    else:
+                        # Support, e.g., |var {x, y} = o| as destructuring shorthand
+                        # for |var {x: x, y: y} = o|, per proposed JS2/ES4 for JS1.8.
+                        if tokenizer.peek() != "comma" and tokenizer.peek() != "right_curly":
+                            raise SyntaxError("Missing : after property", tokenizer)
+                        builder.OBJECTINIT_addProperty(node, id)
+
+                if not tokenizer.match("comma"):
+                    break
+
+            builder.COMMENTS_add(node, node, tokenizer.getComments())
+            tokenizer.mustMatch("right_curly")
+
+        builder.OBJECTINIT_finish(node)
+
+    elif tokenType == "left_paren":
+        # ParenExpression does its own matching on parentheses, so we need to unget.
+        tokenizer.unget()
+        node = ParenExpression(tokenizer, staticContext)
+        node.parenthesized = True
+
+    elif tokenType == "let":
+        node = LetBlock(tokenizer, staticContext, False)
+
+    elif tokenType in ["null", "this", "true", "false", "identifier", "number", "string", "regexp"]:
+        node = builder.PRIMARY_build(tokenizer, tokenType)
+        builder.PRIMARY_finish(node)
+
+    else:
+        raise SyntaxError("Missing operand. Found type: %s" % tokenType, tokenizer)
+
+    return node
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,679 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors:
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
+#
+
+from __future__ import unicode_literals
+
+import jasy.script.parse.Node
+
+class VanillaBuilder:
+    """The vanilla AST builder."""
+
+    def COMMENTS_add(self, currNode, prevNode, comments):
+        if not comments:
+            return
+
+        currComments = []
+        prevComments = []
+        for comment in comments:
+            # post comments - for previous node
+            if comment.context == "inline":
+                prevComments.append(comment)
+
+            # all other comment styles are attached to the current one
+            else:
+                currComments.append(comment)
+
+        # Merge with previously added ones
+        if hasattr(currNode, "comments"):
+            currNode.comments.extend(currComments)
+        else:
+            currNode.comments = currComments
+
+        if prevNode:
+            if hasattr(prevNode, "comments"):
+                prevNode.comments.extend(prevComments)
+            else:
+                prevNode.comments = prevComments
+        else:
+            # Don't lose the comment in the tree (if no previous node is there,
+            # attach it to the current node).
+            currNode.comments.extend(prevComments)
+
+    def IF_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "if")
+
+    def IF_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def IF_setThenPart(self, node, statement):
+        node.append(statement, "thenPart")
+
+    def IF_setElsePart(self, node, statement):
+        node.append(statement, "elsePart")
+
+    def IF_finish(self, node):
+        pass
+
+    def SWITCH_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "switch")
+        node.defaultIndex = -1
+        return node
+
+    def SWITCH_setDiscriminant(self, node, expression):
+        node.append(expression, "discriminant")
+
+    def SWITCH_setDefaultIndex(self, node, index):
+        node.defaultIndex = index
+
+    def SWITCH_addCase(self, node, childNode):
+        node.append(childNode)
+
+    def SWITCH_finish(self, node):
+        pass
+
+    def CASE_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "case")
+
+    def CASE_setLabel(self, node, expression):
+        node.append(expression, "label")
+
+    def CASE_initializeStatements(self, node, tokenizer):
+        node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements")
+
+    def CASE_addStatement(self, node, statement):
+        node.statements.append(statement)
+
+    def CASE_finish(self, node):
+        pass
+
+    def DEFAULT_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "default")
+
+    def DEFAULT_initializeStatements(self, node, tokenizer):
+        node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements")
+
+    def DEFAULT_addStatement(self, node, statement):
+        node.statements.append(statement)
+
+    def DEFAULT_finish(self, node):
+        pass
+
+    def FOR_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "for")
+        node.isLoop = True
+        node.isEach = False
+        return node
+
+    def FOR_rebuildForEach(self, node):
+        node.isEach = True
+
+    # NB: This function is called after rebuildForEach, if that's called at all.
+    def FOR_rebuildForIn(self, node):
+        node.type = "for_in"
+
+    def FOR_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def FOR_setSetup(self, node, expression):
+        node.append(expression, "setup")
+
+    def FOR_setUpdate(self, node, expression):
+        node.append(expression, "update")
+
+    def FOR_setObject(self, node, expression, forBlock=None):
+        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
+        # JS tolerates the optional unused parameter, but Python does not.
+        node.append(expression, "object")
+
+    def FOR_setIterator(self, node, expression, forBlock=None):
+        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
+        # JS tolerates the optional unused parameter, but Python does not.
+        node.append(expression, "iterator")
+
+    def FOR_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def FOR_finish(self, node):
+        pass
+
+    def WHILE_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "while")
+        node.isLoop = True
+        return node
+
+    def WHILE_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def WHILE_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def WHILE_finish(self, node):
+        pass
+
+    def DO_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "do")
+        node.isLoop = True
+        return node
+
+    def DO_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def DO_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def DO_finish(self, node):
+        pass
+
+    def BREAK_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "break")
+
+    def BREAK_setLabel(self, node, label):
+        node.label = label
+
+    def BREAK_setTarget(self, node, target):
+        # Hint, no append() - relation, but not a child
+        node.target = target
+
+    def BREAK_finish(self, node):
+        pass
+
+    def CONTINUE_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "continue")
+
+    def CONTINUE_setLabel(self, node, label):
+        node.label = label
+
+    def CONTINUE_setTarget(self, node, target):
+        # Hint, no append() - relation, but not a child
+        node.target = target
+
+    def CONTINUE_finish(self, node):
+        pass
+
+    def TRY_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "try")
+        return node
+
+    def TRY_setTryBlock(self, node, statement):
+        node.append(statement, "tryBlock")
+
+    def TRY_addCatch(self, node, childNode):
+        node.append(childNode)
+
+    def TRY_finishCatches(self, node):
+        pass
+
+    def TRY_setFinallyBlock(self, node, statement):
+        node.append(statement, "finallyBlock")
+
+    def TRY_finish(self, node):
+        pass
+
+    def CATCH_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "catch")
+        return node
+
+    def CATCH_wrapException(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "exception")
+        node.value = tokenizer.token.value
+        return node
+
+    def CATCH_setException(self, node, exception):
+        node.append(exception, "exception")
+
+    def CATCH_setGuard(self, node, expression):
+        node.append(expression, "guard")
+
+    def CATCH_setBlock(self, node, statement):
+        node.append(statement, "block")
+
+    def CATCH_finish(self, node):
+        pass
+
+    def THROW_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "throw")
+
+    def THROW_setException(self, node, expression):
+        node.append(expression, "exception")
+
+    def THROW_finish(self, node):
+        pass
+
+    def RETURN_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "return")
+
+    def RETURN_setValue(self, node, expression):
+        node.append(expression, "value")
+
+    def RETURN_finish(self, node):
+        pass
+
+    def YIELD_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "yield")
+
+    def YIELD_setValue(self, node, expression):
+        node.append(expression, "value")
+
+    def YIELD_finish(self, node):
+        pass
+
+    def GENERATOR_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "generator")
+
+    def GENERATOR_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def GENERATOR_setTail(self, node, childNode):
+        node.append(childNode, "tail")
+
+    def GENERATOR_finish(self, node):
+        pass
+
+    def WITH_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "with")
+
+    def WITH_setObject(self, node, expression):
+        node.append(expression, "object")
+
+    def WITH_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def WITH_finish(self, node):
+        pass
+
+    def DEBUGGER_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "debugger")
+
+    def SEMICOLON_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "semicolon")
+
+    def SEMICOLON_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def SEMICOLON_finish(self, node):
+        pass
+
+    def LABEL_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "label")
+
+    def LABEL_setLabel(self, node, label):
+        node.label = label
+
+    def LABEL_setStatement(self, node, statement):
+        node.append(statement, "statement")
+
+    def LABEL_finish(self, node):
+        pass
+
+    def FUNCTION_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer)
+        if node.type != "function":
+            if tokenizer.token.value == "get":
+                node.type = "getter"
+            else:
+                node.type = "setter"
+
+        return node
+
+    def FUNCTION_setName(self, node, identifier):
+        node.name = identifier
+
+    def FUNCTION_initParams(self, node, tokenizer):
+        node.append(jasy.script.parse.Node.Node(tokenizer, "list"), "params")
+
+    def FUNCTION_wrapParam(self, tokenizer):
+        param = jasy.script.parse.Node.Node(tokenizer)
+        param.value = tokenizer.token.value
+        return param
+
+    def FUNCTION_addParam(self, node, tokenizer, expression):
+        node.params.append(expression)
+
+    def FUNCTION_setExpressionClosure(self, node, expressionClosure):
+        node.expressionClosure = expressionClosure
+
+    def FUNCTION_setBody(self, node, statement):
+        # Copying the function parameters over to the function body is
+        # currently disabled:
+        #params = getattr(node, "params", None)
+        #if params:
+        #    statement.params = [param.value for param in params]
+
+        node.append(statement, "body")
+
+    def FUNCTION_hoistVars(self, x):
+        pass
+
+    def FUNCTION_finish(self, node, x):
+        pass
+
+    def VAR_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "var")
+
+    def VAR_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def VAR_finish(self, node):
+        pass
+
+    def CONST_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "const")
+
+    def CONST_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def CONST_finish(self, node):
+        pass
+
+    def LET_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "let")
+
+    def LET_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def LET_finish(self, node):
+        pass
+
+    def DECL_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "declaration")
+
+    def DECL_setNames(self, node, expression):
+        node.append(expression, "names")
+
+    def DECL_setName(self, node, identifier):
+        node.name = identifier
+
+    def DECL_setInitializer(self, node, expression):
+        node.append(expression, "initializer")
+
+    def DECL_setReadOnly(self, node, readOnly):
+        node.readOnly = readOnly
+
+    def DECL_finish(self, node):
+        pass
+
+    def LETBLOCK_build(self, tokenizer):
+        node = jasy.script.parse.Node.Node(tokenizer, "let_block")
+        return node
+
+    def LETBLOCK_setVariables(self, node, childNode):
+        node.append(childNode, "variables")
+
+    def LETBLOCK_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def LETBLOCK_setBlock(self, node, statement):
+        node.append(statement, "block")
+
+    def LETBLOCK_finish(self, node):
+        pass
+
+    def BLOCK_build(self, tokenizer, id):
+        node = jasy.script.parse.Node.Node(tokenizer, "block")
+        # node.id = id
+        return node
+
+    def BLOCK_hoistLets(self, node):
+        pass
+
+    def BLOCK_addStatement(self, node, childNode):
+        node.append(childNode)
+
+    def BLOCK_finish(self, node):
+        pass
+
+    def EXPRESSION_build(self, tokenizer, tokenType):
+        return jasy.script.parse.Node.Node(tokenizer, tokenType)
+
+    def EXPRESSION_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def EXPRESSION_finish(self, node):
+        pass
+
+    def ASSIGN_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "assign")
+
+    def ASSIGN_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def ASSIGN_setAssignOp(self, node, operator):
+        node.assignOp = operator
+
+    def ASSIGN_finish(self, node):
+        pass
+
+    def HOOK_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "hook")
+
+    def HOOK_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def HOOK_setThenPart(self, node, childNode):
+        node.append(childNode, "thenPart")
+
+    def HOOK_setElsePart(self, node, childNode):
+        node.append(childNode, "elsePart")
+
+    def HOOK_finish(self, node):
+        pass
+
+    def OR_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "or")
+
+    def OR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def OR_finish(self, node):
+        pass
+
+    def AND_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "and")
+
+    def AND_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def AND_finish(self, node):
+        pass
+
+    def BITWISEOR_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "bitwise_or")
+
+    def BITWISEOR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEOR_finish(self, node):
+        pass
+
+    def BITWISEXOR_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "bitwise_xor")
+
+    def BITWISEXOR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEXOR_finish(self, node):
+        pass
+
+    def BITWISEAND_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "bitwise_and")
+
+    def BITWISEAND_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEAND_finish(self, node):
+        pass
+
+    def EQUALITY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "eq", "ne", "strict_eq", or "strict_ne".
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def EQUALITY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def EQUALITY_finish(self, node):
+        pass
+
+    def RELATIONAL_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "lt", "le", "ge", or "gt".
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def RELATIONAL_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def RELATIONAL_finish(self, node):
+        pass
+
+    def SHIFT_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "lsh", "rsh", or "ursh".
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def SHIFT_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def SHIFT_finish(self, node):
+        pass
+
+    def ADD_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "plus" or "minus".
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def ADD_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def ADD_finish(self, node):
+        pass
+
+    def MULTIPLY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "mul", "div", or "mod".
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def MULTIPLY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def MULTIPLY_finish(self, node):
+        pass
+
+    def UNARY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not", "bitwise_not",
+        # "unary_plus", "unary_minus", "increment", or "decrement".
+        if tokenizer.token.type == "plus":
+            tokenizer.token.type = "unary_plus"
+        elif tokenizer.token.type == "minus":
+            tokenizer.token.type = "unary_minus"
+
+        return jasy.script.parse.Node.Node(tokenizer)
+
+    def UNARY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def UNARY_setPostfix(self, node):
+        node.postfix = True
+
+    def UNARY_finish(self, node):
+        pass
+
+    def MEMBER_build(self, tokenizer, tokenType=None):
+        node = jasy.script.parse.Node.Node(tokenizer, tokenType)
+        if node.type == "identifier":
+            node.value = tokenizer.token.value
+        return node
+
+    def MEMBER_rebuildNewWithArgs(self, node):
+        node.type = "new_with_args"
+
+    def MEMBER_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def MEMBER_finish(self, node):
+        pass
+
+    def PRIMARY_build(self, tokenizer, tokenType):
+        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier", "number", "string", or "regexp".
+        node = jasy.script.parse.Node.Node(tokenizer, tokenType)
+        if tokenType in ("identifier", "string", "regexp", "number"):
+            node.value = tokenizer.token.value
+
+        return node
+
+    def PRIMARY_finish(self, node):
+        pass
+
+    def ARRAYINIT_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "array_init")
+
+    def ARRAYINIT_addElement(self, node, childNode):
+        node.append(childNode)
+
+    def ARRAYINIT_finish(self, node):
+        pass
+
+    def ARRAYCOMP_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "array_comp")
+
+    def ARRAYCOMP_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def ARRAYCOMP_setTail(self, node, childNode):
+        node.append(childNode, "tail")
+
+    def ARRAYCOMP_finish(self, node):
+        pass
+
+    def COMPTAIL_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "comp_tail")
+
+    def COMPTAIL_setGuard(self, node, expression):
+        node.append(expression, "guard")
+
+    def COMPTAIL_addFor(self, node, childNode):
+        node.append(childNode, "for")
+
+    def COMPTAIL_finish(self, node):
+        pass
+
+    def OBJECTINIT_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "object_init")
+
+    def OBJECTINIT_addProperty(self, node, childNode):
+        node.append(childNode)
+
+    def OBJECTINIT_finish(self, node):
+        pass
+
+    def PROPERTYINIT_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "property_init")
+
+    def PROPERTYINIT_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def PROPERTYINIT_finish(self, node):
+        pass
+
+    def COMMA_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "comma")
+
+    def COMMA_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def COMMA_finish(self, node):
+        pass
+
+    def LIST_build(self, tokenizer):
+        return jasy.script.parse.Node.Node(tokenizer, "list")
+
+    def LIST_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def LIST_finish(self, node):
+        pass
+
+    def setHoists(self, id, vds):
+        pass
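
These builder callbacks are driven by the parser: for every construct it calls the matching TYPE_build() method to create a node, the TYPE_set*/TYPE_add* methods to attach children, and TYPE_finish() once the construct is complete. A minimal sketch of the resulting tree, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.parse.Parser as Parser

    # Parser.parse() instantiates the VanillaBuilder internally.
    root = Parser.parse("while (i < 10) { i++; }", "example.js")
    whileNode = root[0]                  # first statement of the "script" node
    print(whileNode.type)                # "while"
    print(whileNode.condition.type)      # "lt", attached via WHILE_setCondition
    print(whileNode.body.type)           # "block", attached via WHILE_setBody
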
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/tokenize/Lang.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,25 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+"""JavaScript 1.7 keywords"""
+keywords = set([
+    "break",
+    "case", "catch", "const", "continue",
+    "debugger", "default", "delete", "do",
+    "else",
+    "false", "finally", "for", "function",
+    "if", "in", "instanceof",
+    "let",
+    "new", "null",
+    "return",
+    "switch",
+    "this", "throw", "true", "try", "typeof",
+    "var", "void",
+    "yield",
+    "while", "with"
+])
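
The tokenizer added below consults this set in lexIdent() to decide whether a scanned word becomes a keyword token or a plain identifier. A quick sketch, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.tokenize.Lang as Lang

    def classify(word):
        # Keywords get their own name as token type; everything else is "identifier".
        return word if word in Lang.keywords else "identifier"

    assert classify("while") == "while"
    assert classify("foo") == "identifier"
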
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,589 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors:
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
+#
+
+from __future__ import unicode_literals
+
+import re, copy
+
+import jasy.script.tokenize.Lang as Lang
+import jasy.script.api.Comment as Comment
+import jasy.core.Console as Console
+
+
+# Operator and punctuator mapping from token to tree node type name.
+# NB: because the lexer doesn't backtrack, all token prefixes must themselves
+# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
+# tokens != and !).
+operatorNames = {
+    '<'   : 'lt',
+    '>'   : 'gt',
+    '<='  : 'le',
+    '>='  : 'ge',
+    '!='  : 'ne',
+    '!'   : 'not',
+    '=='  : 'eq',
+    '===' : 'strict_eq',
+    '!==' : 'strict_ne',
+
+    '>>'  : 'rsh',
+    '<<'  : 'lsh',
+    '>>>' : 'ursh',
+
+    '+'   : 'plus',
+    '*'   : 'mul',
+    '-'   : 'minus',
+    '/'   : 'div',
+    '%'   : 'mod',
+
+    ','   : 'comma',
+    ';'   : 'semicolon',
+    ':'   : 'colon',
+    '='   : 'assign',
+    '?'   : 'hook',
+
+    '&&'  : 'and',
+    '||'  : 'or',
+
+    '++'  : 'increment',
+    '--'  : 'decrement',
+
+    ')'   : 'right_paren',
+    '('   : 'left_paren',
+    '['   : 'left_bracket',
+    ']'   : 'right_bracket',
+    '{'   : 'left_curly',
+    '}'   : 'right_curly',
+
+    '&'   : 'bitwise_and',
+    '^'   : 'bitwise_xor',
+    '|'   : 'bitwise_or',
+    '~'   : 'bitwise_not'
+}
+
+
+# Assignment operators
+assignOperators = ["|", "^", "&", "<<", ">>", ">>>", "+", "-", "*", "/", "%"]
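
lexOp() further down relies on the prefix property noted above: it extends an operator greedily, one character at a time, for as long as the longer string is still a key of operatorNames. A standalone sketch of that maximal-munch loop (munch is a hypothetical helper reusing the table from this module):

    import jasy.script.tokenize.Tokenizer as Tokenizer

    def munch(text):
        # Greedily grow the operator while the prefix stays a valid token.
        op = text[0]
        i = 1
        while i < len(text) and op + text[i] in Tokenizer.operatorNames:
            op += text[i]
            i += 1
        return op

    assert munch("!== b") == "!=="
    assert munch("<<= 2") == "<<"   # "<<=" is handled separately as an assignment
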
+
+
+
+
+#
+# Classes
+#
+
+class Token:
+    __slots__ = ["type", "start", "line", "assignOp", "end", "value"]
+
+
+class ParseError(Exception):
+    def __init__(self, message, fileId, line):
+        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, fileId, line))
+
+
+class Tokenizer(object):
+    def __init__(self, source, fileId="", line=1):
+        # source: JavaScript source
+        # fileId: Filename (for debugging purposes)
+        # line: Line number (for debugging purposes)
+        self.cursor = 0
+        self.source = str(source)
+        self.tokens = {}
+        self.tokenIndex = 0
+        self.lookahead = 0
+        self.scanNewlines = False
+        self.fileId = fileId
+        self.line = line
+        self.comments = []
+
+    input_ = property(lambda self: self.source[self.cursor:])
+    token = property(lambda self: self.tokens.get(self.tokenIndex))
+
+
+    def done(self):
+        # We need to set scanOperand to true here because the first thing
+        # might be a regexp.
+        return self.peek(True) == "end"
+
+
+    def match(self, tokenType, scanOperand=False):
+        return self.get(scanOperand) == tokenType or self.unget()
+
+
+    def mustMatch(self, tokenType):
+        if not self.match(tokenType):
+            raise ParseError("Missing " + tokenType, self.fileId, self.line)
+
+        return self.token
+
+
+    def peek(self, scanOperand=False):
+        if self.lookahead:
+            next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
+            if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
+                tokenType = "newline"
+            else:
+                tokenType = getattr(next, "type", None)
+        else:
+            tokenType = self.get(scanOperand)
+            self.unget()
+
+        return tokenType
+
+
+    def peekOnSameLine(self, scanOperand=False):
+        self.scanNewlines = True
+        tokenType = self.peek(scanOperand)
+        self.scanNewlines = False
+        return tokenType
+
+
+    def getComments(self):
+        if self.comments:
+            comments = self.comments
+            self.comments = []
+            return comments
+
+        return None
+
+
+    def skip(self):
+        """Eats comments and whitespace."""
+        input = self.source
+        startLine = self.line
+
+        # Whether this is the first call, as happens when starting to parse a
+        # file (eat leading comments/whitespace)
+        startOfFile = self.cursor == 0
+
+        indent = ""
+
+        while (True):
+            if len(input) > self.cursor:
+                ch = input[self.cursor]
+            else:
+                return
+
+            self.cursor += 1
+
+            if len(input) > self.cursor:
+                next = input[self.cursor]
+            else:
+                next = None
+
+            if ch == "\n" and not self.scanNewlines:
+                self.line += 1
+                indent = ""
+
+            elif ch == "/" and next == "*":
+                self.cursor += 1
+                text = "/*"
+                inline = startLine == self.line and startLine > 1
+                commentStartLine = self.line
+                if startLine == self.line and not startOfFile:
+                    mode = "inline"
+                elif (self.line-1) > startLine:
+                    # a blank line before this comment marks it as a section comment (covering multiple lines of code)
+                    mode = "section"
+                else:
+                    # comment for the following line(s) of code, less important (no blank-line divider before it)
+                    mode = "block"
+
+                while (True):
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        raise ParseError("Unterminated comment", self.fileId, self.line)
+
+                    if ch == "*":
+                        next = input[self.cursor]
+                        if next == "/":
+                            text += "*/"
+                            self.cursor += 1
+                            break
+
+                    elif ch == "\n":
+                        self.line += 1
+
+                    text += ch
+
+
+                # Unescape escaped comment terminators ("*\/") in the comment text
+                text = text.replace("*\\/", "*/")
+
+                try:
+                    self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
+                except Comment.CommentException as commentError:
+                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
+
+
+            elif ch == "/" and next == "/":
+                self.cursor += 1
+                text = "//"
+                if startLine == self.line and not startOfFile:
+                    mode = "inline"
+                elif (self.line-1) > startLine:
+                    # a blank line before this comment marks it as a section comment (covering multiple lines of code)
+                    mode = "section"
+                else:
+                    # comment for the following line(s) of code, less important (no blank-line divider before it)
+                    mode = "block"
+
+                while (True):
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        # end of file etc.
+                        break
+
+                    if ch == "\n":
+                        self.line += 1
+                        break
+
+                    text += ch
+
+                try:
+                    self.comments.append(Comment.Comment(text, mode, self.line-1, "", self.fileId))
+                except Comment.CommentException as commentError:
+                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
+
+            # check for whitespace, also for special cases like 0xA0
+            elif ch in "\xA0 \t":
+                indent += ch
+
+            else:
+                self.cursor -= 1
+                return
+
+
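
The mode classification used for both comment styles above is worth spelling out: a comment starting on the same line as preceding code is "inline", a comment preceded by a blank line opens a "section", and everything else is a plain "block" comment. A standalone sketch of the heuristic (commentMode is a hypothetical helper, not part of Jasy):

    def commentMode(startLine, currentLine, startOfFile):
        # Mirrors Tokenizer.skip(): same line as preceding code means
        # "inline"; a blank-line gap before the comment means "section";
        # otherwise it is a plain "block" comment.
        if startLine == currentLine and not startOfFile:
            return "inline"
        elif (currentLine - 1) > startLine:
            return "section"
        return "block"

    assert commentMode(3, 3, False) == "inline"
    assert commentMode(3, 5, False) == "section"
    assert commentMode(3, 4, False) == "block"
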
+    # Lexes the exponential part of a number, if present. Returns True if an
+    # exponential part was found.
+    def lexExponent(self):
+        input = self.source
+        next = input[self.cursor]
+        if next == "e" or next == "E":
+            self.cursor += 1
+            ch = input[self.cursor]
+            self.cursor += 1
+            if ch == "+" or ch == "-":
+                ch = input[self.cursor]
+                self.cursor += 1
+
+            if ch < "0" or ch > "9":
+                raise ParseError("Missing exponent", self.fileId, self.line)
+
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+
+            self.cursor -= 1
+            return True
+
+        return False
+
+
+    def lexZeroNumber(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "number"
+
+        ch = input[self.cursor]
+        self.cursor += 1
+        if ch == ".":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+
+            self.cursor -= 1
+            self.lexExponent()
+            token.value = input[token.start:self.cursor]
+
+        elif ch == "x" or ch == "X":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
+                    break
+
+            self.cursor -= 1
+            token.value = input[token.start:self.cursor]
+
+        elif ch >= "0" and ch <= "7":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "7"):
+                    break
+
+            self.cursor -= 1
+            token.value = input[token.start:self.cursor]
+
+        else:
+            self.cursor -= 1
+            self.lexExponent()     # e.g. 0E1
+            token.value = 0
+
+
+    def lexNumber(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "number"
+
+        floating = False
+        while(True):
+            ch = input[self.cursor]
+            self.cursor += 1
+
+            if ch == "." and not floating:
+                floating = True
+                ch = input[self.cursor]
+                self.cursor += 1
+
+            if not (ch >= "0" and ch <= "9"):
+                break
+
+        self.cursor -= 1
+
+        exponent = self.lexExponent()
+        segment = input[token.start:self.cursor]
+
+        # Protect float or exponent numbers
+        if floating or exponent:
+            token.value = segment
+        else:
+            token.value = int(segment)
+
+
+    def lexDot(self, ch):
+        token = self.token
+        input = self.source
+        next = input[self.cursor]
+
+        if next >= "0" and next <= "9":
+            while (True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+
+            self.cursor -= 1
+            self.lexExponent()
+
+            token.type = "number"
+            token.value = input[token.start:self.cursor]
+
+        else:
+            token.type = "dot"
+
+
+    def lexString(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "string"
+
+        hasEscapes = False
+        delim = ch
+        ch = input[self.cursor]
+        self.cursor += 1
+        while ch != delim:
+            if ch == "\\":
+                hasEscapes = True
+                self.cursor += 1
+
+            ch = input[self.cursor]
+            self.cursor += 1
+
+        if hasEscapes:
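+            # eval() is only used to decode escape sequences; the slice is exactly the quoted literal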
+            token.value = eval(input[token.start:self.cursor])
+        else:
+            token.value = input[token.start+1:self.cursor-1]
+
+
+    def lexRegExp(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "regexp"
+
+        while (True):
+            try:
+                ch = input[self.cursor]
+                self.cursor += 1
+            except IndexError:
+                raise ParseError("Unterminated regex", self.fileId, self.line)
+
+            if ch == "\\":
+                self.cursor += 1
+
+            elif ch == "[":
+                while (True):
+                    if ch == "\\":
+                        self.cursor += 1
+
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        raise ParseError("Unterminated character class", self.fileId, self.line)
+
+                    if ch == "]":
+                        break
+
+            if ch == "/":
+                break
+
+        while(True):
+            ch = input[self.cursor]
+            self.cursor += 1
+            if not (ch >= "a" and ch <= "z"):
+                break
+
+        self.cursor -= 1
+        token.value = input[token.start:self.cursor]
+
+
+    def lexOp(self, ch):
+        token = self.token
+        input = self.source
+
+        op = ch
+        while(True):
+            try:
+                next = input[self.cursor]
+            except IndexError:
+                break
+
+            if (op + next) in operatorNames:
+                self.cursor += 1
+                op += next
+            else:
+                break
+
+        try:
+            next = input[self.cursor]
+        except IndexError:
+            next = None
+
+        if next == "=" and op in assignOperators:
+            self.cursor += 1
+            token.type = "assign"
+            token.assignOp = operatorNames[op]
+            op += "="
+
+        else:
+            token.type = operatorNames[op]
+            token.assignOp = None
+
+
+    # FIXME: Unicode escape sequences
+    # FIXME: Unicode identifiers
+    def lexIdent(self, ch):
+        token = self.token
+        input = self.source
+
+        try:
+            while True:
+                ch = input[self.cursor]
+                self.cursor += 1
+
+                if not ((ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or (ch >= "0" and ch <= "9") or ch == "$" or ch == "_"):
+                    break
+
+        except IndexError:
+            # Hit the end of input; compensate for the rollback below.
+            self.cursor += 1
+
+        # Put the non-word character back.
+        self.cursor -= 1
+
+        identifier = input[token.start:self.cursor]
+        if identifier in Lang.keywords:
+            token.type = identifier
+        else:
+            token.type = "identifier"
+            token.value = identifier
+
+
+    def get(self, scanOperand=False):
+        """
+        Consumes input *only* if there is no lookahead and dispatches to the
+        appropriate lexing function depending on the input.
+        """
+        while self.lookahead:
+            self.lookahead -= 1
+            self.tokenIndex = (self.tokenIndex + 1) & 3
+            token = self.tokens[self.tokenIndex]
+            if token.type != "newline" or self.scanNewlines:
+                return token.type
+
+        self.skip()
+
+        self.tokenIndex = (self.tokenIndex + 1) & 3
+        self.tokens[self.tokenIndex] = token = Token()
+
+        token.start = self.cursor
+        token.line = self.line
+
+        input = self.source
+        if self.cursor == len(input):
+            token.end = token.start
+            token.type = "end"
+            return token.type
+
+        ch = input[self.cursor]
+        self.cursor += 1
+
+        if (ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or ch == "$" or ch == "_":
+            self.lexIdent(ch)
+
+        elif scanOperand and ch == "/":
+            self.lexRegExp(ch)
+
+        elif ch == ".":
+            self.lexDot(ch)
+
+        elif self.scanNewlines and ch == "\n":
+            token.type = "newline"
+            self.line += 1
+
+        elif ch in operatorNames:
+            self.lexOp(ch)
+
+        elif ch >= "1" and ch <= "9":
+            self.lexNumber(ch)
+
+        elif ch == "0":
+            self.lexZeroNumber(ch)
+
+        elif ch == '"' or ch == "'":
+            self.lexString(ch)
+
+        else:
+            raise ParseError("Illegal token: %s (Code: %s)" % (ch, ord(ch)), self.fileId, self.line)
+
+        token.end = self.cursor
+        return token.type
+
+
+    def unget(self):
+        """ Match depends on unget returning undefined."""
+        self.lookahead += 1
+
+        if self.lookahead == 4:
+            raise ParseError("PANIC: too much lookahead!", self.fileId, self.line)
+
+        self.tokenIndex = (self.tokenIndex - 1) & 3
+
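
The tokenizer keeps its tokens in a four-slot ring buffer (all index arithmetic is masked with & 3), which is why unget() panics once more than three tokens of lookahead accumulate. A minimal driving loop, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.tokenize.Tokenizer as Tokenizer

    tok = Tokenizer.Tokenizer("var answer = 42;", "demo.js")
    while not tok.done():
        tok.get()
        token = tok.token
        print(token.type, getattr(token, "value", ""))

    # Expected output (value shown where set):
    #   var
    #   identifier answer
    #   assign
    #   number 42
    #   semicolon
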
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/script/util/__init__.py	Sat Jan 12 12:11:42 2019 +0100
@@ -0,0 +1,416 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013-2014 Sebastian Werner
+#
+
+from __future__ import unicode_literals
+
+from jasy.script.output.Compressor import Compressor
+
+# Shared instance
+compressor = Compressor()
+
+pseudoTypes = set(["any", "var", "undefined", "null", "true", "false", "this", "arguments"])
+builtinTypes = set(["Object", "String", "Number", "Boolean", "Array", "Function", "RegExp", "Date"])
+
+# Basic user friendly node type to human type
+nodeTypeToDocType = {
+
+    # Primitives
+    "string": "String",
+    "number": "Number",
+    "not": "Boolean",
+    "true": "Boolean",
+    "false": "Boolean",
+
+    # Literals
+    "function": "Function",
+    "regexp": "RegExp",
+    "object_init": "Map",
+    "array_init": "Array",
+
+    # We could figure out the real class automatically - at least that's the case quite often
+    "new": "Object",
+    "new_with_args": "Object",
+
+    # Comparisons
+    "eq" : "Boolean",
+    "ne" : "Boolean",
+    "strict_eq" : "Boolean",
+    "strict_ne" : "Boolean",
+    "lt" : "Boolean",
+    "le" : "Boolean",
+    "gt" : "Boolean",
+    "ge" : "Boolean",
+    "in" : "Boolean",
+    "instanceof" : "Boolean",
+
+    # Numbers
+    "lsh": "Number",
+    "rsh": "Number",
+    "ursh": "Number",
+    "minus": "Number",
+    "mul": "Number",
+    "div": "Number",
+    "mod": "Number",
+    "bitwise_and": "Number",
+    "bitwise_xor": "Number",
+    "bitwise_or": "Number",
+    "bitwise_not": "Number",
+    "increment": "Number",
+    "decrement": "Number",
+    "unary_minus": "Number",
+    "unary_plus": "Number",
+
+    # This is not 100% correct, but we do not want to introduce a BooleanLike type.
+    # Authors who prefer something different can still override it via API docs.
+    "and": "Boolean",
+    "or": "Boolean",
+
+    # Operators/Built-ins
+    "void": "undefined",
+    "null": "null",
+    "typeof": "String",
+    "delete": "Boolean",
+    "this": "This",
+
+    # These are not real types; we try to figure out the real value behind them automatically
+    "call": "Call",
+    "hook": "Hook",
+    "assign": "Assign",
+    "plus": "Plus",
+    "identifier" : "Identifier",
+    "dot": "Object",
+    "index": "var"
+}
+
+
+def getVisibility(name):
+    """
+    Returns the visibility of the given name by convention
+    """
+
+    if name.startswith("__"):
+        return "private"
+    elif name.startswith("_"):
+        return "internal"
+    else:
+        return "public"
+
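
A quick illustration of the convention, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.util as util

    assert util.getVisibility("__secret") == "private"
    assert util.getVisibility("_helper") == "internal"
    assert util.getVisibility("render") == "public"
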
+
+def requiresDocumentation(name):
+    """
+    Whether the given name suggests that documentation is required
+    """
+
+    return not name.startswith("_")
+
+
+def getKeyValue(objectInit, key):
+    """
+    Returns the value node of the given key inside the given object initializer.
+    """
+
+    for propertyInit in objectInit:
+        if propertyInit[0].value == key:
+            return propertyInit[1]
+
+
+def findAssignments(name, node):
+    """
+    Returns the assignment nodes and the assigned values which might have an
+    impact on the value used in the given node.
+    """
+
+    # Looking for all script blocks
+    scripts = []
+    parent = node
+    while parent:
+        if parent.type == "script":
+            scope = getattr(parent, "scope", None)
+            if scope and name in scope.modified:
+                scripts.append(parent)
+
+        parent = getattr(parent, "parent", None)
+
+    def assignMatcher(node):
+        if node.type == "assign" and node[0].type == "identifier" and node[0].value == name:
+            return True
+
+        if node.type == "declaration" and node.name == name and getattr(node, "initializer", None):
+            return True
+
+        if node.type == "function" and node.functionForm == "declared_form" and node.name == name:
+            return True
+
+        return False
+
+    # Query all relevant script nodes
+    assignments = []
+    for script in scripts:
+        queryResult = queryAll(script, assignMatcher, False)
+        assignments.extend(queryResult)
+
+    # Collect assigned values
+    values = []
+    for assignment in assignments:
+        if assignment.type == "function":
+            values.append(assignment)
+        elif assignment.type == "assign":
+            values.append(assignment[1])
+        else:
+            values.append(assignment.initializer)
+
+    return assignments, values
+
+
+def findFunction(node):
+    """
+    Returns the first function inside the given node
+    """
+
+    return query(node, lambda node: node.type == "function")
+
+
+def findCommentNode(node):
+    """
+    Finds the first doc comment node inside the given node
+    """
+
+    def matcher(node):
+        comments = getattr(node, "comments", None)
+        if comments:
+            for comment in comments:
+                if comment.variant == "doc":
+                    return True
+
+    return query(node, matcher)
+
+
+def getDocComment(node):
+    """
+    Returns the first doc comment of the given node.
+    """
+
+    comments = getattr(node, "comments", None)
+    if comments:
+        for comment in comments:
+            if comment.variant == "doc":
+                return comment
+
+    return None
+
+
+def findReturn(node):
+    """
+    Finds the first return inside the given node
+    """
+
+    return query(node, lambda node: node.type == "return", True)
+
+
+
+def valueToString(node):
+    """
+    Converts the value of the given node into something human friendly
+    """
+
+    if node.type in ("number", "string", "false", "true", "regexp", "null"):
+        return compressor.compress(node)
+    elif node.type in nodeTypeToDocType:
+        if node.type == "plus":
+            return detectPlusType(node)
+        elif node.type in ("new", "new_with_args", "dot"):
+            return detectObjectType(node)
+        else:
+            return nodeTypeToDocType[node.type]
+    else:
+        return "Other"
+
+
+
+def queryAll(node, matcher, deep=True, inner=False, result=None):
+    """
+    Recurses the tree starting with the given node and returns a list of nodes
+    matched by the given matcher method
+
+    - node: any node
+    - matcher: function which should return a truthy value when the node matches
+    - deep: whether inner scopes should be scanned, too
+    - inner: used internally to differentiate between current and inner nodes
+    - result: can be used to extend an existing list, otherwise a new list is created and returned
+    """
+
+    if result is None:
+        result = []
+
+    # Don't do in closure functions
+    if inner and node.type == "script" and not deep:
+        return None
+
+    if matcher(node):
+        result.append(node)
+
+    for child in node:
+        queryAll(child, matcher, deep, True, result)
+
+    return result
+
+
+
+def query(node, matcher, deep=True, inner=False):
+    """
+    Recurses the tree starting with the given node and returns the first node
+    which is matched by the given matcher method.
+
+    - node: any node
+    - matcher: function which should return a truthy value when the node matches
+    - deep: whether inner scopes should be scanned, too
+    - inner: used internally to differentiate between current and inner nodes
+    """
+
+    # Don't do in closure functions
+    if inner and node.type == "script" and not deep:
+        return None
+
+    if matcher(node):
+        return node
+
+    for child in node:
+        result = query(child, matcher, deep, True)
+        if result is not None:
+            return result
+
+    return None
+
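
query() and queryAll() are the tree-walking workhorses for the helpers in this module. A small sketch collecting all identifier nodes from a parsed snippet, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.parse.Parser as Parser
    import jasy.script.util as util

    tree = Parser.parse("var a = b + c;", "demo.js")
    idents = util.queryAll(tree, lambda node: node.type == "identifier")
    print([node.value for node in idents])   # ['b', 'c'] - "a" sits in a declaration node
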
+
+def findCall(node, methodName):
+    """
+    Recurses the tree starting with the given node and returns the first node
+    which calls the given method name (supports namespaces, too)
+    """
+
+    if type(methodName) is str:
+        methodName = set([methodName])
+
+    def matcher(node):
+        call = getCallName(node)
+        if call and call in methodName:
+            return call
+
+    return query(node, matcher)
+
+
+def getCallName(node):
+    if node.type == "call":
+        if node[0].type == "dot":
+            return assembleDot(node[0])
+        elif node[0].type == "identifier":
+            return node[0].value
+
+    return None
+
+
+def getParameterFromCall(call, index=0):
+    """
+    Returns a parameter node by index on the call node
+    """
+
+    try:
+        return call[1][index]
+    except IndexError:
+        return None
+
+
+def getParamNamesFromFunction(func):
+    """
+    Returns a human readable list of parameter names (in the order they appear in the given function)
+    """
+
+    params = getattr(func, "params", None)
+    if params:
+        return [identifier.value for identifier in params]
+    else:
+        return None
+
+
+def detectPlusType(plusNode):
+    """
+    Analyses the given "plus" node and tries to figure out if a "string" or "number" result is produced.
+    """
+
+    if plusNode[0].type == "string" or plusNode[1].type == "string":
+        return "String"
+    elif plusNode[0].type == "number" and plusNode[1].type == "number":
+        return "Number"
+    elif plusNode[0].type == "plus" and detectPlusType(plusNode[0]) == "String":
+        return "String"
+    else:
+        return "var"
+
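
The inference is purely syntactic: one string-literal operand makes the whole addition a String, two number literals make it a Number, and anything else degrades to the pseudo type "var". A short example, assuming ThirdParty/Jasy is on sys.path:

    import jasy.script.parse.Parser as Parser
    import jasy.script.util as util

    plus = Parser.parse('"id-" + 4;', "demo.js")[0].expression
    print(util.detectPlusType(plus))   # "String": the left operand is a string literal
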
+
+def detectObjectType(objectNode):
+    """
+    Returns a human readable type information of the given node
+    """
+
+    if objectNode.type in ("new", "new_with_args"):
+        construct = objectNode[0]
+    else:
+        construct = objectNode
+
+    # Only support built-in top level constructs
+    if construct.type == "identifier" and construct.value in ("Array", "Boolean", "Date", "Function", "Number", "Object", "String", "RegExp"):
+        return construct.value
+
+    # And namespaced custom classes
+    elif construct.type == "dot":
+        assembled = assembleDot(construct)
+        if assembled:
+            return assembled
+
+    return "Object"
+
+
+
+def resolveIdentifierNode(identifierNode):
+    assignNodes, assignValues = findAssignments(identifierNode.value, identifierNode)
+    if assignNodes:
+
+        assignCommentNode = None
+
+        # Find the first relevant assignment with a doc comment; otherwise fall back to the first one.
+        for assign in assignNodes:
+
+            # The parent is the relevant doc comment container
+            # It's either a "var" (declaration) or "semicolon" (assignment)
+            if getDocComment(assign):
+                assignCommentNode = assign
+                break
+            elif getDocComment(assign.parent):
+                assignCommentNode = assign.parent
+                break
+
+        return assignValues[0], assignCommentNode or assignValues[0]
+
+    return None, None
+
+
+
+def assembleDot(node, result=None):
+    """
+    Joins a dot node (cascaded supported, too) into a single string like "foo.bar.Baz"
+    """
+
+    if result is None:
+        result = []
+
+    for child in node:
+        if child.type == "identifier":
+            result.append(child.value)
+        elif child.type == "dot":
+            assembleDot(child, result)
+        else:
+            return None
+
+    return ".".join(result)
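
A short usage sketch, assuming ThirdParty/Jasy is on sys.path; assembleDot() flattens nested dot nodes and bails out with None as soon as a segment is not a plain identifier:

    import jasy.script.parse.Parser as Parser
    import jasy.script.util as util

    dot = Parser.parse("foo.bar.Baz;", "demo.js")[0].expression
    print(util.assembleDot(dot))   # "foo.bar.Baz"

    call = Parser.parse("foo().bar;", "demo.js")[0].expression
    print(util.assembleDot(call))  # None: the first segment is a call, not an identifier
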
--- a/Utilities/ClassBrowsers/jsclbr.py	Sat Jan 12 11:26:32 2019 +0100
+++ b/Utilities/ClassBrowsers/jsclbr.py	Sat Jan 12 12:11:42 2019 +0100
@@ -11,8 +11,8 @@
 
 from __future__ import unicode_literals
 
-import jasy.js.parse.Parser as jsParser
-import jasy.js.tokenize.Tokenizer as jsTokenizer
+import jasy.script.parse.Parser as jsParser
+import jasy.script.tokenize.Tokenizer as jsTokenizer
 
 import Utilities
 import Utilities.ClassBrowsers as ClassBrowsers
@@ -142,7 +142,7 @@
         """
         Public method to ignore the given node.
         
-        @param node reference to the node (jasy.js.parse.Node.Node)
+        @param node reference to the node (jasy.script.parse.Node.Node)
         """
         pass
 
@@ -150,7 +150,7 @@
         """
         Public method to treat a function node.
         
-        @param node reference to the node (jasy.js.parse.Node.Node)
+        @param node reference to the node (jasy.script.parse.Node.Node)
         """
         if node.type == "function" and \
            getattr(node, "name", None) and \
@@ -188,7 +188,7 @@
         """
         Public method to treat a property_init node.
         
-        @param node reference to the node (jasy.js.parse.Node.Node)
+        @param node reference to the node (jasy.script.parse.Node.Node)
         """
         if node.type == "property_init" and node[1].type == "function":
             if self.__stack and self.__stack[-1].endlineno < node[0].line:
@@ -224,7 +224,7 @@
         """
         Public method to treat a variable node.
         
-        @param node reference to the node (jasy.js.parse.Node.Node)
+        @param node reference to the node (jasy.script.parse.Node.Node)
         """
         if node.type == "var" and \
            node.parent.type == "script" and \
@@ -250,7 +250,7 @@
         """
         Public method to treat a constant node.
         
-        @param node reference to the node (jasy.js.parse.Node.Node)
+        @param node reference to the node (jasy.script.parse.Node.Node)
         """
         if node.type == "const" and \
            node.parent.type == "script" and \
--- a/changelog	Sat Jan 12 11:26:32 2019 +0100
+++ b/changelog	Sat Jan 12 12:11:42 2019 +0100
@@ -6,6 +6,7 @@
  -- added capability to suppress some markers in the marker map
 - Third Party packages
   -- updated coverage.py to 4.5.2
+  -- updated jasy to 1.5-beta6 (latest release available)
 
 Version 19.01:
 - bug fixes
--- a/eric6.e4p	Sat Jan 12 11:26:32 2019 +0100
+++ b/eric6.e4p	Sat Jan 12 12:11:42 2019 +0100
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE Project SYSTEM "Project-5.1.dtd">
 <!-- eric project file for project eric6 -->
-<!-- Copyright (C) 2018 Detlev Offenbach, detlev@die-offenbachs.de -->
+<!-- Copyright (C) 2019 Detlev Offenbach, detlev@die-offenbachs.de -->
 <Project version="5.1">
   <Language>en_US</Language>
   <ProjectWordList>Dictionaries/words.dic</ProjectWordList>
@@ -1103,21 +1103,25 @@
     <Source>ThirdParty/Jasy/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/core/Console.py</Source>
+    <Source>ThirdParty/Jasy/jasy/core/Text.py</Source>
     <Source>ThirdParty/Jasy/jasy/core/__init__.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/__init__.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/api/Comment.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/api/Text.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/api/__init__.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/parse/Node.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/parse/Parser.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/parse/__init__.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/tokenize/Lang.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/tokenize/__init__.py</Source>
-    <Source>ThirdParty/Jasy/jasy/js/util/__init__.py</Source>
     <Source>ThirdParty/Jasy/jasy/parse/AbstractNode.py</Source>
     <Source>ThirdParty/Jasy/jasy/parse/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/api/Comment.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/api/Text.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/api/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/output/Compressor.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/output/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/parse/Lang.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/parse/Node.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/parse/Parser.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/parse/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/tokenize/Lang.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/tokenize/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/script/util/__init__.py</Source>
     <Source>ThirdParty/Pygments/__init__.py</Source>
     <Source>ThirdParty/Pygments/pygments/__init__.py</Source>
     <Source>ThirdParty/Pygments/pygments/cmdline.py</Source>
