Added a JavaScript parser to the file browser in order to show the structure of JavaScript files

author       Detlev Offenbach <detlev@die-offenbachs.de>
date         Tue, 09 Jul 2013 19:30:56 +0200
changeset    2779:4d433896b6d6
parent       2776:43b8060a4b44
child        2781:4a96bf806c49

files:
    APIs/Python3/eric5.api
    Documentation/Help/source.qch
    Documentation/Help/source.qhp
    Documentation/Source/eric5.UI.BrowserModel.html
    Documentation/Source/eric5.Utilities.ClassBrowsers.__init__.html
    Documentation/Source/eric5.Utilities.ClassBrowsers.jsclbr.html
    Documentation/Source/index-eric5.Utilities.ClassBrowsers.html
    ThirdParty/Jasy/__init__.py
    ThirdParty/Jasy/jasy/__init__.py
    ThirdParty/Jasy/jasy/core/Console.py
    ThirdParty/Jasy/jasy/core/__init__.py
    ThirdParty/Jasy/jasy/js/__init__.py
    ThirdParty/Jasy/jasy/js/api/Comment.py
    ThirdParty/Jasy/jasy/js/api/__init__.py
    ThirdParty/Jasy/jasy/js/parse/Node.py
    ThirdParty/Jasy/jasy/js/parse/Parser.py
    ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py
    ThirdParty/Jasy/jasy/js/parse/__init__.py
    ThirdParty/Jasy/jasy/js/tokenize/Lang.py
    ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py
    ThirdParty/Jasy/jasy/js/tokenize/__init__.py
    ThirdParty/Jasy/jasy/js/util/__init__.py
    ThirdParty/Jasy/jasy/license.md
    UI/BrowserModel.py
    Utilities/ClassBrowsers/__init__.py
    Utilities/ClassBrowsers/jsclbr.py
    changelog
    eric5.e4p
    eric5.py
    icons/default/fileJavascript.png
--- a/APIs/Python3/eric5.api	Sat Jul 06 18:31:44 2013 +0200
+++ b/APIs/Python3/eric5.api	Tue Jul 09 19:30:56 2013 +0200
@@ -7536,6 +7536,7 @@
 eric5.UI.BrowserModel.BrowserFileItem.isDFile?4()
 eric5.UI.BrowserModel.BrowserFileItem.isDesignerFile?4()
 eric5.UI.BrowserModel.BrowserFileItem.isIdlFile?4()
+eric5.UI.BrowserModel.BrowserFileItem.isJavaScriptFile?4()
 eric5.UI.BrowserModel.BrowserFileItem.isLinguistFile?4()
 eric5.UI.BrowserModel.BrowserFileItem.isMultiProjectFile?4()
 eric5.UI.BrowserModel.BrowserFileItem.isPixmapFile?4()
@@ -7900,6 +7901,7 @@
 eric5.Utilities.ClassBrowsers.ClbrBaseClasses._ClbrBase.setEndLine?4(endLineNo)
 eric5.Utilities.ClassBrowsers.ClbrBaseClasses._ClbrBase?2(module, name, file, lineno)
 eric5.Utilities.ClassBrowsers.IDL_SOURCE?7
+eric5.Utilities.ClassBrowsers.JS_SOURCE?7
 eric5.Utilities.ClassBrowsers.PTL_SOURCE?7
 eric5.Utilities.ClassBrowsers.PY_SOURCE?7
 eric5.Utilities.ClassBrowsers.RB_SOURCE?7
@@ -7916,6 +7918,19 @@
 eric5.Utilities.ClassBrowsers.idlclbr._modules?8
 eric5.Utilities.ClassBrowsers.idlclbr._normalize?8
 eric5.Utilities.ClassBrowsers.idlclbr.readmodule_ex?4(module, path=[])
+eric5.Utilities.ClassBrowsers.jsclbr.Attribute?1(module, name, file, lineno)
+eric5.Utilities.ClassBrowsers.jsclbr.Function?1(module, name, file, lineno, signature='', separator=', ')
+eric5.Utilities.ClassBrowsers.jsclbr.SUPPORTED_TYPES?7
+eric5.Utilities.ClassBrowsers.jsclbr.VisibilityMixin?1()
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.parse?4()
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.visit_const?4(node)
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.visit_function?4(node)
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.visit_noop?4(node)
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.visit_property_init?4(node)
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor.visit_var?4(node)
+eric5.Utilities.ClassBrowsers.jsclbr.Visitor?1(src, module, filename)
+eric5.Utilities.ClassBrowsers.jsclbr._modules?8
+eric5.Utilities.ClassBrowsers.jsclbr.readmodule_ex?4(module, path=[])
 eric5.Utilities.ClassBrowsers.pyclbr.Attribute?1(module, name, file, lineno)
 eric5.Utilities.ClassBrowsers.pyclbr.Class?1(module, name, super, file, lineno)
 eric5.Utilities.ClassBrowsers.pyclbr.Function?1(module, name, file, lineno, signature='', separator=', ', modifierType=ClbrBaseClasses.Function.General)
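
The API entries added above define the public surface of the new JavaScript class browser. A minimal usage sketch, assuming only the signatures listed in eric5.api (the file name and search path below are made-up examples):

    from Utilities.ClassBrowsers import jsclbr

    # readmodule_ex() reads a JavaScript file found on the given path and
    # returns a dictionary of Function/Attribute entries keyed by name.
    dictionary = jsclbr.readmodule_ex("example.js", ["/path/to/scripts"])
    for name, entry in dictionary.items():
        # entries are assumed to carry a lineno attribute, as in the other
        # eric5 class browsers
        print(name, entry.lineno)
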
Binary file Documentation/Help/source.qch has changed
--- a/Documentation/Help/source.qhp	Sat Jul 06 18:31:44 2013 +0200
+++ b/Documentation/Help/source.qhp	Tue Jul 09 19:30:56 2013 +0200
@@ -940,6 +940,7 @@
               <section title="eric5.Utilities.ClassBrowsers.ClbrBaseClasses" ref="eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html" />
               <section title="eric5.Utilities.ClassBrowsers.__init__" ref="eric5.Utilities.ClassBrowsers.__init__.html" />
               <section title="eric5.Utilities.ClassBrowsers.idlclbr" ref="eric5.Utilities.ClassBrowsers.idlclbr.html" />
+              <section title="eric5.Utilities.ClassBrowsers.jsclbr" ref="eric5.Utilities.ClassBrowsers.jsclbr.html" />
               <section title="eric5.Utilities.ClassBrowsers.pyclbr" ref="eric5.Utilities.ClassBrowsers.pyclbr.html" />
               <section title="eric5.Utilities.ClassBrowsers.rbclbr" ref="eric5.Utilities.ClassBrowsers.rbclbr.html" />
             </section>
@@ -1434,11 +1435,13 @@
       <keyword name="AsyncPendingWrite" id="AsyncPendingWrite" ref="eric5.DebugClients.Ruby.AsyncFile.html#AsyncPendingWrite" />
       <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#Attribute" />
+      <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric5.Utilities.ModuleParser.html#Attribute" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#Attribute.__init__" />
+      <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric5.Utilities.ModuleParser.html#Attribute.__init__" />
@@ -1788,6 +1791,7 @@
       <keyword name="BrowserFileItem.isDFile" id="BrowserFileItem.isDFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isDFile" />
       <keyword name="BrowserFileItem.isDesignerFile" id="BrowserFileItem.isDesignerFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isDesignerFile" />
       <keyword name="BrowserFileItem.isIdlFile" id="BrowserFileItem.isIdlFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isIdlFile" />
+      <keyword name="BrowserFileItem.isJavaScriptFile" id="BrowserFileItem.isJavaScriptFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isJavaScriptFile" />
       <keyword name="BrowserFileItem.isLinguistFile" id="BrowserFileItem.isLinguistFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isLinguistFile" />
       <keyword name="BrowserFileItem.isMultiProjectFile" id="BrowserFileItem.isMultiProjectFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isMultiProjectFile" />
       <keyword name="BrowserFileItem.isPixmapFile" id="BrowserFileItem.isPixmapFile" ref="eric5.UI.BrowserModel.html#BrowserFileItem.isPixmapFile" />
@@ -4797,11 +4801,13 @@
       <keyword name="FtpUtilities (Module)" id="FtpUtilities (Module)" ref="eric5.Utilities.FtpUtilities.html" />
       <keyword name="Function" id="Function" ref="eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html#Function" />
       <keyword name="Function" id="Function" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#Function" />
+      <keyword name="Function" id="Function" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric5.Utilities.ModuleParser.html#Function" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#Function.__init__" />
+      <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric5.Utilities.ModuleParser.html#Function.__init__" />
@@ -12151,11 +12157,22 @@
       <keyword name="VisibilityBase.setProtected" id="VisibilityBase.setProtected" ref="eric5.Utilities.ModuleParser.html#VisibilityBase.setProtected" />
       <keyword name="VisibilityBase.setPublic" id="VisibilityBase.setPublic" ref="eric5.Utilities.ModuleParser.html#VisibilityBase.setPublic" />
       <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#VisibilityMixin" />
+      <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#VisibilityMixin" />
       <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#VisibilityMixin" />
       <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#VisibilityMixin" />
       <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#VisibilityMixin.__init__" />
+      <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#VisibilityMixin.__init__" />
       <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#VisibilityMixin.__init__" />
       <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#VisibilityMixin.__init__" />
+      <keyword name="Visitor" id="Visitor" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor" />
+      <keyword name="Visitor (Constructor)" id="Visitor (Constructor)" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.__init__" />
+      <keyword name="Visitor.__visit" id="Visitor.__visit" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.__visit" />
+      <keyword name="Visitor.parse" id="Visitor.parse" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.parse" />
+      <keyword name="Visitor.visit_const" id="Visitor.visit_const" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_const" />
+      <keyword name="Visitor.visit_function" id="Visitor.visit_function" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_function" />
+      <keyword name="Visitor.visit_noop" id="Visitor.visit_noop" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_noop" />
+      <keyword name="Visitor.visit_property_init" id="Visitor.visit_property_init" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_property_init" />
+      <keyword name="Visitor.visit_var" id="Visitor.visit_var" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_var" />
       <keyword name="VmListspacePlugin" id="VmListspacePlugin" ref="eric5.Plugins.PluginVmListspace.html#VmListspacePlugin" />
       <keyword name="VmListspacePlugin (Constructor)" id="VmListspacePlugin (Constructor)" ref="eric5.Plugins.PluginVmListspace.html#VmListspacePlugin.__init__" />
       <keyword name="VmListspacePlugin.activate" id="VmListspacePlugin.activate" ref="eric5.Plugins.PluginVmListspace.html#VmListspacePlugin.activate" />
@@ -12768,6 +12785,7 @@
       <keyword name="isinpath" id="isinpath" ref="eric5.Utilities.__init__.html#isinpath" />
       <keyword name="iter_child_nodes" id="iter_child_nodes" ref="eric5.UtilitiesPython2.py2flakes.checker.html#iter_child_nodes" />
       <keyword name="joinext" id="joinext" ref="eric5.Utilities.__init__.html#joinext" />
+      <keyword name="jsclbr (Module)" id="jsclbr (Module)" ref="eric5.Utilities.ClassBrowsers.jsclbr.html" />
       <keyword name="linesep" id="linesep" ref="eric5.Utilities.__init__.html#linesep" />
       <keyword name="loadTranslatorForLocale" id="loadTranslatorForLocale" ref="eric5.Toolbox.Startup.html#loadTranslatorForLocale" />
       <keyword name="loadTranslators" id="loadTranslators" ref="eric5.Toolbox.Startup.html#loadTranslators" />
@@ -12896,6 +12914,7 @@
       <keyword name="readlines_1" id="readlines_1" ref="eric5.UtilitiesPython2.pep8.html#readlines_1" />
       <keyword name="readmodule" id="readmodule" ref="eric5.Utilities.ClassBrowsers.__init__.html#readmodule" />
       <keyword name="readmodule_ex" id="readmodule_ex" ref="eric5.Utilities.ClassBrowsers.idlclbr.html#readmodule_ex" />
+      <keyword name="readmodule_ex" id="readmodule_ex" ref="eric5.Utilities.ClassBrowsers.jsclbr.html#readmodule_ex" />
       <keyword name="readmodule_ex" id="readmodule_ex" ref="eric5.Utilities.ClassBrowsers.pyclbr.html#readmodule_ex" />
       <keyword name="readmodule_ex" id="readmodule_ex" ref="eric5.Utilities.ClassBrowsers.rbclbr.html#readmodule_ex" />
       <keyword name="rebase (Module)" id="rebase (Module)" ref="eric5.Plugins.VcsPlugins.vcsMercurial.RebaseExtension.rebase.html" />
@@ -13793,6 +13812,7 @@
       <file>eric5.Utilities.ClassBrowsers.ClbrBaseClasses.html</file>
       <file>eric5.Utilities.ClassBrowsers.__init__.html</file>
       <file>eric5.Utilities.ClassBrowsers.idlclbr.html</file>
+      <file>eric5.Utilities.ClassBrowsers.jsclbr.html</file>
       <file>eric5.Utilities.ClassBrowsers.pyclbr.html</file>
       <file>eric5.Utilities.ClassBrowsers.rbclbr.html</file>
       <file>eric5.Utilities.FtpUtilities.html</file>
--- a/Documentation/Source/eric5.UI.BrowserModel.html	Sat Jul 06 18:31:44 2013 +0200
+++ b/Documentation/Source/eric5.UI.BrowserModel.html	Tue Jul 09 19:30:56 2013 +0200
@@ -639,6 +639,9 @@
 <td><a href="#BrowserFileItem.isIdlFile">isIdlFile</a></td>
 <td>Public method to check, if this file is a CORBA IDL file.</td>
 </tr><tr>
+<td><a href="#BrowserFileItem.isJavaScriptFile">isJavaScriptFile</a></td>
+<td>Public method to check, if this file is a JavaScript file.</td>
+</tr><tr>
 <td><a href="#BrowserFileItem.isLinguistFile">isLinguistFile</a></td>
 <td>Public method to check, if this file is a Qt-Linguist file.</td>
 </tr><tr>
@@ -762,6 +765,16 @@
 <dd>
 flag indicating a CORBA IDL file (boolean)
 </dd>
+</dl><a NAME="BrowserFileItem.isJavaScriptFile" ID="BrowserFileItem.isJavaScriptFile"></a>
+<h4>BrowserFileItem.isJavaScriptFile</h4>
+<b>isJavaScriptFile</b>(<i></i>)
+<p>
+        Public method to check, if this file is a JavaScript file.
+</p><dl>
+<dt>Returns:</dt>
+<dd>
+flag indicating a JavaScript file (boolean)
+</dd>
 </dl><a NAME="BrowserFileItem.isLinguistFile" ID="BrowserFileItem.isLinguistFile"></a>
 <h4>BrowserFileItem.isLinguistFile</h4>
 <b>isLinguistFile</b>(<i></i>)
--- a/Documentation/Source/eric5.Utilities.ClassBrowsers.__init__.html	Sat Jul 06 18:31:44 2013 +0200
+++ b/Documentation/Source/eric5.Utilities.ClassBrowsers.__init__.html	Tue Jul 09 19:30:56 2013 +0200
@@ -34,7 +34,7 @@
 </p>
 <h3>Global Attributes</h3>
 <table>
-<tr><td>IDL_SOURCE</td></tr><tr><td>PTL_SOURCE</td></tr><tr><td>PY_SOURCE</td></tr><tr><td>RB_SOURCE</td></tr><tr><td>SUPPORTED_TYPES</td></tr><tr><td>__extensions</td></tr>
+<tr><td>IDL_SOURCE</td></tr><tr><td>JS_SOURCE</td></tr><tr><td>PTL_SOURCE</td></tr><tr><td>PY_SOURCE</td></tr><tr><td>RB_SOURCE</td></tr><tr><td>SUPPORTED_TYPES</td></tr><tr><td>__extensions</td></tr>
 </table>
 <h3>Classes</h3>
 <table>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Documentation/Source/eric5.Utilities.ClassBrowsers.jsclbr.html	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,344 @@
+<!DOCTYPE html>
+<html><head>
+<title>eric5.Utilities.ClassBrowsers.jsclbr</title>
+<meta charset="UTF-8">
+<style>
+body {
+    background: #EDECE6;
+    margin: 0em 1em 10em 1em;
+    color: black;
+}
+
+h1 { color: white; background: #85774A; }
+h2 { color: white; background: #85774A; }
+h3 { color: white; background: #9D936E; }
+h4 { color: white; background: #9D936E; }
+    
+a { color: #BA6D36; }
+
+</style>
+</head>
+<body><a NAME="top" ID="top"></a>
+<h1>eric5.Utilities.ClassBrowsers.jsclbr</h1>
+<p>
+Parse a JavaScript file and retrieve variables and functions.
+</p><p>
+It uses the JavaScript parser contained in the jasy web framework.
+</p>
+<h3>Global Attributes</h3>
+<table>
+<tr><td>SUPPORTED_TYPES</td></tr><tr><td>_modules</td></tr>
+</table>
+<h3>Classes</h3>
+<table>
+<tr>
+<td><a href="#Attribute">Attribute</a></td>
+<td>Class to represent a class attribute.</td>
+</tr><tr>
+<td><a href="#Function">Function</a></td>
+<td>Class to represent a Python function.</td>
+</tr><tr>
+<td><a href="#VisibilityMixin">VisibilityMixin</a></td>
+<td>Mixin class implementing the notion of visibility.</td>
+</tr><tr>
+<td><a href="#Visitor">Visitor</a></td>
+<td>Class implementing a visitor going through the parsed tree.</td>
+</tr>
+</table>
+<h3>Functions</h3>
+<table>
+<tr>
+<td><a href="#readmodule_ex">readmodule_ex</a></td>
+<td>Read a JavaScript file and return a dictionary of functions and variables.</td>
+</tr>
+</table>
+<hr /><hr />
+<a NAME="Attribute" ID="Attribute"></a>
+<h2>Attribute</h2>
+<p>
+    Class to represent a class attribute.
+</p>
+<h3>Derived from</h3>
+ClbrBaseClasses.Attribute, VisibilityMixin
+<h3>Class Attributes</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Class Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Methods</h3>
+<table>
+<tr>
+<td><a href="#Attribute.__init__">Attribute</a></td>
+<td>Constructor</td>
+</tr>
+</table>
+<h3>Static Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<a NAME="Attribute.__init__" ID="Attribute.__init__"></a>
+<h4>Attribute (Constructor)</h4>
+<b>Attribute</b>(<i>module, name, file, lineno</i>)
+<p>
+        Constructor
+</p><dl>
+<dt><i>module</i></dt>
+<dd>
+name of the module containing this class
+</dd><dt><i>name</i></dt>
+<dd>
+name of this class
+</dd><dt><i>file</i></dt>
+<dd>
+filename containing this attribute
+</dd><dt><i>lineno</i></dt>
+<dd>
+linenumber of the class definition
+</dd>
+</dl>
+<div align="right"><a href="#top">Up</a></div>
+<hr /><hr />
+<a NAME="Function" ID="Function"></a>
+<h2>Function</h2>
+<p>
+    Class to represent a Python function.
+</p>
+<h3>Derived from</h3>
+ClbrBaseClasses.Function, VisibilityMixin
+<h3>Class Attributes</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Class Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Methods</h3>
+<table>
+<tr>
+<td><a href="#Function.__init__">Function</a></td>
+<td>Constructor</td>
+</tr>
+</table>
+<h3>Static Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<a NAME="Function.__init__" ID="Function.__init__"></a>
+<h4>Function (Constructor)</h4>
+<b>Function</b>(<i>module, name, file, lineno, signature='', separator=', '</i>)
+<p>
+        Constructor
+</p><dl>
+<dt><i>module</i></dt>
+<dd>
+name of the module containing this function
+</dd><dt><i>name</i></dt>
+<dd>
+name of this function
+</dd><dt><i>file</i></dt>
+<dd>
+filename containing this class
+</dd><dt><i>lineno</i></dt>
+<dd>
+linenumber of the class definition
+</dd><dt><i>signature</i></dt>
+<dd>
+parameterlist of the method
+</dd><dt><i>separator</i></dt>
+<dd>
+string separating the parameters
+</dd>
+</dl>
+<div align="right"><a href="#top">Up</a></div>
+<hr /><hr />
+<a NAME="VisibilityMixin" ID="VisibilityMixin"></a>
+<h2>VisibilityMixin</h2>
+<p>
+    Mixin class implementing the notion of visibility.
+</p>
+<h3>Derived from</h3>
+ClbrBaseClasses.ClbrVisibilityMixinBase
+<h3>Class Attributes</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Class Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Methods</h3>
+<table>
+<tr>
+<td><a href="#VisibilityMixin.__init__">VisibilityMixin</a></td>
+<td>Method to initialize the visibility.</td>
+</tr>
+</table>
+<h3>Static Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<a NAME="VisibilityMixin.__init__" ID="VisibilityMixin.__init__"></a>
+<h4>VisibilityMixin (Constructor)</h4>
+<b>VisibilityMixin</b>(<i></i>)
+<p>
+        Method to initialize the visibility.
+</p>
+<div align="right"><a href="#top">Up</a></div>
+<hr /><hr />
+<a NAME="Visitor" ID="Visitor"></a>
+<h2>Visitor</h2>
+<p>
+    Class implementing a visitor going through the parsed tree.
+</p>
+<h3>Derived from</h3>
+object
+<h3>Class Attributes</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Class Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<h3>Methods</h3>
+<table>
+<tr>
+<td><a href="#Visitor.__init__">Visitor</a></td>
+<td>Constructor</td>
+</tr><tr>
+<td><a href="#Visitor.__visit">__visit</a></td>
+<td>Private method implementing the visit logic delegating to interesting methods.</td>
+</tr><tr>
+<td><a href="#Visitor.parse">parse</a></td>
+<td>Public method to parse the source.</td>
+</tr><tr>
+<td><a href="#Visitor.visit_const">visit_const</a></td>
+<td>Public method to treat a constant node.</td>
+</tr><tr>
+<td><a href="#Visitor.visit_function">visit_function</a></td>
+<td>Public method to treat a function node.</td>
+</tr><tr>
+<td><a href="#Visitor.visit_noop">visit_noop</a></td>
+<td>Public method to ignore the given node.</td>
+</tr><tr>
+<td><a href="#Visitor.visit_property_init">visit_property_init</a></td>
+<td>Public method to treat a property_init node.</td>
+</tr><tr>
+<td><a href="#Visitor.visit_var">visit_var</a></td>
+<td>Public method to treat a variable node.</td>
+</tr>
+</table>
+<h3>Static Methods</h3>
+<table>
+<tr><td>None</td></tr>
+</table>
+<a NAME="Visitor.__init__" ID="Visitor.__init__"></a>
+<h4>Visitor (Constructor)</h4>
+<b>Visitor</b>(<i>src, module, filename</i>)
+<p>
+        Constructor
+</p><dl>
+<dt><i>src</i></dt>
+<dd>
+source to be parsed (string)
+</dd><dt><i>module</i></dt>
+<dd>
+name of the module (string)
+</dd><dt><i>filename</i></dt>
+<dd>
+file name (string)
+</dd>
+</dl><a NAME="Visitor.__visit" ID="Visitor.__visit"></a>
+<h4>Visitor.__visit</h4>
+<b>__visit</b>(<i>root</i>)
+<p>
+        Private method implementing the visit logic delegating to interesting methods.
+</p><a NAME="Visitor.parse" ID="Visitor.parse"></a>
+<h4>Visitor.parse</h4>
+<b>parse</b>(<i></i>)
+<p>
+        Public method to parse the source.
+</p><dl>
+<dt>Returns:</dt>
+<dd>
+dictionary containing the parsed information
+</dd>
+</dl><a NAME="Visitor.visit_const" ID="Visitor.visit_const"></a>
+<h4>Visitor.visit_const</h4>
+<b>visit_const</b>(<i>node</i>)
+<p>
+        Public method to treat a constant node.
+</p><dl>
+<dt><i>node</i></dt>
+<dd>
+reference to the node (jasy.js.parse.Node.Node)
+</dd>
+</dl><a NAME="Visitor.visit_function" ID="Visitor.visit_function"></a>
+<h4>Visitor.visit_function</h4>
+<b>visit_function</b>(<i>node</i>)
+<p>
+        Public method to treat a function node.
+</p><dl>
+<dt><i>node</i></dt>
+<dd>
+reference to the node (jasy.js.parse.Node.Node)
+</dd>
+</dl><a NAME="Visitor.visit_noop" ID="Visitor.visit_noop"></a>
+<h4>Visitor.visit_noop</h4>
+<b>visit_noop</b>(<i>node</i>)
+<p>
+        Public method to ignore the given node.
+</p><dl>
+<dt><i>node</i></dt>
+<dd>
+reference to the node (jasy.js.parse.Node.Node)
+</dd>
+</dl><a NAME="Visitor.visit_property_init" ID="Visitor.visit_property_init"></a>
+<h4>Visitor.visit_property_init</h4>
+<b>visit_property_init</b>(<i>node</i>)
+<p>
+        Public method to treat a property_init node.
+</p><dl>
+<dt><i>node</i></dt>
+<dd>
+reference to the node (jasy.js.parse.Node.Node)
+</dd>
+</dl><a NAME="Visitor.visit_var" ID="Visitor.visit_var"></a>
+<h4>Visitor.visit_var</h4>
+<b>visit_var</b>(<i>node</i>)
+<p>
+        Public method to treat a variable node.
+</p><dl>
+<dt><i>node</i></dt>
+<dd>
+reference to the node (jasy.js.parse.Node.Node)
+</dd>
+</dl>
+<div align="right"><a href="#top">Up</a></div>
+<hr /><hr />
+<a NAME="readmodule_ex" ID="readmodule_ex"></a>
+<h2>readmodule_ex</h2>
+<b>readmodule_ex</b>(<i>module, path=[]</i>)
+<p>
+    Read a JavaScript file and return a dictionary of functions and variables.
+</p><dl>
+<dt><i>module</i></dt>
+<dd>
+name of the JavaScript file (string)
+</dd><dt><i>path</i></dt>
+<dd>
+path the file should be searched in (list of strings)
+</dd>
+</dl><dl>
+<dt>Returns:</dt>
+<dd>
+the resulting dictionary
+</dd>
+</dl>
+<div align="right"><a href="#top">Up</a></div>
+<hr />
+</body></html>
\ No newline at end of file
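
The Visitor class documented above wraps the bundled Jasy parser and dispatches to the per-node visit_* methods. A rough sketch of driving it directly, based only on the documented constructor and parse() signatures (the source text and names are illustrative):

    from Utilities.ClassBrowsers.jsclbr import Visitor

    src = "function add(a, b) { return a + b; }\nvar counter = 0;"
    visitor = Visitor(src, "example", "example.js")
    # parse() walks the Jasy parse tree and returns a dictionary with the
    # functions and variables found in the source.
    dictionary = visitor.parse()
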
--- a/Documentation/Source/index-eric5.Utilities.ClassBrowsers.html	Sat Jul 06 18:31:44 2013 +0200
+++ b/Documentation/Source/index-eric5.Utilities.ClassBrowsers.html	Tue Jul 09 19:30:56 2013 +0200
@@ -46,6 +46,9 @@
 <td><a href="eric5.Utilities.ClassBrowsers.idlclbr.html">idlclbr</a></td>
 <td>Parse a CORBA IDL file and retrieve modules, interfaces, methods and attributes.</td>
 </tr><tr>
+<td><a href="eric5.Utilities.ClassBrowsers.jsclbr.html">jsclbr</a></td>
+<td>Parse a JavaScript file and retrieve variables and functions.</td>
+</tr><tr>
 <td><a href="eric5.Utilities.ClassBrowsers.pyclbr.html">pyclbr</a></td>
 <td>Parse a Python file and retrieve classes, functions/methods and attributes.</td>
 </tr><tr>
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/__init__.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+"""
+Package containing the JavaScript parser of the jasy web framework.
+"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/__init__.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,17 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+# Copyright 2013 Sebastian Werner
+#
+
+"""
+**Jasy - Web Tooling Framework**
+
+Jasy is a powerful Python3-based tooling framework. 
+It makes it easy to manage heavy web projects. 
+Its main goal is to offer an API which could be used by developers to write their
+custom build/deployment scripts.
+"""
+
+__version__ = "1.1.0"
+__author__ = "Sebastian Werner <info@sebastian-werner.net>"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/core/Console.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+import logging
+
+def error(text, *argv):
+    """Outputs an error message"""
+
+    logging.error(text, *argv)
+
+def warn(text, *argv):
+    """Outputs an warning"""
+
+    logging.warn(text, *argv)
+
+def info(text, *argv):
+    """Outputs an info message"""
+
+    logging.info(text, *argv)
+
+def debug(text, *argv):
+    """Output a debug message"""
+
+    logging.debug(text, *argv)
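
jasy/core/Console.py is an eric5-specific stand-in for Jasy's console module; it simply forwards to the standard logging package, so the %-style message arguments used elsewhere (for example in Comment.py below) are passed straight through. A small sketch, assuming ThirdParty/Jasy is on sys.path so the module is importable as jasy.core.Console:

    import logging
    import jasy.core.Console as Console

    logging.basicConfig(level=logging.DEBUG)
    # Arguments are handed to logging unchanged, so %-style placeholders work.
    Console.warn("Could not outdent doc comment at line %s in %s", 42, "example.js")
    Console.info("finished parsing %s", "example.js")
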
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/core/__init__.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+#
+# This is an eric5 dummy package to provide some special variants of modules
+# found in the standard jasy package
+#
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/api/Comment.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,161 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+import re
+
+import jasy.core.Console as Console
+
+__all__ = ["CommentException", "Comment"]
+
+
+# Used to measure the doc indent size (with leading stars in front of content)
+docIndentReg = re.compile(r"^(\s*\*\s*)(\S*)")
+
+
+class CommentException(Exception):
+    """
+    Thrown when errors during comment processing are detected.
+    """
+    def __init__(self, message, lineNo=0):
+        Exception.__init__(self, "Comment error: %s (line: %s)" % (message, lineNo+1))
+
+
+class Comment():
+    """
+    Comment class is attached to parsed nodes and used to store all comment related
+    information.
+    
+    The class supports a new Markdown and TomDoc inspired dialect to make developers'
+    lives easier and their work less repetitive.
+    """
+    
+    # Relation to code
+    context = None
+    
+    # Collected text of the comment
+    text = None
+    
+    def __init__(self, text, context=None, lineNo=0, indent="", fileId=None):
+
+        # Store context (relation to code)
+        self.context = context
+        
+        # Store fileId
+        self.fileId = fileId
+        
+        # Figure out the type of the comment based on the starting characters
+
+        # Inline comments
+        if text.startswith("//"):
+            # "// hello" => "   hello"
+            text = "  " + text[2:]
+            self.variant = "single"
+            
+        # Doc comments
+        elif text.startswith("/**"):
+            # "/** hello */" => "    hello "
+            text = "   " + text[3:-2]
+            self.variant = "doc"
+
+        # Protected comments which should not be removed
+        # (e.g these are used for license blocks)
+        elif text.startswith("/*!"):
+            # "/*! hello */" => "    hello "
+            text = "   " + text[3:-2]
+            self.variant = "protected"
+            
+        # A normal multiline comment
+        elif text.startswith("/*"):
+            # "/* hello */" => "   hello "
+            text = "  " + text[2:-2]
+            self.variant = "multi"
+            
+        else:
+            raise CommentException("Invalid comment text: %s" % text, lineNo)
+
+        # Multi line comments need to have their indentation removed
+        if "\n" in text:
+            text = self.__outdent(text, indent, lineNo)
+
+        # For single line comments strip the surrounding whitespace
+        else:
+            # " hello " => "hello"
+            text = text.strip()
+
+        # The text of the comment
+        self.text = text
+    
+    def __outdent(self, text, indent, startLineNo):
+        """
+        Outdent multi-line comment text and filter empty lines
+        """
+        
+        lines = []
+
+        # First, split up the comments lines and remove the leading indentation
+        for lineNo, line in enumerate((indent+text).split("\n")):
+
+            if line.startswith(indent):
+                lines.append(line[len(indent):].rstrip())
+
+            elif line.strip() == "":
+                lines.append("")
+
+            else:
+                # Only warn for doc comments, otherwise it might just be code commented
+                # out which is sometimes formatted pretty crazy when commented out
+                if self.variant == "doc":
+                    Console.warn("Could not outdent doc comment at line %s in %s",
+                        startLineNo+lineNo, self.fileId)
+                    
+                return text
+
+        # Find first line with real content, then grab the one after it to get the
+        # characters which need to be removed
+        outdentString = ""
+        for lineNo, line in enumerate(lines):
+
+            if line != "" and line.strip() != "":
+                matchedDocIndent = docIndentReg.match(line)
+                
+                if not matchedDocIndent:
+                    # As soon as we find a non doc indent like line we stop
+                    break
+                    
+                elif matchedDocIndent.group(2) != "":
+                    # otherwise we look for content behind the indent to get the 
+                    # correct real indent (with spaces)
+                    outdentString = matchedDocIndent.group(1)
+                    break
+                
+            lineNo += 1
+
+        # Process outdenting to all lines (remove the outdentString from the start
+        # of the lines)
+        if outdentString != "":
+
+            lineNo = 0
+            outdentStringLen = len(outdentString)
+
+            for lineNo, line in enumerate(lines):
+                if len(line) <= outdentStringLen:
+                    lines[lineNo] = ""
+
+                else:
+                    if not line.startswith(outdentString):
+                        
+                        # Only warn for doc comments, otherwise it might just be code
+                        # commented out which is sometimes formatted pretty crazy when
+                        # commented out
+                        if self.variant == "doc":
+                            Console.warn(
+                                "Invalid indentation in doc comment at line %s in %s",
+                                startLineNo+lineNo, self.fileId)
+                        
+                    else:
+                        lines[lineNo] = line[outdentStringLen:]
+
+        # Merge final lines and remove leading and trailing new lines
+        return "\n".join(lines).strip("\n")
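
The Comment class above classifies a raw comment string by its leading characters (//, /**, /*!, /*) into the single, doc, protected, and multi variants, strips the delimiters, and outdents multi-line text. A brief sketch, assuming ThirdParty/Jasy is on sys.path:

    from jasy.js.api.Comment import Comment

    # "/**" selects the "doc" variant; the delimiters are stripped and
    # single-line text is whitespace-trimmed.
    comment = Comment("/** Adds two numbers. */")
    print(comment.variant)   # "doc"
    print(comment.text)      # "Adds two numbers."
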
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/parse/Node.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,214 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors: 
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004)
+#   - Sebastian Werner <info@sebastian-werner.net> (Refactoring Python) (2010)
+#
+
+import copy
+
+class Node(list):
+    
+    __slots__ = [
+        # core data
+        "line", "type", "tokenizer", "start", "end", "rel", "parent", 
+        
+        # dynamic added data by other modules
+        "comments", "scope", 
+        
+        # node type specific
+        "value", "expression", "body", "functionForm", "parenthesized", "fileId",
+        "params", "name", "readOnly", "initializer", "condition", "isLoop", "isEach",
+        "object", "assignOp", "iterator", "thenPart", "exception", "elsePart", "setup",
+        "postfix", "update", "tryBlock", "block", "defaultIndex", "discriminant", "label",
+        "statements", "finallyBlock", "statement", "variables", "names", "guard", "for",
+        "tail", "expressionClosure"
+    ]
+    
+    def __init__(self, tokenizer=None, type=None, args=[]):
+        list.__init__(self)
+        
+        self.start = 0
+        self.end = 0
+        self.line = None
+        
+        if tokenizer:
+            token = getattr(tokenizer, "token", None)
+            if token:
+                # We may define a custom type but use the same positioning as another
+                # token, e.g. transform curlys in block nodes, etc.
+                self.type = type if type else getattr(token, "type", None)
+                self.line = token.line
+                
+                # Start & end are file positions for error handling.
+                self.start = token.start
+                self.end = token.end
+            
+            else:
+                self.type = type
+                self.line = tokenizer.line
+                self.start = None
+                self.end = None
+
+            self.tokenizer = tokenizer
+            
+        elif type:
+            self.type = type
+
+        for arg in args:
+            self.append(arg)
+    
+    def getUnrelatedChildren(self):
+        """Collects all unrelated children"""
+        
+        collection = []
+        for child in self:
+            if not hasattr(child, "rel"):
+                collection.append(child)
+            
+        return collection
+    
+    def getChildrenLength(self, filter=True):
+        """Number of (per default unrelated) children"""
+        
+        count = 0
+        for child in self:
+            if not filter or not hasattr(child, "rel"):
+                count += 1
+        return count
+    
+    def remove(self, kid):
+        """Removes the given kid"""
+        
+        if not kid in self:
+            raise Exception("Given node is no child!")
+        
+        if hasattr(kid, "rel"):
+            delattr(self, kid.rel)
+            del kid.rel
+            del kid.parent
+            
+        list.remove(self, kid)
+    
+    def insert(self, index, kid):
+        """Inserts the given kid at the given index"""
+        
+        if index is None:
+            return self.append(kid)
+            
+        if hasattr(kid, "parent"):
+            kid.parent.remove(kid)
+            
+        kid.parent = self
+
+        return list.insert(self, index, kid)
+    
+    def append(self, kid, rel=None):
+        """Appends the given kid with an optional relation hint"""
+        
+        # kid can be null e.g. [1, , 2].
+        if kid:
+            if hasattr(kid, "parent"):
+                kid.parent.remove(kid)
+            
+            # Debug
+            if not isinstance(kid, Node):
+                raise Exception("Invalid kid: %s" % kid)
+            
+            if hasattr(kid, "tokenizer"):
+                if hasattr(kid, "start"):
+                    if not hasattr(self, "start") or \
+                       self.start == None or \
+                       kid.start < self.start:
+                        self.start = kid.start
+
+                if hasattr(kid, "end"):
+                    if not hasattr(self, "end") or \
+                       self.end == None or \
+                       self.end < kid.end:
+                        self.end = kid.end
+            
+            kid.parent = self
+            
+            # alias for function
+            if rel != None:
+                setattr(self, rel, kid)
+                setattr(kid, "rel", rel)
+
+        # Block None kids when they should be related
+        if not kid and rel:
+            return
+        
+        return list.append(self, kid)
+    
+    def replace(self, kid, repl):
+        """Replaces the given kid with a replacement kid"""
+        
+        if repl in self:
+            self.remove(repl)
+        
+        self[self.index(kid)] = repl
+        
+        if hasattr(kid, "rel"):
+            repl.rel = kid.rel
+            setattr(self, kid.rel, repl)
+            
+            # cleanup old kid
+            delattr(kid, "rel")
+        
+        elif hasattr(repl, "rel"):
+            # delete old relation on new child
+            delattr(repl, "rel")
+
+        delattr(kid, "parent")
+        repl.parent = self
+        
+        return kid
+    
+    def __deepcopy__(self, memo):
+        """Used by deepcopy function to clone Node instances"""
+        
+        # Create copy
+        if hasattr(self, "tokenizer"):
+            result = Node(tokenizer=self.tokenizer)
+        else:
+            result = Node(type=self.type)
+        
+        # Copy children
+        for child in self:
+            if child is None:
+                list.append(result, None)
+            else:
+                # Using simple list appends for better performance
+                childCopy = copy.deepcopy(child, memo)
+                childCopy.parent = result
+                list.append(result, childCopy)
+        
+        # Sync attributes
+        # Note: "parent" attribute is handled by append() already
+        for name in self.__slots__:
+            if hasattr(self, name) and not name in ("parent", "tokenizer"):
+                value = getattr(self, name)
+                if value is None:
+                    pass
+                elif type(value) in (bool, int, float, str):
+                    setattr(result, name, value)
+                elif type(value) in (list, set, dict, Node):
+                    setattr(result, name, copy.deepcopy(value, memo))
+                # Scope can be assigned (will be re-created when needed for the
+                # copied node)
+                elif name == "scope":
+                    result.scope = self.scope
+
+        return result
+    
+    def __eq__(self, other):
+        return self is other
+
+    def __bool__(self): 
+        return True
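
Node extends list: children are stored as list items, and append() accepts an optional relation hint that also exposes the child as an attribute on the parent. A small sketch built only from the methods shown above (the node types are illustrative):

    from jasy.js.parse.Node import Node

    parent = Node(type="var")
    child = Node(type="identifier")

    # The rel hint sets parent.initializer and marks the child as "related",
    # so getUnrelatedChildren() no longer reports it.
    parent.append(child, rel="initializer")
    print(parent.initializer is child)     # True
    print(parent.getUnrelatedChildren())   # []
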
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/parse/Parser.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,1527 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors: 
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010-2012)
+#
+
+import jasy.js.tokenize.Tokenizer
+import jasy.js.parse.VanillaBuilder
+import jasy.js.tokenize.Lang
+
+__all__ = [ "parse", "parseExpression" ]
+
+def parseExpression(source, fileId=None, line=1, builder=None):
+    if builder == None:
+        builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
+    
+    # Convert source into expression statement to be friendly to the Tokenizer
+    if not source.endswith(";"):
+        source = source + ";"
+    
+    tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
+    staticContext = StaticContext(False, builder)
+    
+    return Expression(tokenizer, staticContext)
+
+
+def parse(source, fileId=None, line=1, builder=None):
+    if builder == None:
+        builder = jasy.js.parse.VanillaBuilder.VanillaBuilder()
+    
+    tokenizer = jasy.js.tokenize.Tokenizer.Tokenizer(source, fileId, line)
+    staticContext = StaticContext(False, builder)
+    node = Script(tokenizer, staticContext)
+    
+    # store fileId on top-level node
+    node.fileId = tokenizer.fileId
+    
+    # add missing comments e.g. empty file with only a comment etc.
+    # if there is something non-attached by an inner node it is attached to
+    # the top level node, which is not correct, but might be better than
+    # just ignoring the comment after all.
+    if len(node) > 0:
+        builder.COMMENTS_add(node[-1], None, tokenizer.getComments())
+    else:
+        builder.COMMENTS_add(node, None, tokenizer.getComments())
+    
+    if not tokenizer.done():
+        raise SyntaxError("Unexpected end of file", tokenizer)
+
+    return node
+
+
+
+class SyntaxError(Exception):
+    def __init__(self, message, tokenizer):
+        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (
+            message, tokenizer.fileId, tokenizer.line))
+
+
+# Used as a status container during tree-building for every def body and the global body
+class StaticContext(object):
+    # inFunction is used to check if a return stm appears in a valid context.
+    def __init__(self, inFunction, builder):
+        # Whether this is inside a function, mostly True, only for top-level scope
+        # it's False
+        self.inFunction = inFunction
+        
+        self.hasEmptyReturn = False
+        self.hasReturnWithValue = False
+        self.isGenerator = False
+        self.blockId = 0
+        self.builder = builder
+        self.statementStack = []
+        
+        # Sets to store variable uses
+        # self.functions = set()
+        # self.variables = set()
+        
+        # Status
+        # self.needsHoisting = False
+        self.bracketLevel = 0
+        self.curlyLevel = 0
+        self.parenLevel = 0
+        self.hookLevel = 0
+        
+        # Configure strict ecmascript 3 mode
+        self.ecma3OnlyMode = False
+        
+        # Status flag during parsing
+        self.inForLoopInit = False
+
+
+def Script(tokenizer, staticContext):
+    """Parses the toplevel and def bodies."""
+    node = Statements(tokenizer, staticContext)
+    
+    # change type from "block" to "script" for script root
+    node.type = "script"
+    
+    # copy over data from compiler context
+    # node.functions = staticContext.functions
+    # node.variables = staticContext.variables
+
+    return node
+    
+
+def nest(tokenizer, staticContext, node, func, end=None):
+    """Statement stack and nested statement handler."""
+    staticContext.statementStack.append(node)
+    node = func(tokenizer, staticContext)
+    staticContext.statementStack.pop()
+    end and tokenizer.mustMatch(end)
+    
+    return node
+
+
+def Statements(tokenizer, staticContext):
+    """Parses a list of Statements."""
+
+    builder = staticContext.builder
+    node = builder.BLOCK_build(tokenizer, staticContext.blockId)
+    staticContext.blockId += 1
+
+    builder.BLOCK_hoistLets(node)
+    staticContext.statementStack.append(node)
+
+    prevNode = None
+    while not tokenizer.done() and tokenizer.peek(True) != "right_curly":
+        comments = tokenizer.getComments()
+        childNode = Statement(tokenizer, staticContext)
+        builder.COMMENTS_add(childNode, prevNode, comments)
+        builder.BLOCK_addStatement(node, childNode)
+        prevNode = childNode
+
+    staticContext.statementStack.pop()
+    builder.BLOCK_finish(node)
+
+    # if getattr(node, "needsHoisting", False):
+    #     # TODO
+    #     raise Exception("Needs hoisting went true!!!")
+    #     builder.setHoists(node.id, node.variables)
+    #     # Propagate up to the function.
+    #     staticContext.needsHoisting = True
+
+    return node
+
+
+def Block(tokenizer, staticContext):
+    tokenizer.mustMatch("left_curly")
+    node = Statements(tokenizer, staticContext)
+    tokenizer.mustMatch("right_curly")
+    
+    return node
+
+
+def Statement(tokenizer, staticContext):
+    """Parses a Statement."""
+
+    tokenType = tokenizer.get(True)
+    builder = staticContext.builder
+
+    # Cases for statements ending in a right curly return early, avoiding the
+    # common semicolon insertion magic after this switch.
+    
+    if tokenType == "function":
+        # "declared_form" extends functions of staticContext,
+        # "statement_form" doesn'tokenizer.
+        if len(staticContext.statementStack) > 1:
+            kind = "statement_form"
+        else:
+            kind = "declared_form"
+        
+        return FunctionDefinition(tokenizer, staticContext, True, kind)
+        
+        
+    elif tokenType == "left_curly":
+        node = Statements(tokenizer, staticContext)
+        tokenizer.mustMatch("right_curly")
+        
+        return node
+        
+        
+    elif tokenType == "if":
+        node = builder.IF_build(tokenizer)
+        builder.IF_setCondition(node, ParenExpression(tokenizer, staticContext))
+        staticContext.statementStack.append(node)
+        builder.IF_setThenPart(node, Statement(tokenizer, staticContext))
+
+        if tokenizer.match("else"):
+            comments = tokenizer.getComments()
+            elsePart = Statement(tokenizer, staticContext)
+            builder.COMMENTS_add(elsePart, node, comments)
+            builder.IF_setElsePart(node, elsePart)
+
+        staticContext.statementStack.pop()
+        builder.IF_finish(node)
+        
+        return node
+        
+        
+    elif tokenType == "switch":
+        # This allows CASEs after a "default", which is in the standard.
+        node = builder.SWITCH_build(tokenizer)
+        builder.SWITCH_setDiscriminant(node, ParenExpression(tokenizer, staticContext))
+        staticContext.statementStack.append(node)
+
+        tokenizer.mustMatch("left_curly")
+        tokenType = tokenizer.get()
+        
+        while tokenType != "right_curly":
+            if tokenType == "default":
+                if node.defaultIndex >= 0:
+                    raise SyntaxError("More than one switch default", tokenizer)
+                    
+                childNode = builder.DEFAULT_build(tokenizer)
+                builder.SWITCH_setDefaultIndex(node, len(node)-1)
+                tokenizer.mustMatch("colon")
+                builder.DEFAULT_initializeStatements(childNode, tokenizer)
+                
+                while True:
+                    tokenType=tokenizer.peek(True)
+                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
+                        break
+                    builder.DEFAULT_addStatement(childNode, Statement(tokenizer, staticContext))
+                
+                builder.DEFAULT_finish(childNode)
+
+            elif tokenType == "case":
+                childNode = builder.CASE_build(tokenizer)
+                builder.CASE_setLabel(childNode, Expression(tokenizer, staticContext))
+                tokenizer.mustMatch("colon")
+                builder.CASE_initializeStatements(childNode, tokenizer)
+
+                while True:
+                    tokenType=tokenizer.peek(True)
+                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
+                        break
+                    builder.CASE_addStatement(childNode, Statement(tokenizer, staticContext))
+                
+                builder.CASE_finish(childNode)
+
+            else:
+                raise SyntaxError("Invalid switch case", tokenizer)
+
+            builder.SWITCH_addCase(node, childNode)
+            tokenType = tokenizer.get()
+
+        staticContext.statementStack.pop()
+        builder.SWITCH_finish(node)
+
+        return node
+        
+
+    elif tokenType == "for":
+        node = builder.FOR_build(tokenizer)
+        forBlock = None
+        
+        if tokenizer.match("identifier") and tokenizer.token.value == "each":
+            builder.FOR_rebuildForEach(node)
+            
+        tokenizer.mustMatch("left_paren")
+        tokenType = tokenizer.peek()
+        childNode = None
+        
+        if tokenType != "semicolon":
+            staticContext.inForLoopInit = True
+            
+            if tokenType == "var" or tokenType == "const":
+                tokenizer.get()
+                childNode = Variables(tokenizer, staticContext)
+            
+            elif tokenType == "let":
+                tokenizer.get()
+
+                if tokenizer.peek() == "left_paren":
+                    childNode = LetBlock(tokenizer, staticContext, False)
+                    
+                else:
+                    # Let in for head, we need to add an implicit block
+                    # around the rest of the for.
+                    forBlock = builder.BLOCK_build(tokenizer, staticContext.blockId)
+                    staticContext.blockId += 1
+                    staticContext.statementStack.append(forBlock)
+                    childNode = Variables(tokenizer, staticContext, forBlock)
+                
+            else:
+                childNode = Expression(tokenizer, staticContext)
+            
+            staticContext.inForLoopInit = False
+
+        if childNode and tokenizer.match("in"):
+            builder.FOR_rebuildForIn(node)
+            builder.FOR_setObject(node, Expression(tokenizer, staticContext), forBlock)
+            
+            if childNode.type == "var" or childNode.type == "let":
+                if len(childNode) != 1:
+                    raise SyntaxError("Invalid for..in left-hand side", tokenizer)
+
+                builder.FOR_setIterator(node, childNode, forBlock)
+                
+            else:
+                builder.FOR_setIterator(node, childNode, forBlock)
+
+        else:
+            builder.FOR_setSetup(node, childNode)
+            tokenizer.mustMatch("semicolon")
+            
+            if node.isEach:
+                raise SyntaxError("Invalid for each..in loop", tokenizer)
+                
+            if tokenizer.peek() == "semicolon":
+                builder.FOR_setCondition(node, None)
+            else:
+                builder.FOR_setCondition(node, Expression(tokenizer, staticContext))
+            
+            tokenizer.mustMatch("semicolon")
+            
+            if tokenizer.peek() == "right_paren":
+                builder.FOR_setUpdate(node, None)
+            else:    
+                builder.FOR_setUpdate(node, Expression(tokenizer, staticContext))
+        
+        tokenizer.mustMatch("right_paren")
+        builder.FOR_setBody(node, nest(tokenizer, staticContext, node, Statement))
+        
+        if forBlock:
+            builder.BLOCK_finish(forBlock)
+            staticContext.statementStack.pop()
+    
+        builder.FOR_finish(node)
+        return node
+        
+        
+    elif tokenType == "while":
+        node = builder.WHILE_build(tokenizer)
+        
+        builder.WHILE_setCondition(node, ParenExpression(tokenizer, staticContext))
+        builder.WHILE_setBody(node, nest(tokenizer, staticContext, node, Statement))
+        builder.WHILE_finish(node)
+        
+        return node                                    
+        
+        
+    elif tokenType == "do":
+        node = builder.DO_build(tokenizer)
+        
+        builder.DO_setBody(node, nest(tokenizer, staticContext, node, Statement, "while"))
+        builder.DO_setCondition(node, ParenExpression(tokenizer, staticContext))
+        builder.DO_finish(node)
+        
+        if not staticContext.ecma3OnlyMode:
+            # <script language="JavaScript"> (without version hints) may need
+            # automatic semicolon insertion without a newline after do-while.
+            # See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
+            tokenizer.match("semicolon")
+            return node
+
+        # NO RETURN
+      
+      
+    elif tokenType == "break" or tokenType == "continue":
+        if tokenType == "break":
+            node = builder.BREAK_build(tokenizer) 
+        else:
+            node = builder.CONTINUE_build(tokenizer)
+
+        if tokenizer.peekOnSameLine() == "identifier":
+            tokenizer.get()
+            
+            if tokenType == "break":
+                builder.BREAK_setLabel(node, tokenizer.token.value)
+            else:
+                builder.CONTINUE_setLabel(node, tokenizer.token.value)
+
+        statementStack = staticContext.statementStack
+        i = len(statementStack)
+        label = node.label if hasattr(node, "label") else None
+
+        if label:
+            while True:
+                i -= 1
+                if i < 0:
+                    raise SyntaxError("Label not found", tokenizer)
+                if getattr(statementStack[i], "label", None) == label:
+                    break
+
+            # 
+            # Both break and continue to label need to be handled specially
+            # within a labeled loop, so that they target that loop. If not in
+            # a loop, then break targets its labeled statement. Labels can be
+            # nested so we skip all labels immediately enclosing the nearest
+            # non-label statement.
+            # 
+            while i < len(statementStack) - 1 and statementStack[i+1].type == "label":
+                i += 1
+                
+            if i < len(statementStack) - 1 and getattr(statementStack[i+1], "isLoop", False):
+                i += 1
+            elif tokenType == "continue":
+                raise SyntaxError("Invalid continue", tokenizer)
+                
+        else:
+            while True:
+                i -= 1
+                if i < 0:
+                    if tokenType == "break":
+                        raise SyntaxError("Invalid break", tokenizer)
+                    else:
+                        raise SyntaxError("Invalid continue", tokenizer)
+
+                if getattr(statementStack[i], "isLoop", False) or (tokenType == "break" and statementStack[i].type == "switch"):
+                    break
+        
+        if tokenType == "break":
+            builder.BREAK_finish(node)
+        else:
+            builder.CONTINUE_finish(node)
+        
+        # NO RETURN
+
+
+    elif tokenType == "try":
+        node = builder.TRY_build(tokenizer)
+        builder.TRY_setTryBlock(node, Block(tokenizer, staticContext))
+        
+        while tokenizer.match("catch"):
+            childNode = builder.CATCH_build(tokenizer)
+            tokenizer.mustMatch("left_paren")
+            nextTokenType = tokenizer.get()
+            
+            if nextTokenType == "left_bracket" or nextTokenType == "left_curly":
+                # Destructured catch identifiers.
+                tokenizer.unget()
+                exception = DestructuringExpression(tokenizer, staticContext, True)
+            
+            elif nextTokenType == "identifier":
+                exception = builder.CATCH_wrapException(tokenizer)
+            
+            else:
+                raise SyntaxError("Missing identifier in catch", tokenizer)
+                
+            builder.CATCH_setException(childNode, exception)
+            
+            if tokenizer.match("if"):
+                if staticContext.ecma3OnlyMode:
+                    raise SyntaxError("Illegal catch guard", tokenizer)
+                    
+                if node.getChildrenLength() > 0 and not node.getUnrelatedChildren()[0].guard:
+                    raise SyntaxError("Guarded catch after unguarded", tokenizer)
+                    
+                builder.CATCH_setGuard(childNode, Expression(tokenizer, staticContext))
+                
+            else:
+                builder.CATCH_setGuard(childNode, None)
+            
+            tokenizer.mustMatch("right_paren")
+            
+            builder.CATCH_setBlock(childNode, Block(tokenizer, staticContext))
+            builder.CATCH_finish(childNode)
+            
+            builder.TRY_addCatch(node, childNode)
+        
+        builder.TRY_finishCatches(node)
+        
+        if tokenizer.match("finally"):
+            builder.TRY_setFinallyBlock(node, Block(tokenizer, staticContext))
+            
+        if node.getChildrenLength() == 0 and not hasattr(node, "finallyBlock"):
+            raise SyntaxError("Invalid try statement", tokenizer)
+            
+        builder.TRY_finish(node)
+        return node
+        
+
+    elif tokenType == "catch" or tokenType == "finally":
+        raise SyntaxError(tokenType + " without preceding try", tokenizer)
+
+
+    elif tokenType == "throw":
+        node = builder.THROW_build(tokenizer)
+        
+        builder.THROW_setException(node, Expression(tokenizer, staticContext))
+        builder.THROW_finish(node)
+        
+        # NO RETURN
+
+
+    elif tokenType == "return":
+        node = returnOrYield(tokenizer, staticContext)
+        
+        # NO RETURN
+
+
+    elif tokenType == "with":
+        node = builder.WITH_build(tokenizer)
+
+        builder.WITH_setObject(node, ParenExpression(tokenizer, staticContext))
+        builder.WITH_setBody(node, nest(tokenizer, staticContext, node, Statement))
+        builder.WITH_finish(node)
+
+        return node
+
+
+    elif tokenType == "var" or tokenType == "const":
+        node = Variables(tokenizer, staticContext)
+        
+        # NO RETURN
+        
+
+    elif tokenType == "let":
+        if tokenizer.peek() == "left_paren":
+            node = LetBlock(tokenizer, staticContext, True)
+        else:
+            node = Variables(tokenizer, staticContext)
+        
+        # NO RETURN
+        
+
+    elif tokenType == "debugger":
+        node = builder.DEBUGGER_build(tokenizer)
+        
+        # NO RETURN
+        
+
+    elif tokenType == "newline" or tokenType == "semicolon":
+        node = builder.SEMICOLON_build(tokenizer)
+
+        builder.SEMICOLON_setExpression(node, None)
+        builder.SEMICOLON_finish(node)
+        
+        return node
+
+
+    else:
+        if tokenType == "identifier":
+            tokenType = tokenizer.peek()
+
+            # Labeled statement.
+            if tokenType == "colon":
+                label = tokenizer.token.value
+                statementStack = staticContext.statementStack
+               
+                i = len(statementStack)-1
+                while i >= 0:
+                    if getattr(statementStack[i], "label", None) == label:
+                        raise SyntaxError("Duplicate label", tokenizer)
+                    
+                    i -= 1
+               
+                tokenizer.get()
+                node = builder.LABEL_build(tokenizer)
+                
+                builder.LABEL_setLabel(node, label)
+                builder.LABEL_setStatement(node, nest(tokenizer, staticContext, node, Statement))
+                builder.LABEL_finish(node)
+                
+                return node
+
+        # Expression statement.
+        # We unget the current token to parse the expression as a whole.
+        node = builder.SEMICOLON_build(tokenizer)
+        tokenizer.unget()
+        builder.SEMICOLON_setExpression(node, Expression(tokenizer, staticContext))
+        node.end = node.expression.end
+        builder.SEMICOLON_finish(node)
+        
+        # NO RETURN
+        
+
+    MagicalSemicolon(tokenizer)
+    return node
+
+
+
+def MagicalSemicolon(tokenizer):
+    if tokenizer.line == tokenizer.token.line:
+        tokenType = tokenizer.peekOnSameLine()
+    
+        if tokenType != "end" and tokenType != "newline" and tokenType != "semicolon" and tokenType != "right_curly":
+            raise SyntaxError("Missing ; before statement", tokenizer)
+    
+    tokenizer.match("semicolon")
+
+    
+
+def returnOrYield(tokenizer, staticContext):
+    builder = staticContext.builder
+    tokenType = tokenizer.token.type
+
+    if tokenType == "return":
+        if not staticContext.inFunction:
+            raise SyntaxError("Return not in function", tokenizer)
+            
+        node = builder.RETURN_build(tokenizer)
+        
+    else:
+        if not staticContext.inFunction:
+            raise SyntaxError("Yield not in function", tokenizer)
+            
+        staticContext.isGenerator = True
+        node = builder.YIELD_build(tokenizer)
+
+    nextTokenType = tokenizer.peek(True)
+    if nextTokenType != "end" and nextTokenType != "newline" and nextTokenType != "semicolon" and nextTokenType != "right_curly" and (tokenType != "yield" or (nextTokenType != tokenType and nextTokenType != "right_bracket" and nextTokenType != "right_paren" and nextTokenType != "colon" and nextTokenType != "comma")):
+        if tokenType == "return":
+            builder.RETURN_setValue(node, Expression(tokenizer, staticContext))
+            staticContext.hasReturnWithValue = True
+        else:
+            builder.YIELD_setValue(node, AssignExpression(tokenizer, staticContext))
+        
+    elif tokenType == "return":
+        staticContext.hasEmptyReturn = True
+
+    # Disallow return v; in generator.
+    if staticContext.hasReturnWithValue and staticContext.isGenerator:
+        raise SyntaxError("Generator returns a value", tokenizer)
+
+    if tokenType == "return":
+        builder.RETURN_finish(node)
+    else:
+        builder.YIELD_finish(node)
+
+    return node
+
+
+
+def FunctionDefinition(tokenizer, staticContext, requireName, functionForm):
+    builder = staticContext.builder
+    functionNode = builder.FUNCTION_build(tokenizer)
+    
+    if tokenizer.match("identifier"):
+        builder.FUNCTION_setName(functionNode, tokenizer.token.value)
+    elif requireName:
+        raise SyntaxError("Missing def identifier", tokenizer)
+
+    tokenizer.mustMatch("left_paren")
+    
+    if not tokenizer.match("right_paren"):
+        builder.FUNCTION_initParams(functionNode, tokenizer)
+        prevParamNode = None
+        while True:
+            tokenType = tokenizer.get()
+            if tokenType == "left_bracket" or tokenType == "left_curly":
+                # Destructured formal parameters.
+                tokenizer.unget()
+                paramNode = DestructuringExpression(tokenizer, staticContext)
+                
+            elif tokenType == "identifier":
+                paramNode = builder.FUNCTION_wrapParam(tokenizer)
+                
+            else:
+                raise SyntaxError("Missing formal parameter", tokenizer)
+                
+            builder.FUNCTION_addParam(functionNode, tokenizer, paramNode)
+            builder.COMMENTS_add(paramNode, prevParamNode, tokenizer.getComments())
+        
+            if not tokenizer.match("comma"):
+                break
+                
+            prevParamNode = paramNode
+        
+        tokenizer.mustMatch("right_paren")
+
+    # Do we have an expression closure or a normal body?
+    tokenType = tokenizer.get()
+    if tokenType != "left_curly":
+        builder.FUNCTION_setExpressionClosure(functionNode, True)
+        tokenizer.unget()
+
+    childContext = StaticContext(True, builder)
+    tokenizer.save()
+    
+    if staticContext.inFunction:
+        # Inner functions don't reset block numbering, only functions at
+        # the top level of the program do.
+        childContext.blockId = staticContext.blockId
+
+    if tokenType != "left_curly":
+        builder.FUNCTION_setBody(functionNode, AssignExpression(tokenizer, staticContext))
+        if staticContext.isGenerator:
+            raise SyntaxError("Generator returns a value", tokenizer)
+            
+    else:
+        builder.FUNCTION_hoistVars(childContext.blockId)
+        builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
+
+    # 
+    # Hoisting makes parse-time binding analysis tricky. A taxonomy of hoists:
+    # 
+    # 1. vars hoist to the top of their function:
+    # 
+    #    var x = 'global';
+    #    function f() {
+    #      x = 'f';
+    #      if (false)
+    #        var x;
+    #    }
+    #    f();
+    #    print(x); // "global"
+    # 
+    # 2. lets hoist to the top of their block:
+    # 
+    #    function f() { // id: 0
+    #      var x = 'f';
+    #      {
+    #        {
+    #          print(x); // "undefined"
+    #        }
+    #        let x;
+    #      }
+    #    }
+    #    f();
+    # 
+    # 3. inner functions at function top-level hoist to the beginning
+    #    of the function.
+    # 
+    # If the builder used is doing parse-time analyses, hoisting may
+    # invalidate earlier conclusions it makes about variable scope.
+    # 
+    # The builder can opt to set the needsHoisting flag in a
+    # CompilerContext (in the case of var and function hoisting) or in a
+    # node of type BLOCK (in the case of let hoisting). This signals for
+    # the parser to reparse sections of code.
+    # 
+    # To avoid exponential blowup, if a function at the program top-level
+    # has any hoists in its child blocks or inner functions, we reparse
+    # the entire toplevel function. Each toplevel function is parsed at
+    # most twice.
+    # 
+    # The list of declarations can be tied to block ids to aid talking
+    # about declarations of blocks that have not yet been fully parsed.
+    # 
+    # Blocks are already uniquely numbered; see the comment in
+    # Statements.
+    # 
+    
+    #
+    # wpbasti: 
+    # I don't have the feeling that this functionality is needed, because the
+    # tree is often modified before the variables and names inside it are
+    # of any interest. So this is better done in a post-scan.
+    #
+    
+    #
+    # if childContext.needsHoisting:
+    #     # Order is important here! Builders expect functions to come after variables!
+    #     builder.setHoists(functionNode.body.id, childContext.variables.concat(childContext.functions))
+    # 
+    #     if staticContext.inFunction:
+    #         # If an inner function needs hoisting, we need to propagate
+    #         # this flag up to the parent function.
+    #         staticContext.needsHoisting = True
+    #     
+    #     else:
+    #         # Only re-parse functions at the top level of the program.
+    #         childContext = StaticContext(True, builder)
+    #         tokenizer.rewind(rp)
+    #         
+    #         # Set a flag in case the builder wants to have different behavior
+    #         # on the second pass.
+    #         builder.secondPass = True
+    #         builder.FUNCTION_hoistVars(functionNode.body.id, True)
+    #         builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
+    #         builder.secondPass = False
+
+    if tokenType == "left_curly":
+        tokenizer.mustMatch("right_curly")
+
+    functionNode.end = tokenizer.token.end
+    functionNode.functionForm = functionForm
+    
+    builder.COMMENTS_add(functionNode.body, functionNode.body, tokenizer.getComments())
+    builder.FUNCTION_finish(functionNode, staticContext)
+    
+    return functionNode
+
+
+
+def Variables(tokenizer, staticContext, letBlock=None):
+    """Parses a comma-separated list of var declarations (and maybe initializations)."""
+    
+    builder = staticContext.builder
+    if tokenizer.token.type == "var":
+        build = builder.VAR_build
+        addDecl = builder.VAR_addDecl
+        finish = builder.VAR_finish
+        childContext = staticContext
+            
+    elif tokenizer.token.type == "const":
+        build = builder.CONST_build
+        addDecl = builder.CONST_addDecl
+        finish = builder.CONST_finish
+        childContext = staticContext
+        
+    elif tokenizer.token.type == "let" or tokenizer.token.type == "left_paren":
+        build = builder.LET_build
+        addDecl = builder.LET_addDecl
+        finish = builder.LET_finish
+        
+        if not letBlock:
+            statementStack = staticContext.statementStack
+            i = len(statementStack) - 1
+            
+            # a BLOCK *must* be found.
+            while statementStack[i].type != "block":
+                i -= 1
+
+            # Lets at the function toplevel are just vars, at least in SpiderMonkey.
+            if i == 0:
+                build = builder.VAR_build
+                addDecl = builder.VAR_addDecl
+                finish = builder.VAR_finish
+                childContext = staticContext
+
+            else:
+                childContext = statementStack[i]
+            
+        else:
+            childContext = letBlock
+
+    node = build(tokenizer)
+    
+    while True:
+        tokenType = tokenizer.get()
+
+        # Done in Python port!
+        # FIXME Should have a special DECLARATION node instead of overloading
+        # IDENTIFIER to mean both identifier declarations and destructured
+        # declarations.
+        childNode = builder.DECL_build(tokenizer)
+        
+        if tokenType == "left_bracket" or tokenType == "left_curly":
+            # Pass in childContext if we need to add each pattern matched into
+            # its variables, else pass in staticContext.
+            # Need to unget to parse the full destructured expression.
+            tokenizer.unget()
+            builder.DECL_setNames(childNode, DestructuringExpression(tokenizer, staticContext, True, childContext))
+            
+            if staticContext.inForLoopInit and tokenizer.peek() == "in":
+                addDecl(node, childNode, childContext)
+                if tokenizer.match("comma"): 
+                    continue
+                else: 
+                    break            
+
+            tokenizer.mustMatch("assign")
+            if tokenizer.token.assignOp:
+                raise SyntaxError("Invalid variable initialization", tokenizer)
+
+            # Parse the init as a normal assignment.
+            builder.DECL_setInitializer(childNode, AssignExpression(tokenizer, staticContext))
+            builder.DECL_finish(childNode)
+            addDecl(node, childNode, childContext)
+            
+            # Copy over names for variable list
+            # for nameNode in childNode.names:
+            #    childContext.variables.add(nameNode.value)
+                
+            if tokenizer.match("comma"): 
+                continue
+            else: 
+                break            
+
+        if tokenType != "identifier":
+            raise SyntaxError("Missing variable name", tokenizer)
+
+        builder.DECL_setName(childNode, tokenizer.token.value)
+        builder.DECL_setReadOnly(childNode, node.type == "const")
+        addDecl(node, childNode, childContext)
+
+        if tokenizer.match("assign"):
+            if tokenizer.token.assignOp:
+                raise SyntaxError("Invalid variable initialization", tokenizer)
+
+            initializerNode = AssignExpression(tokenizer, staticContext)
+            builder.DECL_setInitializer(childNode, initializerNode)
+
+        builder.DECL_finish(childNode)
+        
+        # If we directly use the node in "let" constructs
+        # if not hasattr(childContext, "variables"):
+        #    childContext.variables = set()
+        
+        # childContext.variables.add(childNode.name)
+        
+        if not tokenizer.match("comma"):
+            break
+        
+    finish(node)
+    return node
+
+
+
+def LetBlock(tokenizer, staticContext, isStatement):
+    """Does not handle let inside of for loop init."""
+    builder = staticContext.builder
+
+    # tokenizer.token.type must be "let"
+    node = builder.LETBLOCK_build(tokenizer)
+    tokenizer.mustMatch("left_paren")
+    builder.LETBLOCK_setVariables(node, Variables(tokenizer, staticContext, node))
+    tokenizer.mustMatch("right_paren")
+
+    if isStatement and tokenizer.peek() != "left_curly":
+        # If this is really an expression in let statement guise, then we
+        # need to wrap the "let_block" node in a "semicolon" node so that we pop
+        # the return value of the expression.
+        childNode = builder.SEMICOLON_build(tokenizer)
+        builder.SEMICOLON_setExpression(childNode, node)
+        builder.SEMICOLON_finish(childNode)
+        isStatement = False
+
+    if isStatement:
+        childNode = Block(tokenizer, staticContext)
+        builder.LETBLOCK_setBlock(node, childNode)
+        
+    else:
+        childNode = AssignExpression(tokenizer, staticContext)
+        builder.LETBLOCK_setExpression(node, childNode)
+
+    builder.LETBLOCK_finish(node)
+    return node
+
+
+def checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly=None, data=None):
+    if node.type == "array_comp":
+        raise SyntaxError("Invalid array comprehension left-hand side", tokenizer)
+        
+    if node.type != "array_init" and node.type != "object_init":
+        return
+
+    builder = staticContext.builder
+
+    for child in node:
+        if child == None:
+            continue
+        
+        if child.type == "property_init":
+            lhs = child[0]
+            rhs = child[1]
+        else:
+            lhs = None
+            rhs = None
+            
+    
+        if rhs and (rhs.type == "array_init" or rhs.type == "object_init"):
+            checkDestructuring(tokenizer, staticContext, rhs, simpleNamesOnly, data)
+            
+        if lhs and simpleNamesOnly:
+            # In declarations, lhs must be simple names
+            if lhs.type != "identifier":
+                raise SyntaxError("Missing name in pattern", tokenizer)
+                
+            elif data:
+                childNode = builder.DECL_build(tokenizer)
+                builder.DECL_setName(childNode, lhs.value)
+
+                # Don't need to set initializer because it's just for
+                # hoisting anyways.
+                builder.DECL_finish(childNode)
+
+                # Each pattern needs to be added to variables.
+                # data.variables.add(childNode.name)
+                
+
+# JavaScript 1.7
+def DestructuringExpression(tokenizer, staticContext, simpleNamesOnly=None, data=None):
+    node = PrimaryExpression(tokenizer, staticContext)
+    checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly, data)
+
+    return node
+
+
+# JavaScript 1.7
+def GeneratorExpression(tokenizer, staticContext, expression):
+    builder = staticContext.builder
+    node = builder.GENERATOR_build(tokenizer)
+
+    builder.GENERATOR_setExpression(node, expression)
+    builder.GENERATOR_setTail(node, comprehensionTail(tokenizer, staticContext))
+    builder.GENERATOR_finish(node)
+    
+    return node
+
+
+# JavaScript 1.7 Comprehension Tails (Generators / Arrays)
+def comprehensionTail(tokenizer, staticContext):
+    builder = staticContext.builder
+    
+    # tokenizer.token.type must be "for"
+    body = builder.COMPTAIL_build(tokenizer)
+    
+    while True:
+        node = builder.FOR_build(tokenizer)
+        
+        # Comprehension tails are always for..in loops.
+        builder.FOR_rebuildForIn(node)
+        if tokenizer.match("identifier"):
+            # But sometimes they're for each..in.
+            if tokenizer.token.value == "each":
+                builder.FOR_rebuildForEach(node)
+            else:
+                tokenizer.unget()
+
+        tokenizer.mustMatch("left_paren")
+        
+        tokenType = tokenizer.get()
+        if tokenType == "left_bracket" or tokenType == "left_curly":
+            tokenizer.unget()
+            # Destructured left side of for in comprehension tails.
+            builder.FOR_setIterator(node, DestructuringExpression(tokenizer, staticContext))
+
+        elif tokenType == "identifier":
+            # Removed variable/declaration substructure in Python port.
+            # Variable declarations are not allowed here. So why process them in such a way?
+            
+            # declaration = builder.DECL_build(tokenizer)
+            # builder.DECL_setName(declaration, tokenizer.token.value)
+            # builder.DECL_finish(declaration)
+            # childNode = builder.VAR_build(tokenizer)
+            # builder.VAR_addDecl(childNode, declaration)
+            # builder.VAR_finish(childNode)
+            # builder.FOR_setIterator(node, declaration)
+
+            # Don't add to variables, since the semantics of comprehensions are
+            # such that the variables are in their own function when desugared.
+            
+            identifier = builder.PRIMARY_build(tokenizer, "identifier")
+            builder.FOR_setIterator(node, identifier)
+
+        else:
+            raise SyntaxError("Missing identifier", tokenizer)
+        
+        tokenizer.mustMatch("in")
+        builder.FOR_setObject(node, Expression(tokenizer, staticContext))
+        tokenizer.mustMatch("right_paren")
+        builder.COMPTAIL_addFor(body, node)
+        
+        if not tokenizer.match("for"):
+            break
+
+    # Optional guard.
+    if tokenizer.match("if"):
+        builder.COMPTAIL_setGuard(body, ParenExpression(tokenizer, staticContext))
+
+    builder.COMPTAIL_finish(body)
+
+    return body
+
+
+def ParenExpression(tokenizer, staticContext):
+    tokenizer.mustMatch("left_paren")
+
+    # Always accept the 'in' operator in a parenthesized expression,
+    # where it's unambiguous, even if we might be parsing the init of a
+    # for statement.
+    oldLoopInit = staticContext.inForLoopInit
+    staticContext.inForLoopInit = False
+    node = Expression(tokenizer, staticContext)
+    staticContext.inForLoopInit = oldLoopInit
+
+    err = "expression must be parenthesized"
+    if tokenizer.match("for"):
+        if node.type == "yield" and not node.parenthesized:
+            raise SyntaxError("Yield " + err, tokenizer)
+            
+        if node.type == "comma" and not node.parenthesized:
+            raise SyntaxError("Generator " + err, tokenizer)
+            
+        node = GeneratorExpression(tokenizer, staticContext, node)
+
+    tokenizer.mustMatch("right_paren")
+
+    return node
+
+
+def Expression(tokenizer, staticContext):
+    """Top-down expression parser matched against SpiderMonkey."""
+    builder = staticContext.builder
+    node = AssignExpression(tokenizer, staticContext)
+
+    if tokenizer.match("comma"):
+        childNode = builder.COMMA_build(tokenizer)
+        builder.COMMA_addOperand(childNode, node)
+        node = childNode
+        while True:
+            childNode = node[len(node)-1]
+            if childNode.type == "yield" and not childNode.parenthesized:
+                raise SyntaxError("Yield expression must be parenthesized", tokenizer)
+            builder.COMMA_addOperand(node, AssignExpression(tokenizer, staticContext))
+            
+            if not tokenizer.match("comma"):
+                break
+                
+        builder.COMMA_finish(node)
+
+    return node
+
+
+def AssignExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+
+    # Have to treat yield like an operand because it could be the leftmost
+    # operand of the expression.
+    if tokenizer.match("yield", True):
+        return returnOrYield(tokenizer, staticContext)
+
+    comments = tokenizer.getComments()
+    node = builder.ASSIGN_build(tokenizer)
+    lhs = ConditionalExpression(tokenizer, staticContext)
+    builder.COMMENTS_add(lhs, None, comments)
+
+    if not tokenizer.match("assign"):
+        builder.ASSIGN_finish(node)
+        return lhs
+
+    if lhs.type == "object_init" or lhs.type == "array_init":
+        checkDestructuring(tokenizer, staticContext, lhs)
+    elif lhs.type == "identifier" or lhs.type == "dot" or lhs.type == "index" or lhs.type == "call":
+        pass
+    else:
+        raise SyntaxError("Bad left-hand side of assignment", tokenizer)
+        
+    builder.ASSIGN_setAssignOp(node, tokenizer.token.assignOp)
+    builder.ASSIGN_addOperand(node, lhs)
+    builder.ASSIGN_addOperand(node, AssignExpression(tokenizer, staticContext))
+    builder.ASSIGN_finish(node)
+
+    return node
+
+
+def ConditionalExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = OrExpression(tokenizer, staticContext)
+
+    if tokenizer.match("hook"):
+        childNode = node
+        node = builder.HOOK_build(tokenizer)
+        builder.HOOK_setCondition(node, childNode)
+
+        # Always accept the 'in' operator in the middle clause of a ternary,
+        # where it's unambiguous, even if we might be parsing the init of a
+        # for statement.
+        oldLoopInit = staticContext.inForLoopInit
+        staticContext.inForLoopInit = False
+        builder.HOOK_setThenPart(node, AssignExpression(tokenizer, staticContext))
+        staticContext.inForLoopInit = oldLoopInit
+        
+        if not tokenizer.match("colon"):
+            raise SyntaxError("Missing : after ?", tokenizer)
+            
+        builder.HOOK_setElsePart(node, AssignExpression(tokenizer, staticContext))
+        builder.HOOK_finish(node)
+
+    return node
+    
+
+def OrExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = AndExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("or"):
+        childNode = builder.OR_build(tokenizer)
+        builder.OR_addOperand(childNode, node)
+        builder.OR_addOperand(childNode, AndExpression(tokenizer, staticContext))
+        builder.OR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def AndExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseOrExpression(tokenizer, staticContext)
+
+    while tokenizer.match("and"):
+        childNode = builder.AND_build(tokenizer)
+        builder.AND_addOperand(childNode, node)
+        builder.AND_addOperand(childNode, BitwiseOrExpression(tokenizer, staticContext))
+        builder.AND_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseOrExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseXorExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("bitwise_or"):
+        childNode = builder.BITWISEOR_build(tokenizer)
+        builder.BITWISEOR_addOperand(childNode, node)
+        builder.BITWISEOR_addOperand(childNode, BitwiseXorExpression(tokenizer, staticContext))
+        builder.BITWISEOR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseXorExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = BitwiseAndExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("bitwise_xor"):
+        childNode = builder.BITWISEXOR_build(tokenizer)
+        builder.BITWISEXOR_addOperand(childNode, node)
+        builder.BITWISEXOR_addOperand(childNode, BitwiseAndExpression(tokenizer, staticContext))
+        builder.BITWISEXOR_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def BitwiseAndExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = EqualityExpression(tokenizer, staticContext)
+
+    while tokenizer.match("bitwise_and"):
+        childNode = builder.BITWISEAND_build(tokenizer)
+        builder.BITWISEAND_addOperand(childNode, node)
+        builder.BITWISEAND_addOperand(childNode, EqualityExpression(tokenizer, staticContext))
+        builder.BITWISEAND_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def EqualityExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = RelationalExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("eq") or tokenizer.match("ne") or tokenizer.match("strict_eq") or tokenizer.match("strict_ne"):
+        childNode = builder.EQUALITY_build(tokenizer)
+        builder.EQUALITY_addOperand(childNode, node)
+        builder.EQUALITY_addOperand(childNode, RelationalExpression(tokenizer, staticContext))
+        builder.EQUALITY_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def RelationalExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    oldLoopInit = staticContext.inForLoopInit
+
+    # Uses of the in operator in shiftExprs are always unambiguous,
+    # so unset the flag that prohibits recognizing it.
+    staticContext.inForLoopInit = False
+    node = ShiftExpression(tokenizer, staticContext)
+
+    while tokenizer.match("lt") or tokenizer.match("le") or tokenizer.match("ge") or tokenizer.match("gt") or (oldLoopInit == False and tokenizer.match("in")) or tokenizer.match("instanceof"):
+        childNode = builder.RELATIONAL_build(tokenizer)
+        builder.RELATIONAL_addOperand(childNode, node)
+        builder.RELATIONAL_addOperand(childNode, ShiftExpression(tokenizer, staticContext))
+        builder.RELATIONAL_finish(childNode)
+        node = childNode
+    
+    staticContext.inForLoopInit = oldLoopInit
+
+    return node
+
+
+def ShiftExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = AddExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("lsh") or tokenizer.match("rsh") or tokenizer.match("ursh"):
+        childNode = builder.SHIFT_build(tokenizer)
+        builder.SHIFT_addOperand(childNode, node)
+        builder.SHIFT_addOperand(childNode, AddExpression(tokenizer, staticContext))
+        builder.SHIFT_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def AddExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = MultiplyExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("plus") or tokenizer.match("minus"):
+        childNode = builder.ADD_build(tokenizer)
+        builder.ADD_addOperand(childNode, node)
+        builder.ADD_addOperand(childNode, MultiplyExpression(tokenizer, staticContext))
+        builder.ADD_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def MultiplyExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = UnaryExpression(tokenizer, staticContext)
+    
+    while tokenizer.match("mul") or tokenizer.match("div") or tokenizer.match("mod"):
+        childNode = builder.MULTIPLY_build(tokenizer)
+        builder.MULTIPLY_addOperand(childNode, node)
+        builder.MULTIPLY_addOperand(childNode, UnaryExpression(tokenizer, staticContext))
+        builder.MULTIPLY_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def UnaryExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    tokenType = tokenizer.get(True)
+
+    if tokenType in ["delete", "void", "typeof", "not", "bitwise_not", "plus", "minus"]:
+        node = builder.UNARY_build(tokenizer)
+        builder.UNARY_addOperand(node, UnaryExpression(tokenizer, staticContext))
+    
+    elif tokenType == "increment" or tokenType == "decrement":
+        # Prefix increment/decrement.
+        node = builder.UNARY_build(tokenizer)
+        builder.UNARY_addOperand(node, MemberExpression(tokenizer, staticContext, True))
+
+    else:
+        tokenizer.unget()
+        node = MemberExpression(tokenizer, staticContext, True)
+
+        # Don't look across a newline boundary for a postfix {in,de}crement.
+        if tokenizer.tokens[(tokenizer.tokenIndex + tokenizer.lookahead - 1) & 3].line == tokenizer.line:
+            if tokenizer.match("increment") or tokenizer.match("decrement"):
+                childNode = builder.UNARY_build(tokenizer)
+                builder.UNARY_setPostfix(childNode)
+                builder.UNARY_finish(node)
+                builder.UNARY_addOperand(childNode, node)
+                node = childNode
+
+    builder.UNARY_finish(node)
+    return node
+
+
+def MemberExpression(tokenizer, staticContext, allowCallSyntax):
+    builder = staticContext.builder
+
+    if tokenizer.match("new"):
+        node = builder.MEMBER_build(tokenizer)
+        builder.MEMBER_addOperand(node, MemberExpression(tokenizer, staticContext, False))
+        
+        if tokenizer.match("left_paren"):
+            builder.MEMBER_rebuildNewWithArgs(node)
+            builder.MEMBER_addOperand(node, ArgumentList(tokenizer, staticContext))
+        
+        builder.MEMBER_finish(node)
+    
+    else:
+        node = PrimaryExpression(tokenizer, staticContext)
+
+    while True:
+        tokenType = tokenizer.get()
+        if tokenType == "end":
+            break
+        
+        if tokenType == "dot":
+            childNode = builder.MEMBER_build(tokenizer)
+            builder.MEMBER_addOperand(childNode, node)
+            tokenizer.mustMatch("identifier")
+            builder.MEMBER_addOperand(childNode, builder.MEMBER_build(tokenizer))
+
+        elif tokenType == "left_bracket":
+            childNode = builder.MEMBER_build(tokenizer, "index")
+            builder.MEMBER_addOperand(childNode, node)
+            builder.MEMBER_addOperand(childNode, Expression(tokenizer, staticContext))
+            tokenizer.mustMatch("right_bracket")
+
+        elif tokenType == "left_paren" and allowCallSyntax:
+            childNode = builder.MEMBER_build(tokenizer, "call")
+            builder.MEMBER_addOperand(childNode, node)
+            builder.MEMBER_addOperand(childNode, ArgumentList(tokenizer, staticContext))
+
+        else:
+            tokenizer.unget()
+            return node
+
+        builder.MEMBER_finish(childNode)
+        node = childNode
+
+    return node
+
+
+def ArgumentList(tokenizer, staticContext):
+    builder = staticContext.builder
+    node = builder.LIST_build(tokenizer)
+    
+    if tokenizer.match("right_paren", True):
+        return node
+    
+    while True:    
+        childNode = AssignExpression(tokenizer, staticContext)
+        if childNode.type == "yield" and not childNode.parenthesized and tokenizer.peek() == "comma":
+            raise SyntaxError("Yield expression must be parenthesized", tokenizer)
+            
+        if tokenizer.match("for"):
+            childNode = GeneratorExpression(tokenizer, staticContext, childNode)
+            if len(node) > 1 or tokenizer.peek(True) == "comma":
+                raise SyntaxError("Generator expression must be parenthesized", tokenizer)
+        
+        builder.LIST_addOperand(node, childNode)
+        if not tokenizer.match("comma"):
+            break
+
+    tokenizer.mustMatch("right_paren")
+    builder.LIST_finish(node)
+
+    return node
+
+
+def PrimaryExpression(tokenizer, staticContext):
+    builder = staticContext.builder
+    tokenType = tokenizer.get(True)
+
+    if tokenType == "function":
+        node = FunctionDefinition(tokenizer, staticContext, False, "expressed_form")
+
+    elif tokenType == "left_bracket":
+        node = builder.ARRAYINIT_build(tokenizer)
+        while True:
+            tokenType = tokenizer.peek(True)
+            if tokenType == "right_bracket":
+                break
+        
+            if tokenType == "comma":
+                tokenizer.get()
+                builder.ARRAYINIT_addElement(node, None)
+                continue
+
+            builder.ARRAYINIT_addElement(node, AssignExpression(tokenizer, staticContext))
+
+            if tokenType != "comma" and not tokenizer.match("comma"):
+                break
+
+        # If we matched exactly one element and got a "for", we have an
+        # array comprehension.
+        if len(node) == 1 and tokenizer.match("for"):
+            childNode = builder.ARRAYCOMP_build(tokenizer)
+            builder.ARRAYCOMP_setExpression(childNode, node[0])
+            builder.ARRAYCOMP_setTail(childNode, comprehensionTail(tokenizer, staticContext))
+            node = childNode
+        
+        builder.COMMENTS_add(node, node, tokenizer.getComments())
+        tokenizer.mustMatch("right_bracket")
+        builder.PRIMARY_finish(node)
+
+    elif tokenType == "left_curly":
+        node = builder.OBJECTINIT_build(tokenizer)
+
+        if not tokenizer.match("right_curly"):
+            while True:
+                tokenType = tokenizer.get()
+                tokenValue = getattr(tokenizer.token, "value", None)
+                comments = tokenizer.getComments()
+                
+                if tokenValue in ("get", "set") and tokenizer.peek() == "identifier":
+                    if staticContext.ecma3OnlyMode:
+                        raise SyntaxError("Illegal property accessor", tokenizer)
+                        
+                    fd = FunctionDefinition(tokenizer, staticContext, True, "expressed_form")
+                    builder.OBJECTINIT_addProperty(node, fd)
+                    
+                else:
+                    if tokenType == "identifier" or tokenType == "number" or tokenType == "string":
+                        id = builder.PRIMARY_build(tokenizer, "identifier")
+                        builder.PRIMARY_finish(id)
+                        
+                    elif tokenType == "right_curly":
+                        if staticContext.ecma3OnlyMode:
+                            raise SyntaxError("Illegal trailing ,", tokenizer)
+                            
+                        tokenizer.unget()
+                        break
+                            
+                    else:
+                        if tokenValue in jasy.js.tokenize.Lang.keywords:
+                            id = builder.PRIMARY_build(tokenizer, "identifier")
+                            builder.PRIMARY_finish(id)
+                        else:
+                            print("Value is '%s'" % tokenValue)
+                            raise SyntaxError("Invalid property name", tokenizer)
+                    
+                    if tokenizer.match("colon"):
+                        childNode = builder.PROPERTYINIT_build(tokenizer)
+                        builder.COMMENTS_add(childNode, node, comments)
+                        builder.PROPERTYINIT_addOperand(childNode, id)
+                        builder.PROPERTYINIT_addOperand(childNode, AssignExpression(tokenizer, staticContext))
+                        builder.PROPERTYINIT_finish(childNode)
+                        builder.OBJECTINIT_addProperty(node, childNode)
+                        
+                    else:
+                        # Support, e.g., |var {x, y} = o| as destructuring shorthand
+                        # for |var {x: x, y: y} = o|, per proposed JS2/ES4 for JS1.8.
+                        if tokenizer.peek() != "comma" and tokenizer.peek() != "right_curly":
+                            raise SyntaxError("Missing : after property", tokenizer)
+                        builder.OBJECTINIT_addProperty(node, id)
+                    
+                if not tokenizer.match("comma"):
+                    break
+
+            builder.COMMENTS_add(node, node, tokenizer.getComments())
+            tokenizer.mustMatch("right_curly")
+
+        builder.OBJECTINIT_finish(node)
+
+    elif tokenType == "left_paren":
+        # ParenExpression does its own matching on parentheses, so we need to unget.
+        tokenizer.unget()
+        node = ParenExpression(tokenizer, staticContext)
+        node.parenthesized = True
+
+    elif tokenType == "let":
+        node = LetBlock(tokenizer, staticContext, False)
+
+    elif tokenType in ["null", "this", "true", "false", "identifier", "number", "string", "regexp"]:
+        node = builder.PRIMARY_build(tokenizer, tokenType)
+        builder.PRIMARY_finish(node)
+
+    else:
+        raise SyntaxError("Missing operand. Found type: %s" % tokenType, tokenizer)
+
+    return node
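For orientation, the recursive-descent functions above never build nodes themselves; every construct is delegated to the X_build/X_addOperand/X_finish callbacks of a builder object, and the VanillaBuilder added below is the default implementation that assembles a plain jasy.js.parse.Node tree. The following is only a minimal usage sketch: it assumes the module-level parse(source, fileId) helper defined near the top of Parser.py (as in upstream Jasy) and the node attributes set by VanillaBuilder (type, name, line), which is roughly how Utilities/ClassBrowsers/jsclbr.py is expected to consume the parser.

    # Sketch only; the parse() signature and node attributes are assumptions noted above.
    import jasy.js.parse.Parser as Parser

    source = '''
    function greet(name) {
        return "Hello, " + name;
    }
    var answer = 42;
    '''

    # Parse into a jasy.js.parse.Node tree; "example.js" is only used as a label.
    root = Parser.parse(source, "example.js")

    # Node subclasses list, so children can be iterated directly. Report the
    # top-level functions and variable declarations, which is essentially what
    # the file browser's structure view needs.
    for child in root:
        if child.type == "function":
            print("function", getattr(child, "name", "<anonymous>"), "at line", child.line)
        elif child.type == "var":
            for decl in child:
                print("var", decl.name, "at line", decl.line)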
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,678 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors: 
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
+#
+
+import jasy.js.parse.Node
+
+class VanillaBuilder:
+    """The vanilla AST builder."""
+    
+    def COMMENTS_add(self, currNode, prevNode, comments):
+        if not comments:
+            return
+            
+        currComments = []
+        prevComments = []
+        for comment in comments:
+            # post comments - for previous node
+            if comment.context == "inline":
+                prevComments.append(comment)
+                
+            # all other comment styles are attached to the current one
+            else:
+                currComments.append(comment)
+        
+        # Merge with previously added ones
+        if hasattr(currNode, "comments"):
+            currNode.comments.extend(currComments)
+        else:
+            currNode.comments = currComments
+        
+        if prevNode:
+            if hasattr(prevNode, "comments"):
+                prevNode.comments.extend(prevComments)
+            else:
+                prevNode.comments = prevComments
+        else:
+            # Don't lose the comment in the tree (if no previous node is there,
+            # attach it to this node)
+            currNode.comments.extend(prevComments)
+    
+    def IF_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "if")
+
+    def IF_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def IF_setThenPart(self, node, statement):
+        node.append(statement, "thenPart")
+
+    def IF_setElsePart(self, node, statement):
+        node.append(statement, "elsePart")
+
+    def IF_finish(self, node):
+        pass
+
+    def SWITCH_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "switch")
+        node.defaultIndex = -1
+        return node
+
+    def SWITCH_setDiscriminant(self, node, expression):
+        node.append(expression, "discriminant")
+
+    def SWITCH_setDefaultIndex(self, node, index):
+        node.defaultIndex = index
+
+    def SWITCH_addCase(self, node, childNode):
+        node.append(childNode)
+
+    def SWITCH_finish(self, node):
+        pass
+
+    def CASE_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "case")
+
+    def CASE_setLabel(self, node, expression):
+        node.append(expression, "label")
+
+    def CASE_initializeStatements(self, node, tokenizer):
+        node.append(jasy.js.parse.Node.Node(tokenizer, "block"), "statements")
+
+    def CASE_addStatement(self, node, statement):
+        node.statements.append(statement)
+
+    def CASE_finish(self, node):
+        pass
+
+    def DEFAULT_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "default")
+
+    def DEFAULT_initializeStatements(self, node, tokenizer):
+        node.append(jasy.js.parse.Node.Node(tokenizer, "block"), "statements")
+
+    def DEFAULT_addStatement(self, node, statement):
+        node.statements.append(statement)
+
+    def DEFAULT_finish(self, node):
+        pass
+
+    def FOR_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "for")
+        node.isLoop = True
+        node.isEach = False
+        return node
+
+    def FOR_rebuildForEach(self, node):
+        node.isEach = True
+
+    # NB: This function is called after rebuildForEach, if that is called at all.
+    def FOR_rebuildForIn(self, node):
+        node.type = "for_in"
+
+    def FOR_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def FOR_setSetup(self, node, expression):
+        node.append(expression, "setup")
+
+    def FOR_setUpdate(self, node, expression):
+        node.append(expression, "update")
+
+    def FOR_setObject(self, node, expression, forBlock=None):
+        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
+        # JS tolerates the optional unused parameter, but Python does not.
+        node.append(expression, "object")
+
+    def FOR_setIterator(self, node, expression, forBlock=None):
+        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
+        # JS tolerates the optional unused parameter, but Python does not.
+        node.append(expression, "iterator")
+
+    def FOR_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def FOR_finish(self, node):
+        pass
+
+    def WHILE_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "while")
+        node.isLoop = True
+        return node
+
+    def WHILE_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def WHILE_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def WHILE_finish(self, node):
+        pass
+
+    def DO_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "do")
+        node.isLoop = True
+        return node
+
+    def DO_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def DO_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def DO_finish(self, node):
+        pass
+
+    def BREAK_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "break")
+
+    def BREAK_setLabel(self, node, label):
+        node.label = label
+
+    def BREAK_setTarget(self, node, target):
+        # Note: no append() here - the target is a relation, not a child node.
+        node.target = target
+
+    def BREAK_finish(self, node):
+        pass
+
+    def CONTINUE_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "continue")
+
+    def CONTINUE_setLabel(self, node, label):
+        node.label = label
+
+    def CONTINUE_setTarget(self, node, target):
+        # Note: no append() here - the target is a relation, not a child node.
+        node.target = target
+
+    def CONTINUE_finish(self, node):
+        pass
+
+    def TRY_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "try")
+        return node
+
+    def TRY_setTryBlock(self, node, statement):
+        node.append(statement, "tryBlock")
+
+    def TRY_addCatch(self, node, childNode):
+        node.append(childNode)
+
+    def TRY_finishCatches(self, node):
+        pass
+
+    def TRY_setFinallyBlock(self, node, statement):
+        node.append(statement, "finallyBlock")
+
+    def TRY_finish(self, node):
+        pass
+
+    def CATCH_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "catch")
+        return node
+        
+    def CATCH_wrapException(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "exception")
+        node.value = tokenizer.token.value
+        return node
+
+    def CATCH_setException(self, node, exception):
+        node.append(exception, "exception")
+
+    def CATCH_setGuard(self, node, expression):
+        node.append(expression, "guard")
+
+    def CATCH_setBlock(self, node, statement):
+        node.append(statement, "block")
+
+    def CATCH_finish(self, node):
+        pass
+
+    def THROW_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "throw")
+
+    def THROW_setException(self, node, expression):
+        node.append(expression, "exception")
+
+    def THROW_finish(self, node):
+        pass
+
+    def RETURN_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "return")
+
+    def RETURN_setValue(self, node, expression):
+        node.append(expression, "value")
+
+    def RETURN_finish(self, node):
+        pass
+
+    def YIELD_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "yield")
+
+    def YIELD_setValue(self, node, expression):
+        node.append(expression, "value")
+
+    def YIELD_finish(self, node):
+        pass
+
+    def GENERATOR_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "generator")
+
+    def GENERATOR_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def GENERATOR_setTail(self, node, childNode):
+        node.append(childNode, "tail")
+
+    def GENERATOR_finish(self, node):
+        pass
+
+    def WITH_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "with")
+
+    def WITH_setObject(self, node, expression):
+        node.append(expression, "object")
+
+    def WITH_setBody(self, node, statement):
+        node.append(statement, "body")
+
+    def WITH_finish(self, node):
+        pass
+
+    def DEBUGGER_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "debugger")
+
+    def SEMICOLON_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "semicolon")
+
+    def SEMICOLON_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def SEMICOLON_finish(self, node):
+        pass
+
+    def LABEL_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "label")
+
+    def LABEL_setLabel(self, node, label):
+        node.label = label
+
+    def LABEL_setStatement(self, node, statement):
+        node.append(statement, "statement")
+
+    def LABEL_finish(self, node):
+        pass
+
+    def FUNCTION_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer)
+        if node.type != "function":
+            if tokenizer.token.value == "get":
+                node.type = "getter"
+            else:
+                node.type = "setter"
+                
+        return node
+
+    def FUNCTION_setName(self, node, identifier):
+        node.name = identifier
+
+    def FUNCTION_initParams(self, node, tokenizer):
+        node.append(jasy.js.parse.Node.Node(tokenizer, "list"), "params")
+        
+    def FUNCTION_wrapParam(self, tokenizer):
+        param = jasy.js.parse.Node.Node(tokenizer)
+        param.value = tokenizer.token.value
+        return param
+        
+    def FUNCTION_addParam(self, node, tokenizer, expression):
+        node.params.append(expression)
+        
+    def FUNCTION_setExpressionClosure(self, node, expressionClosure):
+        node.expressionClosure = expressionClosure
+
+    def FUNCTION_setBody(self, node, statement):
+        # copy over function parameters to function body
+        #params = getattr(node, "params", None)
+        #if params:
+        #    statement.params = [param.value for param in params]
+            
+        node.append(statement, "body")
+
+    def FUNCTION_hoistVars(self, x):
+        pass
+
+    def FUNCTION_finish(self, node, x):
+        pass
+
+    def VAR_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "var")
+
+    def VAR_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def VAR_finish(self, node):
+        pass
+
+    def CONST_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "const")
+
+    def CONST_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def CONST_finish(self, node):
+        pass
+
+    def LET_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "let")
+
+    def LET_addDecl(self, node, childNode, childContext=None):
+        node.append(childNode)
+
+    def LET_finish(self, node):
+        pass
+
+    def DECL_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "declaration")
+
+    def DECL_setNames(self, node, expression):
+        node.append(expression, "names")
+
+    def DECL_setName(self, node, identifier):
+        node.name = identifier
+
+    def DECL_setInitializer(self, node, expression):
+        node.append(expression, "initializer")
+
+    def DECL_setReadOnly(self, node, readOnly):
+        node.readOnly = readOnly
+
+    def DECL_finish(self, node):
+        pass
+
+    def LETBLOCK_build(self, tokenizer):
+        node = jasy.js.parse.Node.Node(tokenizer, "let_block")
+        return node
+
+    def LETBLOCK_setVariables(self, node, childNode):
+        node.append(childNode, "variables")
+
+    def LETBLOCK_setExpression(self, node, expression):
+        node.append(expression, "expression")
+
+    def LETBLOCK_setBlock(self, node, statement):
+        node.append(statement, "block")
+
+    def LETBLOCK_finish(self, node):
+        pass
+
+    def BLOCK_build(self, tokenizer, id):
+        node = jasy.js.parse.Node.Node(tokenizer, "block")
+        # node.id = id
+        return node
+
+    def BLOCK_hoistLets(self, node):
+        pass
+
+    def BLOCK_addStatement(self, node, childNode):
+        node.append(childNode)
+
+    def BLOCK_finish(self, node):
+        pass
+
+    def EXPRESSION_build(self, tokenizer, tokenType):
+        return jasy.js.parse.Node.Node(tokenizer, tokenType)
+
+    def EXPRESSION_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def EXPRESSION_finish(self, node):
+        pass
+
+    def ASSIGN_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "assign")
+
+    def ASSIGN_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def ASSIGN_setAssignOp(self, node, operator):
+        node.assignOp = operator
+
+    def ASSIGN_finish(self, node):
+        pass
+
+    def HOOK_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "hook")
+
+    def HOOK_setCondition(self, node, expression):
+        node.append(expression, "condition")
+
+    def HOOK_setThenPart(self, node, childNode):
+        node.append(childNode, "thenPart")
+
+    def HOOK_setElsePart(self, node, childNode):
+        node.append(childNode, "elsePart")
+
+    def HOOK_finish(self, node):
+        pass
+
+    def OR_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "or")
+
+    def OR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def OR_finish(self, node):
+        pass
+
+    def AND_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "and")
+
+    def AND_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def AND_finish(self, node):
+        pass
+
+    def BITWISEOR_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "bitwise_or")
+
+    def BITWISEOR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEOR_finish(self, node):
+        pass
+
+    def BITWISEXOR_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "bitwise_xor")
+
+    def BITWISEXOR_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEXOR_finish(self, node):
+        pass
+
+    def BITWISEAND_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "bitwise_and")
+
+    def BITWISEAND_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def BITWISEAND_finish(self, node):
+        pass
+
+    def EQUALITY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "eq", "ne", "strict_eq", or "strict_ne".
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def EQUALITY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def EQUALITY_finish(self, node):
+        pass
+
+    def RELATIONAL_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "lt", "le", "ge", or "gt".
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def RELATIONAL_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def RELATIONAL_finish(self, node):
+        pass
+
+    def SHIFT_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "lsh", "rsh", or "ursh".
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def SHIFT_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def SHIFT_finish(self, node):
+        pass
+
+    def ADD_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "plus" or "minus".
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def ADD_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def ADD_finish(self, node):
+        pass
+
+    def MULTIPLY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "mul", "div", or "mod".
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def MULTIPLY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def MULTIPLY_finish(self, node):
+        pass
+
+    def UNARY_build(self, tokenizer):
+        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not",
+        # "bitwise_not", "unary_plus", "unary_minus", "increment", or "decrement".
+        if tokenizer.token.type == "plus":
+            tokenizer.token.type = "unary_plus"
+        elif tokenizer.token.type == "minus":
+            tokenizer.token.type = "unary_minus"
+            
+        return jasy.js.parse.Node.Node(tokenizer)
+
+    def UNARY_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def UNARY_setPostfix(self, node):
+        node.postfix = True
+
+    def UNARY_finish(self, node):
+        pass
+
+    def MEMBER_build(self, tokenizer, tokenType=None):
+        node = jasy.js.parse.Node.Node(tokenizer, tokenType)
+        if node.type == "identifier":
+            node.value = tokenizer.token.value
+        return node
+
+    def MEMBER_rebuildNewWithArgs(self, node):
+        node.type = "new_with_args"
+
+    def MEMBER_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def MEMBER_finish(self, node):
+        pass
+
+    def PRIMARY_build(self, tokenizer, tokenType):
+        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier",
+        # "number", "string", or "regexp".
+        node = jasy.js.parse.Node.Node(tokenizer, tokenType)
+        if tokenType in ("identifier", "string", "regexp", "number"):
+            node.value = tokenizer.token.value
+            
+        return node
+
+    def PRIMARY_finish(self, node):
+        pass
+
+    def ARRAYINIT_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "array_init")
+
+    def ARRAYINIT_addElement(self, node, childNode):
+        node.append(childNode)
+
+    def ARRAYINIT_finish(self, node):
+        pass
+
+    def ARRAYCOMP_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "array_comp")
+    
+    def ARRAYCOMP_setExpression(self, node, expression):
+        node.append(expression, "expression")
+    
+    def ARRAYCOMP_setTail(self, node, childNode):
+        node.append(childNode, "tail")
+    
+    def ARRAYCOMP_finish(self, node):
+        pass
+
+    def COMPTAIL_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "comp_tail")
+
+    def COMPTAIL_setGuard(self, node, expression):
+        node.append(expression, "guard")
+
+    def COMPTAIL_addFor(self, node, childNode):
+        node.append(childNode, "for")
+
+    def COMPTAIL_finish(self, node):
+        pass
+
+    def OBJECTINIT_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "object_init")
+
+    def OBJECTINIT_addProperty(self, node, childNode):
+        node.append(childNode)
+
+    def OBJECTINIT_finish(self, node):
+        pass
+
+    def PROPERTYINIT_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "property_init")
+
+    def PROPERTYINIT_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def PROPERTYINIT_finish(self, node):
+        pass
+
+    def COMMA_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "comma")
+
+    def COMMA_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def COMMA_finish(self, node):
+        pass
+
+    def LIST_build(self, tokenizer):
+        return jasy.js.parse.Node.Node(tokenizer, "list")
+
+    def LIST_addOperand(self, node, childNode):
+        node.append(childNode)
+
+    def LIST_finish(self, node):
+        pass
+
+    def setHoists(self, id, vds):
+        pass
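
The builder methods above are called by the recursive descent parser to assemble the syntax tree which the class browser later walks. A minimal sketch of driving the parser directly (assuming the bundled Jasy directory is on sys.path, as eric5.py arranges below; otherwise the ThirdParty.Jasy prefix is needed):

    import jasy.js.parse.Parser as Parser

    source = "function add(a, b) { return a + b; }"
    root = Parser.parse(source)      # a jasy.js.parse.Node.Node tree of type "script"
    for node in root:
        print(node.type, getattr(node, "name", ""))    # e.g. "function add"
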
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/tokenize/Lang.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,22 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+"""JavaScript 1.7 keywords"""
+keywords = set([
+    "break",
+    "case", "catch", "const", "continue",
+    "debugger", "default", "delete", "do",
+    "else",
+    "false", "finally", "for", "function",
+    "if", "in", "instanceof",
+    "let",
+    "new", "null",
+    "return",
+    "switch",
+    "this", "throw", "true", "try", "typeof",
+    "var", "void",
+    "yield",
+    "while", "with"
+])
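
The tokenizer's identifier lexer (lexIdent in Tokenizer.py below) consults this set: a scanned word contained in it becomes a keyword token of that name, anything else becomes an "identifier" token. A tiny illustration, assuming the Jasy package is importable under the short "jasy" prefix:

    import jasy.js.tokenize.Lang as Lang

    for word in ("let", "yield", "foo"):
        print(word, "keyword" if word in Lang.keywords else "identifier")
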
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,606 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+#
+# License: MPL 1.1/GPL 2.0/LGPL 2.1
+# Authors: 
+#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
+#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
+#
+
+import copy
+
+import jasy.js.tokenize.Lang as Lang
+import jasy.js.api.Comment as Comment
+import jasy.core.Console as Console
+
+__all__ = [ "Tokenizer" ]
+
+
+# Operator and punctuator mapping from token to tree node type name.
+# NB: because the lexer doesn't backtrack, all token prefixes must themselves
+# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
+# tokens != and !).
+operatorNames = {
+    '<'   : 'lt', 
+    '>'   : 'gt', 
+    '<='  : 'le', 
+    '>='  : 'ge', 
+    '!='  : 'ne', 
+    '!'   : 'not', 
+    '=='  : 'eq', 
+    '===' : 'strict_eq', 
+    '!==' : 'strict_ne', 
+
+    '>>'  : 'rsh', 
+    '<<'  : 'lsh',
+    '>>>' : 'ursh', 
+     
+    '+'   : 'plus', 
+    '*'   : 'mul', 
+    '-'   : 'minus', 
+    '/'   : 'div', 
+    '%'   : 'mod', 
+
+    ','   : 'comma', 
+    ';'   : 'semicolon', 
+    ':'   : 'colon', 
+    '='   : 'assign', 
+    '?'   : 'hook', 
+
+    '&&'  : 'and', 
+    '||'  : 'or', 
+
+    '++'  : 'increment', 
+    '--'  : 'decrement', 
+
+    ')'   : 'right_paren', 
+    '('   : 'left_paren', 
+    '['   : 'left_bracket', 
+    ']'   : 'right_bracket', 
+    '{'   : 'left_curly', 
+    '}'   : 'right_curly', 
+
+    '&'   : 'bitwise_and', 
+    '^'   : 'bitwise_xor', 
+    '|'   : 'bitwise_or', 
+    '~'   : 'bitwise_not'
+}
+
+
+# Assignment operators
+assignOperators = ["|", "^", "&", "<<", ">>", ">>>", "+", "-", "*", "/", "%"]
+
+
+
+
+#
+# Classes
+#
+
+class Token: 
+    __slots__ = ["type", "start", "line", "assignOp", "end", "value"]
+
+
+class ParseError(Exception):
+    def __init__(self, message, fileId, line):
+        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, fileId, line))
+
+
+class Tokenizer(object):
+    def __init__(self, source, fileId="", line=1):
+        # source: JavaScript source
+        # fileId: Filename (for debugging purposes)
+        # line: Line number (for debugging purposes)
+        self.cursor = 0
+        self.source = str(source)
+        self.tokens = {}
+        self.tokenIndex = 0
+        self.lookahead = 0
+        self.scanNewlines = False
+        self.fileId = fileId
+        self.line = line
+        self.comments = []
+
+    input_ = property(lambda self: self.source[self.cursor:])
+    token = property(lambda self: self.tokens.get(self.tokenIndex))
+
+
+    def done(self):
+        # We need to set scanOperand to true here because the first thing
+        # might be a regexp.
+        return self.peek(True) == "end"
+        
+
+    def match(self, tokenType, scanOperand=False):
+        return self.get(scanOperand) == tokenType or self.unget()
+
+
+    def mustMatch(self, tokenType):
+        if not self.match(tokenType):
+            raise ParseError("Missing " + tokenType, self.fileId, self.line)
+            
+        return self.token
+
+
+    def peek(self, scanOperand=False):
+        if self.lookahead:
+            next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
+            if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
+                tokenType = "newline"
+            else:
+                tokenType = getattr(next, "type", None)
+        else:
+            tokenType = self.get(scanOperand)
+            self.unget()
+            
+        return tokenType
+
+
+    def peekOnSameLine(self, scanOperand=False):
+        self.scanNewlines = True
+        tokenType = self.peek(scanOperand)
+        self.scanNewlines = False
+        return tokenType
+        
+
+    def getComments(self):
+        if self.comments:
+            comments = self.comments
+            self.comments = []
+            return comments
+            
+        return None
+
+
+    def skip(self):
+        """Eats comments and whitespace."""
+        input = self.source
+        startLine = self.line
+
+        # Whether this is the first call, as happens when starting to parse a file
+        # (eat leading comments/whitespace)
+        startOfFile = self.cursor == 0
+        
+        indent = ""
+        
+        while (True):
+            if len(input) > self.cursor:
+                ch = input[self.cursor]
+            else:
+                return
+                
+            self.cursor += 1
+            
+            if len(input) > self.cursor:
+                next = input[self.cursor]
+            else:
+                next = None
+
+            if ch == "\n" and not self.scanNewlines:
+                self.line += 1
+                indent = ""
+                
+            elif ch == "/" and next == "*":
+                self.cursor += 1
+                text = "/*"
+                commentStartLine = self.line
+                if startLine == self.line and not startOfFile:
+                    mode = "inline"
+                elif (self.line-1) > startLine:
+                    # a gap before this comment means it documents a whole section (multiple lines of code)
+                    mode = "section"
+                else:
+                    # comment for possibly several following lines of code, but less important (no blank line divider before it)
+                    mode = "block"
+                    
+                while (True):
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        raise ParseError("Unterminated comment", self.fileId, self.line)
+                        
+                    if ch == "*":
+                        next = input[self.cursor]
+                        if next == "/":
+                            text += "*/"
+                            self.cursor += 1
+                            break
+                            
+                    elif ch == "\n":
+                        self.line += 1
+                        
+                    text += ch
+                    
+                
+                # Filter escaping on slash-star combinations in comment text
+                text = text.replace("*\/", "*/")
+                
+                try:
+                    self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
+                except Comment.CommentException as commentError:
+                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
+                    
+                    
+            elif ch == "/" and next == "/":
+                self.cursor += 1
+                text = "//"
+                if startLine == self.line and not startOfFile:
+                    mode = "inline"
+                elif (self.line-1) > startLine:
+                    # a gap before this comment means it documents a whole section (multiple lines of code)
+                    mode = "section"
+                else:
+                    # comment for possibly several following lines of code, but less important (no blank line divider before it)
+                    mode = "block"
+                    
+                while (True):
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        # end of file etc.
+                        break
+
+                    if ch == "\n":
+                        self.line += 1
+                        break
+                    
+                    text += ch
+                    
+                try:
+                    self.comments.append(Comment.Comment(text, mode, self.line-1, "", self.fileId))
+                except Comment.CommentException as commentError:
+                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
+
+            # check for whitespace, also for special cases like 0xA0
+            elif ch in "\xA0 \t":
+                indent += ch
+
+            else:
+                self.cursor -= 1
+                return
+
+
+    # Lexes the exponential part of a number, if present. Returns True if an
+    # exponential part was found.
+    def lexExponent(self):
+        input = self.source
+        next = input[self.cursor]
+        if next == "e" or next == "E":
+            self.cursor += 1
+            ch = input[self.cursor]
+            self.cursor += 1
+            if ch == "+" or ch == "-":
+                ch = input[self.cursor]
+                self.cursor += 1
+
+            if ch < "0" or ch > "9":
+                raise ParseError("Missing exponent", self.fileId, self.line)
+
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+                
+            self.cursor -= 1
+            return True
+
+        return False
+
+
+    def lexZeroNumber(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "number"
+
+        ch = input[self.cursor]
+        self.cursor += 1
+        if ch == ".":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+                
+            self.cursor -= 1
+            self.lexExponent()
+            token.value = input[token.start:self.cursor]
+            
+        elif ch == "x" or ch == "X":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
+                    break
+                    
+            self.cursor -= 1
+            token.value = input[token.start:self.cursor]
+
+        elif ch >= "0" and ch <= "7":
+            while(True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "7"):
+                    break
+                    
+            self.cursor -= 1
+            token.value = input[token.start:self.cursor]
+
+        else:
+            self.cursor -= 1
+            self.lexExponent()     # 0E1, &c.
+            token.value = 0
+    
+
+    def lexNumber(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "number"
+
+        floating = False
+        while(True):
+            ch = input[self.cursor]
+            self.cursor += 1
+            
+            if ch == "." and not floating:
+                floating = True
+                ch = input[self.cursor]
+                self.cursor += 1
+                
+            if not (ch >= "0" and ch <= "9"):
+                break
+
+        self.cursor -= 1
+
+        exponent = self.lexExponent()
+        segment = input[token.start:self.cursor]
+        
+        # Protect float or exponent numbers
+        if floating or exponent:
+            token.value = segment
+        else:
+            token.value = int(segment)
+
+
+    def lexDot(self, ch):
+        token = self.token
+        input = self.source
+        next = input[self.cursor]
+        
+        if next >= "0" and next <= "9":
+            while (True):
+                ch = input[self.cursor]
+                self.cursor += 1
+                if not (ch >= "0" and ch <= "9"):
+                    break
+
+            self.cursor -= 1
+            self.lexExponent()
+
+            token.type = "number"
+            token.value = input[token.start:self.cursor]
+
+        else:
+            token.type = "dot"
+
+
+    def lexString(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "string"
+
+        hasEscapes = False
+        delim = ch
+        ch = input[self.cursor]
+        self.cursor += 1
+        while ch != delim:
+            if ch == "\\":
+                hasEscapes = True
+                self.cursor += 1
+
+            ch = input[self.cursor]
+            self.cursor += 1
+
+        if hasEscapes:
+            token.value = eval(input[token.start:self.cursor])
+        else:
+            token.value = input[token.start+1:self.cursor-1]
+
+
+    def lexRegExp(self, ch):
+        token = self.token
+        input = self.source
+        token.type = "regexp"
+
+        while (True):
+            try:
+                ch = input[self.cursor]
+                self.cursor += 1
+            except IndexError:
+                raise ParseError("Unterminated regex", self.fileId, self.line)
+
+            if ch == "\\":
+                self.cursor += 1
+                
+            elif ch == "[":
+                while (True):
+                    if ch == "\\":
+                        self.cursor += 1
+
+                    try:
+                        ch = input[self.cursor]
+                        self.cursor += 1
+                    except IndexError:
+                        raise ParseError("Unterminated character class", self.fileId, self.line)
+                    
+                    if ch == "]":
+                        break
+                    
+            if ch == "/":
+                break
+
+        while(True):
+            ch = input[self.cursor]
+            self.cursor += 1
+            if not (ch >= "a" and ch <= "z"):
+                break
+
+        self.cursor -= 1
+        token.value = input[token.start:self.cursor]
+    
+
+    def lexOp(self, ch):
+        token = self.token
+        input = self.source
+
+        op = ch
+        while(True):
+            try:
+                next = input[self.cursor]
+            except IndexError:
+                break
+                
+            if (op + next) in operatorNames:
+                self.cursor += 1
+                op += next
+            else:
+                break
+        
+        try:
+            next = input[self.cursor]
+        except IndexError:
+            next = None
+
+        if next == "=" and op in assignOperators:
+            self.cursor += 1
+            token.type = "assign"
+            token.assignOp = operatorNames[op]
+            op += "="
+            
+        else:
+            token.type = operatorNames[op]
+            token.assignOp = None
+
+
+    # FIXME: Unicode escape sequences
+    # FIXME: Unicode identifiers
+    def lexIdent(self, ch):
+        token = self.token
+        input = self.source
+
+        try:
+            while True:
+                ch = input[self.cursor]
+                self.cursor += 1
+            
+                if not ((ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or (ch >= "0" and ch <= "9") or ch == "$" or ch == "_"):
+                    break
+                    
+        except IndexError:
+            # hit the end of the input; compensate for the decrement below
+            self.cursor += 1
+        
+        # Put the non-word character back.
+        self.cursor -= 1
+
+        identifier = input[token.start:self.cursor]
+        if identifier in Lang.keywords:
+            token.type = identifier
+        else:
+            token.type = "identifier"
+            token.value = identifier
+
+
+    def get(self, scanOperand=False):
+        """ 
+        It consumes input *only* if there is no lookahead.
+        Dispatches to the appropriate lexing function depending on the input.
+        """
+        while self.lookahead:
+            self.lookahead -= 1
+            self.tokenIndex = (self.tokenIndex + 1) & 3
+            token = self.tokens[self.tokenIndex]
+            if token.type != "newline" or self.scanNewlines:
+                return token.type
+
+        self.skip()
+
+        self.tokenIndex = (self.tokenIndex + 1) & 3
+        self.tokens[self.tokenIndex] = token = Token()
+
+        token.start = self.cursor
+        token.line = self.line
+
+        input = self.source
+        if self.cursor == len(input):
+            token.end = token.start
+            token.type = "end"
+            return token.type
+
+        ch = input[self.cursor]
+        self.cursor += 1
+        
+        if (ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or ch == "$" or ch == "_":
+            self.lexIdent(ch)
+        
+        elif scanOperand and ch == "/":
+            self.lexRegExp(ch)
+        
+        elif ch == ".":
+            self.lexDot(ch)
+
+        elif self.scanNewlines and ch == "\n":
+            token.type = "newline"
+            self.line += 1
+
+        elif ch in operatorNames:
+            self.lexOp(ch)
+        
+        elif ch >= "1" and ch <= "9":
+            self.lexNumber(ch)
+        
+        elif ch == "0":
+            self.lexZeroNumber(ch)
+        
+        elif ch == '"' or ch == "'":
+            self.lexString(ch)
+        
+        else:
+            raise ParseError("Illegal token: %s (Code: %s)" % (ch, ord(ch)), self.fileId, self.line)
+
+        token.end = self.cursor
+        return token.type
+        
+
+    def unget(self):
+        """ Match depends on unget returning undefined."""
+        self.lookahead += 1
+        
+        if self.lookahead == 4: 
+            raise ParseError("PANIC: too much lookahead!", self.fileId, self.line)
+        
+        self.tokenIndex = (self.tokenIndex - 1) & 3
+        
+    
+    def save(self):
+        return {
+            "cursor" : self.cursor,
+            "tokenIndex": self.tokenIndex,
+            "tokens": copy.copy(self.tokens),
+            "lookahead": self.lookahead,
+            "scanNewlines": self.scanNewlines,
+            "line": self.line
+        }
+
+    
+    def rewind(self, point):
+        self.cursor = point["cursor"]
+        self.tokenIndex = point["tokenIndex"]
+        self.tokens = copy.copy(point["tokens"])
+        self.lookahead = point["lookahead"]
+        self.scanNewlines = point["scanNewlines"]
+        self.line = point["line"]
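
A hedged usage sketch for the tokenizer above; the constructor signature, get(), done() and the token property are taken from the code, while the sample source line is made up. get() consumes one token at a time, done() peeks for the "end" token:

    import jasy.js.tokenize.Tokenizer as Tokenizer

    tokenizer = Tokenizer.Tokenizer("var answer = 42;", "example.js")
    while not tokenizer.done():
        tokenType = tokenizer.get()
        value = getattr(tokenizer.token, "value", "")
        print(tokenType, value)    # var, identifier answer, assign, number 42, semicolon
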
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/js/util/__init__.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,14 @@
+#
+# Jasy - Web Tooling Framework
+# Copyright 2010-2012 Zynga Inc.
+#
+
+#
+# minimized for using just the parser within eric5
+# Copyright (c) 2013 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+pseudoTypes = set(["any", "var", "undefined", "null", "true", "false", "this",
+                   "arguments"])
+builtinTypes = set(["Object", "String", "Number", "Boolean", "Array", "Function",
+                    "RegExp", "Date"])
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/ThirdParty/Jasy/jasy/license.md	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,20 @@
+Copyright (c) 2011-2012 Zynga Inc. http://zynga.com/
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
--- a/UI/BrowserModel.py	Sat Jul 06 18:31:44 2013 +0200
+++ b/UI/BrowserModel.py	Tue Jul 09 19:30:56 2013 +0200
@@ -1008,6 +1008,11 @@
             pixName = "filePixmap.png"
         elif self.isDFile():
             pixName = "fileD.png"
+        elif self.isJavaScriptFile():
+            pixName = "fileJavascript.png"
+            self._populated = False
+            self._lazyPopulation = True
+            self._moduleName = os.path.basename(finfo)
         else:
             pixName = "fileMisc.png"
         
@@ -1146,6 +1151,14 @@
         """
         return self.fileext == '.idl'
     
+    def isJavaScriptFile(self):
+        """
+        Public method to check, if this file is a JavaScript file.
+        
+        @return flag indicating a JavaScript file (boolean)
+        """
+        return self.fileext == '.js'
+    
     def isPixmapFile(self):
         """
         Public method to check, if this file is a pixmap file.
--- a/Utilities/ClassBrowsers/__init__.py	Sat Jul 06 18:31:44 2013 +0200
+++ b/Utilities/ClassBrowsers/__init__.py	Tue Jul 09 19:30:56 2013 +0200
@@ -25,13 +25,15 @@
 PTL_SOURCE = 128
 RB_SOURCE = 129
 IDL_SOURCE = 130
+JS_SOURCE = 131
 
-SUPPORTED_TYPES = [PY_SOURCE, PTL_SOURCE, RB_SOURCE, IDL_SOURCE]
+SUPPORTED_TYPES = [PY_SOURCE, PTL_SOURCE, RB_SOURCE, IDL_SOURCE, JS_SOURCE]
 
 __extensions = {
     "IDL": [".idl"],
     "Python": [".py", ".pyw", ".ptl"],  # currently not used
     "Ruby": [".rb"],
+    "JavaScript": [".js"],
 }
 
 
@@ -56,6 +58,10 @@
         from . import rbclbr
         dict = rbclbr.readmodule_ex(module, path)
         rbclbr._modules.clear()
+    elif ext in __extensions["JavaScript"]:
+        from . import jsclbr
+        dict = jsclbr.readmodule_ex(module, path)
+        jsclbr._modules.clear()
     elif ext in Preferences.getPython("PythonExtensions") or \
          ext in Preferences.getPython("Python3Extensions") or \
          isPyFile:
@@ -102,6 +108,13 @@
                 return (open(pathname), pathname, (ext, 'r', IDL_SOURCE))
         raise ImportError
     
+    elif ext in __extensions["JavaScript"]:
+        for p in path:      # only search in path
+            pathname = os.path.join(p, name)
+            if os.path.exists(pathname):
+                return (open(pathname), pathname, (ext, 'r', JS_SOURCE))
+        raise ImportError
+    
     elif ext == '.ptl':
         for p in path:      # only search in path
             pathname = os.path.join(p, name)
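
With the new branch above, find_module() also resolves *.js files on the given search path and reports them as JS_SOURCE. A small sketch (directory and file name are placeholders; the call raises ImportError if the file cannot be found):

    import Utilities.ClassBrowsers as ClassBrowsers

    f, pathname, (ext, mode, sourceType) = ClassBrowsers.find_module(
        "example.js", ["/path/to/scripts"])
    f.close()
    print(sourceType == ClassBrowsers.JS_SOURCE)    # True
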
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Utilities/ClassBrowsers/jsclbr.py	Tue Jul 09 19:30:56 2013 +0200
@@ -0,0 +1,292 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2013 Detlev Offenbach <detlev@die-offenbachs.de>
+#
+
+"""
+Parse a JavaScript file and retrieve variables and functions.
+
+It uses the JavaScript parser contained in the jasy web framework.
+"""
+
+import ThirdParty.Jasy.jasy.js.parse.Parser as jsParser
+
+import Utilities
+import Utilities.ClassBrowsers as ClassBrowsers
+from . import ClbrBaseClasses
+
+SUPPORTED_TYPES = [ClassBrowsers.JS_SOURCE]
+    
+_modules = {}   # cache of modules we've seen
+
+
+class VisibilityMixin(ClbrBaseClasses.ClbrVisibilityMixinBase):
+    """
+    Mixin class implementing the notion of visibility.
+    """
+    def __init__(self):
+        """
+        Method to initialize the visibility.
+        """
+        if self.name.startswith('__'):
+            self.setPrivate()
+        elif self.name.startswith('_'):
+            self.setProtected()
+        else:
+            self.setPublic()
+
+
+class Function(ClbrBaseClasses.Function, VisibilityMixin):
+    """
+    Class to represent a JavaScript function.
+    """
+    def __init__(self, module, name, file, lineno, signature='', separator=','):
+        """
+        Constructor
+        
+        @param module name of the module containing this function
+        @param name name of this function
+        @param file filename containing this function
+        @param lineno linenumber of the function definition
+        @param signature parameterlist of the method
+        @param separator string separating the parameters
+        """
+        ClbrBaseClasses.Function.__init__(self, module, name, file, lineno,
+                                          signature, separator)
+        VisibilityMixin.__init__(self)
+
+
+class Attribute(ClbrBaseClasses.Attribute, VisibilityMixin):
+    """
+    Class to represent a class attribute.
+    """
+    def __init__(self, module, name, file, lineno):
+        """
+        Constructor
+        
+        @param module name of the module containing this attribute
+        @param name name of this attribute
+        @param file filename containing this attribute
+        @param lineno linenumber of the attribute definition
+        """
+        ClbrBaseClasses.Attribute.__init__(self, module, name, file, lineno)
+        VisibilityMixin.__init__(self)
+
+
+class Visitor(object):
+    """
+    Class implementing a visitor going through the parsed tree.
+    """
+    def __init__(self, src, module, filename):
+        """
+        Constructor
+        
+        @param src source to be parsed (string)
+        @param module name of the module (string)
+        @param filename file name (string)
+        """
+        self.__dict = {}
+        self.__dict_counts = {}
+        self.__root = None
+        self.__stack = []
+        
+        self.__source = src
+        self.__module = module
+        self.__file = filename
+    
+    def parse(self):
+        """
+        Public method to parse the source.
+        
+        @return dictionary containing the parsed information
+        """
+        try:
+            self.__root = jsParser.parse(self.__source)
+            self.__visit(self.__root)
+        except jsParser.SyntaxError:
+            # ignore syntax errors
+            pass
+        
+        return self.__dict
+    
+    def __visit(self, root):
+        """
+        Private method implementing the visit logic, delegating to the type
+        specific visit methods.
+        
+        @param root reference to the node to visit (jasy.js.parse.Node.Node)
+        """
+        call = lambda n: getattr(self, "visit_{0}".format(n.type), self.visit_noop)(n)
+        call(root)
+        for node in root:
+            self.__visit(node)
+    
+    def visit_noop(self, node):
+        """
+        Public method to ignore the given node.
+        
+        @param node reference to the node (jasy.js.parse.Node.Node)
+        """
+        pass
+
+    def visit_function(self, node):
+        """
+        Public method to treat a function node.
+        
+        @param node reference to the node (jasy.js.parse.Node.Node)
+        """
+        if node.type == "function" and \
+           getattr(node, "name", None) and \
+           node.functionForm == "declared_form":
+            if self.__stack and self.__stack[-1].endlineno < node.line:
+                del self.__stack[-1]
+            endline = node.line + self.__source.count('\n', node.start, node.end)
+            if getattr(node, "params", None):
+                func_sig = ", ".join([p.value for p in node.params])
+            else:
+                func_sig = ""
+            if self.__stack:
+                # it's a nested function
+                cur_func = self.__stack[-1]
+                f = Function(None, node.name,
+                             self.__file, node.line, func_sig)
+                f.setEndLine(endline)
+                cur_func._addmethod(node.name, f)
+            else:
+                f = Function(self.__module, node.name,
+                             self.__file, node.line, func_sig)
+                f.setEndLine(endline)
+                func_name = node.name
+                if func_name in self.__dict_counts:
+                    self.__dict_counts[func_name] += 1
+                    func_name = "{0}_{1:d}".format(
+                        func_name, self.__dict_counts[func_name])
+                else:
+                    self.__dict_counts[func_name] = 0
+                self.__dict[func_name] = f
+            self.__stack.append(f)
+
+    def visit_property_init(self, node):
+        """
+        Public method to treat a property_init node.
+        
+        @param node reference to the node (jasy.js.parse.Node.Node)
+        """
+        if node.type == "property_init" and node[1].type == "function":
+            if self.__stack and self.__stack[-1].endlineno < node[0].line:
+                del self.__stack[-1]
+            endline = node[0].line + self.__source.count('\n', node.start, node[1].end)
+            if getattr(node[1], "params", None):
+                func_sig = ", ".join([p.value for p in node[1].params])
+            else:
+                func_sig = ""
+            if self.__stack:
+                # it's a nested function
+                cur_func = self.__stack[-1]
+                f = Function(None, node[0].value,
+                             self.__file, node[0].line, func_sig)
+                f.setEndLine(endline)
+                cur_func._addmethod(node[0].value, f)
+            else:
+                f = Function(self.__module, node[0].value,
+                             self.__file, node[0].line, func_sig)
+                f.setEndLine(endline)
+                func_name = node[0].value
+                if func_name in self.__dict_counts:
+                    self.__dict_counts[func_name] += 1
+                    func_name = "{0}_{1:d}".format(
+                        func_name, self.__dict_counts[func_name])
+                else:
+                    self.__dict_counts[func_name] = 0
+                self.__dict[func_name] = f
+            self.__stack.append(f)
+    
+    def visit_var(self, node):
+        """
+        Public method to treat a variable node.
+        
+        @param node reference to the node (jasy.js.parse.Node.Node)
+        """
+        if node.type == "var" and \
+           node.parent.type == "script" and \
+           node.getChildrenLength():
+            if self.__stack and self.__stack[-1].endlineno < node[0].line:
+                del self.__stack[-1]
+            if self.__stack:
+                # function variables
+                for var in node:
+                    attr = Attribute(self.__module, var.name, self.__file, var.line)
+                    self.__stack[-1]._addattribute(attr)
+            else:
+                # global variable
+                if "@@Globals@@" not in self.__dict:
+                    self.__dict["@@Globals@@"] = \
+                        ClbrBaseClasses.ClbrBase(self.__module, "Globals", self.__file, 0)
+                for var in node:
+                    self.__dict["@@Globals@@"]._addglobal(
+                        Attribute(self.__module, var.name, self.__file, var.line))
+    
+    def visit_const(self, node):
+        """
+        Public method to treat a constant node.
+        
+        @param node reference to the node (jasy.js.parse.Node.Node)
+        """
+        if node.type == "const" and \
+           node.parent.type == "script" and \
+           node.getChildrenLength():
+            if self.__stack and self.__stack[-1].endlineno < node[0].line:
+                del self.__stack[-1]
+            if self.__stack:
+                # function constants
+                for var in node:
+                    attr = Attribute(self.__module, "const " + var.name,
+                                     self.__file, var.line)
+                    self.__stack[-1]._addattribute(attr)
+            else:
+                # global constants
+                if "@@Globals@@" not in self.__dict:
+                    self.__dict["@@Globals@@"] = \
+                        ClbrBaseClasses.ClbrBase(self.__module, "Globals", self.__file, 0)
+                for var in node:
+                    self.__dict["@@Globals@@"]._addglobal(
+                        Attribute(self.__module, "const " + var.name,
+                                  self.__file, var.line))
+
+
+def readmodule_ex(module, path=[]):
+    '''
+    Read a JavaScript file and return a dictionary of functions and variables.
+
+    @param module name of the JavaScript file (string)
+    @param path path the file should be searched in (list of strings)
+    @return the resulting dictionary
+    '''
+    global _modules
+    
+    dict = {}
+
+    if module in _modules:
+        # we've seen this file before...
+        return _modules[module]
+
+    # search the path for the file
+    f = None
+    fullpath = list(path)
+    f, file, (suff, mode, type) = ClassBrowsers.find_module(module, fullpath)
+    if f:
+        f.close()
+    if type not in SUPPORTED_TYPES:
+        # not JavaScript source, can't do anything with this module
+        _modules[module] = dict
+        return dict
+
+    _modules[module] = dict
+    try:
+        src = Utilities.readEncodedFile(file)[0]
+    except (UnicodeError, IOError):
+        # can't do anything with this module
+        _modules[module] = dict
+        return dict
+    
+    visitor = Visitor(src, module, file)
+    dict = visitor.parse()
+    _modules[module] = dict
+    return dict
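
A hedged example of calling the new class browser directly; file name and search path are placeholders. The returned dictionary maps (possibly disambiguated) function names to Function objects, with top level variables and constants collected under the "@@Globals@@" key:

    from Utilities.ClassBrowsers import jsclbr

    contents = jsclbr.readmodule_ex("example.js", ["/path/to/scripts"])
    for name, entry in contents.items():
        print(name, type(entry).__name__)
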
--- a/changelog	Sat Jul 06 18:31:44 2013 +0200
+++ b/changelog	Tue Jul 09 19:30:56 2013 +0200
@@ -28,6 +28,8 @@
   -- added support for virtual space
   -- added support to show the current class/method name in the combo boxes
      at the top of the editor
+- File Browser
+  -- added a JavaScript parser in order to show the structure of JavaScript files
 - Mini Editor
   -- changed the line numbers margin to adjust themselves to the size needed
   -- added support for virtual space
@@ -45,6 +47,7 @@
   -- Sources Viewer
      --- added support to highlight the current class/method name of the current
          editor
+     --- added a JavaScript parser in order to show the structure of JavaScript files
   -- Translations Viewer
      --- added a plug-in hook for the 'open' action
 - Shell
--- a/eric5.e4p	Sat Jul 06 18:31:44 2013 +0200
+++ b/eric5.e4p	Tue Jul 09 19:30:56 2013 +0200
@@ -1088,6 +1088,22 @@
     <Source>eric5_qregularexpression.py</Source>
     <Source>eric5_qregularexpression.pyw</Source>
     <Source>Plugins/WizardPlugins/QRegularExpressionWizard/QRegularExpressionWizardServer.py</Source>
+    <Source>Utilities/ClassBrowsers/jsclbr.py</Source>
+    <Source>ThirdParty/Jasy/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/core/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/core/Console.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/util/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/api/Comment.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/api/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/parse/VanillaBuilder.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/parse/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/parse/Node.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/parse/Parser.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/tokenize/Tokenizer.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/tokenize/__init__.py</Source>
+    <Source>ThirdParty/Jasy/jasy/js/tokenize/Lang.py</Source>
   </Sources>
   <Forms>
     <Form>PyUnit/UnittestDialog.ui</Form>
@@ -1530,6 +1546,7 @@
     <Other>APIs/Python3/eric5.bas</Other>
     <Other>APIs/Python3/PyQt4.bas</Other>
     <Other>APIs/Python3/QScintilla2.bas</Other>
+    <Other>ThirdParty/Jasy/jasy/license.md</Other>
   </Others>
   <MainScript>eric5.py</MainScript>
   <Vcs>
--- a/eric5.py	Sat Jul 06 18:31:44 2013 +0200
+++ b/eric5.py	Tue Jul 09 19:30:56 2013 +0200
@@ -44,6 +44,7 @@
 
 # make Third-Party package available as a packages repository
 sys.path.insert(2, os.path.join(os.path.dirname(__file__), "ThirdParty", "Pygments"))
+sys.path.insert(2, os.path.join(os.path.dirname(__file__), "ThirdParty", "Jasy"))
 
 from E5Gui.E5Application import E5Application
 
Binary file icons/default/fileJavascript.png has changed
