JavaScript Support

author      Detlev Offenbach <detlev@die-offenbachs.de>
date        Sat, 01 Apr 2023 11:09:00 +0200
branch      eric7
changeset   9955:aa02420279fe
parent      9954:7c5fa3eef082
child       9956:5b138f996a1e

JavaScript Support
- Removed JavaScript functionality depending on the `Jasy` package because it has not been maintained for years.

docs/ThirdParty.md
docs/changelog.md
eric7.epj
src/eric7/APIs/Python3/eric7.api
src/eric7/Documentation/Help/source.qch
src/eric7/Documentation/Help/source.qhp
src/eric7/Documentation/Source/eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html
src/eric7/Documentation/Source/eric7.Utilities.ClassBrowsers.jsclbr.html
src/eric7/Documentation/Source/index-eric7.Plugins.CheckerPlugins.SyntaxChecker.html
src/eric7/Documentation/Source/index-eric7.Utilities.ClassBrowsers.html
src/eric7/Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py
src/eric7/Plugins/PluginSyntaxChecker.py
src/eric7/ThirdParty/Jasy/__init__.py
src/eric7/ThirdParty/Jasy/jasy/__init__.py
src/eric7/ThirdParty/Jasy/jasy/core/Console.py
src/eric7/ThirdParty/Jasy/jasy/core/Text.py
src/eric7/ThirdParty/Jasy/jasy/core/__init__.py
src/eric7/ThirdParty/Jasy/jasy/license.md
src/eric7/ThirdParty/Jasy/jasy/parse/AbstractNode.py
src/eric7/ThirdParty/Jasy/jasy/parse/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/api/Comment.py
src/eric7/ThirdParty/Jasy/jasy/script/api/Text.py
src/eric7/ThirdParty/Jasy/jasy/script/api/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/output/Compressor.py
src/eric7/ThirdParty/Jasy/jasy/script/output/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/parse/Lang.py
src/eric7/ThirdParty/Jasy/jasy/script/parse/Node.py
src/eric7/ThirdParty/Jasy/jasy/script/parse/Parser.py
src/eric7/ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py
src/eric7/ThirdParty/Jasy/jasy/script/parse/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Lang.py
src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py
src/eric7/ThirdParty/Jasy/jasy/script/tokenize/__init__.py
src/eric7/ThirdParty/Jasy/jasy/script/util/__init__.py
src/eric7/ThirdParty/__init__.py
src/eric7/Utilities/ClassBrowsers/__init__.py
src/eric7/Utilities/ClassBrowsers/jsclbr.py
src/eric7/eric7_ide.py
--- a/docs/ThirdParty.md	Fri Mar 31 13:39:51 2023 +0200
+++ b/docs/ThirdParty.md	Sat Apr 01 11:09:00 2023 +0200
@@ -6,7 +6,6 @@
 | Name         |  Version  | License                     |
 |:------------:|:---------:|:----------------------------|
 | eradicate    |   2.2.0   | MIT License (Expat License) |
-| jasy         | 1.5-beta6 | MIT License (MIT)           |
 | mccabe       |   0.7.0   | MIT License (Expat License) |
 | pipdeptree   |   2.5.2   | MIT License (MIT)           |
 | pip-licenses |   4.1.0   | MIT License (MIT)           |
--- a/docs/changelog.md	Fri Mar 31 13:39:51 2023 +0200
+++ b/docs/changelog.md	Sat Apr 01 11:09:00 2023 +0200
@@ -2,6 +2,9 @@
 
 ### Version 23.5
 - bug fixes
+- JavaScript Support
+    - Removed JavaScript functionality depending on the `Jasy` package because it
+      has not been maintained for years.
 - MicroPython
     - Added the capability to select the device path manually in case it could not
       be detected (e.g. because the device does not have a volume name).
--- a/eric7.epj	Fri Mar 31 13:39:51 2023 +0200
+++ b/eric7.epj	Sat Apr 01 11:09:00 2023 +0200
@@ -931,7 +931,6 @@
       "src/eric7/Plugins/ViewManagerPlugins/Tabview/preview.png",
       "src/eric7/Styles",
       "src/eric7/Themes",
-      "src/eric7/ThirdParty/Jasy/jasy/license.md",
       "src/eric7/UI/data/documentViewerStyle-dark.css",
       "src/eric7/UI/data/documentViewerStyle-light.css",
       "src/eric7/WebBrowser/Bookmarks/DefaultBookmarks.xbel",
@@ -1500,7 +1499,6 @@
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/SyntaxCheckService.py",
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/SyntaxCheckerDialog.py",
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/__init__.py",
-      "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py",
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/jsonCheckSyntax.py",
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/pyCheckSyntax.py",
       "src/eric7/Plugins/CheckerPlugins/SyntaxChecker/pyflakes/__init__.py",
@@ -2100,29 +2098,6 @@
       "src/eric7/Testing/TestResultsTree.py",
       "src/eric7/Testing/TestingWidget.py",
       "src/eric7/Testing/__init__.py",
-      "src/eric7/ThirdParty/Jasy/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/core/Console.py",
-      "src/eric7/ThirdParty/Jasy/jasy/core/Text.py",
-      "src/eric7/ThirdParty/Jasy/jasy/core/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/parse/AbstractNode.py",
-      "src/eric7/ThirdParty/Jasy/jasy/parse/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/api/Comment.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/api/Text.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/api/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/output/Compressor.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/output/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/parse/Lang.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/parse/Node.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/parse/Parser.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/parse/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Lang.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/tokenize/__init__.py",
-      "src/eric7/ThirdParty/Jasy/jasy/script/util/__init__.py",
-      "src/eric7/ThirdParty/__init__.py",
       "src/eric7/Toolbox/SingleApplication.py",
       "src/eric7/Toolbox/Startup.py",
       "src/eric7/Toolbox/__init__.py",
@@ -2174,7 +2149,6 @@
       "src/eric7/Utilities/BackgroundService.py",
       "src/eric7/Utilities/ClassBrowsers/ClbrBaseClasses.py",
       "src/eric7/Utilities/ClassBrowsers/__init__.py",
-      "src/eric7/Utilities/ClassBrowsers/jsclbr.py",
       "src/eric7/Utilities/ClassBrowsers/pyclbr.py",
       "src/eric7/Utilities/ClassBrowsers/rbclbr.py",
       "src/eric7/Utilities/FtpUtilities.py",
--- a/src/eric7/APIs/Python3/eric7.api	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/APIs/Python3/eric7.api	Sat Apr 01 11:09:00 2023 +0200
@@ -4675,11 +4675,6 @@
 eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog.SyntaxCheckerDialog.startForBrowser?4(fn)
 eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog.SyntaxCheckerDialog.warningRole?7
 eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog.SyntaxCheckerDialog?1(parent=None)
-eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.initBatchService?4()
-eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.initService?4()
-eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.jsSyntaxBatchCheck?4(argumentsList, send, fx, cancelled, maxProcesses=0)
-eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.jsSyntaxCheck?4(file, codestring)
-eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.workerTask?4(inputQueue, outputQueue)
 eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.initBatchService?4()
 eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.initService?4()
 eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.jsonSyntaxBatchCheck?4(argumentsList, send, fx, cancelled, maxProcesses=0)
@@ -10767,20 +10762,6 @@
 eric7.Utilities.ClassBrowsers.getClassBrowserModule?4(moduleType)
 eric7.Utilities.ClassBrowsers.getIcon?4(filename)
 eric7.Utilities.ClassBrowsers.isSupportedType?4(fileext)
-eric7.Utilities.ClassBrowsers.jsclbr.Attribute?1(module, name, file, lineno)
-eric7.Utilities.ClassBrowsers.jsclbr.Function?1(module, name, file, lineno, signature="", separator=", ")
-eric7.Utilities.ClassBrowsers.jsclbr.SUPPORTED_TYPES?7
-eric7.Utilities.ClassBrowsers.jsclbr.VisibilityMixin?1()
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.call?4()
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.parse?4()
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.visit_const?4(node)
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.visit_function?4(node)
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.visit_noop?4(node)
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.visit_property_init?4(node)
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor.visit_var?4(node)
-eric7.Utilities.ClassBrowsers.jsclbr.Visitor?1(src, module, filename)
-eric7.Utilities.ClassBrowsers.jsclbr.readmodule_ex?4(module, path=None, isTypeFile=False)
-eric7.Utilities.ClassBrowsers.jsclbr.scan?4(src, file, module)
 eric7.Utilities.ClassBrowsers.pyclbr.Attribute?1(module, name, file, lineno)
 eric7.Utilities.ClassBrowsers.pyclbr.Class?1(module, name, superClasses, file, lineno)
 eric7.Utilities.ClassBrowsers.pyclbr.Function?1(module, name, file, lineno, signature="", separator=", ", modifierType=ClbrBaseClasses.Function.General, annotation="", )
Binary file src/eric7/Documentation/Help/source.qch has changed
--- a/src/eric7/Documentation/Help/source.qhp	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/Documentation/Help/source.qhp	Sat Apr 01 11:09:00 2023 +0200
@@ -491,7 +491,6 @@
               <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker" ref="index-eric7.Plugins.CheckerPlugins.SyntaxChecker.html">
                 <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckService.html" />
                 <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog.html" />
-                <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html" />
                 <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html" />
                 <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax.html" />
                 <section title="eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax.html" />
@@ -1210,7 +1209,6 @@
             <section title="eric7.Utilities.ClassBrowsers" ref="index-eric7.Utilities.ClassBrowsers.html">
               <section title="eric7.Utilities.ClassBrowsers.ClbrBaseClasses" ref="eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html" />
               <section title="eric7.Utilities.ClassBrowsers.__init__" ref="eric7.Utilities.ClassBrowsers.__init__.html" />
-              <section title="eric7.Utilities.ClassBrowsers.jsclbr" ref="eric7.Utilities.ClassBrowsers.jsclbr.html" />
               <section title="eric7.Utilities.ClassBrowsers.pyclbr" ref="eric7.Utilities.ClassBrowsers.pyclbr.html" />
               <section title="eric7.Utilities.ClassBrowsers.rbclbr" ref="eric7.Utilities.ClassBrowsers.rbclbr.html" />
             </section>
@@ -2006,12 +2004,10 @@
       <keyword name="AsyncFile.writelines" id="AsyncFile.writelines" ref="eric7.DebugClients.Python.AsyncFile.html#AsyncFile.writelines" />
       <keyword name="AsyncPendingWrite" id="AsyncPendingWrite" ref="eric7.DebugClients.Python.AsyncFile.html#AsyncPendingWrite" />
       <keyword name="Attribute" id="Attribute" ref="eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html#Attribute" />
-      <keyword name="Attribute" id="Attribute" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#Attribute" />
       <keyword name="Attribute" id="Attribute" ref="eric7.Utilities.ModuleParser.html#Attribute" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html#Attribute.__init__" />
-      <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#Attribute.__init__" />
       <keyword name="Attribute (Constructor)" id="Attribute (Constructor)" ref="eric7.Utilities.ModuleParser.html#Attribute.__init__" />
@@ -6660,13 +6656,11 @@
       <keyword name="FtpUtilities (Module)" id="FtpUtilities (Module)" ref="eric7.Utilities.FtpUtilities.html" />
       <keyword name="Function" id="Function" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.Annotations.AnnotationsFunctionVisitor.html#Function" />
       <keyword name="Function" id="Function" ref="eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html#Function" />
-      <keyword name="Function" id="Function" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#Function" />
       <keyword name="Function" id="Function" ref="eric7.Utilities.ModuleParser.html#Function" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.Annotations.AnnotationsFunctionVisitor.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html#Function.__init__" />
-      <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#Function.__init__" />
       <keyword name="Function (Constructor)" id="Function (Constructor)" ref="eric7.Utilities.ModuleParser.html#Function.__init__" />
@@ -18174,22 +18168,10 @@
       <keyword name="VisibilityBase.setPrivate" id="VisibilityBase.setPrivate" ref="eric7.Utilities.ModuleParser.html#VisibilityBase.setPrivate" />
       <keyword name="VisibilityBase.setProtected" id="VisibilityBase.setProtected" ref="eric7.Utilities.ModuleParser.html#VisibilityBase.setProtected" />
       <keyword name="VisibilityBase.setPublic" id="VisibilityBase.setPublic" ref="eric7.Utilities.ModuleParser.html#VisibilityBase.setPublic" />
-      <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#VisibilityMixin" />
       <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#VisibilityMixin" />
       <keyword name="VisibilityMixin" id="VisibilityMixin" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#VisibilityMixin" />
-      <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#VisibilityMixin.__init__" />
       <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#VisibilityMixin.__init__" />
       <keyword name="VisibilityMixin (Constructor)" id="VisibilityMixin (Constructor)" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#VisibilityMixin.__init__" />
-      <keyword name="Visitor" id="Visitor" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor" />
-      <keyword name="Visitor (Constructor)" id="Visitor (Constructor)" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.__init__" />
-      <keyword name="Visitor.__visit" id="Visitor.__visit" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.__visit" />
-      <keyword name="Visitor.call" id="Visitor.call" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.call" />
-      <keyword name="Visitor.parse" id="Visitor.parse" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.parse" />
-      <keyword name="Visitor.visit_const" id="Visitor.visit_const" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_const" />
-      <keyword name="Visitor.visit_function" id="Visitor.visit_function" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_function" />
-      <keyword name="Visitor.visit_noop" id="Visitor.visit_noop" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_noop" />
-      <keyword name="Visitor.visit_property_init" id="Visitor.visit_property_init" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_property_init" />
-      <keyword name="Visitor.visit_var" id="Visitor.visit_var" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#Visitor.visit_var" />
       <keyword name="VmListspacePlugin" id="VmListspacePlugin" ref="eric7.Plugins.PluginVmListspace.html#VmListspacePlugin" />
       <keyword name="VmListspacePlugin (Constructor)" id="VmListspacePlugin (Constructor)" ref="eric7.Plugins.PluginVmListspace.html#VmListspacePlugin.__init__" />
       <keyword name="VmListspacePlugin.activate" id="VmListspacePlugin.activate" ref="eric7.Plugins.PluginVmListspace.html#VmListspacePlugin.activate" />
@@ -19100,7 +19082,6 @@
       <keyword name="__getPygmentsLexer" id="__getPygmentsLexer" ref="eric7.QScintilla.Lexers.__init__.html#__getPygmentsLexer" />
       <keyword name="__initChannelModesDict" id="__initChannelModesDict" ref="eric7.Network.IRC.IrcUtilities.html#__initChannelModesDict" />
       <keyword name="__initializeCondaInterface" id="__initializeCondaInterface" ref="eric7.CondaInterface.__init__.html#__initializeCondaInterface" />
-      <keyword name="__jsSyntaxCheck" id="__jsSyntaxCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#__jsSyntaxCheck" />
       <keyword name="__jsonSyntaxCheck" id="__jsonSyntaxCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#__jsonSyntaxCheck" />
       <keyword name="__main__ (Module)" id="__main__ (Module)" ref="eric7.__main__.html" />
       <keyword name="__mapType" id="__mapType" ref="eric7.Project.UicLoadUi5.html#__mapType" />
@@ -19826,7 +19807,6 @@
       <keyword name="indentation" id="indentation" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.pycodestyle.html#indentation" />
       <keyword name="information" id="information" ref="eric7.EricWidgets.EricMessageBox.html#information" />
       <keyword name="initBatchService" id="initBatchService" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.CodeStyleChecker.html#initBatchService" />
-      <keyword name="initBatchService" id="initBatchService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#initBatchService" />
       <keyword name="initBatchService" id="initBatchService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#initBatchService" />
       <keyword name="initBatchService" id="initBatchService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax.html#initBatchService" />
       <keyword name="initBatchService" id="initBatchService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax.html#initBatchService" />
@@ -19836,7 +19816,6 @@
       <keyword name="initRecentSettings" id="initRecentSettings" ref="eric7.Preferences.__init__.html#initRecentSettings" />
       <keyword name="initSSL" id="initSSL" ref="eric7.EricNetwork.EricSslUtilities.html#initSSL" />
       <keyword name="initService" id="initService" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.CodeStyleChecker.html#initService" />
-      <keyword name="initService" id="initService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#initService" />
       <keyword name="initService" id="initService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#initService" />
       <keyword name="initService" id="initService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax.html#initService" />
       <keyword name="initService" id="initService" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax.html#initService" />
@@ -19894,10 +19873,6 @@
       <keyword name="isinpath" id="isinpath" ref="eric7.SystemUtilities.FileSystemUtilities.html#isinpath" />
       <keyword name="jinja2Templates (Module)" id="jinja2Templates (Module)" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.Security.Checks.jinja2Templates.html" />
       <keyword name="joinext" id="joinext" ref="eric7.SystemUtilities.FileSystemUtilities.html#joinext" />
-      <keyword name="jsCheckSyntax (Module)" id="jsCheckSyntax (Module)" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html" />
-      <keyword name="jsSyntaxBatchCheck" id="jsSyntaxBatchCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#jsSyntaxBatchCheck" />
-      <keyword name="jsSyntaxCheck" id="jsSyntaxCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#jsSyntaxCheck" />
-      <keyword name="jsclbr (Module)" id="jsclbr (Module)" ref="eric7.Utilities.ClassBrowsers.jsclbr.html" />
       <keyword name="jsonCheckSyntax (Module)" id="jsonCheckSyntax (Module)" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html" />
       <keyword name="jsonSyntaxBatchCheck" id="jsonSyntaxBatchCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#jsonSyntaxBatchCheck" />
       <keyword name="jsonSyntaxCheck" id="jsonSyntaxCheck" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#jsonSyntaxCheck" />
@@ -20060,7 +20035,6 @@
       <keyword name="read_config" id="read_config" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.pycodestyle.html#read_config" />
       <keyword name="readlines" id="readlines" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.pycodestyle.html#readlines" />
       <keyword name="readmodule" id="readmodule" ref="eric7.Utilities.ClassBrowsers.__init__.html#readmodule" />
-      <keyword name="readmodule_ex" id="readmodule_ex" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#readmodule_ex" />
       <keyword name="readmodule_ex" id="readmodule_ex" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#readmodule_ex" />
       <keyword name="readmodule_ex" id="readmodule_ex" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#readmodule_ex" />
       <keyword name="rebase (Module)" id="rebase (Module)" ref="eric7.Plugins.VcsPlugins.vcsMercurial.RebaseExtension.rebase.html" />
@@ -20102,7 +20076,6 @@
       <keyword name="saveToolGroups" id="saveToolGroups" ref="eric7.Preferences.__init__.html#saveToolGroups" />
       <keyword name="sbsdiff" id="sbsdiff" ref="eric7.UI.CompareDialog.html#sbsdiff" />
       <keyword name="scan" id="scan" ref="eric7.Utilities.ClassBrowsers.__init__.html#scan" />
-      <keyword name="scan" id="scan" ref="eric7.Utilities.ClassBrowsers.jsclbr.html#scan" />
       <keyword name="scan" id="scan" ref="eric7.Utilities.ClassBrowsers.pyclbr.html#scan" />
       <keyword name="scan" id="scan" ref="eric7.Utilities.ClassBrowsers.rbclbr.html#scan" />
       <keyword name="schemeFromProxyType" id="schemeFromProxyType" ref="eric7.EricNetwork.EricNetworkProxyFactory.html#schemeFromProxyType" />
@@ -20274,7 +20247,6 @@
       <keyword name="windowsDesktopEntries" id="windowsDesktopEntries" ref="eric7.eric7_post_install.html#windowsDesktopEntries" />
       <keyword name="windowsProgramsEntry" id="windowsProgramsEntry" ref="eric7.eric7_post_install.html#windowsProgramsEntry" />
       <keyword name="workerTask" id="workerTask" ref="eric7.Plugins.CheckerPlugins.CodeStyleChecker.CodeStyleChecker.html#workerTask" />
-      <keyword name="workerTask" id="workerTask" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html#workerTask" />
       <keyword name="workerTask" id="workerTask" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html#workerTask" />
       <keyword name="workerTask" id="workerTask" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax.html#workerTask" />
       <keyword name="workerTask" id="workerTask" ref="eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax.html#workerTask" />
@@ -20664,7 +20636,6 @@
       <file>eric7.Plugins.CheckerPlugins.CodeStyleChecker.translations.html</file>
       <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckService.html</file>
       <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.SyntaxCheckerDialog.html</file>
-      <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html</file>
       <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html</file>
       <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.pyCheckSyntax.html</file>
       <file>eric7.Plugins.CheckerPlugins.SyntaxChecker.tomlCheckSyntax.html</file>
@@ -21255,7 +21226,6 @@
       <file>eric7.Utilities.BackgroundService.html</file>
       <file>eric7.Utilities.ClassBrowsers.ClbrBaseClasses.html</file>
       <file>eric7.Utilities.ClassBrowsers.__init__.html</file>
-      <file>eric7.Utilities.ClassBrowsers.jsclbr.html</file>
       <file>eric7.Utilities.ClassBrowsers.pyclbr.html</file>
       <file>eric7.Utilities.ClassBrowsers.rbclbr.html</file>
       <file>eric7.Utilities.FtpUtilities.html</file>
--- a/src/eric7/Documentation/Source/eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,200 +0,0 @@
-<!DOCTYPE html>
-<html><head>
-<title>eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax</title>
-<meta charset="UTF-8">
-<link rel="stylesheet" href="styles.css">
-</head>
-<body>
-<a NAME="top" ID="top"></a>
-<h1>eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax</h1>
-
-<p>
-Module implementing the syntax check for JavaScript.
-</p>
-<h3>Global Attributes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Classes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Functions</h3>
-
-<table>
-
-<tr>
-<td><a href="#__jsSyntaxCheck">__jsSyntaxCheck</a></td>
-<td>Function to check a Javascript source file for syntax errors.</td>
-</tr>
-<tr>
-<td><a href="#initBatchService">initBatchService</a></td>
-<td>Initialize the batch service and return the entry point.</td>
-</tr>
-<tr>
-<td><a href="#initService">initService</a></td>
-<td>Initialize the service and return the entry point.</td>
-</tr>
-<tr>
-<td><a href="#jsSyntaxBatchCheck">jsSyntaxBatchCheck</a></td>
-<td>Module function to check syntax for a batch of files.</td>
-</tr>
-<tr>
-<td><a href="#jsSyntaxCheck">jsSyntaxCheck</a></td>
-<td>Function to check a Javascript source file for syntax errors.</td>
-</tr>
-<tr>
-<td><a href="#workerTask">workerTask</a></td>
-<td>Module function acting as the parallel worker for the syntax check.</td>
-</tr>
-</table>
-<hr />
-<hr />
-<a NAME="__jsSyntaxCheck" ID="__jsSyntaxCheck"></a>
-<h2>__jsSyntaxCheck</h2>
-<b>__jsSyntaxCheck</b>(<i>file, codestring</i>)
-
-<p>
-    Function to check a Javascript source file for syntax errors.
-</p>
-<dl>
-
-<dt><i>file</i></dt>
-<dd>
-source filename (string)
-</dd>
-<dt><i>codestring</i></dt>
-<dd>
-string containing the code to check (string)
-</dd>
-</dl>
-<dl>
-<dt>Return:</dt>
-<dd>
-dictionary with the keys 'error' and 'warnings' which
-            hold a list containing details about the error/ warnings
-            (file name, line number, column, codestring (only at syntax
-            errors), the message, a list with arguments for the message)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="initBatchService" ID="initBatchService"></a>
-<h2>initBatchService</h2>
-<b>initBatchService</b>(<i></i>)
-
-<p>
-    Initialize the batch service and return the entry point.
-</p>
-<dl>
-<dt>Return:</dt>
-<dd>
-the entry point for the background client (function)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="initService" ID="initService"></a>
-<h2>initService</h2>
-<b>initService</b>(<i></i>)
-
-<p>
-    Initialize the service and return the entry point.
-</p>
-<dl>
-<dt>Return:</dt>
-<dd>
-the entry point for the background client (function)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="jsSyntaxBatchCheck" ID="jsSyntaxBatchCheck"></a>
-<h2>jsSyntaxBatchCheck</h2>
-<b>jsSyntaxBatchCheck</b>(<i>argumentsList, send, fx, cancelled, maxProcesses=0</i>)
-
-<p>
-    Module function to check syntax for a batch of files.
-</p>
-<dl>
-
-<dt><i>argumentsList</i> (list)</dt>
-<dd>
-list of arguments tuples as given for jsSyntaxCheck
-</dd>
-<dt><i>send</i> (func)</dt>
-<dd>
-reference to send function
-</dd>
-<dt><i>fx</i> (str)</dt>
-<dd>
-registered service name
-</dd>
-<dt><i>cancelled</i> (func)</dt>
-<dd>
-reference to function checking for a cancellation
-</dd>
-<dt><i>maxProcesses</i> (int)</dt>
-<dd>
-number of processes to be used
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="jsSyntaxCheck" ID="jsSyntaxCheck"></a>
-<h2>jsSyntaxCheck</h2>
-<b>jsSyntaxCheck</b>(<i>file, codestring</i>)
-
-<p>
-    Function to check a Javascript source file for syntax errors.
-</p>
-<dl>
-
-<dt><i>file</i></dt>
-<dd>
-source filename (string)
-</dd>
-<dt><i>codestring</i></dt>
-<dd>
-string containing the code to check (string)
-</dd>
-</dl>
-<dl>
-<dt>Return:</dt>
-<dd>
-dictionary with the keys 'error' and 'warnings' which
-            hold a list containing details about the error/ warnings
-            (file name, line number, column, codestring (only at syntax
-            errors), the message, a list with arguments for the message)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="workerTask" ID="workerTask"></a>
-<h2>workerTask</h2>
-<b>workerTask</b>(<i>inputQueue, outputQueue</i>)
-
-<p>
-    Module function acting as the parallel worker for the syntax check.
-</p>
-<dl>
-
-<dt><i>inputQueue</i></dt>
-<dd>
-input queue (multiprocessing.Queue)
-</dd>
-<dt><i>outputQueue</i></dt>
-<dd>
-output queue (multiprocessing.Queue)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-</body></html>
\ No newline at end of file
--- a/src/eric7/Documentation/Source/eric7.Utilities.ClassBrowsers.jsclbr.html	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,498 +0,0 @@
-<!DOCTYPE html>
-<html><head>
-<title>eric7.Utilities.ClassBrowsers.jsclbr</title>
-<meta charset="UTF-8">
-<link rel="stylesheet" href="styles.css">
-</head>
-<body>
-<a NAME="top" ID="top"></a>
-<h1>eric7.Utilities.ClassBrowsers.jsclbr</h1>
-
-<p>
-Parse a JavaScript file and retrieve variables and functions.
-</p>
-<p>
-It uses the JavaScript parser contained in the jasy web framework.
-</p>
-<h3>Global Attributes</h3>
-
-<table>
-<tr><td>SUPPORTED_TYPES</td></tr>
-</table>
-<h3>Classes</h3>
-
-<table>
-
-<tr>
-<td><a href="#Attribute">Attribute</a></td>
-<td>Class to represent a class attribute.</td>
-</tr>
-<tr>
-<td><a href="#Function">Function</a></td>
-<td>Class to represent a Python function.</td>
-</tr>
-<tr>
-<td><a href="#VisibilityMixin">VisibilityMixin</a></td>
-<td>Mixin class implementing the notion of visibility.</td>
-</tr>
-<tr>
-<td><a href="#Visitor">Visitor</a></td>
-<td>Class implementing a visitor going through the parsed tree.</td>
-</tr>
-</table>
-<h3>Functions</h3>
-
-<table>
-
-<tr>
-<td><a href="#readmodule_ex">readmodule_ex</a></td>
-<td>Read a JavaScript file and return a dictionary of functions and variables.</td>
-</tr>
-<tr>
-<td><a href="#scan">scan</a></td>
-<td>Public method to scan the given source text.</td>
-</tr>
-</table>
-<hr />
-<hr />
-<a NAME="Attribute" ID="Attribute"></a>
-<h2>Attribute</h2>
-
-<p>
-    Class to represent a class attribute.
-</p>
-<h3>Derived from</h3>
-ClbrBaseClasses.Attribute, VisibilityMixin
-<h3>Class Attributes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Class Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Methods</h3>
-
-<table>
-
-<tr>
-<td><a href="#Attribute.__init__">Attribute</a></td>
-<td>Constructor</td>
-</tr>
-</table>
-<h3>Static Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-
-<a NAME="Attribute.__init__" ID="Attribute.__init__"></a>
-<h4>Attribute (Constructor)</h4>
-<b>Attribute</b>(<i>module, name, file, lineno</i>)
-
-<p>
-        Constructor
-</p>
-<dl>
-
-<dt><i>module</i></dt>
-<dd>
-name of the module containing this class
-</dd>
-<dt><i>name</i></dt>
-<dd>
-name of this class
-</dd>
-<dt><i>file</i></dt>
-<dd>
-filename containing this attribute
-</dd>
-<dt><i>lineno</i></dt>
-<dd>
-linenumber of the class definition
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="Function" ID="Function"></a>
-<h2>Function</h2>
-
-<p>
-    Class to represent a Python function.
-</p>
-<h3>Derived from</h3>
-ClbrBaseClasses.Function, VisibilityMixin
-<h3>Class Attributes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Class Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Methods</h3>
-
-<table>
-
-<tr>
-<td><a href="#Function.__init__">Function</a></td>
-<td>Constructor</td>
-</tr>
-</table>
-<h3>Static Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-
-<a NAME="Function.__init__" ID="Function.__init__"></a>
-<h4>Function (Constructor)</h4>
-<b>Function</b>(<i>module, name, file, lineno, signature="", separator=", "</i>)
-
-<p>
-        Constructor
-</p>
-<dl>
-
-<dt><i>module</i></dt>
-<dd>
-name of the module containing this function
-</dd>
-<dt><i>name</i></dt>
-<dd>
-name of this function
-</dd>
-<dt><i>file</i></dt>
-<dd>
-filename containing this class
-</dd>
-<dt><i>lineno</i></dt>
-<dd>
-linenumber of the class definition
-</dd>
-<dt><i>signature</i></dt>
-<dd>
-parameterlist of the method
-</dd>
-<dt><i>separator</i></dt>
-<dd>
-string separating the parameters
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="VisibilityMixin" ID="VisibilityMixin"></a>
-<h2>VisibilityMixin</h2>
-
-<p>
-    Mixin class implementing the notion of visibility.
-</p>
-<h3>Derived from</h3>
-ClbrBaseClasses.ClbrVisibilityMixinBase
-<h3>Class Attributes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Class Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Methods</h3>
-
-<table>
-
-<tr>
-<td><a href="#VisibilityMixin.__init__">VisibilityMixin</a></td>
-<td>Constructor</td>
-</tr>
-</table>
-<h3>Static Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-
-<a NAME="VisibilityMixin.__init__" ID="VisibilityMixin.__init__"></a>
-<h4>VisibilityMixin (Constructor)</h4>
-<b>VisibilityMixin</b>(<i></i>)
-
-<p>
-        Constructor
-</p>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="Visitor" ID="Visitor"></a>
-<h2>Visitor</h2>
-
-<p>
-    Class implementing a visitor going through the parsed tree.
-</p>
-<h3>Derived from</h3>
-None
-<h3>Class Attributes</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Class Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-<h3>Methods</h3>
-
-<table>
-
-<tr>
-<td><a href="#Visitor.__init__">Visitor</a></td>
-<td>Constructor</td>
-</tr>
-<tr>
-<td><a href="#Visitor.__visit">__visit</a></td>
-<td>Private method implementing the visit logic delegating to interesting methods.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.call">call</a></td>
-<td></td>
-</tr>
-<tr>
-<td><a href="#Visitor.parse">parse</a></td>
-<td>Public method to parse the source.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.visit_const">visit_const</a></td>
-<td>Public method to treat a constant node.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.visit_function">visit_function</a></td>
-<td>Public method to treat a function node.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.visit_noop">visit_noop</a></td>
-<td>Public method to ignore the given node.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.visit_property_init">visit_property_init</a></td>
-<td>Public method to treat a property_init node.</td>
-</tr>
-<tr>
-<td><a href="#Visitor.visit_var">visit_var</a></td>
-<td>Public method to treat a variable node.</td>
-</tr>
-</table>
-<h3>Static Methods</h3>
-
-<table>
-<tr><td>None</td></tr>
-</table>
-
-<a NAME="Visitor.__init__" ID="Visitor.__init__"></a>
-<h4>Visitor (Constructor)</h4>
-<b>Visitor</b>(<i>src, module, filename</i>)
-
-<p>
-        Constructor
-</p>
-<dl>
-
-<dt><i>src</i></dt>
-<dd>
-source to be parsed (string)
-</dd>
-<dt><i>module</i></dt>
-<dd>
-name of the module (string)
-</dd>
-<dt><i>filename</i></dt>
-<dd>
-file name (string)
-</dd>
-</dl>
-<a NAME="Visitor.__visit" ID="Visitor.__visit"></a>
-<h4>Visitor.__visit</h4>
-<b>__visit</b>(<i>root</i>)
-
-<p>
-        Private method implementing the visit logic delegating to interesting
-        methods.
-</p>
-<dl>
-
-<dt><i>root</i></dt>
-<dd>
-root node to visit
-</dd>
-</dl>
-<a NAME="Visitor.call" ID="Visitor.call"></a>
-<h4>Visitor.call</h4>
-<b>call</b>(<i></i>)
-
-<a NAME="Visitor.parse" ID="Visitor.parse"></a>
-<h4>Visitor.parse</h4>
-<b>parse</b>(<i></i>)
-
-<p>
-        Public method to parse the source.
-</p>
-<dl>
-<dt>Return:</dt>
-<dd>
-dictionary containing the parsed information
-</dd>
-</dl>
-<a NAME="Visitor.visit_const" ID="Visitor.visit_const"></a>
-<h4>Visitor.visit_const</h4>
-<b>visit_const</b>(<i>node</i>)
-
-<p>
-        Public method to treat a constant node.
-</p>
-<dl>
-
-<dt><i>node</i></dt>
-<dd>
-reference to the node (jasy.script.parse.Node.Node)
-</dd>
-</dl>
-<a NAME="Visitor.visit_function" ID="Visitor.visit_function"></a>
-<h4>Visitor.visit_function</h4>
-<b>visit_function</b>(<i>node</i>)
-
-<p>
-        Public method to treat a function node.
-</p>
-<dl>
-
-<dt><i>node</i></dt>
-<dd>
-reference to the node (jasy.script.parse.Node.Node)
-</dd>
-</dl>
-<a NAME="Visitor.visit_noop" ID="Visitor.visit_noop"></a>
-<h4>Visitor.visit_noop</h4>
-<b>visit_noop</b>(<i>node</i>)
-
-<p>
-        Public method to ignore the given node.
-</p>
-<dl>
-
-<dt><i>node</i></dt>
-<dd>
-reference to the node (jasy.script.parse.Node.Node)
-</dd>
-</dl>
-<a NAME="Visitor.visit_property_init" ID="Visitor.visit_property_init"></a>
-<h4>Visitor.visit_property_init</h4>
-<b>visit_property_init</b>(<i>node</i>)
-
-<p>
-        Public method to treat a property_init node.
-</p>
-<dl>
-
-<dt><i>node</i></dt>
-<dd>
-reference to the node (jasy.script.parse.Node.Node)
-</dd>
-</dl>
-<a NAME="Visitor.visit_var" ID="Visitor.visit_var"></a>
-<h4>Visitor.visit_var</h4>
-<b>visit_var</b>(<i>node</i>)
-
-<p>
-        Public method to treat a variable node.
-</p>
-<dl>
-
-<dt><i>node</i></dt>
-<dd>
-reference to the node (jasy.script.parse.Node.Node)
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="readmodule_ex" ID="readmodule_ex"></a>
-<h2>readmodule_ex</h2>
-<b>readmodule_ex</b>(<i>module, path=None, isTypeFile=False</i>)
-
-<p>
-    Read a JavaScript file and return a dictionary of functions and variables.
-</p>
-<dl>
-
-<dt><i>module</i> (str)</dt>
-<dd>
-name of the JavaScript file
-</dd>
-<dt><i>path</i> (list of str)</dt>
-<dd>
-path the file should be searched in
-</dd>
-<dt><i>isTypeFile</i> (bool)</dt>
-<dd>
-flag indicating a file of this type
-</dd>
-</dl>
-<dl>
-<dt>Return:</dt>
-<dd>
-the resulting dictionary
-</dd>
-</dl>
-<dl>
-<dt>Return Type:</dt>
-<dd>
-dict
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-<hr />
-<a NAME="scan" ID="scan"></a>
-<h2>scan</h2>
-<b>scan</b>(<i>src, file, module</i>)
-
-<p>
-    Public method to scan the given source text.
-</p>
-<dl>
-
-<dt><i>src</i> (str)</dt>
-<dd>
-source text to be scanned
-</dd>
-<dt><i>file</i> (str)</dt>
-<dd>
-file name associated with the source text
-</dd>
-<dt><i>module</i> (str)</dt>
-<dd>
-module name associated with the source text
-</dd>
-</dl>
-<dl>
-<dt>Return:</dt>
-<dd>
-dictionary containing the extracted data
-</dd>
-</dl>
-<dl>
-<dt>Return Type:</dt>
-<dd>
-dict
-</dd>
-</dl>
-<div align="right"><a href="#top">Up</a></div>
-<hr />
-</body></html>
\ No newline at end of file
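
Note: the removed jsclbr documentation above describes a visitor that walks the Jasy parse tree and dispatches on node type (visit_function, visit_var, visit_const, visit_property_init) with visit_noop as the fallback. The following self-contained sketch illustrates that dispatch idea only; the dictionary-based tree is a stand-in, not the Jasy node structure.

    class MiniVisitor:
        """Toy visitor illustrating the visit_<type> dispatch pattern."""

        def __init__(self, tree):
            self.tree = tree
            self.found = []

        def parse(self):
            # Walk the whole tree and return what was collected.
            self.__visit(self.tree)
            return self.found

        def __visit(self, node):
            # Delegate to a matching visit_<type> method, default to noop.
            handler = getattr(self, "visit_" + node["type"], self.visit_noop)
            handler(node)
            for child in node.get("children", []):
                self.__visit(child)

        def visit_function(self, node):
            self.found.append(("function", node["name"]))

        def visit_var(self, node):
            self.found.append(("variable", node["name"]))

        def visit_noop(self, node):
            pass


    tree = {
        "type": "script",
        "name": "",
        "children": [
            {"type": "function", "name": "init"},
            {"type": "var", "name": "counter"},
        ],
    }
    print(MiniVisitor(tree).parse())  # [('function', 'init'), ('variable', 'counter')]
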
--- a/src/eric7/Documentation/Source/index-eric7.Plugins.CheckerPlugins.SyntaxChecker.html	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/Documentation/Source/index-eric7.Plugins.CheckerPlugins.SyntaxChecker.html	Sat Apr 01 11:09:00 2023 +0200
@@ -24,10 +24,6 @@
 <td>Module implementing a simple Python syntax checker.</td>
 </tr>
 <tr>
-<td><a href="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsCheckSyntax.html">jsCheckSyntax</a></td>
-<td>Module implementing the syntax check for JavaScript.</td>
-</tr>
-<tr>
 <td><a href="eric7.Plugins.CheckerPlugins.SyntaxChecker.jsonCheckSyntax.html">jsonCheckSyntax</a></td>
 <td>Module implementing the syntax check for JSON.</td>
 </tr>
--- a/src/eric7/Documentation/Source/index-eric7.Utilities.ClassBrowsers.html	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/Documentation/Source/index-eric7.Utilities.ClassBrowsers.html	Sat Apr 01 11:09:00 2023 +0200
@@ -37,10 +37,6 @@
 <td>Package implementing class browsers for various languages.</td>
 </tr>
 <tr>
-<td><a href="eric7.Utilities.ClassBrowsers.jsclbr.html">jsclbr</a></td>
-<td>Parse a JavaScript file and retrieve variables and functions.</td>
-</tr>
-<tr>
 <td><a href="eric7.Utilities.ClassBrowsers.pyclbr.html">pyclbr</a></td>
 <td>Parse a Python file and retrieve classes, functions/methods and attributes.</td>
 </tr>
--- a/src/eric7/Plugins/CheckerPlugins/SyntaxChecker/jsCheckSyntax.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2014 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Module implementing the syntax check for JavaScript.
-"""
-
-import multiprocessing
-import os
-import queue
-import sys
-
-
-def initService():
-    """
-    Initialize the service and return the entry point.
-
-    @return the entry point for the background client (function)
-    """
-    path = __file__
-    for _ in range(4):
-        path = os.path.dirname(path)
-    sys.path.insert(2, os.path.join(path, "ThirdParty", "Jasy"))
-    return jsSyntaxCheck
-
-
-def initBatchService():
-    """
-    Initialize the batch service and return the entry point.
-
-    @return the entry point for the background client (function)
-    """
-    return jsSyntaxBatchCheck
-
-
-def jsSyntaxCheck(file, codestring):
-    """
-    Function to check a Javascript source file for syntax errors.
-
-    @param file source filename (string)
-    @param codestring string containing the code to check (string)
-    @return dictionary with the keys 'error' and 'warnings' which
-            hold a list containing details about the error/ warnings
-            (file name, line number, column, codestring (only at syntax
-            errors), the message, a list with arguments for the message)
-    """
-    return __jsSyntaxCheck(file, codestring)
-
-
-def jsSyntaxBatchCheck(argumentsList, send, fx, cancelled, maxProcesses=0):
-    """
-    Module function to check syntax for a batch of files.
-
-    @param argumentsList list of arguments tuples as given for jsSyntaxCheck
-    @type list
-    @param send reference to send function
-    @type func
-    @param fx registered service name
-    @type str
-    @param cancelled reference to function checking for a cancellation
-    @type func
-    @param maxProcesses number of processes to be used
-    @type int
-    """
-    if maxProcesses == 0:
-        # determine based on CPU count
-        try:
-            NumberOfProcesses = multiprocessing.cpu_count()
-            if NumberOfProcesses >= 1:
-                NumberOfProcesses -= 1
-        except NotImplementedError:
-            NumberOfProcesses = 1
-    else:
-        NumberOfProcesses = maxProcesses
-
-    # Create queues
-    taskQueue = multiprocessing.Queue()
-    doneQueue = multiprocessing.Queue()
-
-    # Submit tasks (initially two times the number of processes)
-    tasks = len(argumentsList)
-    initialTasks = min(2 * NumberOfProcesses, tasks)
-    for _ in range(initialTasks):
-        taskQueue.put(argumentsList.pop(0))
-
-    # Start worker processes
-    workers = [
-        multiprocessing.Process(target=workerTask, args=(taskQueue, doneQueue))
-        for _ in range(NumberOfProcesses)
-    ]
-    for worker in workers:
-        worker.start()
-
-    # Get and send results
-    for _ in range(tasks):
-        resultSent = False
-        wasCancelled = False
-
-        while not resultSent:
-            try:
-                # get result (waiting max. 3 seconds and send it to frontend
-                filename, result = doneQueue.get()
-                send(fx, filename, result)
-                resultSent = True
-            except queue.Empty:
-                # ignore empty queue, just carry on
-                if cancelled():
-                    wasCancelled = True
-                    break
-
-        if wasCancelled or cancelled():
-            # just exit the loop ignoring the results of queued tasks
-            break
-
-        if argumentsList:
-            taskQueue.put(argumentsList.pop(0))
-
-    # Tell child processes to stop
-    for _ in range(NumberOfProcesses):
-        taskQueue.put("STOP")
-
-    for worker in workers:
-        worker.join()
-        worker.close()
-
-    taskQueue.close()
-    doneQueue.close()
-
-
-def workerTask(inputQueue, outputQueue):
-    """
-    Module function acting as the parallel worker for the syntax check.
-
-    @param inputQueue input queue (multiprocessing.Queue)
-    @param outputQueue output queue (multiprocessing.Queue)
-    """
-    for filename, args in iter(inputQueue.get, "STOP"):
-        source = args[0]
-        result = __jsSyntaxCheck(filename, source)
-        outputQueue.put((filename, result))
-
-
-def __jsSyntaxCheck(file, codestring):
-    """
-    Function to check a Javascript source file for syntax errors.
-
-    @param file source filename (string)
-    @param codestring string containing the code to check (string)
-    @return dictionary with the keys 'error' and 'warnings' which
-            hold a list containing details about the error/ warnings
-            (file name, line number, column, codestring (only at syntax
-            errors), the message, a list with arguments for the message)
-    """
-    import jasy.script.parse.Parser as jsParser  # __IGNORE_WARNING_I102__
-    import jasy.script.tokenize.Tokenizer as jsTokenizer  # __IGNORE_WARNING_I102__
-
-    try:
-        jsParser.parse(codestring, file)
-    except (jsParser.SyntaxError, jsTokenizer.ParseError) as exc:
-        details = exc.args[0]
-        error, details = details.splitlines()
-        fn, line = details.strip().rsplit(":", 1)
-        error = error.split(":", 1)[1].strip()
-
-        cline = min(len(codestring.splitlines()), int(line)) - 1
-        code = codestring.splitlines()[cline]
-        return [{"error": (fn, int(line), 0, code, error)}]
-    except IndexError:
-        error = "Incomplete source file"
-        splittedCode = codestring.splitlines()
-        return [
-            {
-                "error": (
-                    file,
-                    len(splittedCode) + 1,
-                    len(splittedCode[-1]),
-                    splittedCode[-1],
-                    error,
-                )
-            }
-        ]
-
-    return [{}]
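
Note: the removed jsSyntaxBatchCheck() above implements a classic queue-and-sentinel worker pool. The following minimal sketch reproduces that pattern in isolation; checkOne() is a purely illustrative stand-in for the bundled Jasy parser call.

    import multiprocessing


    def checkOne(filename, source):
        # Stand-in for the per-file syntax check; the removed module called
        # the bundled Jasy parser here.
        return [{}] if source.strip() else [
            {"error": (filename, 1, 0, "", "Incomplete source file")}
        ]


    def worker(taskQueue, doneQueue):
        # Consume tasks until the "STOP" sentinel arrives.
        for filename, source in iter(taskQueue.get, "STOP"):
            doneQueue.put((filename, checkOne(filename, source)))


    def batchCheck(argumentsList, maxProcesses=0):
        # 0 means "derive from the CPU count", as in the removed function.
        processes = maxProcesses or max(multiprocessing.cpu_count() - 1, 1)
        taskQueue = multiprocessing.Queue()
        doneQueue = multiprocessing.Queue()

        for task in argumentsList:
            taskQueue.put(task)

        workers = [
            multiprocessing.Process(target=worker, args=(taskQueue, doneQueue))
            for _ in range(processes)
        ]
        for w in workers:
            w.start()

        # Collect one result per submitted task.
        results = [doneQueue.get() for _ in range(len(argumentsList))]

        # One sentinel per worker shuts the pool down.
        for _ in workers:
            taskQueue.put("STOP")
        for w in workers:
            w.join()
        return results


    if __name__ == "__main__":
        print(batchCheck([("a.js", "var x = 1;"), ("b.js", "")]))

Unlike this sketch, the removed implementation drip-fed tasks (two per worker initially), polled a cancellation callback, and streamed each result to the IDE front end via send().
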
--- a/src/eric7/Plugins/PluginSyntaxChecker.py	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/Plugins/PluginSyntaxChecker.py	Sat Apr 01 11:09:00 2023 +0200
@@ -76,20 +76,6 @@
             self.syntaxCheckService.serviceErrorPy3,
         )
 
-        # JavaScript syntax check via Python3
-        self.syntaxCheckService.addLanguage(
-            "JavaScript",
-            "Python3",
-            path,
-            "jsCheckSyntax",
-            lambda: [],  # No options
-            lambda: [".js"],
-            lambda fn, problems: self.syntaxCheckService.syntaxChecked.emit(
-                fn, problems
-            ),
-            self.syntaxCheckService.serviceErrorJavaScript,
-        )
-
         # YAML syntax check via Python3
         self.syntaxCheckService.addLanguage(
             "YAML",
--- a/src/eric7/ThirdParty/Jasy/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2013 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Package containing the JavaScript parser of the jasy web framework.
-"""
--- a/src/eric7/ThirdParty/Jasy/jasy/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-"""
-**Jasy - Web Tooling Framework**
-
-Jasy is a powerful Python3-based tooling framework.
-It makes it easy to manage heavy web projects.
-Its main goal is to offer an API which could be used by developers to write their custom build/deployment scripts.
-"""
-
-from __future__ import unicode_literals
-
-__version__ = "1.5-beta6"
-__author__ = "Sebastian Werner <info@sebastian-werner.net>"
-
-import os.path
-datadir = os.path.join(os.path.dirname(__file__), "data")
-
-def info():
-    """
-    Prints information about Jasy to the console.
-    """
-
-    import jasy.core.Console as Console
-
-    print("Jasy %s is a powerful web tooling framework" % __version__)
-    print("Visit %s for details." % Console.colorize("https://github.com/sebastian-software/jasy", "underline"))
-    print()
-
-
-class UserError(Exception):
-    """
-    Standard Jasy error class raised whenever something happens which the system understands (somehow excepected)
-    """
-    pass
--- a/src/eric7/ThirdParty/Jasy/jasy/core/Console.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2013 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-"""
-Centralized logging for complete Jasy environment.
-"""
-
-from __future__ import unicode_literals
-
-import logging, sys
-
-__all__ = ["colorize", "header", "error", "warn", "info", "debug", "indent", "outdent"]
-
-
-
-# ---------------------------------------------
-# Colorized Output
-# ---------------------------------------------
-
-__colors = {
-    'bold'      : ['\033[1m',  '\033[22m'],
-    'italic'    : ['\033[3m',  '\033[23m'],
-    'underline' : ['\033[4m',  '\033[24m'],
-    'inverse'   : ['\033[7m',  '\033[27m'],
-
-    'white'     : ['\033[37m', '\033[39m'],
-    'grey'      : ['\033[90m', '\033[39m'],
-    'black'     : ['\033[30m', '\033[39m'],
-
-    'blue'      : ['\033[34m', '\033[39m'],
-    'cyan'      : ['\033[36m', '\033[39m'],
-    'green'     : ['\033[32m', '\033[39m'],
-    'magenta'   : ['\033[35m', '\033[39m'],
-    'red'       : ['\033[31m', '\033[39m'],
-    'yellow'    : ['\033[33m', '\033[39m']
-}
-
-def colorize(text, color="red"):
-    """Uses to colorize the given text for output on Unix terminals"""
-
-    # Not supported on console on Windows native
-    # Note: Cygwin has a different platform value
-    if sys.platform == "win32":
-        return text
-
-    entry = __colors[color]
-    return "%s%s%s" % (entry[0], text, entry[1])
-
-
-
-# ---------------------------------------------
-# Logging API
-# ---------------------------------------------
-
-__level = 0
-
-def __format(text):
-    global __level
-
-    if __level == 0 or text == "":
-        return text
-    elif __level == 1:
-        return "- %s" % text
-    else:
-        return "%s- %s" % ("  " * (__level-1), text)
-
-def indent():
-    """
-    Increments global indenting level. Prepends spaces to the next
-    logging messages until outdent() is called.
-
-    Should be called whenever leaving a structural logging section.
-    """
-
-    global __level
-    __level += 1
-
-def outdent(all=False):
-    """
-    Decrements global indenting level.
-    Should be called whenever leaving a structural logging section.
-    """
-
-    global __level
-
-    if all:
-        __level = 0
-    else:
-        __level -= 1
-
-def error(text, *argv):
-    """Outputs an error message (visible by default)"""
-
-    logging.warn(__format(colorize(colorize(text, "red"), "bold")), *argv)
-
-def warn(text, *argv):
-    """Outputs an warning (visible by default)"""
-
-    logging.warn(__format(colorize(text, "red")), *argv)
-
-def info(text, *argv):
-    """Outputs an info message (visible by default, disable via --quiet option)"""
-
-    logging.info(__format(text), *argv)
-
-def debug(text, *argv):
-    """Output a debug message (hidden by default, enable via --verbose option)"""
-
-    logging.debug(__format(text), *argv)
-
-def header(title):
-    """Outputs the given title with prominent formatting"""
-
-    global __level
-    __level = 0
-
-    logging.info("")
-    logging.info(colorize(colorize(">>> %s" % title.upper(), "blue"), "bold"))
-    logging.info(colorize("-------------------------------------------------------------------------------", "blue"))
--- a/src/eric7/ThirdParty/Jasy/jasy/core/Text.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,87 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-import re
-
-
-#
-# MARKDOWN TO HTML
-#
-
-try:
-    # import hoedown
-    #
-    # hoedownExt = hoedown.EXT_AUTOLINK | hoedown.EXT_NO_INTRA_EMPHASIS | hoedown.EXT_FENCED_CODE | hoedown.EXT_TABLES | hoedown.EXT_FOOTNOTES | hoedown.EXT_QUOTE | hoedown.EXT_STRIKETHROUGH | hoedown.EXT_UNDERLINE | hoedown.EXT_HIGHLIGHT
-    # hoedownExt = hoedown.EXT_AUTOLINK
-    # hoedownRender = hoedown.HTML_SKIP_STYLE | hoedown.HTML_SMARTYPANTS
-
-    import misaka
-
-    hoedownExt = misaka.EXT_AUTOLINK | misaka.EXT_NO_INTRA_EMPHASIS | misaka.EXT_FENCED_CODE
-    hoedownRender = misaka.HTML_SKIP_STYLE | misaka.HTML_SMARTYPANTS
-    hoedown = misaka
-
-    supportsMarkdown = True
-
-except:
-    supportsMarkdown = False
-
-def markdownToHtml(markdownStr):
-    """
-    Converts Markdown to HTML. Supports GitHub's fenced code blocks,
-    auto linking and typographic features by SmartyPants.
-    """
-
-    return hoedown.html(markdownStr, hoedownExt, hoedownRender)
-
-
-#
-# HIGHLIGHT CODE BLOCKS
-#
-
-try:
-    from pygments import highlight
-    from pygments.formatters import HtmlFormatter
-    from pygments.lexers import get_lexer_by_name
-
-    # By http://misaka.61924.nl/#toc_3
-    codeblock = re.compile(r'<pre(?: lang="([a-z0-9]+)")?><code(?: class="([a-z0-9]+).*?")?>(.*?)</code></pre>', re.IGNORECASE | re.DOTALL)
-
-    supportsHighlighting = True
-
-except ImportError:
-
-    supportsHighlighting = False
-
-def highlightCodeBlocks(html, tabsize=2, defaultlang="javascript"):
-    """
-    Patches 'code' elements in HTML to apply HTML based syntax highlighting. Automatically
-    chooses the matching language detected via a CSS class of the 'code' element.
-    """
-
-    def unescape(html):
-        html = html.replace('&lt;', '<')
-        html = html.replace('&gt;', '>')
-        html = html.replace('&amp;', '&')
-        html = html.replace('&quot;', '"')
-        return html.replace('&#39;', "'")
-
-    def replace(match):
-        language, classname, code = match.groups()
-        if language is None:
-            language = classname if classname else defaultlang
-
-        lexer = get_lexer_by_name(language, tabsize=tabsize)
-        formatter = HtmlFormatter(linenos="table")
-
-        code = unescape(code)
-
-        # for some reason pygments escapes our code once again so we need to reverse it twice
-        return unescape(highlight(code, lexer, formatter))
-
-    return codeblock.sub(replace, html)
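The highlighting half of the removed Text module is plain Pygments usage; a minimal sketch of the same call pattern (the sample snippet is illustrative):

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

code = "function add(a, b) {\n    return a + b;\n}"
lexer = get_lexer_by_name("javascript", tabsize=2)
# Produces an HTML table with line numbers, as the removed helper did
html = highlight(code, lexer, HtmlFormatter(linenos="table"))
print(html.splitlines()[0])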
--- a/src/eric7/ThirdParty/Jasy/jasy/core/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,9 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2013 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-#
-# This is an eric dummy package to provide some special variants of modules
-# found in the standard jasy package
-#
--- a/src/eric7/ThirdParty/Jasy/jasy/license.md	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,21 +0,0 @@
-Copyright (c) 2011-2012 Zynga Inc. http://zynga.com/
-Copyright (c) 2013-2014 Sebastian Werner http://seabstian-werner.com
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
--- a/src/eric7/ThirdParty/Jasy/jasy/parse/AbstractNode.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,355 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2013-2014 Sebastian Werner
-#
-
-import json, copy
-
-class AbstractNode(list):
-
-    __slots__ = [
-        # core data
-        "line", "type", "tokenizer", "start", "end", "rel", "parent",
-
-        # dynamic added data by other modules
-        "comments", "scope", "values",
-
-        # node type specific
-        "value", "parenthesized", "fileId", "params",
-        "name", "initializer", "condition", "assignOp",
-        "thenPart", "elsePart", "statements",
-        "statement", "variables", "names", "postfix"
-    ]
-
-
-    def __init__(self, tokenizer=None, type=None, args=[]):
-        list.__init__(self)
-
-        self.start = 0
-        self.end = 0
-        self.line = None
-
-        if tokenizer:
-            token = getattr(tokenizer, "token", None)
-            if token:
-                # We may define a custom type but use the same positioning as another token
-                # e.g. transform curlys in block nodes, etc.
-                self.type = type if type else getattr(token, "type", None)
-                self.line = token.line
-
-                # Start & end are file positions for error handling.
-                self.start = token.start
-                self.end = token.end
-
-            else:
-                self.type = type
-                self.line = tokenizer.line
-                self.start = None
-                self.end = None
-
-            self.tokenizer = tokenizer
-
-        elif type:
-            self.type = type
-
-        for arg in args:
-            self.append(arg)
-
-
-    def getFileName(self):
-        """
-        Traverses up the tree to find a node with a fileId and returns it
-        """
-
-        node = self
-        while node:
-            fileId = getattr(node, "fileId", None)
-            if fileId is not None:
-                return fileId
-
-            node = getattr(node, "parent", None)
-
-
-    def getUnrelatedChildren(self):
-        """Collects all unrelated children"""
-
-        collection = []
-        for child in self:
-            if not hasattr(child, "rel"):
-                collection.append(child)
-
-        return collection
-
-
-    def getChildrenLength(self, filter=True):
-        """Number of (per default unrelated) children"""
-
-        count = 0
-        for child in self:
-            if not filter or not hasattr(child, "rel"):
-                count += 1
-        return count
-
-
-    def remove(self, kid):
-        """Removes the given kid"""
-
-        if not kid in self:
-            raise Exception("Given node is no child!")
-
-        if hasattr(kid, "rel"):
-            delattr(self, kid.rel)
-            del kid.rel
-            del kid.parent
-
-        list.remove(self, kid)
-
-
-    def insert(self, index, kid):
-        """Inserts the given kid at the given index"""
-
-        if index is None:
-            return self.append(kid)
-
-        if hasattr(kid, "parent"):
-            kid.parent.remove(kid)
-
-        kid.parent = self
-
-        return list.insert(self, index, kid)
-
-
-    def insertAll(self, index, kids):
-        """Inserts all kids starting with the given index"""
-
-        if index is None:
-            for kid in list(kids):
-                self.append(kid)
-        else:
-            for pos, kid in enumerate(list(kids)):
-                self.insert(index+pos, kid)
-
-
-    def insertAllReplace(self, orig, kids):
-        """Inserts all kids at the same position as the original node (which is removed afterwards)"""
-
-        index = self.index(orig)
-        for pos, kid in enumerate(list(kids)):
-            self.insert(index+pos, kid)
-
-        self.remove(orig)
-
-
-    def append(self, kid, rel=None):
-        """Appends the given kid with an optional relation hint"""
-
-        # kid can be null e.g. [1, , 2].
-        if kid:
-            if hasattr(kid, "parent"):
-                kid.parent.remove(kid)
-
-            # Debug
-            if not isinstance(kid, AbstractNode):
-                raise Exception("Invalid kid: %s" % kid)
-
-            if hasattr(kid, "tokenizer"):
-                if hasattr(kid, "start"):
-                    if not hasattr(self, "start") or self.start == None or kid.start < self.start:
-                        self.start = kid.start
-
-                if hasattr(kid, "end"):
-                    if not hasattr(self, "end") or self.end == None or self.end < kid.end:
-                        self.end = kid.end
-
-            kid.parent = self
-
-            # alias for function
-            if rel != None:
-                setattr(self, rel, kid)
-                setattr(kid, "rel", rel)
-
-        # Block None kids when they should be related
-        if not kid and rel:
-            return
-
-        return list.append(self, kid)
-
-
-    def replace(self, kid, repl):
-        """Replaces the given kid with a replacement kid"""
-
-        if repl in self:
-            self.remove(repl)
-
-        self[self.index(kid)] = repl
-
-        if hasattr(kid, "rel"):
-            repl.rel = kid.rel
-            setattr(self, kid.rel, repl)
-
-            # cleanup old kid
-            delattr(kid, "rel")
-
-        elif hasattr(repl, "rel"):
-            # delete old relation on new child
-            delattr(repl, "rel")
-
-        delattr(kid, "parent")
-        repl.parent = self
-
-        return kid
-
-
-    def toXml(self, format=True, indent=0, tab="  "):
-        """Converts the node to XML"""
-
-        lead = tab * indent if format else ""
-        innerLead = tab * (indent+1) if format else ""
-        lineBreak = "\n" if format else ""
-
-        relatedChildren = []
-        attrsCollection = []
-
-        for name in self.__slots__:
-            # "type" is used as node name - no need to repeat it as an attribute
-            # "parent" is a relation to the parent node - for serialization we ignore these at the moment
-            # "rel" is used internally to keep the relation to the parent - used by nodes which need to keep track of specific children
-            # "start" and "end" are for debugging only
-            if hasattr(self, name) and name not in ("type", "parent", "comments", "selector", "rel", "start", "end") and name[0] != "_":
-                value = getattr(self, name)
-                if isinstance(value, AbstractNode):
-                    if hasattr(value, "rel"):
-                        relatedChildren.append(value)
-
-                elif type(value) in (bool, int, float, str, list, set, dict):
-                    if type(value) == bool:
-                        value = "true" if value else "false"
-                    elif type(value) in (int, float):
-                        value = str(value)
-                    elif type(value) in (list, set, dict):
-                        if type(value) == dict:
-                            value = value.keys()
-                        if len(value) == 0:
-                            continue
-                        try:
-                            value = ",".join(value)
-                        except TypeError as ex:
-                            raise Exception("Invalid attribute list child at: %s: %s" % (name, ex))
-
-                    attrsCollection.append('%s=%s' % (name, json.dumps(value)))
-
-        attrs = (" " + " ".join(attrsCollection)) if len(attrsCollection) > 0 else ""
-
-        comments = getattr(self, "comments", None)
-        scope = getattr(self, "scope", None)
-        selector = getattr(self, "selector", None)
-
-        if len(self) == 0 and len(relatedChildren) == 0 and (not comments or len(comments) == 0) and not scope and not selector:
-            result = "%s<%s%s/>%s" % (lead, self.type, attrs, lineBreak)
-
-        else:
-            result = "%s<%s%s>%s" % (lead, self.type, attrs, lineBreak)
-
-            if comments:
-                for comment in comments:
-                    result += '%s<comment context="%s" variant="%s">%s</comment>%s' % (innerLead, comment.context, comment.variant, comment.text, lineBreak)
-
-            if scope:
-                for statKey in scope:
-                    statValue = scope[statKey]
-                    if statValue != None and len(statValue) > 0:
-                        if type(statValue) is set:
-                            statValue = ",".join(statValue)
-                        elif type(statValue) is dict:
-                            statValue = ",".join(statValue.keys())
-
-                        result += '%s<stat name="%s">%s</stat>%s' % (innerLead, statKey, statValue, lineBreak)
-
-            if selector:
-                for entry in selector:
-                    result += '%s<selector>%s</selector>%s' % (innerLead, entry, lineBreak)
-
-            for child in self:
-                if not child:
-                    result += "%s<none/>%s" % (innerLead, lineBreak)
-                elif not hasattr(child, "rel"):
-                    result += child.toXml(format, indent+1)
-                elif not child in relatedChildren:
-                    raise Exception("Oops, irritated by non related: %s in %s - child says it is related as %s" % (child.type, self.type, child.rel))
-
-            for child in relatedChildren:
-                result += "%s<%s>%s" % (innerLead, child.rel, lineBreak)
-                result += child.toXml(format, indent+2)
-                result += "%s</%s>%s" % (innerLead, child.rel, lineBreak)
-
-            result += "%s</%s>%s" % (lead, self.type, lineBreak)
-
-        return result
-
-
-    def __deepcopy__(self, memo):
-        """Used by deepcopy function to clone AbstractNode instances"""
-
-        CurrentClass = self.__class__
-
-        # Create copy
-        if hasattr(self, "tokenizer"):
-            result = CurrentClass(tokenizer=self.tokenizer)
-        else:
-            result = CurrentClass(type=self.type)
-
-        # Copy children
-        for child in self:
-            if child is None:
-                list.append(result, None)
-            else:
-                # Using simple list appends for better performance
-                childCopy = copy.deepcopy(child, memo)
-                childCopy.parent = result
-                list.append(result, childCopy)
-
-        # Sync attributes
-        # Note: "parent" attribute is handled by append() already
-        for name in self.__slots__:
-            if hasattr(self, name) and not name in ("parent", "tokenizer"):
-                value = getattr(self, name)
-                if value is None:
-                    pass
-                elif type(value) in (bool, int, float, str):
-                    setattr(result, name, value)
-                elif type(value) in (list, set, dict, CurrentClass):
-                    setattr(result, name, copy.deepcopy(value, memo))
-                # Scope can be assigned (will be re-created when needed for the copied node)
-                elif name == "scope":
-                    result.scope = self.scope
-
-        return result
-
-
-    def getSource(self):
-        """Returns the source code of the node"""
-
-        if not self.tokenizer:
-            raise Exception("Could not find source for node '%s'" % self.type)
-
-        if getattr(self, "start", None) is not None:
-            if getattr(self, "end", None) is not None:
-                return self.tokenizer.source[self.start:self.end]
-            return self.tokenizer.source[self.start:]
-
-        if getattr(self, "end", None) is not None:
-            return self.tokenizer.source[:self.end]
-
-        return self.tokenizer.source[:]
-
-
-    # Map Python built-ins
-    __repr__ = toXml
-    __str__ = toXml
-
-
-    def __eq__(self, other):
-        return self is other
-
-    def __bool__(self):
-        return True
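The class above follows a list-subclass AST pattern: children live in the list itself, while named relations ("condition", "thenPart", ...) are mirrored as attributes on the parent. A tiny self-contained sketch of that pattern (not the removed class itself):

class MiniNode(list):
    """Minimal list-based AST node with parent/relation bookkeeping."""

    __slots__ = ["type", "parent", "rel", "condition", "thenPart"]

    def __init__(self, type=None):
        super().__init__()
        self.type = type

    def append(self, kid, rel=None):
        kid.parent = self            # child knows its owner
        if rel is not None:
            setattr(self, rel, kid)  # named relation, e.g. "condition"
            kid.rel = rel
        return list.append(self, kid)

root = MiniNode("if")
root.append(MiniNode("true"), rel="condition")
print(root.condition.type, len(root))  # -> true 1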
--- a/src/eric7/ThirdParty/Jasy/jasy/script/api/Comment.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,677 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-import re
-
-import jasy.core.Text as Text
-import jasy.core.Console as Console
-
-from jasy import UserError
-from jasy.script.util import *
-
-
-# Used to measure the doc indent size (with leading stars in front of content)
-docIndentReg = re.compile(r"^(\s*\*\s*)(\S*)")
-
-# Used to split type lists as supported by throw, return and params
-listSplit = re.compile("\s*\|\s*")
-
-# Used to remove markup sequences after doc processing of comment text
-stripMarkup = re.compile(r"<.*?>")
-
-
-
-# Matches return blocks in comments
-returnMatcher = re.compile(r"^\s*\{([a-zA-Z0-9_ \.\|\[\]]+)\}")
-
-# Matches type definitions in comments
-typeMatcher = re.compile(r"^\s*\{=([a-zA-Z0-9_ \.]+)\}")
-
-# Matches tags
-tagMatcher = re.compile(r"#([a-zA-Z][a-zA-Z0-9]+)(\((\S+)\))?(\s|$)")
-
-# Matches param declarations in own dialect
-paramMatcher = re.compile(r"@([a-zA-Z0-9_][a-zA-Z0-9_\.]*[a-zA-Z0-9_]|[a-zA-Z0-9_]+)(\s*\{([a-zA-Z0-9_ \.\|\[\]]+?)(\s*\.{3}\s*)?((\s*\?\s*(\S+))|(\s*\?\s*))?\})?")
-
-# Matches links in own dialect
-linkMatcher = re.compile(r"(\{((static|member|property|event)\:)?([a-zA-Z0-9_\.]+)?(\#([a-zA-Z0-9_]+))?\})")
-
-# matches backticks and has a built-in failsafe for backticks which do not terminate on the same line
-tickMatcher = re.compile(r"(`[^\n`]*?`)")
-
-
-class CommentException(Exception):
-    """
-    Thrown when errors during comment processing are detected.
-    """
-
-    def __init__(self, message, lineNo=0):
-        Exception.__init__(self, "Comment error: %s (line: %s)" % (message, lineNo+1))
-
-
-
-
-class Comment():
-    """
-    Comment class is attached to parsed nodes and used to store all comment related information.
-
-    The class supports a new Markdown and TomDoc inspired dialect to make developers' lives easier and their work less repetitive.
-    """
-
-    # Relation to code
-    context = None
-
-    # Dictionary of tags
-    tags = None
-
-    # Dictionary of params
-    params = None
-
-    # List of return types
-    returns = None
-
-    # Static type
-    type = None
-
-    # Collected text of the comment (without the extracted doc relevant data)
-    text = None
-
-    # Text with extracted / parsed data
-    __processedText = None
-
-    # Text of the comment converted to HTML including highlighting (only for doc comment)
-    __highlightedText = None
-
-    # Text / Code Blocks in the comment
-    __blocks = None
-
-
-    def __init__(self, text, context=None, lineNo=0, indent="", fileId=None):
-
-        # Store context (relation to code)
-        self.context = context
-
-        # Store fileId
-        self.fileId = fileId
-
-        # Figure out the type of the comment based on the starting characters
-
-        # Inline comments
-        if text.startswith("//"):
-            # "// hello" => "   hello"
-            text = "  " + text[2:]
-            self.variant = "single"
-
-        # Doc comments
-        elif text.startswith("/**"):
-            # "/** hello */" => "    hello "
-            text = "   " + text[3:-2]
-            self.variant = "doc"
-
-        # Protected comments which should not be removed (e.g. these are used for license blocks)
-        elif text.startswith("/*!"):
-            # "/*! hello */" => "    hello "
-            text = "   " + text[3:-2]
-            self.variant = "protected"
-
-        # A normal multiline comment
-        elif text.startswith("/*"):
-            # "/* hello */" => "   hello "
-            text = "  " + text[2:-2]
-            self.variant = "multi"
-
-        else:
-            raise CommentException("Invalid comment text: %s" % text, lineNo)
-
-        # Multi line comments need to have their indentation removed
-        if "\n" in text:
-            text = self.__outdent(text, indent, lineNo)
-
-        # For single line comments strip the surrounding whitespace
-        else:
-            # " hello " => "hello"
-            text = text.strip()
-
-        # The text of the comment before any processing took place
-        self.text = text
-
-
-        # Perform annotation parsing, markdown conversion and code highlighting on doc blocks
-        if self.variant == "doc":
-
-            # Separate text and code blocks
-            self.__blocks = self.__splitBlocks(text)
-
-            # Re-combine everything and apply processing and formatting
-            plainText = '' # text without annotations but with markdown
-            for b in self.__blocks:
-
-                if b["type"] == "comment":
-
-                    processed = self.__processDoc(b["text"], lineNo)
-                    b["processed"] = processed
-
-                    if "<" in processed:
-                        plainText += stripMarkup.sub("", processed)
-
-                    else:
-                        plainText += processed
-
-                else:
-                    plainText += "\n\n" + b["text"] + "\n\n"
-
-            # The text without any annotations
-            self.text = plainText.strip()
-
-
-    def __splitBlocks(self, text):
-        """
-        Splits up text and code blocks in comments.
-
-        This will try to use hoedown for Markdown parsing if available and will
-        fallback to a simpler implementation in order to allow processing of
-        doc parameters and links without hoedown being installed.
-        """
-
-        if not Text.supportsMarkdown:
-            return self.__splitSimple(text)
-
-        marked = Text.markdownToHtml(text)
-
-        def unescape(html):
-            html = html.replace('&lt;', '<')
-            html = html.replace('&gt;', '>')
-            html = html.replace('&amp;', '&')
-            html = html.replace('&quot;', '"')
-            return html.replace('&#39;', "'")
-
-        parts = []
-
-        lineNo = 0
-        lines = text.split("\n")
-        markedLines = marked.split("\n")
-
-        i = 0
-        while i < len(markedLines):
-
-            l = markedLines[i]
-
-            # the original text of the line
-            parsed = unescape(stripMarkup.sub("", l))
-
-            # start of a code block, grab all text before it and move it into a block
-            if l.startswith('<pre><code>'):
-
-                # everything since the last code block and before this one must be text
-                comment = []
-                for s in range(lineNo, len(lines)):
-
-                    source = lines[s]
-                    if source.strip() == parsed.strip():
-                        lineNo = s
-                        break
-
-                    comment.append(source)
-
-                parts.append({
-                    "type": "comment",
-                    "text": "\n".join(comment)
-                })
-
-                # Find the end of the code block
-                e = i
-                while i < len(markedLines):
-                    l = markedLines[i]
-                    i += 1
-
-                    if l.startswith('</code></pre>'):
-                        break
-
-                lineCount = (i - e) - 1
-
-                # add the code block
-                parts.append({
-                    "type": "code",
-                    "text": "\n".join(lines[lineNo:lineNo + lineCount])
-                })
-
-                lineNo += lineCount
-
-            else:
-                i += 1
-
-        # append the rest of the comment as text
-        parts.append({
-            "type": "comment",
-            "text": "\n".join(lines[lineNo:])
-        })
-
-        return parts
-
-
-    def __splitSimple(self, text):
-        """Splits comment text and code blocks by manually parsing a subset of markdown"""
-
-        inCode = False
-        oldIndent = 0
-        parts = []
-        wasEmpty = False
-        wasList = False
-
-        lineNo = 0
-        lines = text.split("\n")
-
-        for s, l in enumerate(lines):
-
-            # ignore empty lines
-            if not l.strip() == "":
-
-                # get indentation value and change
-                indent = len(l) - len(l.lstrip())
-                change = indent - oldIndent
-
-                # detect code blocks
-                if change >= 4 and wasEmpty:
-                    if not wasList:
-                        oldIndent = indent
-                        inCode = True
-
-                        parts.append({
-                            "type": "comment",
-                            "text": "\n".join(lines[lineNo:s])
-                        })
-
-                        lineNo = s
-
-                # detect outdents
-                elif change < 0:
-                    inCode = False
-
-                    parts.append({
-                        "type": "code",
-                        "text": "\n".join(lines[lineNo:s - 1])
-                    })
-
-                    lineNo = s
-
-                # only keep track of the previous indentation outside of code blocks
-                if not inCode:
-                    oldIndent = indent
-
-                # remember whether this marked a list or not
-                wasList = l.strip().startswith('-') or l.strip().startswith('*')
-                wasEmpty = False
-
-            else:
-                wasEmpty = True
-
-        parts.append({
-            "type": "code" if inCode else "comment",
-            "text": "\n".join(lines[lineNo:])
-        })
-
-        return parts
-
-
-    def getHtml(self, highlight=True):
-        """
-        Returns the comment text converted to HTML
-
-        :param highlight: Whether to highlight the code
-        :type highlight: bool
-        """
-
-        if not Text.supportsMarkdown:
-            raise UserError("Markdown is not supported by the system. Documentation comments could not be converted to HTML.")
-
-        if highlight:
-
-            if self.__highlightedText is None:
-
-                highlightedText = ""
-
-                for block in self.__blocks:
-
-                    if block["type"] == "comment":
-                        highlightedText += Text.highlightCodeBlocks(Text.markdownToHtml(block["processed"]))
-                    else:
-                        highlightedText += "\n%s" % Text.highlightCodeBlocks(Text.markdownToHtml(block["text"]))
-
-                self.__highlightedText = highlightedText
-
-            return self.__highlightedText
-
-        else:
-
-            if self.__processedText is None:
-
-                processedText = ""
-
-                for block in self.__blocks:
-
-                    if block["type"] == "comment":
-                        processedText += Text.markdownToHtml(block["processed"])
-                    else:
-                        processedText += "\n%s\n\n" % block["text"]
-
-                self.__processedText = processedText.strip()
-
-            return self.__processedText
-
-
-    def hasContent(self):
-        return self.variant == "doc" and len(self.text)
-
-
-    def getTags(self):
-        return self.tags
-
-
-    def hasTag(self, name):
-        if not self.tags:
-            return False
-
-        return name in self.tags
-
-
-    def __outdent(self, text, indent, startLineNo):
-        """
-        Outdents multi-line comment text and filters empty lines
-        """
-
-        lines = []
-
-        # First, split up the comments lines and remove the leading indentation
-        for lineNo, line in enumerate((indent+text).split("\n")):
-
-            if line.startswith(indent):
-                lines.append(line[len(indent):].rstrip())
-
-            elif line.strip() == "":
-                lines.append("")
-
-            else:
-                # Only warn for doc comments, otherwise it might just be code commented out
-                # which is sometimes formatted pretty crazy when commented out
-                if self.variant == "doc":
-                    Console.warn("Could not outdent doc comment at line %s in %s", startLineNo+lineNo, self.fileId)
-
-                return text
-
-        # Find first line with real content, then grab the one after it to get the
-        # characters which need to be removed from the remaining lines
-        outdentString = ""
-        for lineNo, line in enumerate(lines):
-
-            if line != "" and line.strip() != "":
-                matchedDocIndent = docIndentReg.match(line)
-
-                if not matchedDocIndent:
-                    # As soon as we find a non doc indent like line we stop
-                    break
-
-                elif matchedDocIndent.group(2) != "":
-                    # otherwise we look for content behind the indent to get the
-                    # correct real indent (with spaces)
-                    outdentString = matchedDocIndent.group(1)
-                    break
-
-            lineNo += 1
-
-        # Process outdenting to all lines (remove the outdentString from the start of the lines)
-        if outdentString != "":
-
-            lineNo = 0
-            outdentStringLen = len(outdentString)
-
-            for lineNo, line in enumerate(lines):
-                if len(line) <= outdentStringLen:
-                    lines[lineNo] = ""
-
-                else:
-                    if not line.startswith(outdentString):
-
-                        # Only warn for doc comments, otherwise it might just be code commented out
-                        # which is sometimes formatted pretty crazy when commented out
-                        if self.variant == "doc":
-                            Console.warn("Invalid indentation in doc comment at line %s in %s", startLineNo+lineNo, self.fileId)
-
-                    else:
-                        lines[lineNo] = line[outdentStringLen:]
-
-        # Merge final lines and remove leading and trailing new lines
-        return "\n".join(lines).strip("\n")
-
-
-    def __processDoc(self, text, startLineNo):
-
-        text = self.__extractStaticType(text)
-        text = self.__extractReturns(text)
-        text = self.__extractTags(text)
-
-        # Collapse new empty lines at start/end
-        text = text.strip("\n\t ")
-
-        parsed = ''
-
-        # Now parse only the text outside of backticks
-        last = 0
-        def split(match):
-
-            # Grab the text before the back tick and process any parameters in it
-            nonlocal parsed
-            nonlocal last
-
-            start, end = match.span()
-            before = text[last:start]
-            parsed += self.__processParams(before) + match.group(1)
-            last = end
-
-        tickMatcher.sub(split, text)
-
-        # add the rest of the text
-        parsed += self.__processParams(text[last:])
-
-        text = self.__processLinks(parsed)
-
-        return text
-
-
-    def __splitTypeList(self, decl):
-
-        if decl is None:
-            return decl
-
-        splitted = listSplit.split(decl.strip())
-
-        result = []
-        for entry in splitted:
-
-            # Figure out if it is marked as array
-            isArray = False
-            if entry.endswith("[]"):
-                isArray = True
-                entry = entry[:-2]
-
-            store = {
-                "name" : entry
-            }
-
-            if isArray:
-                store["array"] = True
-
-            if entry in builtinTypes:
-                store["builtin"] = True
-
-            if entry in pseudoTypes:
-                store["pseudo"] = True
-
-            result.append(store)
-
-        return result
-
-
-
-    def __extractReturns(self, text):
-        """
-        Extracts leading return definition (when type is function)
-        """
-
-        def collectReturn(match):
-            self.returns = self.__splitTypeList(match.group(1))
-            return ""
-
-        return returnMatcher.sub(collectReturn, text)
-
-
-
-    def __extractStaticType(self, text):
-        """
-        Extracts leading type definition (when value is a static type)
-        """
-
-        def collectType(match):
-            self.type = match.group(1).strip()
-            return ""
-
-        return typeMatcher.sub(collectType, text)
-
-
-
-    def __extractTags(self, text):
-        """
-        Extracts all tags inside the given doc comment. They are removed from
-        the text and collected inside the "tags" key as a dict.
-        """
-
-        def collectTags(match):
-             if not self.tags:
-                 self.tags = {}
-
-             name = match.group(1)
-             param = match.group(3)
-
-             if name in self.tags:
-                 self.tags[name].add(param)
-             elif param:
-                 self.tags[name] = set([param])
-             else:
-                 self.tags[name] = True
-
-             return ""
-
-        return tagMatcher.sub(collectTags, text)
-
-
-    def __processParams(self, text):
-
-        def collectParams(match):
-
-            paramName = match.group(1)
-            paramTypes = match.group(3)
-            paramDynamic = match.group(4) is not None
-            paramOptional = match.group(5) is not None
-            paramDefault = match.group(7)
-
-            if paramTypes:
-                paramTypes = self.__splitTypeList(paramTypes)
-
-            if self.params is None:
-                self.params = {}
-
-            params = self.params
-            fullName = match.group(1).strip()
-            names = fullName.split('.')
-
-            for i, mapName in enumerate(names):
-
-                # Ensure we have the map object in the params
-                if not mapName in params:
-                    params[mapName] = {}
-
-                # Add new entries and overwrite if a type is defined in this entry
-                if not mapName in params or paramTypes is not None:
-
-                    # Make sure to not overwrite something like @options {Object} with the type of @options.x {Number}
-                    if i == len(names) - 1:
-
-                        paramEntry = params[mapName] = {}
-
-                        if paramTypes is not None:
-                            paramEntry["type"] = paramTypes
-
-                        if paramDynamic:
-                            paramEntry["dynamic"] = paramDynamic
-
-                        if paramOptional:
-                            paramEntry["optional"] = paramOptional
-
-                        if paramDefault is not None:
-                            paramEntry["default"] = paramDefault
-
-                    else:
-                        paramEntry = params[mapName]
-
-
-                else:
-                    paramEntry = params[mapName]
-
-                # create fields for new map level
-                if i + 1 < len(names):
-                    if not "fields" in paramEntry:
-                        paramEntry["fields"] = {}
-
-                    params = paramEntry["fields"]
-
-            return '<code class="param">%s</code>' % fullName
-
-        return paramMatcher.sub(collectParams, text)
-
-
-    def __processLinks(self, text):
-
-        def formatTypes(match):
-
-            parsedSection = match.group(3)
-            parsedFile = match.group(4)
-            parsedItem = match.group(6)
-
-            # Do not match {}
-            if parsedSection is None and parsedFile is None and parsedItem is None:
-                return match.group(1)
-
-            # Minor corrections
-            if parsedSection and not parsedItem:
-                parsedSection = ""
-
-            attr = ""
-            link = ""
-            label = ""
-
-            if parsedSection:
-                link += '%s:' % parsedSection
-
-            if parsedFile:
-                link += parsedFile
-                label += parsedFile
-
-            if parsedItem:
-                link += "~%s" % parsedItem
-                if label == "":
-                    label = parsedItem
-                else:
-                    label += "#%s" % parsedItem
-
-            # add link to attributes list
-            attr += ' href="#%s"' % link
-
-            # build final HTML
-            return '<a%s><code>%s</code></a>' % (attr, label)
-
-        return linkMatcher.sub(formatTypes, text)
-
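The parameter dialect handled by __processParams above ("@name {TypeA|TypeB}") can be illustrated with the core regular expression alone; a simplified, self-contained sketch (the real matcher also handles dotted names, defaults and variadic markers):

import re

# Simplified form of the "@name {TypeA|TypeB}" parameter dialect
param = re.compile(r"@([a-zA-Z0-9_]+)(?:\s*\{([a-zA-Z0-9_ .|\[\]]+)\})?")

doc = "Computes a sum. @a {Number} @b {Number|String}"
params = {
    name: (types.split("|") if types else None)
    for name, types in param.findall(doc)
}
print(params)  # -> {'a': ['Number'], 'b': ['Number', 'String']}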
--- a/src/eric7/ThirdParty/Jasy/jasy/script/api/Text.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-import re
-import jasy.core.Console as Console
-
-
-# Used to filter first paragraph from HTML
-paragraphExtract = re.compile(r"^(.*?)(\. |\? |\! |$)")
-newlineMatcher = re.compile(r"\n")
-
-# Used to remove markup sequences after doc processing of comment text
-stripMarkup = re.compile(r"<.*?>")
-
-def extractSummary(text):
-    try:
-        text = stripMarkup.sub("", newlineMatcher.sub(" ", text))
-        matched = paragraphExtract.match(text)
-    except TypeError:
-        matched = None
-
-    if matched:
-        summary = matched.group(1)
-        if summary is not None:
-            if not summary.endswith((".", "!", "?")):
-                summary = summary.strip() + "."
-            return summary
-
-    else:
-        Console.warn("Unable to extract summary for: %s", text)
-
-    return None
-
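extractSummary() above boils down to two regular expressions; a standalone sketch of the same idea:

import re

paragraph = re.compile(r"^(.*?)(\. |\? |\! |$)")
markup = re.compile(r"<.*?>")

def extract_summary(text):
    # Drop markup and newlines, keep the first sentence, ensure punctuation
    plain = markup.sub("", text.replace("\n", " "))
    matched = paragraph.match(plain)
    if not matched:
        return None
    summary = matched.group(1).strip()
    return summary if summary.endswith((".", "!", "?")) else summary + "."

print(extract_summary("Adds <b>two</b> numbers. Returns the sum."))
# -> Adds two numbers.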
--- a/src/eric7/ThirdParty/Jasy/jasy/script/output/Compressor.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,564 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-import re, sys, json
-
-from jasy.script.tokenize.Lang import keywords
-from jasy.script.parse.Lang import expressions, futureReserved
-
-high_unicode = re.compile(r"\\u[2-9A-Fa-f][0-9A-Fa-f]{3}")
-ascii_encoder = json.JSONEncoder(ensure_ascii=True)
-unicode_encoder = json.JSONEncoder(ensure_ascii=False)
-
-#
-# Class
-#
-
-class Compressor:
-    __semicolonSymbol = ";"
-    __commaSymbol = ","
-
-
-    def __init__(self, format=None):
-        if format:
-            if format.has("semicolon"):
-                self.__semicolonSymbol = ";\n"
-
-            if format.has("comma"):
-                self.__commaSymbol = ",\n"
-
-        self.__forcedSemicolon = False
-
-
-
-    #
-    # Main
-    #
-
-    def compress(self, node):
-        type = node.type
-        result = None
-
-        if type in self.__simple:
-            result = type
-        elif type in self.__prefixes:
-            if getattr(node, "postfix", False):
-                result = self.compress(node[0]) + self.__prefixes[node.type]
-            else:
-                result = self.__prefixes[node.type] + self.compress(node[0])
-
-        elif type in self.__dividers:
-            first = self.compress(node[0])
-            second = self.compress(node[1])
-            divider = self.__dividers[node.type]
-
-            # Fast path
-            if node.type not in ("plus", "minus"):
-                result = "%s%s%s" % (first, divider, second)
-
-            # Special code for dealing with situations like x + ++y and y-- - x
-            else:
-                result = first
-                if first.endswith(divider):
-                    result += " "
-
-                result += divider
-
-                if second.startswith(divider):
-                    result += " "
-
-                result += second
-
-        else:
-            try:
-                result = getattr(self, "type_%s" % type)(node)
-            except AttributeError:
-                raise Exception("Script compressor does not support type '%s' from line %s in file %s" % (type, node.line, node.getFileName()))
-
-        if getattr(node, "parenthesized", None):
-            return "(%s)" % result
-        else:
-            return result
-
-
-
-    #
-    # Helpers
-    #
-
-    def __statements(self, node):
-        result = []
-        for child in node:
-            result.append(self.compress(child))
-
-        return "".join(result)
-
-    def __handleForcedSemicolon(self, node):
-        if node.type == "semicolon" and not hasattr(node, "expression"):
-            self.__forcedSemicolon = True
-
-    def __addSemicolon(self, result):
-        if not result.endswith(self.__semicolonSymbol):
-            if self.__forcedSemicolon:
-                self.__forcedSemicolon = False
-
-            return result + self.__semicolonSymbol
-
-        else:
-            return result
-
-    def __removeSemicolon(self, result):
-        if self.__forcedSemicolon:
-            self.__forcedSemicolon = False
-            return result
-
-        if result.endswith(self.__semicolonSymbol):
-            return result[:-len(self.__semicolonSymbol)]
-        else:
-            return result
-
-
-    #
-    # Data
-    #
-
-    __simple_property = re.compile(r"^[a-zA-Z_$][a-zA-Z0-9_$]*$")
-    __number_property = re.compile(r"^[0-9]+$")
-
-    __simple = ["true", "false", "null", "this", "debugger"]
-
-    __dividers = {
-        "plus"        : '+',
-        "minus"       : '-',
-        "mul"         : '*',
-        "div"         : '/',
-        "mod"         : '%',
-        "dot"         : '.',
-        "or"          : "||",
-        "and"         : "&&",
-        "strict_eq"   : '===',
-        "eq"          : '==',
-        "strict_ne"   : '!==',
-        "ne"          : '!=',
-        "lsh"         : '<<',
-        "le"          : '<=',
-        "lt"          : '<',
-        "ursh"        : '>>>',
-        "rsh"         : '>>',
-        "ge"          : '>=',
-        "gt"          : '>',
-        "bitwise_or"  : '|',
-        "bitwise_xor" : '^',
-        "bitwise_and" : '&'
-    }
-
-    __prefixes = {
-        "increment"   : "++",
-        "decrement"   : "--",
-        "bitwise_not" : '~',
-        "not"         : "!",
-        "unary_plus"  : "+",
-        "unary_minus" : "-",
-        "delete"      : "delete ",
-        "new"         : "new ",
-        "typeof"      : "typeof ",
-        "void"        : "void "
-    }
-
-
-
-    #
-    # Script Scope
-    #
-
-    def type_script(self, node):
-        return self.__statements(node)
-
-
-
-    #
-    # Expressions
-    #
-
-    def type_comma(self, node):
-        return self.__commaSymbol.join(map(self.compress, node))
-
-    def type_object_init(self, node):
-        return "{%s}" % self.__commaSymbol.join(map(self.compress, node))
-
-    def type_property_init(self, node):
-        key = self.compress(node[0])
-        value = self.compress(node[1])
-
-        if type(key) in (int, float):
-            pass
-
-        elif self.__number_property.match(key):
-            pass
-
-        # Protect keywords and special characters
-        elif key in keywords or key in futureReserved or not self.__simple_property.match(key):
-            key = self.type_string(node[0])
-
-        return "%s:%s" % (key, value)
-
-    def type_array_init(self, node):
-        def helper(child):
-            return self.compress(child) if child != None else ""
-
-        return "[%s]" % ",".join(map(helper, node))
-
-    def type_array_comp(self, node):
-        return "[%s %s]" % (self.compress(node.expression), self.compress(node.tail))
-
-    def type_string(self, node):
-        # Omit writing real high unicode characters which are not supported well by browsers
-        ascii = ascii_encoder.encode(node.value)
-
-        if high_unicode.search(ascii):
-            return ascii
-        else:
-            return unicode_encoder.encode(node.value)
-
-    def type_number(self, node):
-        value = node.value
-
-        # Special handling for protected float/exponential
-        if type(value) == str:
-            # Convert zero-prefix
-            if value.startswith("0.") and len(value) > 2:
-                value = value[1:]
-
-            # Convert zero postfix
-            elif value.endswith(".0"):
-                value = value[:-2]
-
-        elif int(value) == value and node.parent.type != "dot":
-            value = int(value)
-
-        return "%s" % value
-
-    def type_regexp(self, node):
-        return node.value
-
-    def type_identifier(self, node):
-        return node.value
-
-    def type_list(self, node):
-        return ",".join(map(self.compress, node))
-
-    def type_index(self, node):
-        return "%s[%s]" % (self.compress(node[0]), self.compress(node[1]))
-
-    def type_declaration(self, node):
-        names = getattr(node, "names", None)
-        if names:
-            result = self.compress(names)
-        else:
-            result = node.name
-
-        initializer = getattr(node, "initializer", None)
-        if initializer:
-            result += "=%s" % self.compress(node.initializer)
-
-        return result
-
-    def type_assign(self, node):
-        assignOp = getattr(node, "assignOp", None)
-        operator = "=" if not assignOp else self.__dividers[assignOp] + "="
-
-        return self.compress(node[0]) + operator + self.compress(node[1])
-
-    def type_call(self, node):
-        return "%s(%s)" % (self.compress(node[0]), self.compress(node[1]))
-
-    def type_new_with_args(self, node):
-        result = "new %s" % self.compress(node[0])
-
-        # Compress new Object(); => new Object;
-        if len(node[1]) > 0:
-            result += "(%s)" % self.compress(node[1])
-        else:
-            parent = getattr(node, "parent", None)
-            if parent and parent.type == "dot":
-                result += "()"
-
-        return result
-
-    def type_exception(self, node):
-        return node.value
-
-    def type_generator(self, node):
-        """ Generator Expression """
-        result = self.compress(getattr(node, "expression"))
-        tail = getattr(node, "tail", None)
-        if tail:
-            result += " %s" % self.compress(tail)
-
-        return result
-
-    def type_comp_tail(self, node):
-        """  Comprehensions Tails """
-        result = self.compress(getattr(node, "for"))
-        guard = getattr(node, "guard", None)
-        if guard:
-            result += "if(%s)" % self.compress(guard)
-
-        return result
-
-    def type_in(self, node):
-        first = self.compress(node[0])
-        second = self.compress(node[1])
-
-        if first.endswith("'") or first.endswith('"'):
-            pattern = "%sin %s"
-        else:
-            pattern = "%s in %s"
-
-        return pattern % (first, second)
-
-    def type_instanceof(self, node):
-        first = self.compress(node[0])
-        second = self.compress(node[1])
-
-        return "%s instanceof %s" % (first, second)
-
-
-
-    #
-    # Statements :: Core
-    #
-
-    def type_block(self, node):
-        return "{%s}" % self.__removeSemicolon(self.__statements(node))
-
-    def type_let_block(self, node):
-        begin = "let(%s)" % ",".join(map(self.compress, node.variables))
-        if hasattr(node, "block"):
-            end = self.compress(node.block)
-        elif hasattr(node, "expression"):
-            end = self.compress(node.expression)
-
-        return begin + end
-
-    def type_const(self, node):
-        return self.__addSemicolon("const %s" % self.type_list(node))
-
-    def type_var(self, node):
-        return self.__addSemicolon("var %s" % self.type_list(node))
-
-    def type_let(self, node):
-        return self.__addSemicolon("let %s" % self.type_list(node))
-
-    def type_semicolon(self, node):
-        expression = getattr(node, "expression", None)
-        return self.__addSemicolon(self.compress(expression) if expression else "")
-
-    def type_label(self, node):
-        return self.__addSemicolon("%s:%s" % (node.label, self.compress(node.statement)))
-
-    def type_break(self, node):
-        return self.__addSemicolon("break" if not hasattr(node, "label") else "break %s" % node.label)
-
-    def type_continue(self, node):
-        return self.__addSemicolon("continue" if not hasattr(node, "label") else "continue %s" % node.label)
-
-
-    #
-    # Statements :: Functions
-    #
-
-    def type_function(self, node):
-        if node.type == "setter":
-            result = "set"
-        elif node.type == "getter":
-            result = "get"
-        else:
-            result = "function"
-
-        name = getattr(node, "name", None)
-        if name:
-            result += " %s" % name
-
-        params = getattr(node, "params", None)
-        result += "(%s)" % self.compress(params) if params else "()"
-
-        # keep expression closure format (may be micro-optimized for other code, too)
-        if getattr(node, "expressionClosure", False):
-            result += self.compress(node.body)
-        else:
-            result += "{%s}" % self.__removeSemicolon(self.compress(node.body))
-
-        return result
-
-    def type_getter(self, node):
-        return self.type_function(node)
-
-    def type_setter(self, node):
-        return self.type_function(node)
-
-    def type_return(self, node):
-        result = "return"
-        if hasattr(node, "value"):
-            valueCode = self.compress(node.value)
-
-            # Micro optimization: Don't need a space when a block/map/array/group/strings are returned
-            if not valueCode.startswith(("(","[","{","'",'"',"!","-","/")):
-                result += " "
-
-            result += valueCode
-
-        return self.__addSemicolon(result)
-
-
-
-    #
-    # Statements :: Exception Handling
-    #
-
-    def type_throw(self, node):
-        return self.__addSemicolon("throw %s" % self.compress(node.exception))
-
-    def type_try(self, node):
-        result = "try%s" % self.compress(node.tryBlock)
-
-        for catch in node:
-            if catch.type == "catch":
-                if hasattr(catch, "guard"):
-                    result += "catch(%s if %s)%s" % (self.compress(catch.exception), self.compress(catch.guard), self.compress(catch.block))
-                else:
-                    result += "catch(%s)%s" % (self.compress(catch.exception), self.compress(catch.block))
-
-        if hasattr(node, "finallyBlock"):
-            result += "finally%s" % self.compress(node.finallyBlock)
-
-        return result
-
-
-
-    #
-    # Statements :: Loops
-    #
-
-    def type_while(self, node):
-        result = "while(%s)%s" % (self.compress(node.condition), self.compress(node.body))
-        self.__handleForcedSemicolon(node.body)
-        return result
-
-
-    def type_do(self, node):
-        # Block unwrapping doesn't help to reduce size on this loop type,
-        # but if it happens (we don't like to modify a global function to fix a local issue),
-        # we need to fix the body and re-add braces around the statement
-        body = self.compress(node.body)
-        if not body.startswith("{"):
-            body = "{%s}" % body
-
-        return self.__addSemicolon("do%swhile(%s)" % (body, self.compress(node.condition)))
-
-
-    def type_for_in(self, node):
-        # Optional variable declarations
-        varDecl = getattr(node, "varDecl", None)
-
-        # Body is optional - at least in comprehensions tails
-        body = getattr(node, "body", None)
-        if body:
-            body = self.compress(body)
-        else:
-            body = ""
-
-        result = "for"
-        if node.isEach:
-            result += " each"
-
-        result += "(%s in %s)%s" % (self.__removeSemicolon(self.compress(node.iterator)), self.compress(node.object), body)
-
-        if body:
-            self.__handleForcedSemicolon(node.body)
-
-        return result
-
-
-    def type_for(self, node):
-        setup = getattr(node, "setup", None)
-        condition = getattr(node, "condition", None)
-        update = getattr(node, "update", None)
-
-        result = "for("
-        result += self.__addSemicolon(self.compress(setup) if setup else "")
-        result += self.__addSemicolon(self.compress(condition) if condition else "")
-        result += self.compress(update) if update else ""
-        result += ")%s" % self.compress(node.body)
-
-        self.__handleForcedSemicolon(node.body)
-        return result
-
-
-
-    #
-    # Statements :: Conditionals
-    #
-
-    def type_hook(self, node):
-        """aka ternary operator"""
-        condition = node.condition
-        thenPart = node.thenPart
-        elsePart = node.elsePart
-
-        if condition.type == "not":
-            [thenPart,elsePart] = [elsePart,thenPart]
-            condition = condition[0]
-
-        return "%s?%s:%s" % (self.compress(condition), self.compress(thenPart), self.compress(elsePart))
-
-
-    def type_if(self, node):
-        result = "if(%s)%s" % (self.compress(node.condition), self.compress(node.thenPart))
-
-        elsePart = getattr(node, "elsePart", None)
-        if elsePart:
-            result += "else"
-
-            elseCode = self.compress(elsePart)
-
-            # Micro optimization: Don't need a space when the child is a block
-            # At this time the brace could not be part of a map declaration (would be a syntax error)
-            if not elseCode.startswith(("{", "(", ";")):
-                result += " "
-
-            result += elseCode
-
-            self.__handleForcedSemicolon(elsePart)
-
-        return result
-
-
-    def type_switch(self, node):
-        result = "switch(%s){" % self.compress(node.discriminant)
-        for case in node:
-            if case.type == "case":
-                labelCode = self.compress(case.label)
-                if labelCode.startswith('"'):
-                    result += "case%s:" % labelCode
-                else:
-                    result += "case %s:" % labelCode
-            elif case.type == "default":
-                result += "default:"
-            else:
-                continue
-
-            for statement in case.statements:
-                temp = self.compress(statement)
-                if len(temp) > 0:
-                    result += self.__addSemicolon(temp)
-
-        return "%s}" % self.__removeSemicolon(result)
-
-
-
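Taken together, the removed parser and compressor formed a small JavaScript minifier. A hypothetical usage sketch, assuming the modules were still importable under their vendored package path (output shown is approximate):

import jasy.script.parse.Parser as Parser
import jasy.script.output.Compressor as Compressor

source = "var answer = 6 * 7;\nconsole.log( answer );"
tree = Parser.parse(source, "example.js")
print(Compressor.Compressor().compress(tree))
# approximately: var answer=6*7;console.log(answer);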
--- a/src/eric7/ThirdParty/Jasy/jasy/script/parse/Lang.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,211 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-futureReserved = set([
-    "abstract",
-    "boolean",
-    "byte",
-    "char",
-    "class",
-    "const",
-    "debugger",
-    "double",
-    "enum",
-    "export",
-    "extends",
-    "final",
-    "float",
-    "goto",
-    "implements",
-    "import",
-    "int",
-    "interface",
-    "long",
-    "native",
-    "package",
-    "private",
-    "protected",
-    "public",
-    "short",
-    "static",
-    "super",
-    "synchronized",
-    "throws",
-    "transient",
-    "volatile"
-])
-
-
-statements = [
-    # With semicolon at end
-    "semicolon",
-    "return",
-    "throw",
-    "label",
-    "break",
-    "continue",
-    "var",
-    "const",
-    "debugger",
-
-    # Only semicolon when no-block braces are created
-    "block",
-    "let_block",
-    "while",
-    "do",
-    "for",
-    "for_in",
-    "if",
-    "switch",
-    "hook",
-    "with",
-
-    # no semicolons
-    # function, setter and getter as statement_form or declared_form
-    "function",
-    "setter",
-    "getter",
-    "try",
-    "label"
-]
-
-
-# All allowed expression types of JavaScript 1.7
-# They may be separated by "comma" which is quite of special
-# and not allowed everywhere e.g. in conditional statements
-expressions = [
-    # Primary Expression - Part 1 (expressed form)
-    "function",
-
-    # Primary Expression - Part 2
-    "object_init",
-    "array_init",
-    "array_comp",
-
-    # Primary Expression - Part 3
-    "let",
-
-    # Primary Expression - Part 4
-    "null",
-    "this",
-    "true",
-    "false",
-    "identifier",
-    "number",
-    "string",
-    "regexp",
-
-    # Member Expression - Part 1
-    "new_with_args",
-    "new",
-
-    # Member Expression - Part 2
-    "dot",
-    "call",
-    "index",
-
-    # Unary Expression
-    "unary_plus",
-    "unary_minus",
-    "delete",
-    "void",
-    "typeof",
-    "not",
-    "bitwise_not",
-    "increment",
-    "decrement",
-
-    # Multiply Expression
-    "mul",
-    "div",
-    "mod",
-
-    # Add Expression
-    "plus",
-    "minus",
-
-    # Shift Expression
-    "lsh",
-    "rsh",
-    "ursh",
-
-    # Relational Expression
-    "lt",
-    "le",
-    "ge",
-    "gt",
-    "in",
-    "instanceof",
-
-    # Equality Expression
-    "eq",
-    "ne",
-    "strict_eq",
-    "strict_ne",
-
-    # BitwiseAnd Expression
-    "bitwise_and",
-
-    # BitwiseXor Expression
-    "bitwise_xor",
-
-    # BitwiseOr Expression
-    "bitwise_or",
-
-    # And Expression
-    "and",
-
-    # Or Expression
-    "or",
-
-    # Conditional Expression
-    "hook",
-
-    # Assign Expression
-    "assign",
-
-    # Expression
-    "comma"
-]
-
-
-
-
-def __createOrder():
-    expressions = [
-        ["comma"],
-        ["assign"],
-        ["hook"],
-        ["or"],
-        ["and"],
-        ["bitwise_or"],
-        ["bitwise_xor",],
-        ["bitwise_and"],
-        ["eq","ne","strict_eq","strict_ne"],
-        ["lt","le","ge","gt","in","instanceof"],
-        ["lsh","rsh","ursh"],
-        ["plus","minus"],
-        ["mul","div","mod"],
-        ["unary_plus","unary_minus","delete","void","typeof","not","bitwise_not","increment","decrement"],
-        ["dot","call","index"],
-        ["new_with_args","new"],
-        ["null","this","true","false","identifier","number","string","regexp"],
-        ["let"],
-        ["object_init","array_init","array_comp"],
-        ["function"]
-    ]
-
-    result = {}
-    for priority, itemList in enumerate(expressions):
-        for item in itemList:
-            result[item] = priority
-
-    return result
-
-expressionOrder = __createOrder()
-
--- a/src/eric7/ThirdParty/Jasy/jasy/script/parse/Node.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-import jasy.parse.AbstractNode as AbstractNode
-
-class Node(AbstractNode.AbstractNode):
-
-    __slots__ = [
-        # core data
-        "line", "type", "tokenizer", "start", "end", "rel", "parent",
-
-        # dynamically added data by other modules
-        "comments", "scope",
-
-        # node type specific
-        "value", "expression", "body", "functionForm", "parenthesized", "fileId", "params",
-        "name", "readOnly", "initializer", "condition", "isLoop", "isEach", "object", "assignOp",
-        "iterator", "thenPart", "exception", "elsePart", "setup", "postfix", "update", "tryBlock",
-        "block", "defaultIndex", "discriminant", "label", "statements", "finallyBlock",
-        "statement", "variables", "names", "guard", "for", "tail", "expressionClosure"
-    ]
-
--- a/src/eric7/ThirdParty/Jasy/jasy/script/parse/Parser.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1448 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors:
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010-2012)
-#
-
-from __future__ import unicode_literals
-
-import jasy.script.tokenize.Tokenizer
-import jasy.script.parse.VanillaBuilder
-import jasy.script.tokenize.Lang
-
-__all__ = [ "parse", "parseExpression" ]
-
-def parseExpression(source, fileId=None, line=1, builder=None):
-    if builder is None:
-        builder = jasy.script.parse.VanillaBuilder.VanillaBuilder()
-
-    # Convert source into expression statement to be friendly to the Tokenizer
-    if not source.endswith(";"):
-        source = source + ";"
-
-    tokenizer = jasy.script.tokenize.Tokenizer.Tokenizer(source, fileId, line)
-    staticContext = StaticContext(False, builder)
-
-    return Expression(tokenizer, staticContext)
-
-
-
-def parse(source, fileId=None, line=1, builder=None):
-    if builder is None:
-        builder = jasy.script.parse.VanillaBuilder.VanillaBuilder()
-
-    tokenizer = jasy.script.tokenize.Tokenizer.Tokenizer(source, fileId, line)
-    staticContext = StaticContext(False, builder)
-    node = Script(tokenizer, staticContext)
-
-    # store fileId on top-level node
-    node.fileId = tokenizer.fileId
-
-    # add missing comments, e.g. an empty file with only a comment, etc.
-    # If a comment is not attached to an inner node, it is attached to
-    # the top-level node, which is not correct, but might be better than
-    # just ignoring the comment altogether.
-    if len(node) > 0:
-        builder.COMMENTS_add(node[-1], None, tokenizer.getComments())
-    else:
-        builder.COMMENTS_add(node, None, tokenizer.getComments())
-
-    if not tokenizer.done():
-        raise SyntaxError("Unexpected end of file", tokenizer)
-
-    return node
-
-
-
-class SyntaxError(Exception):
-    def __init__(self, message, tokenizer):
-        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, tokenizer.fileId, tokenizer.line))
-
-
-# Used as a status container during tree-building for every function body and the global body
-class StaticContext(object):
-    # inFunction is used to check if a return statement appears in a valid context.
-    def __init__(self, inFunction, builder):
-        # Whether this is inside a function, mostly True, only for top-level scope it's False
-        self.inFunction = inFunction
-
-        self.hasEmptyReturn = False
-        self.hasReturnWithValue = False
-        self.isGenerator = False
-        self.blockId = 0
-        self.builder = builder
-        self.statementStack = []
-
-        # Sets to store variable uses
-        # self.functions = set()
-        # self.variables = set()
-
-        # Status
-        # self.needsHoisting = False
-        self.bracketLevel = 0
-        self.curlyLevel = 0
-        self.parenLevel = 0
-        self.hookLevel = 0
-
-        # Configure strict ecmascript 3 mode
-        self.ecma3OnlyMode = False
-
-        # Status flag during parsing
-        self.inForLoopInit = False
-
-
-def Script(tokenizer, staticContext):
-    """Parses the toplevel and def bodies."""
-    node = Statements(tokenizer, staticContext)
-
-    # change type from "block" to "script" for script root
-    node.type = "script"
-
-    # copy over data from compiler context
-    # node.functions = staticContext.functions
-    # node.variables = staticContext.variables
-
-    return node
-
-
-def nest(tokenizer, staticContext, node, func, end=None):
-    """Statement stack and nested statement handler."""
-    staticContext.statementStack.append(node)
-    node = func(tokenizer, staticContext)
-    staticContext.statementStack.pop()
-    end and tokenizer.mustMatch(end)
-
-    return node
-
-
-def Statements(tokenizer, staticContext):
-    """Parses a list of Statements."""
-
-    builder = staticContext.builder
-    node = builder.BLOCK_build(tokenizer, staticContext.blockId)
-    staticContext.blockId += 1
-
-    builder.BLOCK_hoistLets(node)
-    staticContext.statementStack.append(node)
-
-    prevNode = None
-    while not tokenizer.done() and tokenizer.peek(True) != "right_curly":
-        comments = tokenizer.getComments()
-        childNode = Statement(tokenizer, staticContext)
-        builder.COMMENTS_add(childNode, prevNode, comments)
-        builder.BLOCK_addStatement(node, childNode)
-        prevNode = childNode
-
-    staticContext.statementStack.pop()
-    builder.BLOCK_finish(node)
-
-    # if getattr(node, "needsHoisting", False):
-    #     # TODO
-    #     raise Exception("Needs hoisting went true!!!")
-    #     builder.setHoists(node.id, node.variables)
-    #     # Propagate up to the function.
-    #     staticContext.needsHoisting = True
-
-    return node
-
-
-def Block(tokenizer, staticContext):
-    tokenizer.mustMatch("left_curly")
-    node = Statements(tokenizer, staticContext)
-    tokenizer.mustMatch("right_curly")
-
-    return node
-
-
-def Statement(tokenizer, staticContext):
-    """Parses a Statement."""
-
-    tokenType = tokenizer.get(True)
-    builder = staticContext.builder
-
-    # Cases for statements ending in a right curly return early, avoiding the
-    # common semicolon insertion magic after this switch.
-
-    if tokenType == "function":
-        # "declared_form" extends functions of staticContext, "statement_form" doesn'tokenizer.
-        if len(staticContext.statementStack) > 1:
-            kind = "statement_form"
-        else:
-            kind = "declared_form"
-
-        return FunctionDefinition(tokenizer, staticContext, True, kind)
-
-
-    elif tokenType == "left_curly":
-        node = Statements(tokenizer, staticContext)
-        tokenizer.mustMatch("right_curly")
-
-        return node
-
-
-    elif tokenType == "if":
-        node = builder.IF_build(tokenizer)
-        builder.IF_setCondition(node, ParenExpression(tokenizer, staticContext))
-        staticContext.statementStack.append(node)
-        builder.IF_setThenPart(node, Statement(tokenizer, staticContext))
-
-        if tokenizer.match("else"):
-            comments = tokenizer.getComments()
-            elsePart = Statement(tokenizer, staticContext)
-            builder.COMMENTS_add(elsePart, node, comments)
-            builder.IF_setElsePart(node, elsePart)
-
-        staticContext.statementStack.pop()
-        builder.IF_finish(node)
-
-        return node
-
-
-    elif tokenType == "switch":
-        # This allows CASEs after a "default", which is in the standard.
-        node = builder.SWITCH_build(tokenizer)
-        builder.SWITCH_setDiscriminant(node, ParenExpression(tokenizer, staticContext))
-        staticContext.statementStack.append(node)
-
-        tokenizer.mustMatch("left_curly")
-        tokenType = tokenizer.get()
-
-        while tokenType != "right_curly":
-            if tokenType == "default":
-                if node.defaultIndex >= 0:
-                    raise SyntaxError("More than one switch default", tokenizer)
-
-                childNode = builder.DEFAULT_build(tokenizer)
-                builder.SWITCH_setDefaultIndex(node, len(node)-1)
-                tokenizer.mustMatch("colon")
-                builder.DEFAULT_initializeStatements(childNode, tokenizer)
-
-                while True:
-                    tokenType=tokenizer.peek(True)
-                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
-                        break
-                    builder.DEFAULT_addStatement(childNode, Statement(tokenizer, staticContext))
-
-                builder.DEFAULT_finish(childNode)
-
-            elif tokenType == "case":
-                childNode = builder.CASE_build(tokenizer)
-                builder.CASE_setLabel(childNode, Expression(tokenizer, staticContext))
-                tokenizer.mustMatch("colon")
-                builder.CASE_initializeStatements(childNode, tokenizer)
-
-                while True:
-                    tokenType=tokenizer.peek(True)
-                    if tokenType == "case" or tokenType == "default" or tokenType == "right_curly":
-                        break
-                    builder.CASE_addStatement(childNode, Statement(tokenizer, staticContext))
-
-                builder.CASE_finish(childNode)
-
-            else:
-                raise SyntaxError("Invalid switch case", tokenizer)
-
-            builder.SWITCH_addCase(node, childNode)
-            tokenType = tokenizer.get()
-
-        staticContext.statementStack.pop()
-        builder.SWITCH_finish(node)
-
-        return node
-
-
-    elif tokenType == "for":
-        node = builder.FOR_build(tokenizer)
-        forBlock = None
-
-        if tokenizer.match("identifier") and tokenizer.token.value == "each":
-            builder.FOR_rebuildForEach(node)
-
-        tokenizer.mustMatch("left_paren")
-        tokenType = tokenizer.peek()
-        childNode = None
-
-        if tokenType != "semicolon":
-            staticContext.inForLoopInit = True
-
-            if tokenType == "var" or tokenType == "const":
-                tokenizer.get()
-                childNode = Variables(tokenizer, staticContext)
-
-            elif tokenType == "let":
-                tokenizer.get()
-
-                if tokenizer.peek() == "left_paren":
-                    childNode = LetBlock(tokenizer, staticContext, False)
-
-                else:
-                    # Let in for head, we need to add an implicit block
-                    # around the rest of the for.
-                    forBlock = builder.BLOCK_build(tokenizer, staticContext.blockId)
-                    staticContext.blockId += 1
-                    staticContext.statementStack.append(forBlock)
-                    childNode = Variables(tokenizer, staticContext, forBlock)
-
-            else:
-                childNode = Expression(tokenizer, staticContext)
-
-            staticContext.inForLoopInit = False
-
-        if childNode and tokenizer.match("in"):
-            builder.FOR_rebuildForIn(node)
-            builder.FOR_setObject(node, Expression(tokenizer, staticContext), forBlock)
-
-            if childNode.type == "var" or childNode.type == "let":
-                if len(childNode) != 1:
-                    raise SyntaxError("Invalid for..in left-hand side", tokenizer)
-
-                builder.FOR_setIterator(node, childNode, forBlock)
-
-            else:
-                builder.FOR_setIterator(node, childNode, forBlock)
-
-        else:
-            builder.FOR_setSetup(node, childNode)
-            tokenizer.mustMatch("semicolon")
-
-            if node.isEach:
-                raise SyntaxError("Invalid for each..in loop", tokenizer)
-
-            if tokenizer.peek() == "semicolon":
-                builder.FOR_setCondition(node, None)
-            else:
-                builder.FOR_setCondition(node, Expression(tokenizer, staticContext))
-
-            tokenizer.mustMatch("semicolon")
-
-            if tokenizer.peek() == "right_paren":
-                builder.FOR_setUpdate(node, None)
-            else:
-                builder.FOR_setUpdate(node, Expression(tokenizer, staticContext))
-
-        tokenizer.mustMatch("right_paren")
-        builder.FOR_setBody(node, nest(tokenizer, staticContext, node, Statement))
-
-        if forBlock:
-            builder.BLOCK_finish(forBlock)
-            staticContext.statementStack.pop()
-
-        builder.FOR_finish(node)
-        return node
-
-
-    elif tokenType == "while":
-        node = builder.WHILE_build(tokenizer)
-
-        builder.WHILE_setCondition(node, ParenExpression(tokenizer, staticContext))
-        builder.WHILE_setBody(node, nest(tokenizer, staticContext, node, Statement))
-        builder.WHILE_finish(node)
-
-        return node
-
-
-    elif tokenType == "do":
-        node = builder.DO_build(tokenizer)
-
-        builder.DO_setBody(node, nest(tokenizer, staticContext, node, Statement, "while"))
-        builder.DO_setCondition(node, ParenExpression(tokenizer, staticContext))
-        builder.DO_finish(node)
-
-        if not staticContext.ecma3OnlyMode:
-            # <script language="JavaScript"> (without version hints) may need
-            # automatic semicolon insertion without a newline after do-while.
-            # See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
-            tokenizer.match("semicolon")
-            return node
-
-        # NO RETURN
-
-
-    elif tokenType == "break" or tokenType == "continue":
-        if tokenType == "break":
-            node = builder.BREAK_build(tokenizer)
-        else:
-            node = builder.CONTINUE_build(tokenizer)
-
-        if tokenizer.peekOnSameLine() == "identifier":
-            tokenizer.get()
-
-            if tokenType == "break":
-                builder.BREAK_setLabel(node, tokenizer.token.value)
-            else:
-                builder.CONTINUE_setLabel(node, tokenizer.token.value)
-
-        statementStack = staticContext.statementStack
-        i = len(statementStack)
-        label = node.label if hasattr(node, "label") else None
-
-        if label:
-            while True:
-                i -= 1
-                if i < 0:
-                    raise SyntaxError("Label not found", tokenizer)
-                if getattr(statementStack[i], "label", None) == label:
-                    break
-
-            #
-            # Both break and continue to label need to be handled specially
-            # within a labeled loop, so that they target that loop. If not in
-            # a loop, then break targets its labeled statement. Labels can be
-            # nested so we skip all labels immediately enclosing the nearest
-            # non-label statement.
-            #
-            while i < len(statementStack) - 1 and statementStack[i+1].type == "label":
-                i += 1
-
-            if i < len(statementStack) - 1 and getattr(statementStack[i+1], "isLoop", False):
-                i += 1
-            elif tokenType == "continue":
-                raise SyntaxError("Invalid continue", tokenizer)
-
-        else:
-            while True:
-                i -= 1
-                if i < 0:
-                    if tokenType == "break":
-                        raise SyntaxError("Invalid break", tokenizer)
-                    else:
-                        raise SyntaxError("Invalid continue", tokenizer)
-
-                if getattr(statementStack[i], "isLoop", False) or (tokenType == "break" and statementStack[i].type == "switch"):
-                    break
-
-        if tokenType == "break":
-            builder.BREAK_finish(node)
-        else:
-            builder.CONTINUE_finish(node)
-
-        # NO RETURN
-
-
-    elif tokenType == "try":
-        node = builder.TRY_build(tokenizer)
-        builder.TRY_setTryBlock(node, Block(tokenizer, staticContext))
-
-        while tokenizer.match("catch"):
-            childNode = builder.CATCH_build(tokenizer)
-            tokenizer.mustMatch("left_paren")
-            nextTokenType = tokenizer.get()
-
-            if nextTokenType == "left_bracket" or nextTokenType == "left_curly":
-                # Destructured catch identifiers.
-                tokenizer.unget()
-                exception = DestructuringExpression(tokenizer, staticContext, True)
-
-            elif nextTokenType == "identifier":
-                exception = builder.CATCH_wrapException(tokenizer)
-
-            else:
-                raise SyntaxError("Missing identifier in catch", tokenizer)
-
-            builder.CATCH_setException(childNode, exception)
-
-            if tokenizer.match("if"):
-                if staticContext.ecma3OnlyMode:
-                    raise SyntaxError("Illegal catch guard", tokenizer)
-
-                if node.getChildrenLength() > 0 and not node.getUnrelatedChildren()[0].guard:
-                    raise SyntaxError("Guarded catch after unguarded", tokenizer)
-
-                builder.CATCH_setGuard(childNode, Expression(tokenizer, staticContext))
-
-            else:
-                builder.CATCH_setGuard(childNode, None)
-
-            tokenizer.mustMatch("right_paren")
-
-            builder.CATCH_setBlock(childNode, Block(tokenizer, staticContext))
-            builder.CATCH_finish(childNode)
-
-            builder.TRY_addCatch(node, childNode)
-
-        builder.TRY_finishCatches(node)
-
-        if tokenizer.match("finally"):
-            builder.TRY_setFinallyBlock(node, Block(tokenizer, staticContext))
-
-        if node.getChildrenLength() == 0 and not hasattr(node, "finallyBlock"):
-            raise SyntaxError("Invalid try statement", tokenizer)
-
-        builder.TRY_finish(node)
-        return node
-
-
-    elif tokenType == "catch" or tokenType == "finally":
-        raise SyntaxError(tokenType + " without preceding try", tokenizer)
-
-
-    elif tokenType == "throw":
-        node = builder.THROW_build(tokenizer)
-
-        builder.THROW_setException(node, Expression(tokenizer, staticContext))
-        builder.THROW_finish(node)
-
-        # NO RETURN
-
-
-    elif tokenType == "return":
-        node = returnOrYield(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "with":
-        node = builder.WITH_build(tokenizer)
-
-        builder.WITH_setObject(node, ParenExpression(tokenizer, staticContext))
-        builder.WITH_setBody(node, nest(tokenizer, staticContext, node, Statement))
-        builder.WITH_finish(node)
-
-        return node
-
-
-    elif tokenType == "var" or tokenType == "const":
-        node = Variables(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "let":
-        if tokenizer.peek() == "left_paren":
-            node = LetBlock(tokenizer, staticContext, True)
-        else:
-            node = Variables(tokenizer, staticContext)
-
-        # NO RETURN
-
-
-    elif tokenType == "debugger":
-        node = builder.DEBUGGER_build(tokenizer)
-
-        # NO RETURN
-
-
-    elif tokenType == "newline" or tokenType == "semicolon":
-        node = builder.SEMICOLON_build(tokenizer)
-
-        builder.SEMICOLON_setExpression(node, None)
-        builder.SEMICOLON_finish(tokenizer)
-
-        return node
-
-
-    else:
-        if tokenType == "identifier":
-            tokenType = tokenizer.peek()
-
-            # Labeled statement.
-            if tokenType == "colon":
-                label = tokenizer.token.value
-                statementStack = staticContext.statementStack
-
-                i = len(statementStack)-1
-                while i >= 0:
-                    if getattr(statementStack[i], "label", None) == label:
-                        raise SyntaxError("Duplicate label", tokenizer)
-
-                    i -= 1
-
-                tokenizer.get()
-                node = builder.LABEL_build(tokenizer)
-
-                builder.LABEL_setLabel(node, label)
-                builder.LABEL_setStatement(node, nest(tokenizer, staticContext, node, Statement))
-                builder.LABEL_finish(node)
-
-                return node
-
-        # Expression statement.
-        # We unget the current token to parse the expression as a whole.
-        node = builder.SEMICOLON_build(tokenizer)
-        tokenizer.unget()
-        builder.SEMICOLON_setExpression(node, Expression(tokenizer, staticContext))
-        node.end = node.expression.end
-        builder.SEMICOLON_finish(node)
-
-        # NO RETURN
-
-
-    MagicalSemicolon(tokenizer)
-    return node
-
-
-
-def MagicalSemicolon(tokenizer):
-    if tokenizer.line == tokenizer.token.line:
-        tokenType = tokenizer.peekOnSameLine()
-
-        if tokenType != "end" and tokenType != "newline" and tokenType != "semicolon" and tokenType != "right_curly":
-            raise SyntaxError("Missing ; before statement", tokenizer)
-
-    tokenizer.match("semicolon")
-
-
-
-def returnOrYield(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.token.type
-
-    if tokenType == "return":
-        if not staticContext.inFunction:
-            raise SyntaxError("Return not in function", tokenizer)
-
-        node = builder.RETURN_build(tokenizer)
-
-    else:
-        if not staticContext.inFunction:
-            raise SyntaxError("Yield not in function", tokenizer)
-
-        staticContext.isGenerator = True
-        node = builder.YIELD_build(tokenizer)
-
-    nextTokenType = tokenizer.peek(True)
-    if nextTokenType != "end" and nextTokenType != "newline" and nextTokenType != "semicolon" and nextTokenType != "right_curly" and (tokenType != "yield" or (nextTokenType != tokenType and nextTokenType != "right_bracket" and nextTokenType != "right_paren" and nextTokenType != "colon" and nextTokenType != "comma")):
-        if tokenType == "return":
-            builder.RETURN_setValue(node, Expression(tokenizer, staticContext))
-            staticContext.hasReturnWithValue = True
-        else:
-            builder.YIELD_setValue(node, AssignExpression(tokenizer, staticContext))
-
-    elif tokenType == "return":
-        staticContext.hasEmptyReturn = True
-
-    # Disallow return v; in generator.
-    if staticContext.hasReturnWithValue and staticContext.isGenerator:
-        raise SyntaxError("Generator returns a value", tokenizer)
-
-    if tokenType == "return":
-        builder.RETURN_finish(node)
-    else:
-        builder.YIELD_finish(node)
-
-    return node
-
-
-
-def FunctionDefinition(tokenizer, staticContext, requireName, functionForm):
-    builder = staticContext.builder
-    functionNode = builder.FUNCTION_build(tokenizer)
-
-    if tokenizer.match("identifier"):
-        builder.FUNCTION_setName(functionNode, tokenizer.token.value)
-    elif requireName:
-        raise SyntaxError("Missing def identifier", tokenizer)
-
-    tokenizer.mustMatch("left_paren")
-
-    if not tokenizer.match("right_paren"):
-        builder.FUNCTION_initParams(functionNode, tokenizer)
-        prevParamNode = None
-        while True:
-            tokenType = tokenizer.get()
-            if tokenType == "left_bracket" or tokenType == "left_curly":
-                # Destructured formal parameters.
-                tokenizer.unget()
-                paramNode = DestructuringExpression(tokenizer, staticContext)
-
-            elif tokenType == "identifier":
-                paramNode = builder.FUNCTION_wrapParam(tokenizer)
-
-            else:
-                raise SyntaxError("Missing formal parameter", tokenizer)
-
-            builder.FUNCTION_addParam(functionNode, tokenizer, paramNode)
-            builder.COMMENTS_add(paramNode, prevParamNode, tokenizer.getComments())
-
-            if not tokenizer.match("comma"):
-                break
-
-            prevParamNode = paramNode
-
-        tokenizer.mustMatch("right_paren")
-
-    # Do we have an expression closure or a normal body?
-    tokenType = tokenizer.get()
-    if tokenType != "left_curly":
-        builder.FUNCTION_setExpressionClosure(functionNode, True)
-        tokenizer.unget()
-
-    childContext = StaticContext(True, builder)
-
-    if staticContext.inFunction:
-        # Inner functions don't reset block numbering, only functions at
-        # the top level of the program do.
-        childContext.blockId = staticContext.blockId
-
-    if tokenType != "left_curly":
-        builder.FUNCTION_setBody(functionNode, AssignExpression(tokenizer, staticContext))
-        if staticContext.isGenerator:
-            raise SyntaxError("Generator returns a value", tokenizer)
-
-    else:
-        builder.FUNCTION_hoistVars(childContext.blockId)
-        builder.FUNCTION_setBody(functionNode, Script(tokenizer, childContext))
-
-    if tokenType == "left_curly":
-        tokenizer.mustMatch("right_curly")
-
-    functionNode.end = tokenizer.token.end
-    functionNode.functionForm = functionForm
-
-    builder.COMMENTS_add(functionNode.body, functionNode.body, tokenizer.getComments())
-    builder.FUNCTION_finish(functionNode, staticContext)
-
-    return functionNode
-
-
-
-def Variables(tokenizer, staticContext, letBlock=None):
-    """Parses a comma-separated list of var declarations (and maybe initializations)."""
-
-    builder = staticContext.builder
-    if tokenizer.token.type == "var":
-        build = builder.VAR_build
-        addDecl = builder.VAR_addDecl
-        finish = builder.VAR_finish
-        childContext = staticContext
-
-    elif tokenizer.token.type == "const":
-        build = builder.CONST_build
-        addDecl = builder.CONST_addDecl
-        finish = builder.CONST_finish
-        childContext = staticContext
-
-    elif tokenizer.token.type == "let" or tokenizer.token.type == "left_paren":
-        build = builder.LET_build
-        addDecl = builder.LET_addDecl
-        finish = builder.LET_finish
-
-        if not letBlock:
-            statementStack = staticContext.statementStack
-            i = len(statementStack) - 1
-
-            # a BLOCK *must* be found.
-            while statementStack[i].type != "block":
-                i -= 1
-
-            # Lets at the function toplevel are just vars, at least in SpiderMonkey.
-            if i == 0:
-                build = builder.VAR_build
-                addDecl = builder.VAR_addDecl
-                finish = builder.VAR_finish
-                childContext = staticContext
-
-            else:
-                childContext = statementStack[i]
-
-        else:
-            childContext = letBlock
-
-    node = build(tokenizer)
-
-    while True:
-        tokenType = tokenizer.get()
-
-        # Done in Python port!
-        # FIXME Should have a special DECLARATION node instead of overloading
-        # IDENTIFIER to mean both identifier declarations and destructured
-        # declarations.
-        childNode = builder.DECL_build(tokenizer)
-
-        if tokenType == "left_bracket" or tokenType == "left_curly":
-            # Pass in childContext if we need to add each pattern matched into
-            # its variables, else pass in staticContext.
-            # Need to unget to parse the full destructured expression.
-            tokenizer.unget()
-            builder.DECL_setNames(childNode, DestructuringExpression(tokenizer, staticContext, True, childContext))
-
-            if staticContext.inForLoopInit and tokenizer.peek() == "in":
-                addDecl(node, childNode, childContext)
-                if tokenizer.match("comma"):
-                    continue
-                else:
-                    break
-
-            tokenizer.mustMatch("assign")
-            if tokenizer.token.assignOp:
-                raise SyntaxError("Invalid variable initialization", tokenizer)
-
-            # Parse the init as a normal assignment.
-            builder.DECL_setInitializer(childNode, AssignExpression(tokenizer, staticContext))
-            builder.DECL_finish(childNode)
-            addDecl(node, childNode, childContext)
-
-            # Copy over names for variable list
-            # for nameNode in childNode.names:
-            #    childContext.variables.add(nameNode.value)
-
-            if tokenizer.match("comma"):
-                continue
-            else:
-                break
-
-        if tokenType != "identifier":
-            raise SyntaxError("Missing variable name", tokenizer)
-
-        builder.DECL_setName(childNode, tokenizer.token.value)
-        builder.DECL_setReadOnly(childNode, node.type == "const")
-        addDecl(node, childNode, childContext)
-
-        if tokenizer.match("assign"):
-            if tokenizer.token.assignOp:
-                raise SyntaxError("Invalid variable initialization", tokenizer)
-
-            initializerNode = AssignExpression(tokenizer, staticContext)
-            builder.DECL_setInitializer(childNode, initializerNode)
-
-        builder.DECL_finish(childNode)
-
-        # If we directly use the node in "let" constructs
-        # if not hasattr(childContext, "variables"):
-        #    childContext.variables = set()
-
-        # childContext.variables.add(childNode.name)
-
-        if not tokenizer.match("comma"):
-            break
-
-    finish(node)
-    return node
-
-
-
-def LetBlock(tokenizer, staticContext, isStatement):
-    """Does not handle let inside of for loop init."""
-    builder = staticContext.builder
-
-    # tokenizer.token.type must be "let"
-    node = builder.LETBLOCK_build(tokenizer)
-    tokenizer.mustMatch("left_paren")
-    builder.LETBLOCK_setVariables(node, Variables(tokenizer, staticContext, node))
-    tokenizer.mustMatch("right_paren")
-
-    if isStatement and tokenizer.peek() != "left_curly":
-        # If this is really an expression in let statement guise, then we
-        # need to wrap the "let_block" node in a "semicolon" node so that we pop
-        # the return value of the expression.
-        childNode = builder.SEMICOLON_build(tokenizer)
-        builder.SEMICOLON_setExpression(childNode, node)
-        builder.SEMICOLON_finish(childNode)
-        isStatement = False
-
-    if isStatement:
-        childNode = Block(tokenizer, staticContext)
-        builder.LETBLOCK_setBlock(node, childNode)
-
-    else:
-        childNode = AssignExpression(tokenizer, staticContext)
-        builder.LETBLOCK_setExpression(node, childNode)
-
-    builder.LETBLOCK_finish(node)
-    return node
-
-
-def checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly=None, data=None):
-    if node.type == "array_comp":
-        raise SyntaxError("Invalid array comprehension left-hand side", tokenizer)
-
-    if node.type != "array_init" and node.type != "object_init":
-        return
-
-    builder = staticContext.builder
-
-    for child in node:
-        if child == None:
-            continue
-
-        if child.type == "property_init":
-            lhs = child[0]
-            rhs = child[1]
-        else:
-            lhs = None
-            rhs = None
-
-
-        if rhs and (rhs.type == "array_init" or rhs.type == "object_init"):
-            checkDestructuring(tokenizer, staticContext, rhs, simpleNamesOnly, data)
-
-        if lhs and simpleNamesOnly:
-            # In declarations, lhs must be simple names
-            if lhs.type != "identifier":
-                raise SyntaxError("Missing name in pattern", tokenizer)
-
-            elif data:
-                childNode = builder.DECL_build(tokenizer)
-                builder.DECL_setName(childNode, lhs.value)
-
-                # Don't need to set initializer because it's just for
-                # hoisting anyway.
-                builder.DECL_finish(childNode)
-
-                # Each pattern needs to be added to variables.
-                # data.variables.add(childNode.name)
-
-
-# JavaScript 1.7
-def DestructuringExpression(tokenizer, staticContext, simpleNamesOnly=None, data=None):
-    node = PrimaryExpression(tokenizer, staticContext)
-    checkDestructuring(tokenizer, staticContext, node, simpleNamesOnly, data)
-
-    return node
-
-
-# JavaScript 1.7
-def GeneratorExpression(tokenizer, staticContext, expression):
-    builder = staticContext.builder
-    node = builder.GENERATOR_build(tokenizer)
-
-    builder.GENERATOR_setExpression(node, expression)
-    builder.GENERATOR_setTail(node, comprehensionTail(tokenizer, staticContext))
-    builder.GENERATOR_finish(node)
-
-    return node
-
-
-# JavaScript 1.7 Comprehensions Tails (Generators / Arrays)
-def comprehensionTail(tokenizer, staticContext):
-    builder = staticContext.builder
-
-    # tokenizer.token.type must be "for"
-    body = builder.COMPTAIL_build(tokenizer)
-
-    while True:
-        node = builder.FOR_build(tokenizer)
-
-        # Comprehension tails are always for..in loops.
-        builder.FOR_rebuildForIn(node)
-        if tokenizer.match("identifier"):
-            # But sometimes they're for each..in.
-            if tokenizer.token.value == "each":
-                builder.FOR_rebuildForEach(node)
-            else:
-                tokenizer.unget()
-
-        tokenizer.mustMatch("left_paren")
-
-        tokenType = tokenizer.get()
-        if tokenType == "left_bracket" or tokenType == "left_curly":
-            tokenizer.unget()
-            # Destructured left side of for in comprehension tails.
-            builder.FOR_setIterator(node, DestructuringExpression(tokenizer, staticContext))
-
-        elif tokenType == "identifier":
-            # Removed variable/declaration substructure in Python port.
-            # Variable declarations are not allowed here. So why process them in such a way?
-
-            # declaration = builder.DECL_build(tokenizer)
-            # builder.DECL_setName(declaration, tokenizer.token.value)
-            # builder.DECL_finish(declaration)
-            # childNode = builder.VAR_build(tokenizer)
-            # builder.VAR_addDecl(childNode, declaration)
-            # builder.VAR_finish(childNode)
-            # builder.FOR_setIterator(node, declaration)
-
-            # Don't add to variables since the semantics of comprehensions are
-            # such that the variables are in their own function when desugared.
-
-            identifier = builder.PRIMARY_build(tokenizer, "identifier")
-            builder.FOR_setIterator(node, identifier)
-
-        else:
-            raise SyntaxError("Missing identifier", tokenizer)
-
-        tokenizer.mustMatch("in")
-        builder.FOR_setObject(node, Expression(tokenizer, staticContext))
-        tokenizer.mustMatch("right_paren")
-        builder.COMPTAIL_addFor(body, node)
-
-        if not tokenizer.match("for"):
-            break
-
-    # Optional guard.
-    if tokenizer.match("if"):
-        builder.COMPTAIL_setGuard(body, ParenExpression(tokenizer, staticContext))
-
-    builder.COMPTAIL_finish(body)
-
-    return body
-
-
-def ParenExpression(tokenizer, staticContext):
-    tokenizer.mustMatch("left_paren")
-
-    # Always accept the 'in' operator in a parenthesized expression,
-    # where it's unambiguous, even if we might be parsing the init of a
-    # for statement.
-    oldLoopInit = staticContext.inForLoopInit
-    staticContext.inForLoopInit = False
-    node = Expression(tokenizer, staticContext)
-    staticContext.inForLoopInit = oldLoopInit
-
-    err = "expression must be parenthesized"
-    if tokenizer.match("for"):
-        if node.type == "yield" and not node.parenthesized:
-            raise SyntaxError("Yield " + err, tokenizer)
-
-        if node.type == "comma" and not node.parenthesized:
-            raise SyntaxError("Generator " + err, tokenizer)
-
-        node = GeneratorExpression(tokenizer, staticContext, node)
-
-    tokenizer.mustMatch("right_paren")
-
-    return node
-
-
-def Expression(tokenizer, staticContext):
-    """Top-down expression parser matched against SpiderMonkey."""
-    builder = staticContext.builder
-    node = AssignExpression(tokenizer, staticContext)
-
-    if tokenizer.match("comma"):
-        childNode = builder.COMMA_build(tokenizer)
-        builder.COMMA_addOperand(childNode, node)
-        node = childNode
-        while True:
-            childNode = node[len(node)-1]
-            if childNode.type == "yield" and not childNode.parenthesized:
-                raise SyntaxError("Yield expression must be parenthesized", tokenizer)
-            builder.COMMA_addOperand(node, AssignExpression(tokenizer, staticContext))
-
-            if not tokenizer.match("comma"):
-                break
-
-        builder.COMMA_finish(node)
-
-    return node
-
-
-def AssignExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-
-    # Have to treat yield like an operand because it could be the leftmost
-    # operand of the expression.
-    if tokenizer.match("yield", True):
-        return returnOrYield(tokenizer, staticContext)
-
-    comments = tokenizer.getComments()
-    node = builder.ASSIGN_build(tokenizer)
-    lhs = ConditionalExpression(tokenizer, staticContext)
-    builder.COMMENTS_add(lhs, None, comments)
-
-    if not tokenizer.match("assign"):
-        builder.ASSIGN_finish(node)
-        return lhs
-
-    if lhs.type == "object_init" or lhs.type == "array_init":
-        checkDestructuring(tokenizer, staticContext, lhs)
-    elif lhs.type == "identifier" or lhs.type == "dot" or lhs.type == "index" or lhs.type == "call":
-        pass
-    else:
-        raise SyntaxError("Bad left-hand side of assignment", tokenizer)
-
-    builder.ASSIGN_setAssignOp(node, tokenizer.token.assignOp)
-    builder.ASSIGN_addOperand(node, lhs)
-    builder.ASSIGN_addOperand(node, AssignExpression(tokenizer, staticContext))
-    builder.ASSIGN_finish(node)
-
-    return node
-
-
-def ConditionalExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = OrExpression(tokenizer, staticContext)
-
-    if tokenizer.match("hook"):
-        childNode = node
-        node = builder.HOOK_build(tokenizer)
-        builder.HOOK_setCondition(node, childNode)
-
-        # Always accept the 'in' operator in the middle clause of a ternary,
-        # where it's unambiguous, even if we might be parsing the init of a
-        # for statement.
-        oldLoopInit = staticContext.inForLoopInit
-        staticContext.inForLoopInit = False
-        builder.HOOK_setThenPart(node, AssignExpression(tokenizer, staticContext))
-        staticContext.inForLoopInit = oldLoopInit
-
-        if not tokenizer.match("colon"):
-            raise SyntaxError("Missing : after ?", tokenizer)
-
-        builder.HOOK_setElsePart(node, AssignExpression(tokenizer, staticContext))
-        builder.HOOK_finish(node)
-
-    return node
-
-
-def OrExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = AndExpression(tokenizer, staticContext)
-
-    while tokenizer.match("or"):
-        childNode = builder.OR_build(tokenizer)
-        builder.OR_addOperand(childNode, node)
-        builder.OR_addOperand(childNode, AndExpression(tokenizer, staticContext))
-        builder.OR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def AndExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseOrExpression(tokenizer, staticContext)
-
-    while tokenizer.match("and"):
-        childNode = builder.AND_build(tokenizer)
-        builder.AND_addOperand(childNode, node)
-        builder.AND_addOperand(childNode, BitwiseOrExpression(tokenizer, staticContext))
-        builder.AND_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseOrExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseXorExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_or"):
-        childNode = builder.BITWISEOR_build(tokenizer)
-        builder.BITWISEOR_addOperand(childNode, node)
-        builder.BITWISEOR_addOperand(childNode, BitwiseXorExpression(tokenizer, staticContext))
-        builder.BITWISEOR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseXorExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = BitwiseAndExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_xor"):
-        childNode = builder.BITWISEXOR_build(tokenizer)
-        builder.BITWISEXOR_addOperand(childNode, node)
-        builder.BITWISEXOR_addOperand(childNode, BitwiseAndExpression(tokenizer, staticContext))
-        builder.BITWISEXOR_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def BitwiseAndExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = EqualityExpression(tokenizer, staticContext)
-
-    while tokenizer.match("bitwise_and"):
-        childNode = builder.BITWISEAND_build(tokenizer)
-        builder.BITWISEAND_addOperand(childNode, node)
-        builder.BITWISEAND_addOperand(childNode, EqualityExpression(tokenizer, staticContext))
-        builder.BITWISEAND_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def EqualityExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = RelationalExpression(tokenizer, staticContext)
-
-    while tokenizer.match("eq") or tokenizer.match("ne") or tokenizer.match("strict_eq") or tokenizer.match("strict_ne"):
-        childNode = builder.EQUALITY_build(tokenizer)
-        builder.EQUALITY_addOperand(childNode, node)
-        builder.EQUALITY_addOperand(childNode, RelationalExpression(tokenizer, staticContext))
-        builder.EQUALITY_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def RelationalExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    oldLoopInit = staticContext.inForLoopInit
-
-    # Uses of the in operator in shiftExprs are always unambiguous,
-    # so unset the flag that prohibits recognizing it.
-    staticContext.inForLoopInit = False
-    node = ShiftExpression(tokenizer, staticContext)
-
-    while tokenizer.match("lt") or tokenizer.match("le") or tokenizer.match("ge") or tokenizer.match("gt") or (oldLoopInit == False and tokenizer.match("in")) or tokenizer.match("instanceof"):
-        childNode = builder.RELATIONAL_build(tokenizer)
-        builder.RELATIONAL_addOperand(childNode, node)
-        builder.RELATIONAL_addOperand(childNode, ShiftExpression(tokenizer, staticContext))
-        builder.RELATIONAL_finish(childNode)
-        node = childNode
-
-    staticContext.inForLoopInit = oldLoopInit
-
-    return node
-
-
-def ShiftExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = AddExpression(tokenizer, staticContext)
-
-    while tokenizer.match("lsh") or tokenizer.match("rsh") or tokenizer.match("ursh"):
-        childNode = builder.SHIFT_build(tokenizer)
-        builder.SHIFT_addOperand(childNode, node)
-        builder.SHIFT_addOperand(childNode, AddExpression(tokenizer, staticContext))
-        builder.SHIFT_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def AddExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = MultiplyExpression(tokenizer, staticContext)
-
-    while tokenizer.match("plus") or tokenizer.match("minus"):
-        childNode = builder.ADD_build(tokenizer)
-        builder.ADD_addOperand(childNode, node)
-        builder.ADD_addOperand(childNode, MultiplyExpression(tokenizer, staticContext))
-        builder.ADD_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def MultiplyExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = UnaryExpression(tokenizer, staticContext)
-
-    while tokenizer.match("mul") or tokenizer.match("div") or tokenizer.match("mod"):
-        childNode = builder.MULTIPLY_build(tokenizer)
-        builder.MULTIPLY_addOperand(childNode, node)
-        builder.MULTIPLY_addOperand(childNode, UnaryExpression(tokenizer, staticContext))
-        builder.MULTIPLY_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def UnaryExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.get(True)
-
-    if tokenType in ["delete", "void", "typeof", "not", "bitwise_not", "plus", "minus"]:
-        node = builder.UNARY_build(tokenizer)
-        builder.UNARY_addOperand(node, UnaryExpression(tokenizer, staticContext))
-
-    elif tokenType == "increment" or tokenType == "decrement":
-        # Prefix increment/decrement.
-        node = builder.UNARY_build(tokenizer)
-        builder.UNARY_addOperand(node, MemberExpression(tokenizer, staticContext, True))
-
-    else:
-        tokenizer.unget()
-        node = MemberExpression(tokenizer, staticContext, True)
-
-        # Don't look across a newline boundary for a postfix {in,de}crement.
-        if tokenizer.tokens[(tokenizer.tokenIndex + tokenizer.lookahead - 1) & 3].line == tokenizer.line:
-            if tokenizer.match("increment") or tokenizer.match("decrement"):
-                childNode = builder.UNARY_build(tokenizer)
-                builder.UNARY_setPostfix(childNode)
-                builder.UNARY_finish(node)
-                builder.UNARY_addOperand(childNode, node)
-                node = childNode
-
-    builder.UNARY_finish(node)
-    return node
-
-
-def MemberExpression(tokenizer, staticContext, allowCallSyntax):
-    builder = staticContext.builder
-
-    if tokenizer.match("new"):
-        node = builder.MEMBER_build(tokenizer)
-        builder.MEMBER_addOperand(node, MemberExpression(tokenizer, staticContext, False))
-
-        if tokenizer.match("left_paren"):
-            builder.MEMBER_rebuildNewWithArgs(node)
-            builder.MEMBER_addOperand(node, ArgumentList(tokenizer, staticContext))
-
-        builder.MEMBER_finish(node)
-
-    else:
-        node = PrimaryExpression(tokenizer, staticContext)
-
-    while True:
-        tokenType = tokenizer.get()
-        if tokenType == "end":
-            break
-
-        if tokenType == "dot":
-            childNode = builder.MEMBER_build(tokenizer)
-            builder.MEMBER_addOperand(childNode, node)
-            tokenizer.mustMatch("identifier")
-            builder.MEMBER_addOperand(childNode, builder.MEMBER_build(tokenizer))
-
-        elif tokenType == "left_bracket":
-            childNode = builder.MEMBER_build(tokenizer, "index")
-            builder.MEMBER_addOperand(childNode, node)
-            builder.MEMBER_addOperand(childNode, Expression(tokenizer, staticContext))
-            tokenizer.mustMatch("right_bracket")
-
-        elif tokenType == "left_paren" and allowCallSyntax:
-            childNode = builder.MEMBER_build(tokenizer, "call")
-            builder.MEMBER_addOperand(childNode, node)
-            builder.MEMBER_addOperand(childNode, ArgumentList(tokenizer, staticContext))
-
-        else:
-            tokenizer.unget()
-            return node
-
-        builder.MEMBER_finish(childNode)
-        node = childNode
-
-    return node
-
-
-def ArgumentList(tokenizer, staticContext):
-    builder = staticContext.builder
-    node = builder.LIST_build(tokenizer)
-
-    if tokenizer.match("right_paren", True):
-        return node
-
-    while True:
-        childNode = AssignExpression(tokenizer, staticContext)
-        if childNode.type == "yield" and not childNode.parenthesized and tokenizer.peek() == "comma":
-            raise SyntaxError("Yield expression must be parenthesized", tokenizer)
-
-        if tokenizer.match("for"):
-            childNode = GeneratorExpression(tokenizer, staticContext, childNode)
-            if len(node) > 1 or tokenizer.peek(True) == "comma":
-                raise SyntaxError("Generator expression must be parenthesized", tokenizer)
-
-        builder.LIST_addOperand(node, childNode)
-        if not tokenizer.match("comma"):
-            break
-
-    tokenizer.mustMatch("right_paren")
-    builder.LIST_finish(node)
-
-    return node
-
-
-def PrimaryExpression(tokenizer, staticContext):
-    builder = staticContext.builder
-    tokenType = tokenizer.get(True)
-
-    if tokenType == "function":
-        node = FunctionDefinition(tokenizer, staticContext, False, "expressed_form")
-
-    elif tokenType == "left_bracket":
-        node = builder.ARRAYINIT_build(tokenizer)
-        while True:
-            tokenType = tokenizer.peek(True)
-            if tokenType == "right_bracket":
-                break
-
-            if tokenType == "comma":
-                tokenizer.get()
-                builder.ARRAYINIT_addElement(node, None)
-                continue
-
-            builder.ARRAYINIT_addElement(node, AssignExpression(tokenizer, staticContext))
-
-            if tokenType != "comma" and not tokenizer.match("comma"):
-                break
-
-        # If we matched exactly one element and got a "for", we have an
-        # array comprehension.
-        if len(node) == 1 and tokenizer.match("for"):
-            childNode = builder.ARRAYCOMP_build(tokenizer)
-            builder.ARRAYCOMP_setExpression(childNode, node[0])
-            builder.ARRAYCOMP_setTail(childNode, comprehensionTail(tokenizer, staticContext))
-            node = childNode
-
-        builder.COMMENTS_add(node, node, tokenizer.getComments())
-        tokenizer.mustMatch("right_bracket")
-        builder.PRIMARY_finish(node)
-
-    elif tokenType == "left_curly":
-        node = builder.OBJECTINIT_build(tokenizer)
-
-        if not tokenizer.match("right_curly"):
-            while True:
-                tokenType = tokenizer.get()
-                tokenValue = getattr(tokenizer.token, "value", None)
-                comments = tokenizer.getComments()
-
-                if tokenValue in ("get", "set") and tokenizer.peek() == "identifier":
-                    if staticContext.ecma3OnlyMode:
-                        raise SyntaxError("Illegal property accessor", tokenizer)
-
-                    fd = FunctionDefinition(tokenizer, staticContext, True, "expressed_form")
-                    builder.OBJECTINIT_addProperty(node, fd)
-
-                else:
-                    if tokenType == "identifier" or tokenType == "number" or tokenType == "string":
-                        id = builder.PRIMARY_build(tokenizer, "identifier")
-                        builder.PRIMARY_finish(id)
-
-                    elif tokenType == "right_curly":
-                        if staticContext.ecma3OnlyMode:
-                            raise SyntaxError("Illegal trailing ,", tokenizer)
-
-                        tokenizer.unget()
-                        break
-
-                    else:
-                        if tokenValue in jasy.script.tokenize.Lang.keywords:
-                            id = builder.PRIMARY_build(tokenizer, "identifier")
-                            builder.PRIMARY_finish(id)
-                        else:
-                            print("Value is '%s'" % tokenValue)
-                            raise SyntaxError("Invalid property name", tokenizer)
-
-                    if tokenizer.match("colon"):
-                        childNode = builder.PROPERTYINIT_build(tokenizer)
-                        builder.COMMENTS_add(childNode, node, comments)
-                        builder.PROPERTYINIT_addOperand(childNode, id)
-                        builder.PROPERTYINIT_addOperand(childNode, AssignExpression(tokenizer, staticContext))
-                        builder.PROPERTYINIT_finish(childNode)
-                        builder.OBJECTINIT_addProperty(node, childNode)
-
-                    else:
-                        # Support, e.g., |var {x, y} = o| as destructuring shorthand
-                        # for |var {x: x, y: y} = o|, per proposed JS2/ES4 for JS1.8.
-                        if tokenizer.peek() != "comma" and tokenizer.peek() != "right_curly":
-                            raise SyntaxError("Missing : after property", tokenizer)
-                        builder.OBJECTINIT_addProperty(node, id)
-
-                if not tokenizer.match("comma"):
-                    break
-
-            builder.COMMENTS_add(node, node, tokenizer.getComments())
-            tokenizer.mustMatch("right_curly")
-
-        builder.OBJECTINIT_finish(node)
-
-    elif tokenType == "left_paren":
-        # ParenExpression does its own matching on parentheses, so we need to unget.
-        tokenizer.unget()
-        node = ParenExpression(tokenizer, staticContext)
-        node.parenthesized = True
-
-    elif tokenType == "let":
-        node = LetBlock(tokenizer, staticContext, False)
-
-    elif tokenType in ["null", "this", "true", "false", "identifier", "number", "string", "regexp"]:
-        node = builder.PRIMARY_build(tokenizer, tokenType)
-        builder.PRIMARY_finish(node)
-
-    else:
-        raise SyntaxError("Missing operand. Found type: %s" % tokenType, tokenizer)
-
-    return node
--- a/src/eric7/ThirdParty/Jasy/jasy/script/parse/VanillaBuilder.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,679 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors:
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
-#
-
-from __future__ import unicode_literals
-
-import jasy.script.parse.Node
-
-class VanillaBuilder:
-    """The vanilla AST builder."""
-
-    def COMMENTS_add(self, currNode, prevNode, comments):
-        if not comments:
-            return
-
-        currComments = []
-        prevComments = []
-        for comment in comments:
-            # post comments - for previous node
-            if comment.context == "inline":
-                prevComments.append(comment)
-
-            # all other comment styles are attached to the current one
-            else:
-                currComments.append(comment)
-
-        # Merge with previously added ones
-        if hasattr(currNode, "comments"):
-            currNode.comments.extend(currComments)
-        else:
-            currNode.comments = currComments
-
-        if prevNode:
-            if hasattr(prevNode, "comments"):
-                prevNode.comments.extend(prevComments)
-            else:
-                prevNode.comments = prevComments
-        else:
-            # Don't lose the comment in the tree (if no previous node is there, attach it to this node)
-            currNode.comments.extend(prevComments)
-
-    def IF_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "if")
-
-    def IF_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def IF_setThenPart(self, node, statement):
-        node.append(statement, "thenPart")
-
-    def IF_setElsePart(self, node, statement):
-        node.append(statement, "elsePart")
-
-    def IF_finish(self, node):
-        pass
-
-    def SWITCH_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "switch")
-        node.defaultIndex = -1
-        return node
-
-    def SWITCH_setDiscriminant(self, node, expression):
-        node.append(expression, "discriminant")
-
-    def SWITCH_setDefaultIndex(self, node, index):
-        node.defaultIndex = index
-
-    def SWITCH_addCase(self, node, childNode):
-        node.append(childNode)
-
-    def SWITCH_finish(self, node):
-        pass
-
-    def CASE_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "case")
-
-    def CASE_setLabel(self, node, expression):
-        node.append(expression, "label")
-
-    def CASE_initializeStatements(self, node, tokenizer):
-        node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements")
-
-    def CASE_addStatement(self, node, statement):
-        node.statements.append(statement)
-
-    def CASE_finish(self, node):
-        pass
-
-    def DEFAULT_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "default")
-
-    def DEFAULT_initializeStatements(self, node, tokenizer):
-        node.append(jasy.script.parse.Node.Node(tokenizer, "block"), "statements")
-
-    def DEFAULT_addStatement(self, node, statement):
-        node.statements.append(statement)
-
-    def DEFAULT_finish(self, node):
-        pass
-
-    def FOR_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "for")
-        node.isLoop = True
-        node.isEach = False
-        return node
-
-    def FOR_rebuildForEach(self, node):
-        node.isEach = True
-
-    # NB: This function is called after rebuildForEach, if that is called at all.
-    def FOR_rebuildForIn(self, node):
-        node.type = "for_in"
-
-    def FOR_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def FOR_setSetup(self, node, expression):
-        node.append(expression, "setup")
-
-    def FOR_setUpdate(self, node, expression):
-        node.append(expression, "update")
-
-    def FOR_setObject(self, node, expression, forBlock=None):
-        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
-        # JS tolerates the optional unused parameter, but Python does not.
-        node.append(expression, "object")
-
-    def FOR_setIterator(self, node, expression, forBlock=None):
-        # wpbasti: not sure what forBlock stands for, but it is used in the parser.
-        # JS tolerates the optional unused parameter, but Python does not.
-        node.append(expression, "iterator")
-
-    def FOR_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def FOR_finish(self, node):
-        pass
-
-    def WHILE_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "while")
-        node.isLoop = True
-        return node
-
-    def WHILE_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def WHILE_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def WHILE_finish(self, node):
-        pass
-
-    def DO_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "do")
-        node.isLoop = True
-        return node
-
-    def DO_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def DO_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def DO_finish(self, node):
-        pass
-
-    def BREAK_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "break")
-
-    def BREAK_setLabel(self, node, label):
-        node.label = label
-
-    def BREAK_setTarget(self, node, target):
-        # Note: no append() here - the target is a relation, not a child node.
-        node.target = target
-
-    def BREAK_finish(self, node):
-        pass
-
-    def CONTINUE_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "continue")
-
-    def CONTINUE_setLabel(self, node, label):
-        node.label = label
-
-    def CONTINUE_setTarget(self, node, target):
-        # Note: no append() here - the target is a relation, not a child node.
-        node.target = target
-
-    def CONTINUE_finish(self, node):
-        pass
-
-    def TRY_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "try")
-        return node
-
-    def TRY_setTryBlock(self, node, statement):
-        node.append(statement, "tryBlock")
-
-    def TRY_addCatch(self, node, childNode):
-        node.append(childNode)
-
-    def TRY_finishCatches(self, node):
-        pass
-
-    def TRY_setFinallyBlock(self, node, statement):
-        node.append(statement, "finallyBlock")
-
-    def TRY_finish(self, node):
-        pass
-
-    def CATCH_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "catch")
-        return node
-
-    def CATCH_wrapException(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "exception")
-        node.value = tokenizer.token.value
-        return node
-
-    def CATCH_setException(self, node, exception):
-        node.append(exception, "exception")
-
-    def CATCH_setGuard(self, node, expression):
-        node.append(expression, "guard")
-
-    def CATCH_setBlock(self, node, statement):
-        node.append(statement, "block")
-
-    def CATCH_finish(self, node):
-        pass
-
-    def THROW_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "throw")
-
-    def THROW_setException(self, node, expression):
-        node.append(expression, "exception")
-
-    def THROW_finish(self, node):
-        pass
-
-    def RETURN_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "return")
-
-    def RETURN_setValue(self, node, expression):
-        node.append(expression, "value")
-
-    def RETURN_finish(self, node):
-        pass
-
-    def YIELD_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "yield")
-
-    def YIELD_setValue(self, node, expression):
-        node.append(expression, "value")
-
-    def YIELD_finish(self, node):
-        pass
-
-    def GENERATOR_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "generator")
-
-    def GENERATOR_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def GENERATOR_setTail(self, node, childNode):
-        node.append(childNode, "tail")
-
-    def GENERATOR_finish(self, node):
-        pass
-
-    def WITH_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "with")
-
-    def WITH_setObject(self, node, expression):
-        node.append(expression, "object")
-
-    def WITH_setBody(self, node, statement):
-        node.append(statement, "body")
-
-    def WITH_finish(self, node):
-        pass
-
-    def DEBUGGER_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "debugger")
-
-    def SEMICOLON_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "semicolon")
-
-    def SEMICOLON_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def SEMICOLON_finish(self, node):
-        pass
-
-    def LABEL_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "label")
-
-    def LABEL_setLabel(self, node, label):
-        node.label = label
-
-    def LABEL_setStatement(self, node, statement):
-        node.append(statement, "statement")
-
-    def LABEL_finish(self, node):
-        pass
-
-    def FUNCTION_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer)
-        if node.type != "function":
-            if tokenizer.token.value == "get":
-                node.type = "getter"
-            else:
-                node.type = "setter"
-
-        return node
-
-    def FUNCTION_setName(self, node, identifier):
-        node.name = identifier
-
-    def FUNCTION_initParams(self, node, tokenizer):
-        node.append(jasy.script.parse.Node.Node(tokenizer, "list"), "params")
-
-    def FUNCTION_wrapParam(self, tokenizer):
-        param = jasy.script.parse.Node.Node(tokenizer)
-        param.value = tokenizer.token.value
-        return param
-
-    def FUNCTION_addParam(self, node, tokenizer, expression):
-        node.params.append(expression)
-
-    def FUNCTION_setExpressionClosure(self, node, expressionClosure):
-        node.expressionClosure = expressionClosure
-
-    def FUNCTION_setBody(self, node, statement):
-        # copy over function parameters to function body
-        params = getattr(node, "params", None)
-        #if params:
-        #    statement.params = [param.value for param in params]
-
-        node.append(statement, "body")
-
-    def FUNCTION_hoistVars(self, x):
-        pass
-
-    def FUNCTION_finish(self, node, x):
-        pass
-
-    def VAR_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "var")
-
-    def VAR_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def VAR_finish(self, node):
-        pass
-
-    def CONST_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "const")
-
-    def CONST_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def CONST_finish(self, node):
-        pass
-
-    def LET_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "let")
-
-    def LET_addDecl(self, node, childNode, childContext=None):
-        node.append(childNode)
-
-    def LET_finish(self, node):
-        pass
-
-    def DECL_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "declaration")
-
-    def DECL_setNames(self, node, expression):
-        node.append(expression, "names")
-
-    def DECL_setName(self, node, identifier):
-        node.name = identifier
-
-    def DECL_setInitializer(self, node, expression):
-        node.append(expression, "initializer")
-
-    def DECL_setReadOnly(self, node, readOnly):
-        node.readOnly = readOnly
-
-    def DECL_finish(self, node):
-        pass
-
-    def LETBLOCK_build(self, tokenizer):
-        node = jasy.script.parse.Node.Node(tokenizer, "let_block")
-        return node
-
-    def LETBLOCK_setVariables(self, node, childNode):
-        node.append(childNode, "variables")
-
-    def LETBLOCK_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def LETBLOCK_setBlock(self, node, statement):
-        node.append(statement, "block")
-
-    def LETBLOCK_finish(self, node):
-        pass
-
-    def BLOCK_build(self, tokenizer, id):
-        node = jasy.script.parse.Node.Node(tokenizer, "block")
-        # node.id = id
-        return node
-
-    def BLOCK_hoistLets(self, node):
-        pass
-
-    def BLOCK_addStatement(self, node, childNode):
-        node.append(childNode)
-
-    def BLOCK_finish(self, node):
-        pass
-
-    def EXPRESSION_build(self, tokenizer, tokenType):
-        return jasy.script.parse.Node.Node(tokenizer, tokenType)
-
-    def EXPRESSION_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def EXPRESSION_finish(self, node):
-        pass
-
-    def ASSIGN_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "assign")
-
-    def ASSIGN_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def ASSIGN_setAssignOp(self, node, operator):
-        node.assignOp = operator
-
-    def ASSIGN_finish(self, node):
-        pass
-
-    def HOOK_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "hook")
-
-    def HOOK_setCondition(self, node, expression):
-        node.append(expression, "condition")
-
-    def HOOK_setThenPart(self, node, childNode):
-        node.append(childNode, "thenPart")
-
-    def HOOK_setElsePart(self, node, childNode):
-        node.append(childNode, "elsePart")
-
-    def HOOK_finish(self, node):
-        pass
-
-    def OR_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "or")
-
-    def OR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def OR_finish(self, node):
-        pass
-
-    def AND_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "and")
-
-    def AND_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def AND_finish(self, node):
-        pass
-
-    def BITWISEOR_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "bitwise_or")
-
-    def BITWISEOR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEOR_finish(self, node):
-        pass
-
-    def BITWISEXOR_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "bitwise_xor")
-
-    def BITWISEXOR_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEXOR_finish(self, node):
-        pass
-
-    def BITWISEAND_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "bitwise_and")
-
-    def BITWISEAND_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def BITWISEAND_finish(self, node):
-        pass
-
-    def EQUALITY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "eq", "ne", "strict_eq", or "strict_ne".
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def EQUALITY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def EQUALITY_finish(self, node):
-        pass
-
-    def RELATIONAL_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "lt", "le", "ge", or "gt".
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def RELATIONAL_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def RELATIONAL_finish(self, node):
-        pass
-
-    def SHIFT_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "lsh", "rsh", or "ursh".
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def SHIFT_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def SHIFT_finish(self, node):
-        pass
-
-    def ADD_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "plus" or "minus".
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def ADD_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def ADD_finish(self, node):
-        pass
-
-    def MULTIPLY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "mul", "div", or "mod".
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def MULTIPLY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def MULTIPLY_finish(self, node):
-        pass
-
-    def UNARY_build(self, tokenizer):
-        # NB: tokenizer.token.type must be "delete", "void", "typeof", "not", "bitwise_not",
-        # "unary_plus", "unary_minus", "increment", or "decrement".
-        if tokenizer.token.type == "plus":
-            tokenizer.token.type = "unary_plus"
-        elif tokenizer.token.type == "minus":
-            tokenizer.token.type = "unary_minus"
-
-        return jasy.script.parse.Node.Node(tokenizer)
-
-    def UNARY_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def UNARY_setPostfix(self, node):
-        node.postfix = True
-
-    def UNARY_finish(self, node):
-        pass
-
-    def MEMBER_build(self, tokenizer, tokenType=None):
-        node = jasy.script.parse.Node.Node(tokenizer, tokenType)
-        if node.type == "identifier":
-            node.value = tokenizer.token.value
-        return node
-
-    def MEMBER_rebuildNewWithArgs(self, node):
-        node.type = "new_with_args"
-
-    def MEMBER_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def MEMBER_finish(self, node):
-        pass
-
-    def PRIMARY_build(self, tokenizer, tokenType):
-        # NB: tokenizer.token.type must be "null", "this", "true", "false", "identifier", "number", "string", or "regexp".
-        node = jasy.script.parse.Node.Node(tokenizer, tokenType)
-        if tokenType in ("identifier", "string", "regexp", "number"):
-            node.value = tokenizer.token.value
-
-        return node
-
-    def PRIMARY_finish(self, node):
-        pass
-
-    def ARRAYINIT_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "array_init")
-
-    def ARRAYINIT_addElement(self, node, childNode):
-        node.append(childNode)
-
-    def ARRAYINIT_finish(self, node):
-        pass
-
-    def ARRAYCOMP_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "array_comp")
-
-    def ARRAYCOMP_setExpression(self, node, expression):
-        node.append(expression, "expression")
-
-    def ARRAYCOMP_setTail(self, node, childNode):
-        node.append(childNode, "tail")
-
-    def ARRAYCOMP_finish(self, node):
-        pass
-
-    def COMPTAIL_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "comp_tail")
-
-    def COMPTAIL_setGuard(self, node, expression):
-        node.append(expression, "guard")
-
-    def COMPTAIL_addFor(self, node, childNode):
-        node.append(childNode, "for")
-
-    def COMPTAIL_finish(self, node):
-        pass
-
-    def OBJECTINIT_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "object_init")
-
-    def OBJECTINIT_addProperty(self, node, childNode):
-        node.append(childNode)
-
-    def OBJECTINIT_finish(self, node):
-        pass
-
-    def PROPERTYINIT_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "property_init")
-
-    def PROPERTYINIT_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def PROPERTYINIT_finish(self, node):
-        pass
-
-    def COMMA_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "comma")
-
-    def COMMA_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def COMMA_finish(self, node):
-        pass
-
-    def LIST_build(self, tokenizer):
-        return jasy.script.parse.Node.Node(tokenizer, "list")
-
-    def LIST_addOperand(self, node, childNode):
-        node.append(childNode)
-
-    def LIST_finish(self, node):
-        pass
-
-    def setHoists(self, id, vds):
-        pass
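
For orientation, the removed VanillaBuilder exposes *_build/*_set.../*_finish hooks that the (also removed) Parser.py drives while consuming tokens. The following is a self-contained toy sketch of that protocol; FakeNode and FakeBuilder are simplified stand-ins, not the real jasy classes:

    # Toy illustration of the build/set/finish protocol used against
    # VanillaBuilder; only the keyed child relation is modelled here.
    class FakeNode(dict):
        def append(self, child, key=None):
            # the real Node also keeps an ordered child list
            if key is not None:
                self[key] = child

    class FakeBuilder:
        def IF_build(self, tokenizer):
            return FakeNode(type="if")

        def IF_setCondition(self, node, expression):
            node.append(expression, "condition")

        def IF_setThenPart(self, node, statement):
            node.append(statement, "thenPart")

        def IF_finish(self, node):
            pass

    builder = FakeBuilder()
    ifNode = builder.IF_build(tokenizer=None)
    builder.IF_setCondition(ifNode, FakeNode(type="true"))
    builder.IF_setThenPart(ifNode, FakeNode(type="block"))
    builder.IF_finish(ifNode)
    assert ifNode["condition"]["type"] == "true"
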
--- a/src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Lang.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,25 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-# JavaScript 1.7 keywords
-keywords = set([
-    "break",
-    "case", "catch", "const", "continue",
-    "debugger", "default", "delete", "do",
-    "else",
-    "false", "finally", "for", "function",
-    "if", "in", "instanceof",
-    "let",
-    "new", "null",
-    "return",
-    "switch",
-    "this", "throw", "true", "try", "typeof",
-    "var", "void",
-    "yield",
-    "while", "with"
-])
--- a/src/eric7/ThirdParty/Jasy/jasy/script/tokenize/Tokenizer.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,589 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-#
-# License: MPL 1.1/GPL 2.0/LGPL 2.1
-# Authors:
-#   - Brendan Eich <brendan@mozilla.org> (Original JavaScript) (2004-2010)
-#   - Sebastian Werner <info@sebastian-werner.net> (Python Port) (2010)
-#
-
-from __future__ import unicode_literals
-
-import re, copy
-
-import jasy.script.tokenize.Lang as Lang
-import jasy.script.api.Comment as Comment
-import jasy.core.Console as Console
-
-
-# Operator and punctuator mapping from token to tree node type name.
-# NB: because the lexer doesn't backtrack, all token prefixes must themselves
-# be valid tokens (e.g. !== is acceptable because its prefixes are the valid
-# tokens != and !).
-operatorNames = {
-    '<'   : 'lt',
-    '>'   : 'gt',
-    '<='  : 'le',
-    '>='  : 'ge',
-    '!='  : 'ne',
-    '!'   : 'not',
-    '=='  : 'eq',
-    '===' : 'strict_eq',
-    '!==' : 'strict_ne',
-
-    '>>'  : 'rsh',
-    '<<'  : 'lsh',
-    '>>>' : 'ursh',
-
-    '+'   : 'plus',
-    '*'   : 'mul',
-    '-'   : 'minus',
-    '/'   : 'div',
-    '%'   : 'mod',
-
-    ','   : 'comma',
-    ';'   : 'semicolon',
-    ':'   : 'colon',
-    '='   : 'assign',
-    '?'   : 'hook',
-
-    '&&'  : 'and',
-    '||'  : 'or',
-
-    '++'  : 'increment',
-    '--'  : 'decrement',
-
-    ')'   : 'right_paren',
-    '('   : 'left_paren',
-    '['   : 'left_bracket',
-    ']'   : 'right_bracket',
-    '{'   : 'left_curly',
-    '}'   : 'right_curly',
-
-    '&'   : 'bitwise_and',
-    '^'   : 'bitwise_xor',
-    '|'   : 'bitwise_or',
-    '~'   : 'bitwise_not'
-}
-
-
-# Assignment operators
-assignOperators = ["|", "^", "&", "<<", ">>", ">>>", "+", "-", "*", "/", "%"]
-
-
-
-
-#
-# Classes
-#
-
-class Token:
-    __slots__ = ["type", "start", "line", "assignOp", "end", "value"]
-
-
-class ParseError(Exception):
-    def __init__(self, message, fileId, line):
-        Exception.__init__(self, "Syntax error: %s\n%s:%s" % (message, fileId, line))
-
-
-class Tokenizer(object):
-    def __init__(self, source, fileId="", line=1):
-        # source: JavaScript source
-        # fileId: Filename (for debugging purposes)
-        # line: Line number (for debugging purposes)
-        self.cursor = 0
-        self.source = str(source)
-        self.tokens = {}
-        self.tokenIndex = 0
-        self.lookahead = 0
-        self.scanNewlines = False
-        self.fileId = fileId
-        self.line = line
-        self.comments = []
-
-    input_ = property(lambda self: self.source[self.cursor:])
-    token = property(lambda self: self.tokens.get(self.tokenIndex))
-
-
-    def done(self):
-        # We need to set scanOperand to true here because the first thing
-        # might be a regexp.
-        return self.peek(True) == "end"
-
-
-    def match(self, tokenType, scanOperand=False):
-        return self.get(scanOperand) == tokenType or self.unget()
-
-
-    def mustMatch(self, tokenType):
-        if not self.match(tokenType):
-            raise ParseError("Missing " + tokenType, self.fileId, self.line)
-
-        return self.token
-
-
-    def peek(self, scanOperand=False):
-        if self.lookahead:
-            next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
-            if self.scanNewlines and (getattr(next, "line", None) != getattr(self, "line", None)):
-                tokenType = "newline"
-            else:
-                tokenType = getattr(next, "type", None)
-        else:
-            tokenType = self.get(scanOperand)
-            self.unget()
-
-        return tokenType
-
-
-    def peekOnSameLine(self, scanOperand=False):
-        self.scanNewlines = True
-        tokenType = self.peek(scanOperand)
-        self.scanNewlines = False
-        return tokenType
-
-
-    def getComments(self):
-        if self.comments:
-            comments = self.comments
-            self.comments = []
-            return comments
-
-        return None
-
-
-    def skip(self):
-        """Eats comments and whitespace."""
-        input = self.source
-        startLine = self.line
-
-        # Whether this is the first call, as happens when starting to parse a file (eat leading comments/whitespace)
-        startOfFile = self.cursor == 0
-
-        indent = ""
-
-        while (True):
-            if len(input) > self.cursor:
-                ch = input[self.cursor]
-            else:
-                return
-
-            self.cursor += 1
-
-            if len(input) > self.cursor:
-                next = input[self.cursor]
-            else:
-                next = None
-
-            if ch == "\n" and not self.scanNewlines:
-                self.line += 1
-                indent = ""
-
-            elif ch == "/" and next == "*":
-                self.cursor += 1
-                text = "/*"
-                inline = startLine == self.line and startLine > 1
-                commentStartLine = self.line
-                if startLine == self.line and not startOfFile:
-                    mode = "inline"
-                elif (self.line-1) > startLine:
-                    # a blank-line gap before this comment means it is a comment block for a whole section (multiple lines of code)
-                    mode = "section"
-                else:
-                    # comment for possibly multiple following lines of code, but less prominent (no visual whitespace divider)
-                    mode = "block"
-
-                while (True):
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        raise ParseError("Unterminated comment", self.fileId, self.line)
-
-                    if ch == "*":
-                        next = input[self.cursor]
-                        if next == "/":
-                            text += "*/"
-                            self.cursor += 1
-                            break
-
-                    elif ch == "\n":
-                        self.line += 1
-
-                    text += ch
-
-
-                # Filter escaping on slash-star combinations in comment text
-                text = text.replace("*\/", "*/")
-
-                try:
-                    self.comments.append(Comment.Comment(text, mode, commentStartLine, indent, self.fileId))
-                except Comment.CommentException as commentError:
-                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
-
-
-            elif ch == "/" and next == "/":
-                self.cursor += 1
-                text = "//"
-                if startLine == self.line and not startOfFile:
-                    mode = "inline"
-                elif (self.line-1) > startLine:
-                    # a blank-line gap before this comment means it is a comment block for a whole section (multiple lines of code)
-                    mode = "section"
-                else:
-                    # comment for possibly multiple following lines of code, but less prominent (no visual whitespace divider)
-                    mode = "block"
-
-                while (True):
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        # end of file etc.
-                        break
-
-                    if ch == "\n":
-                        self.line += 1
-                        break
-
-                    text += ch
-
-                try:
-                    self.comments.append(Comment.Comment(text, mode, self.line-1, "", self.fileId))
-                except Comment.CommentException as commentError:
-                    Console.error("Ignoring comment in %s: %s", self.fileId, commentError)
-
-            # check for whitespace, also for special cases like 0xA0
-            elif ch in "\xA0 \t":
-                indent += ch
-
-            else:
-                self.cursor -= 1
-                return
-
-
-    # Lexes the exponential part of a number, if present. Returns True if an
-    # exponential part was found.
-    def lexExponent(self):
-        input = self.source
-        next = input[self.cursor]
-        if next == "e" or next == "E":
-            self.cursor += 1
-            ch = input[self.cursor]
-            self.cursor += 1
-            if ch == "+" or ch == "-":
-                ch = input[self.cursor]
-                self.cursor += 1
-
-            if ch < "0" or ch > "9":
-                raise ParseError("Missing exponent", self.fileId, self.line)
-
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-
-            self.cursor -= 1
-            return True
-
-        return False
-
-
-    def lexZeroNumber(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "number"
-
-        ch = input[self.cursor]
-        self.cursor += 1
-        if ch == ".":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-
-            self.cursor -= 1
-            self.lexExponent()
-            token.value = input[token.start:self.cursor]
-
-        elif ch == "x" or ch == "X":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not ((ch >= "0" and ch <= "9") or (ch >= "a" and ch <= "f") or (ch >= "A" and ch <= "F")):
-                    break
-
-            self.cursor -= 1
-            token.value = input[token.start:self.cursor]
-
-        elif ch >= "0" and ch <= "7":
-            while(True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "7"):
-                    break
-
-            self.cursor -= 1
-            token.value = input[token.start:self.cursor]
-
-        else:
-            self.cursor -= 1
-            self.lexExponent()     # 0E1, &c.
-            token.value = 0
-
-
-    def lexNumber(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "number"
-
-        floating = False
-        while(True):
-            ch = input[self.cursor]
-            self.cursor += 1
-
-            if ch == "." and not floating:
-                floating = True
-                ch = input[self.cursor]
-                self.cursor += 1
-
-            if not (ch >= "0" and ch <= "9"):
-                break
-
-        self.cursor -= 1
-
-        exponent = self.lexExponent()
-        segment = input[token.start:self.cursor]
-
-        # Protect float or exponent numbers
-        if floating or exponent:
-            token.value = segment
-        else:
-            token.value = int(segment)
-
-
-    def lexDot(self, ch):
-        token = self.token
-        input = self.source
-        next = input[self.cursor]
-
-        if next >= "0" and next <= "9":
-            while (True):
-                ch = input[self.cursor]
-                self.cursor += 1
-                if not (ch >= "0" and ch <= "9"):
-                    break
-
-            self.cursor -= 1
-            self.lexExponent()
-
-            token.type = "number"
-            token.value = input[token.start:self.cursor]
-
-        else:
-            token.type = "dot"
-
-
-    def lexString(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "string"
-
-        hasEscapes = False
-        delim = ch
-        ch = input[self.cursor]
-        self.cursor += 1
-        while ch != delim:
-            if ch == "\\":
-                hasEscapes = True
-                self.cursor += 1
-
-            ch = input[self.cursor]
-            self.cursor += 1
-
-        if hasEscapes:
-            token.value = eval(input[token.start:self.cursor])
-        else:
-            token.value = input[token.start+1:self.cursor-1]
-
-
-    def lexRegExp(self, ch):
-        token = self.token
-        input = self.source
-        token.type = "regexp"
-
-        while (True):
-            try:
-                ch = input[self.cursor]
-                self.cursor += 1
-            except IndexError:
-                raise ParseError("Unterminated regex", self.fileId, self.line)
-
-            if ch == "\\":
-                self.cursor += 1
-
-            elif ch == "[":
-                while (True):
-                    if ch == "\\":
-                        self.cursor += 1
-
-                    try:
-                        ch = input[self.cursor]
-                        self.cursor += 1
-                    except IndexError:
-                        raise ParseError("Unterminated character class", self.fileId, self.line)
-
-                    if ch == "]":
-                        break
-
-            if ch == "/":
-                break
-
-        while(True):
-            ch = input[self.cursor]
-            self.cursor += 1
-            if not (ch >= "a" and ch <= "z"):
-                break
-
-        self.cursor -= 1
-        token.value = input[token.start:self.cursor]
-
-
-    def lexOp(self, ch):
-        token = self.token
-        input = self.source
-
-        op = ch
-        while(True):
-            try:
-                next = input[self.cursor]
-            except IndexError:
-                break
-
-            if (op + next) in operatorNames:
-                self.cursor += 1
-                op += next
-            else:
-                break
-
-        try:
-            next = input[self.cursor]
-        except IndexError:
-            next = None
-
-        if next == "=" and op in assignOperators:
-            self.cursor += 1
-            token.type = "assign"
-            token.assignOp = operatorNames[op]
-            op += "="
-
-        else:
-            token.type = operatorNames[op]
-            token.assignOp = None
-
-
-    # FIXME: Unicode escape sequences
-    # FIXME: Unicode identifiers
-    def lexIdent(self, ch):
-        token = self.token
-        input = self.source
-
-        try:
-            while True:
-                ch = input[self.cursor]
-                self.cursor += 1
-
-                if not ((ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or (ch >= "0" and ch <= "9") or ch == "$" or ch == "_"):
-                    break
-
-        except IndexError:
-            self.cursor += 1
-            pass
-
-        # Put the non-word character back.
-        self.cursor -= 1
-
-        identifier = input[token.start:self.cursor]
-        if identifier in Lang.keywords:
-            token.type = identifier
-        else:
-            token.type = "identifier"
-            token.value = identifier
-
-
-    def get(self, scanOperand=False):
-        """
-        It consumes input *only* if there is no lookahead.
-        Dispatches to the appropriate lexing function depending on the input.
-        """
-        while self.lookahead:
-            self.lookahead -= 1
-            self.tokenIndex = (self.tokenIndex + 1) & 3
-            token = self.tokens[self.tokenIndex]
-            if token.type != "newline" or self.scanNewlines:
-                return token.type
-
-        self.skip()
-
-        self.tokenIndex = (self.tokenIndex + 1) & 3
-        self.tokens[self.tokenIndex] = token = Token()
-
-        token.start = self.cursor
-        token.line = self.line
-
-        input = self.source
-        if self.cursor == len(input):
-            token.end = token.start
-            token.type = "end"
-            return token.type
-
-        ch = input[self.cursor]
-        self.cursor += 1
-
-        if (ch >= "a" and ch <= "z") or (ch >= "A" and ch <= "Z") or ch == "$" or ch == "_":
-            self.lexIdent(ch)
-
-        elif scanOperand and ch == "/":
-            self.lexRegExp(ch)
-
-        elif ch == ".":
-            self.lexDot(ch)
-
-        elif self.scanNewlines and ch == "\n":
-            token.type = "newline"
-            self.line += 1
-
-        elif ch in operatorNames:
-            self.lexOp(ch)
-
-        elif ch >= "1" and ch <= "9":
-            self.lexNumber(ch)
-
-        elif ch == "0":
-            self.lexZeroNumber(ch)
-
-        elif ch == '"' or ch == "'":
-            self.lexString(ch)
-
-        else:
-            raise ParseError("Illegal token: %s (Code: %s)" % (ch, ord(ch)), self.fileId, self.line)
-
-        token.end = self.cursor
-        return token.type
-
-
-    def unget(self):
-        """match() depends on unget() returning None (a falsy value)."""
-        self.lookahead += 1
-
-        if self.lookahead == 4:
-            raise ParseError("PANIC: too much lookahead!", self.fileId, self.line)
-
-        self.tokenIndex = (self.tokenIndex - 1) & 3
-
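
The removed Tokenizer keeps up to four tokens in a ring buffer indexed with "& 3", which is what get()/unget()/peek() juggle above. A self-contained sketch of just that lookahead mechanism (simplified: newline scanning and real lexing are omitted, and the tokens are pre-lexed):

    # Minimal stand-alone model of the 4-slot lookahead ring buffer used by
    # the removed Tokenizer: get() advances, unget() steps back (at most
    # three times), and peek() is get() followed by unget().
    class RingLookahead:
        def __init__(self, tokens):
            self.tokens = list(tokens)   # pre-lexed tokens, for simplicity
            self.cursor = 0              # next token to consume from the source
            self.buffer = {}             # the 4-slot ring buffer
            self.index = 0               # slot of the current token
            self.lookahead = 0           # how many tokens were ungotten

        def get(self):
            if self.lookahead:
                self.lookahead -= 1
                self.index = (self.index + 1) & 3
                return self.buffer[self.index]
            self.index = (self.index + 1) & 3
            token = self.tokens[self.cursor]
            self.cursor += 1
            self.buffer[self.index] = token
            return token

        def unget(self):
            self.lookahead += 1
            if self.lookahead == 4:
                raise ValueError("too much lookahead")
            self.index = (self.index - 1) & 3

        def peek(self):
            token = self.get()
            self.unget()
            return token

    ring = RingLookahead(["if", "(", "x", ")"])
    assert ring.peek() == "if"
    assert ring.get() == "if"
    assert ring.get() == "("
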
--- a/src/eric7/ThirdParty/Jasy/jasy/script/util/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,416 +0,0 @@
-#
-# Jasy - Web Tooling Framework
-# Copyright 2010-2012 Zynga Inc.
-# Copyright 2013-2014 Sebastian Werner
-#
-
-from __future__ import unicode_literals
-
-from jasy.script.output.Compressor import Compressor
-
-# Shared instance
-compressor = Compressor()
-
-pseudoTypes = set(["any", "var", "undefined", "null", "true", "false", "this", "arguments"])
-builtinTypes = set(["Object", "String", "Number", "Boolean", "Array", "Function", "RegExp", "Date"])
-
-# Basic mapping from node types to human readable doc types
-nodeTypeToDocType = {
-
-    # Primitives
-    "string": "String",
-    "number": "Number",
-    "not": "Boolean",
-    "true": "Boolean",
-    "false": "Boolean",
-
-    # Literals
-    "function": "Function",
-    "regexp": "RegExp",
-    "object_init": "Map",
-    "array_init": "Array",
-
-    # We could figure out the real class automatically - at least that's the case quite often
-    "new": "Object",
-    "new_with_args": "Object",
-
-    # Comparisons
-    "eq" : "Boolean",
-    "ne" : "Boolean",
-    "strict_eq" : "Boolean",
-    "strict_ne" : "Boolean",
-    "lt" : "Boolean",
-    "le" : "Boolean",
-    "gt" : "Boolean",
-    "ge" : "Boolean",
-    "in" : "Boolean",
-    "instanceof" : "Boolean",
-
-    # Numbers
-    "lsh": "Number",
-    "rsh": "Number",
-    "ursh": "Number",
-    "minus": "Number",
-    "mul": "Number",
-    "div": "Number",
-    "mod": "Number",
-    "bitwise_and": "Number",
-    "bitwise_xor": "Number",
-    "bitwise_or": "Number",
-    "bitwise_not": "Number",
-    "increment": "Number",
-    "decrement": "Number",
-    "unary_minus": "Number",
-    "unary_plus": "Number",
-
-    # This is not 100% correct, but I don't like to introduce a BooleanLike type.
-    # If the author likes something different he is still able to override it via API docs
-    "and": "Boolean",
-    "or": "Boolean",
-
-    # Operators/Built-ins
-    "void": "undefined",
-    "null": "null",
-    "typeof": "String",
-    "delete": "Boolean",
-    "this": "This",
-
-    # These are not real types, we try to figure out the real value behind automatically
-    "call": "Call",
-    "hook": "Hook",
-    "assign": "Assign",
-    "plus": "Plus",
-    "identifier" : "Identifier",
-    "dot": "Object",
-    "index": "var"
-}
-
-
-def getVisibility(name):
-    """
-    Returns the visibility of the given name by convention
-    """
-
-    if name.startswith("__"):
-        return "private"
-    elif name.startswith("_"):
-        return "internal"
-    else:
-        return "public"
-
-
-def requiresDocumentation(name):
-    """
-    Whether the given name suggests that documentation is required
-    """
-
-    return not name.startswith("_")
-
-
-def getKeyValue(dict, key):
-    """
-    Returns the value node of the given key inside the given object initializer.
-    """
-
-    for propertyInit in dict:
-        if propertyInit[0].value == key:
-            return propertyInit[1]
-
-
-def findAssignments(name, node):
-    """
-    Returns a list of assignments which might have impact on the value used in the given node.
-    """
-
-    # Looking for all script blocks
-    scripts = []
-    parent = node
-    while parent:
-        if parent.type == "script":
-            scope = getattr(parent, "scope", None)
-            if scope and name in scope.modified:
-                scripts.append(parent)
-
-        parent = getattr(parent, "parent", None)
-
-    def assignMatcher(node):
-        if node.type == "assign" and node[0].type == "identifier" and node[0].value == name:
-            return True
-
-        if node.type == "declaration" and node.name == name and getattr(node, "initializer", None):
-            return True
-
-        if node.type == "function" and node.functionForm == "declared_form" and node.name == name:
-            return True
-
-        return False
-
-    # Query all relevant script nodes
-    assignments = []
-    for script in scripts:
-        queryResult = queryAll(script, assignMatcher, False)
-        assignments.extend(queryResult)
-
-    # Collect assigned values
-    values = []
-    for assignment in assignments:
-        if assignment.type == "function":
-            values.append(assignment)
-        elif assignment.type == "assign":
-            values.append(assignment[1])
-        else:
-            values.append(assignment.initializer)
-
-    return assignments, values
-
-
-def findFunction(node):
-    """
-    Returns the first function inside the given node
-    """
-
-    return query(node, lambda node: node.type == "function")
-
-
-def findCommentNode(node):
-    """
-    Finds the first doc comment node inside the given node
-    """
-
-    def matcher(node):
-        comments = getattr(node, "comments", None)
-        if comments:
-            for comment in comments:
-                if comment.variant == "doc":
-                    return True
-
-    return query(node, matcher)
-
-
-def getDocComment(node):
-    """
-    Returns the first doc comment of the given node.
-    """
-
-    comments = getattr(node, "comments", None)
-    if comments:
-        for comment in comments:
-            if comment.variant == "doc":
-                return comment
-
-    return None
-
-
-def findReturn(node):
-    """
-    Finds the first return inside the given node
-    """
-
-    return query(node, lambda node: node.type == "return", True)
-
-
-
-def valueToString(node):
-    """
-    Converts the value of the given node into something human friendly
-    """
-
-    if node.type in ("number", "string", "false", "true", "regexp", "null"):
-        return compressor.compress(node)
-    elif node.type in nodeTypeToDocType:
-        if node.type == "plus":
-            return detectPlusType(node)
-        elif node.type in ("new", "new_with_args", "dot"):
-            return detectObjectType(node)
-        else:
-            return nodeTypeToDocType[node.type]
-    else:
-        return "Other"
-
-
-
-def queryAll(node, matcher, deep=True, inner=False, result=None):
-    """
-    Recurses the tree starting with the given node and returns a list of nodes
-    matched by the given matcher method
-
-    - node: any node
-    - matcher: function which should return a truish value when node matches
-    - deep: whether inner scopes should be scanned, too
-    - inner: used internally to differentiate between current and inner nodes
-    - result: can be used to extend an existing list, otherwise a new list is created and returned
-    """
-
-    if result == None:
-        result = []
-
-    # Don't descend into inner scripts (closure functions) unless deep scanning is requested
-    if inner and node.type == "script" and not deep:
-        return None
-
-    if matcher(node):
-        result.append(node)
-
-    for child in node:
-        queryAll(child, matcher, deep, True, result)
-
-    return result
-
-
-
-def query(node, matcher, deep=True, inner=False):
-    """
-    Recurses the tree starting with the given node and returns the first node
-    which is matched by the given matcher method.
-
-    - node: any node
-    - matcher: function which should return a truish value when node matches
-    - deep: whether inner scopes should be scanned, too
-    - inner: used internally to differentiate between current and inner nodes
-    """
-
-    # Don't descend into inner scripts (closure functions) unless deep scanning is requested
-    if inner and node.type == "script" and not deep:
-        return None
-
-    if matcher(node):
-        return node
-
-    for child in node:
-        result = query(child, matcher, deep, True)
-        if result is not None:
-            return result
-
-    return None
-
-
-def findCall(node, methodName):
-    """
-    Recurses the tree starting with the given node and returns the first node
-    which calls the given method name (supports namespaces, too)
-    """
-
-    if type(methodName) is str:
-        methodName = set([methodName])
-
-    def matcher(node):
-        call = getCallName(node)
-        if call and call in methodName:
-            return call
-
-    return query(node, matcher)
-
-
-def getCallName(node):
-    if node.type == "call":
-        if node[0].type == "dot":
-            return assembleDot(node[0])
-        elif node[0].type == "identifier":
-            return node[0].value
-
-    return None
-
-
-def getParameterFromCall(call, index=0):
-    """
-    Returns a parameter node by index on the call node
-    """
-
-    try:
-        return call[1][index]
-    except:
-        return None
-
-
-def getParamNamesFromFunction(func):
-    """
-    Returns a human readable list of parameter names (sorted by their order in the given function)
-    """
-
-    params = getattr(func, "params", None)
-    if params:
-        return [identifier.value for identifier in params]
-    else:
-        return None
-
-
-def detectPlusType(plusNode):
-    """
-    Analyses the given "plus" node and tries to figure out if a "string" or "number" result is produced.
-    """
-
-    if plusNode[0].type == "string" or plusNode[1].type == "string":
-        return "String"
-    elif plusNode[0].type == "number" and plusNode[1].type == "number":
-        return "Number"
-    elif plusNode[0].type == "plus" and detectPlusType(plusNode[0]) == "String":
-        return "String"
-    else:
-        return "var"
-
-
-def detectObjectType(objectNode):
-    """
-    Returns a human readable type information of the given node
-    """
-
-    if objectNode.type in ("new", "new_with_args"):
-        construct = objectNode[0]
-    else:
-        construct = objectNode
-
-    # Only support built-in top level constructs
-    if construct.type == "identifier" and construct.value in ("Array", "Boolean", "Date", "Function", "Number", "Object", "String", "RegExp"):
-        return construct.value
-
-    # And namespaced custom classes
-    elif construct.type == "dot":
-        assembled = assembleDot(construct)
-        if assembled:
-            return assembled
-
-    return "Object"
-
-
-
-def resolveIdentifierNode(identifierNode):
-    assignNodes, assignValues = findAssignments(identifierNode.value, identifierNode)
-    if assignNodes:
-
-        assignCommentNode = None
-
-        # Find first relevant assignment with comment! Otherwise just first one.
-        for assign in assignNodes:
-
-            # The parent is the relevant doc comment container
-            # It's either a "var" (declaration) or "semicolon" (assignment)
-            if getDocComment(assign):
-                assignCommentNode = assign
-                break
-            elif getDocComment(assign.parent):
-                assignCommentNode = assign.parent
-                break
-
-        return assignValues[0], assignCommentNode or assignValues[0]
-
-    return None, None
-
-
-
-def assembleDot(node, result=None):
-    """
-    Joins a dot node (cascaded supported, too) into a single string like "foo.bar.Baz"
-    """
-
-    if result == None:
-        result = []
-
-    for child in node:
-        if child.type == "identifier":
-            result.append(child.value)
-        elif child.type == "dot":
-            assembleDot(child, result)
-        else:
-            return None
-
-    return ".".join(result)
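
query() and queryAll() above perform a plain pre-order walk over the iterable Node tree. A self-contained sketch of the same traversal pattern with a stand-in node type (the real jasy Node is richer):

    # Stand-alone illustration of the queryAll() traversal; FakeNode is a
    # stand-in for jasy.script.parse.Node.Node, which is likewise iterable
    # over its children.
    class FakeNode:
        def __init__(self, type, children=None):
            self.type = type
            self.children = children or []

        def __iter__(self):
            return iter(self.children)

    def queryAll(node, matcher, result=None):
        if result is None:
            result = []
        if matcher(node):
            result.append(node)
        for child in node:
            queryAll(child, matcher, result)
        return result

    tree = FakeNode("script", [
        FakeNode("var"),
        FakeNode("function", [FakeNode("return")]),
    ])
    assert len(queryAll(tree, lambda n: n.type == "function")) == 1
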
--- a/src/eric7/ThirdParty/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2003 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Package containing third party packages used by eric.
-""" 
\ No newline at end of file
--- a/src/eric7/Utilities/ClassBrowsers/__init__.py	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/Utilities/ClassBrowsers/__init__.py	Sat Apr 01 11:09:00 2023 +0200
@@ -105,7 +105,6 @@
     @rtype module
     """
     typeMapping = {
-        "javascript": ".jsclbr",
         "python": ".pyclbr",
         "ruby": ".rbclbr",
     }
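
The typeMapping above resolves a lower-cased language name to a class-browser submodule; with this change, "javascript" simply no longer resolves. A hedged sketch of that dispatch idea (the package name and helper are illustrative assumptions, not the actual eric7 code):

    # Illustrative dispatch from a language name to a class-browser module.
    import importlib

    typeMapping = {
        "python": ".pyclbr",
        "ruby": ".rbclbr",
    }

    def getClassBrowserModule(language, package="eric7.Utilities.ClassBrowsers"):
        try:
            return importlib.import_module(typeMapping[language.lower()], package)
        except KeyError:
            return None
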
--- a/src/eric7/Utilities/ClassBrowsers/jsclbr.py	Fri Mar 31 13:39:51 2023 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,339 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (c) 2013 - 2023 Detlev Offenbach <detlev@die-offenbachs.de>
-#
-
-"""
-Parse a JavaScript file and retrieve variables and functions.
-
-It uses the JavaScript parser contained in the jasy web framework.
-"""
-
-import jasy.script.parse.Parser as jsParser
-import jasy.script.tokenize.Tokenizer as jsTokenizer
-
-from eric7 import Utilities
-from eric7.Utilities import ClassBrowsers
-
-from . import ClbrBaseClasses
-
-SUPPORTED_TYPES = [ClassBrowsers.JS_SOURCE]
-
-
-class VisibilityMixin(ClbrBaseClasses.ClbrVisibilityMixinBase):
-    """
-    Mixin class implementing the notion of visibility.
-    """
-
-    def __init__(self):
-        """
-        Constructor
-        """
-        if self.name.startswith("__"):
-            self.setPrivate()
-        elif self.name.startswith("_"):
-            self.setProtected()
-        else:
-            self.setPublic()
-
-
-class Function(ClbrBaseClasses.Function, VisibilityMixin):
-    """
-    Class to represent a JavaScript function.
-    """
-
-    def __init__(self, module, name, file, lineno, signature="", separator=","):
-        """
-        Constructor
-
-        @param module name of the module containing this function
-        @param name name of this function
-        @param file filename containing this function
-        @param lineno linenumber of the function definition
-        @param signature parameter list of the function
-        @param separator string separating the parameters
-        """
-        ClbrBaseClasses.Function.__init__(
-            self, module, name, file, lineno, signature, separator
-        )
-        VisibilityMixin.__init__(self)
-
-
-class Attribute(ClbrBaseClasses.Attribute, VisibilityMixin):
-    """
-    Class to represent a class attribute.
-    """
-
-    def __init__(self, module, name, file, lineno):
-        """
-        Constructor
-
-        @param module name of the module containing this attribute
-        @param name name of this attribute
-        @param file filename containing this attribute
-        @param lineno linenumber of the attribute definition
-        """
-        ClbrBaseClasses.Attribute.__init__(self, module, name, file, lineno)
-        VisibilityMixin.__init__(self)
-
-
-class Visitor:
-    """
-    Class implementing a visitor going through the parsed tree.
-    """
-
-    def __init__(self, src, module, filename):
-        """
-        Constructor
-
-        @param src source to be parsed (string)
-        @param module name of the module (string)
-        @param filename file name (string)
-        """
-        self.__dict = {}
-        self.__dict_counts = {}
-        self.__root = None
-        self.__stack = []
-
-        self.__module = module
-        self.__file = filename
-        self.__source = src
-
-        # normalize line endings
-        self.__source = self.__source.replace("\r\n", "\n").replace("\r", "\n")
-
-        # ensure source ends with an eol
-        if bool(self.__source) and self.__source[-1] != "\n":
-            self.__source = self.__source + "\n"
-
-    def parse(self):
-        """
-        Public method to parse the source.
-
-        @return dictionary containing the parsed information
-        """
-        try:
-            self.__root = jsParser.parse(self.__source, self.__file)
-            self.__visit(self.__root)
-        except jsParser.SyntaxError:
-            # ignore syntax errors of the parser
-            pass
-        except jsTokenizer.ParseError:
-            # ignore syntax errors of the tokenizer
-            pass
-
-        return self.__dict
-
-    def __visit(self, root):
-        """
-        Private method implementing the visit logic delegating to interesting
-        methods.
-
-        @param root root node to visit
-        """
-
-        def call(n):
-            getattr(self, "visit_{0}".format(n.type), self.visit_noop)(n)
-
-        call(root)
-        for node in root:
-            self.__visit(node)
-
-    def visit_noop(self, node):
-        """
-        Public method to ignore the given node.
-
-        @param node reference to the node (jasy.script.parse.Node.Node)
-        """
-        pass
-
-    def visit_function(self, node):
-        """
-        Public method to treat a function node.
-
-        @param node reference to the node (jasy.script.parse.Node.Node)
-        """
-        if (
-            node.type == "function"
-            and getattr(node, "name", None)
-            and node.functionForm == "declared_form"
-        ):
-            if self.__stack and self.__stack[-1].endlineno < node.line:
-                del self.__stack[-1]
-            endline = node.line + self.__source.count("\n", node.start, node.end)
-            if getattr(node, "params", None):
-                func_sig = ", ".join([p.value for p in node.params])
-            else:
-                func_sig = ""
-            if self.__stack:
-                # it's a nested function
-                cur_func = self.__stack[-1]
-                f = Function(None, node.name, self.__file, node.line, func_sig)
-                f.setEndLine(endline)
-                cur_func._addmethod(node.name, f)
-            else:
-                f = Function(self.__module, node.name, self.__file, node.line, func_sig)
-                f.setEndLine(endline)
-                func_name = node.name
-                if func_name in self.__dict_counts:
-                    self.__dict_counts[func_name] += 1
-                    func_name = "{0}_{1:d}".format(
-                        func_name, self.__dict_counts[func_name]
-                    )
-                else:
-                    self.__dict_counts[func_name] = 0
-                self.__dict[func_name] = f
-            self.__stack.append(f)
-
-    def visit_property_init(self, node):
-        """
-        Public method to treat a property_init node.
-
-        @param node reference to the node (jasy.script.parse.Node.Node)
-        """
-        if node.type == "property_init" and node[1].type == "function":
-            if self.__stack and self.__stack[-1].endlineno < node[0].line:
-                del self.__stack[-1]
-            endline = node[0].line + self.__source.count("\n", node.start, node[1].end)
-            if getattr(node[1], "params", None):
-                func_sig = ", ".join([p.value for p in node[1].params])
-            else:
-                func_sig = ""
-            if self.__stack:
-                # it's a nested function
-                cur_func = self.__stack[-1]
-                f = Function(None, node[0].value, self.__file, node[0].line, func_sig)
-                f.setEndLine(endline)
-                cur_func._addmethod(node[0].value, f)
-            else:
-                f = Function(
-                    self.__module, node[0].value, self.__file, node[0].line, func_sig
-                )
-                f.setEndLine(endline)
-                func_name = node[0].value
-                if func_name in self.__dict_counts:
-                    self.__dict_counts[func_name] += 1
-                    func_name = "{0}_{1:d}".format(
-                        func_name, self.__dict_counts[func_name]
-                    )
-                else:
-                    self.__dict_counts[func_name] = 0
-                self.__dict[func_name] = f
-            self.__stack.append(f)
-
-    def visit_var(self, node):
-        """
-        Public method to treat a variable node.
-
-        @param node reference to the node (jasy.script.parse.Node.Node)
-        """
-        if (
-            node.type == "var"
-            and node.parent.type == "script"
-            and node.getChildrenLength()
-        ):
-            if self.__stack and self.__stack[-1].endlineno < node[0].line:
-                del self.__stack[-1]
-            if self.__stack:
-                # function variables
-                for var in node:
-                    attr = Attribute(self.__module, var.name, self.__file, var.line)
-                    self.__stack[-1]._addattribute(attr)
-            else:
-                # global variable
-                if "@@Globals@@" not in self.__dict:
-                    self.__dict["@@Globals@@"] = ClbrBaseClasses.ClbrBase(
-                        self.__module, "Globals", self.__file, 0
-                    )
-                for var in node:
-                    self.__dict["@@Globals@@"]._addglobal(
-                        Attribute(self.__module, var.name, self.__file, var.line)
-                    )
-
-    def visit_const(self, node):
-        """
-        Public method to treat a constant node.
-
-        @param node reference to the node (jasy.script.parse.Node.Node)
-        """
-        if (
-            node.type == "const"
-            and node.parent.type == "script"
-            and node.getChildrenLength()
-        ):
-            if self.__stack and self.__stack[-1].endlineno < node[0].line:
-                del self.__stack[-1]
-            if self.__stack:
-                # function variables
-                for var in node:
-                    attr = Attribute(
-                        self.__module, "const " + var.name, self.__file, var.line
-                    )
-                    self.__stack[-1]._addattribute(attr)
-            else:
-                # global variable
-                if "@@Globals@@" not in self.__dict:
-                    self.__dict["@@Globals@@"] = ClbrBaseClasses.ClbrBase(
-                        self.__module, "Globals", self.__file, 0
-                    )
-                for var in node:
-                    self.__dict["@@Globals@@"]._addglobal(
-                        Attribute(
-                            self.__module, "const " + var.name, self.__file, var.line
-                        )
-                    )
-
-
-def readmodule_ex(module, path=None, isTypeFile=False):
-    """
-    Read a JavaScript file and return a dictionary of functions and variables.
-
-    @param module name of the JavaScript file
-    @type str
-    @param path path the file should be searched in
-    @type list of str
-    @param isTypeFile flag indicating a file of this type
-    @type bool
-    @return the resulting dictionary
-    @rtype dict
-    """
-    # search the path for the file
-    f = None
-    fullpath = [] if path is None else path[:]
-    f, file, (suff, mode, type) = ClassBrowsers.find_module(module, fullpath)
-    if f:
-        f.close()
-    if type not in SUPPORTED_TYPES:
-        # not JavaScript source, can't do anything with this module
-        return {}
-
-    try:
-        src = Utilities.readEncodedFile(file)[0]
-    except (UnicodeError, OSError):
-        # can't do anything with this module
-        return {}
-
-    return scan(src, file, module)
-
-
-def scan(src, file, module):
-    """
-    Public method to scan the given source text.
-
-    @param src source text to be scanned
-    @type str
-    @param file file name associated with the source text
-    @type str
-    @param module module name associated with the source text
-    @type str
-    @return dictionary containing the extracted data
-    @rtype dict
-    """
-    # convert eol markers to the Python style
-    src = src.replace("\r\n", "\n").replace("\r", "\n")
-
-    dictionary = {}
-
-    visitor = Visitor(src, module, file)
-    dictionary = visitor.parse()
-    return dictionary
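
The removed Visitor dispatches on node type via getattr, falling back to visit_noop. A self-contained sketch of that dispatch pattern, with plain dicts standing in for jasy parse-tree nodes:

    # Stand-alone model of the getattr-based dispatch used by Visitor.__visit;
    # dict nodes stand in for jasy parse-tree nodes here.
    class MiniVisitor:
        def __init__(self):
            self.functions = []

        def visit(self, node):
            getattr(self, "visit_{0}".format(node["type"]), self.visit_noop)(node)
            for child in node.get("children", []):
                self.visit(child)

        def visit_noop(self, node):
            pass

        def visit_function(self, node):
            self.functions.append(node["name"])

    visitor = MiniVisitor()
    visitor.visit(
        {"type": "script", "children": [{"type": "function", "name": "init"}]}
    )
    assert visitor.functions == ["init"]
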
--- a/src/eric7/eric7_ide.py	Fri Mar 31 13:39:51 2023 +0200
+++ b/src/eric7/eric7_ide.py	Sat Apr 01 11:09:00 2023 +0200
@@ -87,9 +87,6 @@
         )
         sys.argv.remove(arg)
 
-# make Third-Party package 'Jasy' available as a packages repository
-sys.path.insert(2, os.path.join(os.path.dirname(__file__), "ThirdParty", "Jasy"))
-
 from eric7.EricWidgets.EricApplication import EricApplication
 
 

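The removed lines above dropped the sys.path entry that made the vendored Jasy package importable. For reference, the generic vendoring pattern looks roughly like this; "SomePackage" and the insert position are placeholders, not eric7 specifics beyond what the removed lines show:

    # Generic sketch of exposing a vendored package on sys.path, as the
    # removed lines did for Jasy.
    import os
    import sys

    vendorDir = os.path.join(os.path.dirname(__file__), "ThirdParty", "SomePackage")
    if vendorDir not in sys.path:
        sys.path.insert(2, vendorDir)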