ThirdParty/Pygments/pygments/lexers/templates.py

changeset 4172
4f20dba37ab6
parent 3145
a9de05d4a22f
child 4697
c2e9bf425554
--- a/ThirdParty/Pygments/pygments/lexers/templates.py	Wed Mar 11 18:25:37 2015 +0100
+++ b/ThirdParty/Pygments/pygments/lexers/templates.py	Wed Mar 11 18:32:27 2015 +0100
@@ -5,23 +5,24 @@
 
     Lexers for various template engines' markup.
 
-    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-from __future__ import unicode_literals
-
 import re
 
-from pygments.lexers.web import \
-     PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer, LassoLexer
-from pygments.lexers.agile import PythonLexer, PerlLexer
-from pygments.lexers.compiled import JavaLexer
-from pygments.lexers.jvm import TeaLangLexer
+from pygments.lexers.html import HtmlLexer, XmlLexer
+from pygments.lexers.javascript import JavascriptLexer, LassoLexer
+from pygments.lexers.css import CssLexer
+from pygments.lexers.php import PhpLexer
+from pygments.lexers.python import PythonLexer
+from pygments.lexers.perl import PerlLexer
+from pygments.lexers.jvm import JavaLexer, TeaLangLexer
+from pygments.lexers.data import YamlLexer
 from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
-     include, using, this
-from pygments.token import Error, Punctuation, \
-     Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
+    include, using, this, default, combined
+from pygments.token import Error, Punctuation, Whitespace, \
+    Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
 from pygments.util import html_doctype_matches, looks_like_xml
 
 __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
@@ -38,9 +39,12 @@
            'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
            'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
            'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
-           'ColdfusionHtmlLexer', 'VelocityLexer', 'VelocityHtmlLexer',
-           'VelocityXmlLexer', 'SspLexer', 'TeaTemplateLexer', 'LassoHtmlLexer',
-           'LassoXmlLexer', 'LassoCssLexer', 'LassoJavascriptLexer']
+           'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
+           'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
+           'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
+           'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
+           'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
+           'TwigLexer', 'TwigHtmlLexer']
 
 
 class ErbLexer(Lexer):
@@ -61,7 +65,7 @@
     _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
 
     def __init__(self, **options):
-        from pygments.lexers.agile import RubyLexer
+        from pygments.lexers.ruby import RubyLexer
         self.ruby_lexer = RubyLexer(**options)
         Lexer.__init__(self, **options)
 
@@ -104,7 +108,7 @@
                         data = tokens.pop()
                         r_idx = 0
                         for r_idx, r_token, r_value in \
-                            self.ruby_lexer.get_tokens_unprocessed(data):
+                                self.ruby_lexer.get_tokens_unprocessed(data):
                             yield r_idx + idx, r_token, r_value
                         idx += len(data)
                         state = 2
@@ -117,7 +121,7 @@
                         yield idx, Comment.Preproc, tag[0]
                         r_idx = 0
                         for r_idx, r_token, r_value in \
-                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
+                                self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                             yield idx + 1 + r_idx, r_token, r_value
                         idx += len(tag)
                         state = 0
@@ -161,22 +165,23 @@
             (r'(\{php\})(.*?)(\{/php\})',
              bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                       Comment.Preproc)),
-            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
+            (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
              bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
             (r'\{', Comment.Preproc, 'smarty')
         ],
         'smarty': [
             (r'\s+', Text),
+            (r'\{', Comment.Preproc, '#push'),
             (r'\}', Comment.Preproc, '#pop'),
-            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
-            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
-            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
+            (r'#[a-zA-Z_]\w*#', Name.Variable),
+            (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
+            (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
             (r'(true|false|null)\b', Keyword.Constant),
             (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
              r"0[xX][0-9a-fA-F]+[Ll]?", Number),
             (r'"(\\\\|\\"|[^"])*"', String.Double),
             (r"'(\\\\|\\'|[^'])*'", String.Single),
-            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
+            (r'[a-zA-Z_]\w*', Name.Attribute)
         ]
     }
 
@@ -203,11 +208,11 @@
 
     name = 'Velocity'
     aliases = ['velocity']
-    filenames = ['*.vm','*.fhtml']
+    filenames = ['*.vm', '*.fhtml']
 
     flags = re.MULTILINE | re.DOTALL
 
-    identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
+    identifier = r'[a-zA-Z_]\w*'
 
     tokens = {
         'root': [
@@ -229,10 +234,10 @@
             (r'(\.)(' + identifier + r')',
              bygroups(Punctuation, Name.Variable), '#push'),
             (r'\}', Punctuation, '#pop'),
-            (r'', Other, '#pop')
+            default('#pop')
         ],
         'directiveparams': [
-            (r'(&&|\|\||==?|!=?|[-<>+*%&\|\^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
+            (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
              Operator),
             (r'\[', Operator, 'rangeoperator'),
             (r'\b' + identifier + r'\b', Name.Function),
@@ -253,7 +258,9 @@
             (r"\b[0-9]+\b", Number),
             (r'(true|false|null)\b', Keyword.Constant),
             (r'\(', Punctuation, '#push'),
-            (r'\)', Punctuation, '#pop')
+            (r'\)', Punctuation, '#pop'),
+            (r'\[', Punctuation, '#push'),
+            (r'\]', Punctuation, '#pop'),
         ]
     }
 
@@ -265,39 +272,39 @@
             rv += 0.15
         if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
             rv += 0.15
-        if re.search(r'\$\{?[a-zA-Z_][a-zA-Z0-9_]*(\([^)]*\))?'
-                     r'(\.[a-zA-Z0-9_]+(\([^)]*\))?)*\}?', text):
+        if re.search(r'\$\{?[a-zA-Z_]\w*(\([^)]*\))?'
+                     r'(\.\w+(\([^)]*\))?)*\}?', text):
             rv += 0.01
         return rv
 
 
 class VelocityHtmlLexer(DelegatingLexer):
     """
-    Subclass of the `VelocityLexer` that highlights unlexer data
+    Subclass of the `VelocityLexer` that highlights unlexed data
     with the `HtmlLexer`.
 
     """
 
     name = 'HTML+Velocity'
     aliases = ['html+velocity']
-    alias_filenames = ['*.html','*.fhtml']
+    alias_filenames = ['*.html', '*.fhtml']
     mimetypes = ['text/html+velocity']
 
     def __init__(self, **options):
         super(VelocityHtmlLexer, self).__init__(HtmlLexer, VelocityLexer,
-                                              **options)
+                                                **options)
 
 
 class VelocityXmlLexer(DelegatingLexer):
     """
-    Subclass of the `VelocityLexer` that highlights unlexer data
+    Subclass of the `VelocityLexer` that highlights unlexed data
     with the `XmlLexer`.
 
     """
 
     name = 'XML+Velocity'
     aliases = ['xml+velocity']
-    alias_filenames = ['*.xml','*.vm']
+    alias_filenames = ['*.xml', '*.vm']
     mimetypes = ['application/xml+velocity']
 
     def __init__(self, **options):
@@ -307,7 +314,7 @@
     def analyse_text(text):
         rv = VelocityLexer.analyse_text(text) - 0.01
         if looks_like_xml(text):
-            rv += 0.5
+            rv += 0.4
         return rv
 
 
@@ -345,25 +352,25 @@
                       Text, Comment.Preproc, Text, Keyword, Text,
                       Comment.Preproc)),
             # filter blocks
-            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
+            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
              bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
              'block'),
-            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
+            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
              bygroups(Comment.Preproc, Text, Keyword), 'block'),
             (r'\{', Other)
         ],
         'varnames': [
-            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
+            (r'(\|)(\s*)([a-zA-Z_]\w*)',
              bygroups(Operator, Text, Name.Function)),
-            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
+            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
              bygroups(Keyword, Text, Keyword, Text, Name.Function)),
             (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
             (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
              r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
              Keyword),
             (r'(loop|block|super|forloop)\b', Name.Builtin),
-            (r'[a-zA-Z][a-zA-Z0-9_-]*', Name.Variable),
-            (r'\.[a-zA-Z0-9_]+', Name.Variable),
+            (r'[a-zA-Z][\w-]*', Name.Variable),
+            (r'\.\w+', Name.Variable),
             (r':?"(\\\\|\\"|[^"])*"', String.Double),
             (r":?'(\\\\|\\'|[^'])*'", String.Single),
             (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
@@ -399,7 +406,7 @@
     Generic `myghty templates`_ lexer. Code that isn't Myghty
     markup is yielded as `Token.Other`.
 
-    *New in Pygments 0.6.*
+    .. versionadded:: 0.6
 
     .. _myghty templates: http://www.myghty.org/
     """
@@ -444,10 +451,10 @@
 
 class MyghtyHtmlLexer(DelegatingLexer):
     """
-    Subclass of the `MyghtyLexer` that highlights unlexer data
+    Subclass of the `MyghtyLexer` that highlights unlexed data
     with the `HtmlLexer`.
 
-    *New in Pygments 0.6.*
+    .. versionadded:: 0.6
     """
 
     name = 'HTML+Myghty'
@@ -461,10 +468,10 @@
 
 class MyghtyXmlLexer(DelegatingLexer):
     """
-    Subclass of the `MyghtyLexer` that highlights unlexer data
+    Subclass of the `MyghtyLexer` that highlights unlexed data
     with the `XmlLexer`.
 
-    *New in Pygments 0.6.*
+    .. versionadded:: 0.6
     """
 
     name = 'XML+Myghty'
@@ -478,10 +485,10 @@
 
 class MyghtyJavascriptLexer(DelegatingLexer):
     """
-    Subclass of the `MyghtyLexer` that highlights unlexer data
+    Subclass of the `MyghtyLexer` that highlights unlexed data
     with the `JavascriptLexer`.
 
-    *New in Pygments 0.6.*
+    .. versionadded:: 0.6
     """
 
     name = 'JavaScript+Myghty'
@@ -497,10 +504,10 @@
 
 class MyghtyCssLexer(DelegatingLexer):
     """
-    Subclass of the `MyghtyLexer` that highlights unlexer data
+    Subclass of the `MyghtyLexer` that highlights unlexed data
     with the `CssLexer`.
 
-    *New in Pygments 0.6.*
+    .. versionadded:: 0.6
     """
 
     name = 'CSS+Myghty'
@@ -519,7 +526,7 @@
 
     .. _mason templates: http://www.masonhq.com/
 
-    *New in Pygments 1.4.*
+    .. versionadded:: 1.4
     """
     name = 'Mason'
     aliases = ['mason']
@@ -572,7 +579,7 @@
     Generic `mako templates`_ lexer. Code that isn't Mako
     markup is yielded as `Token.Other`.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
 
     .. _mako templates: http://www.makotemplates.org/
     """
@@ -591,11 +598,11 @@
             (r'(\s*)(##[^\n]*)(\n|\Z)',
              bygroups(Text, Comment.Preproc, Other)),
             (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
-            (r'(<%)([\w\.\:]+)',
+            (r'(<%)([\w.:]+)',
              bygroups(Comment.Preproc, Name.Builtin), 'tag'),
-            (r'(</%)([\w\.\:]+)(>)',
+            (r'(</%)([\w.:]+)(>)',
              bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
-            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
+            (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
             (r'(<%(?:!?))(.*?)(%>)(?s)',
              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
             (r'(\$\{)(.*?)(\})',
@@ -640,7 +647,7 @@
     Subclass of the `MakoLexer` that highlights unlexed data
     with the `HtmlLexer`.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
 
     name = 'HTML+Mako'
@@ -649,14 +656,15 @@
 
     def __init__(self, **options):
         super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
-                                              **options)
+                                            **options)
+
 
 class MakoXmlLexer(DelegatingLexer):
     """
-    Subclass of the `MakoLexer` that highlights unlexer data
+    Subclass of the `MakoLexer` that highlights unlexed data
     with the `XmlLexer`.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
 
     name = 'XML+Mako'
@@ -665,14 +673,15 @@
 
     def __init__(self, **options):
         super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
-                                             **options)
+                                           **options)
+
 
 class MakoJavascriptLexer(DelegatingLexer):
     """
-    Subclass of the `MakoLexer` that highlights unlexer data
+    Subclass of the `MakoLexer` that highlights unlexed data
     with the `JavascriptLexer`.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
 
     name = 'JavaScript+Mako'
@@ -683,14 +692,15 @@
 
     def __init__(self, **options):
         super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
-                                                    MakoLexer, **options)
+                                                  MakoLexer, **options)
+
 
 class MakoCssLexer(DelegatingLexer):
     """
-    Subclass of the `MakoLexer` that highlights unlexer data
+    Subclass of the `MakoLexer` that highlights unlexed data
     with the `CssLexer`.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
 
     name = 'CSS+Mako'
@@ -699,7 +709,7 @@
 
     def __init__(self, **options):
         super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
-                                             **options)
+                                           **options)
 
 
 # Genshi and Cheetah lexers courtesy of Matt Good.
@@ -743,7 +753,7 @@
              (bygroups(Comment.Preproc, using(CheetahPythonLexer),
                        Comment.Preproc))),
             # TODO support other Python syntax like $foo['bar']
-            (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
+            (r'(\$)([a-zA-Z_][\w.]*\w)',
              bygroups(Comment.Preproc, using(CheetahPythonLexer))),
             (r'(\$\{!?)(.*?)(\})(?s)',
              bygroups(Comment.Preproc, using(CheetahPythonLexer),
@@ -751,7 +761,7 @@
             (r'''(?sx)
                 (.+?)               # anything, followed by:
                 (?:
-                 (?=[#][#a-zA-Z]*) |   # an eval comment
+                 (?=\#[#a-zA-Z]*) | # an eval comment
                  (?=\$[a-zA-Z_{]) | # a substitution
                  \Z                 # end of string
                 )
@@ -763,12 +773,12 @@
 
 class CheetahHtmlLexer(DelegatingLexer):
     """
-    Subclass of the `CheetahLexer` that highlights unlexer data
+    Subclass of the `CheetahLexer` that highlights unlexed data
     with the `HtmlLexer`.
     """
 
     name = 'HTML+Cheetah'
-    aliases = ['html+cheetah', 'html+spitfire']
+    aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
     mimetypes = ['text/html+cheetah', 'text/html+spitfire']
 
     def __init__(self, **options):
@@ -778,7 +788,7 @@
 
 class CheetahXmlLexer(DelegatingLexer):
     """
-    Subclass of the `CheetahLexer` that highlights unlexer data
+    Subclass of the `CheetahLexer` that highlights unlexed data
     with the `XmlLexer`.
     """
 
@@ -793,7 +803,7 @@
 
 class CheetahJavascriptLexer(DelegatingLexer):
     """
-    Subclass of the `CheetahLexer` that highlights unlexer data
+    Subclass of the `CheetahLexer` that highlights unlexed data
     with the `JavascriptLexer`.
     """
 
@@ -824,11 +834,11 @@
 
     tokens = {
         'root': [
-            (r'[^#\$\s]+', Other),
+            (r'[^#$\s]+', Other),
             (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
             (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
             include('variable'),
-            (r'[#\$\s]', Other),
+            (r'[#$\s]', Other),
         ],
         'directive': [
             (r'\n', Text, '#pop'),
@@ -841,7 +851,7 @@
         'variable': [
             (r'(?<!\$)(\$\{)(.+?)(\})',
              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
-            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
+            (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
              Name.Variable),
         ]
     }
@@ -857,7 +867,7 @@
 
     tokens = {
         'root': [
-            (r'[^<\$]+', Other),
+            (r'[^<$]+', Other),
             (r'(<\?python)(.*?)(\?>)',
              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
             # yield style and script blocks as Other
@@ -865,11 +875,11 @@
             (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
             (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
             include('variable'),
-            (r'[<\$]', Other),
+            (r'[<$]', Other),
         ],
         'pytag': [
             (r'\s+', Text),
-            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
+            (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
             (r'/?\s*>', Name.Tag, '#pop'),
         ],
         'pyattr': [
@@ -879,8 +889,8 @@
         ],
         'tag': [
             (r'\s+', Text),
-            (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
-            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
+            (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
+            (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
             (r'/?\s*>', Name.Tag, '#pop'),
         ],
         'attr': [
@@ -905,7 +915,7 @@
         'variable': [
             (r'(?<!\$)(\$\{)(.+?)(\})',
              bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
-            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
+            (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
              Name.Variable),
         ]
     }
@@ -1112,7 +1122,7 @@
 
 class XmlPhpLexer(DelegatingLexer):
     """
-    Subclass of `PhpLexer` that higlights unhandled data with the `XmlLexer`.
+    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
     """
 
     name = 'XML+PHP'
@@ -1170,7 +1180,7 @@
 
 class HtmlSmartyLexer(DelegatingLexer):
     """
-    Subclass of the `SmartyLexer` that highighlights unlexed data with the
+    Subclass of the `SmartyLexer` that highlights unlexed data with the
     `HtmlLexer`.
 
     Nested Javascript and CSS is highlighted too.
@@ -1253,14 +1263,14 @@
 
 class HtmlDjangoLexer(DelegatingLexer):
     """
-    Subclass of the `DjangoLexer` that highighlights unlexed data with the
+    Subclass of the `DjangoLexer` that highlights unlexed data with the
     `HtmlLexer`.
 
     Nested Javascript and CSS is highlighted too.
     """
 
     name = 'HTML+Django/Jinja'
-    aliases = ['html+django', 'html+jinja']
+    aliases = ['html+django', 'html+jinja', 'htmldjango']
     alias_filenames = ['*.html', '*.htm', '*.xhtml']
     mimetypes = ['text/html+django', 'text/html+jinja']
 
@@ -1343,7 +1353,7 @@
     Base for the `JspLexer`. Yields `Token.Other` for area outside of
     JSP tags.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
 
     tokens = {
@@ -1367,7 +1377,7 @@
     """
     Lexer for Java Server Pages.
 
-    *New in Pygments 0.7.*
+    .. versionadded:: 0.7
     """
     name = 'Java Server Page'
     aliases = ['jsp']
@@ -1390,7 +1400,7 @@
     """
     For files using the Evoque templating system.
 
-    *New in Pygments 1.1.*
+    .. versionadded:: 1.1
     """
     name = 'Evoque'
     aliases = ['evoque']
@@ -1412,7 +1422,7 @@
                       String, Punctuation)),
             # directives: evoque, overlay
             # see doc for handling first name arg: /directives/evoque/
-            #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
+            # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
             # should be using(PythonLexer), not passed out as String
             (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
              r'(.*?)((?(4)%)\})',
@@ -1438,12 +1448,13 @@
         ],
     }
 
+
 class EvoqueHtmlLexer(DelegatingLexer):
     """
     Subclass of the `EvoqueLexer` that highlights unlexed data with the
     `HtmlLexer`.
 
-    *New in Pygments 1.1.*
+    .. versionadded:: 1.1
     """
     name = 'HTML+Evoque'
     aliases = ['html+evoque']
@@ -1454,12 +1465,13 @@
         super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
                                               **options)
 
+
 class EvoqueXmlLexer(DelegatingLexer):
     """
     Subclass of the `EvoqueLexer` that highlights unlexed data with the
     `XmlLexer`.
 
-    *New in Pygments 1.1.*
+    .. versionadded:: 1.1
     """
     name = 'XML+Evoque'
     aliases = ['xml+evoque']
@@ -1470,6 +1482,7 @@
         super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
                                              **options)
 
+
 class ColdfusionLexer(RegexLexer):
     """
     Coldfusion statements
@@ -1478,26 +1491,33 @@
     aliases = ['cfs']
     filenames = []
     mimetypes = []
-    flags = re.IGNORECASE | re.MULTILINE
+    flags = re.IGNORECASE
 
     tokens = {
         'root': [
-            (r'//.*', Comment),
+            (r'//.*?\n', Comment.Single),
+            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
             (r'\+\+|--', Operator),
             (r'[-+*/^&=!]', Operator),
-            (r'<=|>=|<|>', Operator),
+            (r'<=|>=|<|>|==', Operator),
             (r'mod\b', Operator),
             (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
             (r'\|\||&&', Operator),
+            (r'\?', Operator),
             (r'"', String.Double, 'string'),
             # There is a special rule for allowing html in single quoted
             # strings, evidently.
             (r"'.*?'", String.Single),
             (r'\d+', Number),
-            (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
-            (r'([A-Za-z_$][A-Za-z0-9_.]*)(\s*)(\()',
+            (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
+             r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
+             r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
+            (r'(true|false|null)\b', Keyword.Constant),
+            (r'(application|session|client|cookie|super|this|variables|arguments)\b',
+             Name.Constant),
+            (r'([a-z_$][\w.]*)(\s*)(\()',
              bygroups(Name.Function, Text, Punctuation)),
-            (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
+            (r'[a-z_$][\w.]*', Name.Variable),
             (r'[()\[\]{};:,.\\]', Punctuation),
             (r'\s+', Text),
         ],
@@ -1527,7 +1547,7 @@
             (r'<[^<>]*', Other),
         ],
         'tags': [
-            (r'(?s)<!---.*?--->', Comment.Multiline),
+            (r'<!---', Comment.Multiline, 'cfcomment'),
             (r'(?s)<!--.*?-->', Comment),
             (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
             (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
@@ -1543,12 +1563,17 @@
             (r'[^#<]+', Other),
             (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
                                       Punctuation)),
-            #(r'<cfoutput.*?>', Name.Builtin, '#push'),
+            # (r'<cfoutput.*?>', Name.Builtin, '#push'),
             (r'</cfoutput.*?>', Name.Builtin, '#pop'),
             include('tags'),
             (r'(?s)<[^<>]*', Other),
             (r'#', Other),
         ],
+        'cfcomment': [
+            (r'<!---', Comment.Multiline, '#push'),
+            (r'--->', Comment.Multiline, '#pop'),
+            (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
+        ],
     }
 
 
@@ -1558,7 +1583,7 @@
     """
     name = 'Coldfusion HTML'
     aliases = ['cfm']
-    filenames = ['*.cfm', '*.cfml', '*.cfc']
+    filenames = ['*.cfm', '*.cfml']
     mimetypes = ['application/x-coldfusion']
 
     def __init__(self, **options):
@@ -1566,11 +1591,27 @@
                                                   **options)
 
 
+class ColdfusionCFCLexer(DelegatingLexer):
+    """
+    Coldfusion markup/script components
+
+    .. versionadded:: 2.0
+    """
+    name = 'Coldfusion CFC'
+    aliases = ['cfc']
+    filenames = ['*.cfc']
+    mimetypes = []
+
+    def __init__(self, **options):
+        super(ColdfusionCFCLexer, self).__init__(ColdfusionHtmlLexer, ColdfusionLexer,
+                                                 **options)
+
+
 class SspLexer(DelegatingLexer):
     """
     Lexer for Scalate Server Pages.
 
-    *New in Pygments 1.4.*
+    .. versionadded:: 1.4
     """
     name = 'Scalate Server Page'
     aliases = ['ssp']
@@ -1596,7 +1637,7 @@
     Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
     code blocks.
 
-    *New in Pygments 1.5.*
+    .. versionadded:: 1.5
     """
 
     tokens = {
@@ -1604,20 +1645,20 @@
             (r'<%\S?', Keyword, 'sec'),
             (r'[^<]+', Other),
             (r'<', Other),
-            ],
+        ],
         'sec': [
             (r'%>', Keyword, '#pop'),
             # note: '\w\W' != '.' without DOTALL.
             (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
-            ],
-        }
+        ],
+    }
 
 
 class TeaTemplateLexer(DelegatingLexer):
     """
     Lexer for `Tea Templates <http://teatrove.org/>`_.
 
-    *New in Pygments 1.5.*
+    .. versionadded:: 1.5
     """
     name = 'Tea'
     aliases = ['tea']
@@ -1644,7 +1685,7 @@
 
     Nested JavaScript and CSS is also highlighted.
 
-    *New in Pygments 1.6.*
+    .. versionadded:: 1.6
     """
 
     name = 'HTML+Lasso'
@@ -1659,10 +1700,8 @@
         super(LassoHtmlLexer, self).__init__(HtmlLexer, LassoLexer, **options)
 
     def analyse_text(text):
-        rv = LassoLexer.analyse_text(text)
-        if re.search(r'<\w+>', text, re.I):
-            rv += 0.2
-        if html_doctype_matches(text):
+        rv = LassoLexer.analyse_text(text) - 0.01
+        if html_doctype_matches(text):  # same as HTML lexer
             rv += 0.5
         return rv
 
@@ -1672,7 +1711,7 @@
     Subclass of the `LassoLexer` which highlights unhandled data with the
     `XmlLexer`.
 
-    *New in Pygments 1.6.*
+    .. versionadded:: 1.6
     """
 
     name = 'XML+Lasso'
@@ -1685,9 +1724,9 @@
         super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
 
     def analyse_text(text):
-        rv = LassoLexer.analyse_text(text)
+        rv = LassoLexer.analyse_text(text) - 0.01
         if looks_like_xml(text):
-            rv += 0.5
+            rv += 0.4
         return rv
 
 
@@ -1696,7 +1735,7 @@
     Subclass of the `LassoLexer` which highlights unhandled data with the
     `CssLexer`.
 
-    *New in Pygments 1.6.*
+    .. versionadded:: 1.6
     """
 
     name = 'CSS+Lasso'
@@ -1709,8 +1748,8 @@
         super(LassoCssLexer, self).__init__(CssLexer, LassoLexer, **options)
 
     def analyse_text(text):
-        rv = LassoLexer.analyse_text(text)
-        if re.search(r'\w+:.+;', text):
+        rv = LassoLexer.analyse_text(text) - 0.05
+        if re.search(r'\w+:.+?;', text):
             rv += 0.1
         if 'padding:' in text:
             rv += 0.1
@@ -1722,7 +1761,7 @@
     Subclass of the `LassoLexer` which highlights unhandled data with the
     `JavascriptLexer`.
 
-    *New in Pygments 1.6.*
+    .. versionadded:: 1.6
     """
 
     name = 'JavaScript+Lasso'
@@ -1738,7 +1777,398 @@
                                                    **options)
 
     def analyse_text(text):
-        rv = LassoLexer.analyse_text(text)
+        rv = LassoLexer.analyse_text(text) - 0.05
         if 'function' in text:
             rv += 0.2
         return rv
+
+
class HandlebarsLexer(RegexLexer):
    """
    Generic `handlebars <http://handlebarsjs.com/>` template lexer.

    Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
    Everything else is left for a delegating lexer.

    .. versionadded:: 2.0
    """

    name = "Handlebars"
    aliases = ['handlebars']

    tokens = {
        'root': [
            # everything up to the next brace is handed to the delegating lexer
            (r'[^{]+', Other),

            # comments: non-greedy, so `{{!c}}{{v}}` on one line does not
            # swallow the second tag (the greedy `.*` did)
            (r'\{\{!.*?\}\}', Comment),

            (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
            (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
        ],

        'tag': [
            (r'\s+', Text),
            (r'\}\}\}', Comment.Special, '#pop'),
            (r'\}\}', Comment.Preproc, '#pop'),

            # Handlebars built-in helpers, optionally prefixed with # (block
            # open) or / (block close)
            (r'([#/]*)(each|if|unless|else|with|log|in)', bygroups(Keyword,
             Keyword)),

            # General {{#block}} / {{/block}}
            (r'([#/])([\w-]+)', bygroups(Name.Function, Name.Function)),

            # {{opt=something}}
            (r'([\w-]+)(=)', bygroups(Name.Attribute, Operator)),

            # borrowed from DjangoLexer
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z][\w-]*', Name.Variable),
            (r'\.[\w-]+', Name.Variable),
            # numbers: the original exponent part `(eE[+-][0-9])?` matched the
            # literal characters "eE"; use a character class and allow a
            # multi-digit exponent
            (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ]
    }
+
+
class HandlebarsHtmlLexer(DelegatingLexer):
    """
    Subclass of the `HandlebarsLexer` that highlights unlexed data with the
    `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML+Handlebars"
    aliases = ["html+handlebars"]
    filenames = ['*.handlebars', '*.hbs']
    mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']

    def __init__(self, **options):
        # HtmlLexer handles whatever HandlebarsLexer emits as Other
        super(HandlebarsHtmlLexer, self).__init__(HtmlLexer, HandlebarsLexer,
                                                  **options)
+
+
class YamlJinjaLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `YamlLexer`.

    Commonly used in Saltstack salt states.

    .. versionadded:: 2.0
    """

    name = 'YAML+Jinja'
    aliases = ['yaml+jinja', 'salt', 'sls']
    filenames = ['*.sls']
    mimetypes = ['text/x-yaml+jinja', 'text/x-sls']

    def __init__(self, **options):
        # YamlLexer handles whatever the Jinja/Django lexer leaves untouched
        super(YamlJinjaLexer, self).__init__(YamlLexer, DjangoLexer,
                                             **options)
+
+
class LiquidLexer(RegexLexer):
    """
    Lexer for `Liquid templates
    <http://www.rubydoc.info/github/Shopify/liquid>`_.

    .. versionadded:: 2.0
    """
    name = 'liquid'
    aliases = ['liquid']
    filenames = ['*.liquid']

    tokens = {
        'root': [
            (r'[^{]+', Text),
            # tags and block tags
            (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
            # output tags
            (r'(\{\{)(\s*)([^\s}]+)',
             bygroups(Punctuation, Whitespace, using(this, state='generic')),
             'output'),
            (r'\{', Text)
        ],

        'tag-or-block': [
            # builtin logic blocks
            (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
            (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
             combined('end-of-block', 'whitespace', 'generic')),
            (r'(else)(\s*)(%\})',
             bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),

            # other builtin blocks
            (r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, using(this, state='variable'),
                      Whitespace, Punctuation), '#pop'),
            (r'(comment)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
            (r'(raw)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),

            # end of block
            (r'(end(case|unless|if))(\s*)(%\})',
             bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
            (r'(end([^\s%]+))(\s*)(%\})',
             bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),

            # builtin tags (assign and include are handled together with usual tags)
            (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
             bygroups(Name.Tag, Whitespace,
                      using(this, state='generic'), Punctuation, Whitespace),
             'variable-tag-markup'),

            # other tags or blocks
            (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
        ],

        'output': [
            include('whitespace'),
            (r'\}\}', Punctuation, '#pop'),  # end of output
            # (raw string: '\}' in a plain string is an invalid escape
            # sequence and warns on modern Python)

            (r'\|', Punctuation, 'filters')
        ],

        'filters': [
            include('whitespace'),
            (r'\}\}', Punctuation, ('#pop', '#pop')),  # end of filters and output

            (r'([^\s|:]+)(:?)(\s*)',
             bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
        ],

        'filter-markup': [
            (r'\|', Punctuation, '#pop'),
            include('end-of-tag'),
            include('default-param-markup')
        ],

        'condition': [
            include('end-of-block'),
            include('whitespace'),

            (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
             bygroups(using(this, state='generic'), Whitespace, Operator,
                      Whitespace, using(this, state='generic'), Whitespace,
                      Punctuation)),
            (r'\b!', Operator),
            (r'\bnot\b', Operator.Word),
            (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
             bygroups(using(this, state='generic'), Whitespace, Operator.Word,
                      Whitespace, using(this, state='generic'))),

            include('generic'),
            include('whitespace')
        ],

        'generic-value': [
            include('generic'),
            include('end-at-whitespace')
        ],

        'operator': [
            (r'(\s*)((=|!|>|<)=?)(\s*)',
             bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
            (r'(\s*)(\bcontains\b)(\s*)',
             bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
        ],

        'end-of-tag': [
            (r'\}\}', Punctuation, '#pop')
        ],

        'end-of-block': [
            (r'%\}', Punctuation, ('#pop', '#pop'))
        ],

        'end-at-whitespace': [
            (r'\s+', Whitespace, '#pop')
        ],

        # states for unknown markup
        'param-markup': [
            include('whitespace'),
            # params with colons or equals
            (r'([^\s=:]+)(\s*)(=|:)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            # explicit variables
            # NOTE(review): `[^\s}]` matches a single character, so only
            # one-character variable names are recognized here -- confirm
            # whether `[^\s}]+` was intended
            (r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
             bygroups(Punctuation, Whitespace, using(this, state='variable'),
                      Whitespace, Punctuation)),

            include('string'),
            include('number'),
            include('keyword'),
            (r',', Punctuation)
        ],

        'default-param-markup': [
            include('param-markup'),
            (r'.', Text)  # fallback for switches / variables / un-quoted strings / ...
        ],

        'variable-param-markup': [
            include('param-markup'),
            include('variable'),
            (r'.', Text)  # fallback
        ],

        'tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('default-param-markup')
        ],

        'variable-tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('variable-param-markup')
        ],

        # states for different values types
        'keyword': [
            (r'\b(false|true)\b', Keyword.Constant)
        ],

        'variable': [
            (r'[a-zA-Z_]\w*', Name.Variable),
            (r'(?<=\w)\.(?=\w)', Punctuation)
        ],

        'string': [
            (r"'[^']*'", String.Single),
            (r'"[^"]*"', String.Double)
        ],

        'number': [
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer)
        ],

        'generic': [  # decides for variable, string, keyword or number
            include('keyword'),
            include('string'),
            include('number'),
            include('variable')
        ],

        'whitespace': [
            (r'[ \t]+', Whitespace)
        ],

        # states for builtin blocks
        'comment': [
            (r'(\{%)(\s*)(endcomment)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), ('#pop', '#pop')),
            (r'.', Comment)
        ],

        'raw': [
            (r'[^{]+', Text),
            (r'(\{%)(\s*)(endraw)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), '#pop'),
            (r'\{', Text)
        ],
    }
+
+
class TwigLexer(RegexLexer):
    """
    `Twig <http://twig.sensiolabs.org/>`_ template lexer.

    It just highlights Twig code between the preprocessor directives,
    other data is left untouched by the lexer.

    .. versionadded:: 2.0
    """

    name = 'Twig'
    aliases = ['twig']
    mimetypes = ['application/x-twig']

    flags = re.M | re.S

    # Note that a backslash is included in the following two patterns
    # PHP uses a backslash as a namespace separator
    _ident_char = r'[\\\w-]|[^\x00-\x7f]'
    _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
    _ident_end = r'(?:' + _ident_char + ')*'
    _ident_inner = _ident_begin + _ident_end

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # twig comments
            (r'\{\#.*?\#\}', Comment),
            # raw twig blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'tag'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'tag'),
            (r'\{', Other),
        ],
        'varnames': [
            (r'(\|)(\s*)(%s)' % _ident_inner,
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
            # keywords and builtin tests; the original was missing the `|`
            # at each string-concatenation seam, which produced bogus
            # alternatives like `isif` and `importconstant` and never
            # matched `is`, `if`, `import`, `sameas`, `matches`, ...
            (r'(in|not|and|b-and|or|b-or|b-xor|is|'
             r'if|elseif|else|import|'
             r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|'
             r'matches|starts\s+with|ends\s+with)\b',
             Keyword),
            (r'(loop|block|parent)\b', Name.Builtin),
            (_ident_inner, Name.Variable),
            (r'\.' + _ident_inner, Name.Variable),
            (r'\.[0-9]+', Number),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
            # numbers: the original exponent part `(eE[+-][0-9])?` matched
            # the literal characters "eE"; use a character class and allow
            # a multi-digit exponent
            (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'tag': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation),
        ],
    }
+
+
class TwigHtmlLexer(DelegatingLexer):
    """
    Subclass of the `TwigLexer` that highlights unlexed data with the
    `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML+Twig"
    aliases = ["html+twig"]
    filenames = ['*.twig']
    mimetypes = ['text/html+twig']

    def __init__(self, **options):
        # HtmlLexer handles whatever TwigLexer emits as Other
        super(TwigHtmlLexer, self).__init__(HtmlLexer, TwigLexer,
                                            **options)

eric ide

mercurial