eric6/ThirdParty/Pygments/pygments/lexers/templates.py

1 # -*- coding: utf-8 -*-
2 """
3 pygments.lexers.templates
4 ~~~~~~~~~~~~~~~~~~~~~~~~~
5
6 Lexers for various template engines' markup.
7
8 :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
9 :license: BSD, see LICENSE for details.
10 """
11
12 import re
13
14 from pygments.lexers.html import HtmlLexer, XmlLexer
15 from pygments.lexers.javascript import JavascriptLexer, LassoLexer
16 from pygments.lexers.css import CssLexer
17 from pygments.lexers.php import PhpLexer
18 from pygments.lexers.python import PythonLexer
19 from pygments.lexers.perl import PerlLexer
20 from pygments.lexers.jvm import JavaLexer, TeaLangLexer
21 from pygments.lexers.data import YamlLexer
22 from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
23 include, using, this, default, combined
24 from pygments.token import Error, Punctuation, Whitespace, \
25 Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
26 from pygments.util import html_doctype_matches, looks_like_xml
27
28 __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
29 'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
30 'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
31 'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
32 'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
33 'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
34 'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
35 'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
36 'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
37 'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
38 'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
39 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
40 'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
41 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
42 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
43 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
44 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
45 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
46 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
47 'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer']
48
49
50 class ErbLexer(Lexer):
51 """
52 Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
53 lexer.
54
55 Just highlights Ruby code between the preprocessor directives; other data
56 is left untouched by the lexer.
57
58 All options are also forwarded to the `RubyLexer`.
59 """
60
61 name = 'ERB'
62 aliases = ['erb']
63 mimetypes = ['application/x-ruby-templating']
64
65 _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)
66
67 def __init__(self, **options):
68 from pygments.lexers.ruby import RubyLexer
69 self.ruby_lexer = RubyLexer(**options)
70 Lexer.__init__(self, **options)
71
72 def get_tokens_unprocessed(self, text):
73 """
74 Since ERB doesn't allow "<%" and other tags inside of Ruby
75 blocks, we can use a simple split-based approach here; it only
76 fails on (invalid) input that embeds such tags.
77 """
78 tokens = self._block_re.split(text)
79 tokens.reverse()
80 state = idx = 0
81 try:
82 while True:
83 # text
84 if state == 0:
85 val = tokens.pop()
86 yield idx, Other, val
87 idx += len(val)
88 state = 1
89 # block starts
90 elif state == 1:
91 tag = tokens.pop()
92 # literals
93 if tag in ('<%%', '%%>'):
94 yield idx, Other, tag
95 idx += 3
96 state = 0
97 # comment
98 elif tag == '<%#':
99 yield idx, Comment.Preproc, tag
100 val = tokens.pop()
101 yield idx + 3, Comment, val
102 idx += 3 + len(val)
103 state = 2
104 # blocks or output
105 elif tag in ('<%', '<%=', '<%-'):
106 yield idx, Comment.Preproc, tag
107 idx += len(tag)
108 data = tokens.pop()
109 r_idx = 0
110 for r_idx, r_token, r_value in \
111 self.ruby_lexer.get_tokens_unprocessed(data):
112 yield r_idx + idx, r_token, r_value
113 idx += len(data)
114 state = 2
115 elif tag in ('%>', '-%>'):
116 yield idx, Error, tag
117 idx += len(tag)
118 state = 0
119 # % raw ruby statements
120 else:
121 yield idx, Comment.Preproc, tag[0]
122 r_idx = 0
123 for r_idx, r_token, r_value in \
124 self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
125 yield idx + 1 + r_idx, r_token, r_value
126 idx += len(tag)
127 state = 0
128 # block ends
129 elif state == 2:
130 tag = tokens.pop()
131 if tag not in ('%>', '-%>'):
132 yield idx, Other, tag
133 else:
134 yield idx, Comment.Preproc, tag
135 idx += len(tag)
136 state = 0
137 except IndexError:
138 return
139
140 def analyse_text(text):
141 if '<%' in text and '%>' in text:
142 return 0.4
143
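# Illustrative usage sketch (editor's addition, not part of the original
# module): the split approach above yields Ruby tokens between the tags and
# ``Token.Other`` for everything else, which delegating lexers such as
# `RhtmlLexer` re-lex.
#
#   >>> from pygments.lexers.templates import ErbLexer
#   >>> list(ErbLexer().get_tokens('Hello <%= @name %>!'))[0]
#   (Token.Other, 'Hello ')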
144
145 class SmartyLexer(RegexLexer):
146 """
147 Generic `Smarty <http://smarty.php.net/>`_ template lexer.
148
149 Just highlights Smarty code between the preprocessor directives; other
150 data is left untouched by the lexer.
151 """
152
153 name = 'Smarty'
154 aliases = ['smarty']
155 filenames = ['*.tpl']
156 mimetypes = ['application/x-smarty']
157
158 flags = re.MULTILINE | re.DOTALL
159
160 tokens = {
161 'root': [
162 (r'[^{]+', Other),
163 (r'(\{)(\*.*?\*)(\})',
164 bygroups(Comment.Preproc, Comment, Comment.Preproc)),
165 (r'(\{php\})(.*?)(\{/php\})',
166 bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
167 Comment.Preproc)),
168 (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
169 bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
170 (r'\{', Comment.Preproc, 'smarty')
171 ],
172 'smarty': [
173 (r'\s+', Text),
174 (r'\{', Comment.Preproc, '#push'),
175 (r'\}', Comment.Preproc, '#pop'),
176 (r'#[a-zA-Z_]\w*#', Name.Variable),
177 (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
178 (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
179 (r'(true|false|null)\b', Keyword.Constant),
180 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
181 r"0[xX][0-9a-fA-F]+[Ll]?", Number),
182 (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
183 (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
184 (r'[a-zA-Z_]\w*', Name.Attribute)
185 ]
186 }
187
188 def analyse_text(text):
189 rv = 0.0
190 if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
191 rv += 0.15
192 if re.search(r'\{include\s+file=.*?\}', text):
193 rv += 0.15
194 if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
195 rv += 0.15
196 if re.search(r'\{\$.*?\}', text):
197 rv += 0.01
198 return rv
199
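# Sketch of how this score is consumed (illustrative; the filename is made
# up): ``guess_lexer`` calls each lexer's ``analyse_text`` and picks the
# highest score, so an ``{include file=...}`` tag alone nudges the guess
# towards Smarty.
#
#   >>> from pygments.lexers.templates import SmartyLexer
#   >>> SmartyLexer.analyse_text('{include file="header.tpl"}')
#   0.15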
200
201 class VelocityLexer(RegexLexer):
202 """
203 Generic `Velocity <http://velocity.apache.org/>`_ template lexer.
204
205 Just highlights Velocity directives and variable references; other
206 data is left untouched by the lexer.
207 """
208
209 name = 'Velocity'
210 aliases = ['velocity']
211 filenames = ['*.vm', '*.fhtml']
212
213 flags = re.MULTILINE | re.DOTALL
214
215 identifier = r'[a-zA-Z_]\w*'
216
217 tokens = {
218 'root': [
219 (r'[^{#$]+', Other),
220 (r'(#)(\*.*?\*)(#)',
221 bygroups(Comment.Preproc, Comment, Comment.Preproc)),
222 (r'(##)(.*?$)',
223 bygroups(Comment.Preproc, Comment)),
224 (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
225 bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
226 'directiveparams'),
227 (r'(#\{?)(' + identifier + r')(\}|\b)',
228 bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
229 (r'\$!?\{?', Punctuation, 'variable')
230 ],
231 'variable': [
232 (identifier, Name.Variable),
233 (r'\(', Punctuation, 'funcparams'),
234 (r'(\.)(' + identifier + r')',
235 bygroups(Punctuation, Name.Variable), '#push'),
236 (r'\}', Punctuation, '#pop'),
237 default('#pop')
238 ],
239 'directiveparams': [
240 (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
241 Operator),
242 (r'\[', Operator, 'rangeoperator'),
243 (r'\b' + identifier + r'\b', Name.Function),
244 include('funcparams')
245 ],
246 'rangeoperator': [
247 (r'\.\.', Operator),
248 include('funcparams'),
249 (r'\]', Operator, '#pop')
250 ],
251 'funcparams': [
252 (r'\$!?\{?', Punctuation, 'variable'),
253 (r'\s+', Text),
254 (r'[,:]', Punctuation),
255 (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
256 (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
257 (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
258 (r"\b[0-9]+\b", Number),
259 (r'(true|false|null)\b', Keyword.Constant),
260 (r'\(', Punctuation, '#push'),
261 (r'\)', Punctuation, '#pop'),
262 (r'\{', Punctuation, '#push'),
263 (r'\}', Punctuation, '#pop'),
264 (r'\[', Punctuation, '#push'),
265 (r'\]', Punctuation, '#pop'),
266 ]
267 }
268
269 def analyse_text(text):
270 rv = 0.0
271 if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text):
272 rv += 0.25
273 if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text):
274 rv += 0.15
275 if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text):
276 rv += 0.15
277 if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
278 r'(\.\w+(\([^)]*\))?)*\}?', text):
279 rv += 0.01
280 return rv
281
282
283 class VelocityHtmlLexer(DelegatingLexer):
284 """
285 Subclass of the `VelocityLexer` that highlights unlexed data
286 with the `HtmlLexer`.
287
288 """
289
290 name = 'HTML+Velocity'
291 aliases = ['html+velocity']
292 alias_filenames = ['*.html', '*.fhtml']
293 mimetypes = ['text/html+velocity']
294
295 def __init__(self, **options):
296 super().__init__(HtmlLexer, VelocityLexer, **options)
297
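# How the delegation works (a sketch based on ``DelegatingLexer``'s
# documented behaviour): the Velocity lexer runs first, all of its ``Other``
# output is concatenated and re-lexed as HTML, and both token streams are
# merged back in document order.
#
#   >>> from pygments.lexers.templates import VelocityHtmlLexer
#   >>> vals = [v for _, v in VelocityHtmlLexer().get_tokens('<b>$name</b>')]
#   >>> '$' in vals and 'name' in vals
#   True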
298
299 class VelocityXmlLexer(DelegatingLexer):
300 """
301 Subclass of the `VelocityLexer` that highlights unlexed data
302 with the `XmlLexer`.
303
304 """
305
306 name = 'XML+Velocity'
307 aliases = ['xml+velocity']
308 alias_filenames = ['*.xml', '*.vm']
309 mimetypes = ['application/xml+velocity']
310
311 def __init__(self, **options):
312 super().__init__(XmlLexer, VelocityLexer, **options)
313
314 def analyse_text(text):
315 rv = VelocityLexer.analyse_text(text) - 0.01
316 if looks_like_xml(text):
317 rv += 0.4
318 return rv
319
320
321 class DjangoLexer(RegexLexer):
322 """
323 Generic `django <http://www.djangoproject.com/documentation/templates/>`_
324 and `jinja <https://jinja.pocoo.org/jinja/>`_ template lexer.
325
326 It just highlights Django/Jinja code between the preprocessor directives;
327 other data is left untouched by the lexer.
328 """
329
330 name = 'Django/Jinja'
331 aliases = ['django', 'jinja']
332 mimetypes = ['application/x-django-templating', 'application/x-jinja']
333
334 flags = re.M | re.S
335
336 tokens = {
337 'root': [
338 (r'[^{]+', Other),
339 (r'\{\{', Comment.Preproc, 'var'),
340 # jinja/django comments
341 (r'\{#.*?#\}', Comment),
342 # django comments
343 (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
344 r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
345 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
346 Comment, Comment.Preproc, Text, Keyword, Text,
347 Comment.Preproc)),
348 # raw jinja blocks
349 (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
350 r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
351 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
352 Text, Comment.Preproc, Text, Keyword, Text,
353 Comment.Preproc)),
354 # filter blocks
355 (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
356 bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
357 'block'),
358 (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
359 bygroups(Comment.Preproc, Text, Keyword), 'block'),
360 (r'\{', Other)
361 ],
362 'varnames': [
363 (r'(\|)(\s*)([a-zA-Z_]\w*)',
364 bygroups(Operator, Text, Name.Function)),
365 (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
366 bygroups(Keyword, Text, Keyword, Text, Name.Function)),
367 (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
368 (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
369 r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
370 Keyword),
371 (r'(loop|block|super|forloop)\b', Name.Builtin),
372 (r'[a-zA-Z_][\w-]*', Name.Variable),
373 (r'\.\w+', Name.Variable),
374 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
375 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
376 (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
377 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
378 r"0[xX][0-9a-fA-F]+[Ll]?", Number),
379 ],
380 'var': [
381 (r'\s+', Text),
382 (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
383 include('varnames')
384 ],
385 'block': [
386 (r'\s+', Text),
387 (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
388 include('varnames'),
389 (r'.', Punctuation)
390 ]
391 }
392
393 def analyse_text(text):
394 rv = 0.0
395 if re.search(r'\{%\s*(block|extends)', text) is not None:
396 rv += 0.4
397 if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
398 rv += 0.1
399 if re.search(r'\{\{.*?\}\}', text) is not None:
400 rv += 0.1
401 return rv
402
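# Quick illustration (editor's sketch): ``{{`` enters the ``var`` state and
# ``{%`` the ``block`` state, so the delimiters come out as Comment.Preproc
# with the lexed expression in between.
#
#   >>> from pygments.lexers.templates import DjangoLexer
#   >>> [v for _, v in DjangoLexer().get_tokens('{{ user.name }}')][:2]
#   ['{{', ' ']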
403
404 class MyghtyLexer(RegexLexer):
405 """
406 Generic `myghty templates`_ lexer. Code that isn't Myghty
407 markup is yielded as `Token.Other`.
408
409 .. versionadded:: 0.6
410
411 .. _myghty templates: http://www.myghty.org/
412 """
413
414 name = 'Myghty'
415 aliases = ['myghty']
416 filenames = ['*.myt', 'autodelegate']
417 mimetypes = ['application/x-myghty']
418
419 tokens = {
420 'root': [
421 (r'\s+', Text),
422 (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
423 bygroups(Name.Tag, Text, Name.Function, Name.Tag,
424 using(this), Name.Tag)),
425 (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
426 bygroups(Name.Tag, Name.Function, Name.Tag,
427 using(PythonLexer), Name.Tag)),
428 (r'(<&[^|])(.*?)(,.*?)?(&>)',
429 bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
430 (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
431 bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
432 (r'</&>', Name.Tag),
433 (r'(?s)(<%!?)(.*?)(%>)',
434 bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
435 (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
436 (r'(?<=^)(%)([^\n]*)(\n|\Z)',
437 bygroups(Name.Tag, using(PythonLexer), Other)),
438 (r"""(?sx)
439 (.+?) # anything, followed by:
440 (?:
441 (?<=\n)(?=[%#]) | # an eval or comment line
442 (?=</?[%&]) | # a substitution or block or
443 # call start or end
444 # - don't consume
445 (\\\n) | # an escaped newline
446 \Z # end of string
447 )""", bygroups(Other, Operator)),
448 ]
449 }
450
451
452 class MyghtyHtmlLexer(DelegatingLexer):
453 """
454 Subclass of the `MyghtyLexer` that highlights unlexed data
455 with the `HtmlLexer`.
456
457 .. versionadded:: 0.6
458 """
459
460 name = 'HTML+Myghty'
461 aliases = ['html+myghty']
462 mimetypes = ['text/html+myghty']
463
464 def __init__(self, **options):
465 super().__init__(HtmlLexer, MyghtyLexer, **options)
466
467
468 class MyghtyXmlLexer(DelegatingLexer):
469 """
470 Subclass of the `MyghtyLexer` that highlights unlexed data
471 with the `XmlLexer`.
472
473 .. versionadded:: 0.6
474 """
475
476 name = 'XML+Myghty'
477 aliases = ['xml+myghty']
478 mimetypes = ['application/xml+myghty']
479
480 def __init__(self, **options):
481 super().__init__(XmlLexer, MyghtyLexer, **options)
482
483
484 class MyghtyJavascriptLexer(DelegatingLexer):
485 """
486 Subclass of the `MyghtyLexer` that highlights unlexed data
487 with the `JavascriptLexer`.
488
489 .. versionadded:: 0.6
490 """
491
492 name = 'JavaScript+Myghty'
493 aliases = ['js+myghty', 'javascript+myghty']
494 mimetypes = ['application/x-javascript+myghty',
495 'text/x-javascript+myghty',
496 'text/javascript+myghty']
497
498 def __init__(self, **options):
499 super().__init__(JavascriptLexer, MyghtyLexer, **options)
500
501
502 class MyghtyCssLexer(DelegatingLexer):
503 """
504 Subclass of the `MyghtyLexer` that highlights unlexed data
505 with the `CssLexer`.
506
507 .. versionadded:: 0.6
508 """
509
510 name = 'CSS+Myghty'
511 aliases = ['css+myghty']
512 mimetypes = ['text/css+myghty']
513
514 def __init__(self, **options):
515 super().__init__(CssLexer, MyghtyLexer, **options)
516
517
518 class MasonLexer(RegexLexer):
519 """
520 Generic `mason templates`_ lexer. Adapted from the Myghty lexer. Code
521 that isn't Mason markup is highlighted as HTML.
522
523 .. _mason templates: http://www.masonhq.com/
524
525 .. versionadded:: 1.4
526 """
527 name = 'Mason'
528 aliases = ['mason']
529 filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
530 mimetypes = ['application/x-mason']
531
532 tokens = {
533 'root': [
534 (r'\s+', Text),
535 (r'(?s)(<%doc>)(.*?)(</%doc>)',
536 bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
537 (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
538 bygroups(Name.Tag, Text, Name.Function, Name.Tag,
539 using(this), Name.Tag)),
540 (r'(?s)(<%(\w+)(.*?)(>))(.*?)(</%\2\s*>)',
541 bygroups(Name.Tag, None, None, None, using(PerlLexer), Name.Tag)),
542 (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
543 bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
544 (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
545 bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
546 (r'</&>', Name.Tag),
547 (r'(?s)(<%!?)(.*?)(%>)',
548 bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
549 (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
550 (r'(?<=^)(%)([^\n]*)(\n|\Z)',
551 bygroups(Name.Tag, using(PerlLexer), Other)),
552 (r"""(?sx)
553 (.+?) # anything, followed by:
554 (?:
555 (?<=\n)(?=[%#]) | # an eval or comment line
556 (?=</?[%&]) | # a substitution or block or
557 # call start or end
558 # - don't consume
559 (\\\n) | # an escaped newline
560 \Z # end of string
561 )""", bygroups(using(HtmlLexer), Operator)),
562 ]
563 }
564
565 def analyse_text(text):
566 result = 0.0
567 if re.search(r'</%(class|doc|init)>', text) is not None:
568 result = 1.0
569 elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
570 result = 0.11
571 return result
572
573
574 class MakoLexer(RegexLexer):
575 """
576 Generic `mako templates`_ lexer. Code that isn't Mako
577 markup is yielded as `Token.Other`.
578
579 .. versionadded:: 0.7
580
581 .. _mako templates: http://www.makotemplates.org/
582 """
583
584 name = 'Mako'
585 aliases = ['mako']
586 filenames = ['*.mao']
587 mimetypes = ['application/x-mako']
588
589 tokens = {
590 'root': [
591 (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
592 bygroups(Text, Comment.Preproc, Keyword, Other)),
593 (r'(\s*)(%)([^\n]*)(\n|\Z)',
594 bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
595 (r'(\s*)(##[^\n]*)(\n|\Z)',
596 bygroups(Text, Comment.Preproc, Other)),
597 (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
598 (r'(<%)([\w.:]+)',
599 bygroups(Comment.Preproc, Name.Builtin), 'tag'),
600 (r'(</%)([\w.:]+)(>)',
601 bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
602 (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
603 (r'(?s)(<%(?:!?))(.*?)(%>)',
604 bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
605 (r'(\$\{)(.*?)(\})',
606 bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
607 (r'''(?sx)
608 (.+?) # anything, followed by:
609 (?:
610 (?<=\n)(?=%|\#\#) | # an eval or comment line
611 (?=\#\*) | # multiline comment
612 (?=</?%) | # a python block
613 # call start or end
614 (?=\$\{) | # a substitution
615 (?<=\n)(?=\s*%) |
616 # - don't consume
617 (\\\n) | # an escaped newline
618 \Z # end of string
619 )
620 ''', bygroups(Other, Operator)),
621 (r'\s+', Text),
622 ],
623 'ondeftags': [
624 (r'<%', Comment.Preproc),
625 (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
626 include('tag'),
627 ],
628 'tag': [
629 (r'((?:\w+)\s*=)(\s*)(".*?")',
630 bygroups(Name.Attribute, Text, String)),
631 (r'/?\s*>', Comment.Preproc, '#pop'),
632 (r'\s+', Text),
633 ],
634 'attr': [
635 ('".*?"', String, '#pop'),
636 ("'.*?'", String, '#pop'),
637 (r'[^\s>]+', String, '#pop'),
638 ],
639 }
640
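# Illustrative check (not part of the module): plain text falls through the
# catch-all rule as ``Other`` until a ``${`` substitution is reached.
#
#   >>> from pygments.lexers.templates import MakoLexer
#   >>> [v for _, v in MakoLexer().get_tokens('hello ${name}')][:2]
#   ['hello ', '${']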
641
642 class MakoHtmlLexer(DelegatingLexer):
643 """
644 Subclass of the `MakoLexer` that highlights unlexed data
645 with the `HtmlLexer`.
646
647 .. versionadded:: 0.7
648 """
649
650 name = 'HTML+Mako'
651 aliases = ['html+mako']
652 mimetypes = ['text/html+mako']
653
654 def __init__(self, **options):
655 super().__init__(HtmlLexer, MakoLexer, **options)
656
657
658 class MakoXmlLexer(DelegatingLexer):
659 """
660 Subclass of the `MakoLexer` that highlights unlexed data
661 with the `XmlLexer`.
662
663 .. versionadded:: 0.7
664 """
665
666 name = 'XML+Mako'
667 aliases = ['xml+mako']
668 mimetypes = ['application/xml+mako']
669
670 def __init__(self, **options):
671 super().__init__(XmlLexer, MakoLexer, **options)
672
673
674 class MakoJavascriptLexer(DelegatingLexer):
675 """
676 Subclass of the `MakoLexer` that highlights unlexed data
677 with the `JavascriptLexer`.
678
679 .. versionadded:: 0.7
680 """
681
682 name = 'JavaScript+Mako'
683 aliases = ['js+mako', 'javascript+mako']
684 mimetypes = ['application/x-javascript+mako',
685 'text/x-javascript+mako',
686 'text/javascript+mako']
687
688 def __init__(self, **options):
689 super().__init__(JavascriptLexer, MakoLexer, **options)
690
691
692 class MakoCssLexer(DelegatingLexer):
693 """
694 Subclass of the `MakoLexer` that highlights unlexed data
695 with the `CssLexer`.
696
697 .. versionadded:: 0.7
698 """
699
700 name = 'CSS+Mako'
701 aliases = ['css+mako']
702 mimetypes = ['text/css+mako']
703
704 def __init__(self, **options):
705 super().__init__(CssLexer, MakoLexer, **options)
706
707
708 # Genshi and Cheetah lexers courtesy of Matt Good.
709
710 class CheetahPythonLexer(Lexer):
711 """
712 Lexer for handling Cheetah's special $ tokens in Python syntax.
713 """
714
715 def get_tokens_unprocessed(self, text):
716 pylexer = PythonLexer(**self.options)
717 for pos, type_, value in pylexer.get_tokens_unprocessed(text):
718 if type_ == Token.Error and value == '$':
719 type_ = Comment.Preproc
720 yield pos, type_, value
721
722
723 class CheetahLexer(RegexLexer):
724 """
725 Generic `cheetah templates`_ lexer. Code that isn't Cheetah
726 markup is yielded as `Token.Other`. This also works for
727 `spitfire templates`_, which use the same syntax.
728
729 .. _cheetah templates: http://www.cheetahtemplate.org/
730 .. _spitfire templates: http://code.google.com/p/spitfire/
731 """
732
733 name = 'Cheetah'
734 aliases = ['cheetah', 'spitfire']
735 filenames = ['*.tmpl', '*.spt']
736 mimetypes = ['application/x-cheetah', 'application/x-spitfire']
737
738 tokens = {
739 'root': [
740 (r'(##[^\n]*)$',
741 (bygroups(Comment))),
742 (r'#[*](.|\n)*?[*]#', Comment),
743 (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
744 (r'#slurp$', Comment.Preproc),
745 (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
746 (bygroups(Comment.Preproc, using(CheetahPythonLexer),
747 Comment.Preproc))),
748 # TODO support other Python syntax like $foo['bar']
749 (r'(\$)([a-zA-Z_][\w.]*\w)',
750 bygroups(Comment.Preproc, using(CheetahPythonLexer))),
751 (r'(?s)(\$\{!?)(.*?)(\})',
752 bygroups(Comment.Preproc, using(CheetahPythonLexer),
753 Comment.Preproc)),
754 (r'''(?sx)
755 (.+?) # anything, followed by:
756 (?:
757 (?=\#[#a-zA-Z]*) | # an eval comment
758 (?=\$[a-zA-Z_{]) | # a substitution
759 \Z # end of string
760 )
761 ''', Other),
762 (r'\s+', Text),
763 ],
764 }
765
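# Sketch (illustrative): a ``$placeholder`` is split into a
# ``Comment.Preproc`` dollar sign plus a Python-lexed name; inside
# ``#directive`` bodies, `CheetahPythonLexer` additionally re-types stray
# ``$`` error tokens as ``Comment.Preproc``.
#
#   >>> from pygments.lexers.templates import CheetahLexer
#   >>> [v for _, v in CheetahLexer().get_tokens('$greeting, world')][:2]
#   ['$', 'greeting']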
766
767 class CheetahHtmlLexer(DelegatingLexer):
768 """
769 Subclass of the `CheetahLexer` that highlights unlexed data
770 with the `HtmlLexer`.
771 """
772
773 name = 'HTML+Cheetah'
774 aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
775 mimetypes = ['text/html+cheetah', 'text/html+spitfire']
776
777 def __init__(self, **options):
778 super().__init__(HtmlLexer, CheetahLexer, **options)
779
780
781 class CheetahXmlLexer(DelegatingLexer):
782 """
783 Subclass of the `CheetahLexer` that highlights unlexed data
784 with the `XmlLexer`.
785 """
786
787 name = 'XML+Cheetah'
788 aliases = ['xml+cheetah', 'xml+spitfire']
789 mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
790
791 def __init__(self, **options):
792 super().__init__(XmlLexer, CheetahLexer, **options)
793
794
795 class CheetahJavascriptLexer(DelegatingLexer):
796 """
797 Subclass of the `CheetahLexer` that highlights unlexed data
798 with the `JavascriptLexer`.
799 """
800
801 name = 'JavaScript+Cheetah'
802 aliases = ['js+cheetah', 'javascript+cheetah',
803 'js+spitfire', 'javascript+spitfire']
804 mimetypes = ['application/x-javascript+cheetah',
805 'text/x-javascript+cheetah',
806 'text/javascript+cheetah',
807 'application/x-javascript+spitfire',
808 'text/x-javascript+spitfire',
809 'text/javascript+spitfire']
810
811 def __init__(self, **options):
812 super().__init__(JavascriptLexer, CheetahLexer, **options)
813
814
815 class GenshiTextLexer(RegexLexer):
816 """
817 A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
818 templates.
819 """
820
821 name = 'Genshi Text'
822 aliases = ['genshitext']
823 mimetypes = ['application/x-genshi-text', 'text/x-genshi']
824
825 tokens = {
826 'root': [
827 (r'[^#$\s]+', Other),
828 (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
829 (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
830 include('variable'),
831 (r'[#$\s]', Other),
832 ],
833 'directive': [
834 (r'\n', Text, '#pop'),
835 (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
836 (r'(choose|when|with)([^\S\n]+)(.*)',
837 bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
838 (r'(choose|otherwise)\b', Keyword, '#pop'),
839 (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
840 ],
841 'variable': [
842 (r'(?<!\$)(\$\{)(.+?)(\})',
843 bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
844 (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
845 Name.Variable),
846 ]
847 }
848
849
850 class GenshiMarkupLexer(RegexLexer):
851 """
852 Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
853 `GenshiLexer`.
854 """
855
856 flags = re.DOTALL
857
858 tokens = {
859 'root': [
860 (r'[^<$]+', Other),
861 (r'(<\?python)(.*?)(\?>)',
862 bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
863 # yield style and script blocks as Other
864 (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
865 (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
866 (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
867 include('variable'),
868 (r'[<$]', Other),
869 ],
870 'pytag': [
871 (r'\s+', Text),
872 (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
873 (r'/?\s*>', Name.Tag, '#pop'),
874 ],
875 'pyattr': [
876 ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
877 ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
878 (r'[^\s>]+', String, '#pop'),
879 ],
880 'tag': [
881 (r'\s+', Text),
882 (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
883 (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
884 (r'/?\s*>', Name.Tag, '#pop'),
885 ],
886 'attr': [
887 ('"', String, 'attr-dstring'),
888 ("'", String, 'attr-sstring'),
889 (r'[^\s>]*', String, '#pop')
890 ],
891 'attr-dstring': [
892 ('"', String, '#pop'),
893 include('strings'),
894 ("'", String)
895 ],
896 'attr-sstring': [
897 ("'", String, '#pop'),
898 include('strings'),
899 ("'", String)
900 ],
901 'strings': [
902 ('[^"\'$]+', String),
903 include('variable')
904 ],
905 'variable': [
906 (r'(?<!\$)(\$\{)(.+?)(\})',
907 bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
908 (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
909 Name.Variable),
910 ]
911 }
912
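# Illustrative note: this base class only handles the Genshi-specific bits
# (``<?python ?>``, ``py:`` attributes, ``${...}``); the delegating lexers
# below re-lex the rest as HTML or XML.
#
#   >>> from pygments.lexers.templates import HtmlGenshiLexer
#   >>> vals = [v for _, v in HtmlGenshiLexer().get_tokens('<p>${1 + 1}</p>')]
#   >>> '${' in vals
#   True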
913
914 class HtmlGenshiLexer(DelegatingLexer):
915 """
916 A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
917 `kid <http://kid-templating.org/>`_ HTML templates.
918 """
919
920 name = 'HTML+Genshi'
921 aliases = ['html+genshi', 'html+kid']
922 alias_filenames = ['*.html', '*.htm', '*.xhtml']
923 mimetypes = ['text/html+genshi']
924
925 def __init__(self, **options):
926 super().__init__(HtmlLexer, GenshiMarkupLexer, **options)
927
928 def analyse_text(text):
929 rv = 0.0
930 if re.search(r'\$\{.*?\}', text) is not None:
931 rv += 0.2
932 if re.search(r'py:(.*?)=["\']', text) is not None:
933 rv += 0.2
934 return rv + HtmlLexer.analyse_text(text) - 0.01
935
936
937 class GenshiLexer(DelegatingLexer):
938 """
939 A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
940 `kid <http://kid-templating.org/>`_ XML templates.
941 """
942
943 name = 'Genshi'
944 aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
945 filenames = ['*.kid']
946 alias_filenames = ['*.xml']
947 mimetypes = ['application/x-genshi', 'application/x-kid']
948
949 def __init__(self, **options):
950 super().__init__(XmlLexer, GenshiMarkupLexer, **options)
951
952 def analyse_text(text):
953 rv = 0.0
954 if re.search(r'\$\{.*?\}', text) is not None:
955 rv += 0.2
956 if re.search(r'py:(.*?)=["\']', text) is not None:
957 rv += 0.2
958 return rv + XmlLexer.analyse_text(text) - 0.01
959
960
961 class JavascriptGenshiLexer(DelegatingLexer):
962 """
963 A lexer that highlights javascript code in genshi text templates.
964 """
965
966 name = 'JavaScript+Genshi Text'
967 aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
968 'javascript+genshi']
969 alias_filenames = ['*.js']
970 mimetypes = ['application/x-javascript+genshi',
971 'text/x-javascript+genshi',
972 'text/javascript+genshi']
973
974 def __init__(self, **options):
975 super().__init__(JavascriptLexer, GenshiTextLexer, **options)
976
977 def analyse_text(text):
978 return GenshiLexer.analyse_text(text) - 0.05
979
980
981 class CssGenshiLexer(DelegatingLexer):
982 """
983 A lexer that highlights CSS definitions in genshi text templates.
984 """
985
986 name = 'CSS+Genshi Text'
987 aliases = ['css+genshitext', 'css+genshi']
988 alias_filenames = ['*.css']
989 mimetypes = ['text/css+genshi']
990
991 def __init__(self, **options):
992 super().__init__(CssLexer, GenshiTextLexer, **options)
993
994 def analyse_text(text):
995 return GenshiLexer.analyse_text(text) - 0.05
996
997
998 class RhtmlLexer(DelegatingLexer):
999 """
1000 Subclass of the `ErbLexer` that highlights the unlexed data with the
1001 `HtmlLexer`.
1002
1003 Nested JavaScript and CSS are highlighted too.
1004 """
1005
1006 name = 'RHTML'
1007 aliases = ['rhtml', 'html+erb', 'html+ruby']
1008 filenames = ['*.rhtml']
1009 alias_filenames = ['*.html', '*.htm', '*.xhtml']
1010 mimetypes = ['text/html+ruby']
1011
1012 def __init__(self, **options):
1013 super().__init__(HtmlLexer, ErbLexer, **options)
1014
1015 def analyse_text(text):
1016 rv = ErbLexer.analyse_text(text) - 0.01
1017 if html_doctype_matches(text):
1018 # one more than the XmlErbLexer returns
1019 rv += 0.5
1020 return rv
1021
1022
1023 class XmlErbLexer(DelegatingLexer):
1024 """
1025 Subclass of `ErbLexer` which highlights data outside preprocessor
1026 directives with the `XmlLexer`.
1027 """
1028
1029 name = 'XML+Ruby'
1030 aliases = ['xml+erb', 'xml+ruby']
1031 alias_filenames = ['*.xml']
1032 mimetypes = ['application/xml+ruby']
1033
1034 def __init__(self, **options):
1035 super().__init__(XmlLexer, ErbLexer, **options)
1036
1037 def analyse_text(text):
1038 rv = ErbLexer.analyse_text(text) - 0.01
1039 if looks_like_xml(text):
1040 rv += 0.4
1041 return rv
1042
1043
1044 class CssErbLexer(DelegatingLexer):
1045 """
1046 Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
1047 """
1048
1049 name = 'CSS+Ruby'
1050 aliases = ['css+erb', 'css+ruby']
1051 alias_filenames = ['*.css']
1052 mimetypes = ['text/css+ruby']
1053
1054 def __init__(self, **options):
1055 super().__init__(CssLexer, ErbLexer, **options)
1056
1057 def analyse_text(text):
1058 return ErbLexer.analyse_text(text) - 0.05
1059
1060
1061 class JavascriptErbLexer(DelegatingLexer):
1062 """
1063 Subclass of `ErbLexer` which highlights unlexed data with the
1064 `JavascriptLexer`.
1065 """
1066
1067 name = 'JavaScript+Ruby'
1068 aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
1069 alias_filenames = ['*.js']
1070 mimetypes = ['application/x-javascript+ruby',
1071 'text/x-javascript+ruby',
1072 'text/javascript+ruby']
1073
1074 def __init__(self, **options):
1075 super().__init__(JavascriptLexer, ErbLexer, **options)
1076
1077 def analyse_text(text):
1078 return ErbLexer.analyse_text(text) - 0.05
1079
1080
1081 class HtmlPhpLexer(DelegatingLexer):
1082 """
1083 Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.
1084
1085 Nested JavaScript and CSS are highlighted too.
1086 """
1087
1088 name = 'HTML+PHP'
1089 aliases = ['html+php']
1090 filenames = ['*.phtml']
1091 alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
1092 '*.php[345]']
1093 mimetypes = ['application/x-php',
1094 'application/x-httpd-php', 'application/x-httpd-php3',
1095 'application/x-httpd-php4', 'application/x-httpd-php5']
1096
1097 def __init__(self, **options):
1098 super().__init__(HtmlLexer, PhpLexer, **options)
1099
1100 def analyse_text(text):
1101 rv = PhpLexer.analyse_text(text) - 0.01
1102 if html_doctype_matches(text):
1103 rv += 0.5
1104 return rv
1105
1106
1107 class XmlPhpLexer(DelegatingLexer):
1108 """
1109 Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
1110 """
1111
1112 name = 'XML+PHP'
1113 aliases = ['xml+php']
1114 alias_filenames = ['*.xml', '*.php', '*.php[345]']
1115 mimetypes = ['application/xml+php']
1116
1117 def __init__(self, **options):
1118 super().__init__(XmlLexer, PhpLexer, **options)
1119
1120 def analyse_text(text):
1121 rv = PhpLexer.analyse_text(text) - 0.01
1122 if looks_like_xml(text):
1123 rv += 0.4
1124 return rv
1125
1126
1127 class CssPhpLexer(DelegatingLexer):
1128 """
1129 Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
1130 """
1131
1132 name = 'CSS+PHP'
1133 aliases = ['css+php']
1134 alias_filenames = ['*.css']
1135 mimetypes = ['text/css+php']
1136
1137 def __init__(self, **options):
1138 super().__init__(CssLexer, PhpLexer, **options)
1139
1140 def analyse_text(text):
1141 return PhpLexer.analyse_text(text) - 0.05
1142
1143
1144 class JavascriptPhpLexer(DelegatingLexer):
1145 """
1146 Subclass of `PhpLexer` which highlights unmatched data with the
1147 `JavascriptLexer`.
1148 """
1149
1150 name = 'JavaScript+PHP'
1151 aliases = ['js+php', 'javascript+php']
1152 alias_filenames = ['*.js']
1153 mimetypes = ['application/x-javascript+php',
1154 'text/x-javascript+php',
1155 'text/javascript+php']
1156
1157 def __init__(self, **options):
1158 super().__init__(JavascriptLexer, PhpLexer, **options)
1159
1160 def analyse_text(text):
1161 return PhpLexer.analyse_text(text)
1162
1163
1164 class HtmlSmartyLexer(DelegatingLexer):
1165 """
1166 Subclass of the `SmartyLexer` that highlights unlexed data with the
1167 `HtmlLexer`.
1168
1169 Nested JavaScript and CSS are highlighted too.
1170 """
1171
1172 name = 'HTML+Smarty'
1173 aliases = ['html+smarty']
1174 alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
1175 mimetypes = ['text/html+smarty']
1176
1177 def __init__(self, **options):
1178 super().__init__(HtmlLexer, SmartyLexer, **options)
1179
1180 def analyse_text(text):
1181 rv = SmartyLexer.analyse_text(text) - 0.01
1182 if html_doctype_matches(text):
1183 rv += 0.5
1184 return rv
1185
1186
1187 class XmlSmartyLexer(DelegatingLexer):
1188 """
1189 Subclass of the `SmartyLexer` that highlights unlexed data with the
1190 `XmlLexer`.
1191 """
1192
1193 name = 'XML+Smarty'
1194 aliases = ['xml+smarty']
1195 alias_filenames = ['*.xml', '*.tpl']
1196 mimetypes = ['application/xml+smarty']
1197
1198 def __init__(self, **options):
1199 super().__init__(XmlLexer, SmartyLexer, **options)
1200
1201 def analyse_text(text):
1202 rv = SmartyLexer.analyse_text(text) - 0.01
1203 if looks_like_xml(text):
1204 rv += 0.4
1205 return rv
1206
1207
1208 class CssSmartyLexer(DelegatingLexer):
1209 """
1210 Subclass of the `SmartyLexer` that highlights unlexed data with the
1211 `CssLexer`.
1212 """
1213
1214 name = 'CSS+Smarty'
1215 aliases = ['css+smarty']
1216 alias_filenames = ['*.css', '*.tpl']
1217 mimetypes = ['text/css+smarty']
1218
1219 def __init__(self, **options):
1220 super().__init__(CssLexer, SmartyLexer, **options)
1221
1222 def analyse_text(text):
1223 return SmartyLexer.analyse_text(text) - 0.05
1224
1225
1226 class JavascriptSmartyLexer(DelegatingLexer):
1227 """
1228 Subclass of the `SmartyLexer` that highlights unlexed data with the
1229 `JavascriptLexer`.
1230 """
1231
1232 name = 'JavaScript+Smarty'
1233 aliases = ['js+smarty', 'javascript+smarty']
1234 alias_filenames = ['*.js', '*.tpl']
1235 mimetypes = ['application/x-javascript+smarty',
1236 'text/x-javascript+smarty',
1237 'text/javascript+smarty']
1238
1239 def __init__(self, **options):
1240 super().__init__(JavascriptLexer, SmartyLexer, **options)
1241
1242 def analyse_text(text):
1243 return SmartyLexer.analyse_text(text) - 0.05
1244
1245
1246 class HtmlDjangoLexer(DelegatingLexer):
1247 """
1248 Subclass of the `DjangoLexer` that highlights unlexed data with the
1249 `HtmlLexer`.
1250
1251 Nested JavaScript and CSS are highlighted too.
1252 """
1253
1254 name = 'HTML+Django/Jinja'
1255 aliases = ['html+django', 'html+jinja', 'htmldjango']
1256 alias_filenames = ['*.html', '*.htm', '*.xhtml']
1257 mimetypes = ['text/html+django', 'text/html+jinja']
1258
1259 def __init__(self, **options):
1260 super().__init__(HtmlLexer, DjangoLexer, **options)
1261
1262 def analyse_text(text):
1263 rv = DjangoLexer.analyse_text(text) - 0.01
1264 if html_doctype_matches(text):
1265 rv += 0.5
1266 return rv
1267
1268
1269 class XmlDjangoLexer(DelegatingLexer):
1270 """
1271 Subclass of the `DjangoLexer` that highlights unlexed data with the
1272 `XmlLexer`.
1273 """
1274
1275 name = 'XML+Django/Jinja'
1276 aliases = ['xml+django', 'xml+jinja']
1277 alias_filenames = ['*.xml']
1278 mimetypes = ['application/xml+django', 'application/xml+jinja']
1279
1280 def __init__(self, **options):
1281 super().__init__(XmlLexer, DjangoLexer, **options)
1282
1283 def analyse_text(text):
1284 rv = DjangoLexer.analyse_text(text) - 0.01
1285 if looks_like_xml(text):
1286 rv += 0.4
1287 return rv
1288
1289
1290 class CssDjangoLexer(DelegatingLexer):
1291 """
1292 Subclass of the `DjangoLexer` that highlights unlexed data with the
1293 `CssLexer`.
1294 """
1295
1296 name = 'CSS+Django/Jinja'
1297 aliases = ['css+django', 'css+jinja']
1298 alias_filenames = ['*.css']
1299 mimetypes = ['text/css+django', 'text/css+jinja']
1300
1301 def __init__(self, **options):
1302 super().__init__(CssLexer, DjangoLexer, **options)
1303
1304 def analyse_text(text):
1305 return DjangoLexer.analyse_text(text) - 0.05
1306
1307
1308 class JavascriptDjangoLexer(DelegatingLexer):
1309 """
1310 Subclass of the `DjangoLexer` that highlights unlexed data with the
1311 `JavascriptLexer`.
1312 """
1313
1314 name = 'JavaScript+Django/Jinja'
1315 aliases = ['js+django', 'javascript+django',
1316 'js+jinja', 'javascript+jinja']
1317 alias_filenames = ['*.js']
1318 mimetypes = ['application/x-javascript+django',
1319 'application/x-javascript+jinja',
1320 'text/x-javascript+django',
1321 'text/x-javascript+jinja',
1322 'text/javascript+django',
1323 'text/javascript+jinja']
1324
1325 def __init__(self, **options):
1326 super().__init__(JavascriptLexer, DjangoLexer, **options)
1327
1328 def analyse_text(text):
1329 return DjangoLexer.analyse_text(text) - 0.05
1330
1331
1332 class JspRootLexer(RegexLexer):
1333 """
1334 Base for the `JspLexer`. Yields `Token.Other` for area outside of
1335 JSP tags.
1336
1337 .. versionadded:: 0.7
1338 """
1339
1340 tokens = {
1341 'root': [
1342 (r'<%\S?', Keyword, 'sec'),
1343 # FIXME: I want to make these keywords but still parse attributes.
1344 (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
1345 Keyword),
1346 (r'[^<]+', Other),
1347 (r'<', Other),
1348 ],
1349 'sec': [
1350 (r'%>', Keyword, '#pop'),
1351 # note: '\w\W' != '.' without DOTALL.
1352 (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
1353 ],
1354 }
1355
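# Delegation sketch (illustrative): `JspRootLexer` yields the markup as
# ``Other`` and lexes ``<% ... %>`` sections as Java; `JspLexer` below then
# re-lexes the ``Other`` parts as XML.
#
#   >>> from pygments.lexers.templates import JspLexer
#   >>> vals = [v for _, v in JspLexer().get_tokens('<p><% int i = 0; %></p>')]
#   >>> '<%' in vals and '%>' in vals
#   True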
1356
1357 class JspLexer(DelegatingLexer):
1358 """
1359 Lexer for Java Server Pages.
1360
1361 .. versionadded:: 0.7
1362 """
1363 name = 'Java Server Page'
1364 aliases = ['jsp']
1365 filenames = ['*.jsp']
1366 mimetypes = ['application/x-jsp']
1367
1368 def __init__(self, **options):
1369 super().__init__(XmlLexer, JspRootLexer, **options)
1370
1371 def analyse_text(text):
1372 rv = JavaLexer.analyse_text(text) - 0.01
1373 if looks_like_xml(text):
1374 rv += 0.4
1375 if '<%' in text and '%>' in text:
1376 rv += 0.1
1377 return rv
1378
1379
1380 class EvoqueLexer(RegexLexer):
1381 """
1382 For files using the Evoque templating system.
1383
1384 .. versionadded:: 1.1
1385 """
1386 name = 'Evoque'
1387 aliases = ['evoque']
1388 filenames = ['*.evoque']
1389 mimetypes = ['application/x-evoque']
1390
1391 flags = re.DOTALL
1392
1393 tokens = {
1394 'root': [
1395 (r'[^#$]+', Other),
1396 (r'#\[', Comment.Multiline, 'comment'),
1397 (r'\$\$', Other),
1398 # svn keywords
1399 (r'\$\w+:[^$\n]*\$', Comment.Multiline),
1400 # directives: begin, end
1401 (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
1402 bygroups(Punctuation, Name.Builtin, Punctuation, None,
1403 String, Punctuation)),
1404 # directives: evoque, overlay
1405 # see doc for handling first name arg: /directives/evoque/
1406 # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
1407 # should be using(PythonLexer), not passed out as String
1408 (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
1409 r'(.*?)((?(4)%)\})',
1410 bygroups(Punctuation, Name.Builtin, Punctuation, None,
1411 String, using(PythonLexer), Punctuation)),
1412 # directives: if, for, prefer, test
1413 (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
1414 bygroups(Punctuation, Name.Builtin, Punctuation, None,
1415 using(PythonLexer), Punctuation)),
1416 # directive clauses (no {} expression)
1417 (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
1418 # expressions
1419 (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
1420 bygroups(Punctuation, None, using(PythonLexer),
1421 Name.Builtin, None, None, Punctuation)),
1422 (r'#', Other),
1423 ],
1424 'comment': [
1425 (r'[^\]#]', Comment.Multiline),
1426 (r'#\[', Comment.Multiline, '#push'),
1427 (r'\]#', Comment.Multiline, '#pop'),
1428 (r'[\]#]', Comment.Multiline)
1429 ],
1430 }
1431
1432 def analyse_text(text):
1433 """Evoque templates use $evoque, which is unique."""
1434 if '$evoque' in text:
1435 return 1
1436
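# Illustrative check (the filename is made up): the marker is literal, so a
# single occurrence of ``$evoque`` is treated as conclusive.
#
#   >>> from pygments.lexers.templates import EvoqueLexer
#   >>> EvoqueLexer.analyse_text('$evoque{base.html}')
#   1
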
1437 class EvoqueHtmlLexer(DelegatingLexer):
1438 """
1439 Subclass of the `EvoqueLexer` that highlights unlexed data with the
1440 `HtmlLexer`.
1441
1442 .. versionadded:: 1.1
1443 """
1444 name = 'HTML+Evoque'
1445 aliases = ['html+evoque']
1446 filenames = ['*.html']
1447 mimetypes = ['text/html+evoque']
1448
1449 def __init__(self, **options):
1450 super().__init__(HtmlLexer, EvoqueLexer, **options)
1451
1452 def analyse_text(text):
1453 return EvoqueLexer.analyse_text(text)
1454
1455
1456 class EvoqueXmlLexer(DelegatingLexer):
1457 """
1458 Subclass of the `EvoqueLexer` that highlights unlexed data with the
1459 `XmlLexer`.
1460
1461 .. versionadded:: 1.1
1462 """
1463 name = 'XML+Evoque'
1464 aliases = ['xml+evoque']
1465 filenames = ['*.xml']
1466 mimetypes = ['application/xml+evoque']
1467
1468 def __init__(self, **options):
1469 super().__init__(XmlLexer, EvoqueLexer, **options)
1470
1471 def analyse_text(text):
1472 return EvoqueLexer.analyse_text(text)
1473
1474
1475 class ColdfusionLexer(RegexLexer):
1476 """
1477 ColdFusion statements.
1478 """
1479 name = 'cfstatement'
1480 aliases = ['cfs']
1481 filenames = []
1482 mimetypes = []
1483 flags = re.IGNORECASE
1484
1485 tokens = {
1486 'root': [
1487 (r'//.*?\n', Comment.Single),
1488 (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
1489 (r'\+\+|--', Operator),
1490 (r'[-+*/^&=!]', Operator),
1491 (r'<=|>=|<|>|==', Operator),
1492 (r'mod\b', Operator),
1493 (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
1494 (r'\|\||&&', Operator),
1495 (r'\?', Operator),
1496 (r'"', String.Double, 'string'),
1497 # There is a special rule for allowing HTML in single quoted
1498 # strings, evidently.
1499 (r"'.*?'", String.Single),
1500 (r'\d+', Number),
1501 (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
1502 r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
1503 r'date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
1504 (r'(true|false|null)\b', Keyword.Constant),
1505 (r'(application|session|client|cookie|super|this|variables|arguments)\b',
1506 Name.Constant),
1507 (r'([a-z_$][\w.]*)(\s*)(\()',
1508 bygroups(Name.Function, Text, Punctuation)),
1509 (r'[a-z_$][\w.]*', Name.Variable),
1510 (r'[()\[\]{};:,.\\]', Punctuation),
1511 (r'\s+', Text),
1512 ],
1513 'string': [
1514 (r'""', String.Double),
1515 (r'#.+?#', String.Interp),
1516 (r'[^"#]+', String.Double),
1517 (r'#', String.Double),
1518 (r'"', String.Double, '#pop'),
1519 ],
1520 }
1521
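# String-interpolation sketch (illustrative): inside double quotes the lexer
# switches to the ``string`` state, where ``#expr#`` spans become
# ``String.Interp``.
#
#   >>> from pygments.lexers.templates import ColdfusionLexer
#   >>> [v for _, v in ColdfusionLexer().get_tokens('"Hi #name#"')][:3]
#   ['"', 'Hi ', '#name#']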
1522
1523 class ColdfusionMarkupLexer(RegexLexer):
1524 """
1525 ColdFusion markup only.
1526 """
1527 name = 'Coldfusion'
1528 aliases = ['cf']
1529 filenames = []
1530 mimetypes = []
1531
1532 tokens = {
1533 'root': [
1534 (r'[^<]+', Other),
1535 include('tags'),
1536 (r'<[^<>]*', Other),
1537 ],
1538 'tags': [
1539 (r'<!---', Comment.Multiline, 'cfcomment'),
1540 (r'(?s)<!--.*?-->', Comment),
1541 (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
1542 (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
1543 bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
1544 # negative lookbehind is for strings with embedded >
1545 (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
1546 r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
1547 r'mailpart|mail|header|content|zip|image|lock|argument|try|'
1548 r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
1549 bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
1550 ],
1551 'cfoutput': [
1552 (r'[^#<]+', Other),
1553 (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
1554 Punctuation)),
1555 # (r'<cfoutput.*?>', Name.Builtin, '#push'),
1556 (r'</cfoutput.*?>', Name.Builtin, '#pop'),
1557 include('tags'),
1558 (r'(?s)<[^<>]*', Other),
1559 (r'#', Other),
1560 ],
1561 'cfcomment': [
1562 (r'<!---', Comment.Multiline, '#push'),
1563 (r'--->', Comment.Multiline, '#pop'),
1564 (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
1565 ],
1566 }
1567
1568
1569 class ColdfusionHtmlLexer(DelegatingLexer):
1570 """
1571 ColdFusion markup in HTML.
1572 """
1573 name = 'Coldfusion HTML'
1574 aliases = ['cfm']
1575 filenames = ['*.cfm', '*.cfml']
1576 mimetypes = ['application/x-coldfusion']
1577
1578 def __init__(self, **options):
1579 super().__init__(HtmlLexer, ColdfusionMarkupLexer, **options)
1580
1581
1582 class ColdfusionCFCLexer(DelegatingLexer):
1583 """
1584 ColdFusion markup/script components.
1585
1586 .. versionadded:: 2.0
1587 """
1588 name = 'Coldfusion CFC'
1589 aliases = ['cfc']
1590 filenames = ['*.cfc']
1591 mimetypes = []
1592
1593 def __init__(self, **options):
1594 super().__init__(ColdfusionHtmlLexer, ColdfusionLexer, **options)
1595
1596
1597 class SspLexer(DelegatingLexer):
1598 """
1599 Lexer for Scalate Server Pages.
1600
1601 .. versionadded:: 1.4
1602 """
1603 name = 'Scalate Server Page'
1604 aliases = ['ssp']
1605 filenames = ['*.ssp']
1606 mimetypes = ['application/x-ssp']
1607
1608 def __init__(self, **options):
1609 super().__init__(XmlLexer, JspRootLexer, **options)
1610
1611 def analyse_text(text):
1612 rv = 0.0
1613 if re.search(r'val \w+\s*:', text):
1614 rv += 0.6
1615 if looks_like_xml(text):
1616 rv += 0.2
1617 if '<%' in text and '%>' in text:
1618 rv += 0.1
1619 return rv
1620
1621
1622 class TeaTemplateRootLexer(RegexLexer):
1623 """
1624 Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
1625 code blocks.
1626
1627 .. versionadded:: 1.5
1628 """
1629
1630 tokens = {
1631 'root': [
1632 (r'<%\S?', Keyword, 'sec'),
1633 (r'[^<]+', Other),
1634 (r'<', Other),
1635 ],
1636 'sec': [
1637 (r'%>', Keyword, '#pop'),
1638 # note: '\w\W' != '.' without DOTALL.
1639 (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
1640 ],
1641 }
1642
1643
1644 class TeaTemplateLexer(DelegatingLexer):
1645 """
1646 Lexer for `Tea Templates <http://teatrove.org/>`_.
1647
1648 .. versionadded:: 1.5
1649 """
1650 name = 'Tea'
1651 aliases = ['tea']
1652 filenames = ['*.tea']
1653 mimetypes = ['text/x-tea']
1654
1655 def __init__(self, **options):
1656 super().__init__(XmlLexer, TeaTemplateRootLexer, **options)
1657
1658 def analyse_text(text):
1659 rv = TeaLangLexer.analyse_text(text) - 0.01
1660 if looks_like_xml(text):
1661 rv += 0.4
1662 if '<%' in text and '%>' in text:
1663 rv += 0.1
1664 return rv
1665
1666
1667 class LassoHtmlLexer(DelegatingLexer):
1668 """
1669 Subclass of the `LassoLexer` which highlights unhandled data with the
1670 `HtmlLexer`.
1671
1672 Nested JavaScript and CSS are also highlighted.
1673
1674 .. versionadded:: 1.6
1675 """
1676
1677 name = 'HTML+Lasso'
1678 aliases = ['html+lasso']
1679 alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
1680 '*.incl', '*.inc', '*.las']
1681 mimetypes = ['text/html+lasso',
1682 'application/x-httpd-lasso',
1683 'application/x-httpd-lasso[89]']
1684
1685 def __init__(self, **options):
1686 super().__init__(HtmlLexer, LassoLexer, **options)
1687
1688 def analyse_text(text):
1689 rv = LassoLexer.analyse_text(text) - 0.01
1690 if html_doctype_matches(text): # same as HTML lexer
1691 rv += 0.5
1692 return rv
1693
1694
1695 class LassoXmlLexer(DelegatingLexer):
1696 """
1697 Subclass of the `LassoLexer` which highlights unhandled data with the
1698 `XmlLexer`.
1699
1700 .. versionadded:: 1.6
1701 """
1702
1703 name = 'XML+Lasso'
1704 aliases = ['xml+lasso']
1705 alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
1706 '*.incl', '*.inc', '*.las']
1707 mimetypes = ['application/xml+lasso']
1708
1709 def __init__(self, **options):
1710 super().__init__(XmlLexer, LassoLexer, **options)
1711
1712 def analyse_text(text):
1713 rv = LassoLexer.analyse_text(text) - 0.01
1714 if looks_like_xml(text):
1715 rv += 0.4
1716 return rv
1717
1718
1719 class LassoCssLexer(DelegatingLexer):
1720 """
1721 Subclass of the `LassoLexer` which highlights unhandled data with the
1722 `CssLexer`.
1723
1724 .. versionadded:: 1.6
1725 """
1726
1727 name = 'CSS+Lasso'
1728 aliases = ['css+lasso']
1729 alias_filenames = ['*.css']
1730 mimetypes = ['text/css+lasso']
1731
1732 def __init__(self, **options):
1733 options['requiredelimiters'] = True
1734 super().__init__(CssLexer, LassoLexer, **options)
1735
1736 def analyse_text(text):
1737 rv = LassoLexer.analyse_text(text) - 0.05
1738 if re.search(r'\w+:[^;]+;', text):
1739 rv += 0.1
1740 if 'padding:' in text:
1741 rv += 0.1
1742 return rv
1743
1744
1745 class LassoJavascriptLexer(DelegatingLexer):
1746 """
1747 Subclass of the `LassoLexer` which highlights unhandled data with the
1748 `JavascriptLexer`.
1749
1750 .. versionadded:: 1.6
1751 """
1752
1753 name = 'JavaScript+Lasso'
1754 aliases = ['js+lasso', 'javascript+lasso']
1755 alias_filenames = ['*.js']
1756 mimetypes = ['application/x-javascript+lasso',
1757 'text/x-javascript+lasso',
1758 'text/javascript+lasso']
1759
1760 def __init__(self, **options):
1761 options['requiredelimiters'] = True
1762 super().__init__(JavascriptLexer, LassoLexer, **options)
1763
1764 def analyse_text(text):
1765 rv = LassoLexer.analyse_text(text) - 0.05
1766 return rv
1767
1768
1769 class HandlebarsLexer(RegexLexer):
1770 """
1771 Generic `handlebars <http://handlebarsjs.com/>`_ template lexer.
1772
1773 Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
1774 Everything else is left for a delegating lexer.
1775
1776 .. versionadded:: 2.0
1777 """
1778
1779 name = "Handlebars"
1780 aliases = ['handlebars']
1781
1782 tokens = {
1783 'root': [
1784 (r'[^{]+', Other),
1785
1786 # Comment start {{! }} or {{!--
1787 (r'\{\{!.*\}\}', Comment),
1788
1789 # HTML Escaping open {{{expression
1790 (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),
1791
1792 # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~
1793 (r'(\{\{)([#~/]+)([^\s}]*)',
1794 bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'),
1795 (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
1796 ],
1797
1798 'tag': [
1799 (r'\s+', Text),
1800 # HTML Escaping close }}}
1801 (r'\}\}\}', Comment.Special, '#pop'),
1802 # blockClose}}, includes optional tilde ~
1803 (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'),
1804
1805 # {{opt=something}}
1806 (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)),
1807
1808 # Partials {{> ...}}
1809 (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)),
1810 (r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)),
1811 (r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation),
1812 'dynamic-partial'),
1813
1814 include('generic'),
1815 ],
1816 'dynamic-partial': [
1817 (r'\s+', Text),
1818 (r'\)', Punctuation, '#pop'),
1819
1820 (r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text,
1821 Name.Variable, Text)),
1822 (r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text,
1823 using(this, state='variable'))),
1824 (r'[\w-]+', Name.Function),
1825
1826 include('generic'),
1827 ],
1828 'variable': [
1829 (r'[()/@a-zA-Z][\w-]*', Name.Variable),
1830 (r'\.[\w-]+', Name.Variable),
1831 (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable),
1832 ],
1833 'generic': [
1834 include('variable'),
1835
1836 # borrowed from DjangoLexer
1837 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
1838 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
1839 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
1840 r"0[xX][0-9a-fA-F]+[Ll]?", Number),
1841 ]
1842 }
1843
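# Block-open sketch (illustrative): the ``#``/``~``/``/`` sigils and the tag
# name are both emitted as ``Number.Attribute``, then the ``tag`` state takes
# over until the closing braces.
#
#   >>> from pygments.lexers.templates import HandlebarsLexer
#   >>> [v for _, v in HandlebarsLexer().get_tokens('{{#each items}}')][:3]
#   ['{{', '#', 'each']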
1844
1845 class HandlebarsHtmlLexer(DelegatingLexer):
1846 """
1847 Subclass of the `HandlebarsLexer` that highlights unlexed data with the
1848 `HtmlLexer`.
1849
1850 .. versionadded:: 2.0
1851 """
1852
1853 name = "HTML+Handlebars"
1854 aliases = ["html+handlebars"]
1855 filenames = ['*.handlebars', '*.hbs']
1856 mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
1857
1858 def __init__(self, **options):
1859 super().__init__(HtmlLexer, HandlebarsLexer, **options)
1860
1861
1862 class YamlJinjaLexer(DelegatingLexer):
1863 """
1864 Subclass of the `DjangoLexer` that highlights unlexed data with the
1865 `YamlLexer`.
1866
1867 Commonly used in SaltStack salt states.
1868
1869 .. versionadded:: 2.0
1870 """
1871
1872 name = 'YAML+Jinja'
1873 aliases = ['yaml+jinja', 'salt', 'sls']
1874 filenames = ['*.sls']
1875 mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
1876
1877 def __init__(self, **options):
1878 super().__init__(YamlLexer, DjangoLexer, **options)
1879
1880
1881 class LiquidLexer(RegexLexer):
1882 """
1883 Lexer for `Liquid templates
1884 <http://www.rubydoc.info/github/Shopify/liquid>`_.
1885
1886 .. versionadded:: 2.0
1887 """
1888 name = 'liquid'
1889 aliases = ['liquid']
1890 filenames = ['*.liquid']
1891
1892 tokens = {
1893 'root': [
1894 (r'[^{]+', Text),
1895 # tags and block tags
1896 (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
1897 # output tags
1898 (r'(\{\{)(\s*)([^\s}]+)',
1899 bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
1900 'output'),
1901 (r'\{', Text)
1902 ],
1903
1904 'tag-or-block': [
1905 # builtin logic blocks
1906 (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
1907 (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
1908 combined('end-of-block', 'whitespace', 'generic')),
1909 (r'(else)(\s*)(%\})',
1910 bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),
1911
1912 # other builtin blocks
1913 (r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
1914 bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
1915 Whitespace, Punctuation), '#pop'),
1916 (r'(comment)(\s*)(%\})',
1917 bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
1918 (r'(raw)(\s*)(%\})',
1919 bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),
1920
1921 # end of block
1922 (r'(end(case|unless|if))(\s*)(%\})',
1923 bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
1924 (r'(end([^\s%]+))(\s*)(%\})',
1925 bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),
1926
1927 # builtin tags (assign and include are handled together with usual tags)
1928 (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
1929 bygroups(Name.Tag, Whitespace,
1930 using(this, state='generic'), Punctuation, Whitespace),
1931 'variable-tag-markup'),
1932
1933 # other tags or blocks
1934 (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
1935 ],
1936
1937 'output': [
1938 include('whitespace'),
1939 (r'\}\}', Punctuation, '#pop'), # end of output
1940
1941 (r'\|', Punctuation, 'filters')
1942 ],
1943
1944 'filters': [
1945 include('whitespace'),
1946 (r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output
1947
1948 (r'([^\s|:]+)(:?)(\s*)',
1949 bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
1950 ],
1951
1952 'filter-markup': [
1953 (r'\|', Punctuation, '#pop'),
1954 include('end-of-tag'),
1955 include('default-param-markup')
1956 ],
1957
1958 'condition': [
1959 include('end-of-block'),
1960 include('whitespace'),
1961
1962 (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
1963 bygroups(using(this, state = 'generic'), Whitespace, Operator,
1964 Whitespace, using(this, state = 'generic'), Whitespace,
1965 Punctuation)),
1966 (r'\b!', Operator),
1967 (r'\bnot\b', Operator.Word),
1968 (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
1969 bygroups(using(this, state='generic'), Whitespace, Operator.Word,
1970 Whitespace, using(this, state='generic'))),
1971
1972 include('generic'),
1973 include('whitespace')
1974 ],
1975
1976 'generic-value': [
1977 include('generic'),
1978 include('end-at-whitespace')
1979 ],
1980
1981 'operator': [
1982 (r'(\s*)((=|!|>|<)=?)(\s*)',
1983 bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
1984 (r'(\s*)(\bcontains\b)(\s*)',
1985 bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
1986 ],
1987
1988 'end-of-tag': [
1989 (r'\}\}', Punctuation, '#pop')
1990 ],
1991
1992 'end-of-block': [
1993 (r'%\}', Punctuation, ('#pop', '#pop'))
1994 ],
1995
1996 'end-at-whitespace': [
1997 (r'\s+', Whitespace, '#pop')
1998 ],
1999
2000 # states for unknown markup
2001 'param-markup': [
2002 include('whitespace'),
2003 # params with colons or equals
2004 (r'([^\s=:]+)(\s*)(=|:)',
2005 bygroups(Name.Attribute, Whitespace, Operator)),
2006 # explicit variables
2007 (r'(\{\{)(\s*)([^\s}]+)(\s*)(\}\})',
2008 bygroups(Punctuation, Whitespace, using(this, state='variable'),
2009 Whitespace, Punctuation)),
2010
2011 include('string'),
2012 include('number'),
2013 include('keyword'),
2014 (r',', Punctuation)
2015 ],
2016
2017 'default-param-markup': [
2018 include('param-markup'),
2019 (r'.', Text) # fallback for switches / variables / un-quoted strings / ...
2020 ],
2021
2022 'variable-param-markup': [
2023 include('param-markup'),
2024 include('variable'),
2025 (r'.', Text) # fallback
2026 ],
2027
2028 'tag-markup': [
2029 (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
2030 include('default-param-markup')
2031 ],
2032
2033 'variable-tag-markup': [
2034 (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag
2035 include('variable-param-markup')
2036 ],
2037
2038 # states for different values types
2039 'keyword': [
2040 (r'\b(false|true)\b', Keyword.Constant)
2041 ],
2042
2043 'variable': [
2044 (r'[a-zA-Z_]\w*', Name.Variable),
2045 (r'(?<=\w)\.(?=\w)', Punctuation)
2046 ],
2047
2048 'string': [
2049 (r"'[^']*'", String.Single),
2050 (r'"[^"]*"', String.Double)
2051 ],
2052
2053 'number': [
2054 (r'\d+\.\d+', Number.Float),
2055 (r'\d+', Number.Integer)
2056 ],
2057
2058 'generic': [ # decides for variable, string, keyword or number
2059 include('keyword'),
2060 include('string'),
2061 include('number'),
2062 include('variable')
2063 ],
2064
2065 'whitespace': [
2066 (r'[ \t]+', Whitespace)
2067 ],
2068
2069 # states for builtin blocks
2070 'comment': [
2071 (r'(\{%)(\s*)(endcomment)(\s*)(%\})',
2072 bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
2073 Punctuation), ('#pop', '#pop')),
2074 (r'.', Comment)
2075 ],
2076
2077 'raw': [
2078 (r'[^{]+', Text),
2079 (r'(\{%)(\s*)(endraw)(\s*)(%\})',
2080 bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
2081 Punctuation), '#pop'),
2082 (r'\{', Text)
2083 ],
2084 }
2085
2086
2087 class TwigLexer(RegexLexer):
2088 """
2089 `Twig <http://twig.sensiolabs.org/>`_ template lexer.
2090
2091 It just highlights Twig code between the preprocessor directives;
2092 other data is left untouched by the lexer.
2093
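A minimal usage sketch; the template snippet is an invented example:

>>> from pygments.lexers.templates import TwigLexer
>>> toks = list(TwigLexer().get_tokens('{% if user %}Hello!{% endif %}'))
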
2094 .. versionadded:: 2.0
2095 """
2096
2097 name = 'Twig'
2098 aliases = ['twig']
2099 mimetypes = ['application/x-twig']
2100
2101 flags = re.M | re.S
2102
2103 # Note that a backslash is included in the following two patterns;
2104 # PHP uses a backslash as a namespace separator.
2105 _ident_char = r'[\\\w-]|[^\x00-\x7f]'
2106 _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
2107 _ident_end = r'(?:' + _ident_char + ')*'
2108 _ident_inner = _ident_begin + _ident_end
2109
2110 tokens = {
2111 'root': [
2112 (r'[^{]+', Other),
2113 (r'\{\{', Comment.Preproc, 'var'),
2114 # twig comments
2115 (r'\{\#.*?\#\}', Comment),
2116 # raw twig blocks
2117 (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
2118 r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
2119 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
2120 Other, Comment.Preproc, Text, Keyword, Text,
2121 Comment.Preproc)),
2122 (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
2123 r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
2124 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
2125 Other, Comment.Preproc, Text, Keyword, Text,
2126 Comment.Preproc)),
2127 # filter blocks
2128 (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner,
2129 bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
2130 'tag'),
2131 (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
2132 bygroups(Comment.Preproc, Text, Keyword), 'tag'),
2133 (r'\{', Other),
2134 ],
2135 'varnames': [
2136 (r'(\|)(\s*)(%s)' % _ident_inner,
2137 bygroups(Operator, Text, Name.Function)),
2138 (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner,
2139 bygroups(Keyword, Text, Keyword, Text, Name.Function)),
2140 (r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
2141 (r'(in|not|and|b-and|or|b-or|b-xor|is|'
2142 r'if|elseif|else|import|'
2143 r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|'
2144 r'matches|starts\s+with|ends\s+with)\b',
2145 Keyword),
2146 (r'(loop|block|parent)\b', Name.Builtin),
2147 (_ident_inner, Name.Variable),
2148 (r'\.' + _ident_inner, Name.Variable),
2149 (r'\.[0-9]+', Number),
2150 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
2151 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
2152 (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
2153 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
2154 r"0[xX][0-9a-fA-F]+[Ll]?", Number),
2155 ],
2156 'var': [
2157 (r'\s+', Text),
2158 (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
2159 include('varnames')
2160 ],
2161 'tag': [
2162 (r'\s+', Text),
2163 (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
2164 include('varnames'),
2165 (r'.', Punctuation),
2166 ],
2167 }
2168
2169
2170 class TwigHtmlLexer(DelegatingLexer):
2171 """
2172 Subclass of the `TwigLexer` that highlights unlexed data with the
2173 `HtmlLexer`.
2174
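A minimal rendering sketch; the input fragment is an invented example,
``highlight`` and ``HtmlFormatter`` are the standard Pygments API:

>>> from pygments import highlight
>>> from pygments.formatters import HtmlFormatter
>>> from pygments.lexers.templates import TwigHtmlLexer
>>> out = highlight('<p>{{ title }}</p>', TwigHtmlLexer(), HtmlFormatter())
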
2175 .. versionadded:: 2.0
2176 """
2177
2178 name = "HTML+Twig"
2179 aliases = ["html+twig"]
2180 filenames = ['*.twig']
2181 mimetypes = ['text/html+twig']
2182
2183 def __init__(self, **options):
2184 super().__init__(HtmlLexer, TwigLexer, **options)
2185
2186
2187 class Angular2Lexer(RegexLexer):
2188 """
2189 Generic
2190 `angular2 <http://victorsavkin.com/post/119943127151/angular-2-template-syntax>`_
2191 template lexer.
2192
2193 Highlights only the Angular template tags (content between `{{` and `}}`
2194 and the special attributes '(event)=', '[property]=', '[(twoWayBinding)]=').
2195 Everything else is left to a delegating lexer.
2196
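A minimal usage sketch; the markup below is an invented example:

>>> from pygments.lexers.templates import Angular2Lexer
>>> toks = list(Angular2Lexer().get_tokens('<b (click)="save()">{{ x }}</b>'))
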
2197 .. versionadded:: 2.1
2198 """
2199
2200 name = "Angular2"
2201 aliases = ['ng2']
2202
2203 tokens = {
2204 'root': [
2205 (r'[^{([*#]+', Other),
2206
2207 # {{meal.name}}
2208 (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),
2209
2210 # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
2211 (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
2212 bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
2213 'attr'),
2214 (r'([([]+)([\w:.-]+)([\])]+)(\s*)',
2215 bygroups(Punctuation, Name.Attribute, Punctuation, Text)),
2216
2217 # *ngIf="..."; #f="ngForm"
2218 (r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
2219 bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'),
2220 (r'([*#])([\w:.-]+)(\s*)',
2221 bygroups(Punctuation, Name.Attribute, Text)),
2222 ],
2223
2224 'ngExpression': [
2225 (r'\s+(\|\s+)?', Text),
2226 (r'\}\}', Comment.Preproc, '#pop'),
2227
2228 # Literals
2229 (r':?(true|false)', String.Boolean),
2230 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
2231 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
2232 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
2233 r"0[xX][0-9a-fA-F]+[Ll]?", Number),
2234
2235 # Variables, attribute access and calls
2236 (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
2237 (r'\.[\w-]+(\(.*\))?', Name.Variable),
2238
2239 # inline If
2240 (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
2241 bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
2242 ],
2243 'attr': [
2244 ('".*?"', String, '#pop'),
2245 ("'.*?'", String, '#pop'),
2246 (r'[^\s>]+', String, '#pop'),
2247 ],
2248 }
2249
2250
2251 class Angular2HtmlLexer(DelegatingLexer):
2252 """
2253 Subclass of the `Angular2Lexer` that highlights unlexed data with the
2254 `HtmlLexer`.
2255
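A minimal rendering sketch; the template names are invented,
``TerminalFormatter`` is the standard Pygments API:

>>> from pygments import highlight
>>> from pygments.formatters import TerminalFormatter
>>> from pygments.lexers.templates import Angular2HtmlLexer
>>> out = highlight('<div *ngIf="user">{{ user.name }}</div>',
...                 Angular2HtmlLexer(), TerminalFormatter())
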
2256 .. versionadded:: 2.1
2257 """
2258
2259 name = "HTML + Angular2"
2260 aliases = ["html+ng2"]
2261 filenames = ['*.ng2']
2262
2263 def __init__(self, **options):
2264 super().__init__(HtmlLexer, Angular2Lexer, **options)
