ThirdParty/Pygments/pygments/lexers/templates.py

# -*- coding: utf-8 -*-
"""
    pygments.lexers.templates
    ~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for various template engines' markup.

    :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
try:
    set
except NameError:
    from sets import Set as set

from pygments.lexers.web import \
     PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
from pygments.lexers.agile import PythonLexer
from pygments.lexers.compiled import JavaLexer
from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
     include, using, this
from pygments.token import Error, Punctuation, \
     Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
from pygments.util import html_doctype_matches, looks_like_xml

__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
           'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
           'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
           'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
           'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
           'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
           'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
           'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
           'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
           'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MakoLexer',
           'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
           'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
           'CheetahXmlLexer', 'CheetahJavascriptLexer',
           'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer']


class ErbLexer(Lexer):
    """
    Generic `ERB <http://ruby-doc.org/core/classes/ERB.html>`_ (Ruby Templating)
    lexer.

    Just highlights Ruby code between the preprocessor directives; other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']

    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        from pygments.lexers.agile import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of Ruby
        blocks, splitting on those markers is safe here; input that
        nests them will confuse this lexer just as it confuses ERB.
        """
        tokens = self._block_re.split(text)
        tokens.reverse()
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                            self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            return

    def analyse_text(text):
        if '<%' in text and '%>' in text:
            return 0.4


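# A minimal usage sketch (illustrative only, not part of this module's API):
# any of these lexers can be driven through the standard ``Lexer.get_tokens``
# interface; the sample template string below is invented for the example.
#
#     from pygments.lexers.templates import ErbLexer
#     for token, value in ErbLexer().get_tokens('a <%= 1 + 1 %> b'):
#         print token, repr(value)
#
# The plain text comes back as ``Token.Other``, the ``<%=``/``%>`` markers
# as ``Comment.Preproc``, and the expression between them as Ruby tokens.

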
class SmartyLexer(RegexLexer):
    """
    Generic `Smarty <http://smarty.php.net/>`_ template lexer.

    Just highlights Smarty code between the preprocessor directives; other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            (r'(\{)(/?[a-zA-Z_][a-zA-Z0-9_]*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_][a-zA-Z0-9_]*#', Name.Variable),
            (r'\$[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z0-9_]+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?{}@-]', Operator),
            # must be a raw string: in a plain string '\b' is a backspace
            # character, not the word-boundary assertion
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\"|[^"])*"', String.Double),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
            rv += 0.01
        return rv


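# Sketch of end-to-end use (the names below are the stock Pygments entry
# points, nothing defined in this file): render a Smarty fragment to
# ANSI-coloured terminal output.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers.templates import SmartyLexer
#     print highlight('{foreach from=$items item=i}{$i}{/foreach}',
#                     SmartyLexer(), TerminalFormatter())

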
class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <http://wsgiarea.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives;
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{[*#].*?[*#]\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?)\b', Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z][a-zA-Z0-9_]*', Name.Variable),
            (r'\.[a-zA-Z0-9_]+', Name.Variable),
            (r':?"(\\\\|\\"|[^"])*"', String.Double),
            (r":?'(\\\\|\\'|[^'])*'", String.Single),
            (r'([{}()\[\]+\-*/,:]|[><=]=?)', Operator),
            (r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv


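# Sketch: what the DjangoLexer emits for a variable expression (the template
# string is invented for the example).
#
#     from pygments.lexers.templates import DjangoLexer
#     for token, value in DjangoLexer().get_tokens('{{ user.name|title }}'):
#         print token, repr(value)
#
# ``{{``/``}}`` arrive as ``Comment.Preproc``, ``user`` and ``.name`` as
# ``Name.Variable``, and ``|title`` as an ``Operator`` followed by a
# ``Name.Function`` filter name.

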
class MyghtyLexer(RegexLexer):
    """
    Generic `myghty templates`_ lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    *New in Pygments 0.6.*

    .. _myghty templates: http://www.myghty.org/
    """

    name = 'Myghty'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(<%(def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(<%(\w+))(.*?)(>)(.*?)(</%\2\s*>)(?s)',
             bygroups(Name.Tag, None, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(<&\|)(.*?)(,.*?)?(&>)(?s)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(<%!?)(.*?)(%>)(?s)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }


class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super(MyghtyHtmlLexer, self).__init__(HtmlLexer, MyghtyLexer,
                                              **options)


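# How these delegating subclasses work (a sketch of the mechanism, not new
# API): ``DelegatingLexer`` runs the template lexer first, buffers every
# stretch it yielded as ``Token.Other``, lexes that buffered text with the
# root lexer (here ``HtmlLexer``), and splices the two token streams back
# together. So in
#
#     from pygments.lexers.templates import MyghtyHtmlLexer
#     tokens = list(MyghtyHtmlLexer().get_tokens('<b><% "hi" %></b>'))
#
# the ``<b>``/``</b>`` markup is highlighted as HTML while the embedded
# block is highlighted as Python between ``Name.Tag`` delimiters.

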
class MyghtyXmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.6.*
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        super(MyghtyXmlLexer, self).__init__(XmlLexer, MyghtyLexer,
                                             **options)


class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.6.*
    """

    name = 'JavaScript+Myghty'
    aliases = ['js+myghty', 'javascript+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        super(MyghtyJavascriptLexer, self).__init__(JavascriptLexer,
                                                    MyghtyLexer, **options)


class MyghtyCssLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.6.*
    """

    name = 'CSS+Myghty'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']

    def __init__(self, **options):
        super(MyghtyCssLexer, self).__init__(CssLexer, MyghtyLexer,
                                             **options)


class MakoLexer(RegexLexer):
    """
    Generic `mako templates`_ lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    *New in Pygments 0.7.*

    .. _mako templates: http://www.makotemplates.org/
    """

    name = 'Mako'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text, Comment.Preproc, Keyword, Other)),
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, using(PythonLexer), Other)),
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text, Comment.Preproc, Other)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Preproc),
            (r'(<%)([\w\.\:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w\.\:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w\.\:]+))', Comment.Preproc, 'ondeftags'),
            (r'(<%(?:!?))(.*?)(%>)(?s)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)\s*(".*?")',
             bygroups(Name.Attribute, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }


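# Sketch: the two Mako embedding styles as this lexer sees them (the sample
# template is invented for the example).
#
#     from pygments.lexers.templates import MakoLexer
#     for token, value in MakoLexer().get_tokens('% if x:\n${x}\n'):
#         print token, repr(value)
#
# The leading ``%`` line is a control line (a ``Comment.Preproc`` marker
# followed by Python tokens); ``${x}`` is an expression substitution with
# ``Comment.Preproc`` delimiters around a Python token.

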
class MakoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `HtmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'HTML+Mako'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']

    def __init__(self, **options):
        super(MakoHtmlLexer, self).__init__(HtmlLexer, MakoLexer,
                                            **options)


class MakoXmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `XmlLexer`.

    *New in Pygments 0.7.*
    """

    name = 'XML+Mako'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']

    def __init__(self, **options):
        super(MakoXmlLexer, self).__init__(XmlLexer, MakoLexer,
                                           **options)


class MakoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    *New in Pygments 0.7.*
    """

    name = 'JavaScript+Mako'
    aliases = ['js+mako', 'javascript+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']

    def __init__(self, **options):
        super(MakoJavascriptLexer, self).__init__(JavascriptLexer,
                                                  MakoLexer, **options)


class MakoCssLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `CssLexer`.

    *New in Pygments 0.7.*
    """

    name = 'CSS+Mako'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']

    def __init__(self, **options):
        super(MakoCssLexer, self).__init__(CssLexer, MakoLexer,
                                           **options)


# Genshi and Cheetah lexers courtesy of Matt Good.

class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value


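# Why the wrapper above is needed (sketch; the sample input is invented):
# plain ``PythonLexer`` has no rule for ``$``, so the character falls out of
# the state machine as ``Token.Error``:
#
#     from pygments.lexers.agile import PythonLexer
#     list(PythonLexer().get_tokens('$foo'))   # '$' -> Token.Error
#
# ``CheetahPythonLexer`` re-labels exactly that case as ``Comment.Preproc``
# so Cheetah placeholders render like the other template delimiters.

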
class CheetahLexer(RegexLexer):
    """
    Generic `cheetah templates`_ lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_ which use the same syntax.

    .. _cheetah templates: http://www.cheetahtemplate.org/
    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            (r'(##[^\n]*)$', bygroups(Comment)),
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            # TODO support other Python syntax like $foo['bar']
            (r'(\$)([a-zA-Z_][a-zA-Z0-9_\.]*[a-zA-Z0-9_])',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(\$\{!?)(.*?)(\})(?s)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?=[#][#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) |  # a substitution
                 \Z                  # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }


class CheetahHtmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    aliases = ['html+cheetah', 'html+spitfire']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']

    def __init__(self, **options):
        super(CheetahHtmlLexer, self).__init__(HtmlLexer, CheetahLexer,
                                               **options)


class CheetahXmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']

    def __init__(self, **options):
        super(CheetahXmlLexer, self).__init__(XmlLexer, CheetahLexer,
                                              **options)


class CheetahJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    aliases = ['js+cheetah', 'javascript+cheetah',
               'js+spitfire', 'javascript+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']

    def __init__(self, **options):
        super(CheetahJavascriptLexer, self).__init__(JavascriptLexer,
                                                     CheetahLexer, **options)


class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ text
    templates.
    """

    name = 'Genshi Text'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            (r'[^#\$\s]+', Other),
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#\$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment),
             '#pop'),
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }


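# Sketch (the sample template is invented): Genshi text directives start a
# line with ``#``; ``${...}`` is an expression substitution.
#
#     from pygments.lexers.templates import GenshiTextLexer
#     tokens = GenshiTextLexer().get_tokens('#if x\n${x}\n#end\n')
#     for token, value in tokens:
#         print token, repr(value)
#
# The ``#`` enters the ``directive`` state, ``if x`` is lexed as Python,
# and the ``${``/``}`` around ``x`` come back as ``Comment.Preproc``.

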
class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<\$]+', Other),
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<\$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String),
             '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String),
             '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[a-zA-Z0-9_-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][a-zA-Z0-9_\.]*)',
             Name.Variable),
        ]
    }


class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
    """

    name = 'HTML+Genshi'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']

    def __init__(self, **options):
        super(HtmlGenshiLexer, self).__init__(HtmlLexer, GenshiMarkupLexer,
                                              **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + HtmlLexer.analyse_text(text) - 0.01


class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
    """

    name = 'Genshi'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']

    def __init__(self, **options):
        super(GenshiLexer, self).__init__(XmlLexer, GenshiMarkupLexer,
                                          **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + XmlLexer.analyse_text(text) - 0.01


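# Sketch: the ``analyse_text`` scores above feed Pygments' content-based
# guessing (``guess_lexer`` is the stock entry point; the snippet is
# invented for the example).
#
#     from pygments.lexers import guess_lexer
#     lexer = guess_lexer('<html><body>${title}</body></html>')
#
# Each registered lexer's ``analyse_text`` is called on the text and the
# highest scorer wins, so the ``${...}`` bonus above tips HTML-looking
# input with substitutions toward the Genshi lexers.

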
class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights JavaScript code in genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']

    def __init__(self, **options):
        super(JavascriptGenshiLexer, self).__init__(JavascriptLexer,
                                                    GenshiTextLexer,
                                                    **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in genshi text templates.
    """

    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']

    def __init__(self, **options):
        super(CssGenshiLexer, self).__init__(CssLexer, GenshiTextLexer,
                                             **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super(RhtmlLexer, self).__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv


class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+erb', 'xml+ruby']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super(XmlErbLexer, self).__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+erb', 'css+ruby']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super(CssErbLexer, self).__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['js+erb', 'javascript+erb', 'js+ruby', 'javascript+ruby']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super(JavascriptErbLexer, self).__init__(JavascriptLexer, ErbLexer,
                                                 **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super(XmlPhpLexer, self).__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super(CssPhpLexer, self).__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05


class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['js+php', 'javascript+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super(JavascriptPhpLexer, self).__init__(JavascriptLexer, PhpLexer,
                                                 **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text)


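# Sketch: the combined lexers are normally reached through the registry
# rather than instantiated directly (``get_lexer_by_name`` is the stock
# Pygments entry point).
#
#     from pygments.lexers import get_lexer_by_name
#     lexer = get_lexer_by_name('html+php')   # -> HtmlPhpLexer
#
# Filename-based lookup works the same way, via the ``filenames`` and
# ``alias_filenames`` patterns declared on each class.

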
class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super(HtmlSmartyLexer, self).__init__(HtmlLexer, SmartyLexer,
                                              **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super(XmlSmartyLexer, self).__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super(CssSmartyLexer, self).__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['js+smarty', 'javascript+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super(JavascriptSmartyLexer, self).__init__(JavascriptLexer,
                                                    SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super(HtmlDjangoLexer, self).__init__(HtmlLexer, DjangoLexer,
                                              **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super(XmlDjangoLexer, self).__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super(CssDjangoLexer, self).__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['js+django', 'javascript+django',
               'js+jinja', 'javascript+jinja']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super(JavascriptDjangoLexer, self).__init__(JavascriptLexer,
                                                    DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for area outside of
    JSP tags.

    *New in Pygments 0.7.*
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }


class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    *New in Pygments 0.7.*
    """
    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super(JspLexer, self).__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv


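# Sketch (the sample page is invented): inside ``<% ... %>`` the scriptlet
# body is handed to ``JavaLexer``; everything else is re-lexed as XML by
# the delegating ``JspLexer``.
#
#     from pygments.lexers.templates import JspLexer
#     tokens = list(JspLexer().get_tokens('<p><% int x = 1; %></p>'))

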
class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    *New in Pygments 1.1.*
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation, None)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            #+ minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+[^=,%}]+?)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation, None)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation, None)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation, None)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }


class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.

    *New in Pygments 1.1.*
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']

    def __init__(self, **options):
        super(EvoqueHtmlLexer, self).__init__(HtmlLexer, EvoqueLexer,
                                              **options)


class EvoqueXmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `XmlLexer`.

    *New in Pygments 1.1.*
    """
    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']

    def __init__(self, **options):
        super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
                                             **options)
