# -*- coding: utf-8 -*-
"""
    pygments.lexers.parsers
    ~~~~~~~~~~~~~~~~~~~~~~~

    Lexers for parser generators.

    :copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re

from pygments.lexer import RegexLexer, DelegatingLexer, \
    include, bygroups, using, this
from pygments.token import Error, Punctuation, Generic, Other, \
    Text, Comment, Operator, Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
    ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
# Use TextLexer during development to just focus on one part of a delegating
# lexer.
from pygments.lexers.special import TextLexer

__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
           'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
           'RagelJavaLexer', 'AntlrLexer', 'AntlrPythonLexer',
           'AntlrPerlLexer', 'AntlrRubyLexer', 'AntlrCppLexer',
           #'AntlrCLexer',
           'AntlrCSharpLexer', 'AntlrObjectiveCLexer',
           'AntlrJavaLexer', 'AntlrActionScriptLexer']


class RagelLexer(RegexLexer):
    """
    A pure `Ragel <http://www.complang.org/ragel/>`_ lexer. Use this for
    fragments of Ragel. For ``.rl`` files, use RagelEmbeddedLexer instead
    (or one of the language-specific subclasses).

    *New in Pygments 1.1.*
    """

    name = 'Ragel'
    aliases = ['ragel']
    filenames = []

    tokens = {
        'whitespace': [
            (r'\s+', Whitespace)
        ],
        'comments': [
            (r'\#.*$', Comment),
        ],
        'keywords': [
            (r'(access|action|alphtype)\b', Keyword),
            (r'(getkey|write|machine|include)\b', Keyword),
            (r'(any|ascii|extend|alpha|digit|alnum|lower|upper)\b', Keyword),
            (r'(xdigit|cntrl|graph|print|punct|space|zlen|empty)\b', Keyword)
        ],
        'numbers': [
            (r'0x[0-9A-Fa-f]+', Number.Hex),
            (r'[+-]?[0-9]+', Number.Integer),
        ],
        'literals': [
            (r'"(\\\\|\\"|[^"])*"', String), # double quote string
            (r"'(\\\\|\\'|[^'])*'", String), # single quote string
            (r'\[(\\\\|\\\]|[^\]])*\]', String), # square bracket literals
            (r'/(?!\*)(\\\\|\\/|[^/])*/', String.Regex), # regular expressions
        ],
        'identifiers': [
            (r'[a-zA-Z_][a-zA-Z_0-9]*', Name.Variable),
        ],
        'operators': [
            (r',', Operator), # Join
            (r'\||&|-|--', Operator), # Union, Intersection and Subtraction
            (r'\.|<:|:>|:>>', Operator), # Concatenation
            (r':', Operator), # Label
            (r'->', Operator), # Epsilon Transition
            (r'(>|\$|%|<|@|<>)(/|eof\b)', Operator), # EOF Actions
            (r'(>|\$|%|<|@|<>)(!|err\b)', Operator), # Global Error Actions
            (r'(>|\$|%|<|@|<>)(\^|lerr\b)', Operator), # Local Error Actions
            (r'(>|\$|%|<|@|<>)(~|to\b)', Operator), # To-State Actions
            (r'(>|\$|%|<|@|<>)(\*|from\b)', Operator), # From-State Actions
            (r'>|@|\$|%', Operator), # Transition Actions and Priorities
            (r'\*|\?|\+|{[0-9]*,[0-9]*}', Operator), # Repetition
            (r'!|\^', Operator), # Negation
            (r'\(|\)', Operator), # Grouping
        ],
        'root': [
            include('literals'),
            include('whitespace'),
            include('comments'),
            include('keywords'),
            include('numbers'),
            include('identifiers'),
            include('operators'),
            (r'{', Punctuation, 'host'),
            (r'=', Operator),
            (r';', Punctuation),
        ],
        'host': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^{}\'"/#]+', # exclude unsafe characters
                r'[^\\][\\][{}]', # allow escaped { or }

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'\#.*$\n?', # ruby comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
    }
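

# Usage sketch (illustrative, not part of the original module): tokenizing a
# bare Ragel fragment with the pure Ragel lexer.  The sample machine below is
# made up for the example.
def _example_tokenize_ragel_fragment():
    fragment = "main := ( 'foo' | 'bar' )* %{ res = 1; } ;"
    return list(RagelLexer().get_tokens(fragment))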


class RagelEmbeddedLexer(RegexLexer):
    """
    A lexer for `Ragel`_ embedded in a host language file.

    This will only highlight Ragel statements. If you want host language
    highlighting then call the language-specific Ragel lexer.

    *New in Pygments 1.1.*
    """

    name = 'Embedded Ragel'
    aliases = ['ragel-em']
    filenames = ['*.rl']

    tokens = {
        'root': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^%\'"/#]+', # exclude unsafe characters
                r'%(?=[^%]|$)', # a single % sign is okay, just not 2 of them

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'//.*$\n?', # single line comment
                r'\#.*$\n?', # ruby/ragel comment
                r'/(?!\*)(\\\\|\\/|[^/])*/', # regular expression

                # / is safe now that we've handled regex and javadoc comments
                r'/',
            )) + r')+', Other),

            # Single Line FSM.
            # Please don't put a quoted newline in a single line FSM.
            # That's just mean. It will break this.
            (r'(%%)(?![{%])(.*)($|;)(\n?)', bygroups(Punctuation,
                                                     using(RagelLexer),
                                                     Punctuation, Text)),

            # Multi Line FSM.
            (r'(%%%%|%%){', Punctuation, 'multi-line-fsm'),
        ],
        'multi-line-fsm': [
            (r'(' + r'|'.join(( # keep ragel code in largest possible chunks.
                r'(' + r'|'.join((
                    r'[^}\'"\[/#]', # exclude unsafe characters
                    r'}(?=[^%]|$)', # } is okay as long as it's not followed by %
                    r'}%(?=[^%]|$)', # ...well, one %'s okay, just not two...
                    r'[^\\][\\][{}]', # ...and } is okay if it's escaped

                    # allow / if it's preceded with one of these symbols
                    # (ragel EOF actions)
                    r'(>|\$|%|<|@|<>)/',

                    # specifically allow regex followed immediately by *
                    # so it doesn't get mistaken for a comment
                    r'/(?!\*)(\\\\|\\/|[^/])*/\*',

                    # allow / as long as it's not followed by another / or by a *
                    r'/(?=[^/\*]|$)',

                    # We want to match as many of these as we can in one block.
                    # Not sure if we need the + sign here,
                    # does it help performance?
                )) + r')+',

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r"\[(\\\\|\\\]|[^\]])*\]", # square bracket literal
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment
                r'//.*$\n?', # single line comment
                r'\#.*$\n?', # ruby/ragel comment
            )) + r')+', using(RagelLexer)),

            (r'}%%', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        return '@LANG: indep' in text or 0.1
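

# Usage sketch (illustrative, not part of the original module): this lexer
# leaves host code as Other tokens, so when the host language is known it is
# better to ask for one of the language-specific variants below by alias.
# Assumes these classes are registered in Pygments' lexer mapping so that
# get_lexer_by_name can find them.
def _example_pick_ragel_lexer(host_language_known=True):
    from pygments.lexers import get_lexer_by_name
    if host_language_known:
        return get_lexer_by_name('ragel-ruby')  # Ragel plus Ruby host code
    return get_lexer_by_name('ragel-em')        # Ragel statements only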


class RagelRubyLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Ruby host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Ruby Host'
    aliases = ['ragel-ruby', 'ragel-rb']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelRubyLexer, self).__init__(RubyLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: ruby' in text
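

# Usage sketch (illustrative, not part of the original module): each host
# binding below follows the same DelegatingLexer pattern, where
# RagelEmbeddedLexer lexes the whole file and emits host code as Other
# tokens, which the host lexer (here RubyLexer) then re-lexes.  The input
# and formatter are made up for the example.
def _example_highlight_ragel_in_ruby():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    source = ("%%{\n"
              "  machine hello;\n"
              "  main := 'hi' @{ puts 'matched' };\n"
              "}%%\n"
              "puts 'plain ruby code'\n")
    return highlight(source, RagelRubyLexer(), NullFormatter())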


class RagelCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in C Host'
    aliases = ['ragel-c']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelCLexer, self).__init__(CLexer, RagelEmbeddedLexer,
                                          **options)

    def analyse_text(text):
        return '@LANG: c' in text


class RagelDLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a D host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in D Host'
    aliases = ['ragel-d']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelDLexer, self).__init__(DLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: d' in text


class RagelCppLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a CPP host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in CPP Host'
    aliases = ['ragel-cpp']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelCppLexer, self).__init__(CppLexer, RagelEmbeddedLexer, **options)

    def analyse_text(text):
        return '@LANG: c++' in text


class RagelObjectiveCLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in an Objective C host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Objective C Host'
    aliases = ['ragel-objc']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   RagelEmbeddedLexer,
                                                   **options)

    def analyse_text(text):
        return '@LANG: objc' in text


class RagelJavaLexer(DelegatingLexer):
    """
    A lexer for `Ragel`_ in a Java host file.

    *New in Pygments 1.1.*
    """

    name = 'Ragel in Java Host'
    aliases = ['ragel-java']
    filenames = ['*.rl']

    def __init__(self, **options):
        super(RagelJavaLexer, self).__init__(JavaLexer, RagelEmbeddedLexer,
                                             **options)

    def analyse_text(text):
        return '@LANG: java' in text


class AntlrLexer(RegexLexer):
    """
    Generic `ANTLR`_ Lexer.
    Should not be called directly, instead
    use DelegatingLexer for your target language.

    *New in Pygments 1.1.*

    .. _ANTLR: http://www.antlr.org/
    """

    name = 'ANTLR'
    aliases = ['antlr']
    filenames = []

    _id = r'[A-Za-z][A-Za-z_0-9]*'
    _TOKEN_REF = r'[A-Z][A-Za-z_0-9]*'
    _RULE_REF = r'[a-z][A-Za-z_0-9]*'
    _STRING_LITERAL = r'\'(?:\\\\|\\\'|[^\']*)\''
    _INT = r'[0-9]+'

    tokens = {
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'comments': [
            (r'//.*$', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
        ],
        'root': [
            include('whitespace'),
            include('comments'),

            (r'(lexer|parser|tree)?(\s*)(grammar\b)(\s*)(' + _id + ')(;)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Class,
                      Punctuation)),
            # optionsSpec
            (r'options\b', Keyword, 'options'),
            # tokensSpec
            (r'tokens\b', Keyword, 'tokens'),
            # attrScope
            (r'(scope)(\s*)(' + _id + ')(\s*)({)',
             bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
                      Punctuation), 'action'),
            # exception
            (r'(catch|finally)\b', Keyword, 'exception'),
            # action
            (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)({)',
             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
                      Name.Label, Whitespace, Punctuation), 'action'),
            # rule
            (r'((?:protected|private|public|fragment)\b)?(\s*)(' + _id + ')(!)?',
             bygroups(Keyword, Whitespace, Name.Label, Punctuation),
             ('rule-alts', 'rule-prelims')),
        ],
        'exception': [
            (r'\n', Whitespace, '#pop'),
            (r'\s', Whitespace),
            include('comments'),

            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
        ],
        'rule-prelims': [
            include('whitespace'),
            include('comments'),

            (r'returns\b', Keyword),
            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
            # throwsSpec
            (r'(throws)(\s+)(' + _id + ')',
             bygroups(Keyword, Whitespace, Name.Label)),
            (r'(?:(,)(\s*)(' + _id + '))+',
             bygroups(Punctuation, Whitespace, Name.Label)), # Additional throws
            # optionsSpec
            (r'options\b', Keyword, 'options'),
            # ruleScopeSpec - scope followed by target language code or name of action
            # TODO finish implementing other possibilities for scope
            # L173 ANTLRv3.g from ANTLR book
            (r'(scope)(\s+)({)', bygroups(Keyword, Whitespace, Punctuation),
             'action'),
            (r'(scope)(\s+)(' + _id + ')(\s*)(;)',
             bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
            # ruleAction
            (r'(@' + _id + ')(\s*)({)',
             bygroups(Name.Label, Whitespace, Punctuation), 'action'),
            # finished prelims, go to rule alts!
            (r':', Punctuation, '#pop')
        ],
        'rule-alts': [
            include('whitespace'),
            include('comments'),

            # These might need to go in a separate 'block' state triggered by (
            (r'options\b', Keyword, 'options'),
            (r':', Punctuation),

            # literals
            (r"'(\\\\|\\'|[^'])*'", String),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r'<<([^>]|>[^>])>>', String),
            # identifiers
            # Tokens start with capital letter.
            (r'\$?[A-Z_][A-Za-z_0-9]*', Name.Constant),
            # Rules start with small letter.
            (r'\$?[a-z_][A-Za-z_0-9]*', Name.Variable),
            # operators
            (r'(\+|\||->|=>|=|\(|\)|\.\.|\.|\?|\*|\^|!|\#|~)', Operator),
            (r',', Punctuation),
            (r'\[', Punctuation, 'nested-arg-action'),
            (r'\{', Punctuation, 'action'),
            (r';', Punctuation, '#pop')
        ],
        'tokens': [
            include('whitespace'),
            include('comments'),
            (r'{', Punctuation),
            (r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
             + ')?(\s*)(;)',
             bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
                      String, Whitespace, Punctuation)),
            (r'}', Punctuation, '#pop'),
        ],
        'options': [
            include('whitespace'),
            include('comments'),
            (r'{', Punctuation),
            (r'(' + _id + r')(\s*)(=)(\s*)(' +
             '|'.join((_id, _STRING_LITERAL, _INT, '\*')) + ')(\s*)(;)',
             bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
                      Text, Whitespace, Punctuation)),
            (r'}', Punctuation, '#pop'),
        ],
        'action': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks
                r'[^\${}\'"/\\]+', # exclude unsafe characters

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # backslashes are okay, as long as we are not backslashing a %
                r'\\(?!%)',

                # Now that we've handled regex and javadoc comments
                # it's safe to let / through.
                r'/',
            )) + r')+', Other),
            (r'(\\)(%)', bygroups(Punctuation, Other)),
            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
             bygroups(Name.Variable, Punctuation, Name.Property)),
            (r'{', Punctuation, '#push'),
            (r'}', Punctuation, '#pop'),
        ],
        'nested-arg-action': [
            (r'(' + r'|'.join(( # keep host code in largest possible chunks.
                r'[^\$\[\]\'"/]+', # exclude unsafe characters

                # strings and comments may safely contain unsafe characters
                r'"(\\\\|\\"|[^"])*"', # double quote string
                r"'(\\\\|\\'|[^'])*'", # single quote string
                r'//.*$\n?', # single line comment
                r'/\*(.|\n)*?\*/', # multi-line javadoc-style comment

                # regular expression: There's no reason for it to start
                # with a * and this stops confusion with comments.
                r'/(?!\*)(\\\\|\\/|[^/])*/',

                # Now that we've handled regex and javadoc comments
                # it's safe to let / through.
                r'/',
            )) + r')+', Other),

            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r'(\$[a-zA-Z]+)(\.?)(text|value)?',
             bygroups(Name.Variable, Punctuation, Name.Property)),
            (r'(\\\\|\\\]|\\\[|[^\[\]])+', Other),
        ]
    }

    def analyse_text(text):
        return re.search(r'^\s*grammar\s+[a-zA-Z0-9]+\s*;', text, re.M)
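

# Usage sketch (illustrative, not part of the original module): AntlrLexer on
# its own emits embedded target-language code as Other tokens, so grammars
# are normally highlighted through one of the Antlr*Lexer delegating classes
# defined below.  The grammar snippet and formatter are made up for the
# example.
def _example_highlight_antlr_grammar():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    grammar = ("grammar Sum;\n"
               "options { language = Java; }\n"
               "expr : INT ('+' INT)* ;\n"
               "INT  : '0'..'9'+ ;\n")
    return highlight(grammar, AntlrJavaLexer(), NullFormatter())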

# http://www.antlr.org/wiki/display/ANTLR3/Code+Generation+Targets

# TH: I'm not aware of any language features of C++ that will cause
# incorrect lexing of C files. Antlr doesn't appear to make a distinction,
# so just assume they're C++. No idea how to make Objective C work in the
# future.

#class AntlrCLexer(DelegatingLexer):
#    """
#    ANTLR with C Target
#
#    *New in Pygments 1.1*
#    """
#
#    name = 'ANTLR With C Target'
#    aliases = ['antlr-c']
#    filenames = ['*.G', '*.g']
#
#    def __init__(self, **options):
#        super(AntlrCLexer, self).__init__(CLexer, AntlrLexer, **options)
#
#    def analyse_text(text):
#        return re.match(r'^\s*language\s*=\s*C\s*;', text)

class AntlrCppLexer(DelegatingLexer):
    """
    `ANTLR`_ with CPP Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With CPP Target'
    aliases = ['antlr-cpp']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrCppLexer, self).__init__(CppLexer, AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*C\s*;', text, re.M)


class AntlrObjectiveCLexer(DelegatingLexer):
    """
    `ANTLR`_ with Objective-C Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ObjectiveC Target'
    aliases = ['antlr-objc']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer,
                                                   AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*ObjC\s*;', text, re.M)


class AntlrCSharpLexer(DelegatingLexer):
    """
    `ANTLR`_ with C# Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With C# Target'
    aliases = ['antlr-csharp', 'antlr-c#']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrCSharpLexer, self).__init__(CSharpLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*CSharp2\s*;', text, re.M)


class AntlrPythonLexer(DelegatingLexer):
    """
    `ANTLR`_ with Python Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Python Target'
    aliases = ['antlr-python']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrPythonLexer, self).__init__(PythonLexer, AntlrLexer,
                                               **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Python\s*;', text, re.M)


class AntlrJavaLexer(DelegatingLexer):
    """
    `ANTLR`_ with Java Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Java Target'
    aliases = ['antlr-java']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrJavaLexer, self).__init__(JavaLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        # Antlr language is Java by default
        return AntlrLexer.analyse_text(text) and 0.9
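

# Usage sketch (illustrative, not part of the original module): the ANTLR
# target lexers are normally obtained through Pygments' alias lookup using
# the aliases declared above and below (e.g. 'antlr-java', 'antlr-ruby').
# Assumes these classes are registered in Pygments' lexer mapping.
def _example_lookup_antlr_lexer(target='java'):
    from pygments.lexers import get_lexer_by_name
    return get_lexer_by_name('antlr-' + target)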


class AntlrRubyLexer(DelegatingLexer):
    """
    `ANTLR`_ with Ruby Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Ruby Target'
    aliases = ['antlr-ruby', 'antlr-rb']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrRubyLexer, self).__init__(RubyLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Ruby\s*;', text, re.M)


class AntlrPerlLexer(DelegatingLexer):
    """
    `ANTLR`_ with Perl Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With Perl Target'
    aliases = ['antlr-perl']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrPerlLexer, self).__init__(PerlLexer, AntlrLexer,
                                             **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*Perl5\s*;', text, re.M)


class AntlrActionScriptLexer(DelegatingLexer):
    """
    `ANTLR`_ with ActionScript Target

    *New in Pygments 1.1.*
    """

    name = 'ANTLR With ActionScript Target'
    aliases = ['antlr-as', 'antlr-actionscript']
    filenames = ['*.G', '*.g']

    def __init__(self, **options):
        super(AntlrActionScriptLexer, self).__init__(ActionScriptLexer,
                                                     AntlrLexer, **options)

    def analyse_text(text):
        return AntlrLexer.analyse_text(text) and \
               re.search(r'^\s*language\s*=\s*ActionScript\s*;', text, re.M)