ThirdParty/Pygments/pygments/lexers/sql.py

changeset 2426:da76c71624de
parent    1705:b0fbc9300f2b
child     2525:8b507a9a2d40
comparing 2425:ace8a08028f3 with 2426:da76c71624de

@@ -32,16 +32,15 @@
         - handles psql backslash commands.
 
     The ``tests/examplefiles`` contains a few test files with data to be
     parsed by these lexers.
 
-    :copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
-from copy import deepcopy
 
 from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups
 from pygments.token import Punctuation, \
     Text, Comment, Operator, Keyword, Name, String, Number, Generic
 from pygments.lexers import get_lexer_by_name, ClassNotFound
@@ -59,13 +58,10 @@
 
 def language_callback(lexer, match):
     """Parse the content of a $-string using a lexer
 
     The lexer is chosen looking for a nearby LANGUAGE.
-
-    Note: this function should have been a `PostgresBase` method, but the
-    rules deepcopy fails in this case.
     """
     l = None
     m = language_re.match(lexer.text[match.end():match.end()+100])
     if m is not None:
         l = lexer._get_lexer(m.group(1))
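
The callback above peeks at most 100 characters past the end of the $-quoted
body for a LANGUAGE clause and asks the lexer to resolve it. A minimal
standalone sketch of that lookahead follows; `language_re` is reconstructed
from how it is used here, and `guess_body_lexer` is an illustrative stand-in
for the module's `_get_lexer`, which additionally tries alias fallbacks:

    import re

    from pygments.lexers import get_lexer_by_name, ClassNotFound

    # Assumed shape of the module-level language_re (hedged reconstruction).
    language_re = re.compile(r"\s+LANGUAGE\s+'?(\w+)'?", re.IGNORECASE)

    def guess_body_lexer(text, body_end):
        # Peek at the text right after the $-string body, e.g. the tail of
        # "CREATE FUNCTION f() ... $$ LANGUAGE plpython3u;".
        m = language_re.match(text[body_end:body_end + 100])
        if m is None:
            return None
        try:
            return get_lexer_by_name(m.group(1).lower())
        except ClassNotFound:
            return None
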
@@ -91,12 +87,10 @@
     This is implemented as a mixin to avoid the Lexer metaclass kicking in.
     this way the different lexer don't have a common Lexer ancestor. If they
     had, _tokens could be created on this ancestor and not updated for the
     other classes, resulting e.g. in PL/pgSQL parsed as SQL. This shortcoming
     seem to suggest that regexp lexers are not really subclassable.
-
-    `language_callback` should really be our method, but this breaks deepcopy.
     """
     def get_tokens_unprocessed(self, text, *args):
         # Have a copy of the entire text to be used by `language_callback`.
         self.text = text
         for x in super(PostgresBase, self).get_tokens_unprocessed(
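
The mixin's job is to run before `RegexLexer` in the method resolution order
and stash the whole input where `language_callback` can reach it, without
giving the concrete lexers a shared `RegexLexer` ancestor whose `_tokens`
table would be compiled only once. A minimal sketch of the same pattern,
with illustrative class names:

    from pygments.lexer import RegexLexer
    from pygments.token import Text

    class PeekMixin(object):
        # Deliberately not a Lexer subclass; see the docstring above.
        def get_tokens_unprocessed(self, text, *args):
            self.text = text  # callbacks may read lexer.text[match.end():]
            for item in super(PeekMixin, self).get_tokens_unprocessed(
                    text, *args):
                yield item

    class MiniLexer(PeekMixin, RegexLexer):
        tokens = {'root': [(r'.+', Text), (r'\n', Text)]}

    # The mixin's override runs first, then delegates to RegexLexer:
    tokens_out = list(MiniLexer().get_tokens_unprocessed('hello\n'))
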
@@ -180,11 +174,11 @@
     name = 'PL/pgSQL'
     aliases = ['plpgsql']
     mimetypes = ['text/x-plpgsql']
 
     flags = re.IGNORECASE
-    tokens = deepcopy(PostgresLexer.tokens)
+    tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.items())
 
     # extend the keywords list
     for i, pattern in enumerate(tokens['root']):
         if pattern[1] == Keyword:
             tokens['root'][i] = (
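
The replacement for `deepcopy` copies exactly one level: each state name maps
to a fresh list (`l[:]`), while the immutable rule tuples inside are shared
with the parent. That is enough because the class body only appends to or
reassigns entries of those lists, and unlike `deepcopy` it never tries to
clone callables stored in the rules, which is presumably why the
deepcopy-related notes disappear in this changeset. A small self-contained
check of that property, using throwaway names:

    parent = {'root': [(r'\d+', 'Number'), (r'\w+', 'Keyword')]}

    child = dict((k, l[:]) for (k, l) in parent.items())
    child['root'].append((r'\\copy', 'Pseudo'))
    child['root'][0] = (r'\d+(\.\d+)?', 'Number')

    assert len(parent['root']) == 2                   # parent list untouched
    assert parent['root'][0] == (r'\d+', 'Number')    # parent rule untouched
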
@@ -214,11 +208,11 @@
 
     name = 'PostgreSQL console - regexp based lexer'
     aliases = []    # not public
 
     flags = re.IGNORECASE
-    tokens = deepcopy(PostgresLexer.tokens)
+    tokens = dict((k, l[:]) for (k, l) in PostgresLexer.tokens.items())
 
     tokens['root'].append(
         (r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
     tokens['psql-command'] = [
         (r'\n', Text, 'root'),
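
The appended rule sends any backslash-introduced word into a dedicated
`psql-command` state, and the newline rule hands control back to `root`. A
reduced, runnable illustration of that hand-off (a toy lexer, not the console
lexer itself):

    from pygments.lexer import RegexLexer
    from pygments.token import Keyword, Text

    class MiniPsqlLexer(RegexLexer):
        tokens = {
            'root': [
                (r'\\[^\s]+', Keyword.Pseudo, 'psql-command'),
                (r'[^\\\n]+', Text),
                (r'\n', Text),
            ],
            'psql-command': [
                (r'\n', Text, 'root'),   # end of command: back to root
                (r'[^\n]+', Text),
            ],
        }

    # '\timing' comes out as Keyword.Pseudo; ' on' is lexed in 'psql-command'.
    tokens_out = list(MiniPsqlLexer().get_tokens('\\timing on\n'))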
