    pygments.lexers.robotframework
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Lexer for Robot Framework.

    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# Copyright 2012 Nokia Siemens Networks Oyj
#
# ...

    def _tokenize(self, var, string, orig_token):
        # Text before the variable keeps its original token type; the
        # variable itself is emitted as SYNTAX/VARIABLE tokens, and the
        # rest of the string is tokenized recursively.
        before = string[:var.start]
        yield before, orig_token
        yield var.identifier + '{', SYNTAX
        yield from self.tokenize(var.base, VARIABLE)
        yield '}', SYNTAX
        if var.index:
            yield '[', SYNTAX
            yield from self.tokenize(var.index, VARIABLE)
            yield ']', SYNTAX
        yield from self.tokenize(string[var.end:], orig_token)


class RowTokenizer:

    def __init__(self):
        # ...
        keywords = KeywordTable()
        self._tables = {'settings': settings, 'setting': settings,
                        'metadata': settings,
                        'variables': variables, 'variable': variables,
                        'testcases': testcases, 'testcase': testcases,
                        # Task (RPA) sections are tokenized like test cases.
                        'tasks': testcases, 'task': testcases,
                        'keywords': keywords, 'keyword': keywords,
                        'userkeywords': keywords, 'userkeyword': keywords}

    def tokenize(self, row):
        commented = False
        # ...
            if value.startswith('#'):
                commented = True
            elif index == 0 and value.startswith('*'):
                self._table = self._start_table(value)
                heading = True
            yield from self._tokenize(value, index, commented,
                                      separator, heading)
        self._table.end_row()

    def _start_table(self, header):
        # Section headers are normalized (lowercased, '*' and spaces
        # stripped), so "*** Tasks ***" and "*** Test Cases ***" map to the
        # 'tasks' and 'testcases' keys above.
        name = normalize(header, remove='*')
        return self._tables.get(name, UnknownTable())
    # ...
        elif separator:
            yield value, SEPARATOR
        elif heading:
            yield value, HEADING
        else:
            yield from self._table.tokenize(value, index)


class RowSplitter:
    _space_splitter = re.compile('( {2,})')
    _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')
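    # The two patterns above correspond to the two row formats Robot
    # Framework supports: cells separated by two or more spaces
    # ("Keyword    arg") and pipe-separated rows ("| Keyword | arg |").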

    def split(self, row):
        splitter = (row.startswith('| ') and self._split_from_pipes
                    or self._split_from_spaces)
        yield from splitter(row)
        yield '\n'

    def _split_from_spaces(self, row):
        yield ''  # Start with a (pseudo) separator, as in the pipe format
        yield from self._space_splitter.split(row)

    def _split_from_pipes(self, row):
        _, separator, rest = self._pipe_splitter.split(row, 1)
        yield separator
        while self._pipe_splitter.search(rest):
            # ...


class Setting(Tokenizer):
    _tokens = (SETTING, ARGUMENT)
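    # The task* settings ('tasksetup', 'taskteardown', 'tasktemplate',
    # 'tasktimeout') are the task (RPA) counterparts of the corresponding
    # test settings and are highlighted the same way.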
    _keyword_settings = ('suitesetup', 'suiteprecondition', 'suiteteardown',
                         'suitepostcondition', 'testsetup', 'tasksetup',
                         'testprecondition', 'testteardown', 'taskteardown',
                         'testpostcondition', 'testtemplate', 'tasktemplate')
    _import_settings = ('library', 'resource', 'variables')
    _other_settings = ('documentation', 'metadata', 'forcetags', 'defaulttags',
                       'testtimeout', 'tasktimeout')
    _custom_tokenizer = None

    def __init__(self, template_setter=None):
        Tokenizer.__init__(self)
        self._template_setter = template_setter

# ...

    def tokenize(self, value, index):
        if self._continues(value, index):
            self._tokenizer = self._prev_tokenizer
            yield value, SYNTAX
        else:
            yield from self._tokenize(value, index)
        self._prev_values_on_row.append(value)

    def _continues(self, value, index):
        return value == '...' and all(self._is_empty(t)
                                      for t in self._prev_values_on_row)
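

# The snippet below is an illustrative usage sketch, not part of the lexer
# module (run it as a separate script): it feeds the public
# RobotFrameworkLexer defined in this module a file that uses the
# "*** Tasks ***" section and the task settings recognized above.
# highlight() and TerminalFormatter are standard Pygments APIs; the
# formatter choice here is arbitrary.

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers.robotframework import RobotFrameworkLexer

ROBOT_SOURCE = """\
*** Settings ***
Task Setup      Log    preparing
Task Timeout    1 minute

*** Tasks ***
Example Task
    Log    Hello from a task
"""

print(highlight(ROBOT_SOURCE, RobotFrameworkLexer(), TerminalFormatter()))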