1 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 |
1 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 |
2 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt |
2 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt |
3 |
3 |
4 """A simple Python template renderer, for a nano-subset of Django syntax. |
4 """A simple Python template renderer, for a nano-subset of Django syntax. |
5 |
5 |
6 For a detailed discussion of this code, see this chapter from 500 Lines: |
6 For a detailed discussion of this code, see this chapter from 500 Lines: |
7 http://aosabook.org/en/500L/a-template-engine.html |
7 http://aosabook.org/en/500L/a-template-engine.html |
88 |
88 |
89 Comments are within curly-hash markers:: |
89 Comments are within curly-hash markers:: |
90 |
90 |
91 {# This will be ignored #} |
91 {# This will be ignored #} |
92 |
92 |
93 Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), |
93 Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped |
|
94 and joined. Be careful, this could join words together! |
|
95 |
|
96 Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), |
94 which will collapse the whitespace following the tag. |
97 which will collapse the whitespace following the tag. |
95 |
98 |
96 Construct a Templite with the template text, then use `render` against a |
99 Construct a Templite with the template text, then use `render` against a |
97 dictionary context to create a finished string:: |
100 dictionary context to create a finished string:: |
98 |
101 |
152 ops_stack = [] |
155 ops_stack = [] |
153 |
156 |
154 # Split the text to form a list of tokens. |
157 # Split the text to form a list of tokens. |
155 tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) |
158 tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) |
156 |
159 |
157 squash = False |
160 squash = in_joined = False |
158 |
161 |
159 for token in tokens: |
162 for token in tokens: |
160 if token.startswith('{'): |
163 if token.startswith('{'): |
161 start, end = 2, -2 |
164 start, end = 2, -2 |
162 squash = (token[-3] == '-') |
165 squash = (token[-3] == '-') |
194 words[1], |
197 words[1], |
195 self._expr_code(words[3]) |
198 self._expr_code(words[3]) |
196 ) |
199 ) |
197 ) |
200 ) |
198 code.indent() |
201 code.indent() |
|
202 elif words[0] == 'joined': |
|
203 ops_stack.append('joined') |
|
204 in_joined = True |
199 elif words[0].startswith('end'): |
205 elif words[0].startswith('end'): |
200 # Endsomething. Pop the ops stack. |
206 # Endsomething. Pop the ops stack. |
201 if len(words) != 1: |
207 if len(words) != 1: |
202 self._syntax_error("Don't understand end", token) |
208 self._syntax_error("Don't understand end", token) |
203 end_what = words[0][3:] |
209 end_what = words[0][3:] |
204 if not ops_stack: |
210 if not ops_stack: |
205 self._syntax_error("Too many ends", token) |
211 self._syntax_error("Too many ends", token) |
206 start_what = ops_stack.pop() |
212 start_what = ops_stack.pop() |
207 if start_what != end_what: |
213 if start_what != end_what: |
208 self._syntax_error("Mismatched end tag", end_what) |
214 self._syntax_error("Mismatched end tag", end_what) |
209 code.dedent() |
215 if end_what == 'joined': |
|
216 in_joined = False |
|
217 else: |
|
218 code.dedent() |
210 else: |
219 else: |
211 self._syntax_error("Don't understand tag", words[0]) |
220 self._syntax_error("Don't understand tag", words[0]) |
212 else: |
221 else: |
213 # Literal content. If it isn't empty, output it. |
222 # Literal content. If it isn't empty, output it. |
214 if squash: |
223 if in_joined: |
|
224 token = re.sub(r"\s*\n\s*", "", token.strip()) |
|
225 elif squash: |
215 token = token.lstrip() |
226 token = token.lstrip() |
216 if token: |
227 if token: |
217 buffered.append(repr(token)) |
228 buffered.append(repr(token)) |
218 |
229 |
219 if ops_stack: |
230 if ops_stack: |