eric6/ThirdParty/Pygments/pygments/formatters/latex.py

changeset  7983:54c5cfbb1e29
parent     7701:25f42e208e08
comparison 7982:48d210e41c65 -> 7983:54c5cfbb1e29
--- a/eric6/ThirdParty/Pygments/pygments/formatters/latex.py
+++ b/eric6/ThirdParty/Pygments/pygments/formatters/latex.py
@@ -3,18 +3,18 @@
     pygments.formatters.latex
     ~~~~~~~~~~~~~~~~~~~~~~~~~
 
     Formatter for LaTeX fancyvrb output.
 
-    :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 from io import StringIO
 
 from pygments.formatter import Formatter
-from pygments.lexer import Lexer
+from pygments.lexer import Lexer, do_insertions
 from pygments.token import Token, STANDARD_TYPES
 from pygments.util import get_bool_opt, get_int_opt
 
 
 __all__ = ['LatexFormatter']
@@ -444,32 +444,65 @@
         self.right = right
         self.lang = lang
         Lexer.__init__(self, **options)
 
     def get_tokens_unprocessed(self, text):
+        # find and remove all the escape tokens (replace with an empty string)
+        # this is very similar to DelegatingLexer.get_tokens_unprocessed.
+        buffered = ''
+        insertions = []
+        insertion_buf = []
+        for i, t, v in self._find_safe_escape_tokens(text):
+            if t is None:
+                if insertion_buf:
+                    insertions.append((len(buffered), insertion_buf))
+                    insertion_buf = []
+                buffered += v
+            else:
+                insertion_buf.append((i, t, v))
+        if insertion_buf:
+            insertions.append((len(buffered), insertion_buf))
+        return do_insertions(insertions,
+                             self.lang.get_tokens_unprocessed(buffered))
+
+    def _find_safe_escape_tokens(self, text):
+        """ find escape tokens that are not in strings or comments """
+        for i, t, v in self._filter_to(
+            self.lang.get_tokens_unprocessed(text),
+            lambda t: t in Token.Comment or t in Token.String
+        ):
+            if t is None:
+                for i2, t2, v2 in self._find_escape_tokens(v):
+                    yield i + i2, t2, v2
+            else:
+                yield i, None, v
+
+    def _filter_to(self, it, pred):
+        """ Keep only the tokens that match `pred`, merge the others together """
         buf = ''
         idx = 0
-        for i, t, v in self.lang.get_tokens_unprocessed(text):
-            if t in Token.Comment or t in Token.String:
+        for i, t, v in it:
+            if pred(t):
                 if buf:
-                    yield from self.get_tokens_aux(idx, buf)
+                    yield idx, None, buf
                     buf = ''
                 yield i, t, v
             else:
                 if not buf:
                     idx = i
                 buf += v
         if buf:
-            yield from self.get_tokens_aux(idx, buf)
+            yield idx, None, buf
 
-    def get_tokens_aux(self, index, text):
+    def _find_escape_tokens(self, text):
+        """ Find escape tokens within text, give token=None otherwise """
+        index = 0
         while text:
             a, sep1, text = text.partition(self.left)
             if a:
-                for i, t, v in self.lang.get_tokens_unprocessed(a):
-                    yield index + i, t, v
-                index += len(a)
+                yield index, None, a
+                index += len(a)
             if sep1:
                 b, sep2, text = text.partition(self.right)
                 if sep2:
                     yield index + len(sep1), Token.Escape, b
                     index += len(sep1) + len(b) + len(sep2)
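
The rewritten LatexEmbeddedLexer (the helper behind LatexFormatter's `escapeinside`
option) no longer re-lexes the code fragments between escapes one by one, as the
old get_tokens_aux did, which reset the delegate lexer's state at every escape.
It now cuts the escape spans out, lexes the remaining text in a single pass, and
splices the Token.Escape runs back into the stream with do_insertions; escape
delimiters that sit inside strings or comments are deliberately left alone. A
minimal sketch of the resulting behaviour, assuming a Pygments version carrying
this rework (it appears to have landed in 2.8) and an arbitrary "|...|" escape
pair chosen for illustration:

    from pygments.formatters.latex import LatexEmbeddedLexer
    from pygments.lexers import PythonLexer
    from pygments.token import Token

    # "|...|" spans outside strings/comments are emitted as Token.Escape;
    # the first pair below sits in a comment and is therefore left alone.
    lexer = LatexEmbeddedLexer('|', '|', PythonLexer())
    code = 'x = 1  # |not an escape|\ny = |\\(y_0\\)| + 2\n'

    for index, token, value in lexer.get_tokens_unprocessed(code):
        if token is Token.Escape:
            print('escape at', index, repr(value))   # only '\\(y_0\\)'

For reference, do_insertions is the generic pygments.lexer helper for merging
sublexer output: given (index, tokens) pairs, it splices each pre-lexed token
run into the main stream at that character offset, splitting tokens and
adjusting positions as needed. A toy example, independent of the formatter:

    from pygments.lexer import do_insertions
    from pygments.lexers import PythonLexer
    from pygments.token import Token

    # Lex the text with the escape already removed, then splice an escape
    # token run back in at character offset 4, i.e. right after "x = ".
    stream = PythonLexer().get_tokens_unprocessed('x =  1\n')
    insertions = [(4, [(0, Token.Escape, r'\alpha')])]

    for pos, tok, val in do_insertions(insertions, stream):
        print(pos, tok, repr(val))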
