"""
    pygments.lexer
    ~~~~~~~~~~~~~~

    Base lexer classes.

    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

import re
import sys
import time

from pip._vendor.pygments.filter import apply_filters, Filter
from pip._vendor.pygments.filters import get_filter_by_name
from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType
from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
    make_analysator, Future, guess_decode
from pip._vendor.pygments.regexopt import regex_opt

__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
           'default', 'words', 'line_re']

line_re = re.compile('.*?\n')

_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

_default_analyse = staticmethod(lambda x: 0.0)


class LexerMeta(type):
    """
    This metaclass automagically converts ``analyse_text`` methods into
    static methods which always return float values.
    """

    def __new__(mcs, name, bases, d):
        if 'analyse_text' in d:
            d['analyse_text'] = make_analysator(d['analyse_text'])
        return type.__new__(mcs, name, bases, d)


class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
    .. autoattribute:: aliases
    .. autoattribute:: filenames
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
    .. autoattribute:: priority

    Lexers included in Pygments should have an additional attribute:

    .. autoattribute:: url

    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.
    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection). Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation.
    url = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)
    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`,
        it's the same as if the return value was ``0.0``.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    from pip._vendor import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
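

# Illustrative sketch (not part of the original module): a minimal `Lexer`
# subclass showing the contract described above. `get_tokens_unprocessed()`
# yields ``(index, tokentype, value)`` tuples; the inherited `get_tokens()`
# drops the index and applies any registered filters.
class _ExampleWhitespaceLexer(Lexer):
    """Toy lexer: runs of whitespace become `Whitespace`, anything else
    becomes `Text`. For illustration only."""
    name = 'Example whitespace splitter'

    def get_tokens_unprocessed(self, text):
        for match in re.finditer(r'\s+|\S+', text):
            ttype = Whitespace if match.group().isspace() else Text
            yield match.start(), ttype, match.group()

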
class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'


inherit = _inherit()  # pylint: disable=invalid-name


class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch:
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback
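

# Illustrative sketch (not part of the original module): in a ``tokens``
# table, `bygroups` assigns one action per regex group. Matching
# "key = value"-style assignments could look like the rule below; the token
# types are imported here only for the example.
from pip._vendor.pygments.token import Name, Operator  # example imports

_EXAMPLE_BYGROUPS_RULE = (r'(\w+)(\s*)(=)',
                          bygroups(Name.Attribute, Whitespace, Operator))

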
class _This:
    """
    Special singleton used for indicating the caller class.
    """


this = _This()  # pylint: disable=invalid-name


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
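

# Illustrative sketch (not part of the original module): `using(this)` builds
# a callback that re-lexes the matched text with the current lexer class,
# optionally starting from another state. It is used as the action part of a
# rule, for example re-lexing an embedded span with the lexer's 'root' state:
_EXAMPLE_USING_RULE = (r'.+', using(this, state='root'))

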
class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop')
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        self.state = state


class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
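

# Illustrative sketch (not part of the original module): `words` stands in
# for the regex part of a rule and is compiled into an optimized alternation
# (via `regex_opt`) when the lexer class is first instantiated. The Keyword
# token type is imported here only for the example.
from pip._vendor.pygments.token import Keyword  # example import

_EXAMPLE_WORDS_RULE = (words(('if', 'elif', 'else', 'while'), suffix=r'\b'),
                       Keyword)

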
class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err)) from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                    # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions, yield the original stream unchanged
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
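

# Illustrative sketch (not part of the original module): lexers for
# interactive sessions typically use `do_insertions` to splice prompt tokens
# into the output of a sub-lexer, pairing a character offset in the buffered
# source with the tokens to emit there, roughly:
#
#     insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
#     yield from do_insertions(insertions,
#                              python_lexer.get_tokens_unprocessed(code))

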
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func


class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
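

# Illustrative sketch (not part of the original module): running this file
# directly exercises the example lexer defined above and prints its
# ``(index, tokentype, value)`` stream.
if __name__ == '__main__':
    for _idx, _ttype, _value in \
            _ExampleConfigLexer().get_tokens_unprocessed('set name "a b"\n'):
        print(_idx, _ttype, repr(_value))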
)