A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
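A minimal usage sketch of the public API, assuming the package is installed and exposes the documented top-level parse() helper and the Wikicode.filter_templates() method:

import mwparserfromhell

text = "{{Infobox person|name=Ada Lovelace}} was a [[mathematician]]."
wikicode = mwparserfromhell.parse(text)       # returns a Wikicode tree
for template in wikicode.filter_templates():  # list the template nodes in the tree
    print(template.name)                      # e.g. "Infobox person"

The listing below is the parser package's __init__ module, which defines the Parser class that the top-level parse() helper relies on.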
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""
This package contains the actual wikicode parser, split up into two main
modules: the :py:mod:`~.tokenizer` and the :py:mod:`~.builder`. This module
joins them together under one interface.
"""

from .builder import Builder
from .tokenizer import Tokenizer

try:
    from ._tokenizer import CTokenizer
    use_c = True
except ImportError:
    CTokenizer = None
    use_c = False

__all__ = ["use_c", "Parser"]


class Parser(object):
    """Represents a parser for wikicode.

    Actual parsing is a two-step process: first, the text is split up into a
    series of tokens by the :py:class:`~.Tokenizer`, and then the tokens are
    converted into trees of :py:class:`~.Wikicode` objects and
    :py:class:`~.Node`\ s by the :py:class:`~.Builder`.
    """

    def __init__(self):
        if use_c and CTokenizer:
            self._tokenizer = CTokenizer()
        else:
            self._tokenizer = Tokenizer()
        self._builder = Builder()

    def parse(self, text, context=0, skip_style_tags=False):
        """Parse *text*, returning a :py:class:`~.Wikicode` object tree.

        If *skip_style_tags* is ``True``, then ``''`` and ``'''`` will not be
        parsed, but instead be treated as plain text.
        """
        tokens = self._tokenizer.tokenize(text, context, skip_style_tags)
        code = self._builder.build(tokens)
        return code
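As an illustration of the two-step process described in the Parser docstring, the sketch below runs the Tokenizer and Builder explicitly and then shows the equivalent Parser call, including the skip_style_tags option. The sample wikitext is an assumption for demonstration only, not output guaranteed by the library.

# Sketch only; assumes mwparserfromhell is importable from the installed package.
from mwparserfromhell.parser import Parser
from mwparserfromhell.parser.builder import Builder
from mwparserfromhell.parser.tokenizer import Tokenizer

text = "Some ''styled'' text with a {{template|param=value}}."

# Step 1: the tokenizer turns the raw wikitext into a flat token stream.
tokens = Tokenizer().tokenize(text, 0, False)

# Step 2: the builder assembles those tokens into a Wikicode node tree.
tree = Builder().build(tokens)

# Parser.parse() wraps both steps, preferring CTokenizer when use_c is True.
same_tree = Parser().parse(text)

# With skip_style_tags=True, '' and ''' are left as plain text instead of
# being parsed into style tags.
plain = Parser().parse(text, skip_style_tags=True)

Keeping tokenizing separate from tree-building is what lets the C tokenizer and the pure-Python tokenizer stay interchangeable: both emit the same token stream, which the Builder consumes without caring which one produced it.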