A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

61 lines
2.3 KiB

  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@verizon.net>
  4. #
  5. # Permission is hereby granted, free of charge, to any person obtaining a copy
  6. # of this software and associated documentation files (the "Software"), to deal
  7. # in the Software without restriction, including without limitation the rights
  8. # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  9. # copies of the Software, and to permit persons to whom the Software is
  10. # furnished to do so, subject to the following conditions:
  11. #
  12. # The above copyright notice and this permission notice shall be included in
  13. # all copies or substantial portions of the Software.
  14. #
  15. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  18. # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. # SOFTWARE.
  22. """
  23. This package contains the actual wikicode parser, split up into two main
  24. modules: the :py:mod:`~.tokenizer` and the :py:mod:`~.builder`. This module
  25. joins them together under one interface.
  26. """
from .builder import Builder
from .tokenizer import Tokenizer

try:
    # The C tokenizer extension is optional: prefer it when it was built and
    # installed, but fall back to the pure-Python Tokenizer otherwise.
    from ._tokenizer import CTokenizer
    use_c = True
except ImportError:
    # Sentinel values meaning "extension unavailable"; Parser checks these.
    CTokenizer = None
    use_c = False

# Public API of this package: the availability flag and the Parser class.
__all__ = ["use_c", "Parser"]
  36. class Parser(object):
  37. """Represents a parser for wikicode.
  38. Actual parsing is a two-step process: first, the text is split up into a
  39. series of tokens by the :py:class:`~.Tokenizer`, and then the tokens are
  40. converted into trees of :py:class:`~.Wikicode` objects and
  41. :py:class:`~.Node`\ s by the :py:class:`~.Builder`.
  42. """
  43. def __init__(self):
  44. if use_c and CTokenizer:
  45. self._tokenizer = CTokenizer()
  46. else:
  47. self._tokenizer = Tokenizer()
  48. self._builder = Builder()
  49. def parse(self, text, context=0):
  50. """Parse *text*, returning a :py:class:`~.Wikicode` object tree."""
  51. tokens = self._tokenizer.tokenize(text, context)
  52. code = self._builder.build(tokens)
  53. return code