A Python parser for MediaWiki wikicode: https://mwparserfromhell.readthedocs.io/
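For context, the library's usual entry point is mwparserfromhell.parse(), which turns a string of wikicode into a Wikicode tree that can be searched and edited. A brief usage sketch, loosely following the project's README example (the sample text and parameter names are illustrative):

    import mwparserfromhell

    # Parse a fragment of wikicode into a Wikicode tree.
    text = "I has a template! {{foo|bar|baz|eggs=spam}} See it?"
    wikicode = mwparserfromhell.parse(text)

    # Nodes of a given type can be filtered out of the tree.
    template = wikicode.filter_templates()[0]
    print(template.name)               # foo
    print(template.get("eggs").value)  # spam

    # The tree can be edited in place and re-serialized with str().
    template.add("new_param", "hello")
    print(wikicode)

The file below is the tokenizer test loader from the project's test suite.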
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import print_function, unicode_literals
from os import listdir, path
import sys

from mwparserfromhell.compat import py3k
from mwparserfromhell.parser import tokens


class _TestParseError(Exception):
    """Raised internally when a test could not be parsed."""
    pass


class TokenizerTestCase(object):
    """A base test case for tokenizers, whose tests are loaded dynamically.

    Subclassed along with unittest2.TestCase to form TestPyTokenizer and
    TestCTokenizer. Tests are loaded dynamically from files in the 'tokenizer'
    directory.
    """

    @classmethod
    def _build_test_method(cls, funcname, data):
        """Create and return a method to be treated as a test case method.

        *data* is a dict containing multiple keys: the *input* text to be
        tokenized, the expected list of tokens as *output*, and an optional
        *label* for the method's docstring.
        """
        def inner(self):
            expected = data["output"]
            actual = self.tokenizer().tokenize(data["input"])
            self.assertEqual(expected, actual)

        if not py3k:
            inner.__name__ = funcname.encode("utf8")
        inner.__doc__ = data["label"]
        return inner

    @classmethod
    def _load_tests(cls, filename, name, text):
        """Load all tests in *text* from the file *filename*."""
        tests = text.split("\n---\n")
        counter = 1
        digits = len(str(len(tests)))
        for test in tests:
            data = {"name": None, "label": None, "input": None, "output": None}
            try:
                for line in test.strip().splitlines():
                    if line.startswith("name:"):
                        data["name"] = line[len("name:"):].strip()
                    elif line.startswith("label:"):
                        data["label"] = line[len("label:"):].strip()
                    elif line.startswith("input:"):
                        raw = line[len("input:"):].strip()
                        if raw[0] == '"' and raw[-1] == '"':
                            raw = raw[1:-1]
                        raw = raw.encode("raw_unicode_escape")
                        data["input"] = raw.decode("unicode_escape")
                    elif line.startswith("output:"):
                        raw = line[len("output:"):].strip()
                        try:
                            data["output"] = eval(raw, vars(tokens))
                        except Exception as err:
                            raise _TestParseError(err)
            except _TestParseError as err:
                if data["name"]:
                    error = "Could not parse test '{0}' in '{1}':\n\t{2}"
                    print(error.format(data["name"], filename, err))
                else:
                    error = "Could not parse a test in '{0}':\n\t{1}"
                    print(error.format(filename, err))
                continue
            if not data["name"]:
                error = "A test in '{0}' was ignored because it lacked a name"
                print(error.format(filename))
                continue
            if data["input"] is None or data["output"] is None:
                error = ("Test '{0}' in '{1}' was ignored because it lacked "
                         "an input or an output")
                print(error.format(data["name"], filename))
                continue
            number = str(counter).zfill(digits)
            fname = "test_{0}{1}_{2}".format(name, number, data["name"])
            meth = cls._build_test_method(fname, data)
            setattr(cls, fname, meth)
            counter += 1

    @classmethod
    def build(cls):
        """Load and install all tests from the 'tokenizer' directory."""
        def load_file(filename):
            with open(filename, "rU") as fp:
                text = fp.read()
                if not py3k:
                    text = text.decode("utf8")
            name = path.split(filename)[1][:0-len(extension)]
            cls._load_tests(filename, name, text)

        directory = path.join(path.dirname(__file__), "tokenizer")
        extension = ".mwtest"

        if len(sys.argv) > 2 and sys.argv[1] == "--use":
            for name in sys.argv[2:]:
                load_file(path.join(directory, name + extension))
            sys.argv = [sys.argv[0]]  # So unittest2 doesn't try to load these
            cls.skip_others = True
        else:
            for filename in listdir(directory):
                if not filename.endswith(extension):
                    continue
                load_file(path.join(directory, filename))
            cls.skip_others = False


TokenizerTestCase.build()
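
For reference, _load_tests() expects each *.mwtest file in the 'tokenizer' directory to hold test cases separated by a line reading ---, with name:, label:, input: (a quoted, escape-encoded string), and output: (an expression evaluated against the names in mwparserfromhell.parser.tokens) fields. An illustrative entry, not taken from the real test data, might look like:

    name:    basic
    label:   sanity check for basic text parsing
    input:   "foobar"
    output:  [Text(text="foobar")]

    ---

    name:    basic_template
    label:   a basic template
    input:   "{{foo}}"
    output:  [TemplateOpen(), Text(text="foo"), TemplateClose()]

The generated test methods call self.tokenizer().tokenize(...), so each concrete subclass only needs to expose which tokenizer class to instantiate. A minimal sketch of such a subclass, assuming the pure-Python tokenizer lives at mwparserfromhell.parser.tokenizer.Tokenizer (the class body here is an assumption, not part of this file):

    import unittest

    from mwparserfromhell.parser.tokenizer import Tokenizer


    class TestPyTokenizer(TokenizerTestCase, unittest.TestCase):
        """Runs the dynamically loaded tests against the Python tokenizer."""

        @classmethod
        def setUpClass(cls):
            # The loader's generated methods instantiate this class and call
            # its tokenize() method on each test's input text.
            cls.tokenizer = Tokenizer


    if __name__ == "__main__":
        unittest.main(verbosity=2)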