A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
mwparserfromhell/parser/tokenizer.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Ben Kurtovic <ben.kurtovic@verizon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import unicode_literals
from math import log
import re
import string

from . import contexts
from . import tokens
from ..compat import htmlentities

__all__ = ["Tokenizer"]

class BadRoute(Exception):
    """Raised internally when the current tokenization route is invalid."""
    pass

class Tokenizer(object):
    """Creates a list of tokens from a string of wikicode."""
    START = object()
    END = object()
    MARKERS = ["{", "}", "[", "]", "<", ">", "|", "=", "&", "#", "*", ";", ":",
               "/", "-", "\n", END]
    regex = re.compile(r"([{}\[\]<>|=&#*;:/\-\n])", flags=re.IGNORECASE)
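    # tokenize() splits the input with *regex*, so self._text becomes a list
    # of segments in which every marker character is its own element and runs
    # of plain text stay whole; _parse() walks that list via self._head.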

    def __init__(self):
        self._text = None
        self._head = 0
        self._stacks = []
        self._global = 0

    @property
    def _stack(self):
        """The current token stack."""
        return self._stacks[-1][0]

    @property
    def _context(self):
        """The current token context."""
        return self._stacks[-1][1]

    @_context.setter
    def _context(self, value):
        self._stacks[-1][1] = value

    @property
    def _textbuffer(self):
        """Return the current textbuffer."""
        return self._stacks[-1][2]

    @_textbuffer.setter
    def _textbuffer(self, value):
        self._stacks[-1][2] = value

    def _push(self, context=0):
        """Add a new token stack, context, and textbuffer to the list."""
        self._stacks.append([[], context, []])
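        # Each entry in self._stacks is a [token stack, context, textbuffer]
        # triple; the properties above always address the innermost one.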

    def _push_textbuffer(self):
        """Push the textbuffer onto the stack as a Text node and clear it."""
        if self._textbuffer:
            self._stack.append(tokens.Text(text="".join(self._textbuffer)))
            self._textbuffer = []

    def _pop(self):
        """Pop the current stack/context/textbuffer, returning the stack."""
        self._push_textbuffer()
        return self._stacks.pop()[0]

    def _fail_route(self):
        """Fail the current tokenization route.

        Discards the current stack/context/textbuffer and raises
        :py:exc:`~mwparserfromhell.parser.tokenizer.BadRoute`.
        """
        self._pop()
        raise BadRoute()

    def _write(self, token):
        """Write a token to the current token stack."""
        self._push_textbuffer()
        self._stack.append(token)

    def _write_text(self, text):
        """Write text to the current textbuffer."""
        self._textbuffer.append(text)

    def _write_all(self, tokenlist):
        """Write a series of tokens to the current stack at once."""
        if tokenlist and isinstance(tokenlist[0], tokens.Text):
            self._write_text(tokenlist.pop(0).text)
        self._push_textbuffer()
        self._stack.extend(tokenlist)

    def _read(self, delta=0, wrap=False, strict=False):
        """Read the value at a relative point in the wikicode.

        The value is read from :py:attr:`self._head <_head>` plus the value of
        *delta* (which can be negative). If *wrap* is ``False``, we will not
        allow attempts to read from the end of the string if ``self._head +
        delta`` is negative. If *strict* is ``True``, the route will be failed
        (with :py:meth:`_fail_route`) if we try to read from past the end of
        the string; otherwise, :py:attr:`self.END <END>` is returned. If we try
        to read from before the start of the string, :py:attr:`self.START
        <START>` is returned.
        """
        index = self._head + delta
        if index < 0 and (not wrap or abs(index) > len(self._text)):
            return self.START
        try:
            return self._text[index]
        except IndexError:
            if strict:
                self._fail_route()
            return self.END

    def _parse_template(self):
        """Parse a template at the head of the wikicode string."""
        reset = self._head
        self._head += 2
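        # We just skipped the opening "{{"; if parsing the body fails, rewind
        # to *reset* and emit the brace as plain text instead.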
        try:
            template = self._parse(contexts.TEMPLATE_NAME)
        except BadRoute:
            self._head = reset
            self._write_text(self._read())
        else:
            self._write(tokens.TemplateOpen())
            self._write_all(template)
            self._write(tokens.TemplateClose())

    def _verify_template_name(self):
        """Verify that a template's name is valid wikisyntax.

        The route will be failed if the name contains a newline inside of it
        (not merely at the beginning or end).
        """
        self._push_textbuffer()
        if self._stack:
            text = [tok for tok in self._stack if isinstance(tok, tokens.Text)]
            text = "".join([token.text for token in text])
            if text.strip() and "\n" in text.strip():
                self._fail_route()

    def _handle_template_param(self):
        """Handle a template parameter at the head of the string."""
        if self._context & contexts.TEMPLATE_NAME:
            self._verify_template_name()
            self._context ^= contexts.TEMPLATE_NAME
        if self._context & contexts.TEMPLATE_PARAM_VALUE:
            self._context ^= contexts.TEMPLATE_PARAM_VALUE
        self._context |= contexts.TEMPLATE_PARAM_KEY
        self._write(tokens.TemplateParamSeparator())

    def _handle_template_param_value(self):
        """Handle a template parameter's value at the head of the string."""
        self._context ^= contexts.TEMPLATE_PARAM_KEY
        self._context |= contexts.TEMPLATE_PARAM_VALUE
        self._write(tokens.TemplateParamEquals())

    def _handle_template_end(self):
        """Handle the end of the template at the head of the string."""
        if self._context & contexts.TEMPLATE_NAME:
            self._verify_template_name()
        self._head += 1
        return self._pop()

    def _parse_heading(self):
        """Parse a section heading at the head of the wikicode string."""
        self._global |= contexts.GL_HEADING
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        context = contexts.HEADING_LEVEL_1 << min(best - 1, 5)
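        # HEADING_LEVEL_1 shifted left by (best - 1) selects the matching
        # HEADING_LEVEL_* flag (they are consecutive bits); the shift is
        # capped at 5 since headings cannot go past level 6.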
        try:
            title, level = self._parse(context)
        except BadRoute:
            self._head = reset + best - 1
            self._write_text("=" * best)
        else:
            self._write(tokens.HeadingStart(level=level))
            if level < best:
                self._write_text("=" * (best - level))
            self._write_all(title)
            self._write(tokens.HeadingEnd())
        finally:
            self._global ^= contexts.GL_HEADING

    def _handle_heading_end(self):
        """Handle the end of a section heading at the head of the string."""
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
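        # Recover the opening heading's level from its context flag: the
        # HEADING_LEVEL_* flags are powers of two, so log base 2 inverts the
        # shift done in _parse_heading().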
        current = int(log(self._context / contexts.HEADING_LEVEL_1, 2)) + 1
        level = min(current, min(best, 6))
        try:
            after, after_level = self._parse(self._context)
        except BadRoute:
            if level < best:
                self._write_text("=" * (best - level))
            self._head = reset + best - 1
            return self._pop(), level
        else:
            self._write_text("=" * best)
            self._write_all(after)
            return self._pop(), after_level

    def _really_parse_entity(self):
        """Actually parse an HTML entity and ensure that it is valid."""
        self._write(tokens.HTMLEntityStart())
        self._head += 1

        this = self._read(strict=True)
        if this == "#":
            numeric = True
            self._write(tokens.HTMLEntityNumeric())
            self._head += 1
            this = self._read(strict=True)
            if this[0].lower() == "x":
                hexadecimal = True
                self._write(tokens.HTMLEntityHex(char=this[0]))
                this = this[1:]
                if not this:
                    self._fail_route()
            else:
                hexadecimal = False
        else:
            numeric = hexadecimal = False
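        # Restrict the entity body to the character set its kind allows:
        # hex digits, decimal digits, or (for named entities) letters too.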
        valid = string.hexdigits if hexadecimal else string.digits
        if not numeric and not hexadecimal:
            valid += string.ascii_letters
        if not all([char in valid for char in this]):
            self._fail_route()

        self._head += 1
        if self._read() != ";":
            self._fail_route()
        if numeric:
            test = int(this, 16) if hexadecimal else int(this)
            if test < 1 or test > 0x10FFFF:
                self._fail_route()
        else:
            if this not in htmlentities.entitydefs:
                self._fail_route()

        self._write(tokens.Text(text=this))
        self._write(tokens.HTMLEntityEnd())

    def _parse_entity(self):
        """Parse an HTML entity at the head of the wikicode string."""
        reset = self._head
        self._push()
        try:
            self._really_parse_entity()
        except BadRoute:
            self._head = reset
            self._write_text(self._read())
        else:
            self._write_all(self._pop())

    def _parse(self, context=0):
        """Parse the wikicode string, using *context* for when to stop."""
        self._push(context)
        while True:
            this = self._read()
            if this not in self.MARKERS:
                self._write_text(this)
                self._head += 1
                continue
            if this is self.END:
                if self._context & (contexts.TEMPLATE | contexts.HEADING):
                    self._fail_route()
                return self._pop()
            prev, next = self._read(-1), self._read(1)
            if this == next == "{":
                self._parse_template()
            elif this == "|" and self._context & contexts.TEMPLATE:
                self._handle_template_param()
            elif this == "=" and self._context & contexts.TEMPLATE_PARAM_KEY:
                self._handle_template_param_value()
            elif this == next == "}" and self._context & contexts.TEMPLATE:
                return self._handle_template_end()
            elif ((prev == "\n" or prev == self.START) and this == "=" and
                  not self._global & contexts.GL_HEADING):
                self._parse_heading()
            elif this == "=" and self._context & contexts.HEADING:
                return self._handle_heading_end()
            elif this == "\n" and self._context & contexts.HEADING:
                self._fail_route()
            elif this == "&":
                self._parse_entity()
            else:
                self._write_text(this)
            self._head += 1

    def tokenize(self, text):
        """Build a list of tokens from a string of wikicode and return it."""
        split = self.regex.split(text)
        self._text = [segment for segment in split if segment]
        return self._parse()
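
# A quick illustration (output shape is a sketch; the exact reprs depend on
# how the token classes in mwparserfromhell.parser.tokens render themselves):
#
#     >>> Tokenizer().tokenize("Hello {{world}}!")
#     [Text(text='Hello '), TemplateOpen(), Text(text='world'),
#      TemplateClose(), Text(text='!')]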