A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

1159 lines
44 KiB

  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2012-2014 Ben Kurtovic <ben.kurtovic@gmail.com>
  4. #
  5. # Permission is hereby granted, free of charge, to any person obtaining a copy
  6. # of this software and associated documentation files (the "Software"), to deal
  7. # in the Software without restriction, including without limitation the rights
  8. # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  9. # copies of the Software, and to permit persons to whom the Software is
  10. # furnished to do so, subject to the following conditions:
  11. #
  12. # The above copyright notice and this permission notice shall be included in
  13. # all copies or substantial portions of the Software.
  14. #
  15. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  18. # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. # SOFTWARE.
  22. from __future__ import unicode_literals
  23. from math import log
  24. import re
  25. from . import contexts, tokens, ParserError
  26. from ..compat import htmlentities, range
  27. from ..definitions import (get_html_tag, is_parsable, is_single,
  28. is_single_only, is_scheme)
  29. __all__ = ["Tokenizer"]
  30. class BadRoute(Exception):
  31. """Raised internally when the current tokenization route is invalid."""
  32. def __init__(self, context=0):
  33. super(BadRoute, self).__init__()
  34. self.context = context
  35. class _TagOpenData(object):
  36. """Stores data about an HTML open tag, like ``<ref name="foo">``."""
  37. CX_NAME = 1 << 0
  38. CX_ATTR_READY = 1 << 1
  39. CX_ATTR_NAME = 1 << 2
  40. CX_ATTR_VALUE = 1 << 3
  41. CX_QUOTED = 1 << 4
  42. CX_NOTE_SPACE = 1 << 5
  43. CX_NOTE_EQUALS = 1 << 6
  44. CX_NOTE_QUOTE = 1 << 7
  45. def __init__(self):
  46. self.context = self.CX_NAME
  47. self.padding_buffer = {"first": "", "before_eq": "", "after_eq": ""}
  48. self.reset = 0
class Tokenizer(object):
    """Creates a list of tokens from a string of wikicode."""

    USES_C = False           # this is the pure-Python tokenizer
    START = object()         # sentinel: read before the start of the text
    END = object()           # sentinel: read past the end of the text
    # Characters that can start or end a tokenization route:
    MARKERS = ["{", "}", "[", "]", "<", ">", "|", "=", "&", "'", "#", "*", ";",
               ":", "/", "-", "\n", START, END]
    MAX_DEPTH = 40           # maximum nesting of token stacks
    MAX_CYCLES = 100000      # maximum total number of stack pushes
    regex = re.compile(r"([{}\[\]<>|=&'#*;:/\\\"\-!\n])", flags=re.IGNORECASE)
    tag_splitter = re.compile(r"([\s\"\\]+)")  # splits tag text on space/quote/backslash runs

    def __init__(self):
        self._text = None    # the text being tokenized (indexable by _head)
        self._head = 0       # current read position within _text
        self._stacks = []    # list of [token stack, context, textbuffer] frames
        self._global = 0     # global context flags shared by all stacks
        self._depth = 0      # current number of live stack frames
        self._cycles = 0     # total pushes so far (never decremented)
    @property
    def _stack(self):
        """The current token stack."""
        return self._stacks[-1][0]

    @property
    def _context(self):
        """The current token context."""
        return self._stacks[-1][1]

    @_context.setter
    def _context(self, value):
        self._stacks[-1][1] = value

    @property
    def _textbuffer(self):
        """The current textbuffer."""
        return self._stacks[-1][2]

    @_textbuffer.setter
    def _textbuffer(self, value):
        self._stacks[-1][2] = value
    def _push(self, context=0):
        """Add a new token stack, context, and textbuffer to the list."""
        self._stacks.append([[], context, []])
        self._depth += 1
        self._cycles += 1  # _cycles only ever grows; it caps total work

    def _push_textbuffer(self):
        """Push the textbuffer onto the stack as a Text node and clear it."""
        if self._textbuffer:
            self._stack.append(tokens.Text(text="".join(self._textbuffer)))
            self._textbuffer = []

    def _pop(self, keep_context=False):
        """Pop the current stack/context/textbuffer, returning the stack.

        If *keep_context* is ``True``, then we will replace the underlying
        stack's context with the current stack's.
        """
        self._push_textbuffer()
        self._depth -= 1
        if keep_context:
            context = self._context
            stack = self._stacks.pop()[0]
            self._context = context
            return stack
        return self._stacks.pop()[0]

    def _can_recurse(self):
        """Return whether or not our max recursion depth has been exceeded."""
        return self._depth < self.MAX_DEPTH and self._cycles < self.MAX_CYCLES
    def _fail_route(self):
        """Fail the current tokenization route.

        Discards the current stack/context/textbuffer and raises
        :py:exc:`~.BadRoute` carrying the discarded context.
        """
        context = self._context
        self._pop()
        raise BadRoute(context)

    def _emit(self, token):
        """Write a token to the end of the current token stack."""
        self._push_textbuffer()
        self._stack.append(token)

    def _emit_first(self, token):
        """Write a token to the beginning of the current token stack."""
        self._push_textbuffer()
        self._stack.insert(0, token)

    def _emit_text(self, text):
        """Write text to the current textbuffer."""
        self._textbuffer.append(text)

    def _emit_all(self, tokenlist):
        """Write a series of tokens to the current stack at once."""
        # A leading Text token is merged into the textbuffer so adjacent
        # text coalesces into a single Text token:
        if tokenlist and isinstance(tokenlist[0], tokens.Text):
            self._emit_text(tokenlist.pop(0).text)
        self._push_textbuffer()
        self._stack.extend(tokenlist)

    def _emit_text_then_stack(self, text):
        """Pop the current stack, write *text*, and then write the stack."""
        stack = self._pop()
        self._emit_text(text)
        if stack:
            self._emit_all(stack)
        self._head -= 1  # NOTE(review): offsets an increment done by the caller's loop
  143. def _read(self, delta=0, wrap=False, strict=False):
  144. """Read the value at a relative point in the wikicode.
  145. The value is read from :py:attr:`self._head <_head>` plus the value of
  146. *delta* (which can be negative). If *wrap* is ``False``, we will not
  147. allow attempts to read from the end of the string if ``self._head +
  148. delta`` is negative. If *strict* is ``True``, the route will be failed
  149. (with :py:meth:`_fail_route`) if we try to read from past the end of
  150. the string; otherwise, :py:attr:`self.END <END>` is returned. If we try
  151. to read from before the start of the string, :py:attr:`self.START
  152. <START>` is returned.
  153. """
  154. index = self._head + delta
  155. if index < 0 and (not wrap or abs(index) > len(self._text)):
  156. return self.START
  157. try:
  158. return self._text[index]
  159. except IndexError:
  160. if strict:
  161. self._fail_route()
  162. return self.END
    def _parse_template(self):
        """Parse a template at the head of the wikicode string."""
        reset = self._head
        try:
            template = self._parse(contexts.TEMPLATE_NAME)
        except BadRoute:
            self._head = reset  # rewind before re-raising to the caller
            raise
        self._emit_first(tokens.TemplateOpen())
        self._emit_all(template)
        self._emit(tokens.TemplateClose())

    def _parse_argument(self):
        """Parse an argument at the head of the wikicode string."""
        reset = self._head
        try:
            argument = self._parse(contexts.ARGUMENT_NAME)
        except BadRoute:
            self._head = reset  # rewind before re-raising to the caller
            raise
        self._emit_first(tokens.ArgumentOpen())
        self._emit_all(argument)
        self._emit(tokens.ArgumentClose())
    def _parse_template_or_argument(self):
        """Parse a template or argument at the head of the wikicode string."""
        self._head += 2  # skip over the first "{{"
        braces = 2
        while self._read() == "{":
            self._head += 1
            braces += 1
        self._push()
        while braces:
            if braces == 1:
                # A lone leftover brace is plain text:
                return self._emit_text_then_stack("{")
            if braces == 2:
                try:
                    self._parse_template()
                except BadRoute:
                    return self._emit_text_then_stack("{{")
                break
            # With three or more braces, prefer an argument ({{{...}}});
            # fall back to a template plus a leftover literal brace:
            try:
                self._parse_argument()
                braces -= 3
            except BadRoute:
                try:
                    self._parse_template()
                    braces -= 2
                except BadRoute:
                    return self._emit_text_then_stack("{" * braces)
            if braces:
                self._head += 1
        self._emit_all(self._pop())
        if self._context & contexts.FAIL_NEXT:
            self._context ^= contexts.FAIL_NEXT
    def _handle_template_param(self):
        """Handle a template parameter at the head of the string."""
        if self._context & contexts.TEMPLATE_NAME:
            self._context ^= contexts.TEMPLATE_NAME
        elif self._context & contexts.TEMPLATE_PARAM_VALUE:
            self._context ^= contexts.TEMPLATE_PARAM_VALUE
        else:
            # We were in a param key; merge its stack back in first:
            self._emit_all(self._pop(keep_context=True))
        self._context |= contexts.TEMPLATE_PARAM_KEY
        self._emit(tokens.TemplateParamSeparator())
        self._push(self._context)

    def _handle_template_param_value(self):
        """Handle a template parameter's value at the head of the string."""
        self._emit_all(self._pop(keep_context=True))
        self._context ^= contexts.TEMPLATE_PARAM_KEY
        self._context |= contexts.TEMPLATE_PARAM_VALUE
        self._emit(tokens.TemplateParamEquals())

    def _handle_template_end(self):
        """Handle the end of a template at the head of the string."""
        if self._context & contexts.TEMPLATE_PARAM_KEY:
            self._emit_all(self._pop(keep_context=True))
        self._head += 1
        return self._pop()

    def _handle_argument_separator(self):
        """Handle the separator between an argument's name and default."""
        self._context ^= contexts.ARGUMENT_NAME
        self._context |= contexts.ARGUMENT_DEFAULT
        self._emit(tokens.ArgumentSeparator())

    def _handle_argument_end(self):
        """Handle the end of an argument at the head of the string."""
        self._head += 2
        return self._pop()
    def _parse_wikilink(self):
        """Parse an internal wikilink at the head of the wikicode string."""
        self._head += 2  # skip over the "[["
        reset = self._head - 1
        try:
            wikilink = self._parse(contexts.WIKILINK_TITLE)
        except BadRoute:
            # Not a valid wikilink; emit the brackets as plain text:
            self._head = reset
            self._emit_text("[[")
        else:
            self._emit(tokens.WikilinkOpen())
            self._emit_all(wikilink)
            self._emit(tokens.WikilinkClose())

    def _handle_wikilink_separator(self):
        """Handle the separator between a wikilink's title and its text."""
        self._context ^= contexts.WIKILINK_TITLE
        self._context |= contexts.WIKILINK_TEXT
        self._emit(tokens.WikilinkSeparator())

    def _handle_wikilink_end(self):
        """Handle the end of a wikilink at the head of the string."""
        self._head += 1
        return self._pop()
    def _parse_bracketed_uri_scheme(self):
        """Parse the URI scheme of a bracket-enclosed external link."""
        self._push(contexts.EXT_LINK_URI)
        if self._read() == self._read(1) == "/":
            # Protocol-relative link ("//example.com"): no scheme at all.
            self._emit_text("//")
            self._head += 2
        else:
            valid = "abcdefghijklmnopqrstuvwxyz0123456789+.-"
            all_valid = lambda: all(char in valid for char in self._read())
            scheme = ""
            while self._read() is not self.END and all_valid():
                scheme += self._read()
                self._emit_text(self._read())
                self._head += 1
            if self._read() != ":":
                self._fail_route()
            self._emit_text(":")
            self._head += 1
            slashes = self._read() == self._read(1) == "/"
            if slashes:
                self._emit_text("//")
                self._head += 2
            if not is_scheme(scheme, slashes):
                self._fail_route()

    def _parse_free_uri_scheme(self):
        """Parse the URI scheme of a free (no brackets) external link."""
        valid = "abcdefghijklmnopqrstuvwxyz0123456789+.-"
        scheme = []
        try:
            # We have to backtrack through the textbuffer looking for our
            # scheme since it was just parsed as text:
            for chunk in reversed(self._textbuffer):
                for char in reversed(chunk):
                    if char.isspace() or char in self.MARKERS:
                        # Scheme boundary found; StopIteration breaks out of
                        # both loops at once:
                        raise StopIteration()
                    if char not in valid:
                        raise BadRoute()
                    scheme.append(char)
        except StopIteration:
            pass
        scheme = "".join(reversed(scheme))
        slashes = self._read() == self._read(1) == "/"
        if not is_scheme(scheme, slashes):
            raise BadRoute()
        self._push(self._context | contexts.EXT_LINK_URI)
        self._emit_text(scheme)
        self._emit_text(":")
        if slashes:
            self._emit_text("//")
            self._head += 2
    def _handle_free_link_text(self, punct, tail, this):
        """Handle text in a free ext link, including trailing punctuation.

        *punct* is the tuple of characters treated as trailing punctuation;
        *tail* is punctuation held back so far; *this* is the current text.
        Returns the updated ``(punct, tail)`` pair.
        """
        if "(" in this and ")" in punct:
            punct = punct[:-1]  # ')' is no longer valid punctuation
        if this.endswith(punct):
            # Walk backwards to find where the trailing punctuation begins:
            for i in reversed(range(-len(this), 0)):
                if i == -len(this) or this[i - 1] not in punct:
                    break
            stripped = this[:i]
            if stripped and tail:
                self._emit_text(tail)
                tail = ""
            tail += this[i:]
            this = stripped
        elif tail:
            self._emit_text(tail)
            tail = ""
        self._emit_text(this)
        return punct, tail
    def _is_free_link_end(self, this, next):
        """Return whether the current head is the end of a free link."""
        # Built from _parse()'s end sentinels:
        after, ctx = self._read(2), self._context
        equal_sign_contexts = contexts.TEMPLATE_PARAM_KEY | contexts.HEADING
        return (this in (self.END, "\n", "[", "]", "<", ">") or
                this == next == "'" or
                (this == "|" and ctx & contexts.TEMPLATE) or
                (this == "=" and ctx & equal_sign_contexts) or
                (this == next == "}" and ctx & contexts.TEMPLATE) or
                (this == next == after == "}" and ctx & contexts.ARGUMENT))
  350. def _really_parse_external_link(self, brackets):
  351. """Really parse an external link."""
  352. if brackets:
  353. self._parse_bracketed_uri_scheme()
  354. invalid = ("\n", " ", "]")
  355. else:
  356. self._parse_free_uri_scheme()
  357. invalid = ("\n", " ", "[", "]")
  358. punct = tuple(",;\.:!?)")
  359. if self._read() is self.END or self._read()[0] in invalid:
  360. self._fail_route()
  361. tail = ""
  362. while True:
  363. this, next = self._read(), self._read(1)
  364. if this == "&":
  365. if tail:
  366. self._emit_text(tail)
  367. tail = ""
  368. self._parse_entity()
  369. elif (this == "<" and next == "!" and self._read(2) ==
  370. self._read(3) == "-"):
  371. if tail:
  372. self._emit_text(tail)
  373. tail = ""
  374. self._parse_comment()
  375. elif not brackets and self._is_free_link_end(this, next):
  376. return self._pop(), tail, -1
  377. elif this is self.END or this == "\n":
  378. self._fail_route()
  379. elif this == next == "{" and self._can_recurse():
  380. if tail:
  381. self._emit_text(tail)
  382. tail = ""
  383. self._parse_template_or_argument()
  384. elif this == "]":
  385. return self._pop(), tail, 0
  386. elif " " in this:
  387. before, after = this.split(" ", 1)
  388. if brackets:
  389. self._emit_text(before)
  390. self._emit(tokens.ExternalLinkSeparator())
  391. if after:
  392. self._emit_text(after)
  393. self._context ^= contexts.EXT_LINK_URI
  394. self._context |= contexts.EXT_LINK_TITLE
  395. self._head += 1
  396. return self._parse(push=False), None, 0
  397. punct, tail = self._handle_free_link_text(punct, tail, before)
  398. return self._pop(), tail + " " + after, 0
  399. elif not brackets:
  400. punct, tail = self._handle_free_link_text(punct, tail, this)
  401. else:
  402. self._emit_text(this)
  403. self._head += 1
  404. def _remove_uri_scheme_from_textbuffer(self, scheme):
  405. """Remove the URI scheme of a new external link from the textbuffer."""
  406. length = len(scheme)
  407. while length:
  408. if length < len(self._textbuffer[-1]):
  409. self._textbuffer[-1] = self._textbuffer[-1][:-length]
  410. break
  411. length -= len(self._textbuffer[-1])
  412. self._textbuffer.pop()
    def _parse_external_link(self, brackets):
        """Parse an external link at the head of the wikicode string."""
        reset = self._head
        self._head += 1
        try:
            bad_context = self._context & contexts.NO_EXT_LINKS
            if bad_context or not self._can_recurse():
                raise BadRoute()
            link, extra, delta = self._really_parse_external_link(brackets)
        except BadRoute:
            self._head = reset
            if not brackets and self._context & contexts.DL_TERM:
                self._handle_dl_term()
            else:
                self._emit_text(self._read())
        else:
            if not brackets:
                # The scheme was already written to the textbuffer as plain
                # text; remove it so it lives inside the link tokens instead:
                scheme = link[0].text.split(":", 1)[0]
                self._remove_uri_scheme_from_textbuffer(scheme)
            self._emit(tokens.ExternalLinkOpen(brackets=brackets))
            self._emit_all(link)
            self._emit(tokens.ExternalLinkClose())
            self._head += delta
            if extra:
                self._emit_text(extra)
    def _parse_heading(self):
        """Parse a section heading at the head of the wikicode string."""
        self._global |= contexts.GL_HEADING
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        context = contexts.HEADING_LEVEL_1 << min(best - 1, 5)  # cap at level 6
        try:
            title, level = self._parse(context)
        except BadRoute:
            self._head = reset + best - 1
            self._emit_text("=" * best)
        else:
            self._emit(tokens.HeadingStart(level=level))
            if level < best:
                # Extra "="s beyond the matched level are literal text:
                self._emit_text("=" * (best - level))
            self._emit_all(title)
            self._emit(tokens.HeadingEnd())
        finally:
            self._global ^= contexts.GL_HEADING

    def _handle_heading_end(self):
        """Handle the end of a section heading at the head of the string."""
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        # Recover this heading's level from the context bit that was set:
        current = int(log(self._context / contexts.HEADING_LEVEL_1, 2)) + 1
        level = min(current, min(best, 6))
        try:  # Try to check for a heading closure after this one
            after, after_level = self._parse(self._context)
        except BadRoute:
            if level < best:
                self._emit_text("=" * (best - level))
            self._head = reset + best - 1
            return self._pop(), level
        else:  # Found another closure
            self._emit_text("=" * best)
            self._emit_all(after)
            return self._pop(), after_level
    def _really_parse_entity(self):
        """Actually parse an HTML entity and ensure that it is valid."""
        self._emit(tokens.HTMLEntityStart())
        self._head += 1
        this = self._read(strict=True)
        if this == "#":
            numeric = True
            self._emit(tokens.HTMLEntityNumeric())
            self._head += 1
            this = self._read(strict=True)
            if this[0].lower() == "x":
                hexadecimal = True
                self._emit(tokens.HTMLEntityHex(char=this[0]))
                this = this[1:]
                if not this:
                    self._fail_route()
            else:
                hexadecimal = False
        else:
            numeric = hexadecimal = False
        valid = "0123456789abcdefABCDEF" if hexadecimal else "0123456789"
        if not numeric and not hexadecimal:
            # Named entities may use any ASCII letter:
            valid += "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        if not all([char in valid for char in this]):
            self._fail_route()
        self._head += 1
        if self._read() != ";":
            self._fail_route()
        if numeric:
            test = int(this, 16) if hexadecimal else int(this)
            if test < 1 or test > 0x10FFFF:  # outside Unicode's code point range
                self._fail_route()
        else:
            if this not in htmlentities.entitydefs:
                self._fail_route()
        self._emit(tokens.Text(text=this))
        self._emit(tokens.HTMLEntityEnd())

    def _parse_entity(self):
        """Parse an HTML entity at the head of the wikicode string."""
        reset = self._head
        self._push()
        try:
            self._really_parse_entity()
        except BadRoute:
            # Not a valid entity; emit the "&" as plain text:
            self._head = reset
            self._emit_text(self._read())
        else:
            self._emit_all(self._pop())
    def _parse_comment(self):
        """Parse an HTML comment at the head of the wikicode string."""
        self._head += 4  # skip over the "<!--"
        reset = self._head - 1
        self._push()
        while True:
            this = self._read()
            if this == self.END:
                # Unterminated comment; treat the "<!--" as plain text:
                self._pop()
                self._head = reset
                self._emit_text("<!--")
                return
            if this == self._read(1) == "-" and self._read(2) == ">":
                self._emit_first(tokens.CommentStart())
                self._emit(tokens.CommentEnd())
                self._emit_all(self._pop())
                self._head += 2
                return
            self._emit_text(this)
            self._head += 1
    def _push_tag_buffer(self, data):
        """Write a pending tag attribute from *data* to the stack."""
        if data.context & data.CX_QUOTED:
            self._emit_first(tokens.TagAttrQuote())
            self._emit_all(self._pop())
        buf = data.padding_buffer
        self._emit_first(tokens.TagAttrStart(pad_first=buf["first"],
            pad_before_eq=buf["before_eq"], pad_after_eq=buf["after_eq"]))
        self._emit_all(self._pop())
        # Clear the padding buffers for the next attribute:
        for key in data.padding_buffer:
            data.padding_buffer[key] = ""

    def _handle_tag_space(self, data, text):
        """Handle whitespace (*text*) inside of an HTML open tag."""
        ctx = data.context
        end_of_value = ctx & data.CX_ATTR_VALUE and not ctx & (data.CX_QUOTED | data.CX_NOTE_QUOTE)
        if end_of_value or (ctx & data.CX_QUOTED and ctx & data.CX_NOTE_SPACE):
            # The whitespace terminates the current attribute:
            self._push_tag_buffer(data)
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_NOTE_SPACE:
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_ATTR_NAME:
            data.context |= data.CX_NOTE_EQUALS
            data.padding_buffer["before_eq"] += text
        if ctx & data.CX_QUOTED and not ctx & data.CX_NOTE_SPACE:
            # Inside quotes, whitespace is part of the value itself:
            self._emit_text(text)
        elif data.context & data.CX_ATTR_READY:
            data.padding_buffer["first"] += text
        elif data.context & data.CX_ATTR_VALUE:
            data.padding_buffer["after_eq"] += text

    def _handle_tag_text(self, text):
        """Handle regular *text* inside of an HTML open tag."""
        next = self._read(1)
        if not self._can_recurse() or text not in self.MARKERS:
            self._emit_text(text)
        elif text == next == "{":
            self._parse_template_or_argument()
        elif text == next == "[":
            self._parse_wikilink()
        elif text == "<":
            self._parse_tag()
        else:
            self._emit_text(text)
    def _handle_tag_data(self, data, text):
        """Handle all sorts of *text* data inside of an HTML open tag."""
        for chunk in self.tag_splitter.split(text):
            if not chunk:
                continue
            if data.context & data.CX_NAME:
                if chunk in self.MARKERS or chunk.isspace():
                    self._fail_route()  # Tags must start with text, not spaces
                data.context = data.CX_NOTE_SPACE
            elif chunk.isspace():
                self._handle_tag_space(data, chunk)
                continue
            elif data.context & data.CX_NOTE_SPACE:
                if data.context & data.CX_QUOTED:
                    # Text right after a closing quote: rewind and reparse
                    # the value as unquoted instead of failing outright.
                    data.context = data.CX_ATTR_VALUE
                    self._pop()
                    self._head = data.reset - 1  # Will be auto-incremented
                    return  # Break early
                self._fail_route()
            elif data.context & data.CX_ATTR_READY:
                data.context = data.CX_ATTR_NAME
                self._push(contexts.TAG_ATTR)
            elif data.context & data.CX_ATTR_NAME:
                if chunk == "=":
                    data.context = data.CX_ATTR_VALUE | data.CX_NOTE_QUOTE
                    self._emit(tokens.TagAttrEquals())
                    continue
                if data.context & data.CX_NOTE_EQUALS:
                    # A new attribute began without an "=" for the last one:
                    self._push_tag_buffer(data)
                    data.context = data.CX_ATTR_NAME
                    self._push(contexts.TAG_ATTR)
            else:  # data.context & data.CX_ATTR_VALUE assured
                escaped = self._read(-1) == "\\" and self._read(-2) != "\\"
                if data.context & data.CX_NOTE_QUOTE:
                    data.context ^= data.CX_NOTE_QUOTE
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_QUOTED
                        self._push(self._context)
                        data.reset = self._head
                        continue
                elif data.context & data.CX_QUOTED:
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_NOTE_SPACE
                        continue
            self._handle_tag_text(chunk)
    def _handle_tag_close_open(self, data, token):
        """Handle the closing of a open tag (``<foo>``)."""
        if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
            self._push_tag_buffer(data)
        self._emit(token(padding=data.padding_buffer["first"]))
        self._head += 1

    def _handle_tag_open_close(self):
        """Handle the opening of a closing tag (``</foo>``)."""
        self._emit(tokens.TagOpenClose())
        self._push(contexts.TAG_CLOSE)
        self._head += 1

    def _handle_tag_close_close(self):
        """Handle the ending of a closing tag (``</foo>``)."""
        strip = lambda tok: tok.text.rstrip().lower()
        closing = self._pop()
        # The closing tag's name must match the opening tag's (at stack[1]):
        if len(closing) != 1 or (not isinstance(closing[0], tokens.Text) or
                                 strip(closing[0]) != strip(self._stack[1])):
            self._fail_route()
        self._emit_all(closing)
        self._emit(tokens.TagCloseClose())
        return self._pop()

    def _handle_blacklisted_tag(self):
        """Handle the body of an HTML tag that is parser-blacklisted."""
        while True:
            this, next = self._read(), self._read(1)
            if this is self.END:
                self._fail_route()
            elif this == "<" and next == "/":
                self._handle_tag_open_close()
                self._head += 1
                return self._parse(push=False)
            elif this == "&":
                self._parse_entity()
            else:
                self._emit_text(this)
            self._head += 1

    def _handle_single_only_tag_end(self):
        """Handle the end of an implicitly closing single-only HTML tag."""
        padding = self._stack.pop().padding
        self._emit(tokens.TagCloseSelfclose(padding=padding, implicit=True))
        self._head -= 1  # Offset displacement done by _handle_tag_close_open
        return self._pop()

    def _handle_single_tag_end(self):
        """Handle the stream end when inside a single-supporting HTML tag."""
        stack = self._stack
        # We need to find the index of the TagCloseOpen token corresponding to
        # the TagOpenOpen token located at index 0:
        depth = 1
        for index, token in enumerate(stack[2:], 2):
            if isinstance(token, tokens.TagOpenOpen):
                depth += 1
            elif isinstance(token, tokens.TagCloseOpen):
                depth -= 1
                if depth == 0:
                    break
        padding = stack[index].padding
        stack[index] = tokens.TagCloseSelfclose(padding=padding, implicit=True)
        return self._pop()
    def _really_parse_tag(self):
        """Actually parse an HTML tag, starting with the open (``<foo>``)."""
        data = _TagOpenData()
        self._push(contexts.TAG_OPEN)
        self._emit(tokens.TagOpenOpen())
        while True:
            this, next = self._read(), self._read(1)
            # We may only exit the tag when we aren't mid-name or mid-quote:
            can_exit = (not data.context & (data.CX_QUOTED | data.CX_NAME) or
                        data.context & data.CX_NOTE_SPACE)
            if this is self.END:
                if self._context & contexts.TAG_ATTR:
                    if data.context & data.CX_QUOTED:
                        # Unclosed attribute quote: reset, don't die
                        data.context = data.CX_ATTR_VALUE
                        self._pop()
                        self._head = data.reset
                        continue
                    self._pop()
                self._fail_route()
            elif this == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseOpen)
                self._context = contexts.TAG_BODY
                if is_single_only(self._stack[1].text):
                    return self._handle_single_only_tag_end()
                if is_parsable(self._stack[1].text):
                    return self._parse(push=False)
                return self._handle_blacklisted_tag()
            elif this == "/" and next == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseSelfclose)
                return self._pop()
            else:
                self._handle_tag_data(data, this)
            self._head += 1

    def _handle_invalid_tag_start(self):
        """Handle the (possible) start of an implicitly closing single tag."""
        reset = self._head + 1
        self._head += 2
        try:
            if not is_single_only(self.tag_splitter.split(self._read())[0]):
                raise BadRoute()
            tag = self._really_parse_tag()
        except BadRoute:
            self._head = reset
            self._emit_text("</")
        else:
            tag[0].invalid = True  # Set flag of TagOpenOpen
            self._emit_all(tag)

    def _parse_tag(self):
        """Parse an HTML tag at the head of the wikicode string."""
        reset = self._head
        self._head += 1
        try:
            tag = self._really_parse_tag()
        except BadRoute:
            # Not a valid tag; emit the "<" as plain text:
            self._head = reset
            self._emit_text("<")
        else:
            self._emit_all(tag)
    def _emit_style_tag(self, tag, markup, body):
        """Write the body of a tag and the tokens that should surround it.

        *tag* is the HTML tag name (e.g. ``"i"``), *markup* the wiki markup
        (e.g. ``"''"``), and *body* the already-parsed token stack.
        """
        self._emit(tokens.TagOpenOpen(wiki_markup=markup))
        self._emit_text(tag)
        self._emit(tokens.TagCloseOpen())
        self._emit_all(body)
        self._emit(tokens.TagOpenClose())
        self._emit_text(tag)
        self._emit(tokens.TagCloseClose())
    def _parse_italics(self):
        """Parse wiki-style italics."""
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_ITALICS)
        except BadRoute as route:
            self._head = reset
            if route.context & contexts.STYLE_PASS_AGAIN:
                # The route asked for a second pass with different rules:
                new_ctx = contexts.STYLE_ITALICS | contexts.STYLE_SECOND_PASS
                stack = self._parse(new_ctx)
            else:
                return self._emit_text("''")
        self._emit_style_tag("i", "''", stack)

    def _parse_bold(self):
        """Parse wiki-style bold.

        Returns a truthy value if the caller's stack should be popped.
        """
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_BOLD)
        except BadRoute:
            self._head = reset
            if self._context & contexts.STYLE_SECOND_PASS:
                self._emit_text("'")
                return True
            elif self._context & contexts.STYLE_ITALICS:
                self._context |= contexts.STYLE_PASS_AGAIN
                self._emit_text("'''")
            else:
                # Treat the first tick as text and retry as italics:
                self._emit_text("'")
                self._parse_italics()
        else:
            self._emit_style_tag("b", "'''", stack)

    def _parse_italics_and_bold(self):
        """Parse wiki-style italics and bold together (i.e., five ticks)."""
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_BOLD)
        except BadRoute:
            self._head = reset
            try:
                stack = self._parse(contexts.STYLE_ITALICS)
            except BadRoute:
                self._head = reset
                self._emit_text("'''''")
            else:
                reset = self._head
                try:
                    stack2 = self._parse(contexts.STYLE_BOLD)
                except BadRoute:
                    self._head = reset
                    self._emit_text("'''")
                    self._emit_style_tag("i", "''", stack)
                else:
                    # Italics wrapped inside bold:
                    self._push()
                    self._emit_style_tag("i", "''", stack)
                    self._emit_all(stack2)
                    self._emit_style_tag("b", "'''", self._pop())
        else:
            reset = self._head
            try:
                stack2 = self._parse(contexts.STYLE_ITALICS)
            except BadRoute:
                self._head = reset
                self._emit_text("''")
                self._emit_style_tag("b", "'''", stack)
            else:
                # Bold wrapped inside italics:
                self._push()
                self._emit_style_tag("b", "'''", stack)
                self._emit_all(stack2)
                self._emit_style_tag("i", "''", self._pop())
    def _parse_style(self):
        """Parse wiki-style formatting (``''``/``'''`` for italics/bold)."""
        self._head += 2
        ticks = 2
        while self._read() == "'":
            self._head += 1
            ticks += 1
        italics = self._context & contexts.STYLE_ITALICS
        bold = self._context & contexts.STYLE_BOLD
        if ticks > 5:
            # Only five ticks are meaningful; the rest are literal text:
            self._emit_text("'" * (ticks - 5))
            ticks = 5
        elif ticks == 4:
            # Four ticks are bold (3) plus one literal apostrophe:
            self._emit_text("'")
            ticks = 3
        if (italics and ticks in (2, 5)) or (bold and ticks in (3, 5)):
            # These ticks close the style we are currently inside:
            if ticks == 5:
                self._head -= 3 if italics else 2
            return self._pop()
        elif not self._can_recurse():
            if ticks == 3:
                if self._context & contexts.STYLE_SECOND_PASS:
                    self._emit_text("'")
                    return self._pop()
                if self._context & contexts.STYLE_ITALICS:
                    self._context |= contexts.STYLE_PASS_AGAIN
            self._emit_text("'" * ticks)
        elif ticks == 2:
            self._parse_italics()
        elif ticks == 3:
            if self._parse_bold():
                return self._pop()
        else:  # ticks == 5
            self._parse_italics_and_bold()
        self._head -= 1
  866. def _handle_list_marker(self):
  867. """Handle a list marker at the head (``#``, ``*``, ``;``, ``:``)."""
  868. markup = self._read()
  869. if markup == ";":
  870. self._context |= contexts.DL_TERM
  871. self._emit(tokens.TagOpenOpen(wiki_markup=markup))
  872. self._emit_text(get_html_tag(markup))
  873. self._emit(tokens.TagCloseSelfclose())
  874. def _handle_list(self):
  875. """Handle a wiki-style list (``#``, ``*``, ``;``, ``:``)."""
  876. self._handle_list_marker()
  877. while self._read(1) in ("#", "*", ";", ":"):
  878. self._head += 1
  879. self._handle_list_marker()
  880. def _handle_hr(self):
  881. """Handle a wiki-style horizontal rule (``----``) in the string."""
  882. length = 4
  883. self._head += 3
  884. while self._read(1) == "-":
  885. length += 1
  886. self._head += 1
  887. self._emit(tokens.TagOpenOpen(wiki_markup="-" * length))
  888. self._emit_text("hr")
  889. self._emit(tokens.TagCloseSelfclose())
  890. def _handle_dl_term(self):
  891. """Handle the term in a description list (``foo`` in ``;foo:bar``)."""
  892. self._context ^= contexts.DL_TERM
  893. if self._read() == ":":
  894. self._handle_list_marker()
  895. else:
  896. self._emit_text("\n")
  897. def _handle_end(self):
  898. """Handle the end of the stream of wikitext."""
  899. if self._context & contexts.FAIL:
  900. if self._context & contexts.TAG_BODY:
  901. if is_single(self._stack[1].text):
  902. return self._handle_single_tag_end()
  903. if self._context & contexts.DOUBLE:
  904. self._pop()
  905. self._fail_route()
  906. return self._pop()
def _verify_safe(self, this):
    """Make sure we are not trying to write an invalid character.

    Returns False when emitting *this* in the current context would
    invalidate the route (the caller then fails it).  Otherwise returns
    True, possibly setting FAIL_* flags so that a *later* character
    triggers the failure instead.
    """
    context = self._context
    if context & contexts.FAIL_NEXT:
        # An earlier character already doomed this route.
        return False
    if context & contexts.WIKILINK_TITLE:
        if this == "]" or this == "{":
            # Allowed here, but nothing may follow it.
            self._context |= contexts.FAIL_NEXT
        elif this == "\n" or this == "[" or this == "}":
            return False
        return True
    elif context & contexts.EXT_LINK_TITLE:
        # External link titles may contain anything but a newline.
        return this != "\n"
    elif context & contexts.TEMPLATE_NAME:
        if this == "{" or this == "}" or this == "[":
            self._context |= contexts.FAIL_NEXT
            return True
        if this == "]":
            return False
        if this == "|":
            return True
        if context & contexts.HAS_TEXT:
            if context & contexts.FAIL_ON_TEXT:
                # After a newline broke the name, only whitespace (or
                # the end of input) is acceptable.
                if this is self.END or not this.isspace():
                    return False
            else:
                if this == "\n":
                    self._context |= contexts.FAIL_ON_TEXT
        elif this is self.END or not this.isspace():
            # First non-space character of the name.
            self._context |= contexts.HAS_TEXT
        return True
    elif context & contexts.TAG_CLOSE:
        return this != "<"
    else:
        # Remaining unsafe contexts: track lone braces, which are only
        # valid when they turn out to open/close a nested template or
        # argument; the following character decides.
        if context & contexts.FAIL_ON_EQUALS:
            if this == "=":
                return False
        elif context & contexts.FAIL_ON_LBRACE:
            if this == "{" or (self._read(-1) == self._read(-2) == "{"):
                if context & contexts.TEMPLATE:
                    self._context |= contexts.FAIL_ON_EQUALS
                else:
                    self._context |= contexts.FAIL_NEXT
                return True
            # The "{" was a false alarm; clear the flag.
            self._context ^= contexts.FAIL_ON_LBRACE
        elif context & contexts.FAIL_ON_RBRACE:
            if this == "}":
                if context & contexts.TEMPLATE:
                    self._context |= contexts.FAIL_ON_EQUALS
                else:
                    self._context |= contexts.FAIL_NEXT
                return True
            # The "}" was a false alarm; clear the flag.
            self._context ^= contexts.FAIL_ON_RBRACE
        elif this == "{":
            self._context |= contexts.FAIL_ON_LBRACE
        elif this == "}":
            self._context |= contexts.FAIL_ON_RBRACE
        return True
def _parse(self, context=0, push=True):
    """Parse the wikicode string, using *context* for when to stop.

    This is the tokenizer's main dispatch loop: it reads one text
    segment at a time, routes markup characters to the matching
    ``_parse_*``/``_handle_*`` method, and returns the finished token
    stack when the current route ends.  If *push* is True, a new stack
    is pushed with *context* before parsing begins.
    """
    if push:
        self._push(context)
    while True:
        this = self._read()
        if self._context & contexts.UNSAFE:
            # In unsafe contexts, an invalid character fails the route.
            if not self._verify_safe(this):
                if self._context & contexts.DOUBLE:
                    self._pop()
                self._fail_route()
        if this not in self.MARKERS:
            # Ordinary text: emit it and advance.
            self._emit_text(this)
            self._head += 1
            continue
        if this is self.END:
            return self._handle_end()
        next = self._read(1)
        if this == next == "{":
            if self._can_recurse():
                self._parse_template_or_argument()
            else:
                self._emit_text("{")
        elif this == "|" and self._context & contexts.TEMPLATE:
            self._handle_template_param()
        elif this == "=" and self._context & contexts.TEMPLATE_PARAM_KEY:
            self._handle_template_param_value()
        elif this == next == "}" and self._context & contexts.TEMPLATE:
            return self._handle_template_end()
        elif this == "|" and self._context & contexts.ARGUMENT_NAME:
            self._handle_argument_separator()
        elif this == next == "}" and self._context & contexts.ARGUMENT:
            # Arguments close with three braces, not two.
            if self._read(2) == "}":
                return self._handle_argument_end()
            else:
                self._emit_text("}")
        elif this == next == "[" and self._can_recurse():
            if not self._context & contexts.NO_WIKILINKS:
                self._parse_wikilink()
            else:
                self._emit_text("[")
        elif this == "|" and self._context & contexts.WIKILINK_TITLE:
            self._handle_wikilink_separator()
        elif this == next == "]" and self._context & contexts.WIKILINK:
            return self._handle_wikilink_end()
        elif this == "[":
            # Single bracket: possible bracketed external link.
            self._parse_external_link(True)
        elif this == ":" and self._read(-1) not in self.MARKERS:
            # Possible bare external link (":" ending a URI scheme).
            self._parse_external_link(False)
        elif this == "]" and self._context & contexts.EXT_LINK_TITLE:
            return self._pop()
        elif this == "=" and not self._global & contexts.GL_HEADING:
            # Headings may only start at the beginning of a line.
            if self._read(-1) in ("\n", self.START):
                self._parse_heading()
            else:
                self._emit_text("=")
        elif this == "=" and self._context & contexts.HEADING:
            return self._handle_heading_end()
        elif this == "\n" and self._context & contexts.HEADING:
            # Headings cannot span multiple lines.
            self._fail_route()
        elif this == "&":
            self._parse_entity()
        elif this == "<" and next == "!":
            # "<!--" opens an HTML comment.
            if self._read(2) == self._read(3) == "-":
                self._parse_comment()
            else:
                self._emit_text(this)
        elif this == "<" and next == "/" and self._read(2) is not self.END:
            if self._context & contexts.TAG_BODY:
                self._handle_tag_open_close()
            else:
                self._handle_invalid_tag_start()
        elif this == "<" and not self._context & contexts.TAG_CLOSE:
            if self._can_recurse():
                self._parse_tag()
            else:
                self._emit_text("<")
        elif this == ">" and self._context & contexts.TAG_CLOSE:
            return self._handle_tag_close_close()
        elif this == next == "'" and not self._skip_style_tags:
            # _parse_style() returns a stack when it closes this route.
            result = self._parse_style()
            if result is not None:
                return result
        elif self._read(-1) in ("\n", self.START):
            # Line-initial markup: lists and horizontal rules.
            if this in ("#", "*", ";", ":"):
                self._handle_list()
            elif this == next == self._read(2) == self._read(3) == "-":
                self._handle_hr()
            else:
                self._emit_text(this)
        elif this in ("\n", ":") and self._context & contexts.DL_TERM:
            self._handle_dl_term()
        else:
            self._emit_text(this)
        self._head += 1
  1060. def tokenize(self, text, context=0, skip_style_tags=False):
  1061. """Build a list of tokens from a string of wikicode and return it."""
  1062. self._skip_style_tags = skip_style_tags
  1063. split = self.regex.split(text)
  1064. self._text = [segment for segment in split if segment]
  1065. self._head = self._global = self._depth = self._cycles = 0
  1066. try:
  1067. return self._parse(context)
  1068. except BadRoute: # pragma: no cover (untestable/exceptional case)
  1069. raise ParserError("Python tokenizer exited with BadRoute")