A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

794 lines
30 KiB

  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@verizon.net>
  4. #
  5. # Permission is hereby granted, free of charge, to any person obtaining a copy
  6. # of this software and associated documentation files (the "Software"), to deal
  7. # in the Software without restriction, including without limitation the rights
  8. # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  9. # copies of the Software, and to permit persons to whom the Software is
  10. # furnished to do so, subject to the following conditions:
  11. #
  12. # The above copyright notice and this permission notice shall be included in
  13. # all copies or substantial portions of the Software.
  14. #
  15. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  18. # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. # SOFTWARE.
  22. from __future__ import unicode_literals
  23. from math import log
  24. import re
  25. from . import contexts, tokens
  26. from ..compat import htmlentities
  27. from ..tag_defs import is_parsable, is_single, is_single_only
  28. __all__ = ["Tokenizer"]
  29. class BadRoute(Exception):
  30. """Raised internally when the current tokenization route is invalid."""
  31. pass
  32. class _TagOpenData(object):
  33. """Stores data about an HTML open tag, like ``<ref name="foo">``."""
  34. CX_NAME = 1 << 0
  35. CX_ATTR_READY = 1 << 1
  36. CX_ATTR_NAME = 1 << 2
  37. CX_ATTR_VALUE = 1 << 3
  38. CX_QUOTED = 1 << 4
  39. CX_NOTE_SPACE = 1 << 5
  40. CX_NOTE_EQUALS = 1 << 6
  41. CX_NOTE_QUOTE = 1 << 7
  42. def __init__(self):
  43. self.context = self.CX_NAME
  44. self.padding_buffer = {"first": "", "before_eq": "", "after_eq": ""}
  45. self.reset = 0
class Tokenizer(object):
    """Creates a list of tokens from a string of wikicode."""
    USES_C = False    # distinguishes this pure-Python tokenizer from the C one
    START = object()  # sentinel: tried to read before the start of the text
    END = object()    # sentinel: tried to read past the end of the text
    # Characters that may trigger parser behavior; chunks not in this list
    # are plain text and are emitted without safety checks.
    MARKERS = ["{", "}", "[", "]", "<", ">", "|", "=", "&", "#", "*", ";", ":",
               "/", "-", "\n", END]
    MAX_DEPTH = 40        # maximum nested-stack depth before refusing to recurse
    MAX_CYCLES = 100000   # maximum total stack pushes before refusing to recurse
    # Splits wikicode into alternating text and single-character marker
    # segments.  (IGNORECASE is a no-op here — the class has no letters.)
    regex = re.compile(r"([{}\[\]<>|=&#*;:/\\\"\-!\n])", flags=re.IGNORECASE)
    # Splits the inside of an HTML tag on runs of whitespace/quotes/backslashes.
    tag_splitter = re.compile(r"([\s\"\\]+)")
    def __init__(self):
        self._text = None    # list of text/marker segments being tokenized
        self._head = 0       # index of the current segment in self._text
        self._stacks = []    # list of [token stack, context, textbuffer] triples
        self._global = 0     # context flags that apply across all stacks
        self._depth = 0      # current depth of self._stacks
        self._cycles = 0     # total number of stack pushes ever made
    @property
    def _stack(self):
        """The current token stack."""
        return self._stacks[-1][0]

    @property
    def _context(self):
        """The current token context."""
        return self._stacks[-1][1]

    @_context.setter
    def _context(self, value):
        self._stacks[-1][1] = value

    @property
    def _textbuffer(self):
        """The current textbuffer."""
        return self._stacks[-1][2]

    @_textbuffer.setter
    def _textbuffer(self, value):
        self._stacks[-1][2] = value
    def _push(self, context=0):
        """Add a new token stack, context, and textbuffer to the list."""
        self._stacks.append([[], context, []])
        self._depth += 1
        self._cycles += 1  # never decremented; guards against infinite loops

    def _push_textbuffer(self):
        """Push the textbuffer onto the stack as a Text node and clear it."""
        if self._textbuffer:
            self._stack.append(tokens.Text(text="".join(self._textbuffer)))
            self._textbuffer = []
    def _pop(self, keep_context=False):
        """Pop the current stack/context/textbuffer, returning the stack.

        If *keep_context* is ``True``, then we will replace the underlying
        stack's context with the current stack's.
        """
        self._push_textbuffer()
        self._depth -= 1
        if keep_context:
            context = self._context
            stack = self._stacks.pop()[0]
            self._context = context
            return stack
        return self._stacks.pop()[0]
  105. def _can_recurse(self):
  106. """Return whether or not our max recursion depth has been exceeded."""
  107. return self._depth < self.MAX_DEPTH and self._cycles < self.MAX_CYCLES
    def _fail_route(self):
        """Fail the current tokenization route.

        Discards the current stack/context/textbuffer and raises
        :py:exc:`~.BadRoute`.
        """
        self._pop()
        raise BadRoute()
    def _emit(self, token):
        """Write a token to the end of the current token stack."""
        self._push_textbuffer()
        self._stack.append(token)

    def _emit_first(self, token):
        """Write a token to the beginning of the current token stack."""
        self._push_textbuffer()
        self._stack.insert(0, token)

    def _emit_text(self, text):
        """Write text to the current textbuffer."""
        self._textbuffer.append(text)

    def _emit_all(self, tokenlist):
        """Write a series of tokens to the current stack at once."""
        # Merge a leading Text token into our textbuffer so adjacent text
        # ends up in a single Text node.
        if tokenlist and isinstance(tokenlist[0], tokens.Text):
            self._emit_text(tokenlist.pop(0).text)
        self._push_textbuffer()
        self._stack.extend(tokenlist)

    def _emit_text_then_stack(self, text):
        """Pop the current stack, write *text*, and then write the stack."""
        stack = self._pop()
        self._emit_text(text)
        if stack:
            self._emit_all(stack)
        # Compensate for the increment the caller's loop will perform.
        self._head -= 1
  139. def _read(self, delta=0, wrap=False, strict=False):
  140. """Read the value at a relative point in the wikicode.
  141. The value is read from :py:attr:`self._head <_head>` plus the value of
  142. *delta* (which can be negative). If *wrap* is ``False``, we will not
  143. allow attempts to read from the end of the string if ``self._head +
  144. delta`` is negative. If *strict* is ``True``, the route will be failed
  145. (with :py:meth:`_fail_route`) if we try to read from past the end of
  146. the string; otherwise, :py:attr:`self.END <END>` is returned. If we try
  147. to read from before the start of the string, :py:attr:`self.START
  148. <START>` is returned.
  149. """
  150. index = self._head + delta
  151. if index < 0 and (not wrap or abs(index) > len(self._text)):
  152. return self.START
  153. try:
  154. return self._text[index]
  155. except IndexError:
  156. if strict:
  157. self._fail_route()
  158. return self.END
    def _parse_template(self):
        """Parse a template at the head of the wikicode string."""
        reset = self._head
        try:
            template = self._parse(contexts.TEMPLATE_NAME)
        except BadRoute:
            self._head = reset
            raise  # let the caller decide how to recover
        self._emit_first(tokens.TemplateOpen())
        self._emit_all(template)
        self._emit(tokens.TemplateClose())

    def _parse_argument(self):
        """Parse an argument at the head of the wikicode string."""
        reset = self._head
        try:
            argument = self._parse(contexts.ARGUMENT_NAME)
        except BadRoute:
            self._head = reset
            raise  # let the caller decide how to recover
        self._emit_first(tokens.ArgumentOpen())
        self._emit_all(argument)
        self._emit(tokens.ArgumentClose())

    def _parse_template_or_argument(self):
        """Parse a template or argument at the head of the wikicode string.

        Counts the run of opening braces, then repeatedly consumes them as
        arguments ({{{...}}}, three braces) or templates ({{...}}, two
        braces), emitting any leftover braces as plain text.
        """
        self._head += 2
        braces = 2
        while self._read() == "{":
            self._head += 1
            braces += 1
        self._push()
        while braces:
            if braces == 1:
                # A lone brace can't open anything; emit it as text.
                return self._emit_text_then_stack("{")
            if braces == 2:
                try:
                    self._parse_template()
                except BadRoute:
                    return self._emit_text_then_stack("{{")
                break
            # Three or more braces: prefer an argument, fall back to a
            # template, and finally give up and emit the braces as text.
            try:
                self._parse_argument()
                braces -= 3
            except BadRoute:
                try:
                    self._parse_template()
                    braces -= 2
                except BadRoute:
                    return self._emit_text_then_stack("{" * braces)
            if braces:
                self._head += 1
        self._emit_all(self._pop())
        if self._context & contexts.FAIL_NEXT:
            self._context ^= contexts.FAIL_NEXT
    def _handle_template_param(self):
        """Handle a template parameter at the head of the string."""
        if self._context & contexts.TEMPLATE_NAME:
            self._context ^= contexts.TEMPLATE_NAME
        elif self._context & contexts.TEMPLATE_PARAM_VALUE:
            self._context ^= contexts.TEMPLATE_PARAM_VALUE
        elif self._context & contexts.TEMPLATE_PARAM_KEY:
            # Finish the previous (keyless) parameter before starting anew.
            self._emit_all(self._pop(keep_context=True))
        self._context |= contexts.TEMPLATE_PARAM_KEY
        self._emit(tokens.TemplateParamSeparator())
        # Keys get their own stack so they can be discarded on failure.
        self._push(self._context)

    def _handle_template_param_value(self):
        """Handle a template parameter's value at the head of the string."""
        self._emit_all(self._pop(keep_context=True))
        self._context ^= contexts.TEMPLATE_PARAM_KEY
        self._context |= contexts.TEMPLATE_PARAM_VALUE
        self._emit(tokens.TemplateParamEquals())

    def _handle_template_end(self):
        """Handle the end of a template at the head of the string."""
        if self._context & contexts.TEMPLATE_PARAM_KEY:
            self._emit_all(self._pop(keep_context=True))
        self._head += 1
        return self._pop()

    def _handle_argument_separator(self):
        """Handle the separator between an argument's name and default."""
        self._context ^= contexts.ARGUMENT_NAME
        self._context |= contexts.ARGUMENT_DEFAULT
        self._emit(tokens.ArgumentSeparator())

    def _handle_argument_end(self):
        """Handle the end of an argument at the head of the string."""
        self._head += 2  # skip the second and third closing braces
        return self._pop()
    def _parse_wikilink(self):
        """Parse an internal wikilink at the head of the wikicode string."""
        self._head += 2
        reset = self._head - 1
        try:
            wikilink = self._parse(contexts.WIKILINK_TITLE)
        except BadRoute:
            self._head = reset
            self._emit_text("[[")  # not a link after all; emit as text
        else:
            if self._context & contexts.FAIL_NEXT:
                self._context ^= contexts.FAIL_NEXT
            self._emit(tokens.WikilinkOpen())
            self._emit_all(wikilink)
            self._emit(tokens.WikilinkClose())

    def _handle_wikilink_separator(self):
        """Handle the separator between a wikilink's title and its text."""
        self._context ^= contexts.WIKILINK_TITLE
        self._context |= contexts.WIKILINK_TEXT
        self._emit(tokens.WikilinkSeparator())

    def _handle_wikilink_end(self):
        """Handle the end of a wikilink at the head of the string."""
        self._head += 1  # skip the second closing bracket
        return self._pop()
    def _parse_heading(self):
        """Parse a section heading at the head of the wikicode string."""
        self._global |= contexts.GL_HEADING
        reset = self._head
        self._head += 1
        best = 1
        # Count the run of '=' signs to find the maximum possible level.
        while self._read() == "=":
            best += 1
            self._head += 1
        context = contexts.HEADING_LEVEL_1 << min(best - 1, 5)  # cap at level 6
        try:
            title, level = self._parse(context)
        except BadRoute:
            # No closing '='s found; emit the '='s we consumed as text.
            self._head = reset + best - 1
            self._emit_text("=" * best)
        else:
            self._emit(tokens.HeadingStart(level=level))
            if level < best:
                # Extra leading '='s beyond the closer's level become text.
                self._emit_text("=" * (best - level))
            self._emit_all(title)
            self._emit(tokens.HeadingEnd())
        finally:
            self._global ^= contexts.GL_HEADING

    def _handle_heading_end(self):
        """Handle the end of a section heading at the head of the string."""
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        # Recover the level that _parse_heading encoded in our context flag.
        current = int(log(self._context / contexts.HEADING_LEVEL_1, 2)) + 1
        level = min(current, min(best, 6))
        try:  # Try to check for a heading closure after this one
            after, after_level = self._parse(self._context)
        except BadRoute:
            if level < best:
                self._emit_text("=" * (best - level))
            self._head = reset + best - 1
            return self._pop(), level
        else:  # Found another closure
            self._emit_text("=" * best)
            self._emit_all(after)
            return self._pop(), after_level
  312. def _really_parse_entity(self):
  313. """Actually parse an HTML entity and ensure that it is valid."""
  314. self._emit(tokens.HTMLEntityStart())
  315. self._head += 1
  316. this = self._read(strict=True)
  317. if this == "#":
  318. numeric = True
  319. self._emit(tokens.HTMLEntityNumeric())
  320. self._head += 1
  321. this = self._read(strict=True)
  322. if this[0].lower() == "x":
  323. hexadecimal = True
  324. self._emit(tokens.HTMLEntityHex(char=this[0]))
  325. this = this[1:]
  326. if not this:
  327. self._fail_route()
  328. else:
  329. hexadecimal = False
  330. else:
  331. numeric = hexadecimal = False
  332. valid = "0123456789abcdefABCDEF" if hexadecimal else "0123456789"
  333. if not numeric and not hexadecimal:
  334. valid += "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
  335. if not all([char in valid for char in this]):
  336. self._fail_route()
  337. self._head += 1
  338. if self._read() != ";":
  339. self._fail_route()
  340. if numeric:
  341. test = int(this, 16) if hexadecimal else int(this)
  342. if test < 1 or test > 0x10FFFF:
  343. self._fail_route()
  344. else:
  345. if this not in htmlentities.entitydefs:
  346. self._fail_route()
  347. self._emit(tokens.Text(text=this))
  348. self._emit(tokens.HTMLEntityEnd())
    def _parse_entity(self):
        """Parse an HTML entity at the head of the wikicode string."""
        reset = self._head
        self._push()
        try:
            self._really_parse_entity()
        except BadRoute:
            # Not a valid entity; emit the '&' (current segment) as text.
            self._head = reset
            self._emit_text(self._read())
        else:
            self._emit_all(self._pop())

    def _parse_comment(self):
        """Parse an HTML comment at the head of the wikicode string."""
        self._head += 4  # skip over "<", "!", "-", "-"
        reset = self._head - 1
        try:
            comment = self._parse(contexts.COMMENT)
        except BadRoute:
            self._head = reset
            self._emit_text("<!--")
        else:
            self._emit(tokens.CommentStart())
            self._emit_all(comment)
            self._emit(tokens.CommentEnd())
            self._head += 2  # skip over the "->" after the closing "-"
    def _push_tag_buffer(self, data):
        """Write a pending tag attribute from *data* to the stack.

        *data* is the :py:class:`_TagOpenData` for the tag being parsed.
        """
        if data.context & data.CX_QUOTED:
            # Close out the quoted-value sub-stack first.
            self._emit_first(tokens.TagAttrQuote())
            self._emit_all(self._pop())
        buf = data.padding_buffer
        self._emit_first(tokens.TagAttrStart(pad_first=buf["first"],
            pad_before_eq=buf["before_eq"], pad_after_eq=buf["after_eq"]))
        self._emit_all(self._pop())
        data.padding_buffer = {key: "" for key in data.padding_buffer}

    def _handle_tag_space(self, data, text):
        """Handle whitespace (*text*) inside of an HTML open tag."""
        ctx = data.context
        end_of_value = ctx & data.CX_ATTR_VALUE and not ctx & (data.CX_QUOTED | data.CX_NOTE_QUOTE)
        if end_of_value or (ctx & data.CX_QUOTED and ctx & data.CX_NOTE_SPACE):
            # Whitespace terminates the attribute; flush it to the stack.
            self._push_tag_buffer(data)
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_NOTE_SPACE:
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_ATTR_NAME:
            data.context |= data.CX_NOTE_EQUALS
            data.padding_buffer["before_eq"] += text
        # Record where the whitespace itself belongs: literal text inside a
        # quoted value, or one of the padding slots otherwise.
        if ctx & data.CX_QUOTED and not ctx & data.CX_NOTE_SPACE:
            self._emit_text(text)
        elif data.context & data.CX_ATTR_READY:
            data.padding_buffer["first"] += text
        elif data.context & data.CX_ATTR_VALUE:
            data.padding_buffer["after_eq"] += text
    def _handle_tag_text(self, text):
        """Handle regular *text* inside of an HTML open tag."""
        next = self._read(1)
        if not self._can_recurse() or text not in self.MARKERS:
            self._emit_text(text)
        elif text == next == "{":
            self._parse_template_or_argument()
        elif text == next == "[":
            self._parse_wikilink()
        elif text == "<":
            self._parse_tag()
        else:
            # A marker with no special meaning in this position.
            self._emit_text(text)
    def _handle_tag_data(self, data, text):
        """Handle all sorts of *text* data inside of an HTML open tag.

        Splits *text* into chunks and advances the tag-parsing state machine
        in *data* (a :py:class:`_TagOpenData`) for each one.
        """
        for chunk in self.tag_splitter.split(text):
            if not chunk:
                continue
            if data.context & data.CX_NAME:
                if chunk in self.MARKERS or chunk.isspace():
                    self._fail_route()  # Tags must start with text, not spaces
                data.context = data.CX_NOTE_SPACE
            elif chunk.isspace():
                self._handle_tag_space(data, chunk)
                continue
            elif data.context & data.CX_NOTE_SPACE:
                if data.context & data.CX_QUOTED:
                    # Non-space after a quoted value: the quote was invalid;
                    # rewind and re-parse the value as unquoted.
                    data.context = data.CX_ATTR_VALUE
                    self._pop()
                    self._head = data.reset - 1  # Will be auto-incremented
                    return  # Break early
                self._fail_route()
            elif data.context & data.CX_ATTR_READY:
                data.context = data.CX_ATTR_NAME
                self._push(contexts.TAG_ATTR)
            elif data.context & data.CX_ATTR_NAME:
                if chunk == "=":
                    data.context = data.CX_ATTR_VALUE | data.CX_NOTE_QUOTE
                    self._emit(tokens.TagAttrEquals())
                    continue
                if data.context & data.CX_NOTE_EQUALS:
                    # Name followed by another name: previous attribute had
                    # no value; flush it and start a new one.
                    self._push_tag_buffer(data)
                    data.context = data.CX_ATTR_NAME
                    self._push(contexts.TAG_ATTR)
            elif data.context & data.CX_ATTR_VALUE:
                # A quote is escaped by a backslash (itself not escaped).
                escaped = self._read(-1) == "\\" and self._read(-2) != "\\"
                if data.context & data.CX_NOTE_QUOTE:
                    data.context ^= data.CX_NOTE_QUOTE
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_QUOTED
                        self._push(self._context)
                        data.reset = self._head
                        continue
                elif data.context & data.CX_QUOTED:
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_NOTE_SPACE
                        continue
            self._handle_tag_text(chunk)
    def _handle_tag_close_open(self, data, token):
        """Handle the closing of a open tag (``<foo>``).

        *token* is the token class to emit (TagCloseOpen or
        TagCloseSelfclose).
        """
        if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
            self._push_tag_buffer(data)
        self._emit(token(padding=data.padding_buffer["first"]))
        self._head += 1

    def _handle_tag_open_close(self):
        """Handle the opening of a closing tag (``</foo>``)."""
        self._emit(tokens.TagOpenClose())
        self._push(contexts.TAG_CLOSE)
        self._head += 1

    def _handle_tag_close_close(self):
        """Handle the ending of a closing tag (``</foo>``)."""
        strip = lambda tok: tok.text.rstrip().lower()
        closing = self._pop()
        # The closing tag must be a single Text token matching the name the
        # tag was opened with (self._stack[1], right after TagOpenOpen).
        if len(closing) != 1 or (not isinstance(closing[0], tokens.Text) or
                                 strip(closing[0]) != strip(self._stack[1])):
            self._fail_route()
        self._emit_all(closing)
        self._emit(tokens.TagCloseClose())
        return self._pop()

    def _handle_blacklisted_tag(self):
        """Handle the body of an HTML tag that is parser-blacklisted."""
        # Treat everything as plain text until the matching close tag.
        while True:
            this, next = self._read(), self._read(1)
            self._head += 1
            if this is self.END:
                self._fail_route()
            elif this == "<" and next == "/":
                self._handle_tag_open_close()
                return self._parse(push=False)
            else:
                self._emit_text(this)

    def _handle_single_only_tag_end(self):
        """Handle the end of an implicitly closing single-only HTML tag."""
        padding = self._stack.pop().padding
        self._emit(tokens.TagCloseSelfclose(padding=padding, implicit=True))
        self._head -= 1  # Offset displacement done by _handle_tag_close_open
        return self._pop()

    def _handle_single_tag_end(self):
        """Handle the stream end when inside a single-supporting HTML tag."""
        # Rewrite the TagCloseOpen already on the stack as an implicit
        # self-closing token.
        gen = enumerate(self._stack)
        index = next(i for i, t in gen if isinstance(t, tokens.TagCloseOpen))
        padding = self._stack[index].padding
        token = tokens.TagCloseSelfclose(padding=padding, implicit=True)
        self._stack[index] = token
        return self._pop()
    def _really_parse_tag(self):
        """Actually parse an HTML tag, starting with the open (``<foo>``)."""
        data = _TagOpenData()
        self._push(contexts.TAG_OPEN)
        self._emit(tokens.TagOpenOpen())
        while True:
            this, next = self._read(), self._read(1)
            # We may only exit on '>' once past the name and outside quotes.
            can_exit = (not data.context & (data.CX_QUOTED | data.CX_NAME) or
                        data.context & data.CX_NOTE_SPACE)
            if this is self.END:
                if self._context & contexts.TAG_ATTR:
                    if data.context & data.CX_QUOTED:
                        # Unclosed attribute quote: reset, don't die
                        data.context = data.CX_ATTR_VALUE
                        self._pop()
                        self._head = data.reset
                        continue
                    self._pop()
                self._fail_route()
            elif this == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseOpen)
                self._context = contexts.TAG_BODY
                # self._stack[1] is the Text token holding the tag's name.
                if is_single_only(self._stack[1].text):
                    return self._handle_single_only_tag_end()
                if is_parsable(self._stack[1].text):
                    return self._parse(push=False)
                return self._handle_blacklisted_tag()
            elif this == "/" and next == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseSelfclose)
                return self._pop()
            else:
                self._handle_tag_data(data, this)
            self._head += 1
    def _handle_invalid_tag_start(self):
        """Handle the (possible) start of an implicitly closing single tag."""
        reset = self._head + 1
        self._head += 2  # skip over "<" and "/"
        try:
            # Only single-only tags (like <br>) may appear as "</br>".
            if not is_single_only(self.tag_splitter.split(self._read())[0]):
                raise BadRoute()
            tag = self._really_parse_tag()
        except BadRoute:
            self._head = reset
            self._emit_text("</")
        else:
            tag[0].invalid = True  # Set flag of TagOpenOpen
            self._emit_all(tag)

    def _parse_tag(self):
        """Parse an HTML tag at the head of the wikicode string."""
        reset = self._head
        self._head += 1
        try:
            tag = self._really_parse_tag()
        except BadRoute:
            self._head = reset
            self._emit_text("<")
        else:
            self._emit_all(tag)
    def _handle_end(self):
        """Handle the end of the stream of wikitext."""
        # Contexts in which hitting the end of input fails the route:
        fail = (contexts.TEMPLATE | contexts.ARGUMENT | contexts.WIKILINK |
                contexts.HEADING | contexts.COMMENT | contexts.TAG)
        # Contexts that keep an extra stack which must also be discarded:
        double_fail = (contexts.TEMPLATE_PARAM_KEY | contexts.TAG_CLOSE)
        if self._context & fail:
            if self._context & contexts.TAG_BODY:
                if is_single(self._stack[1].text):
                    # Single-supporting tags may be closed implicitly.
                    return self._handle_single_tag_end()
            if self._context & double_fail:
                self._pop()
            self._fail_route()
        return self._pop()
    def _verify_safe(self, this):
        """Make sure we are not trying to write an invalid character.

        Returns ``False`` to fail the current route; may also set
        FAIL_NEXT/FAIL_ON_* context flags to defer the failure decision
        until more input is seen.
        """
        context = self._context
        if context & contexts.FAIL_NEXT:
            return False
        if context & contexts.WIKILINK_TITLE:
            if this == "]" or this == "{":
                # Might be a closure or a template; decide on the next char.
                self._context |= contexts.FAIL_NEXT
            elif this == "\n" or this == "[" or this == "}":
                return False
            return True
        elif context & contexts.TEMPLATE_NAME:
            if this == "{" or this == "}" or this == "[":
                self._context |= contexts.FAIL_NEXT
                return True
            if this == "]":
                return False
            if this == "|":
                return True
            if context & contexts.HAS_TEXT:
                if context & contexts.FAIL_ON_TEXT:
                    # Text after a newline in a template name is invalid.
                    if this is self.END or not this.isspace():
                        return False
                else:
                    if this == "\n":
                        self._context |= contexts.FAIL_ON_TEXT
            elif this is self.END or not this.isspace():
                self._context |= contexts.HAS_TEXT
            return True
        elif context & contexts.TAG_CLOSE:
            return this != "<"
        else:
            # TEMPLATE_PARAM_KEY / ARGUMENT_NAME: watch for brace pairs and
            # equals signs that would make the key/name invalid.
            if context & contexts.FAIL_ON_EQUALS:
                if this == "=":
                    return False
            elif context & contexts.FAIL_ON_LBRACE:
                if this == "{" or (self._read(-1) == self._read(-2) == "{"):
                    if context & contexts.TEMPLATE:
                        self._context |= contexts.FAIL_ON_EQUALS
                    else:
                        self._context |= contexts.FAIL_NEXT
                    return True
                self._context ^= contexts.FAIL_ON_LBRACE
            elif context & contexts.FAIL_ON_RBRACE:
                if this == "}":
                    if context & contexts.TEMPLATE:
                        self._context |= contexts.FAIL_ON_EQUALS
                    else:
                        self._context |= contexts.FAIL_NEXT
                    return True
                self._context ^= contexts.FAIL_ON_RBRACE
            elif this == "{":
                self._context |= contexts.FAIL_ON_LBRACE
            elif this == "}":
                self._context |= contexts.FAIL_ON_RBRACE
            return True
    def _parse(self, context=0, push=True):
        """Parse the wikicode string, using *context* for when to stop.

        The main dispatch loop: reads one segment at a time and routes it to
        the appropriate _parse_*/_handle_* method.  If *push* is ``False``,
        continues on the current stack instead of pushing a new one.
        """
        # Contexts that need a validity check before each write:
        unsafe = (contexts.TEMPLATE_NAME | contexts.WIKILINK_TITLE |
                  contexts.TEMPLATE_PARAM_KEY | contexts.ARGUMENT_NAME |
                  contexts.TAG_CLOSE)
        # Contexts that keep an extra stack to discard on failure:
        double_unsafe = (contexts.TEMPLATE_PARAM_KEY | contexts.TAG_CLOSE)
        if push:
            self._push(context)
        while True:
            this = self._read()
            if self._context & unsafe:
                if not self._verify_safe(this):
                    if self._context & double_unsafe:
                        self._pop()
                    self._fail_route()
            if this not in self.MARKERS:
                # Plain text: no parser behavior can trigger here.
                self._emit_text(this)
                self._head += 1
                continue
            if this is self.END:
                return self._handle_end()
            next = self._read(1)
            if self._context & contexts.COMMENT:
                if this == next == "-" and self._read(2) == ">":
                    return self._pop()
                else:
                    self._emit_text(this)
            elif this == next == "{":
                if self._can_recurse():
                    self._parse_template_or_argument()
                else:
                    self._emit_text("{")
            elif this == "|" and self._context & contexts.TEMPLATE:
                self._handle_template_param()
            elif this == "=" and self._context & contexts.TEMPLATE_PARAM_KEY:
                self._handle_template_param_value()
            elif this == next == "}" and self._context & contexts.TEMPLATE:
                return self._handle_template_end()
            elif this == "|" and self._context & contexts.ARGUMENT_NAME:
                self._handle_argument_separator()
            elif this == next == "}" and self._context & contexts.ARGUMENT:
                if self._read(2) == "}":
                    return self._handle_argument_end()
                else:
                    self._emit_text("}")
            elif this == next == "[":
                if not self._context & contexts.WIKILINK_TITLE and self._can_recurse():
                    self._parse_wikilink()
                else:
                    self._emit_text("[")
            elif this == "|" and self._context & contexts.WIKILINK_TITLE:
                self._handle_wikilink_separator()
            elif this == next == "]" and self._context & contexts.WIKILINK:
                return self._handle_wikilink_end()
            elif this == "=" and not self._global & contexts.GL_HEADING:
                # Headings may only start at the beginning of a line.
                if self._read(-1) in ("\n", self.START):
                    self._parse_heading()
                else:
                    self._emit_text("=")
            elif this == "=" and self._context & contexts.HEADING:
                return self._handle_heading_end()
            elif this == "\n" and self._context & contexts.HEADING:
                self._fail_route()
            elif this == "&":
                self._parse_entity()
            elif this == "<" and next == "!":
                if self._read(2) == self._read(3) == "-":
                    self._parse_comment()
                else:
                    self._emit_text(this)
            elif this == "<" and next == "/" and self._read(2) is not self.END:
                if self._context & contexts.TAG_BODY:
                    self._handle_tag_open_close()
                else:
                    self._handle_invalid_tag_start()
            elif this == "<":
                if not self._context & contexts.TAG_CLOSE and self._can_recurse():
                    self._parse_tag()
                else:
                    self._emit_text("<")
            elif this == ">" and self._context & contexts.TAG_CLOSE:
                return self._handle_tag_close_close()
            else:
                self._emit_text(this)
            self._head += 1
    def tokenize(self, text):
        """Build a list of tokens from a string of wikicode and return it."""
        split = self.regex.split(text)
        # Drop the empty strings re.split() leaves between adjacent markers.
        self._text = [segment for segment in split if segment]
        return self._parse()