A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

995 lines
38 KiB

  1. # -*- coding: utf-8 -*-
  2. #
  3. # Copyright (C) 2012-2013 Ben Kurtovic <ben.kurtovic@verizon.net>
  4. #
  5. # Permission is hereby granted, free of charge, to any person obtaining a copy
  6. # of this software and associated documentation files (the "Software"), to deal
  7. # in the Software without restriction, including without limitation the rights
  8. # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  9. # copies of the Software, and to permit persons to whom the Software is
  10. # furnished to do so, subject to the following conditions:
  11. #
  12. # The above copyright notice and this permission notice shall be included in
  13. # all copies or substantial portions of the Software.
  14. #
  15. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  18. # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20. # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21. # SOFTWARE.
  22. from __future__ import unicode_literals
  23. from math import log
  24. import re
  25. from . import contexts, tokens
  26. from ..compat import htmlentities
  27. from ..definitions import get_html_tag, is_parsable, is_single, is_single_only
  28. __all__ = ["Tokenizer"]
  29. class BadRoute(Exception):
  30. """Raised internally when the current tokenization route is invalid."""
  31. def __init__(self, context=0):
  32. self.context = context
  33. class _TagOpenData(object):
  34. """Stores data about an HTML open tag, like ``<ref name="foo">``."""
  35. CX_NAME = 1 << 0
  36. CX_ATTR_READY = 1 << 1
  37. CX_ATTR_NAME = 1 << 2
  38. CX_ATTR_VALUE = 1 << 3
  39. CX_QUOTED = 1 << 4
  40. CX_NOTE_SPACE = 1 << 5
  41. CX_NOTE_EQUALS = 1 << 6
  42. CX_NOTE_QUOTE = 1 << 7
  43. def __init__(self):
  44. self.context = self.CX_NAME
  45. self.padding_buffer = {"first": "", "before_eq": "", "after_eq": ""}
  46. self.reset = 0
class Tokenizer(object):
    """Creates a list of tokens from a string of wikicode."""
    USES_C = False  # This is the pure-Python tokenizer (no C extension)
    START = object()  # Sentinel returned by _read() before the text's start
    END = object()    # Sentinel returned by _read() past the text's end
    # Characters that can delimit wikicode syntax, plus the two sentinels:
    MARKERS = ["{", "}", "[", "]", "<", ">", "|", "=", "&", "'", "#", "*", ";",
               ":", "/", "-", "\n", START, END]
    MAX_DEPTH = 40       # Maximum nesting of token stacks
    MAX_CYCLES = 100000  # Maximum total number of stack pushes
    # Splits the input into segments around individual marker characters:
    regex = re.compile(r"([{}\[\]<>|=&'#*;:/\\\"\-!\n])", flags=re.IGNORECASE)
    # Splits tag text on runs of whitespace, double quotes, and backslashes:
    tag_splitter = re.compile(r"([\s\"\\]+)")
    def __init__(self):
        self._text = None     # List of text segments being tokenized
        self._head = 0        # Index of the current segment in self._text
        self._stacks = []     # Stack of [token list, context, textbuffer]
        self._global = 0      # Global context flags (e.g. GL_HEADING)
        self._depth = 0       # Current token-stack nesting depth
        self._cycles = 0      # Total number of stack pushes so far
  65. @property
  66. def _stack(self):
  67. """The current token stack."""
  68. return self._stacks[-1][0]
  69. @property
  70. def _context(self):
  71. """The current token context."""
  72. return self._stacks[-1][1]
  73. @_context.setter
  74. def _context(self, value):
  75. self._stacks[-1][1] = value
  76. @property
  77. def _textbuffer(self):
  78. """The current textbuffer."""
  79. return self._stacks[-1][2]
  80. @_textbuffer.setter
  81. def _textbuffer(self, value):
  82. self._stacks[-1][2] = value
  83. def _push(self, context=0):
  84. """Add a new token stack, context, and textbuffer to the list."""
  85. self._stacks.append([[], context, []])
  86. self._depth += 1
  87. self._cycles += 1
  88. def _push_textbuffer(self):
  89. """Push the textbuffer onto the stack as a Text node and clear it."""
  90. if self._textbuffer:
  91. self._stack.append(tokens.Text(text="".join(self._textbuffer)))
  92. self._textbuffer = []
    def _pop(self, keep_context=False):
        """Pop the current stack/context/textbuffer, returning the stack.

        If *keep_context* is ``True``, then we will replace the underlying
        stack's context with the current stack's.
        """
        self._push_textbuffer()
        self._depth -= 1
        if keep_context:
            # Save this stack's context, pop it, then give the context to
            # the stack that is now on top.
            context = self._context
            stack = self._stacks.pop()[0]
            self._context = context
            return stack
        return self._stacks.pop()[0]
  106. def _can_recurse(self):
  107. """Return whether or not our max recursion depth has been exceeded."""
  108. return self._depth < self.MAX_DEPTH and self._cycles < self.MAX_CYCLES
    def _fail_route(self):
        """Fail the current tokenization route.

        Discards the current stack/context/textbuffer and raises
        :py:exc:`~.BadRoute`.
        """
        # Capture the context before popping, since _pop() discards it.
        context = self._context
        self._pop()
        raise BadRoute(context)
    def _emit(self, token):
        """Write a token to the end of the current token stack."""
        # Flush pending text first so token order is preserved.
        self._push_textbuffer()
        self._stack.append(token)
    def _emit_first(self, token):
        """Write a token to the beginning of the current token stack."""
        self._push_textbuffer()
        self._stack.insert(0, token)
    def _emit_text(self, text):
        """Write text to the current textbuffer."""
        self._textbuffer.append(text)
  128. def _emit_all(self, tokenlist):
  129. """Write a series of tokens to the current stack at once."""
  130. if tokenlist and isinstance(tokenlist[0], tokens.Text):
  131. self._emit_text(tokenlist.pop(0).text)
  132. self._push_textbuffer()
  133. self._stack.extend(tokenlist)
    def _emit_text_then_stack(self, text):
        """Pop the current stack, write *text*, and then write the stack."""
        stack = self._pop()
        self._emit_text(text)
        if stack:
            self._emit_all(stack)
        # Step back so the caller's head advance lands correctly.
        self._head -= 1
    def _read(self, delta=0, wrap=False, strict=False):
        """Read the value at a relative point in the wikicode.

        The value is read from :py:attr:`self._head <_head>` plus the value of
        *delta* (which can be negative). If *wrap* is ``False``, we will not
        allow attempts to read from the end of the string if ``self._head +
        delta`` is negative. If *strict* is ``True``, the route will be failed
        (with :py:meth:`_fail_route`) if we try to read from past the end of
        the string; otherwise, :py:attr:`self.END <END>` is returned. If we try
        to read from before the start of the string, :py:attr:`self.START
        <START>` is returned.
        """
        index = self._head + delta
        if index < 0 and (not wrap or abs(index) > len(self._text)):
            # Negative indices are honored only when *wrap* is True and they
            # stay within the text; otherwise signal the start of the string.
            return self.START
        try:
            return self._text[index]
        except IndexError:
            if strict:
                self._fail_route()
            return self.END
  161. def _parse_template(self):
  162. """Parse a template at the head of the wikicode string."""
  163. reset = self._head
  164. try:
  165. template = self._parse(contexts.TEMPLATE_NAME)
  166. except BadRoute:
  167. self._head = reset
  168. raise
  169. self._emit_first(tokens.TemplateOpen())
  170. self._emit_all(template)
  171. self._emit(tokens.TemplateClose())
  172. def _parse_argument(self):
  173. """Parse an argument at the head of the wikicode string."""
  174. reset = self._head
  175. try:
  176. argument = self._parse(contexts.ARGUMENT_NAME)
  177. except BadRoute:
  178. self._head = reset
  179. raise
  180. self._emit_first(tokens.ArgumentOpen())
  181. self._emit_all(argument)
  182. self._emit(tokens.ArgumentClose())
    def _parse_template_or_argument(self):
        """Parse a template or argument at the head of the wikicode string."""
        self._head += 2  # Skip past the "{{" that brought us here
        braces = 2
        while self._read() == "{":
            self._head += 1
            braces += 1
        self._push()
        while braces:
            if braces == 1:
                # A single leftover brace is plain text.
                return self._emit_text_then_stack("{")
            if braces == 2:
                try:
                    self._parse_template()
                except BadRoute:
                    return self._emit_text_then_stack("{{")
                break
            # Prefer the longer construct: an argument ("{{{") consumes three
            # braces; fall back to a template, which consumes two.
            try:
                self._parse_argument()
                braces -= 3
            except BadRoute:
                try:
                    self._parse_template()
                    braces -= 2
                except BadRoute:
                    return self._emit_text_then_stack("{" * braces)
            if braces:
                self._head += 1
        self._emit_all(self._pop())
        if self._context & contexts.FAIL_NEXT:
            self._context ^= contexts.FAIL_NEXT
    def _handle_template_param(self):
        """Handle a template parameter at the head of the string."""
        if self._context & contexts.TEMPLATE_NAME:
            self._context ^= contexts.TEMPLATE_NAME
        elif self._context & contexts.TEMPLATE_PARAM_VALUE:
            self._context ^= contexts.TEMPLATE_PARAM_VALUE
        elif self._context & contexts.TEMPLATE_PARAM_KEY:
            # Flush the previous parameter key's sub-stack.
            self._emit_all(self._pop(keep_context=True))
        self._context |= contexts.TEMPLATE_PARAM_KEY
        self._emit(tokens.TemplateParamSeparator())
        # Each parameter key gets its own stack.
        self._push(self._context)
    def _handle_template_param_value(self):
        """Handle a template parameter's value at the head of the string."""
        self._emit_all(self._pop(keep_context=True))
        # Switch from key context to value context.
        self._context ^= contexts.TEMPLATE_PARAM_KEY
        self._context |= contexts.TEMPLATE_PARAM_VALUE
        self._emit(tokens.TemplateParamEquals())
    def _handle_template_end(self):
        """Handle the end of a template at the head of the string."""
        if self._context & contexts.TEMPLATE_PARAM_KEY:
            # Flush the last parameter key's sub-stack.
            self._emit_all(self._pop(keep_context=True))
        self._head += 1
        return self._pop()
    def _handle_argument_separator(self):
        """Handle the separator between an argument's name and default."""
        # Switch from name context to default-value context.
        self._context ^= contexts.ARGUMENT_NAME
        self._context |= contexts.ARGUMENT_DEFAULT
        self._emit(tokens.ArgumentSeparator())
    def _handle_argument_end(self):
        """Handle the end of an argument at the head of the string."""
        self._head += 2  # Skip the remaining "}}" of the closing "}}}"
        return self._pop()
    def _parse_wikilink(self):
        """Parse an internal wikilink at the head of the wikicode string."""
        self._head += 2  # Skip past the "[["
        reset = self._head - 1
        try:
            wikilink = self._parse(contexts.WIKILINK_TITLE)
        except BadRoute:
            # Not a valid wikilink; emit the brackets as plain text.
            self._head = reset
            self._emit_text("[[")
        else:
            if self._context & contexts.FAIL_NEXT:
                # The route succeeded after all, so clear the pending failure.
                self._context ^= contexts.FAIL_NEXT
            self._emit(tokens.WikilinkOpen())
            self._emit_all(wikilink)
            self._emit(tokens.WikilinkClose())
    def _handle_wikilink_separator(self):
        """Handle the separator between a wikilink's title and its text."""
        # Switch from title context to text context.
        self._context ^= contexts.WIKILINK_TITLE
        self._context |= contexts.WIKILINK_TEXT
        self._emit(tokens.WikilinkSeparator())
    def _handle_wikilink_end(self):
        """Handle the end of a wikilink at the head of the string."""
        self._head += 1  # Skip the second "]" of the closing "]]"
        return self._pop()
    def _really_parse_external_link(self, brackets):
        """Really parse an external link."""
        # NOTE(review): external link parsing is stubbed out in this version;
        # the route always fails, so _parse_external_link() falls back to
        # emitting the bracket/colon as plain text.
        # link = self._parse(contexts.EXT_LINK_URL)
        raise BadRoute()
    def _parse_external_link(self, brackets):
        """Parse an external link at the head of the wikicode string."""
        reset = self._head
        self._head += 1
        try:
            bad_context = self._context & contexts.INVALID_LINK
            if bad_context or not self._can_recurse():
                raise BadRoute()
            link = self._really_parse_external_link(brackets)
        except BadRoute:
            self._head = reset
            if not brackets and self._context & contexts.DL_TERM:
                # The ":" that triggered us may be a description-list colon.
                self._handle_dl_term()
            else:
                self._emit_text(self._read())
        else:
            self._emit(tokens.ExternalLinkOpen(brackets))
            self._emit_all(link)
            self._emit(tokens.ExternalLinkClose())
    def _parse_heading(self):
        """Parse a section heading at the head of the wikicode string."""
        self._global |= contexts.GL_HEADING
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        # Heading levels are capped at six ("======"):
        context = contexts.HEADING_LEVEL_1 << min(best - 1, 5)
        try:
            title, level = self._parse(context)
        except BadRoute:
            # No matching closure; the "="s are plain text.
            self._head = reset + best - 1
            self._emit_text("=" * best)
        else:
            self._emit(tokens.HeadingStart(level=level))
            if level < best:
                # Extra "="s beyond the matched level are literal text.
                self._emit_text("=" * (best - level))
            self._emit_all(title)
            self._emit(tokens.HeadingEnd())
        finally:
            self._global ^= contexts.GL_HEADING
    def _handle_heading_end(self):
        """Handle the end of a section heading at the head of the string."""
        reset = self._head
        self._head += 1
        best = 1
        while self._read() == "=":
            best += 1
            self._head += 1
        # Recover this heading's level from its context bit:
        current = int(log(self._context / contexts.HEADING_LEVEL_1, 2)) + 1
        level = min(current, min(best, 6))
        try:  # Try to check for a heading closure after this one
            after, after_level = self._parse(self._context)
        except BadRoute:
            if level < best:
                # Surplus "="s on the right are literal text.
                self._emit_text("=" * (best - level))
            self._head = reset + best - 1
            return self._pop(), level
        else:  # Found another closure
            self._emit_text("=" * best)
            self._emit_all(after)
            return self._pop(), after_level
    def _really_parse_entity(self):
        """Actually parse an HTML entity and ensure that it is valid."""
        self._emit(tokens.HTMLEntityStart())
        self._head += 1
        this = self._read(strict=True)
        if this == "#":
            numeric = True
            self._emit(tokens.HTMLEntityNumeric())
            self._head += 1
            this = self._read(strict=True)
            if this[0].lower() == "x":
                hexadecimal = True
                self._emit(tokens.HTMLEntityHex(char=this[0]))
                this = this[1:]
                if not this:
                    self._fail_route()
            else:
                hexadecimal = False
        else:
            numeric = hexadecimal = False
        valid = "0123456789abcdefABCDEF" if hexadecimal else "0123456789"
        if not numeric and not hexadecimal:
            # Named entities may contain letters.
            valid += "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
        if not all([char in valid for char in this]):
            self._fail_route()
        self._head += 1
        if self._read() != ";":
            self._fail_route()
        if numeric:
            test = int(this, 16) if hexadecimal else int(this)
            if test < 1 or test > 0x10FFFF:
                # Outside the range of valid Unicode codepoints.
                self._fail_route()
        else:
            if this not in htmlentities.entitydefs:
                # Not a known named entity like "amp" or "nbsp".
                self._fail_route()
        self._emit(tokens.Text(text=this))
        self._emit(tokens.HTMLEntityEnd())
    def _parse_entity(self):
        """Parse an HTML entity at the head of the wikicode string."""
        reset = self._head
        self._push()
        try:
            self._really_parse_entity()
        except BadRoute:
            # Not a valid entity; the "&" is plain text.
            self._head = reset
            self._emit_text(self._read())
        else:
            self._emit_all(self._pop())
    def _parse_comment(self):
        """Parse an HTML comment at the head of the wikicode string."""
        self._head += 4  # Skip past the "<!--"
        reset = self._head - 1
        self._push()
        while True:
            this = self._read()
            if this == self.END:
                # Unclosed comment: emit "<!--" as plain text instead.
                self._pop()
                self._head = reset
                self._emit_text("<!--")
                return
            if this == self._read(1) == "-" and self._read(2) == ">":
                self._emit_first(tokens.CommentStart())
                self._emit(tokens.CommentEnd())
                self._emit_all(self._pop())
                self._head += 2  # Skip the rest of "-->"
                return
            self._emit_text(this)
            self._head += 1
    def _push_tag_buffer(self, data):
        """Write a pending tag attribute from *data* to the stack."""
        if data.context & data.CX_QUOTED:
            self._emit_first(tokens.TagAttrQuote())
            self._emit_all(self._pop())
        buf = data.padding_buffer
        self._emit_first(tokens.TagAttrStart(pad_first=buf["first"],
            pad_before_eq=buf["before_eq"], pad_after_eq=buf["after_eq"]))
        self._emit_all(self._pop())
        # Clear the padding buffers for the next attribute.
        data.padding_buffer = {key: "" for key in data.padding_buffer}
    def _handle_tag_space(self, data, text):
        """Handle whitespace (*text*) inside of an HTML open tag."""
        ctx = data.context
        end_of_value = ctx & data.CX_ATTR_VALUE and not ctx & (data.CX_QUOTED | data.CX_NOTE_QUOTE)
        if end_of_value or (ctx & data.CX_QUOTED and ctx & data.CX_NOTE_SPACE):
            # This whitespace terminates the current attribute.
            self._push_tag_buffer(data)
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_NOTE_SPACE:
            data.context = data.CX_ATTR_READY
        elif ctx & data.CX_ATTR_NAME:
            data.context |= data.CX_NOTE_EQUALS
            data.padding_buffer["before_eq"] += text
        if ctx & data.CX_QUOTED and not ctx & data.CX_NOTE_SPACE:
            # Inside quotes, whitespace belongs to the attribute's value.
            self._emit_text(text)
        elif data.context & data.CX_ATTR_READY:
            data.padding_buffer["first"] += text
        elif data.context & data.CX_ATTR_VALUE:
            data.padding_buffer["after_eq"] += text
  433. def _handle_tag_text(self, text):
  434. """Handle regular *text* inside of an HTML open tag."""
  435. next = self._read(1)
  436. if not self._can_recurse() or text not in self.MARKERS:
  437. self._emit_text(text)
  438. elif text == next == "{":
  439. self._parse_template_or_argument()
  440. elif text == next == "[":
  441. self._parse_wikilink()
  442. elif text == "<":
  443. self._parse_tag()
  444. else:
  445. self._emit_text(text)
    def _handle_tag_data(self, data, text):
        """Handle all sorts of *text* data inside of an HTML open tag."""
        for chunk in self.tag_splitter.split(text):
            if not chunk:
                continue
            if data.context & data.CX_NAME:
                if chunk in self.MARKERS or chunk.isspace():
                    self._fail_route()  # Tags must start with text, not spaces
                data.context = data.CX_NOTE_SPACE
            elif chunk.isspace():
                self._handle_tag_space(data, chunk)
                continue
            elif data.context & data.CX_NOTE_SPACE:
                if data.context & data.CX_QUOTED:
                    # Non-space text after a closing quote: the quoted route
                    # is invalid, so retry the value as unquoted text.
                    data.context = data.CX_ATTR_VALUE
                    self._pop()
                    self._head = data.reset - 1  # Will be auto-incremented
                    return  # Break early
                self._fail_route()
            elif data.context & data.CX_ATTR_READY:
                data.context = data.CX_ATTR_NAME
                self._push(contexts.TAG_ATTR)
            elif data.context & data.CX_ATTR_NAME:
                if chunk == "=":
                    data.context = data.CX_ATTR_VALUE | data.CX_NOTE_QUOTE
                    self._emit(tokens.TagAttrEquals())
                    continue
                if data.context & data.CX_NOTE_EQUALS:
                    # A second name with no "=": the previous attribute had
                    # no value, so start a new one.
                    self._push_tag_buffer(data)
                    data.context = data.CX_ATTR_NAME
                    self._push(contexts.TAG_ATTR)
            elif data.context & data.CX_ATTR_VALUE:
                escaped = self._read(-1) == "\\" and self._read(-2) != "\\"
                if data.context & data.CX_NOTE_QUOTE:
                    data.context ^= data.CX_NOTE_QUOTE
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_QUOTED
                        self._push(self._context)
                        data.reset = self._head
                        continue
                elif data.context & data.CX_QUOTED:
                    if chunk == '"' and not escaped:
                        data.context |= data.CX_NOTE_SPACE
                        continue
            self._handle_tag_text(chunk)
    def _handle_tag_close_open(self, data, token):
        """Handle the closing of a open tag (``<foo>``)."""
        if data.context & (data.CX_ATTR_NAME | data.CX_ATTR_VALUE):
            # Flush an attribute still being read.
            self._push_tag_buffer(data)
        self._emit(token(padding=data.padding_buffer["first"]))
        self._head += 1
    def _handle_tag_open_close(self):
        """Handle the opening of a closing tag (``</foo>``)."""
        self._emit(tokens.TagOpenClose())
        self._push(contexts.TAG_CLOSE)
        self._head += 1
  502. def _handle_tag_close_close(self):
  503. """Handle the ending of a closing tag (``</foo>``)."""
  504. strip = lambda tok: tok.text.rstrip().lower()
  505. closing = self._pop()
  506. if len(closing) != 1 or (not isinstance(closing[0], tokens.Text) or
  507. strip(closing[0]) != strip(self._stack[1])):
  508. self._fail_route()
  509. self._emit_all(closing)
  510. self._emit(tokens.TagCloseClose())
  511. return self._pop()
    def _handle_blacklisted_tag(self):
        """Handle the body of an HTML tag that is parser-blacklisted."""
        while True:
            this, next = self._read(), self._read(1)
            if this is self.END:
                self._fail_route()
            elif this == "<" and next == "/":
                self._handle_tag_open_close()
                self._head += 1
                # Resume the regular parser on the current stack so it can
                # consume the closing tag.
                return self._parse(push=False)
            elif this == "&":
                self._parse_entity()
            else:
                # Everything else is treated as raw text.
                self._emit_text(this)
            self._head += 1
    def _handle_single_only_tag_end(self):
        """Handle the end of an implicitly closing single-only HTML tag."""
        # Replace the TagCloseOpen at the top of the stack with an implicit
        # self-closing token carrying the same padding.
        padding = self._stack.pop().padding
        self._emit(tokens.TagCloseSelfclose(padding=padding, implicit=True))
        self._head -= 1  # Offset displacement done by _handle_tag_close_open
        return self._pop()
    def _handle_single_tag_end(self):
        """Handle the stream end when inside a single-supporting HTML tag."""
        gen = enumerate(self._stack)
        index = next(i for i, t in gen if isinstance(t, tokens.TagCloseOpen))
        padding = self._stack[index].padding
        # Swap the TagCloseOpen for an implicit self-closing token.
        token = tokens.TagCloseSelfclose(padding=padding, implicit=True)
        self._stack[index] = token
        return self._pop()
    def _really_parse_tag(self):
        """Actually parse an HTML tag, starting with the open (``<foo>``)."""
        data = _TagOpenData()
        self._push(contexts.TAG_OPEN)
        self._emit(tokens.TagOpenOpen())
        while True:
            this, next = self._read(), self._read(1)
            # We may exit the open tag unless we are mid-name or mid-quote:
            can_exit = (not data.context & (data.CX_QUOTED | data.CX_NAME) or
                        data.context & data.CX_NOTE_SPACE)
            if this is self.END:
                if self._context & contexts.TAG_ATTR:
                    if data.context & data.CX_QUOTED:
                        # Unclosed attribute quote: reset, don't die
                        data.context = data.CX_ATTR_VALUE
                        self._pop()
                        self._head = data.reset
                        continue
                    self._pop()
                self._fail_route()
            elif this == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseOpen)
                self._context = contexts.TAG_BODY
                if is_single_only(self._stack[1].text):
                    return self._handle_single_only_tag_end()
                if is_parsable(self._stack[1].text):
                    return self._parse(push=False)
                return self._handle_blacklisted_tag()
            elif this == "/" and next == ">" and can_exit:
                self._handle_tag_close_open(data, tokens.TagCloseSelfclose)
                return self._pop()
            else:
                self._handle_tag_data(data, this)
            self._head += 1
    def _handle_invalid_tag_start(self):
        """Handle the (possible) start of an implicitly closing single tag."""
        reset = self._head + 1
        self._head += 2
        try:
            # Only single-only tags (like <br>) may start with "</" here:
            if not is_single_only(self.tag_splitter.split(self._read())[0]):
                raise BadRoute()
            tag = self._really_parse_tag()
        except BadRoute:
            self._head = reset
            self._emit_text("</")
        else:
            tag[0].invalid = True  # Set flag of TagOpenOpen
            self._emit_all(tag)
    def _parse_tag(self):
        """Parse an HTML tag at the head of the wikicode string."""
        reset = self._head
        self._head += 1
        try:
            tag = self._really_parse_tag()
        except BadRoute:
            # Not a valid tag; the "<" is plain text.
            self._head = reset
            self._emit_text("<")
        else:
            self._emit_all(tag)
    def _emit_style_tag(self, tag, markup, body):
        """Write the body of a tag and the tokens that should surround it."""
        self._emit(tokens.TagOpenOpen(wiki_markup=markup))
        self._emit_text(tag)
        self._emit(tokens.TagCloseOpen())
        self._emit_all(body)
        self._emit(tokens.TagOpenClose())
        self._emit_text(tag)
        self._emit(tokens.TagCloseClose())
    def _parse_italics(self):
        """Parse wiki-style italics."""
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_ITALICS)
        except BadRoute as route:
            self._head = reset
            if route.context & contexts.STYLE_PASS_AGAIN:
                # The failed route asked for a second pass; retry with the
                # second-pass flag set.
                stack = self._parse(route.context | contexts.STYLE_SECOND_PASS)
            else:
                return self._emit_text("''")
        self._emit_style_tag("i", "''", stack)
    def _parse_bold(self):
        """Parse wiki-style bold.

        Returns ``True`` if the caller's stack should be popped (second-pass
        failure); otherwise returns ``None``.
        """
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_BOLD)
        except BadRoute:
            self._head = reset
            if self._context & contexts.STYLE_SECOND_PASS:
                self._emit_text("'")
                return True
            elif self._context & contexts.STYLE_ITALICS:
                # Ask the enclosing italics route to try again.
                self._context |= contexts.STYLE_PASS_AGAIN
                self._emit_text("'''")
            else:
                # Treat one tick as text and the other two as italics.
                self._emit_text("'")
                self._parse_italics()
        else:
            self._emit_style_tag("b", "'''", stack)
    def _parse_italics_and_bold(self):
        """Parse wiki-style italics and bold together (i.e., five ticks)."""
        reset = self._head
        try:
            stack = self._parse(contexts.STYLE_BOLD)
        except BadRoute:
            self._head = reset
            try:
                stack = self._parse(contexts.STYLE_ITALICS)
            except BadRoute:
                # Neither worked; all five ticks are plain text.
                self._head = reset
                self._emit_text("'''''")
            else:
                reset = self._head
                try:
                    stack2 = self._parse(contexts.STYLE_BOLD)
                except BadRoute:
                    self._head = reset
                    self._emit_text("'''")
                    self._emit_style_tag("i", "''", stack)
                else:
                    # Bold wraps italics-plus-content.
                    self._push()
                    self._emit_style_tag("i", "''", stack)
                    self._emit_all(stack2)
                    self._emit_style_tag("b", "'''", self._pop())
        else:
            reset = self._head
            try:
                stack2 = self._parse(contexts.STYLE_ITALICS)
            except BadRoute:
                self._head = reset
                self._emit_text("''")
                self._emit_style_tag("b", "'''", stack)
            else:
                # Italics wraps bold-plus-content.
                self._push()
                self._emit_style_tag("b", "'''", stack)
                self._emit_all(stack2)
                self._emit_style_tag("i", "''", self._pop())
    def _parse_style(self):
        """Parse wiki-style formatting (``''``/``'''`` for italics/bold)."""
        self._head += 2
        ticks = 2
        while self._read() == "'":
            self._head += 1
            ticks += 1
        italics = self._context & contexts.STYLE_ITALICS
        bold = self._context & contexts.STYLE_BOLD
        if ticks > 5:
            # More than five ticks: the extras are literal apostrophes.
            self._emit_text("'" * (ticks - 5))
            ticks = 5
        elif ticks == 4:
            # Four ticks: one literal apostrophe plus bold (three ticks).
            self._emit_text("'")
            ticks = 3
        if (italics and ticks in (2, 5)) or (bold and ticks in (3, 5)):
            # These ticks close the style we are currently inside.
            if ticks == 5:
                self._head -= 3 if italics else 2
            return self._pop()
        elif not self._can_recurse():
            if ticks == 3:
                if self._context & contexts.STYLE_SECOND_PASS:
                    self._emit_text("'")
                    return self._pop()
                self._context |= contexts.STYLE_PASS_AGAIN
            self._emit_text("'" * ticks)
        elif ticks == 2:
            self._parse_italics()
        elif ticks == 3:
            if self._parse_bold():
                return self._pop()
        elif ticks == 5:
            self._parse_italics_and_bold()
        self._head -= 1
    def _handle_list_marker(self):
        """Handle a list marker at the head (``#``, ``*``, ``;``, ``:``)."""
        markup = self._read()
        if markup == ";":
            # A ";" starts a description-list term.
            self._context |= contexts.DL_TERM
        self._emit(tokens.TagOpenOpen(wiki_markup=markup))
        self._emit_text(get_html_tag(markup))
        self._emit(tokens.TagCloseSelfclose())
    def _handle_list(self):
        """Handle a wiki-style list (``#``, ``*``, ``;``, ``:``)."""
        self._handle_list_marker()
        # Consume any additional markers for nested lists (e.g. "#*:").
        while self._read(1) in ("#", "*", ";", ":"):
            self._head += 1
            self._handle_list_marker()
    def _handle_hr(self):
        """Handle a wiki-style horizontal rule (``----``) in the string."""
        length = 4
        self._head += 3
        # Extra dashes become part of the rule's markup.
        while self._read(1) == "-":
            length += 1
            self._head += 1
        self._emit(tokens.TagOpenOpen(wiki_markup="-" * length))
        self._emit_text("hr")
        self._emit(tokens.TagCloseSelfclose())
    def _handle_dl_term(self):
        """Handle the term in a description list (``foo`` in ``;foo:bar``)."""
        self._context ^= contexts.DL_TERM
        if self._read() == ":":
            self._handle_list_marker()
        else:
            self._emit_text("\n")
    def _handle_end(self):
        """Handle the end of the stream of wikitext."""
        if self._context & contexts.FAIL:
            if self._context & contexts.TAG_BODY:
                if is_single(self._stack[1].text):
                    # Tags like <li> may close implicitly at stream end.
                    return self._handle_single_tag_end()
            if self._context & contexts.DOUBLE:
                # Also discard the stack underneath this one.
                self._pop()
            self._fail_route()
        return self._pop()
    def _verify_safe(self, this):
        """Make sure we are not trying to write an invalid character.

        Returns ``False`` to fail the route; may also set FAIL_* flags on
        the current context so a later character fails it instead.
        """
        context = self._context
        if context & contexts.FAIL_NEXT:
            return False
        if context & contexts.WIKILINK:
            if context & contexts.WIKILINK_TEXT:
                return not (this == self._read(1) == "[")
            elif this == "]" or this == "{":
                self._context |= contexts.FAIL_NEXT
            elif this == "\n" or this == "[" or this == "}":
                return False
            return True
        elif context & contexts.EXT_LINK_TITLE:
            return this != "\n"
        elif context & contexts.TEMPLATE_NAME:
            if this == "{" or this == "}" or this == "[":
                self._context |= contexts.FAIL_NEXT
                return True
            if this == "]":
                return False
            if this == "|":
                return True
            if context & contexts.HAS_TEXT:
                if context & contexts.FAIL_ON_TEXT:
                    if this is self.END or not this.isspace():
                        # Non-space text after a newline: invalid name.
                        return False
                else:
                    if this == "\n":
                        self._context |= contexts.FAIL_ON_TEXT
            elif this is self.END or not this.isspace():
                self._context |= contexts.HAS_TEXT
            return True
        elif context & contexts.TAG_CLOSE:
            return this != "<"
        else:
            if context & contexts.FAIL_ON_EQUALS:
                if this == "=":
                    return False
            elif context & contexts.FAIL_ON_LBRACE:
                if this == "{" or (self._read(-1) == self._read(-2) == "{"):
                    if context & contexts.TEMPLATE:
                        self._context |= contexts.FAIL_ON_EQUALS
                    else:
                        self._context |= contexts.FAIL_NEXT
                    return True
                self._context ^= contexts.FAIL_ON_LBRACE
            elif context & contexts.FAIL_ON_RBRACE:
                if this == "}":
                    if context & contexts.TEMPLATE:
                        self._context |= contexts.FAIL_ON_EQUALS
                    else:
                        self._context |= contexts.FAIL_NEXT
                    return True
                self._context ^= contexts.FAIL_ON_RBRACE
            elif this == "{":
                self._context |= contexts.FAIL_ON_LBRACE
            elif this == "}":
                self._context |= contexts.FAIL_ON_RBRACE
            return True
    def _parse(self, context=0, push=True):
        """Parse the wikicode string, using *context* for when to stop."""
        if push:
            self._push(context)
        while True:
            this = self._read()
            if self._context & contexts.UNSAFE:
                if not self._verify_safe(this):
                    if self._context & contexts.DOUBLE:
                        self._pop()
                    self._fail_route()
            if this not in self.MARKERS:
                # Ordinary text: buffer it and move on.
                self._emit_text(this)
                self._head += 1
                continue
            if this is self.END:
                return self._handle_end()
            next = self._read(1)
            if this == next == "{":
                if self._can_recurse():
                    self._parse_template_or_argument()
                else:
                    self._emit_text("{")
            elif this == "|" and self._context & contexts.TEMPLATE:
                self._handle_template_param()
            elif this == "=" and self._context & contexts.TEMPLATE_PARAM_KEY:
                self._handle_template_param_value()
            elif this == next == "}" and self._context & contexts.TEMPLATE:
                return self._handle_template_end()
            elif this == "|" and self._context & contexts.ARGUMENT_NAME:
                self._handle_argument_separator()
            elif this == next == "}" and self._context & contexts.ARGUMENT:
                if self._read(2) == "}":
                    return self._handle_argument_end()
                else:
                    self._emit_text("}")
            elif this == next == "[" and self._can_recurse():
                if not self._context & contexts.INVALID_LINK:
                    self._parse_wikilink()
                else:
                    self._emit_text("[")
            elif this == "|" and self._context & contexts.WIKILINK_TITLE:
                self._handle_wikilink_separator()
            elif this == next == "]" and self._context & contexts.WIKILINK:
                return self._handle_wikilink_end()
            elif this == "[":
                self._parse_external_link(True)
            elif this == ":" and self._read(-1) not in self.MARKERS:
                self._parse_external_link(False)
            elif this == "]" and self._context & contexts.EXT_LINK_TITLE:
                return self._pop()
            elif this == "=" and not self._global & contexts.GL_HEADING:
                # Headings only start at the beginning of a line.
                if self._read(-1) in ("\n", self.START):
                    self._parse_heading()
                else:
                    self._emit_text("=")
            elif this == "=" and self._context & contexts.HEADING:
                return self._handle_heading_end()
            elif this == "\n" and self._context & contexts.HEADING:
                self._fail_route()
            elif this == "&":
                self._parse_entity()
            elif this == "<" and next == "!":
                if self._read(2) == self._read(3) == "-":
                    self._parse_comment()
                else:
                    self._emit_text(this)
            elif this == "<" and next == "/" and self._read(2) is not self.END:
                if self._context & contexts.TAG_BODY:
                    self._handle_tag_open_close()
                else:
                    self._handle_invalid_tag_start()
            elif this == "<" and not self._context & contexts.TAG_CLOSE:
                if self._can_recurse():
                    self._parse_tag()
                else:
                    self._emit_text("<")
            elif this == ">" and self._context & contexts.TAG_CLOSE:
                return self._handle_tag_close_close()
            elif this == next == "'":
                result = self._parse_style()
                if result is not None:
                    return result
            elif self._read(-1) in ("\n", self.START):
                # Line-initial markup: lists and horizontal rules.
                if this in ("#", "*", ";", ":"):
                    self._handle_list()
                elif this == next == self._read(2) == self._read(3) == "-":
                    self._handle_hr()
                else:
                    self._emit_text(this)
            elif this in ("\n", ":") and self._context & contexts.DL_TERM:
                self._handle_dl_term()
            else:
                self._emit_text(this)
            self._head += 1
  906. def tokenize(self, text):
  907. """Build a list of tokens from a string of wikicode and return it."""
  908. split = self.regex.split(text)
  909. self._text = [segment for segment in split if segment]
  910. return self._parse()