/*
Tokenizer for MWParserFromHell
Copyright (C) 2012 Ben Kurtovic <ben.kurtovic@verizon.net>

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

#include "tokenizer.h"

static PyObject*
Tokenizer_new(PyTypeObject* type, PyObject* args, PyObject* kwds)
{
    Tokenizer* self = (Tokenizer*) type->tp_alloc(type, 0);
    return (PyObject*) self;
}

static void
Tokenizer_dealloc(Tokenizer* self)
{
    Py_XDECREF(self->text);
    Py_XDECREF(self->stacks);
    Py_XDECREF(self->topstack);
    self->ob_type->tp_free((PyObject*) self);
}

static int
Tokenizer_init(Tokenizer* self, PyObject* args, PyObject* kwds)
{
    static char* kwlist[] = {NULL};
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist))
        return -1;
    self->text = Py_None;
    self->topstack = Py_None;
    Py_INCREF(Py_None);
    Py_INCREF(Py_None);
    self->stacks = PyList_New(0);
    if (!self->stacks) {
        Py_DECREF(self);
        return -1;
    }
    self->head = 0;
    self->length = 0;
    self->global = 0;
    return 0;
}
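
/*
Each stack entry created by Tokenizer_push() is a three-item list of
[token list, context flags, textbuffer]. The Tokenizer_STACK, Tokenizer_CONTEXT,
and Tokenizer_TEXTBUFFER accessors used below (presumably macros from
tokenizer.h) index into self->topstack accordingly.
*/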
static int
Tokenizer_set_context(Tokenizer* self, Py_ssize_t value)
{
    if (PyList_SetItem(self->topstack, 1, PyInt_FromSsize_t(value)))
        return -1;
    return 0;
}

static int
Tokenizer_set_textbuffer(Tokenizer* self, PyObject* value)
{
    if (PyList_SetItem(self->topstack, 2, value))
        return -1;
    return 0;
}

/*
Add a new token stack, context, and textbuffer to the list.
*/
static int
Tokenizer_push(Tokenizer* self, Py_ssize_t context)
{
    PyObject* top = PyList_New(3);
    if (!top) return -1;
    PyList_SET_ITEM(top, 0, PyList_New(0));
    PyList_SET_ITEM(top, 1, PyInt_FromSsize_t(context));
    PyList_SET_ITEM(top, 2, PyList_New(0));
    Py_XDECREF(self->topstack);
    self->topstack = top;
    if (PyList_Append(self->stacks, top))
        return -1;
    return 0;
}

/*
Push the textbuffer onto the stack as a Text node and clear it.
*/
static int
Tokenizer_push_textbuffer(Tokenizer* self)
{
    if (PySequence_Fast_GET_SIZE(Tokenizer_TEXTBUFFER(self)) > 0) {
        PyObject* text = PyUnicode_Join(EMPTY, Tokenizer_TEXTBUFFER(self));
        if (!text) return -1;
        PyObject* class = PyObject_GetAttrString(tokens, "Text");
        if (!class) {
            Py_DECREF(text);
            return -1;
        }
        PyObject* kwargs = PyDict_New();
        if (!kwargs) {
            Py_DECREF(class);
            Py_DECREF(text);
            return -1;
        }
        PyDict_SetItemString(kwargs, "text", text);
        Py_DECREF(text);
        PyObject* token = PyObject_Call(class, NOARGS, kwargs);
        Py_DECREF(class);
        Py_DECREF(kwargs);
        if (!token) return -1;
        if (PyList_Append(Tokenizer_STACK(self), token)) {
            Py_DECREF(token);
            return -1;
        }
        Py_DECREF(token);
        if (Tokenizer_set_textbuffer(self, PyList_New(0)))
            return -1;
    }
    return 0;
}

static int
Tokenizer_delete_top_of_stack(Tokenizer* self)
{
    if (PySequence_DelItem(self->stacks, -1))
        return -1;
    Py_DECREF(self->topstack);
    Py_ssize_t size = PySequence_Fast_GET_SIZE(self->stacks);
    if (size > 0) {
        PyObject* top = PySequence_Fast_GET_ITEM(self->stacks, size - 1);
        self->topstack = top;
        Py_INCREF(top);
    }
    else {
        self->topstack = NULL;
    }
    return 0;
}
/*
Pop the current stack/context/textbuffer, returning the stack.
*/
static PyObject*
Tokenizer_pop(Tokenizer* self)
{
    if (Tokenizer_push_textbuffer(self))
        return NULL;
    PyObject* stack = Tokenizer_STACK(self);
    Py_INCREF(stack);
    if (Tokenizer_delete_top_of_stack(self))
        return NULL;
    return stack;
}
/*
Pop the current stack/context/textbuffer, returning the stack. We will also
replace the underlying stack's context with the current stack's.
*/
static PyObject*
Tokenizer_pop_keeping_context(Tokenizer* self)
{
    if (Tokenizer_push_textbuffer(self))
        return NULL;
    PyObject* stack = Tokenizer_STACK(self);
    PyObject* context = Tokenizer_CONTEXT(self);
    Py_INCREF(stack);
    Py_INCREF(context);
    if (Tokenizer_delete_top_of_stack(self))
        return NULL;
    if (PyList_SetItem(self->topstack, 1, context))
        return NULL;
    return stack;
}
/*
Fail the current tokenization route. Discards the current
stack/context/textbuffer and raises a BadRoute exception.
*/
static void*
Tokenizer_fail_route(Tokenizer* self)
{
    PyObject* stack = Tokenizer_pop(self);
    Py_XDECREF(stack);
    FAIL_ROUTE();
    return NULL;
}
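
/*
Callers recover from a failed route by checking BAD_ROUTE, calling
RESET_ROUTE(), restoring self->head to a previously saved position, and
writing the original markup back out as plain text.
*/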
/*
Write a token to the end of the current token stack.
*/
static int
Tokenizer_write(Tokenizer* self, PyObject* token)
{
    if (Tokenizer_push_textbuffer(self))
        return -1;
    if (PyList_Append(Tokenizer_STACK(self), token))
        return -1;
    return 0;
}

/*
Write a token to the beginning of the current token stack.
*/
static int
Tokenizer_write_first(Tokenizer* self, PyObject* token)
{
    if (Tokenizer_push_textbuffer(self))
        return -1;
    if (PyList_Insert(Tokenizer_STACK(self), 0, token))
        return -1;
    return 0;
}

/*
Write text to the current textbuffer.
*/
static int
Tokenizer_write_text(Tokenizer* self, PyObject* text)
{
    if (PyList_Append(Tokenizer_TEXTBUFFER(self), text))
        return -1;
    return 0;
}

/*
Write a series of tokens to the current stack at once.
*/
static int
Tokenizer_write_all(Tokenizer* self, PyObject* tokenlist)
{
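    /* If the incoming list begins with a Text token, move its contents into
       the current textbuffer so that adjacent text collapses into a single
       Text token. */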
    if (PySequence_Fast_GET_SIZE(tokenlist) > 0) {
        PyObject* token = PySequence_Fast_GET_ITEM(tokenlist, 0);
        PyObject* class = PyObject_GetAttrString(tokens, "Text");
        if (!class) return -1;
        PyObject* text;
        switch (PyObject_IsInstance(token, class)) {
            case 0:
                break;
            case 1:
                text = PyObject_GetAttrString(token, "text");
                if (!text) {
                    Py_DECREF(class);
                    return -1;
                }
                if (PySequence_DelItem(tokenlist, 0)) {
                    Py_DECREF(text);
                    Py_DECREF(class);
                    return -1;
                }
                if (Tokenizer_write_text(self, text)) {
                    Py_DECREF(text);
                    Py_DECREF(class);
                    return -1;
                }
                Py_DECREF(text);
                break;
            case -1:
                Py_DECREF(class);
                return -1;
        }
        Py_DECREF(class);
    }
    if (Tokenizer_push_textbuffer(self))
        return -1;
    PyObject* stack = Tokenizer_STACK(self);
    Py_ssize_t size = PySequence_Fast_GET_SIZE(stack);
    if (PyList_SetSlice(stack, size, size, tokenlist))
        return -1;
    return 0;
}
/*
Pop the current stack, write text, and then write the stack.
*/
static int
Tokenizer_write_text_then_stack(Tokenizer* self, PyObject* text)
{
    PyObject* stack = Tokenizer_pop(self);
    if (Tokenizer_write_text(self, text)) {
        Py_XDECREF(stack);
        return -1;
    }
    if (stack) {
        if (PySequence_Fast_GET_SIZE(stack) > 0) {
            if (Tokenizer_write_all(self, stack)) {
                Py_DECREF(stack);
                return -1;
            }
        }
        Py_DECREF(stack);
    }
    self->head--;
    return 0;
}

/*
Read the value at a relative point in the wikicode, forwards.
*/
static PyObject*
Tokenizer_read(Tokenizer* self, Py_ssize_t delta)
{
    Py_ssize_t index = self->head + delta;
    if (index >= self->length)
        return EMPTY;
    return PySequence_Fast_GET_ITEM(self->text, index);
}

/*
Read the value at a relative point in the wikicode, backwards.
*/
static PyObject*
Tokenizer_read_backwards(Tokenizer* self, Py_ssize_t delta)
{
    if (delta > self->head)
        return EMPTY;
    Py_ssize_t index = self->head - delta;
    return PySequence_Fast_GET_ITEM(self->text, index);
}

/*
Parse a template or argument at the head of the wikicode string.
*/
static int
Tokenizer_parse_template_or_argument(Tokenizer* self)
{
    self->head += 2;
    unsigned int braces = 2, i;
    while (*Tokenizer_READ(self, 0) == *"{") {
        self->head++;
        braces++;
    }
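    /* Consume the run of braces greedily: three braces may open an argument,
       two a template, and any single leftover brace is written back out as
       plain text. */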
    if (Tokenizer_push(self, 0))
        return -1;
    while (braces) {
        if (braces == 1) {
            PyObject* text = PyUnicode_FromString("{");
            if (Tokenizer_write_text_then_stack(self, text)) {
                Py_XDECREF(text);
                return -1;
            }
            Py_XDECREF(text);
            return 0;
        }
        if (braces == 2) {
            if (Tokenizer_parse_template(self))
                return -1;
            if (BAD_ROUTE) {
                RESET_ROUTE();
                PyObject* text = PyUnicode_FromString("{{");
                if (Tokenizer_write_text_then_stack(self, text)) {
                    Py_XDECREF(text);
                    return -1;
                }
                Py_XDECREF(text);
                return 0;
            }
            break;
        }
        if (Tokenizer_parse_argument(self))
            return -1;
        if (BAD_ROUTE) {
            RESET_ROUTE();
            if (Tokenizer_parse_template(self))
                return -1;
            if (BAD_ROUTE) {
                RESET_ROUTE();
                char bracestr[braces];
                for (i = 0; i < braces; i++) bracestr[i] = *"{";
                PyObject* text = PyUnicode_FromStringAndSize(bracestr, braces);
                if (Tokenizer_write_text_then_stack(self, text)) {
                    Py_XDECREF(text);
                    return -1;
                }
                Py_XDECREF(text);
                return 0;
            }
            else {
                braces -= 2;
            }
        }
        else {
            braces -= 3;
        }
        if (braces) {
            self->head++;
        }
    }
    PyObject* tokenlist = Tokenizer_pop(self);
    if (!tokenlist) return -1;
    if (Tokenizer_write_all(self, tokenlist)) {
        Py_DECREF(tokenlist);
        return -1;
    }
    Py_DECREF(tokenlist);
    return 0;
}
/*
Parse a template at the head of the wikicode string.
*/
static int
Tokenizer_parse_template(Tokenizer* self)
{
    PyObject *template, *class, *token;
    Py_ssize_t reset = self->head;
    template = Tokenizer_parse(self, LC_TEMPLATE_NAME);
    if (BAD_ROUTE) {
        self->head = reset;
        return 0;
    }
    if (!template) return -1;
    class = PyObject_GetAttrString(tokens, "TemplateOpen");
    if (!class) {
        Py_DECREF(template);
        return -1;
    }
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) {
        Py_DECREF(template);
        return -1;
    }
    if (Tokenizer_write_first(self, token)) {
        Py_DECREF(token);
        Py_DECREF(template);
        return -1;
    }
    Py_DECREF(token);
    if (Tokenizer_write_all(self, template)) {
        Py_DECREF(template);
        return -1;
    }
    Py_DECREF(template);
    class = PyObject_GetAttrString(tokens, "TemplateClose");
    if (!class) return -1;
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}
/*
Parse an argument at the head of the wikicode string.
*/
static int
Tokenizer_parse_argument(Tokenizer* self)
{
    PyObject *argument, *class, *token;
    Py_ssize_t reset = self->head;
    argument = Tokenizer_parse(self, LC_ARGUMENT_NAME);
    if (BAD_ROUTE) {
        self->head = reset;
        return 0;
    }
    if (!argument) return -1;
    class = PyObject_GetAttrString(tokens, "ArgumentOpen");
    if (!class) {
        Py_DECREF(argument);
        return -1;
    }
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) {
        Py_DECREF(argument);
        return -1;
    }
    if (Tokenizer_write_first(self, token)) {
        Py_DECREF(token);
        Py_DECREF(argument);
        return -1;
    }
    Py_DECREF(token);
    if (Tokenizer_write_all(self, argument)) {
        Py_DECREF(argument);
        return -1;
    }
    Py_DECREF(argument);
    class = PyObject_GetAttrString(tokens, "ArgumentClose");
    if (!class) return -1;
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}
/*
Verify that there are no unsafe characters in the current stack. The route
will be failed if the name contains any element of unsafes in it (not
merely at the beginning or end). This is used when parsing a template name
or parameter key, which cannot contain newlines.
*/
static int
Tokenizer_verify_safe(Tokenizer* self, const char* unsafes[])
{
    if (Tokenizer_push_textbuffer(self))
        return -1;
    PyObject* stack = Tokenizer_STACK(self);
    if (stack) {
        PyObject* textlist = PyList_New(0);
        if (!textlist) return -1;
        PyObject* class = PyObject_GetAttrString(tokens, "Text");
        if (!class) {
            Py_DECREF(textlist);
            return -1;
        }
        int i;
        Py_ssize_t length = PySequence_Fast_GET_SIZE(stack);
        PyObject *token, *textdata;
        for (i = 0; i < length; i++) {
            token = PySequence_Fast_GET_ITEM(stack, i);
            switch (PyObject_IsInstance(token, class)) {
                case 0:
                    break;
                case 1:
                    textdata = PyObject_GetAttrString(token, "text");
                    if (!textdata) {
                        Py_DECREF(textlist);
                        Py_DECREF(class);
                        return -1;
                    }
                    if (PyList_Append(textlist, textdata)) {
                        Py_DECREF(textlist);
                        Py_DECREF(class);
                        Py_DECREF(textdata);
                        return -1;
                    }
                    Py_DECREF(textdata);
                    break;
                case -1:
                    Py_DECREF(textlist);
                    Py_DECREF(class);
                    return -1;
            }
        }
        Py_DECREF(class);
        PyObject* text = PyUnicode_Join(EMPTY, textlist);
        if (!text) {
            Py_DECREF(textlist);
            return -1;
        }
        Py_DECREF(textlist);
        PyObject* stripped = PyObject_CallMethod(text, "strip", NULL);
        if (!stripped) {
            Py_DECREF(text);
            return -1;
        }
        Py_DECREF(text);
        const char* unsafe_char;
        PyObject* unsafe;
        i = 0;
        while (1) {
            unsafe_char = unsafes[i];
            if (!unsafe_char) break;
            unsafe = PyUnicode_FromString(unsafe_char);
            if (!unsafe) {
                Py_DECREF(stripped);
                return -1;
            }
            switch (PyUnicode_Contains(stripped, unsafe)) {
                case 0:
                    Py_DECREF(unsafe);
                    break;
                case 1:
                    Tokenizer_fail_route(self);
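                    /* fall through: a failed route shares the error cleanup */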
                case -1:
                    Py_DECREF(stripped);
                    Py_DECREF(unsafe);
                    return -1;
            }
            i++;
        }
        Py_DECREF(stripped);
    }
    return 0;
}
/*
Handle a template parameter at the head of the string.
*/
static int
Tokenizer_handle_template_param(Tokenizer* self)
{
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    if (context & LC_TEMPLATE_NAME) {
        const char* unsafes[] = {"\n", "{", "}", "[", "]", NULL};
        if (Tokenizer_verify_safe(self, unsafes))
            return -1;
        if (BAD_ROUTE) return -1;
        if (Tokenizer_set_context(self, context ^ LC_TEMPLATE_NAME))
            return -1;
    }
    else if (context & LC_TEMPLATE_PARAM_VALUE) {
        if (Tokenizer_set_context(self, context ^ LC_TEMPLATE_PARAM_VALUE))
            return -1;
    }
    if (context & LC_TEMPLATE_PARAM_KEY) {
        PyObject* stack = Tokenizer_pop_keeping_context(self);
        if (!stack) return -1;
        if (Tokenizer_write_all(self, stack)) {
            Py_DECREF(stack);
            return -1;
        }
        Py_DECREF(stack);
    }
    else {
        if (Tokenizer_set_context(self, context | LC_TEMPLATE_PARAM_KEY))
            return -1;
    }
    PyObject* class = PyObject_GetAttrString(tokens, "TemplateParamSeparator");
    if (!class) return -1;
    PyObject* token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    if (Tokenizer_push(self, Tokenizer_CONTEXT_VAL(self)))
        return -1;
    return 0;
}
/*
Handle a template parameter's value at the head of the string.
*/
static int
Tokenizer_handle_template_param_value(Tokenizer* self)
{
    const char* unsafes[] = {"\n", "{{", "}}", NULL};
    if (Tokenizer_verify_safe(self, unsafes)) {
        if (BAD_ROUTE) {
            PyObject* stack = Tokenizer_pop(self);
            Py_XDECREF(stack);
        }
        return -1;
    }
    PyObject* stack = Tokenizer_pop_keeping_context(self);
    if (!stack) return -1;
    if (Tokenizer_write_all(self, stack)) {
        Py_DECREF(stack);
        return -1;
    }
    Py_DECREF(stack);
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    context ^= LC_TEMPLATE_PARAM_KEY;
    context |= LC_TEMPLATE_PARAM_VALUE;
    if (Tokenizer_set_context(self, context))
        return -1;
    PyObject* class = PyObject_GetAttrString(tokens, "TemplateParamEquals");
    if (!class) return -1;
    PyObject* token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}
/*
Handle the end of a template at the head of the string.
*/
static PyObject*
Tokenizer_handle_template_end(Tokenizer* self)
{
    PyObject* stack;
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    if (context & LC_TEMPLATE_NAME) {
        const char* unsafes[] = {"\n", "{", "}", "[", "]", NULL};
        if (Tokenizer_verify_safe(self, unsafes))
            return NULL;
    }
    else if (context & LC_TEMPLATE_PARAM_KEY) {
        stack = Tokenizer_pop_keeping_context(self);
        if (!stack) return NULL;
        if (Tokenizer_write_all(self, stack)) {
            Py_DECREF(stack);
            return NULL;
        }
        Py_DECREF(stack);
    }
    self->head++;
    stack = Tokenizer_pop(self);
    return stack;
}

/*
Handle the separator between an argument's name and default.
*/
static int
Tokenizer_handle_argument_separator(Tokenizer* self)
{
    const char* unsafes[] = {"\n", "{{", "}}", NULL};
    if (Tokenizer_verify_safe(self, unsafes))
        return -1;
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    context ^= LC_ARGUMENT_NAME;
    context |= LC_ARGUMENT_DEFAULT;
    if (Tokenizer_set_context(self, context))
        return -1;
    PyObject* class = PyObject_GetAttrString(tokens, "ArgumentSeparator");
    if (!class) return -1;
    PyObject* token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}

/*
Handle the end of an argument at the head of the string.
*/
static PyObject*
Tokenizer_handle_argument_end(Tokenizer* self)
{
    if (Tokenizer_CONTEXT_VAL(self) & LC_ARGUMENT_NAME) {
        const char* unsafes[] = {"\n", "{{", "}}", NULL};
        if (Tokenizer_verify_safe(self, unsafes))
            return NULL;
    }
    self->head += 2;
    PyObject* stack = Tokenizer_pop(self);
    return stack;
}
/*
Parse an internal wikilink at the head of the wikicode string.
*/
static int
Tokenizer_parse_wikilink(Tokenizer* self)
{
    self->head += 2;
    Py_ssize_t reset = self->head - 1;
    PyObject *class, *token;
    PyObject *wikilink = Tokenizer_parse(self, LC_WIKILINK_TITLE);
    if (BAD_ROUTE) {
        RESET_ROUTE();
        self->head = reset;
        PyObject* text = PyUnicode_FromString("[[");
        if (!text) return -1;
        if (Tokenizer_write_text(self, text)) {
            Py_DECREF(text);
            return -1;
        }
        Py_DECREF(text);
        return 0;
    }
    if (!wikilink) return -1;
    class = PyObject_GetAttrString(tokens, "WikilinkOpen");
    if (!class) {
        Py_DECREF(wikilink);
        return -1;
    }
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) {
        Py_DECREF(wikilink);
        return -1;
    }
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        Py_DECREF(wikilink);
        return -1;
    }
    Py_DECREF(token);
    if (Tokenizer_write_all(self, wikilink)) {
        Py_DECREF(wikilink);
        return -1;
    }
    Py_DECREF(wikilink);
    class = PyObject_GetAttrString(tokens, "WikilinkClose");
    if (!class) return -1;
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}
/*
Handle the separator between a wikilink's title and its text.
*/
static int
Tokenizer_handle_wikilink_separator(Tokenizer* self)
{
    const char* unsafes[] = {"\n", "{", "}", "[", "]", NULL};
    if (Tokenizer_verify_safe(self, unsafes))
        return -1;
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    context ^= LC_WIKILINK_TITLE;
    context |= LC_WIKILINK_TEXT;
    if (Tokenizer_set_context(self, context))
        return -1;
    PyObject* class = PyObject_GetAttrString(tokens, "WikilinkSeparator");
    if (!class) return -1;
    PyObject* token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    return 0;
}

/*
Handle the end of a wikilink at the head of the string.
*/
static PyObject*
Tokenizer_handle_wikilink_end(Tokenizer* self)
{
    if (Tokenizer_CONTEXT_VAL(self) & LC_WIKILINK_TITLE) {
        const char* unsafes[] = {"\n", "{", "}", "[", "]", NULL};
        if (Tokenizer_verify_safe(self, unsafes))
            return NULL;
    }
    self->head += 1;
    PyObject* stack = Tokenizer_pop(self);
    return stack;
}
/*
Parse a section heading at the head of the wikicode string.
*/
static int
Tokenizer_parse_heading(Tokenizer* self)
{
    self->global |= GL_HEADING;
    Py_ssize_t reset = self->head;
    self->head += 1;
    Py_ssize_t best = 1;
    PyObject* text;
    int i;
    while (*Tokenizer_READ(self, 0) == *"=") {
        best++;
        self->head++;
    }
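    /* A heading's level is encoded as a single context bit:
       LC_HEADING_LEVEL_1 << (level - 1), capped at level 6. */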
    Py_ssize_t context = LC_HEADING_LEVEL_1 << (best > 5 ? 5 : best - 1);
    HeadingData* heading = (HeadingData*) Tokenizer_parse(self, context);
    if (BAD_ROUTE) {
        RESET_ROUTE();
        self->head = reset + best - 1;
        char blocks[best];
        for (i = 0; i < best; i++) blocks[i] = *"=";
        text = PyUnicode_FromStringAndSize(blocks, best);
        if (!text) return -1;
        if (Tokenizer_write_text_then_stack(self, text)) {
            Py_DECREF(text);
            return -1;
        }
        Py_DECREF(text);
        self->global ^= GL_HEADING;
        return 0;
    }
    if (!heading) return -1;
    PyObject* level = PyInt_FromSsize_t(heading->level);
    if (!level) {
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    PyObject* class = PyObject_GetAttrString(tokens, "HeadingStart");
    if (!class) {
        Py_DECREF(level);
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    PyObject* kwargs = PyDict_New();
    if (!kwargs) {
        Py_DECREF(class);
        Py_DECREF(level);
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    PyDict_SetItemString(kwargs, "level", level);
    Py_DECREF(level);
    PyObject* token = PyObject_Call(class, NOARGS, kwargs);
    Py_DECREF(class);
    Py_DECREF(kwargs);
    if (!token) {
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    Py_DECREF(token);
    if (heading->level < best) {
        Py_ssize_t diff = best - heading->level;
        char diffblocks[diff];
        for (i = 0; i < diff; i++) diffblocks[i] = *"=";
        PyObject* text = PyUnicode_FromStringAndSize(diffblocks, diff);
        if (!text) {
            Py_DECREF(heading->title);
            free(heading);
            return -1;
        }
        if (Tokenizer_write_text_then_stack(self, text)) {
            Py_DECREF(text);
            Py_DECREF(heading->title);
            free(heading);
            return -1;
        }
        Py_DECREF(text);
    }
    if (Tokenizer_write_all(self, heading->title)) {
        Py_DECREF(heading->title);
        free(heading);
        return -1;
    }
    Py_DECREF(heading->title);
    free(heading);
    class = PyObject_GetAttrString(tokens, "HeadingEnd");
    if (!class) return -1;
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    self->global ^= GL_HEADING;
    return 0;
}
/*
Handle the end of a section heading at the head of the string.
*/
static HeadingData*
Tokenizer_handle_heading_end(Tokenizer* self)
{
    Py_ssize_t reset = self->head;
    self->head += 1;
    Py_ssize_t best = 1;
    PyObject* text;
    int i;
    while (*Tokenizer_READ(self, 0) == *"=") {
        best++;
        self->head++;
    }
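    /* Recover the opening heading's level from its context bit, then clamp
       both the opening and closing runs to MediaWiki's maximum of six. */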
    Py_ssize_t current = log2(Tokenizer_CONTEXT_VAL(self) / LC_HEADING_LEVEL_1) + 1;
    Py_ssize_t level = current > best ? (best > 6 ? 6 : best) : (current > 6 ? 6 : current);
    Py_ssize_t context = Tokenizer_CONTEXT_VAL(self);
    HeadingData* after = (HeadingData*) Tokenizer_parse(self, context);
    if (BAD_ROUTE) {
        RESET_ROUTE();
        if (level < best) {
            Py_ssize_t diff = best - level;
            char diffblocks[diff];
            for (i = 0; i < diff; i++) diffblocks[i] = *"=";
            text = PyUnicode_FromStringAndSize(diffblocks, diff);
            if (!text) return NULL;
            if (Tokenizer_write_text_then_stack(self, text)) {
                Py_DECREF(text);
                return NULL;
            }
            Py_DECREF(text);
        }
        self->head = reset + best - 1;
    }
    else {
        if (!after) return NULL;
        char blocks[best];
        for (i = 0; i < best; i++) blocks[i] = *"=";
        text = PyUnicode_FromStringAndSize(blocks, best);
        if (!text) {
            Py_DECREF(after->title);
            free(after);
            return NULL;
        }
        if (Tokenizer_write_text_then_stack(self, text)) {
            Py_DECREF(text);
            Py_DECREF(after->title);
            free(after);
            return NULL;
        }
        Py_DECREF(text);
        if (Tokenizer_write_all(self, after->title)) {
            Py_DECREF(after->title);
            free(after);
            return NULL;
        }
        Py_DECREF(after->title);
        level = after->level;
        free(after);
    }
    PyObject* stack = Tokenizer_pop(self);
    if (!stack) return NULL;
    HeadingData* heading = malloc(sizeof(HeadingData));
    if (!heading) {
        PyErr_NoMemory();
        return NULL;
    }
    heading->title = stack;
    heading->level = level;
    return heading;
}
/*
Actually parse an HTML entity and ensure that it is valid.
*/
static int
Tokenizer_really_parse_entity(Tokenizer* self)
{
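    /* Stub: entity parsing is not implemented yet. Returning success without
       writing anything means Tokenizer_parse_entity() pops an empty stack,
       so the "&" itself currently produces no output. */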
    return 0;
}
/*
Parse an HTML entity at the head of the wikicode string.
*/
static int
Tokenizer_parse_entity(Tokenizer* self)
{
    Py_ssize_t reset = self->head;
    if (Tokenizer_push(self, 0))
        return -1;
    if (Tokenizer_really_parse_entity(self))
        return -1;
    if (BAD_ROUTE) {
        RESET_ROUTE();
        self->head = reset;
        if (Tokenizer_write_text(self, Tokenizer_read(self, 0)))
            return -1;
        return 0;
    }
    PyObject* tokenlist = Tokenizer_pop(self);
    if (!tokenlist) return -1;
    if (Tokenizer_write_all(self, tokenlist)) {
        Py_DECREF(tokenlist);
        return -1;
    }
    Py_DECREF(tokenlist);
    return 0;
}
/*
Parse an HTML comment at the head of the wikicode string.
*/
static int
Tokenizer_parse_comment(Tokenizer* self)
{
    self->head += 4;
    Py_ssize_t reset = self->head - 1;
    PyObject *class, *token;
    PyObject *comment = Tokenizer_parse(self, LC_COMMENT);
    if (BAD_ROUTE) {
        RESET_ROUTE();
        self->head = reset;
        PyObject* text = PyUnicode_FromString("<!--");
        if (!text) return -1;
        if (Tokenizer_write_text(self, text)) {
            Py_DECREF(text);
            return -1;
        }
        Py_DECREF(text);
        return 0;
    }
    if (!comment) return -1;
    class = PyObject_GetAttrString(tokens, "CommentStart");
    if (!class) {
        Py_DECREF(comment);
        return -1;
    }
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) {
        Py_DECREF(comment);
        return -1;
    }
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        Py_DECREF(comment);
        return -1;
    }
    Py_DECREF(token);
    if (Tokenizer_write_all(self, comment)) {
        Py_DECREF(comment);
        return -1;
    }
    Py_DECREF(comment);
    class = PyObject_GetAttrString(tokens, "CommentEnd");
    if (!class) return -1;
    token = PyObject_CallObject(class, NULL);
    Py_DECREF(class);
    if (!token) return -1;
    if (Tokenizer_write(self, token)) {
        Py_DECREF(token);
        return -1;
    }
    Py_DECREF(token);
    self->head += 2;
    return 0;
}
/*
Parse the wikicode string, using context for when to stop.
*/
static PyObject*
Tokenizer_parse(Tokenizer* self, Py_ssize_t context)
{
    PyObject *this;
    Py_UNICODE this_data, next, next_next, last;
    Py_ssize_t this_context;
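    /* Contexts in which reaching the end of input means the current route
       has failed, rather than tokenization having finished cleanly: */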
    Py_ssize_t fail_contexts = (
        LC_TEMPLATE | LC_ARGUMENT | LC_WIKILINK | LC_HEADING | LC_COMMENT);
    int is_marker, i;
    if (Tokenizer_push(self, context))
        return NULL;
    while (1) {
        this = Tokenizer_read(self, 0);
        this_data = *PyUnicode_AS_UNICODE(this);
        is_marker = 0;
        for (i = 0; i < NUM_MARKERS; i++) {
            if (*MARKERS[i] == this_data) {
                is_marker = 1;
                break;
            }
        }
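        /* Fast path: anything that cannot begin markup is buffered as plain
           text and the head simply advances. */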
        if (!is_marker) {
            Tokenizer_write_text(self, this);
            self->head++;
            continue;
        }
        this_context = Tokenizer_CONTEXT_VAL(self);
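        /* Past the end of input, Tokenizer_read() returns the EMPTY
           sentinel, whose buffer holds only the null terminator (*""). */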
        if (this_data == *"") {
            if (this_context & LC_TEMPLATE_PARAM_KEY) {
                PyObject* trash = Tokenizer_pop(self);
                Py_XDECREF(trash);
            }
            if (this_context & fail_contexts) {
                return Tokenizer_fail_route(self);
            }
            return Tokenizer_pop(self);
        }
        next = *Tokenizer_READ(self, 1);
        if (this_context & LC_COMMENT) {
            if (this_data == next && next == *"-") {
                if (*Tokenizer_READ(self, 2) == *">") {
                    return Tokenizer_pop(self);
                }
            }
            Tokenizer_write_text(self, this);
        }
        else if (this_data == next && next == *"{") {
            if (Tokenizer_parse_template_or_argument(self))
                return NULL;
        }
        else if (this_data == *"|" && this_context & LC_TEMPLATE) {
            if (Tokenizer_handle_template_param(self))
                return NULL;
        }
        else if (this_data == *"=" && this_context & LC_TEMPLATE_PARAM_KEY) {
            if (Tokenizer_handle_template_param_value(self))
                return NULL;
        }
        else if (this_data == next && next == *"}" && this_context & LC_TEMPLATE) {
            return Tokenizer_handle_template_end(self);
        }
        else if (this_data == *"|" && this_context & LC_ARGUMENT_NAME) {
            if (Tokenizer_handle_argument_separator(self))
                return NULL;
        }
        else if (this_data == next && next == *"}" && this_context & LC_ARGUMENT) {
            if (*Tokenizer_READ(self, 2) == *"}") {
                return Tokenizer_handle_argument_end(self);
            }
            Tokenizer_write_text(self, this);
        }
        else if (this_data == next && next == *"[") {
            if (!(this_context & LC_WIKILINK_TITLE)) {
                if (Tokenizer_parse_wikilink(self))
                    return NULL;
            }
            else {
                Tokenizer_write_text(self, this);
            }
        }
        else if (this_data == *"|" && this_context & LC_WIKILINK_TITLE) {
            if (Tokenizer_handle_wikilink_separator(self))
                return NULL;
        }
        else if (this_data == next && next == *"]" && this_context & LC_WIKILINK) {
            return Tokenizer_handle_wikilink_end(self);
        }
        else if (this_data == *"=" && !(self->global & GL_HEADING)) {
            last = *PyUnicode_AS_UNICODE(Tokenizer_read_backwards(self, 1));
            if (last == *"\n" || last == *"") {
                if (Tokenizer_parse_heading(self))
                    return NULL;
            }
            else {
                Tokenizer_write_text(self, this);
            }
        }
        else if (this_data == *"=" && this_context & LC_HEADING) {
            return (PyObject*) Tokenizer_handle_heading_end(self);
        }
        else if (this_data == *"\n" && this_context & LC_HEADING) {
            return Tokenizer_fail_route(self);
        }
        else if (this_data == *"&") {
            if (Tokenizer_parse_entity(self))
                return NULL;
        }
        else if (this_data == *"<" && next == *"!") {
            next_next = *Tokenizer_READ(self, 2);
            if (next_next == *Tokenizer_READ(self, 3) && next_next == *"-") {
                if (Tokenizer_parse_comment(self))
                    return NULL;
            }
            else {
                Tokenizer_write_text(self, this);
            }
        }
        else {
            Tokenizer_write_text(self, this);
        }
        self->head++;
    }
}
/*
Build a list of tokens from a string of wikicode and return it.
*/
static PyObject*
Tokenizer_tokenize(Tokenizer* self, PyObject* args)
{
    PyObject* text;
    if (!PyArg_ParseTuple(args, "U", &text)) {
        /* Failed to parse a Unicode object; try a string instead. */
        PyErr_Clear();
        const char* encoded;
        Py_ssize_t size;
        if (!PyArg_ParseTuple(args, "s#", &encoded, &size)) {
            return NULL;
        }
        PyObject* temp;
        temp = PyUnicode_FromStringAndSize(encoded, size);
        if (!temp)
            return NULL;
        Py_XDECREF(self->text);
        text = PySequence_Fast(temp, "expected a sequence");
        Py_XDECREF(temp);
        self->text = text;
    }
    else {
        Py_XDECREF(self->text);
        self->text = PySequence_Fast(text, "expected a sequence");
    }
    self->length = PySequence_Length(self->text);
    return Tokenizer_parse(self, 0);
}
PyMODINIT_FUNC
init_tokenizer(void)
{
    PyObject* module;
    TokenizerType.tp_new = PyType_GenericNew;
    if (PyType_Ready(&TokenizerType) < 0)
        return;
    module = Py_InitModule("_tokenizer", module_methods);
    Py_INCREF(&TokenizerType);
    PyModule_AddObject(module, "CTokenizer", (PyObject*) &TokenizerType);
    EMPTY = PyUnicode_FromString("");
    NOARGS = PyTuple_New(0);
    char* name = "mwparserfromhell.parser";
    PyObject* globals = PyEval_GetGlobals();
    PyObject* locals = PyEval_GetLocals();
    PyObject* fromlist = PyList_New(1);
    if (!fromlist) return;
    PyObject* submodname = PyBytes_FromString("tokens");
    if (!submodname) {
        Py_DECREF(fromlist);
        return;
    }
    PyList_SET_ITEM(fromlist, 0, submodname);
    PyObject* tokmodule = PyImport_ImportModuleLevel(name, globals, locals, fromlist, 0);
    Py_DECREF(fromlist);
    if (!tokmodule) {
        return;
    }
    tokens = PyObject_GetAttrString(tokmodule, "tokens");
    Py_DECREF(tokmodule);
}
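
/*
Example usage from Python 2, once the extension is built in place (a sketch:
the import path follows the module name registered above, and the exact token
reprs are illustrative rather than exact output):

    >>> from mwparserfromhell.parser._tokenizer import CTokenizer
    >>> CTokenizer().tokenize("{{foo|bar}}")
    [TemplateOpen(), Text(text=u"foo"), TemplateParamSeparator(),
     Text(text=u"bar"), TemplateClose()]
*/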