diff --git a/mwparserfromhell/parser/tokenizer.c b/mwparserfromhell/parser/tokenizer.c
index 10a03a9..faed5d7 100644
--- a/mwparserfromhell/parser/tokenizer.c
+++ b/mwparserfromhell/parser/tokenizer.c
@@ -2783,12 +2783,6 @@ Tokenizer_handle_table_cell(Tokenizer* self, const char *markup,
                             line_context))
         return -1;
     padding = Tokenizer_parse_as_table_style(self, '|', 0);
-    if (BAD_ROUTE) {
-        trash = Tokenizer_pop(self);
-        Py_XDECREF(trash);
-        self->head = reset;
-        return 0;
-    }
     if (!padding)
         return -1;
     style = Tokenizer_pop(self);
diff --git a/mwparserfromhell/parser/tokenizer.py b/mwparserfromhell/parser/tokenizer.py
index 9787c5f..dd5d6d9 100644
--- a/mwparserfromhell/parser/tokenizer.py
+++ b/mwparserfromhell/parser/tokenizer.py
@@ -1325,7 +1325,7 @@ class Tokenizer(object):
             elif this in ("\n", ":") and self._context & contexts.DL_TERM:
                 self._handle_dl_term()
                 if this == "\n":
-                    # kill potential table contexts
+                    # Kill potential table contexts
                     self._context &= ~contexts.TABLE_CELL_LINE_CONTEXTS
             # Start of table parsing
             elif this == "{" and next == "|" and (self._read(-1) in ("\n", self.START) or
diff --git a/tests/tokenizer/tags_wikimarkup.mwtest b/tests/tokenizer/tags_wikimarkup.mwtest
index 04f617a..c709ba7 100644
--- a/tests/tokenizer/tags_wikimarkup.mwtest
+++ b/tests/tokenizer/tags_wikimarkup.mwtest
@@ -447,6 +447,13 @@ output: [TagOpenOpen(wiki_markup=":"), Text(text="dd"), TagCloseSelfclose(), Tag
 
 ---
 
+name: dt_dd_mix4
+label: another example of correct dt/dd usage, with a trigger for a specific parse route
+input: ";foo]:bar"
+output: [TagOpenOpen(wiki_markup=";"), Text(text="dt"), TagCloseSelfclose(), Text(text="foo]"), TagOpenOpen(wiki_markup=":"), Text(text="dd"), TagCloseSelfclose(), Text(text="bar")]
+
+---
+
 name: ul_ol_dt_dd_mix
 label: an assortment of uls, ols, dds, and dts
 input: ";:#*foo\n:#*;foo\n#*;:foo\n*;:#foo"