From a4c2fd023adfe95fdd5552cc2bab90a0bbc16a2a Mon Sep 17 00:00:00 2001
From: Ben Kurtovic
Date: Sat, 5 Jul 2014 01:00:11 -0400
Subject: [PATCH] Remove some useless code in the tokenizers.

---
 mwparserfromhell/parser/tokenizer.c  | 4 +---
 mwparserfromhell/parser/tokenizer.py | 8 +++-----
 2 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/mwparserfromhell/parser/tokenizer.c b/mwparserfromhell/parser/tokenizer.c
index 6ab8570..963e7d7 100644
--- a/mwparserfromhell/parser/tokenizer.c
+++ b/mwparserfromhell/parser/tokenizer.c
@@ -832,8 +832,6 @@ static int Tokenizer_parse_wikilink(Tokenizer* self)
     Py_DECREF(wikilink);
     if (Tokenizer_emit(self, WikilinkClose))
         return -1;
-    if (self->topstack->context & LC_FAIL_NEXT)
-        self->topstack->context ^= LC_FAIL_NEXT;
     return 0;
 }
 
@@ -1718,7 +1716,7 @@ Tokenizer_handle_tag_data(Tokenizer* self, TagData* data, Py_UNICODE chunk)
             return -1;
         }
     }
-    else if (data->context & TAG_ATTR_VALUE) {
+    else {  // data->context & TAG_ATTR_VALUE assured
         escaped = (Tokenizer_READ_BACKWARDS(self, 1) == '\\' &&
                    Tokenizer_READ_BACKWARDS(self, 2) != '\\');
         if (data->context & TAG_NOTE_QUOTE) {
diff --git a/mwparserfromhell/parser/tokenizer.py b/mwparserfromhell/parser/tokenizer.py
index 9af9204..6430f0f 100644
--- a/mwparserfromhell/parser/tokenizer.py
+++ b/mwparserfromhell/parser/tokenizer.py
@@ -255,7 +255,7 @@ class Tokenizer(object):
             self._context ^= contexts.TEMPLATE_NAME
         elif self._context & contexts.TEMPLATE_PARAM_VALUE:
             self._context ^= contexts.TEMPLATE_PARAM_VALUE
-        elif self._context & contexts.TEMPLATE_PARAM_KEY:
+        else:
             self._emit_all(self._pop(keep_context=True))
         self._context |= contexts.TEMPLATE_PARAM_KEY
         self._emit(tokens.TemplateParamSeparator())
@@ -296,8 +296,6 @@ class Tokenizer(object):
             self._head = reset
             self._emit_text("[[")
         else:
-            if self._context & contexts.FAIL_NEXT:
-                self._context ^= contexts.FAIL_NEXT
             self._emit(tokens.WikilinkOpen())
             self._emit_all(wikilink)
             self._emit(tokens.WikilinkClose())
@@ -687,7 +685,7 @@ class Tokenizer(object):
             self._push_tag_buffer(data)
             data.context = data.CX_ATTR_NAME
             self._push(contexts.TAG_ATTR)
-        elif data.context & data.CX_ATTR_VALUE:
+        else:  # data.context & data.CX_ATTR_VALUE assured
             escaped = self._read(-1) == "\\" and self._read(-2) != "\\"
             if data.context & data.CX_NOTE_QUOTE:
                 data.context ^= data.CX_NOTE_QUOTE
@@ -943,7 +941,7 @@ class Tokenizer(object):
         elif ticks == 3:
             if self._parse_bold():
                 return self._pop()
-        elif ticks == 5:
+        else:  # ticks == 5
             self._parse_italics_and_bold()
         self._head -= 1
 