A Python parser for MediaWiki wikicode https://mwparserfromhell.readthedocs.io/
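A minimal usage sketch of the parser this repository provides, assuming the package is installed from PyPI (pip install mwparserfromhell); the parse() and filter_templates() calls follow the public API documented at the link above, and the sample wikitext is only illustrative.

import mwparserfromhell

# Parse a fragment of wikitext into a Wikicode object.
text = "I has a template! {{foo|bar|baz|eggs=spam}} See it?"
wikicode = mwparserfromhell.parse(text)

# Walk the templates and read their names and parameters.
for template in wikicode.filter_templates():
    print(template.name)                   # foo
    if template.has("eggs"):
        print(template.get("eggs").value)  # spam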

test_pytokenizer.py 1.8 KiB

# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from __future__ import unicode_literals

try:
    import unittest2 as unittest
except ImportError:
    import unittest

from mwparserfromhell.parser.tokenizer import Tokenizer
from ._test_tokenizer import TokenizerTestCase


class TestPyTokenizer(TokenizerTestCase, unittest.TestCase):
    """Test cases for the Python tokenizer."""

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Tokenizer

    if not TokenizerTestCase.skip_others:
        def test_uses_c(self):
            """make sure the Python tokenizer identifies as not using C"""
            self.assertFalse(Tokenizer.USES_C)
            self.assertFalse(Tokenizer().USES_C)


if __name__ == "__main__":
    unittest.main(verbosity=2)
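Because the test case comes in through the relative import of TokenizerTestCase, the module is meant to be run as part of the test package rather than as a loose script. A hedged sketch of driving it through the standard unittest loader follows; the "tests" package name is an assumption about the repository layout.

import unittest

# Load and run the module via the standard unittest loader
# (assumes the file lives in an importable package named "tests").
suite = unittest.defaultTestLoader.loadTestsFromName("tests.test_pytokenizer")
unittest.TextTestRunner(verbosity=2).run(suite)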