A Python robot that edits Wikipedia and interacts with people over IRC https://en.wikipedia.org/wiki/User:EarwigBot


# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2016 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import re
import sqlite3 as sqlite
from threading import Lock
from time import time
from urlparse import urlparse

from earwigbot import exceptions

__all__ = ["ExclusionsDB"]

DEFAULT_SOURCES = {
    "all": [  # Applies to all, but located on enwiki
        "User:EarwigBot/Copyvios/Exclusions",
        "User:EranBot/Copyright/Blacklist"
    ],
    "enwiki": [
        "Wikipedia:Mirrors and forks/ABC", "Wikipedia:Mirrors and forks/DEF",
        "Wikipedia:Mirrors and forks/GHI", "Wikipedia:Mirrors and forks/JKL",
        "Wikipedia:Mirrors and forks/MNO", "Wikipedia:Mirrors and forks/PQR",
        "Wikipedia:Mirrors and forks/STU", "Wikipedia:Mirrors and forks/VWXYZ"
    ]
}

_RE_STRIP_PREFIX = r"^https?://(www\.)?"
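
# Added commentary, not part of the original file: _RE_STRIP_PREFIX normalizes
# URLs before they are stored or compared, stripping the scheme and an
# optional "www." label. For example (the URLs are illustrative):
#
#     >>> re.sub(_RE_STRIP_PREFIX, "", "https://www.example.com/wiki/Foo")
#     'example.com/wiki/Foo'
#     >>> re.sub(_RE_STRIP_PREFIX, "", "http://example.org/")
#     'example.org/'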


class ExclusionsDB(object):
    """
    **EarwigBot: Wiki Toolset: Exclusions Database Manager**

    Controls the :file:`exclusions.db` file, which stores URLs excluded from
    copyright violation checks on account of being known mirrors, for example.
    """

    def __init__(self, sitesdb, dbfile, logger):
        self._sitesdb = sitesdb
        self._dbfile = dbfile
        self._logger = logger
        self._db_access_lock = Lock()

    def __repr__(self):
        """Return the canonical string representation of the ExclusionsDB."""
        res = "ExclusionsDB(sitesdb={0!r}, dbfile={1!r}, logger={2!r})"
        return res.format(self._sitesdb, self._dbfile, self._logger)

    def __str__(self):
        """Return a nice string representation of the ExclusionsDB."""
        return "<ExclusionsDB at {0}>".format(self._dbfile)

    def _create(self):
        """Initialize the exclusions database with its necessary tables."""
        script = """
            CREATE TABLE sources (source_sitename, source_page);
            CREATE TABLE updates (update_sitename, update_time);
            CREATE TABLE exclusions (exclusion_sitename, exclusion_url);
        """
        query = "INSERT INTO sources VALUES (?, ?);"
        sources = []
        for sitename, pages in DEFAULT_SOURCES.iteritems():
            for page in pages:
                sources.append((sitename, page))

        with sqlite.connect(self._dbfile) as conn:
            conn.executescript(script)
            conn.executemany(query, sources)
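
    # Added commentary, not part of the original file: the schema is
    # deliberately minimal. SQLite allows columns to be declared without
    # types, and every table is keyed by sitename: "sources" lists the wiki
    # pages to scrape, "updates" records the last sync time per site, and
    # "exclusions" holds the normalized excluded URLs.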

    def _load_source(self, site, source):
        """Load from a specific source and return a set of URLs."""
        urls = set()
        try:
            data = site.get_page(source, follow_redirects=True).get()
        except exceptions.PageNotFoundError:
            return urls

        if source == "User:EarwigBot/Copyvios/Exclusions":
            for line in data.splitlines():
                match = re.match(r"^\s*url\s*=\s*(?:\<nowiki\>\s*)?(.+?)\s*(?:\</nowiki\>\s*)?(?:#.*?)?$", line)
                if match:
                    url = re.sub(_RE_STRIP_PREFIX, "", match.group(1))
                    if url:
                        urls.add(url)
            return urls

        if source == "User:EranBot/Copyright/Blacklist":
            for line in data.splitlines()[1:]:
                line = re.sub(r"(#|==).*$", "", line).strip()
                if line:
                    urls.add("re:" + line)
            return urls

        for line in data.splitlines():
            if re.match(r"^(\s*\|?\s*url\s*=)|(\*?\s*Site:)", line):
                for url in re.findall(r"(https?://.+?)(?:[ [\]<>{}()]|$)", line):
                    url = re.sub(_RE_STRIP_PREFIX, "", url)
                    if url:
                        urls.add(url)
        return urls
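
    # Added commentary, not part of the original file. Illustrative lines each
    # branch above accepts (the page contents are assumptions):
    #
    #   exclusion list:    url = <nowiki>http://example.com/</nowiki> # note
    #                      -> "example.com/"
    #   EranBot blacklist: example\.org/.*  # comment
    #                      -> "re:example\.org/.*"
    #   mirror listings:   * Site: http://mirror.example.net/wiki
    #                      -> "mirror.example.net/wiki"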

    def _update(self, sitename):
        """Update the database from listed sources in the index."""
        query1 = "SELECT source_page FROM sources WHERE source_sitename = ?"
        query2 = "SELECT exclusion_url FROM exclusions WHERE exclusion_sitename = ?"
        query3 = "DELETE FROM exclusions WHERE exclusion_sitename = ? AND exclusion_url = ?"
        query4 = "INSERT INTO exclusions VALUES (?, ?)"
        query5 = "SELECT 1 FROM updates WHERE update_sitename = ?"
        query6 = "UPDATE updates SET update_time = ? WHERE update_sitename = ?"
        query7 = "INSERT INTO updates VALUES (?, ?)"

        if sitename == "all":
            site = self._sitesdb.get_site("enwiki")
        else:
            site = self._sitesdb.get_site(sitename)

        with self._db_access_lock, sqlite.connect(self._dbfile) as conn:
            urls = set()
            for (source,) in conn.execute(query1, (sitename,)):
                urls |= self._load_source(site, source)
            for (url,) in conn.execute(query2, (sitename,)):
                if url in urls:
                    urls.remove(url)
                else:
                    conn.execute(query3, (sitename, url))
            conn.executemany(query4, [(sitename, url) for url in urls])
            if conn.execute(query5, (sitename,)).fetchone():
                conn.execute(query6, (int(time()), sitename))
            else:
                conn.execute(query7, (sitename, int(time())))
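
    # Added commentary, not part of the original file: the loops above compute
    # a diff in one pass. Freshly scraped URLs that are already stored are
    # dropped from the set (nothing to do), stored URLs missing from the
    # sources are deleted, and whatever remains in the set is newly inserted.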

    def _get_last_update(self, sitename):
        """Return the UNIX timestamp of the last time the db was updated."""
        query = "SELECT update_time FROM updates WHERE update_sitename = ?"
        with self._db_access_lock, sqlite.connect(self._dbfile) as conn:
            try:
                result = conn.execute(query, (sitename,)).fetchone()
            except sqlite.OperationalError:
                self._create()
                return 0
            return result[0] if result else 0

    def sync(self, sitename, force=False):
        """Update the database if it hasn't been updated recently.

        This updates the exclusions database for the site *sitename* and "all".

        Site-specific lists are considered stale after 48 hours; global lists
        after 12 hours.
        """
        max_staleness = 60 * 60 * (12 if sitename == "all" else 48)
        time_since_update = int(time() - self._get_last_update(sitename))
        if force or time_since_update > max_staleness:
            log = u"Updating stale database: {0} (last updated {1} seconds ago)"
            self._logger.info(log.format(sitename, time_since_update))
            self._update(sitename)
        else:
            log = u"Database for {0} is still fresh (last updated {1} seconds ago)"
            self._logger.debug(log.format(sitename, time_since_update))
        if sitename != "all":
            self.sync("all", force=force)
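
    # A hedged usage sketch, not part of the original file (exclusions_db is
    # assumed to be an ExclusionsDB instance; how it is reached is not shown
    # in this file):
    #
    #     >>> exclusions_db.sync("enwiki")              # also syncs "all"
    #     >>> exclusions_db.sync("enwiki", force=True)  # skip staleness check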

    def check(self, sitename, url):
        """Check whether a given URL is in the exclusions database.

        Return ``True`` if the URL is in the database, or ``False`` otherwise.
        """
        normalized = re.sub(_RE_STRIP_PREFIX, "", url.lower())
        query = """SELECT exclusion_url FROM exclusions
                   WHERE exclusion_sitename = ? OR exclusion_sitename = ?"""
        with self._db_access_lock, sqlite.connect(self._dbfile) as conn:
            for (excl,) in conn.execute(query, (sitename, "all")):
                excl = excl.lower()
                if excl.startswith("*."):
                    parsed = urlparse(url.lower())
                    matches = excl[2:] in parsed.netloc
                    if matches and "/" in excl:
                        # The exclusion carries a path; require the URL's path
                        # to start with it as well.
                        excl_path = excl[excl.index("/"):]
                        matches = parsed.path.startswith(excl_path)
                elif excl.startswith("re:"):
                    try:
                        matches = re.match(excl[3:], normalized)
                    except re.error:
                        continue
                else:
                    matches = normalized.startswith(excl)
                if matches:
                    log = u"Exclusion detected in {0} for {1}"
                    self._logger.debug(log.format(sitename, url))
                    return True

        log = u"No exclusions in {0} for {1}".format(sitename, url)
        self._logger.debug(log)
        return False
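
    # Added commentary, not part of the original file. Exclusion entries come
    # in three forms (the example entries are assumptions):
    #
    #   "*.example.com"      -- matches any URL whose host contains example.com
    #   "re:example\.org/.*" -- the part after "re:" is matched, anchored at
    #                           the start, against the normalized URL
    #   "example.net/wiki/"  -- a plain prefix of the normalized URL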

    def get_mirror_hints(self, page, try_mobile=True):
        """Return a list of strings that indicate the existence of a mirror.

        The source parser checks for the presence of these strings inside of
        certain HTML tag attributes (``"href"`` and ``"src"``).
        """
        site = page.site
        path = urlparse(page.url).path
        roots = [site.domain]
        scripts = ["index.php", "load.php", "api.php"]
        if try_mobile:
            fragments = re.search(r"^([\w]+)\.([\w]+)\.([\w]+)$", site.domain)
            if fragments:
                roots.append("{0}.m.{1}.{2}".format(*fragments.groups()))
        general = [root + site._script_path + "/" + script
                   for root in roots for script in scripts]
        specific = [root + path for root in roots]
        return general + specific
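
# Added commentary, not part of the original file. An illustrative result (the
# page and site values are assumptions): for a page at
# https://en.wikipedia.org/wiki/Example with site.domain "en.wikipedia.org"
# and a script path of "/w", the hints include "en.wikipedia.org/w/index.php",
# "en.m.wikipedia.org/w/api.php", and "en.wikipedia.org/wiki/Example".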