A Python robot that edits Wikipedia and interacts with people over IRC https://en.wikipedia.org/wiki/User:EarwigBot
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

site.py 20 KiB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488
  1. # -*- coding: utf-8 -*-
  2. from cookielib import CookieJar
  3. from gzip import GzipFile
  4. from json import loads
  5. from re import escape as re_escape, match as re_match
  6. from StringIO import StringIO
  7. from time import sleep
  8. from urllib import unquote_plus, urlencode
  9. from urllib2 import build_opener, HTTPCookieProcessor, URLError
  10. from urlparse import urlparse
  11. from wiki.category import Category
  12. from wiki.constants import *
  13. from wiki.exceptions import *
  14. from wiki.page import Page
  15. from wiki.user import User
  16. class Site(object):
  17. """
  18. EarwigBot's Wiki Toolset: Site Class
  19. Represents a Site, with support for API queries and returning Pages, Users,
  20. and Categories. The constructor takes a bunch of arguments and you probably
  21. won't need to call it directly, rather tools.get_site() for returning Site
  22. instances, tools.add_site() for adding new ones to config, and
  23. tools.del_site() for removing old ones from config, should suffice.
  24. Public methods:
  25. name -- returns our name (or "wikiid"), like "enwiki"
  26. project -- returns our project name, like "wikipedia"
  27. lang -- returns our language code, like "en"
  28. domain -- returns our web domain, like "en.wikipedia.org"
  29. api_query -- does an API query with the given kwargs as params
  30. namespace_id_to_name -- given a namespace ID, returns associated name(s)
  31. namespace_name_to_id -- given a namespace name, returns associated id
  32. get_page -- returns a Page object for the given title
  33. get_category -- returns a Category object for the given title
  34. get_user -- returns a User object for the given username
  35. """
  36. def __init__(self, name=None, project=None, lang=None, base_url=None,
  37. article_path=None, script_path=None, sql=(None, None),
  38. namespaces=None, login=(None, None), cookiejar=None,
  39. user_agent=None, assert_edit=None, maxlag=None):
  40. """Constructor for new Site instances.
  41. This probably isn't necessary to call yourself unless you're building a
  42. Site that's not in your config and you don't want to add it - normally
  43. all you need is tools.get_site(name), which creates the Site for you
  44. based on your config file. We accept a bunch of kwargs, but the only
  45. ones you really "need" are `base_url` and `script_path` - this is
  46. enough to figure out an API url. `login`, a tuple of
  47. (username, password), is highly recommended. `cookiejar` will be used
  48. to store cookies, and we'll use a normal CookieJar if none is given.
  49. First, we'll store the given arguments as attributes, then set up our
  50. URL opener. We'll load any of the attributes that weren't given from
  51. the API, and then log in if a username/pass was given and we aren't
  52. already logged in.
  53. """
  54. # Attributes referring to site information, filled in by an API query
  55. # if they are missing (and an API url can be determined):
  56. self._name = name
  57. self._project = project
  58. self._lang = lang
  59. self._base_url = base_url
  60. self._article_path = article_path
  61. self._script_path = script_path
  62. self._sql = sql
  63. self._namespaces = namespaces
  64. # Attributes used when querying the API:
  65. self._assert_edit = assert_edit
  66. self._maxlag = maxlag
  67. self._max_retries = 5
  68. # Set up cookiejar and URL opener for making API queries:
  69. if cookiejar is not None:
  70. self._cookiejar = cookiejar
  71. else:
  72. self._cookiejar = CookieJar()
  73. if user_agent is None:
  74. user_agent = USER_AGENT # Set default UA from wiki.constants
  75. self._opener = build_opener(HTTPCookieProcessor(self._cookiejar))
  76. self._opener.addheaders = [("User-Agent", user_agent),
  77. ("Accept-Encoding", "gzip")]
  78. # Get all of the above attributes that were not specified as arguments:
  79. self._load_attributes()
  80. # If we have a name/pass and the API says we're not logged in, log in:
  81. self._login_info = name, password = login
  82. if name is not None and password is not None:
  83. logged_in_as = self._get_username_from_cookies()
  84. if logged_in_as is None or name != logged_in_as:
  85. self._login(login)
  86. def _api_query(self, params, tries=0, wait=5):
  87. """Do an API query with `params` as a dict of parameters.
  88. This will first attempt to construct an API url from self._base_url and
  89. self._script_path. We need both of these, or else we'll raise
  90. SiteAPIError.
  91. We'll encode the given params, adding format=json along the way, as
  92. well as &assert= and &maxlag= based on self._assert_edit and _maxlag.
  93. We make the request through self._opener, which has built-in cookie
  94. support via self._cookiejar, a User-Agent (wiki.constants.USER_AGENT),
  95. and Accept-Encoding set to "gzip".
  96. Assuming everything went well, we'll gunzip the data (if compressed),
  97. load it as a JSON object, and return it.
  98. If our request failed for some reason, we'll raise SiteAPIError with
  99. details. If that reason was due to maxlag, we'll sleep for a bit and
  100. then repeat the query until we exceed self._max_retries.
  101. There's helpful MediaWiki API documentation at
  102. <http://www.mediawiki.org/wiki/API>.
  103. """
  104. if self._base_url is None or self._script_path is None:
  105. e = "Tried to do an API query, but no API URL is known."
  106. raise SiteAPIError(e)
  107. url = ''.join((self._base_url, self._script_path, "/api.php"))
  108. params["format"] = "json" # This is the only format we understand
  109. if self._assert_edit: # If requested, ensure that we're logged in
  110. params["assert"] = self._assert_edit
  111. if self._maxlag: # If requested, don't overload the servers
  112. params["maxlag"] = self._maxlag
  113. data = urlencode(params)
  114. print url, data # debug code
  115. try:
  116. response = self._opener.open(url, data)
  117. except URLError as error:
  118. if hasattr(error, "reason"):
  119. e = "API query failed: {0}.".format(error.reason)
  120. elif hasattr(error, "code"):
  121. e = "API query failed: got an error code of {0}."
  122. e = e.format(error.code)
  123. else:
  124. e = "API query failed."
  125. raise SiteAPIError(e)
  126. result = response.read()
  127. if response.headers.get("Content-Encoding") == "gzip":
  128. stream = StringIO(result)
  129. gzipper = GzipFile(fileobj=stream)
  130. result = gzipper.read()
  131. try:
  132. res = loads(result) # Parse as a JSON object
  133. except ValueError:
  134. e = "API query failed: JSON could not be decoded."
  135. raise SiteAPIError(e)
  136. try:
  137. code = res["error"]["code"]
  138. info = res["error"]["info"]
  139. except KeyError:
  140. return res
  141. if code == "maxlag":
  142. if tries >= self._max_retries:
  143. e = "Maximum number of retries reached ({0})."
  144. raise SiteAPIError(e.format(self._max_retries))
  145. tries += 1
  146. msg = 'Server says: "{0}". Retrying in {1} seconds ({2}/{3}).'
  147. print msg.format(info, wait, tries, self._max_retries)
  148. sleep(wait)
  149. return self._api_query(params, tries=tries, wait=wait*3)
  150. else:
  151. e = 'API query failed: got error "{0}"; server says: "{1}".'
  152. raise SiteAPIError(e.format(code, info))
  153. def _load_attributes(self, force=False):
  154. """Load data about our Site from the API.
  155. This function is called by __init__() when one of the site attributes
  156. was not given as a keyword argument. We'll do an API query to get the
  157. missing data, but only if there actually *is* missing data.
  158. Additionally, you can call this with `force=True` to forcibly reload
  159. all attributes.
  160. """
  161. # All attributes to be loaded, except _namespaces, which is a special
  162. # case because it requires additional params in the API query:
  163. attrs = [self._name, self._project, self._lang, self._base_url,
  164. self._article_path, self._script_path]
  165. params = {"action": "query", "meta": "siteinfo"}
  166. if not self._namespaces or force:
  167. params["siprop"] = "general|namespaces|namespacealiases"
  168. result = self._api_query(params)
  169. self._load_namespaces(result)
  170. elif all(attrs): # Everything is already specified and we're not told
  171. return # to force a reload, so do nothing
  172. else: # We're only loading attributes other than _namespaces
  173. params["siprop"] = "general"
  174. result = self._api_query(params)
  175. res = result["query"]["general"]
  176. self._name = res["wikiid"]
  177. self._project = res["sitename"].lower()
  178. self._lang = res["lang"]
  179. self._base_url = res["server"]
  180. self._article_path = res["articlepath"]
  181. self._script_path = res["scriptpath"]
  182. def _load_namespaces(self, result):
  183. """Fill self._namespaces with a dict of namespace IDs and names.
  184. Called by _load_attributes() with API data as `result` when
  185. self._namespaces was not given as an kwarg to __init__().
  186. """
  187. self._namespaces = {}
  188. for namespace in result["query"]["namespaces"].values():
  189. ns_id = namespace["id"]
  190. name = namespace["*"]
  191. try:
  192. canonical = namespace["canonical"]
  193. except KeyError:
  194. self._namespaces[ns_id] = [name]
  195. else:
  196. if name != canonical:
  197. self._namespaces[ns_id] = [name, canonical]
  198. else:
  199. self._namespaces[ns_id] = [name]
  200. for namespace in result["query"]["namespacealiases"]:
  201. ns_id = namespace["id"]
  202. alias = namespace["*"]
  203. self._namespaces[ns_id].append(alias)
  204. def _get_cookie(self, name, domain):
  205. """Return the named cookie unless it is expired or doesn't exist."""
  206. for cookie in self._cookiejar:
  207. if cookie.name == name and cookie.domain == domain:
  208. if cookie.is_expired():
  209. break
  210. return cookie
  211. def _get_username_from_cookies(self):
  212. """Try to return our username based solely on cookies.
  213. First, we'll look for a cookie named self._name + "Token", like
  214. "enwikiToken". If it exists and isn't expired, we'll assume it's valid
  215. and try to return the value of the cookie self._name + "UserName" (like
  216. "enwikiUserName"). This should work fine on wikis without single-user
  217. login.
  218. If `enwikiToken` doesn't exist, we'll try to find a cookie named
  219. `centralauth_Token`. If this exists and is not expired, we'll try to
  220. return the value of `centralauth_User`.
  221. If we didn't get any matches, we'll return None. Our goal here isn't to
  222. return the most likely username, or what we *want* our username to be
  223. (for that, we'd do self._login_info[0]), but rather to get our current
  224. username without an unnecessary ?action=query&meta=userinfo API query.
  225. """
  226. domain = self.domain()
  227. name = ''.join((self._name, "Token"))
  228. cookie = self._get_cookie(name, domain)
  229. if cookie is not None:
  230. name = ''.join((self._name, "UserName"))
  231. user_name = self._get_cookie(name, domain)
  232. if user_name is not None:
  233. return user_name.value
  234. name = "centralauth_Token"
  235. for cookie in self._cookiejar:
  236. if cookie.domain_initial_dot is False or cookie.is_expired():
  237. continue
  238. if cookie.name != name:
  239. continue
  240. # Build a regex that will match domains this cookie affects:
  241. search = ''.join(("(.*?)", re_escape(cookie.domain)))
  242. if re_match(search, domain): # Test it against our site
  243. user_name = self._get_cookie("centralauth_User", cookie.domain)
  244. if user_name is not None:
  245. return user_name.value
  246. def _get_username_from_api(self):
  247. """Do a simple API query to get our username and return it.
  248. This is a reliable way to make sure we are actually logged in, because
  249. it doesn't deal with annoying cookie logic, but it results in an API
  250. query that is unnecessary in some cases.
  251. Called by _get_username() (in turn called by get_user() with no
  252. username argument) when cookie lookup fails, probably indicating that
  253. we are logged out.
  254. """
  255. params = {"action": "query", "meta": "userinfo"}
  256. result = self._api_query(params)
  257. return result["query"]["userinfo"]["name"]
  258. def _get_username(self):
  259. """Return the name of the current user, whether logged in or not.
  260. First, we'll try to deduce it solely from cookies, to avoid an
  261. unnecessary API query. For the cookie-detection method, see
  262. _get_username_from_cookies()'s docs.
  263. If our username isn't in cookies, then we're probably not logged in, or
  264. something fishy is going on (like forced logout). In this case, do a
  265. single API query for our username (or IP address) and return that.
  266. """
  267. name = self._get_username_from_cookies()
  268. if name is not None:
  269. return name
  270. return self._get_username_from_api()
  271. def _save_cookiejar(self):
  272. """Try to save our cookiejar after doing a (normal) login or logout.
  273. Calls the standard .save() method with no filename. Don't fret if our
  274. cookiejar doesn't support saving (CookieJar raises AttributeError,
  275. FileCookieJar raises NotImplementedError) or no default filename was
  276. given (LWPCookieJar and MozillaCookieJar raise ValueError).
  277. """
  278. try:
  279. self._cookiejar.save()
  280. except (AttributeError, NotImplementedError, ValueError):
  281. pass
  282. def _login(self, login, token=None, attempt=0):
  283. """Safely login through the API.
  284. Normally, this is called by __init__() if a username and password have
  285. been provided and no valid login cookies were found. The only other
  286. time it needs to be called is when those cookies expire, which is done
  287. automatically by api_query() if a query fails.
  288. Recent versions of MediaWiki's API have fixed a CSRF vulnerability,
  289. requiring login to be done in two separate requests. If the response
  290. from from our initial request is "NeedToken", we'll do another one with
  291. the token. If login is successful, we'll try to save our cookiejar.
  292. Raises LoginError on login errors (duh), like bad passwords and
  293. nonexistent usernames.
  294. `login` is a (username, password) tuple. `token` is the token returned
  295. from our first request, and `attempt` is to prevent getting stuck in a
  296. loop if MediaWiki isn't acting right.
  297. """
  298. name, password = login
  299. params = {"action": "login", "lgname": name, "lgpassword": password}
  300. if token is not None:
  301. params["lgtoken"] = token
  302. result = self._api_query(params)
  303. res = result["login"]["result"]
  304. if res == "Success":
  305. self._save_cookiejar()
  306. elif res == "NeedToken" and attempt == 0:
  307. token = result["login"]["token"]
  308. return self._login(login, token, attempt=1)
  309. else:
  310. if res == "Illegal":
  311. e = "The provided username is illegal."
  312. elif res == "NotExists":
  313. e = "The provided username does not exist."
  314. elif res == "EmptyPass":
  315. e = "No password was given."
  316. elif res == "WrongPass" or res == "WrongPluginPass":
  317. e = "The given password is incorrect."
  318. else:
  319. e = "Couldn't login; server says '{0}'.".format(res)
  320. raise LoginError(e)
  321. def _logout(self):
  322. """Safely logout through the API.
  323. We'll do a simple API request (api.php?action=logout), clear our
  324. cookiejar (which probably contains now-invalidated cookies) and try to
  325. save it, if it supports that sort of thing.
  326. """
  327. params = {"action": "logout"}
  328. self._api_query(params)
  329. self._cookiejar.clear()
  330. self._save_cookiejar()
  331. def api_query(self, **kwargs):
  332. """Do an API query with `kwargs` as the parameters.
  333. See _api_query()'s documentation for details.
  334. """
  335. return self._api_query(kwargs)
  336. def name(self):
  337. """Returns the Site's name (or "wikiid" in the API), like "enwiki"."""
  338. return self._name
  339. def project(self):
  340. """Returns the Site's project name in lowercase, like "wikipedia"."""
  341. return self._project
  342. def lang(self):
  343. """Returns the Site's language code, like "en" or "es"."""
  344. return self._lang
  345. def domain(self):
  346. """Returns the Site's web domain, like "en.wikipedia.org"."""
  347. return urlparse(self._base_url).netloc
  348. def namespace_id_to_name(self, ns_id, all=False):
  349. """Given a namespace ID, returns associated namespace names.
  350. If all is False (default), we'll return the first name in the list,
  351. which is usually the localized version. Otherwise, we'll return the
  352. entire list, which includes the canonical name.
  353. For example, returns u"Wikipedia" if ns_id=4 and all=False on enwiki;
  354. returns [u"Wikipedia", u"Project"] if ns_id=4 and all=True.
  355. Raises NamespaceNotFoundError if the ID is not found.
  356. """
  357. try:
  358. if all:
  359. return self._namespaces[ns_id]
  360. else:
  361. return self._namespaces[ns_id][0]
  362. except KeyError:
  363. e = "There is no namespace with id {0}.".format(ns_id)
  364. raise NamespaceNotFoundError(e)
  365. def namespace_name_to_id(self, name):
  366. """Given a namespace name, returns the associated ID.
  367. Like namespace_id_to_name(), but reversed. Case is ignored, because
  368. namespaces are assumed to be case-insensitive.
  369. Raises NamespaceNotFoundError if the name is not found.
  370. """
  371. lname = name.lower()
  372. for ns_id, names in self._namespaces.items():
  373. lnames = [n.lower() for n in names] # Be case-insensitive
  374. if lname in lnames:
  375. return ns_id
  376. e = "There is no namespace with name '{0}'.".format(name)
  377. raise NamespaceNotFoundError(e)
  378. def get_page(self, title, follow_redirects=False):
  379. """Returns a Page object for the given title (pagename).
  380. Will return a Category object instead if the given title is in the
  381. category namespace. As Category is a subclass of Page, this should not
  382. cause problems.
  383. Note that this doesn't do any direct checks for existence or
  384. redirect-following - Page's methods provide that.
  385. """
  386. prefixes = self.namespace_id_to_name(NS_CATEGORY, all=True)
  387. prefix = title.split(":", 1)[0]
  388. if prefix != title: # Avoid a page that is simply "Category"
  389. if prefix in prefixes:
  390. return Category(self, title, follow_redirects)
  391. return Page(self, title, follow_redirects)
  392. def get_category(self, catname, follow_redirects=False):
  393. """Returns a Category object for the given category name.
  394. `catname` should be given *without* a namespace prefix. This method is
  395. really just shorthand for get_page("Category:" + catname).
  396. """
  397. prefix = self.namespace_id_to_name(NS_CATEGORY)
  398. pagename = ':'.join((prefix, catname))
  399. return Category(self, pagename, follow_redirects)
  400. def get_user(self, username=None):
  401. """Returns a User object for the given username.
  402. If `username` is left as None, then a User object representing the
  403. currently logged-in (or anonymous!) user is returned.
  404. """
  405. if username is None:
  406. username = self._get_username()
  407. return User(self, username)