A copyright violation detector running on Wikimedia Cloud Services https://tools.wmflabs.org/copyvios/
# -*- coding: utf-8 -*-

from os.path import expanduser
from urlparse import parse_qs

from flask import g, request
import oursql
from sqlalchemy.pool import manage

# Proxy the oursql driver through SQLAlchemy's connection pool manager.
oursql = manage(oursql)

__all__ = ["Query", "get_globals_db", "get_cache_db", "httpsfix", "urlstrip"]


class Query(object):
    """Parse the current request's GET or POST parameters.

    Attribute access returns the last decoded value supplied for a key,
    or None if the key was not present.
    """

    def __init__(self, method="GET"):
        self.query = {}
        if method == "GET":
            parsed = parse_qs(request.environ["QUERY_STRING"])
        elif method == "POST":
            size = int(request.environ.get("CONTENT_LENGTH", 0))
            parsed = parse_qs(request.environ["wsgi.input"].read(size))
        else:
            parsed = {}
        for key, value in parsed.iteritems():
            try:
                # Keep the last value for repeated keys; drop values that
                # are not valid UTF-8.
                self.query[key] = value[-1].decode("utf8")
            except UnicodeDecodeError:
                pass

    def __getattr__(self, key):
        return self.query.get(key)

    def __setattr__(self, key, value):
        if key == "query":
            super(Query, self).__setattr__(key, value)
        else:
            self.query[key] = value


def _connect_db(name):
    # Build connection arguments from the bot's _copyviosSQL config plus
    # credentials in ~/.my.cnf; autoping/autoreconnect keep the pooled
    # connections usable across long idle periods.
    args = g.bot.config.wiki["_copyviosSQL"][name]
    args["read_default_file"] = expanduser("~/.my.cnf")
    args["autoping"] = True
    args["autoreconnect"] = True
    return oursql.connect(**args)


def get_globals_db():
    # Open the "globals" database connection on first use and memoize it
    # on flask.g for the rest of the request.
    if not g.globals_db:
        g.globals_db = _connect_db("globals")
    return g.globals_db


def get_cache_db():
    # Same as get_globals_db(), but for the "cache" database.
    if not g.cache_db:
        g.cache_db = _connect_db("cache")
    return g.cache_db


def httpsfix(context, url):
    # Turn an explicit http:// URL into a protocol-relative one ("//...").
    if url.startswith("http://"):
        url = url[len("http:"):]
    return url


def urlstrip(context, url):
    # Strip the scheme, a leading "www.", and a trailing slash for display.
    if url.startswith("http://"):
        url = url[7:]
    if url.startswith("https://"):
        url = url[8:]
    if url.startswith("www."):
        url = url[4:]
    if url.endswith("/"):
        url = url[:-1]
    return url
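
This module only defines helpers; the code that attaches them to the Flask application lives elsewhere in the repository. Below is a minimal, hypothetical sketch of that wiring, assuming an app object, a before_request hook that seeds flask.g (since get_globals_db() and get_cache_db() test "if not g.<name>"), and Jinja2 context-filter registration for httpsfix and urlstrip. The import path and every name introduced here are illustrative assumptions, not code from this file.

# Hypothetical usage sketch -- the import path, the app object, and the
# registration style are assumptions for illustration, not part of this file.
import jinja2
from flask import Flask, g

from copyvios.misc import (Query, get_cache_db, get_globals_db,
                           httpsfix, urlstrip)  # assumed module path

app = Flask(__name__)

# httpsfix() and urlstrip() take a leading template-context argument, which
# matches Jinja2's contextfilter convention (assumed registration style).
app.jinja_env.filters["httpsfix"] = jinja2.contextfilter(httpsfix)
app.jinja_env.filters["urlstrip"] = jinja2.contextfilter(urlstrip)


@app.before_request
def _seed_globals():
    # The getters check "if not g.<name>", so the attributes must already
    # exist; g.bot (whose config supplies the _copyviosSQL settings read by
    # _connect_db()) is assumed to be attached by the app's own startup code.
    g.globals_db = g.cache_db = None


@app.route("/api")
def api():
    query = Query()            # parse GET parameters from the WSGI environ
    title = query.title        # None if ?title=... was not supplied
    conn = get_cache_db()      # opened on first use, reused for the request
    # ... run the copyvio check against the cache database and render ...
    return "checked: %s" % title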