A copyright violation detector running on Wikimedia Cloud Services https://tools.wmflabs.org/copyvios/
Вы не можете выбрать более 25 тем Темы должны начинаться с буквы или цифры, могут содержать дефисы(-) и должны содержать не более 35 символов.

12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
12 лет назад
13 лет назад
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427
  1. <%!
  2. from datetime import datetime
  3. from hashlib import sha256
  4. from itertools import count
  5. from os.path import expanduser
  6. from re import sub, UNICODE
  7. from sys import path
  8. from time import time
  9. from urlparse import parse_qs, urlparse
  10. from earwigbot import exceptions
  11. from earwigbot.bot import Bot
  12. import oursql
  13. def get_results(bot, lang, project, name, all_projects, title, url, query):
  14. site = get_site(bot, lang, project, name, all_projects)
  15. if not site:
  16. return None, None, None
  17. page = site.get_page(title)
  18. if page.exists in [page.PAGE_MISSING, page.PAGE_INVALID]:
  19. return site, page, None
  20. # if url:
  21. # result = get_url_specific_results(page, url)
  22. # else:
  23. # conn = open_sql_connection(bot, "copyvioCache")
  24. # if not query.get("nocache"):
  25. # result = get_cached_results(page, conn)
  26. # if query.get("nocache") or not result:
  27. # result = get_fresh_results(page, conn)
  28. tstart = time()
  29. mc1 = __import__("earwigbot").wiki.copyvios.MarkovChain(page.get())
  30. mc2 = __import__("earwigbot").wiki.copyvios.MarkovChain(u"This is some random textual content for a page.")
  31. mci = __import__("earwigbot").wiki.copyvios.MarkovChainIntersection(mc1, mc2)
  32. result = __import__("earwigbot").wiki.copyvios.CopyvioCheckResult(
  33. True, 0.67123, "http://example.com/", 7, mc1, (mc2, mci))
  34. result.cached = False
  35. result.tdiff = time() - tstart
  36. # END TEST BLOCK
  37. return site, page, result
  38. def get_site(bot, lang, project, name, all_projects):
  39. if project not in [proj[0] for proj in all_projects]:
  40. return None
  41. if project == "wikimedia": # Special sites:
  42. try:
  43. return bot.wiki.get_site(name=name)
  44. except exceptions.SiteNotFoundError:
  45. try:
  46. return bot.wiki.add_site(lang=lang, project=project)
  47. except exceptions.APIError:
  48. return None
  49. try:
  50. return bot.wiki.get_site(lang=lang, project=project)
  51. except exceptions.SiteNotFoundError:
  52. try:
  53. return bot.wiki.add_site(lang=lang, project=project)
  54. except exceptions.APIError:
  55. return None
  56. def get_url_specific_results(page, url):
  57. t_start = time()
  58. result = page.copyvio_compare(url)
  59. result.cached = False
  60. result.tdiff = time() - t_start
  61. return result
  62. def open_sql_connection(bot, dbname):
  63. conn_args = bot.config.wiki["_toolserverSQL"][dbname]
  64. if "read_default_file" not in conn_args and "user" not in conn_args and "passwd" not in conn_args:
  65. conn_args["read_default_file"] = expanduser("~/.my.cnf")
  66. if "autoping" not in conn_args:
  67. conn_args["autoping"] = True
  68. if "autoreconnect" not in conn_args:
  69. conn_args["autoreconnect"] = True
  70. return oursql.connect(**conn_args)
  71. def get_cached_results(page, conn):
  72. query1 = "DELETE FROM cache WHERE cache_time < DATE_SUB(CURRENT_TIMESTAMP, INTERVAL 3 DAY)"
  73. query2 = "SELECT cache_url, cache_time, cache_queries, cache_process_time FROM cache WHERE cache_id = ? AND cache_hash = ?"
  74. pageid = page.pageid()
  75. hash = sha256(page.get()).hexdigest()
  76. t_start = time()
  77. with conn.cursor() as cursor:
  78. cursor.execute(query1)
  79. cursor.execute(query2, (pageid, hash))
  80. results = cursor.fetchall()
  81. if not results:
  82. return None
  83. url, cache_time, num_queries, original_tdiff = results[0]
  84. result = page.copyvio_compare(url)
  85. result.cached = True
  86. result.queries = num_queries
  87. result.tdiff = time() - t_start
  88. result.original_tdiff = original_tdiff
  89. result.cache_time = cache_time.strftime("%b %d, %Y %H:%M:%S UTC")
  90. result.cache_age = format_date(cache_time)
  91. return result
  92. def format_date(cache_time):
  93. diff = datetime.utcnow() - cache_time
  94. if diff.seconds > 3600:
  95. return "{0} hours".format(diff.seconds / 3600)
  96. if diff.seconds > 60:
  97. return "{0} minutes".format(diff.seconds / 60)
  98. return "{0} seconds".format(diff.seconds)
  99. def get_fresh_results(page, conn):
  100. t_start = time()
  101. result = page.copyvio_check(max_queries=10)
  102. result.cached = False
  103. result.tdiff = time() - t_start
  104. cache_result(page, result, conn)
  105. return result
  106. def cache_result(page, result, conn):
  107. pageid = page.pageid()
  108. hash = sha256(page.get()).hexdigest()
  109. query1 = "SELECT 1 FROM cache WHERE cache_id = ?"
  110. query2 = "DELETE FROM cache WHERE cache_id = ?"
  111. query3 = "INSERT INTO cache VALUES (?, ?, ?, CURRENT_TIMESTAMP, ?, ?)"
  112. with conn.cursor() as cursor:
  113. cursor.execute(query1, (pageid,))
  114. if cursor.fetchall():
  115. cursor.execute(query2, (pageid,))
  116. cursor.execute(query3, (pageid, hash, result.url, result.queries,
  117. result.tdiff))
  118. def get_sites(bot):
  119. max_staleness = 60 * 60 * 24 * 7
  120. conn = open_sql_connection(bot, "globals")
  121. query1 = "SELECT update_time FROM updates WHERE update_service = ?"
  122. query2 = "SELECT lang_code, lang_name FROM language"
  123. query3 = "SELECT project_code, project_name FROM project"
  124. with conn.cursor() as cursor:
  125. cursor.execute(query1, ("sites",))
  126. try:
  127. time_since_update = int(time() - cursor.fetchall()[0][0])
  128. except IndexError:
  129. time_since_update = time()
  130. if time_since_update > max_staleness:
  131. update_sites(bot.wiki.get_site(), cursor)
  132. cursor.execute(query2)
  133. langs = [(code, name.decode("unicode_escape")) for (code, name) in cursor.fetchall()]
  134. cursor.execute(query3)
  135. projects = cursor.fetchall()
  136. return langs, projects
  137. def update_sites(site, cursor):
  138. matrix = site.api_query(action="sitematrix")["sitematrix"]
  139. del matrix["count"]
  140. languages, projects = set(), set()
  141. for site in matrix.itervalues():
  142. if isinstance(site, list): # Special sites
  143. for special in site:
  144. if "closed" not in special and "private" not in special:
  145. full = urlparse(special["url"]).netloc
  146. if full.count(".") == 1: # No subdomain, so use "www"
  147. lang, project = "www", full.split(".")[0]
  148. else:
  149. lang, project = full.rsplit(".", 2)[:2]
  150. code = u"{0}::{1}".format(lang, special["dbname"])
  151. name = special["code"].capitalize()
  152. languages.add((code, u"{0} ({1})".format(lang, name)))
  153. projects.add((project, project.capitalize()))
  154. continue
  155. this = set()
  156. for web in site["site"]:
  157. if "closed" in web:
  158. continue
  159. project = "wikipedia" if web["code"] == u"wiki" else web["code"]
  160. this.add((project, project.capitalize()))
  161. if this:
  162. code = site["code"]
  163. name = site["name"].encode("unicode_escape")
  164. languages.add((code, u"{0} ({1})".format(code, name)))
  165. projects |= this
  166. save_site_updates(cursor, languages, projects)
  167. def save_site_updates(cursor, languages, projects):
  168. query1 = "SELECT lang_code, lang_name FROM language"
  169. query2 = "DELETE FROM language WHERE lang_code = ? AND lang_name = ?"
  170. query3 = "INSERT INTO language VALUES (?, ?)"
  171. query4 = "SELECT project_code, project_name FROM project"
  172. query5 = "DELETE FROM project WHERE project_code = ? AND project_name = ?"
  173. query6 = "INSERT INTO project VALUES (?, ?)"
  174. query7 = "SELECT 1 FROM updates WHERE update_service = ?"
  175. query8 = "UPDATE updates SET update_time = ? WHERE update_service = ?"
  176. query9 = "INSERT INTO updates VALUES (?, ?)"
  177. synchronize_sites_with_db(cursor, languages, query1, query2, query3)
  178. synchronize_sites_with_db(cursor, projects, query4, query5, query6)
  179. cursor.execute(query7, ("sites",))
  180. if cursor.fetchall():
  181. cursor.execute(query8, (time(), "sites"))
  182. else:
  183. cursor.execute(query9, ("sites", time()))
  184. def synchronize_sites_with_db(cursor, updates, q_list, q_rmv, q_update):
  185. removals = []
  186. cursor.execute(q_list)
  187. for site in cursor:
  188. updates.remove(site) if site in updates else removals.append(site)
  189. cursor.executemany(q_rmv, removals)
  190. cursor.executemany(q_update, updates)
  191. def highlight_delta(chain, delta):
  192. processed = []
  193. prev_prev = prev = chain.START
  194. i = 0
  195. all_words = chain.text.split()
  196. paragraphs = chain.text.split("\n")
  197. for paragraph in paragraphs:
  198. processed_words = []
  199. words = paragraph.split(" ")
  200. for word, i in zip(words, count(i)):
  201. try:
  202. next = strip_word(all_words[i+1])
  203. except IndexError:
  204. next = chain.END
  205. sword = strip_word(word)
  206. block = (prev_prev, prev) # Block for before
  207. alock = (prev, sword) # Block for after
  208. before = [block in delta.chain and sword in delta.chain[block]]
  209. after = [alock in delta.chain and next in delta.chain[alock]]
  210. is_first = i == 0
  211. is_last = i + 1 == len(all_words)
  212. res = highlight_word(word, before, after, is_first, is_last)
  213. processed_words.append(res)
  214. prev_prev = prev
  215. prev = sword
  216. processed.append(u" ".join(processed_words))
  217. i += 1
  218. return u"<br /><br />".join(processed)
  219. def highlight_word(word, before, after, is_first, is_last):
  220. if before and after:
  221. # Word is in the middle of a highlighted block, so don't change
  222. # anything unless this is the first word (force block to start) or
  223. # the last word (force block to end):
  224. res = word
  225. if is_first:
  226. res = u'<span class="cv-hl">' + res
  227. if is_last:
  228. res += u'</span>'
  229. elif before:
  230. # Word is the last in a highlighted block, so fade it out and then
  231. # end the block; force open a block before the word if this is the
  232. # first word:
  233. res = fade_word(word, u"out") + u"</span>"
  234. if is_first:
  235. res = u'<span class="cv-hl">' + res
  236. elif after:
  237. # Word is the first in a highlighted block, so start the block and
  238. # then fade it in; force close the block after the word if this is
  239. # the last word:
  240. res = u'<span class="cv-hl">' + fade_word(word, u"in")
  241. if is_last:
  242. res += u"</span>"
  243. else:
  244. # Word is completely outside of a highlighted block, so do nothing:
  245. res = word
  246. return res
  247. def fade_word(word, dir):
  248. if len(word) <= 4:
  249. return u'<span class="cv-hl-{0}">{1}</span>'.format(dir, word)
  250. if dir == u"out":
  251. return u'{0}<span class="cv-hl-out">{1}</span>'.format(word[:-4], word[-4:])
  252. return u'<span class="cv-hl-in">{0}</span>{1}'.format(word[:4], word[4:])
  253. def strip_word(word):
  254. return sub("[^\w\s-]", "", word.lower(), flags=UNICODE)
  255. def urlstrip(url):
  256. if url.startswith("http://"):
  257. url = url[7:]
  258. if url.startswith("https://"):
  259. url = url[8:]
  260. if url.startswith("www."):
  261. url = url[4:]
  262. if url.endswith("/"):
  263. url = url[:-1]
  264. return url
  265. %>\
<%
# Parse the request's query string and resolve the target site/page,
# running a copyvio check when a full site + title was supplied.
# NOTE(review): `environ` is assumed to be a WSGI environ dict provided
# by the surrounding Mako context -- confirm against the framework.
lang = orig_lang = project = name = title = url = None
query = parse_qs(environ["QUERY_STRING"])
if "lang" in query:
    lang = orig_lang = query["lang"][0].decode("utf8").lower()
    # "lang::dbname" encodes a special (non-language) Wikimedia site.
    if "::" in lang:
        lang, name = lang.split("::", 1)
if "project" in query:
    project = query["project"][0].decode("utf8").lower()
if "title" in query:
    title = query["title"][0].decode("utf8")
if "url" in query:
    url = query["url"][0].decode("utf8")
bot = Bot(".earwigbot")
default_site = bot.wiki.get_site()
all_langs, all_projects = get_sites(bot)
# Only run a check when enough information was given; otherwise just
# render the empty form against the default site.
if lang and project and title:
    site, page, result = get_results(bot, lang, project, name,
                                     all_projects, title, url, query)
else:
    site, page, result = default_site, None, None
%>\
  288. <%include file="/support/header.mako" args="environ=environ, title='Copyvio Detector', add_css=('copyvios.css',), add_js=('copyvios.js',)"/>
  289. <h1>Copyvio Detector</h1>
  290. <p>This tool attempts to detect <a href="//en.wikipedia.org/wiki/WP:COPYVIO">copyright violations</a> in articles. Simply give the title of the page you want to check and hit Submit. The tool will then search for its content elsewhere on the web and display a report if a similar webpage is found. If you also provide a URL, it will not query any search engines and instead display a report comparing the article to that particular webpage, like the <a href="//toolserver.org/~dcoetzee/duplicationdetector/">Duplication Detector</a>. Check out the <a href="//en.wikipedia.org/wiki/User:EarwigBot/Copyvios/FAQ">FAQ</a> for more information and technical details.</p>
  291. <form action="${environ['PATH_INFO']}" method="get">
  292. <table>
  293. <tr>
  294. <td>Site:</td>
  295. <td>
  296. <tt>http://</tt>
  297. <select name="lang">
  298. <% selected_lang = orig_lang if orig_lang else default_site.lang %>
  299. % for code, name in all_langs:
  300. % if code == selected_lang:
  301. <option value="${code}" selected="selected">${name}</option>
  302. % else:
  303. <option value="${code}">${name}</option>
  304. % endif
  305. % endfor
  306. </select>
  307. <tt>.</tt>
  308. <select name="project">
  309. <% selected_project = project if project else default_site.project %>
  310. % for code, name in all_projects:
  311. % if code == selected_project:
  312. <option value="${code}" selected="selected">${name}</option>
  313. % else:
  314. <option value="${code}">${name}</option>
  315. % endif
  316. % endfor
  317. </select>
  318. <tt>.org</tt>
  319. </td>
  320. </tr>
  321. <tr>
  322. <td>Page title:</td>
  323. % if page:
  324. <td><input type="text" name="title" size="60" value="${page.title | h}" /></td>
  325. % elif title:
  326. <td><input type="text" name="title" size="60" value="${title | h}" /></td>
  327. % else:
  328. <td><input type="text" name="title" size="60" /></td>
  329. % endif
  330. </tr>
  331. <tr>
  332. <td>URL (optional):</td>
  333. % if url:
  334. <td><input type="text" name="url" size="120" value="${url | h}" /></td>
  335. % else:
  336. <td><input type="text" name="url" size="120" /></td>
  337. % endif
  338. </tr>
  339. % if query.get("nocache") or page:
  340. <tr>
  341. <td>Bypass cache:</td>
  342. % if query.get("nocache"):
  343. <td><input type="checkbox" name="nocache" value="1" checked="checked" /></td>
  344. % else:
  345. <td><input type="checkbox" name="nocache" value="1" /></td>
  346. % endif
  347. </tr>
  348. % endif
  349. <tr>
  350. <td><button type="submit">Submit</button></td>
  351. </tr>
  352. </table>
  353. </form>
  354. % if project and lang and title and not page:
  355. <div class="divider"></div>
  356. <div id="cv-result-yes">
  357. <p>The given site, (project=<b><tt>${project}</tt></b>, language=<b><tt>${lang}</tt></b>) doesn't seem to exist. <a href="//${lang}.${project}.org/">Check its URL?</a></p>
  358. </div>
  359. % elif project and lang and title and page and not result:
  360. <div class="divider"></div>
  361. <div id="cv-result-yes">
  362. <p>The given page, <a href="${page.url}">${page.title | h}</a>, doesn't seem to exist.</p>
  363. </div>
  364. % elif page:
  365. <div class="divider"></div>
  366. <div id="cv-result-${'yes' if result.violation else 'no'}">
  367. % if result.violation:
  368. <h2 id="cv-result-header"><a href="${page.url}">${page.title | h}</a> is a suspected violation of <a href="${result.url | h}">${result.url | urlstrip}</a>.</h2>
  369. % else:
  370. <h2 id="cv-result-header">No violations detected in <a href="${page.url()}">${page.title | h}</a>.</h2>
  371. % endif
  372. <ul id="cv-result-list">
  373. <li><b><tt>${round(result.confidence * 100, 1)}%</tt></b> confidence of a violation.</li>
  374. % if result.cached:
  375. <li>Results are <a id="cv-cached" href="#">cached
  376. <span>To save time (and money), this tool will retain the results of checks for up to 72 hours. This includes the URL of the "violated" source, but neither its content nor the content of the article. Future checks on the same page (assuming it remains unchanged) will not involve additional search queries, but a fresh comparison against the source URL will be made. If the page is modified, a new check will be run.</span>
  377. </a> from ${result.cache_time} (${result.cache_age} ago). <a href="${environ['REQUEST_URI'].decode("utf8") | h}&amp;nocache=1">Bypass the cache.</a></li>
  378. % else:
  379. <li>Results generated in <tt>${round(result.tdiff, 3)}</tt> seconds using <tt>${result.queries}</tt> queries.</li>
  380. % endif
  381. <li><a id="cv-result-detail-link" href="#cv-result-detail" onclick="copyvio_toggle_details()">Show details:</a></li>
  382. </ul>
  383. <div id="cv-result-detail" style="display: none;">
  384. <ul id="cv-result-detail-list">
  385. <li>Trigrams: <i>Article:</i> <tt>${result.article_chain.size()}</tt> / <i>Source:</i> <tt>${result.source_chain.size()}</tt> / <i>Delta:</i> <tt>${result.delta_chain.size()}</tt></li>
  386. % if result.cached:
  387. % if result.queries:
  388. <li>Retrieved from cache in <tt>${round(result.tdiff, 3)}</tt> seconds (originally generated in <tt>${round(result.original_tdiff, 3)}</tt>s using <tt>${result.queries}</tt> queries; <tt>${round(result.original_tdiff - result.tdiff, 3)}</tt>s saved).</li>
  389. % else:
  390. <li>Retrieved from cache in <tt>${round(result.tdiff, 3)}</tt> seconds (originally generated in <tt>${round(result.original_tdiff, 3)}</tt>s; <tt>${round(result.original_tdiff - result.tdiff, 3)}</tt>s saved).</li>
  391. % endif
  392. % endif
  393. % if result.queries:
  394. <li><i>Fun fact:</i> The Wikimedia Foundation paid Yahoo! Inc. <a href="http://info.yahoo.com/legal/us/yahoo/search/bosspricing/details.html">$${result.queries * 0.0008} USD</a> for these results.</li>
  395. % endif
  396. </ul>
  397. <table id="cv-chain-table">
  398. <tr>
  399. <td>Article: <div class="cv-chain-detail"><p>${highlight_delta(result.article_chain, result.delta_chain)}</p></div></td>
  400. <td>Source: <div class="cv-chain-detail"><p>${highlight_delta(result.source_chain, result.delta_chain)}</p></div></td>
  401. </tr>
  402. </table>
  403. </div>
  404. </div>
  405. % endif
  406. <%include file="/support/footer.mako" args="environ=environ"/>