A semantic search engine for source code https://bitshift.benkurtovic.com/
  1. """
  2. :synopsis: Main crawler module, to oversee all site-specific crawlers.
  3. Contains all website/framework-specific Class crawlers.
  4. """
  5. import logging
  6. import math
  7. import time
  8. import threading
  9. import requests
  10. from . import indexer
  11. class GitHubCrawler(threading.Thread):
  12. """
  13. Crawler that retrieves links to all of GitHub's public repositories.
  14. GitHubCrawler is a threaded singleton that queries GitHub's API for urls
  15. to its public repositories, which it inserts into a :class:`Queue.Queue`
  16. shared with :class:`indexer.GitIndexer`.
  17. :ivar clone_queue: (:class:`Queue.Queue`) Contains :class:`GitRepository`
  18. with repository metadata retrieved by :class:`GitHubCrawler`, and other Git
  19. crawlers, to be processed by :class:`indexer.GitIndexer`.
  20. :ivar _logger: (:class:`logging.Logger`) A class-specific logger object.
  21. """
    AUTHENTICATION = {
        "client_id" : "436cb884ae09be7f2a4e",
        "client_secret" : "8deeefbc2439409c5b7a092fd086772fe8b1f24e"
    }

    def __init__(self, clone_queue, run_event):
        """
        Create an instance of the singleton `GitHubCrawler`.

        :param clone_queue: see :attr:`self.clone_queue`
        :type clone_queue: see :attr:`self.clone_queue`
        """

        self.clone_queue = clone_queue
        self.run_event = run_event
        self._logger = logging.getLogger("%s.%s" %
                (__name__, self.__class__.__name__))
        self._logger.info("Starting.")
        super(GitHubCrawler, self).__init__(name=self.__class__.__name__)

    def run(self):
        """
        Query the GitHub API for data about every public repository.

        Pull all of GitHub's repositories by making calls to its API in a
        loop, accessing a subsequent page of results via the "next" URL
        returned in an API response header. Uses Severyn Kozak's (sevko)
        authentication credentials. For every new repository, a
        :class:`GitRepository` is inserted into :attr:`self.clone_queue`.
        """

        next_api_url = "https://api.github.com/repositories"
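        # Pace requests to stay under the authenticated rate limit of 5,000
        # calls per hour. Note that the constant below evaluates to ~1.39
        # seconds between calls, which is more conservative than the exact
        # interval for that limit (60 ** 2 / 5e3, or 0.72 seconds).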
        api_request_interval = 5e3 / 60 ** 2

        while next_api_url and self.run_event.is_set():
            start_time = time.time()

            try:
                resp = requests.get(next_api_url, params=self.AUTHENTICATION)
            except requests.ConnectionError:
                self._logger.exception("API %s call failed:" % next_api_url)
                time.sleep(0.5)
                continue

            queue_percent_full = (float(self.clone_queue.qsize()) /
                    self.clone_queue.maxsize) * 100
            self._logger.info("API call made. Queue size: %d/%d, %d%%." %
                    (self.clone_queue.qsize(), self.clone_queue.maxsize,
                    queue_percent_full))

            repo_names = [repo["full_name"] for repo in resp.json()]
            repo_ranks = self._get_repository_ranks(repo_names)

            for repo in resp.json():
                while self.clone_queue.full():
                    time.sleep(1)

                self.clone_queue.put(indexer.GitRepository(
                        repo["html_url"], repo["full_name"], "GitHub",
                        repo_ranks[repo["full_name"]]))
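            # x-ratelimit-remaining counts the calls left in the current
            # window, and x-ratelimit-reset is the Unix timestamp at which the
            # window resets, so sleeping for the difference waits out an
            # exhausted quota.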
            if int(resp.headers["x-ratelimit-remaining"]) == 0:
                time.sleep(int(resp.headers["x-ratelimit-reset"]) -
                        time.time())
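            # The "link" header looks like '<url>; rel="next", ...'; slicing
            # off the leading "<" and everything after the first ">" leaves
            # the URL of the next page of results.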
            next_api_url = resp.headers["link"].split(">")[0][1:]

            sleep_time = api_request_interval - (time.time() - start_time)
            if sleep_time > 0:
                time.sleep(sleep_time)
    def _get_repository_ranks(self, repo_names):
        """
        Return the ranks for several repositories.

        Queries the GitHub API for the number of stargazers for any given
        repositories, and blocks if the query limit is exceeded. The rank is
        calculated using these numbers.

        :param repo_names: An array of repository names, in
            `username/repository_name` format.
        :type repo_names: list of str

        :return: A dictionary mapping repository names to ranks. Example:

            .. code-block:: python

                {
                    "user/repository" : 0.2564949357461537
                }

        :rtype: dictionary
        """
        API_URL = "https://api.github.com/search/repositories"
        REPOS_PER_QUERY = 25

        repo_ranks = {}
        for names in [repo_names[ind:ind + REPOS_PER_QUERY] for ind in
                xrange(0, len(repo_names), REPOS_PER_QUERY)]:
            query_url = "%s?q=%s" % (API_URL,
                    "+".join("repo:%s" % name for name in names))

            params = self.AUTHENTICATION
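            # The custom Accept header opts into the media type GitHub
            # required while the search API was still a developer preview.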
            resp = requests.get(query_url,
                    params=params,
                    headers={
                        "Accept" : "application/vnd.github.preview"
                    })

            if int(resp.headers["x-ratelimit-remaining"]) == 0:
                sleep_time = int(resp.headers["x-ratelimit-reset"]) - \
                        time.time() + 1
                if sleep_time > 0:
                    self._logger.info("API quota exceeded. Sleep time: %d." %
                            sleep_time)
                    time.sleep(sleep_time)
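            # Rank is the base-5000 logarithm of the stargazer count, clamped
            # to [0.0, 1.0]: one star maps to 0.0, 5,000 or more stars map to
            # 1.0, and e.g. 500 stars map to about 0.73.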
            for repo in resp.json()["items"]:
                stars = repo["stargazers_count"]
                rank = min(math.log(max(stars, 1), 5000), 1.0)
                repo_ranks[repo["full_name"]] = rank
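        # Names absent from the search results (presumably deleted or renamed
        # repositories) fall back to a low default rank.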
        for name in repo_names:
            if name not in repo_ranks:
                repo_ranks[name] = 0.1

        return repo_ranks

class BitbucketCrawler(threading.Thread):
    """
    Crawler that retrieves links to all of Bitbucket's public repositories.

    BitbucketCrawler is a threaded singleton that queries Bitbucket's API for
    urls to its public repositories, and inserts them as
    :class:`indexer.GitRepository` into a :class:`Queue.Queue` shared with
    :class:`indexer.GitIndexer`.

    :ivar clone_queue: (:class:`Queue.Queue`) The shared queue to insert
        :class:`indexer.GitRepository` repository urls into.
    :ivar _logger: (:class:`logging.Logger`) A class-specific logger object.
    """

    def __init__(self, clone_queue, run_event):
        """
        Create an instance of the singleton `BitbucketCrawler`.

        :param clone_queue: see :attr:`self.clone_queue`
        :type clone_queue: see :attr:`self.clone_queue`
        """

        self.clone_queue = clone_queue
        self.run_event = run_event
        self._logger = logging.getLogger("%s.%s" %
                (__name__, self.__class__.__name__))
        self._logger.info("Starting.")
        super(BitbucketCrawler, self).__init__(name=self.__class__.__name__)
    def run(self):
        """
        Query the Bitbucket API for data about every public repository.

        Query the Bitbucket API's "/repositories" endpoint and read its
        paginated responses in a loop; any "git" repositories have their
        clone-urls and names inserted into a :class:`indexer.GitRepository` in
        :attr:`self.clone_queue`.
        """

        next_api_url = "https://api.bitbucket.org/2.0/repositories"

        while next_api_url and self.run_event.is_set():
            try:
                response = requests.get(next_api_url).json()
            except requests.ConnectionError:
                self._logger.exception("API %s call failed:", next_api_url)
                time.sleep(0.5)
                continue

            queue_percent_full = (float(self.clone_queue.qsize()) /
                    self.clone_queue.maxsize) * 100
            self._logger.info("API call made. Queue size: %d/%d, %d%%." %
                    (self.clone_queue.qsize(), self.clone_queue.maxsize,
                    queue_percent_full))
            for repo in response["values"]:
                if repo["scm"] == "git":
                    while self.clone_queue.full():
                        time.sleep(1)
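                    # Bitbucket lists both an "https" and an "ssh" clone link
                    # for each repository; pick the https one, whichever
                    # position it appears in.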
                    clone_links = repo["links"]["clone"]
                    clone_url = (clone_links[0]["href"] if
                            clone_links[0]["name"] == "https" else
                            clone_links[1]["href"])
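                    # Rank is derived from the watcher count, on a base-500
                    # log scale. Note that len() over a single response only
                    # counts the first page of watchers, so the rank of
                    # heavily-watched repositories is presumably understated.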
                    try:
                        watchers = requests.get(
                                repo["links"]["watchers"]["href"])
                        num = len(watchers.json()["values"])
                        rank = min(math.log(max(num, 1), 500), 1.0)
                    except requests.ConnectionError:
                        err = "API %s call failed:" % next_api_url
                        self._logger.exception(err)
                        time.sleep(0.5)
                        continue

                    self.clone_queue.put(indexer.GitRepository(
                            clone_url, repo["full_name"], "Bitbucket", rank))
            # The final page of results has no "next" key; .get() returns
            # None there, ending the loop.
            next_api_url = response.get("next")
            time.sleep(0.2)
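
# A minimal usage sketch (assumed; the actual wiring lives elsewhere in
# bitshift): both crawlers share a bounded queue feeding
# indexer.GitIndexer and a threading.Event used to signal shutdown. The
# queue capacity and polling interval here are illustrative, not taken
# from the original module.
#
#     import Queue
#     import threading
#
#     clone_queue = Queue.Queue(maxsize=100)
#     run_event = threading.Event()
#     run_event.set()
#
#     crawlers = [GitHubCrawler(clone_queue, run_event),
#                 BitbucketCrawler(clone_queue, run_event)]
#     for crawler in crawlers:
#         crawler.start()
#
#     try:
#         while True:
#             time.sleep(1)
#     except KeyboardInterrupt:
#         run_event.clear()  # crawler loops check this flag and exit
#         for crawler in crawlers:
#             crawler.join()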