Commits

Brad Montgomery  committed 8fed387 Draft

fixed forks hack to work with bitbucket redesign

  • Participants
  • Parent commits e089699

Comments (0)

Files changed (1)

File bitbucket/api.py

         that can be retrieved from a repo's page. So, we scrape it.
         Yes, this feels dirty :(
         """
-        url = 'https://bitbucket.org/%s/%s/src/' % (self.username, self.slug)
+        url = 'https://bitbucket.org/{0}/{1}'.format(self.username, self.slug)
         response = urlopen(Request(url))
         if response.code == 200:
-            self.scraped_content = strip_tags(response.read().lower())
+            self.scraped_content = response.read().lower()
         else:   
             self.scraped_content = '' 
         response.close()
 
     def forks(self):
         """ 
-        This is a dirty hack to get the number of Forks/Queues by scraping the src page.
-        If the scraping fails, this just returns None.
+        This is a dirty hack to get the number of Forks/Queues by scraping
+        the project page. If the scraping fails, this just returns None.
+
+        Number of forks is in content that looks like this:
+            
+            <a href="#forks">
+                <span class="value">1</span>
+                Fork
+            </a>
+
         """
         if not self.scraped_content:
             self._scrape()
         if self.scraped_content:
-            # Dirty hack to find X in the first occurrence of the string: "Forks/Queues (X)"
-            start_search = self.scraped_content.find('forks/queues')
-            if start_search > 0:
-                open_paren_location = self.scraped_content.find('(', start_search)
-                close_paren_location = self.scraped_content.find(')', start_search)
-                num_forks = self.scraped_content[open_paren_location + 1:close_paren_location] # first part of the slice includes the paren
+            start = self.scraped_content.find('<a href="#forks">')
+            if start > 0:
+                end = start + self.scraped_content[start:].find("</span>")
+                num_forks = strip_tags(self.scraped_content[start:end].strip())
                 return int(num_forks)
         return None