Commits

edanm committed 7d22fbf

Added support for all SE sites

Comments (0)

Files changed (8)

stack2blog/settings_edancomp.py

 from base_settings import *
+import os
+
+SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
 
 DEBUG = True
 TEMPLATE_DEBUG = DEBUG
 
 DATABASE_ENGINE = 'sqlite3'           # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
-DATABASE_NAME   = 'e:/Projects/Web/Stack2Blog/db/db.sqlite'
+DATABASE_NAME   = os.path.abspath(os.path.join(SITE_ROOT, "../../../db/db.sqlite")).replace("\\", "/")
 DATABASE_USER = ''             # Not used with sqlite3.
 DATABASE_PASSWORD = ''         # Not used with sqlite3.
 DATABASE_HOST = ''             # Set to empty string for localhost. Not used with sqlite3.
 
 # Absolute path to the directory that holds media.
 # Example: "/home/media/media.lawrence.com/"
-MEDIA_ROOT = 'e:/Projects/Web/Stack2Blog/site_media'
+MEDIA_ROOT = os.path.abspath(os.path.join(SITE_ROOT, "../../../site_media")).replace("\\", "/")

stack2blog/stack2blogapp/stackcore.py

+# stackcore.py - JSONModel/Enumeration and other utility classes that don't really belong anywhere else now that the API is split across multiple files
+# This file is safe to "import *" - it doesn't even
+# import anything itself.
+
+## JSONModel base class
+class JSONModel(object):
+	"""The base class of all the objects which describe API objects directly - i.e., those which take JSON objects as parameters to their constructors."""
+
+	def __init__(self, json, site, skip_ext=False):
+		self.json_ob = DictObject(json)
+		self.site = site
+
+		for f in [x for x in self.transfer if hasattr(self.json_ob, x)]:
+			setattr(self, f, getattr(self.json_ob, f))
+
+		if hasattr(self, '_extend') and not skip_ext:
+			self._extend(self.json_ob, site)
+
+	def fetch(self):
+		"""Fetches all the data that the model can describe, not just the attributes which were specified in the original response."""
+		if hasattr(self, 'fetch_callback'):
+			res = self.fetch_callback(self, self.site)
+
+			if isinstance(res, dict):
+				self.__init__(res, self.site)
+			elif hasattr(res, 'json_ob'):
+				self.__init__(res.json_ob, self.site)
+			else:
+				raise ValueError('Supplied fetch callback did not return a usable value.')
+		else:
+			return False
+	
+	# Allows the easy creation of updateable, partial classes
+	@classmethod
+	def partial(cls, fetch_callback, site, populate):
+		"""Creates a partial description of the API object, with the proviso that the full set of data can be fetched later."""
+
+		model = cls({}, site, True)
+		
+		for k, v in populate.iteritems():
+			setattr(model, k, v)
+
+		model.fetch_callback = fetch_callback
+
+		return model
+
+	# for use with Lazy classes that need a callback to actually set the model property
+	def _up(self, a):
+		"""Returns a function which can be used with the LazySequence class to actually update the results properties on the model with the
+new fetched data."""
+
+		def inner(m):
+			setattr(self, a, m)
+		return inner
+
+class Enumeration(object):
+	"""Provides a base class for enumeration classes. (Similar to 'enum' types in other languages.)"""
+
+	@classmethod
+	def from_string(cls, text, typ=None):
+		if typ is not None:
+			if hasattr(typ, '_map') and text in typ._map:
+				return getattr(typ, typ._map[text])
+			elif hasattr(typ, text[0].upper() + text[1:]):
+				return getattr(typ, text[0].upper() + text[1:])
+			else:
+				return None
+		else:
+			return cls.from_string(text, cls)
+
+class StackExchangeError(Exception):
+	"""A generic error thrown on a bad HTTP request during a StackExchange API request."""
+	def __init__(self, urlerror):
+		self.urlerror = urlerror
+	def __str__(self):
+		return 'Received HTTP error \'%d\'.' % self.urlerror.code
+
+
+class StackExchangeResultset(tuple):
+	"""Defines an immutable, paginated resultset. This class can be used as a tuple, but provides extended metadata as well, including methods
+to fetch the next page."""
+
+	def __new__(cls, items, page, pagesize, build_info):
+		cls.page, cls.pagesize, cls.build_info = page, pagesize, build_info
+		return tuple.__new__(cls, items)
+	
+	def reload(self):
+		"""Refreshes the data in the resultset with fresh API data. Note that this doesn't work with extended resultsets."""
+		# kind of a cheat, but oh well
+		return self.fetch_page(self.page)
+
+	def fetch_page(self, page, **kw):
+		"""Returns a new resultset containing data from the specified page of the results. It re-uses all parameters that were passed in
+to the initial function which created the resultset."""
+		new_params = list(self.build_info)
+		new_params[4] = new_params[4].copy()
+		new_params[4].update(kw)
+		new_params[4]['page'] = page
+		return new_params[0].build(*new_params[1:])
+	
+	def fetch_extended(self, page):
+		"""Returns a new resultset containing data from this resultset AND from the specified page."""
+		next = self.fetch_page(page)
+		extended = self + next
+
+		# max(1, ...) is so a non-zero, positive result for page is always found
+		return StackExchangeResultset(extended, max(1, self.page - 1), self.pagesize + next.pagesize, self.build_info)
+
+	def fetch_next(self):
+		"""Returns the resultset of the data in the next page."""
+		return self.fetch_page(self.page + 1)
+	
+	def extend_next(self):
+		"""Returns a new resultset containing data from this resultset AND from the next page."""
+		return self.fetch_extended(self.page + 1)
+
+class NeedsAwokenError(Exception):
+	"""An error raised when an attempt is made to access a property of a lazy collection that requires the data to have been fetched,
+but whose data has not yet been requested."""
+
+	def __init__(self, lazy):
+		self.lazy = lazy
+	def __str__(self):
+		return 'Could not return requested data; the sequence of "%s" has not been fetched.' % self.lazy.m_type.__name__
+
+class StackExchangeLazySequence(list):
+	"""Provides a sequence which *can* contain extra data available on an object. It is 'lazy' in the sense that data is only fetched when
+required - not on object creation."""
+
+	def __init__(self, m_type, count, site, url, fetch=None, collection=None):
+		self.m_type = m_type
+		self.count = count
+		self.site = site
+		self.url = url
+		self.fetch_callback = fetch
+		self.collection = collection if collection != None else self._collection(url)
+	
+	def _collection(self, c):
+		return c.split('/')[-1]
+
+	def __len__(self):
+		if self.count != None:
+			return self.count
+		else:
+			raise NeedsAwokenError(self)
+	
+	def fetch(self, **kw):
+		"""Fetch, from the API, the data this sequence is meant to hold."""
+
+		res = self.site.build(self.url, self.m_type, self.collection, kw)
+		if self.fetch_callback != None:
+			self.fetch_callback(res)
+		return res
+
+#### Hack, because I can't be bothered to fix my mistaking JSON's output for an object not a dict
+# Attrib: Eli Bendersky, http://stackoverflow.com/questions/1305532/convert-python-dict-to-object/1305663#1305663
+class DictObject:
+    def __init__(self, entries): 
+        self.__dict__.update(entries)
+
+class JSONMangler(object):
+	"""Converts raw JSON API responses into strongly-typed models and resultsets."""
+
+	@staticmethod
+	def paginated_to_resultset(site, json, typ, collection, params):
+		page = json['page']
+		pagesize = json['pagesize']
+		items = []
+
+		# create strongly-typed objects from the JSON items
+		for json_item in json[collection]:
+			json_item['_params_'] = params[-1] # convenient access to the kw hash
+			items.append(typ(json_item, site))
+
+		return StackExchangeResultset(items, page, pagesize, params)
+	
+	@staticmethod
+	def normal_to_resultset(site, json, typ, collection):
+		return tuple([typ(x, site) for x in json[collection]])
+	
+	@classmethod
+	def json_to_resultset(cls, site, json, typ, collection, params=None):
+		if 'page' in json:
+			# we have a paginated resultset
+			return cls.paginated_to_resultset(site, json, typ, collection, params)
+		else:
+			# this isn't paginated (unlikely but possible - eg badges)
+			return cls.normal_to_resultset(site, json, typ, collection)
+
+
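A minimal sketch of how the helpers above behave on their own (the two-member PostType here is a stand-in for the real enumeration defined in stackexchange.py, which this diff does not show):

	# Illustrative stand-in; the real PostType lives in stackexchange.py.
	class PostType(Enumeration):
		Question = 'Question'
		Answer = 'Answer'

	print Enumeration.from_string('question', PostType)    # -> Question
	print PostType.from_string('answer')                    # subclasses work too

	ob = DictObject({'user_id': 1, 'display_name': 'example'})
	print ob.display_name                                    # attribute-style access to JSON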

stack2blog/stack2blogapp/stackexchange.py

-import urllib2, httplib, datetime, operator, StringIO, gzip
-try:
-	import json
-except ImportError:
-	try:
-		import simplejson as json
-	except ImportError:
-		from django.utils import simplejson as json
-
+import datetime, operator
+from stackweb import WebRequestManager
+from stackcore import *
 
 # Site constants
 StackOverflow = 'api.stackoverflow.com'
 StackApps = 'api.stackapps.com'
 MetaStackOverflow = 'api.meta.stackoverflow.com'
 
-#### Hack, because I can't be bothered to fix my mistaking JSON's output for an object not a dict
-# Attrib: Eli Bendersky, http://stackoverflow.com/questions/1305532/convert-python-dict-to-object/1305663#1305663
-class DictObject:
-    def __init__(self, entries):
-        self.__dict__.update(entries)
-
-class StackExchangeError(Exception):
-	"""A generic error thrown on a bad HTTP request during a StackExchange API request."""
-	def __init__(self, urlerror):
-		self.urlerror = urlerror
-	def __str__(self):
-		return 'Received HTTP error \'%d\'.' % self.urlerror.code
-
-class StackExchangeResultset(tuple):
-	"""Defines an immutable, paginated resultset. This class can be used as a tuple, but provides extended metadata as well, including methods
-to fetch the next page."""
-
-	def __new__(cls, items, page, pagesize, build_info):
-		cls.page, cls.pagesize, cls.build_info = page, pagesize, build_info
-		return tuple.__new__(cls, items)
-
-	def reload(self):
-		"""Refreshes the data in the resultset with fresh API data. Note that this doesn't work with extended resultsets."""
-		# kind of a cheat, but oh well
-		return self.fetch_page(self.page)
-
-	def fetch_page(self, page, **kw):
-		"""Returns a new resultset containing data from the specified page of the results. It re-uses all parameters that were passed in
-to the initial function which created the resultset."""
-		new_params = list(self.build_info)
-		new_params[4] = new_params[4].copy()
-		new_params[4].update(kw)
-		new_params[4]['page'] = page
-		return new_params[0].build(*new_params[1:])
-
-	def fetch_extended(self, page):
-		"""Returns a new resultset containing data from this resultset AND from the specified page."""
-		next = self.fetch_page(page)
-		extended = self + next
-
-		# max(0, ...) is so a non-zero, positive result for page is always found
-		return StackExchangeResultset(extended, max(1, self.page - 1), self.pagesize + next.pagesize, self.build_info)
-
-	def fetch_next(self):
-		"""Returns the resultset of the data in the next page."""
-		return self.fetch_page(self.page + 1)
-
-	def extend_next(self):
-		"""Returns a new resultset containing data from this resultset AND from the next page."""
-		return self.fetch_extended(self.page + 1)
-
-class Enumeration(object):
-	"""Provides a base class for enumeration classes. (Similar to 'enum' types in other languages.)"""
-
-	@classmethod
-	def from_string(cls, text, typ=None):
-		if typ is not None:
-			if hasattr(typ, '_map') and text in typ._map:
-				return getattr(typ, typ._map[text])
-			elif hasattr(typ, text[0].upper() + text[1:]):
-				return getattr(typ, text[0].upper() + text[1:])
-			else:
-				return None
-		else:
-			return cls.from_string(text, cls)
-
-class NeedsAwokenError(Exception):
-	"""An error raised when an attempt is made to access a property of a lazy collection that requires the data to have been fetched,
-but whose data has not yet been requested."""
-
-	def __init__(self, lazy):
-		self.lazy = lazy
-	def __str__(self):
-		return 'Could not return requested data; the sequence of "%s" has not been fetched.' % self.lazy.m_lazy
-
-class StackExchangeLazySequence(list):
-	"""Provides a sequence which *can* contain extra data available on an object. It is 'lazy' in the sense that data is only fetched when
-required - not on object creation."""
-
-	def __init__(self, m_type, count, site, url, fetch=None, collection=None):
-		self.m_type = m_type
-		self.count = count
-		self.site = site
-		self.url = url
-		self.fetch_callback = fetch
-		self.collection = collection if collection != None else self._collection(url)
-
-	def _collection(self, c):
-		return c.split('/')[-1]
-
-	def __len__(self):
-		if self.count != None:
-			return self.count
-		else:
-			raise NeedsAwokenError(self)
-
-	def fetch(self, **kw):
-		"""Fetch, from the API, the data this sequence is meant to hold."""
-
-		res = self.site.build(self.url, self.m_type, self.collection, kw)
-		if self.fetch_callback != None:
-			self.fetch_callback(res)
-		return res
-
-## JSONModel base class
-class JSONModel(object):
-	"""The base class of all the objects which describe API objects directly - ie, those which take JSON objects as parameters to their constructor."""
-
-	def __init__(self, json, site, skip_ext=False):
-		self.json_ob = DictObject(json)
-		self.site = site
-
-		for f in [x for x in self.transfer if hasattr(self.json_ob, x)]:
-			setattr(self, f, getattr(self.json_ob, f))
-
-		if hasattr(self, '_extend') and not skip_ext:
-			self._extend(self.json_ob, site)
-
-	def fetch(self):
-		"""Fetches all the data that the model can describe, not just the attributes which were specified in the original response."""
-		if hasattr(self, 'fetch_callback'):
-			res = self.fetch_callback(self, self.site)
-
-			if isinstance(res, dict):
-				self.__init__(res, self.site)
-			elif hasattr(res, 'json_ob'):
-				self.__init__(res.json_ob, self.site)
-			else:
-				raise ValueError('Supplied fetch callback did not return a usable value.')
-		else:
-			return False
-
-	# Allows the easy creation of updateable, partial classes
-	@classmethod
-	def partial(cls, fetch_callback, site, populate):
-		"""Creates a partial description of the API object, with the proviso that the full set of data can be fetched later."""
-
-		model = cls({}, site, True)
-
-		for k, v in populate.iteritems():
-			setattr(model, k, v)
-
-		model.fetch_callback = fetch_callback
-
-	# for use with Lazy classes that need a callback to actually set the model property
-	def _up(self, a):
-		"""Returns a function which can be used with the LazySequence class to actually update the results properties on the model with the
-new fetched data."""
-
-		def inner(m):
-			setattr(self, a, m)
-		return inner
-
 ##### Statistics    ###
 class Statistics(JSONModel):
 	"""Stores statistics for a StackExchange site."""
 
 		self.votes = (self.up_vote_count, self.down_vote_count)
 		self.url = 'http://' + self.site.root_domain + '/questions/' + str(self.question_id) + '/' + str(self.id) + '#' + str(self.id)
-
+	
 	def _get_user(s,id):
-		s._owner = self.site.user(id)
+		s._owner = s.site.user(id)
 		return s._owner
 	def _set_user(s,ob):
 		s._owner = ob
 	def _get_quest(s,id):
-		s._question = self.site.question(id)
+		s._question = s.site.question(id)
 		return s._question
 	def _set_quest(s,ob):
 		s._question = ob
-
+	
 	question = property(lambda self: self._question if self._question is not None else self._get_quest(self.question_id), _set_quest)
 	owner = property(lambda self: self._owner if self._owner is not None else self._get_user(self.owner_id), _set_user)
 
 
 		if hasattr(json, 'owner'):
 			self.owner_id = json.owner['user_id']
-
+	
 			owner_dict = json.owner
 			owner_dict['id'] = self.owner_id
 			del owner_dict['user_id']
 			owner_dict['user_type'] = UserType.from_string(owner_dict['user_type'])
-
+	
 			self.owner = User.partial(lambda self: self.site.user(self.id), site, owner_dict)
 
 		self.url = 'http://' + self.site.root_domain + '/questions/' + str(self.id)
 	transfer = ('post_id', 'score', 'edit_count', 'body')
 	def _extend(self, json, site):
 		self.id = json.comment_id
-
+		
 		self.creation_date = datetime.date.fromtimestamp(json.creation_date)
 		self.owner_id = json.owner['owner_id'] if 'owner_id' in json.owner else json.owner['user_id']
 		self.owner = User.partial(lambda self: self.site.user(self.id), site, {
-			'id': self.owner_id,
+			'id': self.owner_id, 
 			'user_type': Enumeration.from_string(json.owner['user_type'], UserType),
 			'display_name': json.owner['display_name'],
 			'reputation': json.owner['reputation'],
 			'email_hash': json.owner['email_hash']})
-
+		
 		if hasattr(json, 'reply_to'):
 			self.reply_to_user_id = json.reply_to['user_id']
 			self.reply_to = User.partial(lambda self: self.site.user(self.id), site, {
 				'display_name': json.reply_to['display_name'],
 				'reputation': json.reply_to['reputation'],
 				'email_hash': json.reply_to['email_hash']})
-
+		
 		self.post_type = PostType.from_string(json.post_type)
 	def get_post(self):
 		if self.post_type == PostType.Question:
 			return self.site.question(self.post_id)
 		elif self.post_type == PostType.Answer:
 			return self.site.answer(self.post_id)
-
+	
 	def __unicode__(self):
 		return u'Comment ' + str(self.id)
 	def __str__(self):
 	def _extend(self, json, site):
 		self.id = json.badge_id
 		self.recipients = StackExchangeLazySequence(User, None, site, json.badges_recipients_url, self._up('recipients'))
-
+	
 	def __str__(self):
 		return self.name
 
 		self.timeline_type = TimelineEventType.from_string(json.timeline_type)
 		self.post_type = PostType.from_string(json.post_type)
 		self.creation_date = datetime.date.fromtimestamp(json.creation_date)
-
+	
 class TimelineEventType(Enumeration):
 	"""Denotes the type of a timeline event."""
 	_map = {'askoranswered': 'AskOrAnswered'}
 	"""Describes a user on a StackExchange site."""
 
 	transfer = ('display_name', 'reputation', 'email_hash', 'age', 'website_url', 'location', 'about_me',
-		'view_count', 'up_vote_count', 'down_vote_count')
+		'view_count', 'up_vote_count', 'down_vote_count', 'association_id')
 	def _extend(self, json, site):
 		self.id = json.user_id
 		self.user_type = Enumeration.from_string(json.user_type, UserType)
 		}
 		self.gold_badges, self.silver_badges, self.bronze_badges = self.badge_counts_t
 		self.badge_total = reduce(operator.add, self.badge_counts_t)
-
+		
 		self.url = 'http://' + self.site.root_domain + '/users/' + str(self.id)
-
+	
 	def __unicode__(self):
 		return 'User %d [%s]' % (self.id, self.display_name)
 	def __str__(self):
 	def __init__(self, domain, app_key=None):
 		self.domain = domain
 		self.app_key = app_key
-		self.api_version = '0.8'
+		self.api_version = '0.9'
 		self.use_gzip = True
 
 		self.include_body = False
 		Comment: 'comments/%s',
 		Question: 'questions/%s',
 	}
-
+	
 	def _kw_to_str(self, ob):
 		try:
 			if not isinstance(ob, str):
 	def _request(self, to, params):
 		url = 'http://' + self.domain + '/' + self.api_version + '/' + to
 
-		done = False
+		new_params = {}
 		for k, v in params.iteritems():
-			if not done:
-				url += '?'
-				done = True
-			else: url += '&'
+			new_params[k] = self._kw_to_str(v)
+		if self.app_key != None:
+			new_params['app_key'] = self.app_key
 
-			url += '%s=%s' % (k, self._kw_to_str(v))
+		request_mgr = WebRequestManager(gzip=self.use_gzip)
+		json, info = request_mgr.json_request(url, new_params)
+		
+		self.rate_limit = (int(info.getheader('X-RateLimit-Current')), int(info.getheader('X-RateLimit-Max')))
+		self.requests_used = self.rate_limit[1] - self.rate_limit[0]
+		self.requests_left = self.rate_limit[0]
 
-		if self.app_key != None:
-			url += ('?' if not '?' in url else '&') + 'key=' + self.app_key
-
-		try:
-			request = urllib2.Request(url)
-			request.add_header('Accept-encoding', 'gzip')
-			req_open = urllib2.build_opener()
-			conn = req_open.open(request)
-
-			req_data = conn.read()
-
-			if self.use_gzip:
-				data_stream = StringIO.StringIO(req_data)
-				gzip_stream = gzip.GzipFile(fileobj=data_stream)
-				actual_data = gzip_stream.read()
-			else:
-				actual_data = req_data
-
-			dump = json.loads(actual_data)
-
-			info = conn.info()
-			self.rate_limit = (int(info.getheader('X-RateLimit-Current')), int(info.getheader('X-RateLimit-Max')))
-			self.requests_used = self.rate_limit[1] - self.rate_limit[0]
-			self.requests_left = self.rate_limit[0]
-
-			conn.close()
-
-			return dump
-		except urllib2.URLError, e:
-			raise StackExchangeError(e)
-
+		return json
+	
 	def _user_prop(self, qs, typ, coll, kw, prop='user_id'):
 		if prop not in kw:
 			raise LookupError('No user ID provided.')
 			kw['comments'] = str(self.include_comments).lower()
 
 		json = self._request(url, kw)
-
-		if 'page' in json:
-			# we have a paginated resultset
-			page = json['page']
-			pagesize = json['pagesize']
-			items = []
-
-			# create strongly-typed objects from the JSON items
-			for json_item in json[collection]:
-				json_item['_params_'] = kw	# convenient access to the kw hash
-				items.append(typ(json_item, self))
-
-			return StackExchangeResultset(items, page, pagesize, (self, url, typ, collection, kw))
-		else:
-			# this isn't a paginated resultset (unlikely, but possible - eg badges)
-			return tuple([typ(x, self) for x in json[collection]])
-
+		return JSONMangler.json_to_resultset(self, json, typ, collection, (self, url, typ, collection, kw))
+		
 	def build_from_snippet(self, json, typ):
 		return StackExchangeResultSet([typ(x, self) for x in json])
-
+	
 	def _get(self, typ, ids, coll, kw):
 		root = self.URL_Roots[typ] % ';'.join([str(x) for x in ids])
 		return self.build(root, typ, coll, kw)
 
 		u, = self.users((nid,), **kw)
 		return u
-
+	
 	def users(self, ids, **kw):
 		"""Retrieves a list of the users with the IDs specified in the `ids' parameter."""
 		return self._get(User, ids, 'users', kw)
 
 		a, = self.answers((nid,), **kw)
 		return a
-
+	
 	def answers(self, ids=None, **kw):
 		"""Retrieves a set of the answers with the IDs specified in the 'ids' parameter, or by the
 		user_id specified."""
 		"""Retrieves an object representing a comment with the ID `nid`."""
 		c, = self.comments((nid,), **kw)
 		return c
-
+	
 	def comments(self, ids=None, **kw):
 		"""Retrieves a set of the comments with the IDs specified in the 'ids' parameter."""
 		if ids == None:
 			return self._user_prop('comments', Comment, 'comments', kw)
 		else:
 			return self._get(Comment, ids, 'comments', kw)
-
+	
 	def question(self, nid, **kw):
 		"""Retrieves an object representing a question with the ID `nid`. Note that an answer ID can not be specified -
 unlike on the actual site, you will receive an error rather than a redirect to the actual question."""
 		q, = self.questions((nid,), **kw)
 		return q
-
+	
 	def questions(self, ids=None, **kw):
 		"""Retrieves a set of the comments with the IDs specified in the 'ids' parameter."""
 		if 'answers' not in kw:
 			return self._user_prop('questions', Question, 'questions', kw)
 		else:
 			return self._get(Question, ids, 'questions', kw)
-
+	
 	def recent_questions(self, **kw):
 		"""Returns the set of the most recent questions on the site, by last activity."""
 		if 'answers' not in kw:
 			kw['answers'] = 'true'
 		return self.build('questions', Question, 'questions', kw)
-
+	
 	def users_with_badge(self, bid, **kw):
 		"""Returns the set of all the users who have been awarded the badge with the ID 'bid'."""
 		return self.build('badges/' + str(bid), User, 'users', kw)
-
+	
 	def all_badges(self, **kw):
 		"""Returns the set of all the badges which can be awarded on the site, excluding those which are awarded for specific tags."""
 		return self.build('badges', Badge, 'badges', kw)
-
+	
 	def badges(self, ids=None, **kw):
 		"""Returns the users with the badges with IDs."""
 		if ids == None:
 		"""Returns an object representing the badge with the ID 'nid'."""
 		b, = self.badges((nid,), kw)
 		return b
-
+	
 	def all_tag_badges(self, **kw):
 		"""Returns the set of all the tag-based badges: those which are awarded for performance on a specific tag."""
 		return self.build('badges/tags', Badge, 'badges', kw)
-
+	
 	def all_tags(self, **kw):
 		return self.build('tags', Tag, 'tags', kw)
-
+	
 	def stats(self, **kw):
 		return self.build('stats', Statistics, 'statistics', kw)[0]
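
A minimal usage sketch of the refactored Site class (assumes network access; the user ID and pagesize below are placeholders):

	import stackexchange

	so = stackexchange.Site(stackexchange.StackOverflow)
	me = so.user(1)                             # placeholder user ID
	print me.display_name, me.reputation

	recent = so.recent_questions(pagesize=5)    # a StackExchangeResultset
	more = recent.extend_next()                 # this page plus the next
	print so.requests_left, 'API requests left'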

stack2blog/stack2blogapp/stackweb.py

+# stackweb.py - Core classes for web-request stuff
+
+import urllib2, StringIO, gzip
+try:
+	import json
+except ImportError:
+	try:
+		import simplejson as json
+	except ImportError:
+		from django.utils import simplejson as json
+
+class WebRequest(object):
+	data = ''
+	info = None
+
+	def __init__(self, data, info):
+		self.data = data
+		self.info = info
+
+	def __str__(self):
+		return str(self.data)
+
+class WebRequestManager(object):
+	def __init__(self, **kw):
+		self.use_gzip = kw['gzip'] if 'gzip' in kw else True
+
+	def request(self, url, params):
+		done = False
+		for k, v in params.iteritems():
+			if not done:
+				url += '?'
+				done = True
+			else: url += '&'
+
+			url += '%s=%s' % (k, v)
+		request = urllib2.Request(url)
+
+		if self.use_gzip:
+			request.add_header('Accept-encoding', 'gzip')
+		req_open = urllib2.build_opener()
+		conn = req_open.open(request)
+
+		req_data = conn.read()
+
+		if self.use_gzip:
+			data_stream = StringIO.StringIO(req_data)
+			gzip_stream = gzip.GzipFile(fileobj=data_stream)
+			actual_data = gzip_stream.read()
+		else:
+			actual_data = req_data
+
+		info = conn.info()
+		conn.close()
+
+		return WebRequest(actual_data, info)
+
+	def json_request(self, to, params):
+		req = self.request(to, params)
+		return (json.loads(req.data), req.info)
+
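A minimal sketch of the new WebRequestManager in isolation (assumes network access; the request mirrors how Site._request uses it):

	mgr = WebRequestManager(gzip=True)
	data, info = mgr.json_request('http://api.stackoverflow.com/0.9/stats', {})
	print data.keys()                               # parsed JSON dict, e.g. ['statistics']
	print info.getheader('X-RateLimit-Current')     # same header Site reads for rate limiting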

stack2blog/stack2blogapp/static_media/style.css

 
 #home_container #user_details label{
 	display: inline-block;
-	width: 80px;
+	width: 150px;
 }
 
 #home_container #user_details .input_userid {

stack2blog/stack2blogapp/templates/home.html

 <div id="home_container" class="container_12">
 
 	<div class="details_wrap">
-		<form id="user_details" class="grid_4" action="" method="post">
+		<form id="user_details" class="grid_8" action="" method="post">
+			<label for="site">Choose site:</label>
+			<select id="site" name="site">
+				{% for name, api in api_site_names %}
+					<option value="{{name}}">{{name}}</option>
+				{% endfor %}
+			</select>
+			<br/>
+			<br/>
+
 			<label for="userid">UserID: </label>
 			<input class="input_userid" type="text" name="userid" value="" id="userid"/>
 			<a class="find_id" href="how_to_find_your_stackoverflow_id/">How do I find my id?</a>

stack2blog/stack2blogapp/views.py

 import types
 from pdb import set_trace
 
+site_names = [
+	  ("StackOverflow", "api.stackoverflow.com")
+	, ("Meta.StackOverflow", "api.meta.stackoverflow.com")
+	, ("ServerFault", "api.serverfault.com")
+	, ("SuperUser", "api.superuser.com")
+	, ("StackApps", "api.stackapps.com")
+	, ("Webapps", "api.webapps.stackexchange.com")
+	, ("Meta.Webapps", "api.meta.webapps.stackexchange.com")
+]
+
+def siteNameToApi(name):
+	'''
+	Given the name of a site, returns its (name, api) tuple.
+	Returns None if the site name is invalid.
+	'''
+	for site_name in site_names:
+		if site_name[0] == name:
+			return site_name
+	else:
+		return None
+
 
 # Create your views here.
 def home_page(request):
 
 		request.session["error_message"] = ""
 
+		api_site_names = site_names
+
 		return render_to_response('home.html', locals(), context_instance=RequestContext(request))
 
 	# Post. Make sure all is well.
 	try:
 		if not request.POST.has_key("userid"):
 			raise RuntimeError("POST call doesn't have userid variable")
+		if not request.POST.has_key("site"):
+			raise RuntimeError("POST call doesn't have site variable")
+
+		site = request.POST["site"]
 
 		try:
 			userid = int(request.POST.get("userid", ""))
 		return HttpResponseRedirect("/")
 
 	# everything was fine, redirect to user page.
-	return HttpResponseRedirect("/userpage/" + str(userid))
+	return HttpResponseRedirect("/userpage/" + site + "/" + str(userid))
 
 
 def blurbifyAnswer(answer_obj):
 	answer_obj.body_blurb = meth
 	return answer_obj
 
-def userpage(request, userid):
+def userpage(request, site, userid):
 	try:
 		userid = int(userid)
 	except ValueError:
-		bacHomeWithError("Invalid user ID - must be a number")
+		request.session["error_message"] = "There was an error processing your user page. Please make sure your id is correct, then try again."
+		return HttpResponseRedirect("/")
+
+	# Find out which site he wants.
+	site = siteNameToApi(site)
+	if not site:
+		request.session["error_message"] = "The site you picked does not have a valid api."
+		return HttpResponseRedirect("/")
+	site_api = site[1]
 
 	cached = False
 	# Check if this user is the one in the session.
 		# Remember the userid.
 		request.session["so_userid"] = userid
 
-		site = stackexchange.Site(stackexchange.StackOverflow, app_key = "SL5xzbpFYUCSae3tqVlL7A")
+		site = stackexchange.Site(site_api, app_key = "SL5xzbpFYUCSae3tqVlL7A")
 		# site.be_inclusive()
 		# so_user = site.user(userid)
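
A small sketch of the site lookup helper added above and the per-site URL it feeds (the user ID is a placeholder):

	print siteNameToApi('ServerFault')    # -> ('ServerFault', 'api.serverfault.com')
	print siteNameToApi('NotASite')       # -> None
	# With the new urls.py pattern below, a user page is addressed per site,
	# e.g. /userpage/ServerFault/1234 -> userpage(request, 'ServerFault', '1234')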
 

stack2blog/urls.py

 
     (r'^$', home_page),
     (r'^how_to_find_your_stackoverflow_id/$', how_to_find_id),
-    (r'^userpage/(\d*)/?$', userpage),
+    (r'^userpage/(.*)/(\d*)/?$', userpage),
     (r'^answer/(\d*)/?$', post_answer),
     (r'^perform_post/(\d*)/?$', perform_post),
 )