Commits

Lynn Rees committed 91892ef

- cleanup

Comments (0)

Files changed (10)

 
 try:
     from setuptools import setup
-except:
+except ImportError:
     from distutils.core import setup
 
 setup(
-	name='tabola',
-	version='0.0.6',
-	author='L. C. Rees',
-	author_email='lcrees@gmail.com',
-	url='http://pypi.python.com/tabola/',
-	packages=['tabola', 'tabola.formats'],
-	install_requires=['xlwt', 'simplejson', 'PyYAML', 'xlrd'],
-	license='MIT',
-	classifiers=(
-		'Development Status :: 4 - Beta',
-		'License :: OSI Approved :: MIT License',
-		'Programming Language :: Python',
+    name='tabola',
+    version='0.0.6',
+    author='L. C. Rees',
+    author_email='lcrees@gmail.com',
+    url='http://pypi.python.org/tabola/',
+    packages=['tabola', 'tabola.formats'],
+    install_requires=['xlwt', 'simplejson', 'PyYAML', 'xlrd'],
+    license='MIT',
+    classifiers=(
+        'Development Status :: 4 - Beta',
+        'License :: OSI Approved :: MIT License',
         'Programming Language :: Python',
-	),
-    zip_safe = True,
-)
+    ),
+    zip_safe=True,
+)
 '''tabola.'''
 
 from tabola.core import (
-	Databook, Dataset, InvalidDatasetType, InvalidDimensions, UnsupportedFormat,
-) 
+    Databook, Dataset, InvalidDatasetType, InvalidDimensions,
+    UnsupportedFormat,
+)
 
 from tabola.formats import FORMATS as formats
 
-def all(iterable):
-    for element in iterable:
-        if not element: return False
-    return True
 
 class InvalidDatasetType(Exception):
 
 
 
 class InvalidDimensions(Exception):
-    
+
     '''Invalid size.'''
 
-    
+
 class UnsupportedFormat(NotImplementedError):
-    
+
     '''Format is not supported'''
 
 
 
     def __delitem__(self, key):
         del self._data[key]
-        
+
     def __iter__(self):
         return self._data.__iter__()
-        
+
     def _package(self, dicts=True):
         '''Packages Dataset into lists of dictionaries for transmission.'''
         if self.headers:
         else:
             data = list(list(r) for r in self._data)
         return data
-    
+
     @classmethod
     def _register_formats(cls):
         '''Adds format properties.'''
     
     '''A book of Dataset objects.'''
 
-    def __init__(self, sets=[]):
+    def __init__(self, sets=None):
+        if sets is None:
+            sets = []
         self._datasets = sets
         self._register_formats()
         
             raise InvalidDatasetType()
 
     def clear(self):
-        self._datasets = []
+        self._datasets = []

tabola/formats/__init__.py

 import _yaml as yaml
 import _iif as iif
 
-FORMATS = (csv, json, xls, yaml, iif)
+FORMATS = (csv, json, xls, yaml, iif)

tabola/formats/_csv.py

 title = 'csv'
-extentions = ('csv',)
+extensions = ('csv',)
 
+
 def export_set(dataset):
-	'''Returns CSV representation of Dataset.'''
-	stream = cStringIO.StringIO()
-	_csv = csv.writer(stream)
-	for row in dataset._package(False): _csv.writerow(row)
-	return stream.getvalue()
+    '''Returns CSV representation of Dataset.'''
+    stream = cStringIO.StringIO()
+    _csv = csv.writer(stream)
+    for row in dataset._package(False): _csv.writerow(row)
+    return stream.getvalue()
+
 
 def import_set(dset, in_stream, headers=True):
-	'''Returns dataset from CSV stream.'''
-	dset.clear()
-	rows = csv.reader(in_stream.split())
-	for i, row in enumerate(rows):
-		if i == 0 and headers:
-			dset.headers = row
-		else:
-			dset.append_row(row)
+    '''Returns dataset from CSV stream.'''
+    dset.clear()
+    rows = csv.reader(in_stream.split())
+    for i, row in enumerate(rows):
+        if i == 0 and headers:
+            dset.headers = row
+        else:
+            dset.append_row(row)

tabola/formats/_iif.py

 title = 'iif'
-extentions = ('iif',)
+extensions = ('iif',)
 
+
 def export_set(dataset):
-	'''Returns CSV representation of Dataset.'''
-	stream = cStringIO.StringIO()
-	_csv = csv.writer(stream, delimiter='\t')
-	for row in dataset._package(False): _csv.writerow(row)
-	return stream.getvalue()
+    '''Returns CSV representation of Dataset.'''
+    stream = cStringIO.StringIO()
+    _csv = csv.writer(stream, delimiter='\t')
+    for row in dataset._package(False): _csv.writerow(row)
+    return stream.getvalue()
+
 
 def import_set(dset, in_stream, headers=True):
-	'''Returns dataset from CSV stream.'''
-	dset.clear()
-	rows = csv.reader(in_stream.split(), delimiter='\t')
-	for i, row in enumerate(rows):
-		if i == 0 and headers:
-			dset.headers = row
-		else:
-			dset.append_row(row)
+    '''Returns dataset from CSV stream.'''
+    dset.clear()
+    rows = csv.reader(in_stream.split(), delimiter='\t')
+    for i, row in enumerate(rows):
+        if i == 0 and headers:
+            dset.headers = row
+        else:
+            dset.append_row(row)

tabola/formats/_json.py

 try:
     import simplejson as json
 except ImportError:
-    from python import json
-    
+    import json
+
 import tabola.core
 
+
 title = 'json'
 extensions = ('json', 'jsn')
 
+
 def export_set(dataset):
     '''Returns JSON representation of Dataset.'''
     return json.dumps(dataset.dict)
 
+
 def export_book(databook):
     '''Returns JSON representation of Databook.'''
     return json.dumps(databook._package())
-    
+
+
 def import_set(dset, in_stream):
     '''Returns dataset from JSON stream.'''
     dset.clear()
     dset.dict = json.loads(in_stream)
 
+
 def import_book(dbook, in_stream):
     '''Returns databook from JSON stream.'''
     dbook.clear()
         data = tabola.core.Dataset()
         data.title = sheet['title']
         data.dict = sheet['data']
-        dbook.append_dataset(data)
+        dbook.append_dataset(data)

tabola/formats/_xls.py

 title = 'xls'
 extensions = ('xls',)
 
+
 def export_set(dataset):
     '''Returns XLS representation of Dataset.'''
     wb = xlwt.Workbook(encoding='utf8')
     wb.save(stream)
     return stream.getvalue()
 
+
 def export_book(databook):
     '''Returns XLS representation of DataBook.'''
     wb = xlwt.Workbook(encoding='utf8')
     wb.save(stream)
     return stream.getvalue()
 
+
 def import_set(dset, sheet, headers=True):
     '''Returns dataset from CSV stream.'''
     dset.clear()
             dset.headers = new_row
         else:
             dset.append_row(new_row)
-    
+
+
 def import_book(databook, in_stream, headers=True):
     '''Returns dataset from CSV stream.'''
     databook.clear()

tabola/formats/_yaml.py

 title = 'yaml'
 extensions = ('yaml', 'yml')
 
+
 def export_set(dataset):
-	'''Returns YAML representation of Dataset.'''
-	return yaml.dump(dataset.dict)
+    '''Returns YAML representation of Dataset.'''
+    return yaml.dump(dataset.dict)
+
 
 def export_book(databook):
-	'''Returns YAML representation of Databook.'''
-	return yaml.dump(databook._package())
+    '''Returns YAML representation of Databook.'''
+    return yaml.dump(databook._package())
+
 
 def import_set(dset, in_stream):
-	'''Returns dataset from YAML stream.'''
-	dset.clear()
-	dset.dict = yaml.load(in_stream)
+    '''Returns dataset from YAML stream.'''
+    dset.clear()
+    dset.dict = yaml.load(in_stream)
+
 
 def import_book(dbook, in_stream):
-	'''Returns databook from YAML stream.'''
-	dbook.clear()
-	for sheet in yaml.load(in_stream):
-		data = tabola.core.Dataset()
-		data.title = sheet['title']
-		data.dict = sheet['data']
-		dbook.append_dataset(data)
+    '''Returns databook from YAML stream.'''
+    dbook.clear()
+    for sheet in yaml.load(in_stream):
+        data = tabola.core.Dataset()
+        data.title = sheet['title']
+        data.dict = sheet['data']
+        dbook.append_dataset(data)

tabola/tabola_test.py

 #        '''Teardown.'''
 #        pass
 
-
     def test_empty_append(self):
         '''Verify append() correctly adds tuple with no headers.'''
         new_row = (1, 2, 3)
         data.append_row(new_row)
         # Verify width/data
-        self.assertTrue(data.width==len(new_row))
-        self.assertTrue(data[0]==new_row)
+        self.assertTrue(data.width == len(new_row))
+        self.assertTrue(data[0] == new_row)
 
     def test_empty_append_with_headers(self):
         '''Verify append() correctly detects mismatch of number of
         self.assertEquals(len(data.headers), 3)
         self.assertEquals(data.width, 3)
         new_col = ('foo', 'bar')
-        self.assertRaises(tabola.InvalidDimensions, data.append_column, new_col)
+        self.assertRaises(
+            tabola.InvalidDimensions, data.append_column, new_col,
+        )
 
     def test_header_slicing(self):
         '''Verify slicing by headers.'''
         self.assertEqual(
-            self.founders['first_name'], 
+            self.founders['first_name'],
             [self.john[0], self.george[0], self.tom[0]],
         )
         self.assertEqual(
         self.assertEqual(self.founders[1:3], [self.george, self.tom])
         self.assertEqual(self.founders[2:], [self.tom])
 
-
     def test_delete(self):
         '''Verify deleting from dataset works.'''
         # Delete from front of object
     
 
 if __name__ == '__main__':
-    unittest.main()
+    unittest.main()