Mike Bayer committed 611b7ec

- whitespace bonanza, contd

  • Parent commits 594122d

Files changed (156)

File doc/build/builder/builders.py

         builder.config.html_context['site_base'] = builder.config['site_base']
 
         self.lookup = TemplateLookup(directories=builder.config.templates_path,
-            #format_exceptions=True, 
+            #format_exceptions=True,
             imports=[
                 "from builder import util"
             ]
 
         # RTD layout
         if rtd:
-            # add variables if not present, such 
+            # add variables if not present, such
             # as if local test of READTHEDOCS variable
             if 'MEDIA_URL' not in context:
                 context['MEDIA_URL'] = "http://media.readthedocs.org/"
             'sqlpopup':[
                 (
                     r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))',
-                    bygroups(using(PythonConsoleLexer), Token.Sql.Popup), 
+                    bygroups(using(PythonConsoleLexer), Token.Sql.Popup),
                     "#pop"
                 )
             ],
             'opensqlpopup':[
                 (
                     r'.*?(?:{stop}\n*|$)',
-                    Token.Sql, 
+                    Token.Sql,
                     "#pop"
                 )
             ]
             'sqlpopup':[
                 (
                     r'(.*?\n)((?:PRAGMA|BEGIN|SELECT|INSERT|DELETE|ROLLBACK|COMMIT|ALTER|UPDATE|CREATE|DROP|PRAGMA|DESCRIBE).*?(?:{stop}\n?|$))',
-                    bygroups(using(PythonLexer), Token.Sql.Popup), 
+                    bygroups(using(PythonLexer), Token.Sql.Popup),
                     "#pop"
                 )
             ],
             'opensqlpopup':[
                 (
                     r'.*?(?:{stop}\n*|$)',
-                    Token.Sql, 
+                    Token.Sql,
                     "#pop"
                 )
             ]
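
For context, the 'sqlpopup'/'opensqlpopup' entries above are Pygments lexer
states.  A minimal, hypothetical sketch of the same mechanics (``bygroups``
splits one match across token types, ``using`` delegates a group to another
lexer, and ``"#pop"`` returns to the previous state); the lexer name and
regexes here are illustrative, not the commit's code::

    from pygments.lexer import RegexLexer, bygroups, using
    from pygments.lexers import PythonConsoleLexer
    from pygments.token import Token

    class MiniSqlPopupLexer(RegexLexer):
        """Illustrative only: route echoed SQL into a custom token type."""
        tokens = {
            'root': [
                # a "{sql}" marker pushes into the 'sqlpopup' state
                (r'\{sql\}', Token.Sql.Link, 'sqlpopup'),
                # everything else is delegated to the console lexer
                (r'.+\n?', using(PythonConsoleLexer)),
            ],
            'sqlpopup': [
                # group 1 is lexed as Python console text, group 2 is
                # tagged Token.Sql.Popup; "#pop" then leaves this state
                (r'(.*?\n)(SELECT.*?\n)',
                    bygroups(using(PythonConsoleLexer), Token.Sql.Popup),
                    "#pop"),
            ],
        }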

File doc/build/conf.py

 
 site_base = "http://www.sqlalchemy.org"
 
-# arbitrary number recognized by builders.py, incrementing this 
+# arbitrary number recognized by builders.py, incrementing this
 # will force a rebuild
 build_number = 3
 

File doc/build/testdocs.py

 rootlogger.addHandler(handler)
 
 
-def teststring(s, name, globs=None, verbose=None, report=True, 
-               optionflags=0, extraglobs=None, raise_on_error=False, 
+def teststring(s, name, globs=None, verbose=None, report=True,
+               optionflags=0, extraglobs=None, raise_on_error=False,
                parser=doctest.DocTestParser()):
 
     from doctest import DebugRunner, DocTestRunner, master
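
``teststring()`` above adapts the stdlib doctest machinery to run tests from
a plain string rather than a module.  A minimal sketch of that underlying
pattern (names here are illustrative, not the commit's code)::

    import doctest

    source = """
    >>> 1 + 1
    2
    """
    # parse a doctest out of the string, then execute it with a runner
    test = doctest.DocTestParser().get_doctest(
                source, globs={}, name='example', filename=None, lineno=0)
    runner = doctest.DocTestRunner(verbose=False)
    runner.run(test)
    runner.summarize()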

File examples/association/__init__.py

 
 This directory includes the following examples:
 
-* basic_association.py - illustrate a many-to-many relationship between an 
+* basic_association.py - illustrate a many-to-many relationship between an
   "Order" and a collection of "Item" objects, associating a purchase price
   with each via an association object called "OrderItem"
 * proxied_association.py - same example as basic_association, adding in

File examples/association/basic_association.py

 The association object pattern is a form of many-to-many which
 associates additional data with each association between parent/child.
 
-The example illustrates an "order", referencing a collection 
+The example illustrates an "order", referencing a collection
 of "items", with a particular price paid associated with each "item".
 
 """
 
     # query the order, print items
     order = session.query(Order).filter_by(customer_name='john smith').one()
-    print [(order_item.item.description, order_item.price) 
+    print [(order_item.item.description, order_item.price)
            for order_item in order.order_items]
 
     # print customers who bought 'MySQL Crowbar' on sale
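
A hedged sketch of the association object pattern this docstring describes,
using declarative classes named to match the example's prose (details such
as column sizes are assumptions)::

    from sqlalchemy import Column, Integer, String, Float, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Order(Base):
        __tablename__ = 'order'
        id = Column(Integer, primary_key=True)
        customer_name = Column(String(30))
        order_items = relationship("OrderItem", backref="order")

    class Item(Base):
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        description = Column(String(30))

    class OrderItem(Base):
        """The association object: links Order to Item, and carries
        the price paid for that item on that order."""
        __tablename__ = 'orderitem'
        order_id = Column(Integer, ForeignKey('order.id'), primary_key=True)
        item_id = Column(Integer, ForeignKey('item.id'), primary_key=True)
        price = Column(Float)
        item = relationship("Item")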

File examples/association/dict_of_sets_with_default.py

     key = Column(String)
 
     values = association_proxy("elements", "value")
-    """Bridge the association from 'elements' over to the 
+    """Bridge the association from 'elements' over to the
     'value' element of C."""
 
     def __init__(self, key, values=None):
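
The ``association_proxy`` used above reads "through" an intermediate
collection.  A minimal, self-contained sketch of that core behavior
(hypothetical User/Keyword classes; the dict-of-sets example layers a
keyed, defaulting collection on top of this)::

    from sqlalchemy import Column, Integer, String, ForeignKey
    from sqlalchemy.orm import relationship
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Keyword(Base):
        __tablename__ = 'keyword'
        id = Column(Integer, primary_key=True)
        word = Column(String(50))
        user_id = Column(Integer, ForeignKey('user.id'))

        def __init__(self, word):
            self.word = word

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

        kw = relationship(Keyword)

        # User.keywords behaves like a list of strings; reads and
        # writes pass through to the underlying Keyword objects
        keywords = association_proxy('kw', 'word')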

File examples/beaker_caching/__init__.py

 """
 Illustrates how to embed Beaker cache functionality within
 the Query object, allowing full cache control as well as the
-ability to pull "lazy loaded" attributes from long term cache 
+ability to pull "lazy loaded" attributes from long term cache
 as well.
 
 In this demo, the following techniques are illustrated:
 
 * Using custom subclasses of Query
-* Basic technique of circumventing Query to pull from a 
+* Basic technique of circumventing Query to pull from a
   custom cache source instead of the database.
 * Rudimental caching with Beaker, using "regions" which allow
   global control over a fixed set of configurations.
-* Using custom MapperOption objects to configure options on 
-  a Query, including the ability to invoke the options 
+* Using custom MapperOption objects to configure options on
+  a Query, including the ability to invoke the options
   deep within an object graph when lazy loads occur.
 
 E.g.::
 Listing of files:
 
     environment.py - Establish the Session, the Beaker cache
-    manager, data / cache file paths, and configurations, 
+    manager, data / cache file paths, and configurations,
     bootstrap fixture data if necessary.
 
-    caching_query.py - Represent functions and classes 
+    caching_query.py - Represent functions and classes
     which allow the usage of Beaker caching with SQLAlchemy.
     Introduces a query option called FromCache.
 

File examples/beaker_caching/advanced.py

 """advanced.py
 
-Illustrate usage of Query combined with the FromCache option, 
+Illustrate usage of Query combined with the FromCache option,
 including front-end loading, cache invalidation, namespace techniques
 and collection caching.
 
     start/end are integers, range is then
     "person <start>" - "person <end>".
 
-    The cache option we set up is called "name_range", indicating 
+    The cache option we set up is called "name_range", indicating
     a range of names for the Person class.
 
     The `Person.addresses` collections are also cached.  It's basically
     another level of tuning here, as that particular cache option
-    can be transparently replaced with joinedload(Person.addresses). 
+    can be transparently replaced with joinedload(Person.addresses).
     The effect is that each Person and his/her Address collection
     is cached either together or separately, affecting the kind of
     SQL that emits for unloaded Person objects as well as the distribution
 print "\ntwenty five through forty, invalidate first:\n"
 print ", ".join([p.name for p in load_name_range(25, 40, True)])
 
-# illustrate the address loading from either cache/already 
+# illustrate the address loading from either cache/already
 # on the Person
 print "\n\nPeople plus addresses, two through twelve, addresses possibly from cache"
 for p in load_name_range(2, 12):
     print p.format_full()
 
-# illustrate the address loading from either cache/already 
+# illustrate the address loading from either cache/already
 # on the Person
 print "\n\nPeople plus addresses, two through twelve, addresses from cache"
 for p in load_name_range(2, 12):

File examples/beaker_caching/caching_query.py

    parameters on a Query
  * RelationshipCache - a variant of FromCache which is specific
    to a query invoked during a lazy load.
- * _params_from_query - extracts value parameters from 
+ * _params_from_query - extracts value parameters from
    a Query.
 
 The rest of what's here are standard SQLAlchemy and
 from sqlalchemy.sql import visitors
 
 class CachingQuery(Query):
-    """A Query subclass which optionally loads full results from a Beaker 
+    """A Query subclass which optionally loads full results from a Beaker
     cache region.
 
     The CachingQuery stores additional state that allows it to consult
     a Beaker cache before accessing the database:
 
-    * A "region", which is a cache region argument passed to a 
+    * A "region", which is a cache region argument passed to a
       Beaker CacheManager, specifies a particular cache configuration
       (including backend implementation, expiration times, etc.)
     * A "namespace", which is a qualifying name that identifies a
-      group of keys within the cache.  A query that filters on a name 
-      might use the name "by_name", a query that filters on a date range 
+      group of keys within the cache.  A query that filters on a name
+      might use the name "by_name", a query that filters on a date range
       to a joined table might use the name "related_date_range".
 
     When the above state is present, a Beaker cache is retrieved.
 
-    The "namespace" name is first concatenated with 
-    a string composed of the individual entities and columns the Query 
+    The "namespace" name is first concatenated with
+    a string composed of the individual entities and columns the Query
     requests, i.e. such as ``Query(User.id, User.name)``.
 
     The Beaker cache is then loaded from the cache manager based
     on the region and composed namespace.  The key within the cache
     itself is then constructed against the bind parameters specified
-    by this query, which are usually literals defined in the 
+    by this query, which are usually literals defined in the
     WHERE clause.
 
     The FromCache and RelationshipCache mapper options below represent
     return cache, cache_key
 
 def _namespace_from_query(namespace, query):
-    # cache namespace - the token handed in by the 
+    # cache namespace - the token handed in by the
     # option + class we're querying against
     namespace = " ".join([namespace] + [str(x) for x in query._entities])
 
     if hasattr(query, '_cache_parameters'):
         region, namespace, cache_key = query._cache_parameters
         raise ValueError("This query is already configured "
-                        "for region %r namespace %r" % 
+                        "for region %r namespace %r" %
                         (region, namespace)
                     )
     query._cache_parameters = region, namespace, cache_key
         be a name uniquely describing the target Query's
         lexical structure.
 
-        :param cache_key: optional.  A string cache key 
+        :param cache_key: optional.  A string cache key
         that will serve as the key to the query.   Use this
         if your query has a huge amount of parameters (such
-        as when using in_()) which correspond more simply to 
+        as when using in_()) which correspond more simply to
         some other identifier.
 
         """
         _set_cache_parameters(query, self.region, self.namespace, self.cache_key)
 
 class RelationshipCache(MapperOption):
-    """Specifies that a Query as called within a "lazy load" 
+    """Specifies that a Query as called within a "lazy load"
        should load results from a cache."""
 
     propagate_to_loaders = True
                 if (cls, key) in self._relationship_options:
                     relationship_option = self._relationship_options[(cls, key)]
                     _set_cache_parameters(
-                            query, 
-                            relationship_option.region, 
-                            relationship_option.namespace, 
+                            query,
+                            relationship_option.region,
+                            relationship_option.namespace,
                             None)
 
     def and_(self, option):
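
A hedged usage sketch tying the pieces above together (``Session`` and
``Person`` come from the example package's environment.py and model.py;
the region/namespace strings are arbitrary)::

    from caching_query import FromCache

    # results are cached in the "default" region under the "by_name"
    # namespace; the bind parameter 'spongebob' becomes part of the key
    q = Session.query(Person).\
            options(FromCache("default", "by_name")).\
            filter(Person.name == 'spongebob')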

File examples/beaker_caching/environment.py

 """environment.py
 
-Establish data / cache file paths, and configurations, 
+Establish data / cache file paths, and configurations,
 bootstrap fixture data if necessary.
 
 """

File examples/beaker_caching/fixture_data.py

         person = Person(
                     "person %.2d" % i,
                     Address(
-                        street="street %.2d" % i, 
+                        street="street %.2d" % i,
                         postal_code=all_post_codes[random.randint(0, len(all_post_codes) - 1)]
                     )
                 )

File examples/beaker_caching/helloworld.py

 # remove the Session.  next query starts from scratch.
 Session.remove()
 
-# load again, using the same FromCache option. now they're cached 
+# load again, using the same FromCache option. now they're cached
 # under "all_people", no SQL is emitted.
 print "loading people....again!"
 people = Session.query(Person).options(FromCache("default", "all_people")).all()
 
-# want to load on some different kind of query ?  change the namespace 
+# want to load on some different kind of query ?  change the namespace
 # you send to FromCache
 print "loading people two through twelve"
 people_two_through_twelve = Session.query(Person).\
 
 # the data is cached under the "namespace" you send to FromCache, *plus*
 # the bind parameters of the query.    So this query, having
-# different literal parameters under "Person.name.between()" than the 
+# different literal parameters under "Person.name.between()" than the
 # previous one, issues new SQL...
 print "loading people five through fifteen"
 people_five_through_fifteen = Session.query(Person).\
 
 
 # invalidate the cache for the three queries we've done.  Recreate
-# each Query, which includes at the very least the same FromCache, 
-# same list of objects to be loaded, and the same parameters in the 
+# each Query, which includes at the very least the same FromCache,
+# same list of objects to be loaded, and the same parameters in the
 # same order, then call invalidate().
 print "invalidating everything"
 Session.query(Person).options(FromCache("default", "all_people")).invalidate()

File examples/beaker_caching/local_session_caching.py

 import collections
 
 class ScopedSessionNamespace(container.MemoryNamespaceManager):
-    """A Beaker cache type which will cache objects locally on 
+    """A Beaker cache type which will cache objects locally on
     the current session.
 
     When used with the query_cache system, the effect is that the objects
 
     # identity is preserved - person10 is the *same* object that's
     # ultimately inside the cache.   So it is safe to manipulate
-    # the not-queried-for attributes of objects when using such a 
-    # cache without the need to invalidate - however, any change 
-    # that would change the results of a cached query, such as 
-    # inserts, deletes, or modification to attributes that are 
+    # the not-queried-for attributes of objects when using such a
+    # cache without the need to invalidate - however, any change
+    # that would change the results of a cached query, such as
+    # inserts, deletes, or modification to attributes that are
     # part of query criterion, still require careful invalidation.
     from caching_query import _get_cache_parameters
     cache, key = _get_cache_parameters(q)

File examples/beaker_caching/model.py

 """Model.   We are modeling Person objects with a collection
-of Address objects.  Each Address has a PostalCode, which 
+of Address objects.  Each Address has a PostalCode, which
 in turn references a City and then a Country:
 
 Person --(1..n)--> Address
     def __str__(self):
         return "%s\t"\
               "%s, %s\t"\
-              "%s" % (self.street, self.city.name, 
+              "%s" % (self.street, self.city.name,
                 self.postal_code.code, self.country.name)
 
 class Person(Base):

File examples/beaker_caching/relation_caching.py

 """relationship_caching.py
 
-Load a set of Person and Address objects, specifying that 
-related PostalCode, City, Country objects should be pulled from long 
+Load a set of Person and Address objects, specifying that
+related PostalCode, City, Country objects should be pulled from long
 term cache.
 
 """

File examples/custom_attributes/custom_management.py

 """Illustrates customized class instrumentation, using
 the :mod:`sqlalchemy.ext.instrumentation` extension package.
 
-In this example, mapped classes are modified to 
+In this example, mapped classes are modified to
 store their state in a dictionary attached to an attribute
 named "_goofy_dict", instead of using __dict__.
-this example illustrates how to replace SQLAlchemy's class 
+this example illustrates how to replace SQLAlchemy's class
 descriptors with a user-defined system.
 
 
 if __name__ == '__main__':
     meta = MetaData(create_engine('sqlite://'))
 
-    table1 = Table('table1', meta, 
-                    Column('id', Integer, primary_key=True), 
+    table1 = Table('table1', meta,
+                    Column('id', Integer, primary_key=True),
                     Column('name', Text))
-    table2 = Table('table2', meta, 
-                    Column('id', Integer, primary_key=True), 
-                    Column('name', Text), 
+    table2 = Table('table2', meta,
+                    Column('id', Integer, primary_key=True),
+                    Column('name', Text),
                     Column('t1id', Integer, ForeignKey('table1.id')))
     meta.create_all()
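
A plain-Python sketch of the storage idea only (this is not the
instrumentation extension API, just the "_goofy_dict" notion from the
docstring)::

    class GoofyState(object):
        """Keep attribute state in a side dictionary, not __dict__."""

        def __init__(self):
            # bypass our own __setattr__ to create the side dict
            object.__setattr__(self, '_goofy_dict', {})

        def __getattr__(self, key):
            # only called when normal lookup fails
            try:
                return self._goofy_dict[key]
            except KeyError:
                raise AttributeError(key)

        def __setattr__(self, key, value):
            self._goofy_dict[key] = value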
 

File examples/dynamic_dict/__init__.py

 """Illustrates how to place a dictionary-like facade on top of a "dynamic" relation, so
-that dictionary operations (assuming simple string keys) can operate upon a large 
+that dictionary operations (assuming simple string keys) can operate upon a large
 collection without loading the full collection at once.
 
 """

File examples/elementtree/__init__.py

   represented in a separate table.  The nodes are associated in a hierarchy using an adjacency list
   structure.  A query function is introduced which can search for nodes along any path with a given
   structure of attributes, basically a (very narrow) subset of xpath.
-* ``optimized_al.py`` - Uses the same strategy as ``adjacency_list.py``, but associates each 
-  DOM row with its owning document row, so that a full document of DOM nodes can be 
+* ``optimized_al.py`` - Uses the same strategy as ``adjacency_list.py``, but associates each
+  DOM row with its owning document row, so that a full document of DOM nodes can be
   loaded using O(1) queries - the construction of the "hierarchy" is performed after
   the load in a non-recursive fashion and is much more efficient.
 
     session.add(Document(file, doc))
     session.commit()
 
-    # locate documents with a certain path/attribute structure 
+    # locate documents with a certain path/attribute structure
     for document in find_document('/somefile/header/field2[@attr=foo]'):
         # dump the XML
         print document

File examples/elementtree/optimized_al.py

 """This script duplicates adjacency_list.py, but optimizes the loading
-of XML nodes to be based on a "flattened" datamodel. Any number of XML documents, 
-each of arbitrary complexity, can be loaded in their entirety via a single query 
+of XML nodes to be based on a "flattened" datamodel. Any number of XML documents,
+each of arbitrary complexity, can be loaded in their entirety via a single query
 which joins on only three tables.
 
 """
     Column('filename', String(30), unique=True),
 )
 
-# stores XML nodes in an adjacency list model.  This corresponds to 
+# stores XML nodes in an adjacency list model.  This corresponds to
 # Element and SubElement objects.
 elements = Table('elements', meta,
     Column('element_id', Integer, primary_key=True),
 
 ########################## PART IV - Persistence Mapping #####################
 
-# Node class.  a non-public class which will represent 
+# Node class.  a non-public class which will represent
 # the DB-persisted Element/SubElement object.  We cannot create mappers for
-# ElementTree elements directly because they are at the very least not new-style 
+# ElementTree elements directly because they are at the very least not new-style
 # classes, and also may be backed by native implementations.
 # so here we construct an adapter.
 class _Node(object):
     pass
 
-# Attribute class.  also internal, this will represent the key/value attributes stored for 
+# Attribute class.  also internal, this will represent the key/value attributes stored for
 # a particular Node.
 class _Attribute(object):
     def __init__(self, name, value):

File examples/elementtree/pickle.py

 """illustrates a quick and dirty way to persist an XML document expressed using ElementTree and pickle.
 
-This is a trivial example using PickleType to marshal/unmarshal the ElementTree 
+This is a trivial example using PickleType to marshal/unmarshal the ElementTree
 document into a binary column.  Compare to explicit.py which stores the individual components of the ElementTree
 structure in distinct rows using two additional mapped entities.  Note that the usage of both
 styles of persistence are identical, as is the structure of the main Document class.

File examples/generic_associations/__init__.py

 """
-Illustrates various methods of associating multiple types of 
+Illustrates various methods of associating multiple types of
 parents with a particular child object.
 
-The examples all use the declarative extension along with 
+The examples all use the declarative extension along with
 declarative mixins.   Each one presents the identical use
 case at the end - two classes, ``Customer`` and ``Supplier``, both
 subclassing the ``HasAddresses`` mixin, which ensures that the

File examples/generic_associations/discriminator_on_association.py

 table is used so that traditional foreign key constraints may be used.
 
 This configuration has the advantage that a fixed set of tables
-are used, with no extra-table-per-parent needed.   The individual 
-Address record can also locate its parent with no need to scan 
+are used, with no extra-table-per-parent needed.   The individual
+Address record can also locate its parent with no need to scan
 amongst many tables.
 
 """
 class Base(object):
     """Base class which provides automated table name
     and surrogate primary key column.
-    
+
     """
     @declared_attr
     def __tablename__(cls):
 class AddressAssociation(Base):
     """Associates a collection of Address objects
     with a particular parent.
-    
+
     """
     __tablename__ = "address_association"
 
     @classmethod
     def creator(cls, discriminator):
-        """Provide a 'creator' function to use with 
+        """Provide a 'creator' function to use with
         the association proxy."""
 
         return lambda addresses:AddressAssociation(
-                                addresses=addresses, 
+                                addresses=addresses,
                                 discriminator=discriminator)
 
     discriminator = Column(String)
         return getattr(self, "%s_parent" % self.discriminator)
 
 class Address(Base):
-    """The Address class.   
-    
-    This represents all address records in a 
+    """The Address class.
+
+    This represents all address records in a
     single table.
-    
+
     """
-    association_id = Column(Integer, 
+    association_id = Column(Integer,
                         ForeignKey("address_association.id")
                     )
     street = Column(String)
     city = Column(String)
     zip = Column(String)
     association = relationship(
-                    "AddressAssociation", 
+                    "AddressAssociation",
                     backref="addresses")
 
     parent = association_proxy("association", "parent")
 
     def __repr__(self):
         return "%s(street=%r, city=%r, zip=%r)" % \
-            (self.__class__.__name__, self.street, 
+            (self.__class__.__name__, self.street,
             self.city, self.zip)
 
 class HasAddresses(object):
     """HasAddresses mixin, creates a relationship to
     the address_association table for each parent.
-    
+
     """
     @declared_attr
     def address_association_id(cls):
-        return Column(Integer, 
+        return Column(Integer,
                                 ForeignKey("address_association.id"))
 
     @declared_attr
                     "address_association", "addresses",
                     creator=AddressAssociation.creator(discriminator)
                 )
-        return relationship("AddressAssociation", 
-                    backref=backref("%s_parent" % discriminator, 
+        return relationship("AddressAssociation",
+                    backref=backref("%s_parent" % discriminator,
                                         uselist=False))
 
 
 
 session.add_all([
     Customer(
-        name='customer 1', 
+        name='customer 1',
         addresses=[
             Address(
                     street='123 anywhere street',

File examples/generic_associations/table_per_association.py

 
 This configuration has the advantage that all Address
 rows are in one table, so that the definition of "Address"
-can be maintained in one place.   The association table 
+can be maintained in one place.   The association table
 contains the foreign key to Address so that Address
 has no dependency on the system.
 
 class Base(object):
     """Base class which provides automated table name
     and surrogate primary key column.
-    
+
     """
     @declared_attr
     def __tablename__(cls):
 Base = declarative_base(cls=Base)
 
 class Address(Base):
-    """The Address class.   
-    
-    This represents all address records in a 
+    """The Address class.
+
+    This represents all address records in a
     single table.
-    
+
     """
     street = Column(String)
     city = Column(String)
 
     def __repr__(self):
         return "%s(street=%r, city=%r, zip=%r)" % \
-            (self.__class__.__name__, self.street, 
+            (self.__class__.__name__, self.street,
             self.city, self.zip)
 
 class HasAddresses(object):
     """HasAddresses mixin, creates a new address_association
     table for each parent.
-    
+
     """
     @declared_attr
     def addresses(cls):
         address_association = Table(
             "%s_addresses" % cls.__tablename__,
             cls.metadata,
-            Column("address_id", ForeignKey("address.id"), 
+            Column("address_id", ForeignKey("address.id"),
                                 primary_key=True),
-            Column("%s_id" % cls.__tablename__, 
-                                ForeignKey("%s.id" % cls.__tablename__), 
+            Column("%s_id" % cls.__tablename__,
+                                ForeignKey("%s.id" % cls.__tablename__),
                                 primary_key=True),
         )
         return relationship(Address, secondary=address_association)
 
 session.add_all([
     Customer(
-        name='customer 1', 
+        name='customer 1',
         addresses=[
             Address(
                     street='123 anywhere street',

File examples/generic_associations/table_per_related.py

 class Base(object):
     """Base class which provides automated table name
     and surrogate primary key column.
-    
+
     """
     @declared_attr
     def __tablename__(cls):
 Base = declarative_base(cls=Base)
 
 class Address(object):
-    """Define columns that will be present in each 
+    """Define columns that will be present in each
     'Address' table.
-    
+
     This is a declarative mixin, so additional mapped
     attributes beyond simple columns specified here
     should be set up using @declared_attr.
-    
+
     """
     street = Column(String)
     city = Column(String)
 
     def __repr__(self):
         return "%s(street=%r, city=%r, zip=%r)" % \
-            (self.__class__.__name__, self.street, 
+            (self.__class__.__name__, self.street,
             self.city, self.zip)
 
 class HasAddresses(object):
     """HasAddresses mixin, creates a new Address class
     for each parent.
-    
+
     """
     @declared_attr
     def addresses(cls):
             "%sAddress" % cls.__name__,
             (Address, Base,),
             dict(
-                __tablename__ = "%s_address" % 
+                __tablename__ = "%s_address" %
                             cls.__tablename__,
-                parent_id = Column(Integer, 
+                parent_id = Column(Integer,
                     ForeignKey("%s.id" % cls.__tablename__)),
                 parent = relationship(cls)
             )
 
 session.add_all([
     Customer(
-        name='customer 1', 
+        name='customer 1',
         addresses=[
             Customer.Address(
                     street='123 anywhere street',

File examples/graphs/directed_graph.py

 class Edge(Base):
     __tablename__ = 'edge'
 
-    lower_id = Column(Integer, 
-                        ForeignKey('node.node_id'), 
+    lower_id = Column(Integer,
+                        ForeignKey('node.node_id'),
                         primary_key=True)
 
-    higher_id = Column(Integer, 
-                        ForeignKey('node.node_id'), 
+    higher_id = Column(Integer,
+                        ForeignKey('node.node_id'),
                         primary_key=True)
 
     lower_node = relationship(Node,
-                                primaryjoin=lower_id==Node.node_id, 
+                                primaryjoin=lower_id==Node.node_id,
                                 backref='lower_edges')
     higher_node = relationship(Node,
-                                primaryjoin=higher_id==Node.node_id, 
+                                primaryjoin=higher_id==Node.node_id,
                                 backref='higher_edges')
 
     # here we have lower.node_id <= higher.node_id

File examples/inheritance/concrete.py

 
 metadata = MetaData()
 
-managers_table = Table('managers', metadata, 
+managers_table = Table('managers', metadata,
     Column('employee_id', Integer, primary_key=True),
     Column('name', String(50)),
     Column('manager_data', String(40))
 )
 
-engineers_table = Table('engineers', metadata, 
+engineers_table = Table('engineers', metadata,
     Column('employee_id', Integer, primary_key=True),
     Column('name', String(50)),
     Column('engineer_info', String(40))

File examples/inheritance/joined.py

     id = Column(Integer, primary_key=True)
     name = Column(String(50))
 
-    employees = relationship("Person", 
+    employees = relationship("Person",
                     backref='company',
                     cascade='all, delete-orphan')
 
     def __repr__(self):
         return "Engineer %s, status %s, engineer_name %s, "\
                 "primary_language %s" % \
-                    (self.name, self.status, 
+                    (self.name, self.status,
                         self.engineer_name, self.primary_language)
 
 class Manager(Person):
 
 c = Company(name='company1', employees=[
     Manager(
-        name='pointy haired boss', 
+        name='pointy haired boss',
         status='AAB',
         manager_name='manager1'),
-    Engineer(name='dilbert', 
+    Engineer(name='dilbert',
         status='BBA',
-        engineer_name='engineer1', 
+        engineer_name='engineer1',
         primary_language='java'),
     Person(name='joesmith'),
-    Engineer(name='wally', 
+    Engineer(name='wally',
             status='CGG',
-            engineer_name='engineer2', 
+            engineer_name='engineer2',
             primary_language='python'),
-    Manager(name='jsmith', 
+    Manager(name='jsmith',
                 status='ABA',
                 manager_name='manager2')
 ])
 for e in c.employees:
     print e
 
-# query using with_polymorphic. 
+# query using with_polymorphic.
 eng_manager = with_polymorphic(Person, [Engineer, Manager], aliased=True)
 print session.query(eng_manager).\
             filter(
         eng_manager,
         Company.employees
     ).filter(
-        or_(eng_manager.Engineer.engineer_name=='engineer1', 
+        or_(eng_manager.Engineer.engineer_name=='engineer1',
             eng_manager.Manager.manager_name=='manager2')
     ).all()
 

File examples/inheritance/single.py

 metadata = MetaData()
 
 # a table to store companies
-companies = Table('companies', metadata, 
+companies = Table('companies', metadata,
    Column('company_id', Integer, primary_key=True),
    Column('name', String(50)))
 
-employees_table = Table('employees', metadata, 
+employees_table = Table('employees', metadata,
     Column('employee_id', Integer, primary_key=True),
     Column('company_id', Integer, ForeignKey('companies.company_id')),
     Column('name', String(50)),
     def __repr__(self):
         return "Engineer %s, status %s, engineer_name %s, "\
                     "primary_language %s" % \
-                        (self.name, self.status, 
+                        (self.name, self.status,
                         self.engineer_name, self.primary_language)
 class Manager(Person):
     def __repr__(self):

File examples/nested_sets/nested_sets.py

 class Employee(Base):
     __tablename__ = 'personnel'
     __mapper_args__ = {
-        'extension':NestedSetExtension(), 
+        'extension':NestedSetExtension(),
         'batch':False  # allows extension to fire for each instance before going to the next.
     }
 

File examples/postgis/postgis.py

 class TextualGisElement(GisElement, expression.Function):
     """Represents a Geometry value as expressed within application code; i.e. in wkt format.
 
-    Extends expression.Function so that the value is interpreted as 
+    Extends expression.Function so that the value is interpreted as
     GeomFromText(value) in a SQL expression context.
 
     """
                 return value
         return process
 
-# other datatypes can be added as needed, which 
+# other datatypes can be added as needed, which
 # currently only affect DDL statements.
 
 class Point(Geometry):
 # DDL integration
 
 class GISDDL(object):
-    """A DDL extension which integrates SQLAlchemy table create/drop 
+    """A DDL extension which integrates SQLAlchemy table create/drop
     methods with PostGis' AddGeometryColumn/DropGeometryColumn functions.
 
     Usage::
 
 
 class GisAttribute(AttributeExtension):
-    """Intercepts 'set' events on a mapped instance attribute and 
+    """Intercepts 'set' events on a mapped instance attribute and
     converts the incoming value to a GIS expression.
 
     """
 
     """
     return column_property(
-                Column(*args, **kw), 
-                extension=GisAttribute(), 
+                Column(*args, **kw),
+                extension=GisAttribute(),
                 comparator_factory=GisComparator
             )
 

File examples/sharding/__init__.py

 * a function which can return a list of shard ids which apply to a particular
   instance identifier; this is called "id_chooser".  If it returns all shard ids,
   all shards will be searched.
-* a function which can return a list of shard ids to try, given a particular 
-  Query ("query_chooser").  If it returns all shard ids, all shards will be 
+* a function which can return a list of shard ids to try, given a particular
+  Query ("query_chooser").  If it returns all shard ids, all shards will be
   queried and the results joined together.
 
 In this example, four sqlite databases will store information about weather
 
 The construction of generic sharding routines is an ambitious approach
 to the issue of organizing instances among multiple databases.   For a
-more plain-spoken alternative, the "distinct entity" approach 
+more plain-spoken alternative, the "distinct entity" approach
 is a simple method of assigning objects to different tables (and potentially
-database nodes) in an explicit way - described on the wiki at 
+database nodes) in an explicit way - described on the wiki at
 `EntityName <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName>`_.
 
 """

File examples/versioning/_lib.py

 """copy of ComparableEntity and eq_() from test.lib.
 
 This is just to support running the example outside of
-the SQLA testing environment which is no longer part of 
+the SQLA testing environment which is no longer part of
 SQLAlchemy as of 0.7.
 
 """

File examples/versioning/history_meta.py

     versioned_cls = type.__new__(type, "%sHistory" % cls.__name__, bases, {})
 
     m = mapper(
-            versioned_cls, 
-            table, 
-            inherits=super_history_mapper, 
+            versioned_cls,
+            table,
+            inherits=super_history_mapper,
             polymorphic_on=polymorphic_on,
             polymorphic_identity=local_mapper.polymorphic_identity
             )
             try:
                 prop = obj_mapper.get_property_by_column(obj_col)
             except UnmappedColumnError:
-                # in the case of single table inheritance, there may be 
+                # in the case of single table inheritance, there may be
                 # columns on the mapped table intended for the subclass only.
-                # the "unmapped" status of the subclass column on the 
+                # the "unmapped" status of the subclass column on the
                 # base class is a feature of the declarative module as of sqla 0.5.2.
                 continue
 

File examples/versioning/test_versioning.py

         eq_(
             sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(),
             [
-                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), 
-                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), 
+                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
                 SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1)
             ]
         )
         eq_(
             sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
             [
-                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), 
-                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), 
-                SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1), 
+                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
+                SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
                 SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
             ]
         )
         eq_(
             sess.query(BaseClassHistory).order_by(BaseClassHistory.id, BaseClassHistory.version).all(),
             [
-                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1), 
-                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1), 
-                BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2), 
-                SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1), 
+                SubClassSeparatePkHistory(id=1, name=u'sep1', type=u'sep', version=1),
+                BaseClassHistory(id=2, name=u'base1', type=u'base', version=1),
+                BaseClassHistory(id=2, name=u'base1mod', type=u'base', version=2),
+                SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=1),
                 SubClassSamePkHistory(id=3, name=u'same1', type=u'same', version=2)
             ]
         )

File ez_setup.py

     try:
         import pkg_resources
     except ImportError:
-        return do_download()       
+        return do_download()
     try:
         pkg_resources.require("setuptools>="+version); return
     except pkg_resources.VersionConflict, e:

File lib/sqlalchemy/connectors/mxodbc.py

             return False
 
     def _get_server_version_info(self, connection):
-        # eGenix suggests using conn.dbms_version instead 
+        # eGenix suggests using conn.dbms_version instead
         # of what we're doing here
         dbapi_con = connection.connection
         version = []

File lib/sqlalchemy/connectors/pyodbc.py

                 if 'port' in keys and not 'port' in query:
                     port = ',%d' % int(keys.pop('port'))
 
-                connectors = ["DRIVER={%s}" % 
+                connectors = ["DRIVER={%s}" %
                                 keys.pop('driver', self.pyodbc_driver_name),
                               'Server=%s%s' % (keys.pop('host', ''), port),
                               'Database=%s' % keys.pop('database', '') ]
                 connectors.append("Trusted_Connection=Yes")
 
             # if set to 'Yes', the ODBC layer will try to automagically
-            # convert textual data from your database encoding to your 
-            # client encoding.  This should obviously be set to 'No' if 
-            # you query a cp1253 encoded database from a latin1 client... 
+            # convert textual data from your database encoding to your
+            # client encoding.  This should obviously be set to 'No' if
+            # you query a cp1253 encoded database from a latin1 client...
             if 'odbc_autotranslate' in keys:
                 connectors.append("AutoTranslate=%s" %
                                     keys.pop("odbc_autotranslate"))
         if self._user_supports_unicode_binds is not None:
             self.supports_unicode_binds = self._user_supports_unicode_binds
         else:
-            self.supports_unicode_binds = (not self.freetds or 
+            self.supports_unicode_binds = (not self.freetds or
                                             self.freetds_driver_version >= '0.91'
                                             ) and not self.easysoft
         # end Py2K

File lib/sqlalchemy/connectors/zxJDBC.py

     def _create_jdbc_url(self, url):
         """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
         return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
-                                      url.port is not None 
+                                      url.port is not None
                                         and ':%s' % url.port or '',
                                       url.database)
 
         opts = self._driver_kwargs()
         opts.update(url.query)
         return [
-                [self._create_jdbc_url(url), 
-                url.username, url.password, 
+                [self._create_jdbc_url(url),
+                url.username, url.password,
                 self.jdbc_driver_name],
                 opts]
 

File lib/sqlalchemy/dialects/__init__.py

 
 def _auto_fn(name):
     """default dialect importer.
-    
+
     plugs into the :class:`.PluginLoader`
     as a first-hit system.
-    
+
     """
     if "." in name:
         dialect, driver = name.split(".")

File lib/sqlalchemy/dialects/access/base.py

 
 .. note::
 
-    The Access dialect is **non-functional as of SQLAlchemy 0.6**, 
+    The Access dialect is **non-functional as of SQLAlchemy 0.6**,
     pending development efforts to bring it up-to-date.
 
 
                 # self._last_inserted_ids[0] is None:
                 self.cursor.execute("SELECT @@identity AS lastrowid")
                 row = self.cursor.fetchone()
-                self._last_inserted_ids = [int(row[0])] 
+                self._last_inserted_ids = [int(row[0])]
                 #+ self._last_inserted_ids[1:]
                 # print "LAST ROW ID", self._last_inserted_ids
 
 
                 colargs = \
                 {
-                    'nullable': not(col.Required or 
+                    'nullable': not(col.Required or
                                     col.Attributes & const.dbAutoIncrField),
                 }
                 default = col.DefaultValue
                         if isinstance(thecol.type, AcInteger) and \
                                 not (thecol.default and
                                 isinstance(
-                                        thecol.default.arg, 
+                                        thecol.default.arg,
                                         schema.Sequence
                                 )):
                             thecol.autoincrement = False
         # This is necessary, so we get the latest updates
         dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
 
-        names = [t.Name for t in dtbs.TableDefs 
+        names = [t.Name for t in dtbs.TableDefs
                 if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
         dtbs.Close()
         return names
                           'length':             'len',
                           }
     def visit_function(self, func):
-        """Access function names differ from the ANSI SQL names; 
+        """Access function names differ from the ANSI SQL names;
         rewrite common ones"""
         func.name = self.function_rewrites.get(func.name, func.name)
         return super(AccessCompiler, self).visit_function(func)

File lib/sqlalchemy/dialects/firebird/__init__.py

     dialect
 
 __all__ = (
-    'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME', 
+    'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
     'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
     'dialect'
 )

File lib/sqlalchemy/dialects/informix/base.py

 .. note::
 
     The Informix dialect functions on current SQLAlchemy versions
-    but is not regularly tested, and may have many issues and 
+    but is not regularly tested, and may have many issues and
     caveats not currently handled.
 
 """
         c = connection.execute(
         """select t1.constrname as cons_name,
                  t4.colname as local_column, t7.tabname as remote_table,
-                 t6.colname as remote_column, t7.owner as remote_owner 
+                 t6.colname as remote_column, t7.owner as remote_owner
             from sysconstraints as t1 , systables as t2 ,
                  sysindexes as t3 , syscolumns as t4 ,
                  sysreferences as t5 , syscolumns as t6 , systables as t7 ,
              and t3.tabid = t2.tabid and t3.idxname = t1.idxname
              and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
              t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
-             t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16) 
+             t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16)
              and t5.constrid = t1.constrid and t8.constrid = t5.primary
              and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
              t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
 
         # Select the column positions from sysindexes for sysconstraints
         data = connection.execute(
-            """select t2.* 
+            """select t2.*
             from systables as t1, sysindexes as t2, sysconstraints as t3
             where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
             and t2.idxname=t3.idxname and t3.constrtype='P'""",
         c = connection.execute(
             """select t1.colname
             from syscolumns as t1, systables as t2
-            where t2.tabname=? and t1.tabid = t2.tabid and 
+            where t2.tabname=? and t1.tabid = t2.tabid and
             t1.colno in (%s)""" % place_holder,
             table_name, *colpositions
         ).fetchall()
             c = connection.execute(
                 """select t1.colname
                 from syscolumns as t1, systables as t2
-                where t2.tabname=? and t1.tabid = t2.tabid and 
+                where t2.tabname=? and t1.tabid = t2.tabid and
                 t1.colno in (%s)""" % place_holder,
                 table_name, *colnames
             ).fetchall()

File lib/sqlalchemy/dialects/maxdb/base.py

 
 .. note::
 
-    The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**, 
+    The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
     pending development efforts to bring it up-to-date.
 
 Overview
                                     value[20:])])
         else:
             raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." % 
+                "datetimeformat '%s' is not supported." %
                 dialect.datetimeformat)
         return process
 
                 if value is None:
                     return None
                 else:
-                    return datetime.date(int(value[0:4]), int(value[4:6]), 
+                    return datetime.date(int(value[0:4]), int(value[4:6]),
                                          int(value[6:8]))
         elif dialect.datetimeformat == 'iso':
             def process(value):
                 if value is None:
                     return None
                 else:
-                    return datetime.date(int(value[0:4]), int(value[5:7]), 
+                    return datetime.date(int(value[0:4]), int(value[5:7]),
                                          int(value[8:10]))
         else:
             raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." % 
+                "datetimeformat '%s' is not supported." %
                 dialect.datetimeformat)
         return process
 
                 if value is None:
                     return None
                 else:
-                    return datetime.time(int(value[0:4]), int(value[4:6]), 
+                    return datetime.time(int(value[0:4]), int(value[4:6]),
                                          int(value[6:8]))
         elif dialect.datetimeformat == 'iso':
             def process(value):
                                          int(value[8:10]))
         else:
             raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." % 
+                "datetimeformat '%s' is not supported." %
                 dialect.datetimeformat)
         return process
 

File lib/sqlalchemy/dialects/mssql/__init__.py

 
 
 __all__ = (
-    'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR', 
+    'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
     'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
-    'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME', 
+    'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
     'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
     'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
 )

File lib/sqlalchemy/dialects/mssql/adodbapi.py

 class MSDateTime_adodbapi(MSDateTime):
     def result_processor(self, dialect, coltype):
         def process(value):
-            # adodbapi will return datetimes with empty time 
+            # adodbapi will return datetimes with empty time
             # values as datetime.date() objects.
             # Promote them back to full datetime.datetime()
             if type(value) is datetime.date:
 
         connectors = ["Provider=SQLOLEDB"]
         if 'port' in keys:
-            connectors.append ("Data Source=%s, %s" % 
+            connectors.append ("Data Source=%s, %s" %
                                 (keys.get("host"), keys.get("port")))
         else:
             connectors.append ("Data Source=%s" % keys.get("host"))

File lib/sqlalchemy/dialects/mssql/pymssql.py

     mssql+pymssql://<username>:<password>@<freetds_name>
 
 Adding "?charset=utf8" or similar will cause pymssql to return
-strings as Python unicode objects.   This can potentially improve 
-performance in some scenarios as decoding of strings is 
+strings as Python unicode objects.   This can potentially improve
+performance in some scenarios as decoding of strings is
 handled natively.
 
 Limitations

File lib/sqlalchemy/dialects/mssql/pyodbc.py

 
     dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
 
-* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection 
+* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
   that would appear like::
 
     DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
 
 * ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
   string which includes the port
-  information using the comma syntax. This will create the following 
+  information using the comma syntax. This will create the following
   connection string::
 
     DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
 Unicode Binds
 ^^^^^^^^^^^^^
 
-The current state of PyODBC on a unix backend with FreeTDS and/or 
+The current state of PyODBC on a unix backend with FreeTDS and/or
 EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
-versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically 
+versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
 alter how strings are received.  The PyODBC dialect attempts to use all the information
 it knows to determine whether or not a Python unicode literal can be
 passed directly to the PyODBC driver or not; while SQLAlchemy can encode
 bytestrings for certain encodings and requires a Python unicode object,
 while the author has observed widespread cases where a Python unicode
 is completely misinterpreted by PyODBC, particularly when dealing with
-the information schema tables used in table reflection, and the value 
+the information schema tables used in table reflection, and the value
 must first be encoded to a bytestring.
 
 It is for this reason that whether or not unicode literals for bound
-parameters be sent to PyODBC can be controlled using the 
-``supports_unicode_binds`` parameter to ``create_engine()``.  When 
-left at its default of ``None``, the PyODBC dialect will use its 
+parameters be sent to PyODBC can be controlled using the
+``supports_unicode_binds`` parameter to ``create_engine()``.  When
+left at its default of ``None``, the PyODBC dialect will use its
 best guess as to whether or not the driver deals with unicode literals
 well.  When ``False``, unicode literals will be encoded first, and when
 ``True`` unicode literals will be passed straight through.  This is an interim
 
         super(MSExecutionContext_pyodbc, self).pre_exec()
 
-        # don't embed the scope_identity select into an 
+        # don't embed the scope_identity select into an
         # "INSERT .. DEFAULT VALUES"
         if self._select_lastrowid and \
                 self.dialect.use_scope_identity and \
     def post_exec(self):
         if self._embedded_scope_identity:
             # Fetch the last inserted id from the manipulated statement
-            # We may have to skip over a number of result sets with 
+            # We may have to skip over a number of result sets with
             # no data (due to triggers, etc.)
             while True:
                 try:
-                    # fetchall() ensures the cursor is consumed 
+                    # fetchall() ensures the cursor is consumed
                     # without closing it (FreeTDS particularly)
                     row = self.cursor.fetchall()[0]
                     break
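
The ``supports_unicode_binds`` flag discussed above can also be set
explicitly rather than guessed; a minimal sketch (placeholder URL)::

    from sqlalchemy import create_engine

    # force unicode bind parameters to be encoded to bytestrings first
    engine = create_engine("mssql+pyodbc://user:pass@mydsn",
                           supports_unicode_binds=False)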

File lib/sqlalchemy/dialects/mssql/zxjdbc.py

 
     def _get_server_version_info(self, connection):
         return tuple(
-                    int(x) 
+                    int(x)
                     for x in connection.connection.dbversion.split('.')
                 )
 

File lib/sqlalchemy/dialects/mysql/base.py

 -------------------
 
 MySQL features an automatic connection close behavior, for connections that have
-been idle for eight hours or more.   To circumvent having this issue, use the 
+been idle for eight hours or more.   To circumvent having this issue, use the
 ``pool_recycle`` option which controls the maximum age of any connection::
 
     engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
 Transaction Isolation Level
 ---------------------------
 
-:func:`.create_engine` accepts an ``isolation_level`` 
-parameter which results in the command ``SET SESSION 
-TRANSACTION ISOLATION LEVEL <level>`` being invoked for 
+:func:`.create_engine` accepts an ``isolation_level``
+parameter which results in the command ``SET SESSION
+TRANSACTION ISOLATION LEVEL <level>`` being invoked for
 every new connection. Valid values for this parameter are
-``READ COMMITTED``, ``READ UNCOMMITTED``, 
+``READ COMMITTED``, ``READ UNCOMMITTED``,
 ``REPEATABLE READ``, and ``SERIALIZABLE``::
 
     engine = create_engine(
-                    "mysql://scott:tiger@localhost/test", 
+                    "mysql://scott:tiger@localhost/test",
                     isolation_level="READ UNCOMMITTED"
                 )
 
 This is in contradiction to the default setting on most MySQL DBAPI drivers,
 which is "number of rows actually modified/deleted".  For this reason, the
 SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag value 
+or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
 is overridden using DBAPI-specific options
 (such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
 OurSQL driver).
 ~~~~~~~~~~~~~
 
 Some MySQL storage engines permit you to specify an index type when creating
-an index or primary key constraint. SQLAlchemy provides this feature via the 
+an index or primary key constraint. SQLAlchemy provides this feature via the
 ``mysql_using`` parameter on :class:`.Index`::
 
     Index('my_index', my_table.c.data, mysql_using='hash')
     PrimaryKeyConstraint("data", mysql_using='hash')
 
 The value passed to the keyword argument will be simply passed through to the
-underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index 
+underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
 type for your MySQL storage engine.
 
 More information can be found at:
 
     def get_select_precolumns(self, select):
         """Add special MySQL keywords in place of DISTINCT.
-        
-        .. note:: 
-        
+
+        .. note::
+
           this usage is deprecated.  :meth:`.Select.prefix_with`
           should be used for special keywords at the start
           of a SELECT.
-          
+
         """
         if isinstance(select._distinct, basestring):
             return select._distinct.upper() + " "
             if limit is None:
                 # hardwire the upper limit.  Currently
                 # needed by OurSQL with Python 3
-                # (https://bugs.launchpad.net/oursql/+bug/686232), 
+                # (https://bugs.launchpad.net/oursql/+bug/686232),
                 # but also is consistent with the usage of the upper
                 # bound as part of MySQL's "syntax" for OFFSET with
                 # no LIMIT
                 return ' \n LIMIT %s, %s' % (
-                                self.process(sql.literal(offset)), 
+                                self.process(sql.literal(offset)),
                                 "18446744073709551615")
             else:
                 return ' \n LIMIT %s, %s' % (
-                                self.process(sql.literal(offset)), 
+                                self.process(sql.literal(offset)),
                                 self.process(sql.literal(limit)))
         else:
             # No offset provided, so just use the limit
             return None
 
     def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
-        return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw) 
+        return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
                     for t in [from_table] + list(extra_froms))
 
-    def update_from_clause(self, update_stmt, from_table, 
+    def update_from_clause(self, update_stmt, from_table,
                                 extra_froms, from_hints, **kw):
         return None
 
             constraint_string += "KEY %s (%s)" % (
                         self.preparer.quote(
                             "idx_autoinc_%s" % auto_inc_column.name, None
-                        ), 
+                        ),
                         self.preparer.format_column(auto_inc_column)
                     )
 
 
         opts = dict(
             (
-                k[len(self.dialect.name)+1:].upper(), 
+                k[len(self.dialect.name)+1:].upper(),
                 v
             )
             for k, v in table.kwargs.items()
                 arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
 
             if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
-                       'DEFAULT_CHARACTER_SET', 'CHARACTER_SET', 
+                       'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
                        'DEFAULT_CHARSET',
                        'DEFAULT_COLLATE'):
                 opt = opt.replace('_', ' ')
         table = preparer.format_table(index.table)
         columns = [preparer.quote(c.name, c.quote) for c in index.columns]
         name = preparer.quote(
-                    self._index_identifier(index.name), 
+                    self._index_identifier(index.name),
                     index.quote)
 
         text = "CREATE "
         if type_.precision is None:
             return self._extend_numeric(type_, "NUMERIC")
         elif type_.scale is None:
-            return self._extend_numeric(type_, 
-                            "NUMERIC(%(precision)s)" % 
+            return self._extend_numeric(type_,
+                            "NUMERIC(%(precision)s)" %
                             {'precision': type_.precision})
         else:
-            return self._extend_numeric(type_, 
-                            "NUMERIC(%(precision)s, %(scale)s)" % 
+            return self._extend_numeric(type_,
+                            "NUMERIC(%(precision)s, %(scale)s)" %
                             {'precision': type_.precision, 'scale' : type_.scale})
 
     def visit_DECIMAL(self, type_):
         if type_.precision is None:
             return self._extend_numeric(type_, "DECIMAL")
         elif type_.scale is None:
-            return self._extend_numeric(type_, 
-                            "DECIMAL(%(precision)s)" % 
+            return self._extend_numeric(type_,
+                            "DECIMAL(%(precision)s)" %
                             {'precision': type_.precision})
         else:
-            return self._extend_numeric(type_, 
-                            "DECIMAL(%(precision)s, %(scale)s)" % 
+            return self._extend_numeric(type_,
+                            "DECIMAL(%(precision)s, %(scale)s)" %
                             {'precision': type_.precision, 'scale' : type_.scale})
 
     def visit_DOUBLE(self, type_):
         if self._mysql_type(type_) and \
             type_.scale is not None and \
             type_.precision is not None:
-            return self._extend_numeric(type_, 
+            return self._extend_numeric(type_,
                             "FLOAT(%s, %s)" % (type_.precision, type_.scale))
         elif type_.precision is not None:
             return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))