Commits

Mike Bayer committed c367a02

- with InstanceState now more public, underscore all of its methods
that change object state, as these aren't intended for public
use.
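
In practice, the rename means any code that reaches into InstanceState
directly must switch to the underscored names (commit() becomes _commit(),
expire_attributes() becomes _expire_attributes(), and so on).  A minimal
before/after sketch; ``some_object`` and ``attrs`` are placeholders, not
names taken from this diff:

    from sqlalchemy.orm import attributes

    state = attributes.instance_state(some_object)   # any mapped instance

    # before this commit:
    #     state.commit(state.dict, attrs)
    #     state.expire_attributes(state.dict, attrs)

    # after this commit:
    state._commit(state.dict, attrs)
    state._expire_attributes(state.dict, attrs)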

Files changed (14)

lib/sqlalchemy/orm/attributes.py

         """set an attribute value on the given instance and 'commit' it."""
 
         dict_[self.key] = value
-        state.commit(dict_, [self.key])
+        state._commit(dict_, [self.key])
         return value
 
 class ScalarAttributeImpl(AttributeImpl):
 
         state.dict[self.key] = user_data
 
-        state.commit(dict_, [self.key])
+        state._commit(dict_, [self.key])
 
         if self.key in state._pending_mutations:
             # pending items exist.  issue a modified event,

lib/sqlalchemy/orm/instrumentation.py

         self.originals = {}
 
         self._bases = [mgr for mgr in [
-                        manager_of_class(base) 
+                        manager_of_class(base)
                         for base in self.class_.__bases__
                         if isinstance(base, type)
                  ] if mgr is not None]
 
     def _instrument_init(self):
         # TODO: self.class_.__init__ is often the already-instrumented
-        # __init__ from an instrumented superclass.  We still need to make 
+        # __init__ from an instrumented superclass.  We still need to make
         # our own wrapper, but it would
         # be nice to wrap the original __init__ and not our existing wrapper
         # of such, since this adds method overhead.
             self.uninstall_descriptor(key)
         del self[key]
         for cls in self.class_.__subclasses__():
-            manager = manager_of_class(cls) 
+            manager = manager_of_class(cls)
             if manager:
                 manager.uninstrument_attribute(key, True)
 
 
     def new_instance(self, state=None):
         instance = self.class_.__new__(self.class_)
-        setattr(instance, self.STATE_ATTR, 
+        setattr(instance, self.STATE_ATTR,
                     state or self._state_constructor(instance, self))
         return instance
 
     def setup_instance(self, instance, state=None):
-        setattr(instance, self.STATE_ATTR, 
+        setattr(instance, self.STATE_ATTR,
                     state or self._state_constructor(instance, self))
 
     def teardown_instance(self, instance):
 def _generate_init(class_, class_manager):
     """Build an __init__ decorator that triggers ClassManager events."""
 
-    # TODO: we should use the ClassManager's notion of the 
+    # TODO: we should use the ClassManager's notion of the
     # original '__init__' method, once ClassManager is fixed
     # to always reference that.
     original__init__ = class_.__init__
 def __init__(%(apply_pos)s):
     new_state = class_manager._new_state_if_none(%(self_arg)s)
     if new_state:
-        return new_state.initialize_instance(%(apply_kw)s)
+        return new_state._initialize_instance(%(apply_kw)s)
     else:
         return original__init__(%(apply_kw)s)
 """

lib/sqlalchemy/orm/loading.py

 
         if context.refresh_state and query._only_load_props \
                     and context.refresh_state in context.progress:
-            context.refresh_state.commit(
+            context.refresh_state._commit(
                     context.refresh_state.dict, query._only_load_props)
             context.progress.pop(context.refresh_state)
 
-        statelib.InstanceState.commit_all_states(
+        statelib.InstanceState._commit_all_states(
             context.progress.items(),
             session.identity_map
         )
 
-        for ii, (dict_, attrs) in context.partials.iteritems():
-            ii.commit(dict_, attrs)
+        for state, (dict_, attrs) in context.partials.iteritems():
+            state._commit(dict_, attrs)
 
         for row in rows:
             yield row

lib/sqlalchemy/orm/mapper.py

     """Define the correlation of class attributes to database table
     columns.
 
-    Instances of this class should be constructed via the
-    :func:`~sqlalchemy.orm.mapper` function.
+    The :class:`.Mapper` object is instantiated using the
+    :func:`~sqlalchemy.orm.mapper` function.    For information
+    about instantiating new :class:`.Mapper` objects, see
+    that function's documentation.
+
+
+    When :func:`.mapper` is used
+    explicitly to link a user-defined class with table
+    metadata, this is referred to as *classical mapping*.
+    Modern SQLAlchemy usage tends to favor the
+    :mod:`sqlalchemy.ext.declarative` extension for class
+    configuration, which
+    makes use of :func:`.mapper` behind the scenes.
+
+    Given a particular class known to be mapped by the ORM,
+    the :class:`.Mapper` which maintains it can be acquired
+    using the :func:`.inspect` function::
+
+        from sqlalchemy import inspect
+
+        mapper = inspect(MyClass)
+
+    A class which was mapped by the :mod:`sqlalchemy.ext.declarative`
+    extension will also have its mapper available via the ``__mapper__``
+    attribute.
+
 
     """
     def __init__(self,
 
     @util.memoized_property
     def attr(self):
+        """A namespace of all :class:`.MapperProperty` objects
+        associated with this mapper.
+
+        This is an object that provides each property based on
+        its key name.  For instance, the mapper for a
+        ``User`` class which has a ``User.name`` attribute would
+        provide ``mapper.attr.name``, which would be the
+        :class:`.ColumnProperty` representing the ``name``
+        column.   The namespace object can also be iterated,
+        which would yield each :class:`.MapperProperty`.
+
+        :class:`.Mapper` has several pre-filtered views
+        of this attribute which limit the types of properties
+        returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
+        :attr:`.relationships`, and :attr:`.composites`.
+
+
+        """
         if _new_mappers:
             configure_mappers()
         return util.ImmutableProperties(self._props)
 
     @_memoized_configured_property
     def synonyms(self):
+        """Return a namespace of all :class:`.SynonymProperty`
+        properties maintained by this :class:`.Mapper`.
+
+        See also:
+
+        :attr:`.Mapper.attr` - namespace of all :class:`.MapperProperty`
+        objects.
+
+        """
         return self._filter_properties(descriptor_props.SynonymProperty)
 
     @_memoized_configured_property
     def column_attrs(self):
+        """Return a namespace of all :class:`.ColumnProperty`
+        properties maintained by this :class:`.Mapper`.
+
+        See also:
+
+        :attr:`.Mapper.attr` - namespace of all :class:`.MapperProperty`
+        objects.
+
+        """
         return self._filter_properties(properties.ColumnProperty)
 
     @_memoized_configured_property
     def relationships(self):
+        """Return a namespace of all :class:`.RelationshipProperty`
+        properties maintained by this :class:`.Mapper`.
+
+        See also:
+
+        :attr:`.Mapper.attr` - namespace of all :class:`.MapperProperty`
+        objects.
+
+        """
         return self._filter_properties(properties.RelationshipProperty)
 
     @_memoized_configured_property
     def composites(self):
+        """Return a namespace of all :class:`.CompositeProperty`
+        properties maintained by this :class:`.Mapper`.
+
+        See also:
+
+        :attr:`.Mapper.attr` - namespace of all :class:`.MapperProperty`
+        objects.
+
+        """
         return self._filter_properties(descriptor_props.CompositeProperty)
 
     def _filter_properties(self, type_):
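
The docstrings added to mapper.py above describe the new inspection namespaces
on Mapper (attr, synonyms, column_attrs, relationships, composites) and the
inspect() entry point.  A rough usage sketch, assuming a hypothetical mapped
``User`` class with a ``name`` column and an ``addresses`` relationship
(neither of which appears in this commit):

    from sqlalchemy import inspect

    mapper = inspect(User)               # or User.__mapper__ for declarative classes

    name_prop = mapper.attr.name         # ColumnProperty for the 'name' column
    columns = list(mapper.column_attrs)  # only ColumnProperty objects
    rels = list(mapper.relationships)    # only RelationshipProperty objects
    syns = list(mapper.synonyms)         # only SynonymProperty objects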

lib/sqlalchemy/orm/persistence.py

 from ..sql import expression
 
 def save_obj(base_mapper, states, uowtransaction, single=False):
-    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list 
+    """Issue ``INSERT`` and/or ``UPDATE`` statements for a list
     of objects.
 
     This is called within the context of a UOWTransaction during a
         return
 
     states_to_insert, states_to_update = _organize_states_for_save(
-                                                base_mapper, 
-                                                states, 
+                                                base_mapper,
+                                                states,
                                                 uowtransaction)
 
     cached_connections = _cached_connection_dict(base_mapper)
 
     for table, mapper in base_mapper._sorted_tables.iteritems():
-        insert = _collect_insert_commands(base_mapper, uowtransaction, 
+        insert = _collect_insert_commands(base_mapper, uowtransaction,
                                 table, states_to_insert)
 
-        update = _collect_update_commands(base_mapper, uowtransaction, 
+        update = _collect_update_commands(base_mapper, uowtransaction,
                                 table, states_to_update)
 
         if update:
-            _emit_update_statements(base_mapper, uowtransaction, 
-                                    cached_connections, 
+            _emit_update_statements(base_mapper, uowtransaction,
+                                    cached_connections,
                                     mapper, table, update)
 
         if insert:
-            _emit_insert_statements(base_mapper, uowtransaction, 
-                                    cached_connections, 
+            _emit_insert_statements(base_mapper, uowtransaction,
+                                    cached_connections,
                                     table, insert)
 
-    _finalize_insert_update_commands(base_mapper, uowtransaction, 
+    _finalize_insert_update_commands(base_mapper, uowtransaction,
                                     states_to_insert, states_to_update)
 
 def post_update(base_mapper, states, uowtransaction, post_update_cols):
     cached_connections = _cached_connection_dict(base_mapper)
 
     states_to_update = _organize_states_for_post_update(
-                                    base_mapper, 
+                                    base_mapper,
                                     states, uowtransaction)
 
 
     for table, mapper in base_mapper._sorted_tables.iteritems():
-        update = _collect_post_update_commands(base_mapper, uowtransaction, 
-                                            table, states_to_update, 
+        update = _collect_post_update_commands(base_mapper, uowtransaction,
+                                            table, states_to_update,
                                             post_update_cols)
 
         if update:
-            _emit_post_update_statements(base_mapper, uowtransaction, 
-                                    cached_connections, 
+            _emit_post_update_statements(base_mapper, uowtransaction,
+                                    cached_connections,
                                     mapper, table, update)
 
 def delete_obj(base_mapper, states, uowtransaction):
     cached_connections = _cached_connection_dict(base_mapper)
 
     states_to_delete = _organize_states_for_delete(
-                                        base_mapper, 
+                                        base_mapper,
                                         states,
                                         uowtransaction)
 
     table_to_mapper = base_mapper._sorted_tables
 
     for table in reversed(table_to_mapper.keys()):
-        delete = _collect_delete_commands(base_mapper, uowtransaction, 
+        delete = _collect_delete_commands(base_mapper, uowtransaction,
                                 table, states_to_delete)
 
         mapper = table_to_mapper[table]
 
-        _emit_delete_statements(base_mapper, uowtransaction, 
+        _emit_delete_statements(base_mapper, uowtransaction,
                     cached_connections, mapper, table, delete)
 
     for state, state_dict, mapper, has_identity, connection \
     states_to_update = []
 
     for state, dict_, mapper, connection in _connections_for_states(
-                                            base_mapper, uowtransaction, 
+                                            base_mapper, uowtransaction,
                                             states):
 
         has_identity = bool(state.key)
         else:
             mapper.dispatch.before_update(mapper, connection, state)
 
-        # detect if we have a "pending" instance (i.e. has 
-        # no instance_key attached to it), and another instance 
-        # with the same identity key already exists as persistent. 
+        # detect if we have a "pending" instance (i.e. has
+        # no instance_key attached to it), and another instance
+        # with the same identity key already exists as persistent.
         # convert to an UPDATE if so.
         if not has_identity and \
             instance_key in uowtransaction.session.identity_map:
             if not uowtransaction.is_deleted(existing):
                 raise orm_exc.FlushError(
                     "New instance %s with identity key %s conflicts "
-                    "with persistent instance %s" % 
+                    "with persistent instance %s" %
                     (state_str(state), instance_key,
                      state_str(existing)))
 
             base_mapper._log_debug(
                 "detected row switch for identity %s.  "
                 "will update %s, remove %s from "
-                "transaction", instance_key, 
+                "transaction", instance_key,
                 state_str(state), state_str(existing))
 
             # remove the "delete" flag from the existing element
 
         if not has_identity and not row_switch:
             states_to_insert.append(
-                (state, dict_, mapper, connection, 
+                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
             )
         else:
             states_to_update.append(
-                (state, dict_, mapper, connection, 
+                (state, dict_, mapper, connection,
                 has_identity, instance_key, row_switch)
             )
 
     return states_to_insert, states_to_update
 
-def _organize_states_for_post_update(base_mapper, states, 
+def _organize_states_for_post_update(base_mapper, states,
                                                 uowtransaction):
     """Make an initial pass across a set of states for UPDATE
     corresponding to post_update.
 
-    This includes obtaining key information for each state 
-    including its dictionary, mapper, the connection to use for 
+    This includes obtaining key information for each state
+    including its dictionary, mapper, the connection to use for
     the execution per state.
 
     """
-    return list(_connections_for_states(base_mapper, uowtransaction, 
+    return list(_connections_for_states(base_mapper, uowtransaction,
                                             states))
 
 def _organize_states_for_delete(base_mapper, states, uowtransaction):
     states_to_delete = []
 
     for state, dict_, mapper, connection in _connections_for_states(
-                                            base_mapper, uowtransaction, 
+                                            base_mapper, uowtransaction,
                                             states):
 
         mapper.dispatch.before_delete(mapper, connection, state)
 
-        states_to_delete.append((state, dict_, mapper, 
+        states_to_delete.append((state, dict_, mapper,
                 bool(state.key), connection))
     return states_to_delete
 
-def _collect_insert_commands(base_mapper, uowtransaction, table, 
+def _collect_insert_commands(base_mapper, uowtransaction, table,
                                                 states_to_insert):
     """Identify sets of values to use in INSERT statements for a
     list of states.
             if col is mapper.version_id_col:
                 params[col.key] = mapper.version_id_generator(None)
             else:
-                # pull straight from the dict for 
+                # pull straight from the dict for
                 # pending objects
                 prop = mapper._columntoproperty[col]
                 value = state_dict.get(prop.key, None)
                 else:
                     params[col.key] = value
 
-        insert.append((state, state_dict, params, mapper, 
+        insert.append((state, state_dict, params, mapper,
                         connection, value_params, has_all_pks))
     return insert
 
-def _collect_update_commands(base_mapper, uowtransaction, 
+def _collect_update_commands(base_mapper, uowtransaction,
                                 table, states_to_update):
     """Identify sets of values to use in UPDATE statements for a
     list of states.
             if col is mapper.version_id_col:
                 params[col._label] = \
                     mapper._get_committed_state_attr_by_column(
-                                    row_switch or state, 
-                                    row_switch and row_switch.dict 
+                                    row_switch or state,
+                                    row_switch and row_switch.dict
                                                 or state_dict,
                                     col)
 
                 prop = mapper._columntoproperty[col]
                 history = attributes.get_state_history(
-                    state, prop.key, 
+                    state, prop.key,
                     attributes.PASSIVE_NO_INITIALIZE
                 )
                 if history.added:
                     params[col.key] = mapper.version_id_generator(
                                                 params[col._label])
 
-                    # HACK: check for history, in case the 
+                    # HACK: check for history, in case the
                     # history is only
-                    # in a different table than the one 
+                    # in a different table than the one
                     # where the version_id_col is.
                     for prop in mapper._columntoproperty.itervalues():
                         history = attributes.get_state_history(
-                                state, prop.key, 
+                                state, prop.key,
                                 attributes.PASSIVE_NO_INITIALIZE)
                         if history.added:
                             hasdata = True
             else:
                 prop = mapper._columntoproperty[col]
                 history = attributes.get_state_history(
-                                state, prop.key, 
+                                state, prop.key,
                                 attributes.PASSIVE_NO_INITIALIZE)
                 if history.added:
                     if isinstance(history.added[0],
                                 value = history.added[0]
                                 params[col._label] = value
                             else:
-                                # use the old value to 
+                                # use the old value to
                                 # locate the row
                                 value = history.deleted[0]
                                 params[col._label] = value
                             "Can't update table "
                             "using NULL for primary "
                             "key value")
-            update.append((state, state_dict, params, mapper, 
+            update.append((state, state_dict, params, mapper,
                             connection, value_params))
     return update
 
 
-def _collect_post_update_commands(base_mapper, uowtransaction, table, 
+def _collect_post_update_commands(base_mapper, uowtransaction, table,
                         states_to_update, post_update_cols):
     """Identify sets of values to use in UPDATE statements for a
     list of states within a post_update operation.
             elif col in post_update_cols:
                 prop = mapper._columntoproperty[col]
                 history = attributes.get_state_history(
-                            state, prop.key, 
+                            state, prop.key,
                             attributes.PASSIVE_NO_INITIALIZE)
                 if history.added:
                     value = history.added[0]
                     params[col.key] = value
                     hasdata = True
         if hasdata:
-            update.append((state, state_dict, params, mapper, 
+            update.append((state, state_dict, params, mapper,
                             connection))
     return update
 
-def _collect_delete_commands(base_mapper, uowtransaction, table, 
+def _collect_delete_commands(base_mapper, uowtransaction, table,
                                 states_to_delete):
-    """Identify values to use in DELETE statements for a list of 
+    """Identify values to use in DELETE statements for a list of
     states to be deleted."""
 
     delete = util.defaultdict(list)
     return delete
 
 
-def _emit_update_statements(base_mapper, uowtransaction, 
+def _emit_update_statements(base_mapper, uowtransaction,
                         cached_connections, mapper, table, update):
     """Emit UPDATE statements corresponding to value lists collected
     by _collect_update_commands()."""
 
         _postfetch(
                 mapper,
-                uowtransaction, 
-                table, 
-                state, 
-                state_dict, 
-                c.context.prefetch_cols, 
+                uowtransaction,
+                table,
+                state,
+                state_dict,
+                c.context.prefetch_cols,
                 c.context.postfetch_cols,
-                c.context.compiled_parameters[0], 
+                c.context.compiled_parameters[0],
                 value_params)
         rows += c.rowcount
 
 
     elif needs_version_id:
         util.warn("Dialect %s does not support updated rowcount "
-                "- versioning cannot be verified." % 
+                "- versioning cannot be verified." %
                 c.dialect.dialect_description,
                 stacklevel=12)
 
-def _emit_insert_statements(base_mapper, uowtransaction, 
+def _emit_insert_statements(base_mapper, uowtransaction,
                         cached_connections, table, insert):
     """Emit INSERT statements corresponding to value lists collected
     by _collect_insert_commands()."""
     statement = base_mapper._memo(('insert', table), table.insert)
 
     for (connection, pkeys, hasvalue, has_all_pks), \
-        records in groupby(insert, 
-                            lambda rec: (rec[4], 
-                                    rec[2].keys(), 
-                                    bool(rec[5]), 
+        records in groupby(insert,
+                            lambda rec: (rec[4],
+                                    rec[2].keys(),
+                                    bool(rec[5]),
                                     rec[6])
     ):
         if has_all_pks and not hasvalue:
             c = cached_connections[connection].\
                                 execute(statement, multiparams)
 
-            for (state, state_dict, params, mapper, 
+            for (state, state_dict, params, mapper,
                     conn, value_params, has_all_pks), \
                     last_inserted_params in \
                     zip(records, c.context.compiled_parameters):
                 _postfetch(
                         mapper,
-                        uowtransaction, 
+                        uowtransaction,
                         table,
-                        state, 
+                        state,
                         state_dict,
                         c.context.prefetch_cols,
                         c.context.postfetch_cols,
-                        last_inserted_params, 
+                        last_inserted_params,
                         value_params)
 
         else:
 
                 if primary_key is not None:
                     # set primary key attributes
-                    for pk, col in zip(primary_key, 
+                    for pk, col in zip(primary_key,
                                     mapper._pks_by_table[table]):
                         prop = mapper._columntoproperty[col]
                         if state_dict.get(prop.key) is None:
                             # TODO: would rather say:
                             #state_dict[prop.key] = pk
                             mapper._set_state_attr_by_column(
-                                        state, 
-                                        state_dict, 
+                                        state,
+                                        state_dict,
                                         col, pk)
 
                 _postfetch(
                         mapper,
-                        uowtransaction, 
-                        table, 
-                        state, 
+                        uowtransaction,
+                        table,
+                        state,
                         state_dict,
-                        result.context.prefetch_cols, 
+                        result.context.prefetch_cols,
                         result.context.postfetch_cols,
-                        result.context.compiled_parameters[0], 
+                        result.context.compiled_parameters[0],
                         value_params)
 
 
 
-def _emit_post_update_statements(base_mapper, uowtransaction, 
+def _emit_post_update_statements(base_mapper, uowtransaction,
                             cached_connections, mapper, table, update):
     """Emit UPDATE statements corresponding to value lists collected
     by _collect_post_update_commands()."""
 
     # execute each UPDATE in the order according to the original
     # list of states to guarantee row access order, but
-    # also group them into common (connection, cols) sets 
+    # also group them into common (connection, cols) sets
     # to support executemany().
     for key, grouper in groupby(
         update, lambda rec: (rec[4], rec[2].keys())
     ):
         connection = key[0]
-        multiparams = [params for state, state_dict, 
+        multiparams = [params for state, state_dict,
                                 params, mapper, conn in grouper]
         cached_connections[connection].\
                             execute(statement, multiparams)
 
 
-def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, 
+def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
                                     mapper, table, delete):
     """Emit DELETE statements corresponding to value lists collected
     by _collect_delete_commands()."""
 
         if need_version_id:
             clause.clauses.append(
-                mapper.version_id_col == 
+                mapper.version_id_col ==
                 sql.bindparam(
-                        mapper.version_id_col.key, 
+                        mapper.version_id_col.key,
                         type_=mapper.version_id_col.type
                 )
             )
                 if rows != len(del_objects):
                     raise orm_exc.StaleDataError(
                         "DELETE statement on table '%s' expected to "
-                        "delete %d row(s); %d were matched." % 
+                        "delete %d row(s); %d were matched." %
                         (table.description, len(del_objects), c.rowcount)
                     )
             else:
                 util.warn(
                     "Dialect %s does not support deleted rowcount "
-                    "- versioning cannot be verified." % 
+                    "- versioning cannot be verified." %
                     connection.dialect.dialect_description,
                     stacklevel=12)
                 connection.execute(statement, del_objects)
             connection.execute(statement, del_objects)
 
 
-def _finalize_insert_update_commands(base_mapper, uowtransaction, 
+def _finalize_insert_update_commands(base_mapper, uowtransaction,
                             states_to_insert, states_to_update):
     """finalize state on states that have been inserted or updated,
     including calling after_insert/after_update events.
 
         if mapper._readonly_props:
             readonly = state.unmodified_intersection(
-                [p.key for p in mapper._readonly_props 
+                [p.key for p in mapper._readonly_props
                     if p.expire_on_flush or p.key not in state.dict]
             )
             if readonly:
-                state.expire_attributes(state.dict, readonly)
+                state._expire_attributes(state.dict, readonly)
 
         # if eager_defaults option is enabled,
         # refresh whatever has been expired.
         else:
             mapper.dispatch.after_update(mapper, connection, state)
 
-def _postfetch(mapper, uowtransaction, table, 
+def _postfetch(mapper, uowtransaction, table,
                 state, dict_, prefetch_cols, postfetch_cols,
                             params, value_params):
     """Expire attributes in need of newly persisted database state,
             mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
 
     if postfetch_cols:
-        state.expire_attributes(state.dict, 
-                            [mapper._columntoproperty[c].key 
-                            for c in postfetch_cols if c in 
+        state._expire_attributes(state.dict,
+                            [mapper._columntoproperty[c].key
+                            for c in postfetch_cols if c in
                             mapper._columntoproperty]
                         )
 
     # TODO: this still goes a little too often.  would be nice to
     # have definitive list of "columns that changed" here
     for m, equated_pairs in mapper._table_to_equated[table]:
-        sync.populate(state, m, state, m, 
-                                        equated_pairs, 
+        sync.populate(state, m, state, m,
+                                        equated_pairs,
                                         uowtransaction,
                                         mapper.passive_updates)
 
 
     """
     # if session has a connection callable,
-    # organize individual states with the connection 
+    # organize individual states with the connection
     # to use for update
     if uowtransaction.session.connection_callable:
         connection_callable = \
         except KeyError:
             raise sa_exc.ArgumentError(
                             "Valid strategies for session synchronization "
-                            "are %s" % (", ".join(sorted(repr(x) 
+                            "are %s" % (", ".join(sorted(repr(x)
                                 for x in lookup.keys()))))
         else:
             return klass(*arg)
         }, synchronize_session, query, values)
 
     def _do_exec(self):
-        update_stmt = sql.update(self.primary_table, 
+        update_stmt = sql.update(self.primary_table,
                             self.context.whereclause, self.values)
 
         self.result = self.query.session.execute(
 
     def _do_post(self):
         session = self.query.session
-        session.dispatch.after_bulk_update(session, self.query, 
+        session.dispatch.after_bulk_update(session, self.query,
                                 self.context, self.result)
 
 class BulkDelete(BulkUD):
         }, synchronize_session, query)
 
     def _do_exec(self):
-        delete_stmt = sql.delete(self.primary_table, 
+        delete_stmt = sql.delete(self.primary_table,
                                     self.context.whereclause)
 
-        self.result = self.query.session.execute(delete_stmt, 
+        self.result = self.query.session.execute(delete_stmt,
                                     params=self.query._params)
         self.rowcount = self.result.rowcount
 
     def _do_post(self):
         session = self.query.session
-        session.dispatch.after_bulk_delete(session, self.query, 
+        session.dispatch.after_bulk_delete(session, self.query,
                         self.context, self.result)
 
 class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
-    """BulkUD which handles UPDATEs using the "evaluate" 
+    """BulkUD which handles UPDATEs using the "evaluate"
     method of session resolution."""
 
     def _additional_evaluators(self,evaluator_compiler):
             for key in to_evaluate:
                 dict_[key] = self.value_evaluators[key](obj)
 
-            state.commit(dict_, list(to_evaluate))
+            state._commit(dict_, list(to_evaluate))
 
-            # expire attributes with pending changes 
+            # expire attributes with pending changes
             # (there was no autoflush, so they are overwritten)
-            state.expire_attributes(dict_,
+            state._expire_attributes(dict_,
                             set(evaluated_keys).
                                 difference(to_evaluate))
             states.add(state)
         session._register_altered(states)
 
 class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
-    """BulkUD which handles DELETEs using the "evaluate" 
+    """BulkUD which handles DELETEs using the "evaluate"
     method of session resolution."""
 
     def _do_post_synchronize(self):
         self.query.session._remove_newly_deleted(
-                [attributes.instance_state(obj) 
+                [attributes.instance_state(obj)
                     for obj in self.matched_objects])
 
 class BulkUpdateFetch(BulkFetch, BulkUpdate):
-    """BulkUD which handles UPDATEs using the "fetch" 
+    """BulkUD which handles UPDATEs using the "fetch"
     method of session resolution."""
 
     def _do_post_synchronize(self):
         session._register_altered(states)
 
 class BulkDeleteFetch(BulkFetch, BulkDelete):
-    """BulkUD which handles DELETEs using the "fetch" 
+    """BulkUD which handles DELETEs using the "fetch"
     method of session resolution."""
 
     def _do_post_synchronize(self):

lib/sqlalchemy/orm/properties.py

                 impl = dest_state.get_impl(self.key)
                 impl.set(dest_state, dest_dict, value, None)
         elif dest_state.has_identity and self.key not in dest_dict:
-            dest_state.expire_attributes(dest_dict, [self.key])
+            dest_state._expire_attributes(dest_dict, [self.key])
 
     class Comparator(PropComparator):
         @util.memoized_instancemethod

lib/sqlalchemy/orm/session.py

                  expire_on_commit=True, **kwargs):
     """Generate a custom-configured :class:`.Session` class.
 
-    The returned object is a subclass of :class:`.Session`, which, when instantiated
-    with no arguments, uses the keyword arguments configured here as its
-    constructor arguments.
+    The returned object is a subclass of :class:`.Session`, which,
+    when instantiated with no arguments, uses the keyword arguments
+    configured here as its constructor arguments.
 
-    It is intended that the :func:`.sessionmaker()` function be called within the
-    global scope of an application, and the returned class be made available
-    to the rest of the application as the single class used to instantiate
-    sessions.
+    It is intended that the :func:`.sessionmaker()` function be called
+    within the global scope of an application, and the returned class
+    be made available to the rest of the application as the single
+    class used to instantiate sessions.
 
     e.g.::
 
         Session.configure(bind=create_engine('sqlite:///foo.db'))
 
         sess = Session()
-    
+
     For options, see the constructor options for :class:`.Session`.
-    
+
     """
     kwargs['bind'] = bind
     kwargs['autoflush'] = autoflush
 
     .. versionchanged:: 0.4
         Direct usage of :class:`.SessionTransaction` is not typically
-        necessary; use the :meth:`.Session.rollback` and 
-        :meth:`.Session.commit` methods on :class:`.Session` itself to 
+        necessary; use the :meth:`.Session.rollback` and
+        :meth:`.Session.commit` methods on :class:`.Session` itself to
         control the transaction.
 
     The current instance of :class:`.SessionTransaction` for a given
     The :class:`.SessionTransaction` object is **not** thread-safe.
 
     See also:
-    
+
     :meth:`.Session.rollback`
-    
+
     :meth:`.Session.commit`
 
     :attr:`.Session.is_active`
-    
+
     :meth:`.SessionEvents.after_commit`
-    
+
     :meth:`.SessionEvents.after_rollback`
-    
+
     :meth:`.SessionEvents.after_soft_rollback`
-    
+
     .. index::
       single: thread safety; SessionTransaction
 
 
         for s in self.session.identity_map.all_states():
             if not dirty_only or s.modified or s in self._dirty:
-                s.expire(s.dict, self.session.identity_map._modified)
+                s._expire(s.dict, self.session.identity_map._modified)
 
     def _remove_snapshot(self):
         assert self._is_transaction_boundary
 
         if not self.nested and self.session.expire_on_commit:
             for s in self.session.identity_map.all_states():
-                s.expire(s.dict, self.session.identity_map._modified)
+                s._expire(s.dict, self.session.identity_map._modified)
 
     def _connection_for_bind(self, bind):
         self._assert_is_active()
         '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
         'close', 'commit', 'connection', 'delete', 'execute', 'expire',
         'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
-        'is_modified', 
-        'merge', 'query', 'refresh', 'rollback', 
+        'is_modified',
+        'merge', 'query', 'refresh', 'rollback',
         'scalar')
 
 
     def __init__(self, bind=None, autoflush=True, expire_on_commit=True,
                 _enable_transaction_accounting=True,
-                 autocommit=False, twophase=False, 
+                 autocommit=False, twophase=False,
                  weak_identity_map=True, binds=None, extension=None,
                  query_cls=query.Query):
         """Construct a new Session.
 
-        See also the :func:`.sessionmaker` function which is used to 
+        See also the :func:`.sessionmaker` function which is used to
         generate a :class:`.Session`-producing callable with a given
         set of arguments.
 
           by any of these methods, the ``Session`` is ready for the next usage,
           which will again acquire and maintain a new connection/transaction.
 
-        :param autoflush: When ``True``, all query operations will issue a 
+        :param autoflush: When ``True``, all query operations will issue a
            ``flush()`` call to this ``Session`` before proceeding. This is a
            convenience feature so that ``flush()`` need not be called repeatedly
            in order for database queries to retrieve results. It's typical that
            attribute/object access subsequent to a completed transaction will load
            from the most recent database state.
 
-        :param extension: An optional 
+        :param extension: An optional
            :class:`~.SessionExtension` instance, or a list
            of such instances, which will receive pre- and post- commit and flush
            events, as well as a post-rollback event. **Deprecated.**
             be called. This allows each database to roll back the entire
             transaction, before each transaction is committed.
 
-        :param weak_identity_map:  Defaults to ``True`` - when set to 
-           ``False``, objects placed in the :class:`.Session` will be 
-           strongly referenced until explicitly removed or the 
+        :param weak_identity_map:  Defaults to ``True`` - when set to
+           ``False``, objects placed in the :class:`.Session` will be
+           strongly referenced until explicitly removed or the
            :class:`.Session` is closed.  **Deprecated** - this option
            is obsolete.
 
         transaction or nested transaction, an error is raised, unless
         ``subtransactions=True`` or ``nested=True`` is specified.
 
-        The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin` 
+        The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin`
         can create a subtransaction if a transaction is already in progress.
         For documentation on subtransactions, please see :ref:`session_subtransactions`.
 
 
         By default, the :class:`.Session` also expires all database
         loaded state on all ORM-managed attributes after transaction commit.
-        This so that subsequent operations load the most recent 
         This is so that subsequent operations load the most recent
         data from the database.   This behavior can be disabled using
         the ``expire_on_commit=False`` option to :func:`.sessionmaker` or
         the :class:`.Session` constructor.
 
         self.transaction.prepare()
 
-    def connection(self, mapper=None, clause=None, 
-                        bind=None, 
-                        close_with_result=False, 
+    def connection(self, mapper=None, clause=None,
+                        bind=None,
+                        close_with_result=False,
                         **kw):
-        """Return a :class:`.Connection` object corresponding to this 
+        """Return a :class:`.Connection` object corresponding to this
         :class:`.Session` object's transactional state.
 
         If this :class:`.Session` is configured with ``autocommit=False``,
         is returned, or if no transaction is in progress, a new one is begun
         and the :class:`.Connection` returned (note that no transactional state
         is established with the DBAPI until the first SQL statement is emitted).
-        
+
         Alternatively, if this :class:`.Session` is configured with ``autocommit=True``,
-        an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect` 
+        an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect`
         on the underlying :class:`.Engine`.
 
         Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through
-        any of the optional keyword arguments.   This ultimately makes usage of the 
+        any of the optional keyword arguments.   This ultimately makes usage of the
         :meth:`.get_bind` method for resolution.
 
         :param bind:
           ``clause``.
 
         :param clause:
-            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, 
-            :func:`~.sql.expression.text`, 
+            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
+            :func:`~.sql.expression.text`,
             etc.) which will be used to locate a bind, if a bind
             cannot otherwise be identified.
 
         :param close_with_result: Passed to :meth:`Engine.connect`, indicating
           the :class:`.Connection` should be considered "single use", automatically
-          closing when the first result set is closed.  This flag only has 
+          closing when the first result set is closed.  This flag only has
           an effect if this :class:`.Session` is configured with ``autocommit=True``
           and does not already have a  transaction in progress.
 
         :param \**kw:
           Additional keyword arguments are sent to :meth:`get_bind()`,
-          allowing additional arguments to be passed to custom 
+          allowing additional arguments to be passed to custom
           implementations of :meth:`get_bind`.
 
         """
         if bind is None:
             bind = self.get_bind(mapper, clause=clause, **kw)
 
-        return self._connection_for_bind(bind, 
+        return self._connection_for_bind(bind,
                                         close_with_result=close_with_result)
 
     def _connection_for_bind(self, engine, **kwargs):
         set to ``True`` so that an ``autocommit=True`` :class:`.Session`
         with no active transaction will produce a result that auto-closes
         the underlying :class:`.Connection`.
-        
+
         :param clause:
-            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, 
+            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
             :func:`~.sql.expression.text`, etc.) or string SQL statement to be executed.  The clause
             will also be used to locate a bind, if this :class:`.Session`
             is not bound to a single engine already, and the ``mapper``
           that connection will be used.  This argument takes
           precedence over ``mapper`` and ``clause`` when locating
           a bind.
-          
+
         :param \**kw:
           Additional keyword arguments are sent to :meth:`get_bind()`,
-          allowing additional arguments to be passed to custom 
+          allowing additional arguments to be passed to custom
           implementations of :meth:`get_bind`.
 
         """
 
     def get_bind(self, mapper=None, clause=None):
         """Return a "bind" to which this :class:`.Session` is bound.
-        
-        The "bind" is usually an instance of :class:`.Engine`, 
+
+        The "bind" is usually an instance of :class:`.Engine`,
         except in the case where the :class:`.Session` has been
         explicitly bound directly to a :class:`.Connection`.
 
-        For a multiply-bound or unbound :class:`.Session`, the 
-        ``mapper`` or ``clause`` arguments are used to determine the 
+        For a multiply-bound or unbound :class:`.Session`, the
+        ``mapper`` or ``clause`` arguments are used to determine the
         appropriate bind to return.
-        
+
         Note that the "mapper" argument is usually present
         when :meth:`.Session.get_bind` is called via an ORM
-        operation such as a :meth:`.Session.query`, each 
-        individual INSERT/UPDATE/DELETE operation within a 
+        operation such as a :meth:`.Session.query`, each
+        individual INSERT/UPDATE/DELETE operation within a
         :meth:`.Session.flush`, call, etc.
-        
+
         The order of resolution is:
-        
+
         1. if mapper given and session.binds is present,
            locate a bind based on mapper.
         2. if clause given and session.binds is present,
            locate a bind based on :class:`.Table` objects
            found in the given clause present in session.binds.
         3. if session.bind is present, return that.
-        4. if clause given, attempt to return a bind 
+        4. if clause given, attempt to return a bind
            linked to the :class:`.MetaData` ultimately
            associated with the clause.
         5. if mapper given, attempt to return a bind
-           linked to the :class:`.MetaData` ultimately 
+           linked to the :class:`.MetaData` ultimately
            associated with the :class:`.Table` or other
            selectable to which the mapper is mapped.
         6. No bind can be found, :class:`.UnboundExecutionError`
            is raised.
-         
+
         :param mapper:
           Optional :func:`.mapper` mapped class or instance of
           :class:`.Mapper`.   The bind can be derived from a :class:`.Mapper`
           is mapped for a bind.
 
         :param clause:
-            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, 
-            :func:`~.sql.expression.text`, 
+            A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`,
+            :func:`~.sql.expression.text`,
             etc.).  If the ``mapper`` argument is not present or could not produce
             a bind, the given expression construct will be searched for a bound
-            element, typically a :class:`.Table` associated with bound 
+            element, typically a :class:`.Table` associated with bound
             :class:`.MetaData`.
 
         """
     @util.contextmanager
     def no_autoflush(self):
         """Return a context manager that disables autoflush.
-        
+
         e.g.::
-        
+
             with session.no_autoflush:
-                
+
                 some_object = SomeClass()
                 session.add(some_object)
                 # won't autoflush
                 some_object.related_thing = session.query(SomeRelated).first()
-        
+
         Operations that proceed within the ``with:`` block
         will not be subject to flushes occurring upon query
         access.  This is useful when initializing a series
         of objects which involve existing database queries,
         where the uncompleted object should not yet be flushed.
-        
+
         .. versionadded:: 0.7.6
 
         """
         mode is turned on.
 
         :param attribute_names: optional.  An iterable collection of
-          string attribute names indicating a subset of attributes to 
+          string attribute names indicating a subset of attributes to
           be refreshed.
 
-        :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query` 
+        :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query`
           as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`.
 
         """
     def expire_all(self):
         """Expires all persistent instances within this Session.
 
-        When any attributes on a persistent instance is next accessed, 
         When any attribute on a persistent instance is next accessed,
         a query will be issued using the
         :class:`.Session` object's current transactional context in order to
         load all expired attributes for the given instance.   Note that
-        a highly isolated transaction will return the same values as were 
+        a highly isolated transaction will return the same values as were
         previously read in that same transaction, regardless of changes
         in database state outside of that transaction.
 
-        To expire individual objects and individual attributes 
+        To expire individual objects and individual attributes
         on those objects, use :meth:`Session.expire`.
 
-        The :class:`.Session` object's default behavior is to 
+        The :class:`.Session` object's default behavior is to
         expire all state whenever the :meth:`Session.rollback`
         or :meth:`Session.commit` methods are called, so that new
         state can be loaded for the new transaction.   For this reason,
-        calling :meth:`Session.expire_all` should not be needed when 
+        calling :meth:`Session.expire_all` should not be needed when
         autocommit is ``False``, assuming the transaction is isolated.
 
         """
         for state in self.identity_map.all_states():
-            state.expire(state.dict, self.identity_map._modified)
+            state._expire(state.dict, self.identity_map._modified)
 
     def expire(self, instance, attribute_names=None):
         """Expire the attributes on an instance.
         attribute is next accessed, a query will be issued to the
         :class:`.Session` object's current transactional context in order to
         load all expired attributes for the given instance.   Note that
-        a highly isolated transaction will return the same values as were 
+        a highly isolated transaction will return the same values as were
         previously read in that same transaction, regardless of changes
         in database state outside of that transaction.
 
         To expire all objects in the :class:`.Session` simultaneously,
         use :meth:`Session.expire_all`.
 
-        The :class:`.Session` object's default behavior is to 
+        The :class:`.Session` object's default behavior is to
         expire all state whenever the :meth:`Session.rollback`
         or :meth:`Session.commit` methods are called, so that new
         state can be loaded for the new transaction.   For this reason,
     def _expire_state(self, state, attribute_names):
         self._validate_persistent(state)
         if attribute_names:
-            state.expire_attributes(state.dict, attribute_names)
+            state._expire_attributes(state.dict, attribute_names)
         else:
             # pre-fetch the full cascade since the expire is going to
             # remove associations
         """Expire a state if persistent, else expunge if pending"""
 
         if state.key:
-            state.expire(state.dict, self.identity_map._modified)
+            state._expire(state.dict, self.identity_map._modified)
         elif state in self._new:
             self._new.pop(state)
             state._detach()
                 if state.key is None:
                     state.key = instance_key
                 elif state.key != instance_key:
-                    # primary key switch. use discard() in case another 
-                    # state has already replaced this one in the identity 
+                    # primary key switch. use discard() in case another
+                    # state has already replaced this one in the identity
                     # map (see test/orm/test_naturalpks.py ReversePKsTest)
                     self.identity_map.discard(state)
                     state.key = instance_key
 
                 self.identity_map.replace(state)
 
-        statelib.InstanceState.commit_all_states(
+        statelib.InstanceState._commit_all_states(
             ((state, state.dict) for state in states),
             self.identity_map
         )
 
         mapper = _state_mapper(state)
         for o, m, st_, dct_ in mapper.cascade_iterator(
-                                    'save-update', 
-                                    state, 
+                                    'save-update',
+                                    state,
                                     halt_on=self._contains_state):
             self._save_or_update_impl(st_)
 
         if state in self._deleted:
             return
 
-        # ensure object is attached to allow the 
+        # ensure object is attached to allow the
         # cascade operation to load deferred attributes
         # and collections
         self._attach(state, include_before=True)
         try:
             self.autoflush = False
             return self._merge(
-                            attributes.instance_state(instance), 
-                            attributes.instance_dict(instance), 
+                            attributes.instance_state(instance),
+                            attributes.instance_dict(instance),
                             load=load, _recursive=_recursive)
         finally:
             self.autoflush = autoflush
             new_instance = True
 
         elif not _none_set.issubset(key[1]) or \
-                    (mapper.allow_partial_pks and 
+                    (mapper.allow_partial_pks and
                     not _none_set.issuperset(key[1])):
             merged = self.query(mapper.class_).get(key[1])
         else:
             # version check if applicable
             if mapper.version_id_col is not None:
                 existing_version = mapper._get_state_attr_by_column(
-                            state, 
-                            state_dict, 
+                            state,
+                            state_dict,
                             mapper.version_id_col,
                             passive=attributes.PASSIVE_NO_INITIALIZE)
 
                 merged_version = mapper._get_state_attr_by_column(
-                            merged_state, 
-                            merged_dict, 
+                            merged_state,
+                            merged_dict,
                             mapper.version_id_col,
                             passive=attributes.PASSIVE_NO_INITIALIZE)
 
             merged_state.load_options = state.load_options
 
             for prop in mapper.iterate_properties:
-                prop.merge(self, state, state_dict, 
-                                merged_state, merged_dict, 
+                prop.merge(self, state, state_dict,
+                                merged_state, merged_dict,
                                 load, _recursive)
 
         if not load:
             # remove any history
-            merged_state.commit_all(merged_dict, self.identity_map)
+            merged_state._commit_all(merged_dict, self.identity_map)
 
         if new_instance:
             merged_state.manager.dispatch.load(merged_state, None)
         ''not'' participate in any persistence operations; its state
         for almost all purposes will remain either "transient" or
         "detached", except for the case of relationship loading.
-        
+
         Also note that backrefs will often not work as expected.
         Altering a relationship-bound attribute on the target object
         may not fire off a backref event, if the effective value
         is what was already loaded from a foreign-key-holding value.
-        
+
         The :meth:`.Session.enable_relationship_loading` method supersedes
         the ``load_on_pending`` flag on :func:`.relationship`.   Unlike
         that flag, :meth:`.Session.enable_relationship_loading` allows
         an object to remain transient while still being able to load
-        related items.   
-        
+        related items.
+
         To make a transient object associated with a :class:`.Session`
         via :meth:`.Session.enable_relationship_loading` pending, add
         it to the :class:`.Session` using :meth:`.Session.add` normally.
         is not intended for general use.
 
         .. versionadded:: 0.8
-        
+
         """
         state = attributes.instance_state(obj)
         self._attach(state, include_before=True)
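
A rough usage sketch of the behavior described in the docstring above (the ``Address`` class, its ``user_id`` column, its many-to-one ``user`` relationship, and the ``session`` object are all assumed for illustration)::

    a1 = Address()
    a1.user_id = 5                          # point at an existing User row
    session.enable_relationship_loading(a1)
    assert a1.user is not None              # lazy load works although a1 is transient
    session.add(a1)                         # only now does a1 become pending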
 
         Database operations will be issued in the current transactional
         context and do not affect the state of the transaction, unless an
-        error occurs, in which case the entire transaction is rolled back. 
+        error occurs, in which case the entire transaction is rolled back.
         You may flush() as often as you like within a transaction to move
         changes from Python to the database's transaction buffer.
 
         will create a transaction on the fly that surrounds the entire set of
         operations in the flush.
 
-        :param objects: Optional; restricts the flush operation to operate 
+        :param objects: Optional; restricts the flush operation to operate
           only on elements that are in the given collection.
-          
+
           This feature is for an extremely narrow set of use cases where
-          particular objects may need to be operated upon before the 
+          particular objects may need to be operated upon before the
           full flush() occurs.  It is not intended for general use.
 
         """
             raise
 
 
-    def is_modified(self, instance, include_collections=True, 
+    def is_modified(self, instance, include_collections=True,
                             passive=True):
-        """Return ``True`` if the given instance has locally 
+        """Return ``True`` if the given instance has locally
         modified attributes.
 
         This method retrieves the history for each instrumented
         attribute on the instance and performs a comparison of the current
         value to its previously committed value, if any.
-        
+
         It is in effect a more expensive and accurate
-        version of checking for the given instance in the 
-        :attr:`.Session.dirty` collection; a full test for 
+        version of checking for the given instance in the
+        :attr:`.Session.dirty` collection; a full test for
         each attribute's net "dirty" status is performed.
-        
+
         E.g.::
-        
+
             return session.is_modified(someobject)
 
         .. versionchanged:: 0.8
-            When using SQLAlchemy 0.7 and earlier, the ``passive`` 
+            When using SQLAlchemy 0.7 and earlier, the ``passive``
             flag should **always** be explicitly set to ``True``,
-            else SQL loads/autoflushes may proceed which can affect 
+            else SQL loads/autoflushes may proceed which can affect
             the modified state itself:
             ``session.is_modified(someobject, passive=True)``\ .
-            In 0.8 and above, the behavior is corrected and 
+            In 0.8 and above, the behavior is corrected and
             this flag is ignored.
 
         A few caveats to this method apply:
 
-        * Instances present in the :attr:`.Session.dirty` collection may report 
-          ``False`` when tested with this method.  This is because 
+        * Instances present in the :attr:`.Session.dirty` collection may report
+          ``False`` when tested with this method.  This is because
           the object may have received change events via attribute
-          mutation, thus placing it in :attr:`.Session.dirty`, 
+          mutation, thus placing it in :attr:`.Session.dirty`,
           but ultimately the state is the same as that loaded from
           the database, resulting in no net change here.
         * Scalar attributes may not have recorded the previously set
           it skips the expense of a SQL call if the old value isn't present,
           based on the assumption that an UPDATE of the scalar value is
           usually needed, and in those few cases where it isn't, is less
-          expensive on average than issuing a defensive SELECT. 
+          expensive on average than issuing a defensive SELECT.
 
           The "old" value is fetched unconditionally upon set only if the attribute
           container has the ``active_history`` flag set to ``True``. This flag
           is set typically for primary key attributes and scalar object references
-          that are not a simple many-to-one.  To set this flag for 
+          that are not a simple many-to-one.  To set this flag for
           any arbitrary mapped column, use the ``active_history`` argument
           with :func:`.column_property`.
-          
+
         :param instance: mapped instance to be tested for pending changes.
         :param include_collections: Indicates if multivalued collections should be
          included in the operation.  Setting this to ``False`` is a way to detect
         for attr in state.manager.attributes:
             if \
                 (
-                    not include_collections and 
+                    not include_collections and
                     hasattr(attr.impl, 'get_collection')
                 ) or not hasattr(attr.impl, 'get_history'):
                 continue
 
             (added, unchanged, deleted) = \
-                    attr.impl.get_history(state, dict_, 
+                    attr.impl.get_history(state, dict_,
                             passive=attributes.NO_CHANGE)
 
             if added or deleted:
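
A short sketch of the "net change" caveat described in the docstring, assuming ``user`` is a persistent object whose ``name`` was loaded as ``'ed'``::

    user.name = 'mary'
    user.name = 'ed'                      # set back to the originally loaded value
    assert user in session.dirty          # attribute events placed it in .dirty
    assert not session.is_modified(user)  # but there is no net change to flush

    # restrict the comparison to scalar, non-collection attributes
    session.is_modified(user, include_collections=False)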
     @property
     def is_active(self):
         """True if this :class:`.Session` has an active transaction.
-        
+
         This indicates if the :class:`.Session` is capable of emitting
         SQL, as from the :meth:`.Session.execute`, :meth:`.Session.query`,
-        or :meth:`.Session.flush` methods.   If False, it indicates 
+        or :meth:`.Session.flush` methods.   If False, it indicates
         that the innermost transaction has been rolled back, but enclosing
         :class:`.SessionTransaction` objects remain in the transactional
         stack, which also must be rolled back.
-        
+
         This flag is generally only useful with a :class:`.Session`
         configured in its default mode of ``autocommit=False``.
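
For illustration, a sketch of how the flag behaves, assuming the default ``autocommit=False`` and a ``bad_obj`` that is assumed to fail at flush time::

    session.add(bad_obj)
    try:
        session.flush()
    except Exception:
        pass
    assert not session.is_active   # innermost transaction was rolled back
    session.rollback()             # unwind the enclosing transaction(s)
    assert session.is_active       # the Session can emit SQL again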
 
 
     identity_map = None
     """A mapping of object identities to objects themselves.
-    
+
     Iterating through ``Session.identity_map.values()`` provides
-    access to the full set of persistent objects (i.e., those 
+    access to the full set of persistent objects (i.e., those
     that have row identity) currently in the session.
-    
+
     See also:
-    
+
     :func:`.identity_key` - operations involving identity keys.
-    
+
     """
 
     @property
     @property
     def dirty(self):
         """The set of all persistent instances considered dirty.
-        
+
         E.g.::
-        
+
             some_mapped_object in session.dirty
 
         Instances are considered dirty when they were modified but not
 def make_transient(instance):
     """Make the given instance 'transient'.
 
-    This will remove its association with any 
+    This will remove its association with any
     session and additionally will remove its "identity key",
     such that it's as though the object were newly constructed,
     except retaining its values.   It also resets the
     had been explicitly deleted by its session.
 
     Attributes which were "expired" or deferred at the
-    instance level are reverted to undefined, and 
+    instance level are reverted to undefined, and
     will not trigger any loads.
 
     """
     if s:
         s._expunge_state(state)
 
-    # remove expired state and 
+    # remove expired state and
     # deferred callables
     state.callables.clear()
     if state.key:
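
A brief sketch of the docstring above, assuming ``user`` is a persistent object with a surrogate ``id`` primary key and ``session`` is its owning session::

    from sqlalchemy.orm import make_transient

    make_transient(user)       # detaches 'user' and erases its identity key
    user.id = None             # clear the retained primary key value
    session.add(user)
    session.flush()            # INSERTs 'user' as a brand new row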

lib/sqlalchemy/orm/state.py

         else:
             return {}
 
-    def initialize_instance(*mixed, **kwargs):
+    def _initialize_instance(*mixed, **kwargs):
         self, instance, args = mixed[0], mixed[1], mixed[2:]
         manager = self.manager
 
         manager.setup_instance(inst, self)
         manager.dispatch.unpickle(self, state)
 
-    def initialize(self, key):
+    def _initialize(self, key):
         """Set this attribute to an empty value or collection,
            based on the AttributeImpl in use."""
 
         self.manager.get_impl(key).initialize(self, self.dict)
 
-    def reset(self, dict_, key):
+    def _reset(self, dict_, key):
         """Remove the given attribute and any
            callables associated with it."""
 
         dict_.pop(key, None)
         self.callables.pop(key, None)
 
-    def expire_attribute_pre_commit(self, dict_, key):
+    def _expire_attribute_pre_commit(self, dict_, key):
         """a fast expire that can be called by column loaders during a load.
 
         The additional bookkeeping is finished up in commit_all().
         dict_.pop(key, None)
         self.callables[key] = self
 
-    def set_callable(self, dict_, key, callable_):
+    def _set_callable(self, dict_, key, callable_):
         """Remove the given attribute and set the given callable
            as a loader."""
 
         dict_.pop(key, None)
         self.callables[key] = callable_
 
-    def expire(self, dict_, modified_set):
+    def _expire(self, dict_, modified_set):
         self.expired = True
         if self.modified:
             modified_set.discard(self)
 
         self.manager.dispatch.expire(self, None)
 
-    def expire_attributes(self, dict_, attribute_names):
+    def _expire_attributes(self, dict_, attribute_names):
         pending = self.__dict__.get('_pending_mutations', None)
 
         for key in attribute_names:
                         ))
             self.modified = True
 
-    def commit(self, dict_, keys):
+    def _commit(self, dict_, keys):
         """Commit attributes.
 
         This is used by a partial-attribute load operation to mark committed
                             intersection(dict_):
             del self.callables[key]
 
-    def commit_all(self, dict_, instance_dict=None):
+    def _commit_all(self, dict_, instance_dict=None):
         """commit all attributes unconditionally.
 
         This is used after a flush() or a full load/refresh
         "expired" after this step if a value was not populated in state.dict.
 
         """
-        self.commit_all_states([(self, dict_)], instance_dict)
+        self._commit_all_states([(self, dict_)], instance_dict)
 
     @classmethod
-    def commit_all_states(self, iter, instance_dict=None):
+    def _commit_all_states(self, iter, instance_dict=None):
         """Mass version of commit_all()."""
 
         for state, dict_ in iter:
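
For callers outside this module, the rename only changes the spelling of these private calls; a sketch, assuming ``obj`` is any mapped instance::

    from sqlalchemy.orm import attributes

    state = attributes.instance_state(obj)
    dict_ = attributes.instance_dict(obj)

    state._expire(dict_, set())    # was state.expire(dict_, set())
    state._commit_all(dict_)       # was state.commit_all(dict_)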

lib/sqlalchemy/orm/strategies.py

                 return fetch_col, None, None
         else:
             def expire_for_non_present_col(state, dict_, row):
-                state.expire_attribute_pre_commit(dict_, key)
+                state._expire_attribute_pre_commit(dict_, key)
             return expire_for_non_present_col, None, None
 
 log.class_logger(ColumnLoader)
 
         elif not self.is_class_level:
             def set_deferred_for_local_state(state, dict_, row):
-                state.set_callable(dict_, key, LoadDeferredColumns(state, key))
+                state._set_callable(dict_, key, LoadDeferredColumns(state, key))
             return set_deferred_for_local_state, None, None
         else:
             def reset_col_for_deferred(state, dict_, row):
                 # reset state on the key so that deferred callables
                 # fire off on next access.
-                state.reset(dict_, key)
+                state._reset(dict_, key)
             return reset_col_for_deferred, None, None
 
     def init_class_attribute(self, mapper):
 
     def create_row_processor(self, context, path, mapper, row, adapter):
         def invoke_no_load(state, dict_, row):
-            state.initialize(self.key)
+            state._initialize(self.key)
         return invoke_no_load, None, None
 
 log.class_logger(NoLoader)
                 # "lazyload" option on a "no load"
                 # attribute - "eager" attributes always have a
                 # class-level lazyloader installed.
-                state.set_callable(dict_, key, LoadLazyAttribute(state, key))
+                state._set_callable(dict_, key, LoadLazyAttribute(state, key))
             return set_lazy_callable, None, None
         else:
             def reset_for_lazy_callable(state, dict_, row):
                 # this is needed in
                 # populate_existing() types of scenarios to reset
                 # any existing state.
-                state.reset(dict_, key)
+                state._reset(dict_, key)
 
             return reset_for_lazy_callable, None, None
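
The strategies above all follow the same convention: ``create_row_processor()`` returns a 3-tuple whose first element is a per-row callable taking ``(state, dict_, row)``. A hypothetical processor built on the renamed private methods::

    def make_expire_processor(key):
        def expire_missing(state, dict_, row):
            # mark 'key' expired; bookkeeping completes in _commit_all()
            state._expire_attribute_pre_commit(dict_, key)
        return expire_missing, None, None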
 

test/ext/test_extendedattr.py

             u.email_address = 'lala@123.com'
 
             self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com')
-            attributes.instance_state(u).commit_all(attributes.instance_dict(u))
+            attributes.instance_state(u)._commit_all(attributes.instance_dict(u))
             self.assert_(u.user_id == 7 and u.user_name == 'john' and u.email_address == 'lala@123.com')
 
             u.user_name = 'heythere'
                 assert Foo in instrumentation._instrumentation_factory._state_finders
 
             f = Foo()
-            attributes.instance_state(f).expire(attributes.instance_dict(f), set())
+            attributes.instance_state(f)._expire(attributes.instance_dict(f), set())
             eq_(f.a, "this is a")
             eq_(f.b, 12)
 
             f.a = "this is some new a"
-            attributes.instance_state(f).expire(attributes.instance_dict(f), set())
+            attributes.instance_state(f)._expire(attributes.instance_dict(f), set())
             eq_(f.a, "this is a")
             eq_(f.b, 12)
 
-            attributes.instance_state(f).expire(attributes.instance_dict(f), set())
+            attributes.instance_state(f)._expire(attributes.instance_dict(f), set())
             f.a = "this is another new a"
             eq_(f.a, "this is another new a")
             eq_(f.b, 12)
 
-            attributes.instance_state(f).expire(attributes.instance_dict(f), set())
+            attributes.instance_state(f)._expire(attributes.instance_dict(f), set())
             eq_(f.a, "this is a")
             eq_(f.b, 12)
 
             eq_(f.a, None)
             eq_(f.b, 12)
 
-            attributes.instance_state(f).commit_all(attributes.instance_dict(f))
+            attributes.instance_state(f)._commit_all(attributes.instance_dict(f))
             eq_(f.a, None)
             eq_(f.b, 12)
 
             f1.bars.append(b1)
             eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ([b1], [], []))
 
-            attributes.instance_state(f1).commit_all(attributes.instance_dict(f1))
-            attributes.instance_state(b1).commit_all(attributes.instance_dict(b1))
+            attributes.instance_state(f1)._commit_all(attributes.instance_dict(f1))
+            attributes.instance_state(b1)._commit_all(attributes.instance_dict(b1))
 
             eq_(attributes.get_state_history(attributes.instance_state(f1), 'name'), ((), ['f1'], ()))
             eq_(attributes.get_state_history(attributes.instance_state(f1), 'bars'), ((), [b1], ()))
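
The ``eq_()`` comparisons above rely on ``attributes.get_state_history()`` returning an ``(added, unchanged, deleted)`` triple; after ``_commit_all()`` the appended item moves to the "unchanged" slot::

    added, unchanged, deleted = attributes.get_state_history(
        attributes.instance_state(f1), 'bars')
    assert list(unchanged) == [b1]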

test/orm/inheritance/test_basic.py

                                 ['counter2']) is None
 
         s1.id = 1
-        attributes.instance_state(s1).commit_all(s1.__dict__, None)
+        attributes.instance_state(s1)._commit_all(s1.__dict__, None)
         assert m._optimized_get_statement(attributes.instance_state(s1),
                                 ['counter2']) is not None
 

test/orm/test_attributes.py

         b2 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
             "Object <B at .*?> not "
             "associated with <A at .*?> on attribute 'b'",
             A.b.impl.remove,
-                attributes.instance_state(a1), 
+                attributes.instance_state(a1),
                 attributes.instance_dict(a1), b2, None
         )
 
         b2 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
         assert a1.b is b1
 
         A.b.impl.pop(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b2, None
         )
         assert a1.b is b1
         b1 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
         assert a1.b is b1
 
         A.b.impl.pop(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
         assert a1.b is None
         b2 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
             ValueError,
             r"list.remove\(x\): x not in list",
             A.b.impl.remove,
-                attributes.instance_state(a1), 
+                attributes.instance_state(a1),
                 attributes.instance_dict(a1), b2, None
         )
 
         b2 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
         assert a1.b == [b1]
 
         A.b.impl.pop(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b2, None
         )
         assert a1.b == [b1]
         b1 = B()
 
         A.b.impl.append(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
 
         assert a1.b == [b1]
 
         A.b.impl.pop(
-            attributes.instance_state(a1), 
+            attributes.instance_state(a1),
             attributes.instance_dict(a1), b1, None
         )
         assert a1.b == []
         u.email_address = 'lala@123.com'
         self.assert_(u.user_id == 7 and u.user_name == 'john'
                      and u.email_address == 'lala@123.com')
-        attributes.instance_state(u).commit_all(attributes.instance_dict(u))
+        attributes.instance_state(u)._commit_all(attributes.instance_dict(u))
         self.assert_(u.user_id == 7 and u.user_name == 'john'
                      and u.email_address == 'lala@123.com')
         u.user_name = 'heythere'
 
         instrumentation.register_class(Foo)
         instrumentation.register_class(Bar)
-        attributes.register_attribute(Foo, 
-                                    'bars', 
-                                    uselist=True, 
+        attributes.register_attribute(Foo,
+                                    'bars',
+                                    uselist=True,
                                     useobject=True)
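
These tests drive instrumentation directly, outside of ``mapper()``; a minimal sketch of that pattern using the renamed state methods (``Widget`` is a throwaway class for illustration)::

    from sqlalchemy.orm import attributes, instrumentation

    class Widget(object):
        pass

    instrumentation.register_class(Widget)
    attributes.register_attribute(Widget, 'name', uselist=False, useobject=False)

    w = Widget()
    w.name = 'spanner'
    attributes.instance_state(w)._commit_all(attributes.instance_dict(w))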
 
         assert_raises_message(
         attributes.register_attribute(Foo, 'b', uselist=False, useobject=False)
 
         f = Foo()
-        attributes.instance_state(f).expire(attributes.instance_dict(f),
+        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                 set())
         eq_(f.a, 'this is a')
         eq_(f.b, 12)
         f.a = 'this is some new a'
-        attributes.instance_state(f).expire(attributes.instance_dict(f),
+        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                 set())
         eq_(f.a, 'this is a')
         eq_(f.b, 12)
-        attributes.instance_state(f).expire(attributes.instance_dict(f),
+        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                 set())
         f.a = 'this is another new a'
         eq_(f.a, 'this is another new a')
         eq_(f.b, 12)
-        attributes.instance_state(f).expire(attributes.instance_dict(f),
+        attributes.instance_state(f)._expire(attributes.instance_dict(f),
                 set())
         eq_(f.a, 'this is a')
         eq_(f.b, 12)
         del f.a
         eq_(f.a, None)
         eq_(f.b, 12)
-        attributes.instance_state(f).commit_all(attributes.instance_dict(f),
+        attributes.instance_state(f)._commit_all(attributes.instance_dict(f),