Commits

Mike Bayer committed 33ed2d3

OK! Let's turn this around completely. Forget making a single count across
all platforms; instead, store callcounts for *all* observed platforms in a datafile.
Will try to get enough platforms into the file for Jenkins to have meaningful results.
For platforms not in the file, it's just SkipTest.

  • Parent commits 928e426

Files changed (8)
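
As a rough sketch of the approach in the commit message (not the committed implementation), the lookup key combines the Python version, the database/DBAPI in use, and whether the C extensions are enabled; the real logic lives in the new ProfileStatsFile.platform_key property in test/lib/profiling.py below, which also appends jython/pypy/win tokens. The db_name, db_driver and has_cext arguments here are illustrative stand-ins for testing.db.name, testing.db.driver and the cextensions requirement check.

    import sys

    def make_platform_key(db_name, db_driver, has_cext):
        # e.g. "2.7" on CPython 2.7; only the major.minor version is used
        py_version = ".".join(str(v) for v in sys.version_info[0:2])
        tokens = [py_version, db_name + "_" + db_driver]
        tokens.append("cextensions" if has_cext else "nocextensions")
        return "_".join(tokens)

    print(make_platform_key("sqlite", "pysqlite", False))
    # -> "2.7_sqlite_pysqlite_nocextensions" on a CPython 2.7 interpreter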

File test/aaa_profiling/test_orm.py

 
 class MergeTest(fixtures.MappedTest):
 
-    __requires__ = 'cpython',
-
     @classmethod
     def define_tables(cls, metadata):
         Table('parent', metadata, Column('id', Integer,
         # down from 185 on this; this is a small slice of a usually
         # bigger operation so using a small variance
 
-        @profiling.function_call_count(96, variance=0.10)
+        @profiling.function_call_count(variance=0.10)
         def go1():
             return sess2.merge(p1, load=False)
         p2 = go1()
 
         # third call, merge object already present. almost no calls.
 
-        @profiling.function_call_count(16, variance=0.10)
+        @profiling.function_call_count(variance=0.10)
         def go2():
             return sess2.merge(p2, load=False)
         go2()
 
-    @testing.only_on('sqlite', 'Call counts tailored to pysqlite')
     def test_merge_load(self):
         Parent = self.classes.Parent
 
         # using sqlite3 the C extension took it back up to approx. 1257
         # (py2.6)
 
-        @profiling.function_call_count(1016, variance=.10)
+        @profiling.function_call_count()
         def go():
             p2 = sess2.merge(p1)
         go()
 
         # one more time, count the SQL
 
+        def go2():
+            p2 = sess2.merge(p1)
         sess2 = sessionmaker(testing.db)()
-        self.assert_sql_count(testing.db, go, 2)
+        self.assert_sql_count(testing.db, go2, 2)
 
 class LoadManyToOneFromIdentityTest(fixtures.MappedTest):
     """test overhead associated with many-to-one fetches.
 
     """
 
-    # only need to test for unexpected variance in a large call
-    # count here,
-    # so remove some platforms that have wildly divergent
-    # callcounts.
-    __requires__ = 'python25', 'cpython'
-    __unsupported_on__ = 'postgresql+pg8000', 'mysql+pymysql'
 
     @classmethod
     def define_tables(cls, metadata):
         parents = sess.query(Parent).all()
 
 
-        @profiling.function_call_count(110761, variance=.2)
+        @profiling.function_call_count(variance=.2)
         def go():
             for p in parents:
                 p.child
         parents = sess.query(Parent).all()
         children = sess.query(Child).all()
 
-        @profiling.function_call_count(16988)
+        @profiling.function_call_count()
         def go():
             for p in parents:
                 p.child
         go()
 
 class MergeBackrefsTest(fixtures.MappedTest):
-    __only_on__ = 'sqlite'  # keep things simple
 
     @classmethod
     def define_tables(cls, metadata):
         s = Session()
         s.add_all([
             A(id=i,
-                bs=[B(id=(i * 50) + j) for j in xrange(1, 50)],
+                bs=[B(id=(i * 5) + j) for j in xrange(1, 5)],
                 c=C(id=i),
-                ds=[D(id=(i * 50) + j) for j in xrange(1, 50)]
+                ds=[D(id=(i * 5) + j) for j in xrange(1, 5)]
             )
-            for i in xrange(1, 50)
+            for i in xrange(1, 5)
         ])
         s.commit()
 
-    @profiling.function_call_count(1092497, variance=.10)
+    @profiling.function_call_count(variance=.10)
     def test_merge_pending_with_all_pks(self):
         A, B, C, D = self.classes.A, self.classes.B, \
                     self.classes.C, self.classes.D
         s = Session()
         for a in [
             A(id=i,
-                bs=[B(id=(i * 50) + j) for j in xrange(1, 50)],
+                bs=[B(id=(i * 5) + j) for j in xrange(1, 5)],
                 c=C(id=i),
-                ds=[D(id=(i * 50) + j) for j in xrange(1, 50)]
+                ds=[D(id=(i * 5) + j) for j in xrange(1, 5)]
             )
-            for i in xrange(1, 50)
+            for i in xrange(1, 5)
         ]:
             s.merge(a)
 

File test/aaa_profiling/test_pool.py

                          use_threadlocal=True)
 
 
-    @profiling.function_call_count(47, variance=.15)
+    @profiling.function_call_count()
     def test_first_connect(self):
         conn = pool.connect()
 
         conn = pool.connect()
         conn.close()
 
-        @profiling.function_call_count(17, variance=.10)
+        @profiling.function_call_count()
         def go():
             conn2 = pool.connect()
             return conn2
     def test_second_samethread_connect(self):
         conn = pool.connect()
 
-        @profiling.function_call_count(6)
+        @profiling.function_call_count()
         def go():
             return pool.connect()
         c2 = go()

File test/aaa_profiling/test_resultset.py

 
 
 class ResultSetTest(fixtures.TestBase, AssertsExecutionResults):
-    __requires__ = 'cpython', 'cextensions',
-    __only_on__ = 'sqlite'
 
     @classmethod
     def setup_class(cls):
         global t, t2, metadata
         metadata = MetaData(testing.db)
-        t = Table('table', metadata, *[Column('field%d' % fnum, String)
+        t = Table('table', metadata, *[Column('field%d' % fnum, String(50))
                   for fnum in range(NUM_FIELDS)])
         t2 = Table('table2', metadata, *[Column('field%d' % fnum,
-                   Unicode) for fnum in range(NUM_FIELDS)])
+                   Unicode(50)) for fnum in range(NUM_FIELDS)])
 
     def setup(self):
         metadata.create_all()
     def teardown(self):
         metadata.drop_all()
 
-    @profiling.function_call_count(316)
+    @profiling.function_call_count()
     def test_string(self):
         [tuple(row) for row in t.select().execute().fetchall()]
 
-    @profiling.function_call_count(316)
+    @profiling.function_call_count()
     def test_unicode(self):
         [tuple(row) for row in t2.select().execute().fetchall()]
 
     def test_contains_doesnt_compile(self):
         row = t.select().execute().first()
         c1 = Column('some column', Integer) + Column("some other column", Integer)
-        @profiling.function_call_count(9, variance=.15)
+        @profiling.function_call_count()
         def go():
             c1 in row
         go()
 
 class ExecutionTest(fixtures.TestBase):
-    __requires__ = 'cpython',
-    __only_on__ = 'sqlite'
 
     def test_minimal_connection_execute(self):
         # create an engine without any instrumentation.
         # ensure initial connect activities complete
         c.execute("select 1")
 
-        @profiling.function_call_count(40, variance=.10)
+        @profiling.function_call_count()
         def go():
             c.execute("select 1")
         go()
         # ensure initial connect activities complete
         e.execute("select 1")
 
-        @profiling.function_call_count(62, variance=.5)
+        @profiling.function_call_count()
         def go():
             e.execute("select 1")
         go()

File test/aaa_profiling/test_zoomark.py

         metadata = MetaData(engine)
         engine.connect()
 
-    @profiling.function_call_count(3896)
     def test_profile_1_create_tables(self):
         self.test_baseline_1_create_tables()
 
-    @profiling.function_call_count(4200)
+    @profiling.function_call_count()
     def test_profile_1a_populate(self):
         self.test_baseline_1a_populate()
 
-    @profiling.function_call_count(218)
+    @profiling.function_call_count()
     def test_profile_2_insert(self):
         self.test_baseline_2_insert()
 
-    @profiling.function_call_count(2700)
+    @profiling.function_call_count()
     def test_profile_3_properties(self):
         self.test_baseline_3_properties()
 
-    @profiling.function_call_count(8500)
+    @profiling.function_call_count()
     def test_profile_4_expressions(self):
         self.test_baseline_4_expressions()
 
-    @profiling.function_call_count(875, variance=0.10)
+    @profiling.function_call_count()
     def test_profile_5_aggregates(self):
         self.test_baseline_5_aggregates()
 
-    @profiling.function_call_count(1475)
+    @profiling.function_call_count()
     def test_profile_6_editing(self):
         self.test_baseline_6_editing()
 
-    @profiling.function_call_count(1970)
+    @profiling.function_call_count()
     def test_profile_7_multiview(self):
         self.test_baseline_7_multiview()
 

File test/aaa_profiling/test_zoomark_orm.py

         session = sessionmaker(engine)()
         engine.connect()
 
-    @profiling.function_call_count(5600, variance=0.25)
     def test_profile_1_create_tables(self):
         self.test_baseline_1_create_tables()
 
-    @profiling.function_call_count(5786)
+    @profiling.function_call_count()
     def test_profile_1a_populate(self):
         self.test_baseline_1a_populate()
 
-    @profiling.function_call_count(388)
+    @profiling.function_call_count()
     def test_profile_2_insert(self):
         self.test_baseline_2_insert()
 
-    @profiling.function_call_count(5702)
+    @profiling.function_call_count()
     def test_profile_3_properties(self):
         self.test_baseline_3_properties()
 
-    @profiling.function_call_count(16303)
+    @profiling.function_call_count()
     def test_profile_4_expressions(self):
         self.test_baseline_4_expressions()
 
-    @profiling.function_call_count(900)
+    @profiling.function_call_count()
     def test_profile_5_aggregates(self):
         self.test_baseline_5_aggregates()
 
-    @profiling.function_call_count(2545)
+    @profiling.function_call_count()
     def test_profile_6_editing(self):
         self.test_baseline_6_editing()
 

File test/bootstrap/noseplugin.py

     _set_table_options, base_config, db, db_label, db_url, file_config, post_configure,
     pre_configure)
 
+testing = None
+engines = None
+util = None
+
 log = logging.getLogger('nose.plugins.sqlalchemy')
 
 class NoseSQLAlchemy(Plugin):
                  "a db-default/InnoDB combo.")
         opt("--table-option", action="append", dest="tableopts", default=[],
             help="Add a dialect-specific table option, key=value")
-
+        opt("--write-profiles", action="store_true", dest="write_profiles", default=False,
+                help="Write/update profiling data.")
         global file_config
         file_config = ConfigParser.ConfigParser()
         file_config.readfp(StringIO.StringIO(base_config))
 
     def beforeTest(self, test):
         testing.resetwarnings()
+        testing.current_test = test.id()
 
     def afterTest(self, test):
         engines.testing_reaper._after_test_ctx()

File test/lib/profiles.txt

+# /Users/classic/dev/sqlalchemy/./test/lib/profiles.txt
+# This file is written out on a per-environment basis.
+# For each test in aaa_profiling, the corresponding function and 
+# environment is located within this file.  If it doesn't exist,
+# the file is altered and written out again.
+# If a callcount does exist, it is compared to what we received. 
+# assertions are raised if the counts do not match.
+# 
+# To add a new callcount test, apply the function_call_count 
+# decorator and re-run the tests - it will be added here.
+# 
+# The file is versioned so that well known platforms are available
+# for assertions.  Source control updates on local test environments
+# not already listed will create locally modified versions of the 
+# file that can be committed, or not, as well.
+
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.6_sqlite_pysqlite_nocextensions 62
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_mysql_mysqldb_cextensions 62
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_postgresql_psycopg2_cextensions 62
+test.aaa_profiling.test_compiler.CompileTest.test_insert 2.7_sqlite_pysqlite_cextensions 62
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.6_sqlite_pysqlite_nocextensions 149
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_mysql_mysqldb_cextensions 149
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_postgresql_psycopg2_cextensions 149
+test.aaa_profiling.test_compiler.CompileTest.test_select 2.7_sqlite_pysqlite_cextensions 149
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.6_sqlite_pysqlite_nocextensions 57
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_mysql_mysqldb_cextensions 57
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_postgresql_psycopg2_cextensions 57
+test.aaa_profiling.test_compiler.CompileTest.test_update 2.7_sqlite_pysqlite_cextensions 57
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.6_sqlite_pysqlite_nocextensions 117
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_mysql_mysqldb_cextensions 117
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_postgresql_psycopg2_cextensions 117
+test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 2.7_sqlite_pysqlite_cextensions 117
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.6_sqlite_pysqlite_nocextensions 17987
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_mysql_mysqldb_cextensions 17987
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_postgresql_psycopg2_cextensions 17987
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 2.7_sqlite_pysqlite_cextensions 17987
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.6_sqlite_pysqlite_nocextensions 114788
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_mysql_mysqldb_cextensions 122288
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_postgresql_psycopg2_cextensions 114788
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity 2.7_sqlite_pysqlite_cextensions 113760
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.6_sqlite_pysqlite_nocextensions 18941
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_mysql_mysqldb_cextensions 19449
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_postgresql_psycopg2_cextensions 18873
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks 2.7_sqlite_pysqlite_cextensions 18885
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.6_sqlite_pysqlite_nocextensions 1113
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_mysql_mysqldb_cextensions 1295
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_postgresql_psycopg2_cextensions 1154
+test.aaa_profiling.test_orm.MergeTest.test_merge_load 2.7_sqlite_pysqlite_cextensions 1103
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.6_sqlite_pysqlite_nocextensions 98,16
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_mysql_mysqldb_cextensions 98,16
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_cextensions 98,16
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 98,16
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.6_sqlite_pysqlite_nocextensions 67
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_mysql_mysqldb_cextensions 67
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_cextensions 67
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 67
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.6_sqlite_pysqlite_nocextensions 29
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_mysql_mysqldb_cextensions 29
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_postgresql_psycopg2_cextensions 29
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 2.7_sqlite_pysqlite_cextensions 29
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.6_sqlite_pysqlite_nocextensions 6
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_mysql_mysqldb_cextensions 6
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_postgresql_psycopg2_cextensions 6
+test.aaa_profiling.test_pool.QueuePoolTest.test_second_samethread_connect 2.7_sqlite_pysqlite_cextensions 6
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.6_sqlite_pysqlite_nocextensions 42
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_mysql_mysqldb_cextensions 40
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_postgresql_psycopg2_cextensions 40
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 2.7_sqlite_pysqlite_cextensions 40
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.6_sqlite_pysqlite_nocextensions 65
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_mysql_mysqldb_cextensions 63
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_postgresql_psycopg2_cextensions 63
+test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute 2.7_sqlite_pysqlite_cextensions 63
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.6_sqlite_pysqlite_nocextensions 9
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_mysql_mysqldb_cextensions 9
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_postgresql_psycopg2_cextensions 9
+test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 2.7_sqlite_pysqlite_cextensions 9
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.6_sqlite_pysqlite_nocextensions 370
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_mysql_mysqldb_cextensions 408
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_postgresql_psycopg2_cextensions 20394
+test.aaa_profiling.test_resultset.ResultSetTest.test_string 2.7_sqlite_pysqlite_cextensions 350
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.6_sqlite_pysqlite_nocextensions 370
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_mysql_mysqldb_cextensions 408
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_postgresql_psycopg2_cextensions 20394
+test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 2.7_sqlite_pysqlite_cextensions 350
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_cextensions 4915
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_cextensions 247
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_cextensions 3093
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_cextensions 10062
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_cextensions 998
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_cextensions 1654
+test.aaa_profiling.test_zoomark.ZooMarkTest.test_profile_7_multiview 2.7_postgresql_psycopg2_cextensions 2154
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_1a_populate 2.7_postgresql_psycopg2_cextensions 5842
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_2_insert 2.7_postgresql_psycopg2_cextensions 391
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_3_properties 2.7_postgresql_psycopg2_cextensions 5846
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_4_expressions 2.7_postgresql_psycopg2_cextensions 17885
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_5_aggregates 2.7_postgresql_psycopg2_cextensions 1011
+test.aaa_profiling.test_zoomark_orm.ZooMarkTest.test_profile_6_editing 2.7_postgresql_psycopg2_cextensions 2555
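
Each record above is a whitespace-separated triple: test key, platform key, and one or more comma-separated callcounts. Judging from the result() bookkeeping in the new profiling.py, a test that runs several function_call_count-decorated blocks (e.g. test_merge_no_load, stored as "98,16") records one count per block in execution order. A minimal parsing sketch using one of the lines above:

    line = ("test.aaa_profiling.test_orm.MergeTest.test_merge_no_load "
            "2.7_sqlite_pysqlite_cextensions 98,16")
    test_key, platform_key, counts = line.split()
    counts = [int(c) for c in counts.split(",")]  # one count per decorated block, in call order
    print(test_key, platform_key, counts)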

File test/lib/profiling.py

 import os
 import sys
 from test.lib.util import gc_collect, decorator
+from test.lib import testing
 from nose import SkipTest
 import pstats
 import time
 import collections
+from sqlalchemy import util
 try:
     import cProfile
 except ImportError:
     cProfile = None
+from sqlalchemy.util.compat import jython, pypy, win32
+
+from test.lib.requires import _has_cextensions
+_has_cext = _has_cextensions()
 
 def profiled(target=None, **target_opts):
     """Function profiling.
         return result
     return decorate
 
-def function_call_count(count=None, variance=0.05):
+
+class ProfileStatsFile(object):
+    """Store per-platform/fn profiling results in a file.
+
+    We're still targeting Py2.5, 2.4 on 0.7 with no dependencies,
+    so no json lib :(  need to roll something silly
+
+    """
+    def __init__(self):
+        dirname, fname = os.path.split(__file__)
+        self.short_fname = "profiles.txt"
+        self.fname = os.path.join(dirname, self.short_fname)
+        self.data = collections.defaultdict(lambda: collections.defaultdict(dict))
+        self._read()
+
+
+    @util.memoized_property
+    def platform_key(self):
+
+        dbapi_key = testing.db.name + "_" + testing.db.driver
+
+        # keep it at 2.7, 3.1, 3.2, etc. for now.
+        py_version = '.'.join([str(v) for v in sys.version_info[0:2]])
+
+        platform_tokens = [py_version]
+        platform_tokens.append(dbapi_key)
+        if jython:
+            platform_tokens.append("jython")
+        if pypy:
+            platform_tokens.append("pypy")
+        if win32:
+            platform_tokens.append("win")
+        platform_tokens.append(_has_cext and "cextensions" or "nocextensions")
+        return "_".join(platform_tokens)
+
+    def has_stats(self):
+        test_key = testing.current_test
+        return test_key in self.data and self.platform_key in self.data[test_key]
+
+    def result(self, callcount, write):
+        test_key = testing.current_test
+        per_fn = self.data[test_key]
+        per_platform = per_fn[self.platform_key]
+
+        if 'counts' not in per_platform:
+            per_platform['counts'] = counts = []
+        else:
+            counts = per_platform['counts']
+
+        if 'current_count' not in per_platform:
+            per_platform['current_count'] = current_count = 0
+        else:
+            current_count = per_platform['current_count']
+
+        has_count = len(counts) > current_count
+
+        if not has_count:
+            counts.append(callcount)
+            if write:
+                self._write()
+            result = None
+        else:
+            result = per_platform['lineno'], counts[current_count]
+        per_platform['current_count'] += 1
+        return result
+
+
+    def _header(self):
+        return \
+        "# %s\n"\
+        "# This file is written out on a per-environment basis.\n"\
+        "# For each test in aaa_profiling, the corresponding function and \n"\
+        "# environment is located within this file.  If it doesn't exist,\n"\
+        "# the file is altered and written out again.\n"\
+        "# If a callcount does exist, it is compared to what we received. \n"\
+        "# assertions are raised if the counts do not match.\n"\
+        "# \n"\
+        "# To add a new callcount test, apply the function_call_count \n"\
+        "# decorator and re-run the tests - it will be added here.\n"\
+        "# \n"\
+        "# The file is versioned so that well known platforms are available\n"\
+        "# for assertions.  Source control updates on local test environments\n"\
+        "# not already listed will create locally modified versions of the \n"\
+        "# file that can be committed, or not, as well.\n\n"\
+        "" % (self.fname)
+
+    def _read(self):
+        profile_f = open(self.fname)
+        for lineno, line in enumerate(profile_f):
+            line = line.strip()
+            if not line or line.startswith("#"):
+                continue
+
+            test_key, platform_key, counts = line.split()
+            per_fn = self.data[test_key]
+            per_platform = per_fn[platform_key]
+            per_platform['counts'] = [int(count) for count in counts.split(",")]
+            per_platform['lineno'] = lineno + 1
+            per_platform['current_count'] = 0
+        profile_f.close()
+
+    def _write(self):
+        print("Writing profile file %s" % self.fname)
+        profile_f = open(self.fname, "w")
+        profile_f.write(self._header())
+        for test_key in sorted(self.data):
+
+            per_fn = self.data[test_key]
+            for platform_key in sorted(per_fn):
+                per_platform = per_fn[platform_key]
+                profile_f.write(
+                    "%s %s %s\n" % (
+                        test_key,
+                        platform_key, ",".join(str(count) for count in per_platform['counts'])
+                    )
+                )
+        profile_f.close()
+
+_profile_stats = ProfileStatsFile()
+
+from sqlalchemy.util.compat import update_wrapper
+
+def function_call_count(variance=0.05):
     """Assert a target for a test case's function call count.
 
     The main purpose of this assertion is to detect changes in
     callcounts for various functions - the actual number is not as important.
-    Therefore, to help with cross-compatibility between Python interpreter
-    platforms/versions
-    as well as whether or not C extensions are in use, the call count is
-    "adjusted" to account for various functions that don't appear in all
-    environments.   This introduces the small possibility for a significant change
-    in callcount to be missed, in the case that the callcount is local
-    to that set of skipped functions.
+    Callcounts are stored in a file keyed to Python version and OS platform
+    information.  This file is generated automatically for new tests,
+    and versioned so that unexpected changes in callcounts will be detected.
 
     """
 
-    py_version = '.'.join([str(v) for v in sys.version_info])
+    def decorate(fn):
+        def wrap(*args, **kw):
 
-    @decorator
-    def decorate(fn, *args, **kw):
-        if cProfile is None:
-            raise SkipTest("cProfile is not installed")
-        if count is None:
-            raise SkipTest("no count installed")
-        gc_collect()
+            from test.bootstrap.config import options
+            write = options.write_profiles
 
-        stats_total_calls, callcount, fn_result = _count_calls(
-            {"exclude": _exclude},
-            fn, *args, **kw
-        )
-        print("Pstats calls: %d Adjusted callcount %d  Expected %d" % (
-                stats_total_calls,
-                callcount,
-                count
-            ))
-        deviance = int(callcount * variance)
-        if abs(callcount - count) > deviance:
-            raise AssertionError(
-                "Adjusted function call count %s not within %s%% "
-                "of expected %s. (cProfile reported %s "
-                    "calls, Python version %s)" % (
-                callcount, (variance * 100),
-                count, stats_total_calls, py_version))
-        return fn_result
+            if cProfile is None:
+                raise SkipTest("cProfile is not installed")
 
+            if not _profile_stats.has_stats() and not write:
+                raise SkipTest("No profiling stats available on this "
+                            "platform for this function.  Run tests with "
+                            "--write-profiles to add statistics to %s for "
+                            "this platform." % _profile_stats.short_fname)
+
+            gc_collect()
+
+
+            timespent, load_stats, fn_result = _profile(
+                fn, *args, **kw
+            )
+            stats = load_stats()
+            callcount = stats.total_calls
+
+            expected = _profile_stats.result(callcount, write)
+            if expected is None:
+                expected_count = None
+            else:
+                line_no, expected_count = expected
+
+            print("Pstats calls: %d Expected %s" % (
+                    callcount,
+                    expected_count
+                )
+            )
+            stats.print_stats()
+
+            if expected_count:
+                deviance = int(callcount * variance)
+                if abs(callcount - expected_count) > deviance:
+                    raise AssertionError(
+                        "Adjusted function call count %s not within %s%% "
+                        "of expected %s. (Delete line %d of file %s to regenerate "
+                            "this callcount, when tests are run with --write-profiles.)"
+                        % (
+                        callcount, (variance * 100),
+                        expected_count, line_no,
+                        _profile_stats.fname))
+            return fn_result
+        return update_wrapper(wrap, fn)
     return decorate
 
-py3k = sys.version_info >= (3,)
-
-def _paths(key, stats, cache, seen=None):
-    if seen is None:
-        seen = collections.defaultdict(int)
-    fname, lineno, fn_name = key
-    if seen.get(key):
-        return
-
-    if key in cache:
-        for item in cache[key]:
-            yield item
-    else:
-        seen[key] += 1
-        try:
-            path_element = (fname, lineno, fn_name)
-            paths_to_yield = []
-            (cc, nc, tt, ct, callers) = stats[key]
-            if not callers:
-                paths_to_yield.append((path_element,))
-                yield (path_element,)
-
-            for subkey in callers:
-                sub_cc, sub_nc, sub_tt, sub_ct = callers[subkey]
-                parent_iterator = list(_paths(subkey, stats, cache, seen))
-                path_element = (fname, lineno, fn_name)
-                for parent in parent_iterator:
-                    paths_to_yield.append(parent + (path_element,))
-                    yield parent + (path_element,)
-            cache[key] = paths_to_yield
-        finally:
-            seen[key] -= 1
-
-def _exclude(path):
-
-    for pfname, plineno, pfuncname in path:
-        if "compat" in pfname or \
-            "processors" in pfname or \
-            "cutils" in pfname:
-            return True
-        if "threading.py" in pfname:
-            return True
-        if "cprocessors" in pfuncname:
-            return True
-
-    if (
-            "result.py" in pfname or
-            "engine/base.py" in pfname
-        ) and pfuncname in ("__iter__", "__getitem__", "__len__", "__init__"):
-        return True
-
-    if "utf_8.py" in pfname and pfuncname == "decode":
-        return True
-
-    # hasattr seems to be inconsistent
-    if "hasattr" in path[-1][2]:
-        return True
-
-    if path[-1][2] in (
-            "<built-in method exec>",
-            "<listcomp>"
-            ):
-        return True
-
-    if '_thread.RLock' in path[-1][2]:
-        return True
-
-    return False
-
-def _count_calls(options, fn, *args, **kw):
-    total_time, stats_loader, fn_result = _profile(fn, *args, **kw)
-    exclude_fn = options.get("exclude", None)
-
-    stats = stats_loader()
-
-    callcount = 0
-    report = []
-    path_cache = {}
-    for (fname, lineno, fn_name), (cc, nc, tt, ct, callers) \
-                in stats.stats.items():
-        exclude = 0
-        paths = list(_paths((fname, lineno, fn_name), stats.stats, path_cache))
-        for path in paths:
-            if not path:
-                continue
-            if exclude_fn and exclude_fn(path):
-                exclude += 1
-
-        adjusted_cc = cc
-        if exclude:
-            adjust = (float(exclude) / len(paths)) * cc
-            adjusted_cc -= adjust
-        dirname, fname = os.path.split(fname)
-        #print(" ".join([str(x) for x in [fname, lineno, fn_name, cc, adjusted_cc]]))
-        report.append(" ".join([str(x) for x in [fname, lineno, fn_name, cc, adjusted_cc]]))
-        callcount += adjusted_cc
-
-    #stats.sort_stats('calls', 'cumulative')
-    #stats.print_stats()
-    report.sort()
-    print "\n".join(report)
-    return stats.total_calls, callcount, fn_result
 
 def _profile(fn, *args, **kw):
     filename = "%s.prof" % fn.__name__
     ended = time.time()
 
     return ended - began, load_stats, locals()['result']
+
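
As a compact restatement of the new decorator's decision flow (a sketch, not the committed code, which spreads this across has_stats(), result() and the assertion in the wrapper): when profiles.txt has no count for the current test/platform, the test is skipped unless --write-profiles is given, in which case the observed count is recorded; when a count exists, the observed count must fall within the variance tolerance.

    def check_callcount(observed, expected, write, variance=0.05):
        # expected is None when profiles.txt has no entry for this
        # test/platform combination
        if expected is None:
            if not write:
                return "skip"    # the decorator raises SkipTest here
            return "record"      # the count is appended and the file rewritten
        deviance = int(observed * variance)
        if abs(observed - expected) > deviance:
            raise AssertionError(
                "callcount %d not within %d%% of expected %d"
                % (observed, int(variance * 100), expected))
        return "ok"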