Anonymous committed 5de1ae1

Suppression of stdout removed from multi.py
Test functions for the multiprocess environment refactored to reuse global options, eliminate code duplication, and use Benchmark objects directly
Large number of options added for the multiprocess environment
Export to CSV added to multi.py
Option to suppress the detailed output added to test.py
run_test and the Benchmark class refactored to accumulate time without global-variable magic (see the sketch below)
Option to override the memcached backend address and port restored
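
A minimal sketch of the new timing flow, reconstructed from the diff below: run_test now returns the elapsed seconds of each sub-test, Benchmark sums them into total_time, and multi.py's base_test hands that total back to the worker pool (simplified; the options plumbing and per-test bodies follow the diff).

    import time

    def run_test(func, name):
        start_time = time.time()
        try:
            func()
        except Exception:
            return 0                      # a failed/unsupported test adds nothing
        return time.time() - start_time

    class Benchmark(object):
        def __init__(self, module, options, key_prefix=''):
            self.total_time = 0           # replaces the old global total_time
            # each test then accumulates its own timing, e.g.:
            # self.total_time += run_test(test, 'test_set')

    def base_test(test_no, backend):
        # multi.py worker: build a Benchmark and return its total to Pool.map
        b = Benchmark(backend, options, 'test_%d' % test_no)
        return b.total_time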

Files changed (3)

 *.orig
 *.prof
 *.out
+*.csv
 *~
 *.DS_Store
 Thumbs.db
+#!/usr/bin/env python
+
 import test
 import time
-import sys
 from multiprocessing import Pool
 import memcache
 import pylibmc
 import handlersocket
 import handlersocket_innodb
 
-PROCESSES_TO_RUN = 5
-
 class WritableObject:
     def __init__(self):
         pass
     def flush(self):
         pass
 
-sys.stdout = WritableObject()
+def base_test(test_no, backend):
+    global options
+    test.options = options
+    b = test.Benchmark(backend, options, 'test_%d' % test_no)
+    return b.total_time
 
+# You cannot pass modules or functions to the processes
 def memcache_test(test_no):
-    test.main('memcache', memcache, 'test_%d' % test_no)
-    return test.total_time
+    return base_test(test_no, memcache)
 
 def pylibmc_test(test_no):
-    test.main('pylibmc', pylibmc, 'test_%d' % test_no)
-    return test.total_time
+    return base_test(test_no, pylibmc)
 
 def handlersocket_test(test_no):
-    test.main('handlersocket', handlersocket, 'test_%d' % test_no)
-    return test.total_time
+    return base_test(test_no, handlersocket)
 
 def handlersocket_innodb_test(test_no):
-    test.main('handlersocket_innodb', handlersocket_innodb, 'test_%d' % test_no)
-    return test.total_time
+    return base_test(test_no, handlersocket_innodb)
 
 if __name__ == '__main__':
-    pool = Pool(processes=PROCESSES_TO_RUN)
+    from optparse import OptionParser
+    parser = OptionParser()
+
+    parser.add_option('-a', '--server-address', dest='server_address',
+            default='127.0.0.1:11211',
+            help="address:port of memcached [default: 127.0.0.1:11211]")
+    parser.add_option('-o', '--output-csv', dest='csv',
+            default=False,
+            help="save test results into given csv file")
+
+    parser.add_option('-n', '--num-tests', dest='num_tests', type='int',
+            default=1000,
+            help="repeat counts of each test [default: 1000]")
+
+    parser.add_option('-f', '--min-processes', dest='min_processes', type='int',
+            default=1,
+            help="amount of processes to start with [default: 1]")
+
+    parser.add_option('-t', '--max-processes', dest='max_processes', type='int',
+            default=5,
+            help="maximal amount of processes to run [default: 5]")
+
+    parser.add_option('-v', '--verbose', dest='verbose',
+            action='store_true', default=False,
+            help="show traceback infomation if a test fails")
+
+    global options
+    options, args = parser.parse_args()
+    options.quiet = True
+    
+    pool = Pool(processes=options.max_processes)
+    
+    grand_total = []
 
     for func in [memcache_test, pylibmc_test, handlersocket_test, handlersocket_innodb_test,]:
-        sys.stdout = sys.__stdout__
+        line_in_total = [func.__name__]
         print "=" * 20
         print "Testing %s" % func.__name__
         print "=" * 20
 
-        for i in range(1, PROCESSES_TO_RUN + 1):
+        for i in range(options.min_processes, options.max_processes + 1):
             start_time = time.time()
             timings = pool.map(func, range(i))
     
             end_time = time.time()
 
             print "Overall time for %d processes is %f seconds" % (i, end_time - start_time)
-            print "Average clean (without setup/teardown) execution time is %f seconds per thread\n" % (sum(timings, 0.0) / len(timings))
+            avg = sum(timings, 0.0) / len(timings)
+            
+            line_in_total.append(avg)
+            print "Average clean (without setup/teardown) execution time is %f seconds per thread\n" % avg
+        
+        grand_total.append(line_in_total)
+            
+    if options.csv:
+        import csv
+        writer = csv.writer(open(options.csv, "wb"))
+        writer.writerows(grand_total)
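
For reference, each row that multi.py writes to the CSV is the backend function name followed by the average per-process timings, one column per process count from --min-processes to --max-processes. A minimal sketch for reading it back (the file name results.csv is only an example):

    import csv

    for row in csv.reader(open("results.csv", "rb")):  # "rb" matches the Python 2 writer above
        name, averages = row[0], [float(x) for x in row[1:]]
        print name, averages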
 import sys
 
 
-global total_time
+global options
 
 def run_test(func, name):
-    sys.stdout.write(name + ': ')
-    sys.stdout.flush()
+    global options
+    
+    if not options.quiet:
+        sys.stdout.write(name + ': ')
+        sys.stdout.flush()
+        
     start_time = time.time()
     try:
         func()
     except:
+        if options.quiet:
+            print >> sys.stderr, name + ': ',
         print >> sys.stderr, "failed or not supported"
-        global options
         if options.verbose:
             import traceback; traceback.print_exc()
+            
+        return 0
     else:
         end_time = time.time()
-        global total_time
-        total_time += end_time - start_time
-        print "%f seconds" % (end_time - start_time)
+        if not options.quiet:
+            print "%f seconds" % (end_time - start_time)
+        return end_time - start_time
 
 
 class BigObject(object):
 
 class Benchmark(object):
     def __init__(self, module, options, key_prefix = ''):
+        self.total_time = 0
+
         self.module = module
         self.options = options
         self.key_prefix =  key_prefix
         self.test_get_big_object()
         self.test_get_multi()
         self.test_p_app_get()
+        
         #self.test_get_list()
 
     def init_server(self):
-        #self.mc = self.module.Client([self.options.server_address])
-        self.mc = self.module.Client(["127.0.0.1:11211"])
+        self.mc = self.module.Client([self.options.server_address])
         self.mc.set(self.key_prefix + 'bench_key', "E" * 50)
 
         num_tests = self.options.num_tests
             for i in range(10):
                 for key, value in pairs:
                     set_(key, value)
-        run_test(test, 'test_set')
+        self.total_time += run_test(test, 'test_set')
 
         for key, value in pairs:
             self.mc.delete(key)
                 set_(key, value)
                 result = get_(key)
                 assert result == value
-        run_test(test, 'test_set_get')
+        self.total_time += run_test(test, 'test_set_get')
 
         for key, value in pairs:
           self.mc.delete(key)
                     assert result == value
 
 
-        run_test(test, 'test_pure_get')
+        self.total_time += run_test(test, 'test_pure_get')
 
         for key, value in pairs:
           self.mc.delete(key)
                 index += 1
                 if(index % 5 == 0):
                     set_(key, value)
-        run_test(test, 'test_random_get')
+        self.total_time += run_test(test, 'test_random_get')
 
     def test_set_same(self):
         set_ = self.mc.set
             for i in range(10):
                 for i in xrange(self.options.num_tests):
                     set_(self.key_prefix + 'key', 'value')
-        run_test(test, 'test_set_same')
+        self.total_time += run_test(test, 'test_set_same')
 
         self.mc.delete(self.key_prefix + 'key')
 
             for key, value in pairs:
                 set_(key, value)
 
-        run_test(test, 'test_set_big_object (100 objects)')
+        self.total_time += run_test(test, 'test_set_big_object (100 objects)')
 
         for key, value in pairs:
             self.mc.delete(key)
                 result = get_(key)
                 assert result == value
 
-        run_test(test, 'test_set_get_big_object (100 objects)')
+        self.total_time += run_test(test, 'test_set_get_big_object (100 objects)')
 
         for key, value in pairs:
           self.mc.delete(key)
                 set_(key, value)
                 result = get_(key)
                 assert result == value
-        run_test(test, 'test_set_get_big_string (100 objects)')
+        self.total_time += run_test(test, 'test_set_get_big_string (100 objects)')
 
     def test_set_big_string(self):
         set_ = self.mc.set
         def test():
             for key, value in pairs:
                 set_(key, value)
-        run_test(test, 'test_set_big_string (100 objects)')
+        self.total_time += run_test(test, 'test_set_big_string (100 objects)')
 
         for key, value in pairs:
             self.mc.delete(key)
             for key, value in pairs:
                 result = get(key)
                 assert result == value
-        run_test(test, 'test_get')
+        self.total_time += run_test(test, 'test_get')
 
         for key, value in pairs:
             self.mc.delete(key)
             for i in xrange(100):
                 result = get((self.key_prefix + 'bkey%d') % i)
                 assert result == expected_values[i]
-        run_test(test, 'test_get_big_object (100 objects)')
+        self.total_time += run_test(test, 'test_get_big_object (100 objects)')
 
         for key, value in pairs:
             self.mc.delete(key)
             for key in keys:
                 self.mc.get(self.key_prefix + key)
 
-        run_test(test, 'test_p_app_get')
+        self.total_time += run_test(test, 'test_p_app_get')
 
     def test_get_multi(self):
         pairs = zip(self.keys, self.values)
         def test():
             result = self.mc.get_multi(keys)
             assert result == expected_result
-        run_test(test, 'test_get_multi')
+        self.total_time += run_test(test, 'test_get_multi')
 
         for key, value in pairs:
             self.mc.delete(key)
         def test():
             result = self.mc.get_list(keys)
             assert result == expected_result
-        run_test(test, 'test_get_list')
+        self.total_time += run_test(test, 'test_get_list')
 
         for key in self.keys:
             self.mc.delete(key)
     parser.add_option('-v', '--verbose', dest='verbose',
             action='store_true', default=False,
             help="show traceback infomation if a test fails")
-
     parser.add_option('-p', '--profile', dest='profile',
             action='store_true', default=False,
             help="store profiling log for every test")
+    parser.add_option('-q', '--quiet', dest='quiet',
+            action='store_true', default=False,
+            help="don't output test results")
             
     global options
     options, args = parser.parse_args()
 
-    global total_time
-    total_time = 0
-
-    
     print "Benchmarking %s..." % module_name
     
     if options.profile:
         prof = hotshot.Profile("%s.prof" % name)
         prof.start()
         
-    Benchmark(module, options, prefix)
+    b = Benchmark(module, options, prefix)
 
     if options.profile:
         prof.stop()
         prof.close()
 
-    print "Total_time is %f" % total_time
+    print "Total_time is %f" % b.total_time
     print '---'
 
 