Commits

Remi Meier committed 2ec8cbf

add files

Files changed (86)

benchmarks/3np1/bench.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList, NonSTMList
+
+def three_n_plus_1(n):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+    return steps
+
+
+def for_set(ns):
+    results = []
+    for n in ns:
+        results.append(three_n_plus_1(n))
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    print ths, upto, groups
+
+    fs = []
+    todo = split_every_n(range(upto), groups)
+
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+
+
+if __name__ == '__main__':
+    run()

benchmarks/3np1/bench_append.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList, NonSTMList
+
+def three_n_plus_1(n):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+    return steps
+
+
+def for_set(ns, results):
+    with atomic:
+        for n in ns:
+            res = three_n_plus_1(n)
+            results.append(res)
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100, kind='old'):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    assert kind in ('old', 'stm')
+    kind = STMList if kind == 'stm' else NonSTMList
+    print ths, upto, groups, kind
+
+    fs = []
+    todo = split_every_n(range(upto), groups)
+    results = kind()
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns, results))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+
+
+if __name__ == '__main__':
+    run()

benchmarks/3np1/bench_atomic.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList, NonSTMList
+
+def three_n_plus_1(n):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+    return steps
+
+
+def for_set(ns):
+    results = []
+    with atomic:
+        for n in ns:
+            results.append(three_n_plus_1(n))
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    print ths, upto, groups
+
+    fs = []
+    todo = split_every_n(range(upto), groups)
+
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+
+
+if __name__ == '__main__':
+    run()

benchmarks/3np1/bench_element_access.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList, NonSTMList
+
+def three_n_plus_1(n):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+    return steps
+
+
+def for_set(ns, results):
+    with atomic:
+        for n in ns:
+            results[n] = three_n_plus_1(n)
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    print ths, upto, groups
+
+    fs = []
+    todo = split_every_n(range(upto), groups)
+    results = [0] * upto
+    
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns, results))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+
+
+if __name__ == '__main__':
+    run()

benchmarks/3np1/execute.sh

+#!/bin/bash
+
+# non-atomic
+LD_PRELOAD=/usr/lib/libbfd.so ~/mutrace/mutrace.in --all -d pypy-c-limit-halving ../bench.py -k1 bench.py 32 200000 1000
+pypy-c-limit-halving ../bench.py bench.py 32 200000 1000
+PYPYLOG=stm-log:stm.log pypy-c-limit-halving ../bench.py -k1 bench.py 32 200000 1000
+
+# atomic:
+LD_PRELOAD=/usr/lib/libbfd.so ~/mutrace/mutrace.in --all -d pypy-c-limit-halving ../bench.py -k1 bench_atomic.py 32 200000 1000
+pypy-c-limit-halving ../bench.py bench_atomic.py 32 200000 1000
+PYPYLOG=stm-log:stm_atomic.log pypy-c-limit-halving ../bench.py -k1 bench_atomic.py 32 200000 1000

benchmarks/3np1/execute_append.sh

+#!/bin/bash
+
+PYPYLOG=stm-log:stm_append_good.log pypy-c-limit-halving ../bench.py -k1 -p bench_append.py 10 40000 1000 stm
+PYPYLOG=stm-log:stm_append_bad.log pypy-c-limit-halving ../bench.py -k1 -p bench_append.py 10 40000 1000 old
+
+PYPYLOG=stm-log:forget.log pypy-c-limit-halving ../bench.py -k1 -p bench_append.py 1 40000 1000 stm
+PYPYLOG=stm-log:forget.log pypy-c-limit-halving ../bench.py -k1 -p bench_append.py 1 40000 1000 old

benchmarks/3np1/execute_list_strategies.sh

+#!/bin/bash
+PYPYLOG=stm-log:stm.log pypy-c-limit-halving bench.py -k1 3np1/bench_element_access.py 10 40000 1000
+PYPYLOG=stm-log:stm.log pypy-c-limit-halving-list-strategies bench.py -k1 3np1/bench_element_access.py 10 40000 1000
+
+PYPYLOG=stm-log:forget.log pypy-c-limit-halving bench.py -k1 3np1/bench_element_access.py 1 40000 1000
+PYPYLOG=stm-log:forget.log pypy-c-limit-halving-list-strategies bench.py -k1 3np1/bench_element_access.py 1 40000 1000

benchmarks/3np1/genplots.sh

+#!/bin/bash
+plot_log.py stm.log --threads 1-32 --legend --no-rma --pdf stm.pdf --compress
+plot_log.py stm_atomic.log --threads 1-32 --legend --no-rma --pdf stm_atomic.pdf --compress

benchmarks/3np1/genplots_append.sh

+#!/bin/bash
+
+plot_log.py stm_append_bad.log --legend --no-rma --pdf stm_append_bad.pdf --threads 1-10 --no-should-commit
+plot_log.py stm_append_good.log --legend --no-rma --pdf stm_append_good.pdf --threads 1-10 --no-should-commit

benchmarks/3np1/genplots_lists.sh

+#!/bin/bash
+
+plot_log.py stm_list_bad.log --legend --no-rma --pdf stm_list_bad.pdf --threads 1-10 --no-should-commit
+plot_log.py stm_list_good.log --legend --no-rma --pdf stm_list_good.pdf --threads 1-10 --no-should-commit

benchmarks/bench.py

+
+import time
+import math
+import imp, os, sys
+import json
+import contextlib
+
+def import_file(filepath):
+    mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
+    return imp.load_source(mod_name, filepath)
+
+
+class DummyFile(object):
+    def write(self, x): pass
+
+@contextlib.contextmanager
+def nostdout():
+    save_stdout = sys.stdout
+    sys.stdout = DummyFile()
+    yield
+    sys.stdout = save_stdout
+
+
+def avg(xs):
+    return sum(xs) / len(xs)
+
+def std_dev(xs):
+    N = len(xs)
+    mu = avg(xs)
+    var = sum([(x - mu)**2 for x in xs]) / N
+    return math.sqrt(var)
+
+def within_error(args, times):
+    ts = sorted(times)[:args.k]
+    best = ts[0]
+    best *= (1.0 + args.error)
+
+    for t in ts:
+        if t > best:
+            return False
+    return True
+
+
+
+def main(args):
+    folder = os.path.dirname(args.file)
+    if folder:
+        os.chdir(folder)
+    sys.path.insert(0, '.')
+    test = import_file(os.path.basename(args.file))
+
+    times = []
+    k = 1
+    while True:
+        time.sleep(0.2)
+        if not args.q:
+            print "Run {}/{}".format(k, args.k)
+
+        test_time = time.time()
+        if args.p:
+            test.run(*args.more)
+        else:
+            with nostdout():
+                test.run(*args.more)
+        times.append(time.time() - test_time)
+
+        if k >= args.k:
+            if within_error(args, times):
+                break
+            elif not args.q:
+                print "error was not within", args.error
+        k += 1
+
+    if not args.q:
+        print "times:", times
+
+    times = sorted(times)[:args.k]
+    result = {'best':min(times),
+              'error':args.error,
+              'std_dev(k)':std_dev(times)}
+    print json.dumps(result)
+
+
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-k', default=3, help='K-best K', type=int)
+    parser.add_argument('-e', '--error', default=0.05, type=float,
+                        help='relative allowed error [0.05]')
+    parser.add_argument('-q', action='store_const',
+                        const=True, default=False,
+                        help='mute except for best run')
+    parser.add_argument('-p', action='store_const',
+                        const=True, default=False,
+                        help='print to stdout what the benchmark prints')
+    parser.add_argument('file', help='file to run')
+    parser.add_argument('more', nargs="*", help='file.run() arguments')
+
+    args = parser.parse_args()
+    if not args.q:
+        print args
+    main(args)

benchmarks/env.sh

+export PYTHONPATH=$PWD/../../common/:$PYTHONPATH
+
+export PATH=$PWD/pypy-env/bin:$PATH
+export PATH=$HOME/pypy_dir/pypy/tool:$PATH
+

benchmarks/fibonacci/execute.sh

+#!/bin/bash
+
+LD_PRELOAD=/usr/lib/libbfd.so ~/mutrace/mutrace.in --all -d pypy-c-limit-halving fib_parallel.py
+PYPYLOG=stm-log:stm.log time -p pypy-c-limit-halving fib_parallel.py

benchmarks/fibonacci/fib.py

+#!/usr/bin/python
+import thread
+thread.start_new_thread(lambda:0, ())
+
+def fib(n):
+    if n <= 1:
+        return n
+    return fib(n-1) + fib(n-2)
+
+
+def run():
+    print fib(30)
+
+if __name__ == '__main__':
+    run()

benchmarks/flask/app.py

+from flask import Flask
+app = Flask(__name__)
+
+import thread
+from __pypy__.thread import atomic
+
+@app.route("/")
+def hello():
+    with atomic:
+        i = 1000000
+        while i:
+            i -= 1
+    return 'Hello World!'
+
+
+if __name__ == "__main__":
+    import sys
+    sys.setcheckinterval(100000)
+    app.run(threaded=True, host='0.0.0.0')

benchmarks/flask/app_setcheck.py

+from flask import Flask
+app = Flask(__name__)
+
+import thread
+from __pypy__.thread import atomic
+
+@app.route("/")
+def hello():
+    with atomic:
+        i = 1000000
+        while i:
+            i -= 1
+    return 'Hello World!'
+
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--check', type=int, default=200000, 
+                        help='setcheckinterval')
+    
+    args = parser.parse_args()
+    import sys
+    sys.setcheckinterval(args.check)
+    
+    app.run(threaded=True, host='0.0.0.0')

benchmarks/flask/app_should_commit.py

+from flask import Flask
+app = Flask(__name__)
+
+import thread
+from __pypy__.thread import atomic
+
+@app.route("/")
+def hello():
+    thread.should_commit()
+    with atomic:
+        i = 1000000
+        while i:
+            i -= 1
+    return 'Hello World!'
+
+
+if __name__ == "__main__":
+    app.run(threaded=True, host='0.0.0.0')

benchmarks/flask/bench.sh

+#!/bin/sh
+
+echo "WITHOUT PENALTY"
+PYPYLOG=stm-log:stm_no_penalty.log pypy-c-limit-halving app.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 4
+killall -9 pypy-c-limit-halving
+
+echo "WITHOUT PENALTY, SERIAL"
+PYPYLOG=stm-log:forget.log pypy-c-limit-halving app.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 1
+killall -9 pypy-c-limit-halving
+
+
+echo "WITH SHOULD_COMMIT"
+PYPYLOG=stm-log:stm_should_commit.log pypy-c-limit-halving app_should_commit.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 4
+killall -9 pypy-c-limit-halving
+
+
+echo "WITH PENALTY"
+PYPYLOG=stm-log:stm_penalty.log pypy-c-contention-penalty-no-scenarios app.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 4
+killall -9 pypy-c-contention-penalty-no-scenarios

benchmarks/flask/bench_setcheck.sh

+#!/bin/sh
+
+echo "" > setcheck.log 
+for check in 10 100 1000 10000 100000 1000000 10000000 ; do
+    echo $check
+    echo "== $check ==" >> setcheck.log
+    PYPYLOG=stm-log:stm_setcheck_$check.log pypy-c-contention-penalty-no-scenarios app_setcheck.py --check $check 2> /dev/null &
+    sleep 3
+    openload -l 8 127.0.0.1:5000 4 2>> setcheck.log >> setcheck.log
+    killall -9 pypy-c-contention-penalty-no-scenarios
+done;

benchmarks/flask/genplots.sh

+#!/bin/bash
+
+plot_log.py stm_no_penalty.log --legend --no-rma --pdf stm_flask_bad.pdf --threads 1-5 --no-should-commit
+plot_log.py stm_should_commit.log --legend --no-rma --pdf stm_flask_should_commit.pdf --threads 1-5
+plot_log.py stm_penalty.log --legend --no-rma --pdf stm_flask_good.pdf --threads 1-5 --no-should-commit
+./plot_setcheck.py --figure-size 6x2 

benchmarks/flask/plot_setcheck.py

+#!/usr/bin/python
+
+##########################################################
+""" TODO: print thread-descriptor info on commit/abort """
+##########################################################
+
+import matplotlib
+import os
+import sys
+matplotlib.use('gtkagg')
+
+
+args = None
+import matplotlib.pyplot as plt
+# import pprint - slow as hell
+
+
+ys = {10:[3.95, 7.99, 7.55, 5.39, 6.37, 7.46, 5.26],
+      100:[7.16, 7.35, 8.82, 9.65, 7.91, 7.94, 7.56],
+      1000:[9.81, 13.38, 14.25, 14.90, 13.26, 14.51, 14.69],
+      10000:[8.83, 14.51, 14.35, 14.4, 11.96, 13.94, 14.94],
+      100000:[8.23, 14.59, 14.48, 13.81, 13.64, 13.75, 13.72],
+      1000000:[3.64, 4.63, 8.25, 3.63, 3.64, 6.46, 6.48],
+      10000000:[3.55, 3.59, 3.55, 3.55, 3.56, 3.55, 3.55],
+      }
+
+
+def plot_tps(ax):
+    import numpy as np
+    x = []
+    y = []
+    yerr = []
+
+    for k in sorted(ys.keys()):
+        v = ys[k]
+        x.append(k)
+        y.append(np.mean(v))
+        yerr.append(np.std(v))
+
+    ax.errorbar(x, y, yerr=yerr)
+
+
+def main():
+    global fig
+
+    print "Draw..."
+    fig = plt.figure()
+
+    ax = fig.add_subplot(111)
+
+    plot_tps(ax)
+
+    ax.set_xscale('log')
+    ax.set_ylabel("Requests per Second (TPS)")
+    ax.set_xlabel("reads\_limit")
+    ax.set_xlim(5, 20000000)
+
+    #axs[0].set_ylim(0, len(x))
+    #ax.set_yticks([r+0.5 for r in range(len(logs))])
+    #ax.set_yticklabels(range(1, len(logs)+1))
+    #axs[0].set_xticks([])
+    print "Drawn."
+
+    # def label_format(x, pos):
+    #     return "%.2f" % (abs((x - left) * 1e-6), )
+    # major_formatter = matplotlib.ticker.FuncFormatter(label_format)
+    # axs[0].xaxis.set_major_formatter(major_formatter)
+
+    #legend = ax.legend()
+
+    plt.draw()
+    file_name = "setcheck.pdf"
+    plt.savefig(file_name, format='pdf',
+                # bbox_extra_artists=(legend,),
+                bbox_inches='tight', pad_inches=0)
+
+
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser(description='Plot stm log files')
+    parser.add_argument('--figure-size', default='6x4',
+                        help='set figure size in inches: format=6x4')
+    parser.add_argument('--font-size', default='10.0',
+                        help='set font size in pts: 10.0')
+    parser.add_argument('--png-dpi', default='300',
+                        help='set dpi of png output: 300')
+
+
+    args = parser.parse_args()
+    matplotlib.rcParams.update(
+        {'figure.figsize': tuple(map(int, args.figure_size.split('x'))),
+         'font.size': float(args.font_size),
+         'savefig.dpi': int(args.png_dpi),
+         })
+
+
+    main()

common/abstract_threading.py

+
+from Queue import Queue, Empty, Full
+from threading import Thread, Condition
+import thread
+from __pypy__.thread import atomic
+
+class Worker(Thread):
+    """Thread executing tasks from a given tasks queue"""
+    def __init__(self, queue):
+        Thread.__init__(self)
+        self.daemon = True
+        self.next_task = None
+        self.cond = Condition()
+        self.queue = queue
+        self.start()
+
+    def run(self):
+        # the next line registers the at_commit_cb at the interpreter
+        # level for this thread. This should be fixed in the
+        # interpreter (it causes a conflict in stmgcintf.register_at_commit_cb).
+        thread.at_commit(lambda : 0, ())
+
+        while True:
+            with self.cond:
+                while self.next_task is None:
+                    self.cond.wait()
+
+                func, args, kargs = self.next_task
+                self.next_task = None
+
+                try:
+                    func(*args, **kargs)
+                except Exception as e:
+                    print e
+
+            # first time put in queue by threadpool on creation
+            try:
+                self.queue.put_nowait(self)
+            except Full:
+                # thread limit reached, I'll show myself out..
+                return
+
+
+class ThreadPool(object):
+    def __init__(self, thread_queue_size=12):
+        self.threads = Queue(thread_queue_size)
+
+    def add_task(self, func, *args, **kargs):
+        try:
+            worker = self.threads.get_nowait()
+        except Empty:
+            worker = Worker(self.threads)
+
+        with worker.cond:
+            worker.next_task = (func, args, kargs)
+            worker.cond.notify_all()
+
+
+
+
+import multiprocessing
+_thread_pool = ThreadPool(2 * multiprocessing.cpu_count())
+
+
+
+
+class Future(object):
+    def __init__(self, func, *args, **kwargs):
+        self._done = False
+        self._result = None
+        self._exception = None
+        self._cond = Condition()
+
+        assert hasattr(func, "__call__")
+
+        _thread_pool.add_task(self._task, func, *args, **kwargs)
+
+
+    def _task(self, func, *args, **kwargs):
+        with self._cond:
+            try:
+                self._result = func(*args, **kwargs)
+            except Exception as e:
+                self._exception = e
+            finally:
+                self._done = True
+                # several points/threads in the program
+                # may wait for the result (notify_all):
+                self._cond.notify_all()
+
+
+    def __call__(self):
+        with self._cond:
+            while not self._done:
+                self._cond.wait()
+
+        if self._exception:
+            raise self._exception
+
+        return self._result
+
+
+
+class AtomicFuture(Future):
+    def _task(self, func, *args, **kwargs):
+        with self._cond:
+            try:
+                with atomic:
+                    self._result = func(*args, **kwargs)
+            except Exception as e:
+                self._exception = e
+            finally:
+                self._done = True
+                # several points/threads in the program
+                # may wait for the result (notify_all):
+                self._cond.notify_all()
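
A minimal usage sketch of the Future/AtomicFuture API above, assuming a pypy-stm interpreter that provides __pypy__.thread.atomic and thread.at_commit; work() is a hypothetical task, not part of the commit. Calling a Future blocks until its task has run on the pool; AtomicFuture additionally wraps the task body in a single atomic transaction.

    from abstract_threading import Future, AtomicFuture

    def work(ns):
        # hypothetical task standing in for real benchmark work
        return sum(n * n for n in ns)

    fs = [Future(work, range(i, i + 100)) for i in range(0, 1000, 100)]
    total = sum(f() for f in fs)          # f() blocks until the worker is done

    af = AtomicFuture(work, range(1000))  # same, but runs as one transaction
    print total, af()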

common/abstract_transactions.py

+
+from Queue import Queue, Empty, Full
+from threading import Thread, Condition
+import thread
+from __pypy__.thread import atomic, is_atomic
+
+class Worker(Thread):
+    """Thread executing tasks"""
+    def __init__(self, queue):
+        Thread.__init__(self)
+        self.daemon = True
+        self.next_task = None
+        self.cond = Condition()
+        self.queue = queue
+        self.start()
+
+    def run(self):
+        while True:
+            with self.cond:
+                while self.next_task is None:
+                    self.cond.wait()
+
+                func, args, kargs = self.next_task
+                self.next_task = None
+
+                try:
+                    func(*args, **kargs)
+                except Exception as e:
+                    print e
+
+            try:
+                self.queue.put_nowait(self)
+            except Full:
+                # thread limit reached, I'll show myself out..
+                return
+
+
+class ThreadPool(object):
+    def __init__(self, thread_queue_size=12):
+        self.threads = Queue(thread_queue_size)
+
+    def add_task(self, func, *args, **kargs):
+        try:
+            worker = self.threads.get_nowait()
+        except Empty:
+            worker = Worker(self.threads)
+
+        with worker.cond:
+            worker.next_task = (func, args, kargs)
+            worker.cond.notify_all()
+
+import multiprocessing
+_thread_pool = ThreadPool(2 * multiprocessing.cpu_count())
+
+
+### Futures ###
+
+BEGIN_ATOMIC = 1
+END_ATOMIC = 2
+RETURN = 3
+
+class Future(object):
+    def __init__(self, func, *args, **kwargs):
+        self._done = False
+        self._result = None
+        self._exception = None
+        self._cond = Condition()
+
+        assert hasattr(func, "__call__")
+        assert hasattr(func, '__protected__') \
+          or hasattr(func, '__unprotected__')
+
+        _thread_pool.add_task(self._task, func, *args, **kwargs)
+
+
+    def _task(self, func, *args, **kwargs):
+        with self._cond:
+            try:
+                self._result = self._call(func, args, kwargs)
+            except Exception as e:
+                self._exception = e
+            finally:
+                self._done = True
+                # several points/threads in the program
+                # may wait for the result (notify_all):
+                self._cond.notify_all()
+
+
+    def _call(self, func, args=(), kargs={}):
+        assert hasattr(func, '__protected__') \
+          or hasattr(func, '__unprotected__')
+
+        atomically = hasattr(func, '__protected__')
+
+        try:
+            gen = func(*args, **kargs)
+            res = None
+            inner_res = None
+            while True:
+                if (atomically and res is None) or res is BEGIN_ATOMIC:
+                    #thread.add_penalty(1000, 0)
+                    #thread.should_commit()
+                    with atomic:
+                        res = gen.send(inner_res)
+                elif (not atomically and res is None) or res is END_ATOMIC:
+                    assert not is_atomic(), \
+                      "Calling unprotected code from protected " \
+                      "context is not allowed."
+                    res = gen.send(inner_res)
+                elif isinstance(res, tuple):
+                    if res[0] is RETURN:
+                        return res[1]
+                    try:
+                        inner_res = self._call(*res)
+                        res = None # call with inner_res
+                        continue
+                    except StopIteration:
+                        raise
+                    except Exception as e:
+                        if atomically:
+                            with atomic:
+                                res = gen.throw(e)
+                        else:
+                            res = gen.throw(e)
+                else:
+                    res = gen.send(inner_res)
+                inner_res = None
+        except StopIteration:
+            pass
+
+        return res
+
+
+    def __call__(self):
+        with self._cond:
+            while not self._done:
+                self._cond.wait()
+
+        if self._exception:
+            raise self._exception
+
+        return self._result
+
+
+### DECORATORS ###
+from functools import wraps
+import inspect
+
+def protected(f):
+    @wraps(f)
+    def inner_gen(*args, **kwargs):
+        if not is_atomic():
+            raise Exception("Protected function needs to be called "\
+                            "from protected/atomic context "+str(f))
+        yield f(*args, **kwargs)
+    #
+    @wraps(f)
+    def inner(*args, **kwargs):
+        # can't check for atomic on instantiation of generator
+        return f(*args, **kwargs)
+    #
+    res = inner
+    if not inspect.isgeneratorfunction(f):
+        res = inner_gen
+    res.__protected__ = True
+    return res
+
+
+def unprotected(f):
+    @wraps(f)
+    def inner_gen(*args, **kwargs):
+        if is_atomic():
+            raise Exception("Unprotected function needs to be called "\
+                            "from unprotected/non-atomic context "+str(f))
+        yield f(*args, **kwargs)
+    #
+    @wraps(f)
+    def inner(*args, **kwargs):
+        # can't check for atomic on instantiation of generator
+        return f(*args, **kwargs)
+    #
+    res = inner
+    if not inspect.isgeneratorfunction(f):
+        res = inner_gen
+    res.__unprotected__ = True
+    return res
+
+
+def penalize(penalty=1000):
+    def really_decorate(f):
+        @wraps(f)
+        def inner(*args, **kwargs):
+            thread.add_penalty(penalty, 0)
+            try:
+                res = f(*args, **kwargs)
+            finally:
+                thread.add_penalty(penalty, 0)
+            return res
+
+        return inner
+    return really_decorate
+
+def validate(f):
+    @wraps(f)
+    def inner(*args, **kwargs):
+        thread.log_checkpoint("VALIDATE_ENTER({})".format(f))
+        try:
+            res = f(*args, **kwargs)
+        finally:
+            thread.log_checkpoint("VALIDATE_LEAVE({})".format(f))
+        return res
+    return inner
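
A minimal usage sketch of the @protected/@unprotected decorators and Future above, assuming a pypy-stm interpreter; tally() and report() are hypothetical examples. A plain @protected function is wrapped into a one-shot generator and executed by Future._call() inside "with atomic:", while an @unprotected function is asserted to run outside any atomic block.

    from abstract_transactions import Future, protected, unprotected

    @protected
    def tally(ns):
        # body runs as a single atomic transaction
        return sum(ns)

    @unprotected
    def report(total):
        # must not be called from an atomic context
        print "total:", total

    f = Future(tally, range(10))
    g = Future(report, f())   # f() blocks until tally's transaction is done
    g()                       # wait until the report has been printed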

common/stmlist.py

+
+from __pypy__.thread import atomic
+from thread import at_commit, run_commit
+from threading import local
+
+
+def atomically(func):
+    def inner(*args, **kwargs):
+        with atomic:
+            return func(*args, **kwargs)
+    return inner
+
+class STMList(object):
+    def __init__(self):
+        self._shared_list = []
+
+    def append(self, item):
+        at_commit(self._shared_list.append, (item,))
+
+    def extend(self, other):
+        # like append(): defer the update of the shared list to commit time
+        at_commit(self._shared_list.extend, (other,))
+
+    @atomically
+    def __getitem__(self, idx):
+        # can possibly be made better:
+        run_commit()
+        return self._shared_list[idx]
+
+    @atomically
+    def __setitem__(self, idx, v):
+        # can possibly be made better
+        run_commit()
+        self._shared_list[idx] = v
+
+    @atomically
+    def __len__(self):
+        # can possibly be made better
+        run_commit()
+        return len(self._shared_list)
+
+    @atomically
+    def __repr__(self):
+        # can possibly be made better
+        run_commit()
+        return repr(self._shared_list)
+
+
+
+class STMListOld(object):
+    '''
+    Maintains a thread-local list that transactions
+    append/extend to. This list is empty or non-existent
+    between transactions.
+    At commit time, it inevitably pushes the changes
+    to the thread-shared list and empties the thread-local
+    list.
+    '''
+
+    def __init__(self):
+        self._shared_list = []
+        self._local = local()
+
+    def _get_local_list(self):
+        return self._local.__dict__.setdefault('l', [])
+        # try:
+        #     return self._local.__dict__['l']
+        # except AttributeError:
+        #     self._local.__dict__['l']
+        # return self._local.list
+
+    @atomically
+    def append(self, item):
+        self._get_local_list().append(item)
+        at_commit(self.__commit, ())
+
+    @atomically
+    def extend(self, other):
+        self._get_local_list().extend(other)
+        at_commit(self.__commit, ())
+
+    @atomically
+    def __getitem__(self, idx):
+        # can possibly be made better:
+        run_commit()
+        return self._shared_list[idx]
+
+    @atomically
+    def __setitem__(self, idx, v):
+        # can possibly be made better
+        run_commit()
+        self._shared_list[idx] = v
+
+    @atomically
+    def __len__(self):
+        # can possibly be made better
+        run_commit()
+        return len(self._shared_list)
+
+    @atomically
+    def __repr__(self):
+        # can possibly be made better
+        run_commit()
+        return repr(self._shared_list)
+
+
+    def __commit(self):
+        # will always run atomically
+        l = self._get_local_list()
+        if l:
+            self._shared_list.extend(l)
+            del l[:]
+
+class NonSTMList(object):
+    def __init__(self):
+        self._shared_list = []
+
+    @atomically
+    def append(self, item):
+        self._shared_list.append(item)
+
+    @atomically
+    def extend(self, other):
+        self._shared_list.extend(other)
+
+    @atomically
+    def __getitem__(self, idx):
+        return self._shared_list[idx]
+
+    @atomically
+    def __setitem__(self, idx, v):
+        self._shared_list[idx] = v
+
+    @atomically
+    def __len__(self):
+        return len(self._shared_list)
+
+    @atomically
+    def __repr__(self):
+        return repr(self._shared_list)
+
+
+
+if __name__ == '__main__':
+    # tests:
+    l = STMList()
+    l.append(1)
+    run_commit()
+    assert l[0]==1 and len(l)==1
+    print l
+
+    with atomic:
+        l[0] = 2
+        l.append(1)
+        a = l[0]
+        b = l[1]
+        c = len(l)
+    assert a==2 and b==1 and c==2
+    print l
+
+    l.append(3)
+    assert len(l)==3
+    print l

experimental_benchmarks/3np1/bench.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList
+
+def three_n_plus_1(n, res):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+
+    res.append(steps)
+
+
+def for_set(ns, res):
+    for n in ns:
+        # with atomic:
+        three_n_plus_1(n, res)
+
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100, use_stm=False):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    use_stm = bool(use_stm)
+
+    if use_stm:
+        results = STMList()
+    else:
+        results = []
+    fs = []
+    todo = split_every_n(range(upto), groups)
+
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns, results))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+    print results
+
+if __name__ == '__main__':
+    run()

experimental_benchmarks/3np1/bench2.py

+#!/usr/bin/python
+
+from abstract_threading import (Future, atomic)
+from stmlist import STMList, NonSTMList
+
+def three_n_plus_1(n):
+    steps = 0
+    while n > 1:
+        n = n / 2 if n & 1 == 0 \
+          else n * 3 + 1
+        steps += 1
+    return steps
+
+
+def for_set(ns, res):
+    results = []
+    with atomic:
+        for n in ns:
+            results.append(three_n_plus_1(n))
+    res.append(results)
+
+def split_every_n(it, n):
+    return (it[i:i+n] for i in xrange(0, len(it), n))
+
+def join(fs):
+    for f in fs:
+        f()
+
+def run(ths=4, upto=1000, groups=100, use_stm='stm'):
+    ths = int(ths)
+    upto = int(upto)
+    groups = int(groups)
+    use_stm = use_stm == 'stm'
+    print ths, upto, groups, use_stm
+
+    if use_stm:
+        results = STMList()
+    else:
+        results = NonSTMList()
+    fs = []
+    todo = split_every_n(range(upto), groups)
+
+    try:
+        while True:
+            for _ in xrange(ths):
+                ns = todo.next()
+                fs.append(Future(for_set, ns, results))
+            join(fs)
+    except StopIteration:
+        join(fs)
+
+    print len(results)
+
+if __name__ == '__main__':
+    run()

experimental_benchmarks/bench.py

+
+import time
+import math
+import imp, os, sys
+import json
+import contextlib
+
+def import_file(filepath):
+    mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1])
+    return imp.load_source(mod_name, filepath)
+
+
+class DummyFile(object):
+    def write(self, x): pass
+
+@contextlib.contextmanager
+def nostdout():
+    save_stdout = sys.stdout
+    sys.stdout = DummyFile()
+    yield
+    sys.stdout = save_stdout
+
+
+def avg(xs):
+    return sum(xs) / len(xs)
+
+def std_dev(xs):
+    N = len(xs)
+    mu = avg(xs)
+    var = sum([(x - mu)**2 for x in xs]) / N
+    return math.sqrt(var)
+
+def within_error(args, times):
+    ts = sorted(times)[:args.k]
+    best = ts[0]
+    best *= (1.0 + args.error)
+
+    for t in ts:
+        if t > best:
+            return False
+    return True
+
+
+
+def main(args):
+    folder = os.path.dirname(args.file)
+    if folder:
+        os.chdir(folder)
+    sys.path.insert(0, '.')
+    test = import_file(os.path.basename(args.file))
+
+    times = []
+    k = 1
+    while True:
+        time.sleep(0.2)
+        if not args.q:
+            print "Run {}/{}".format(k, args.k)
+
+        test_time = time.time()
+        if args.p:
+            test.run(*args.more)
+        else:
+            with nostdout():
+                test.run(*args.more)
+        times.append(time.time() - test_time)
+
+        if k >= args.k:
+            if within_error(args, times):
+                break
+            elif not args.q:
+                print "error was not within", args.error
+        k += 1
+
+    if not args.q:
+        print "times:", times
+
+    times = sorted(times)[:args.k]
+    result = {'best':min(times),
+              'error':args.error,
+              'std_dev(k)':std_dev(times)}
+    print json.dumps(result)
+
+
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-k', default=3, help='K-best K', type=int)
+    parser.add_argument('-e', '--error', default=0.05, type=float,
+                        help='relative allowed error [0.05]')
+    parser.add_argument('-q', action='store_const',
+                        const=True, default=False,
+                        help='mute except for best run')
+    parser.add_argument('-p', action='store_const',
+                        const=True, default=False,
+                        help='print to stdout what the benchmark prints')
+    parser.add_argument('file', help='file to run')
+    parser.add_argument('more', nargs="*", help='file.run() arguments')
+
+    args = parser.parse_args()
+    if not args.q:
+        print args
+    main(args)

experimental_benchmarks/dataflow-bench/bench.py

+#!/usr/bin/python
+
+from abstract_threading import Future
+import thread
+from thread import atomic
+import collections
+import string, sys, time
+
+
+class Node(object):
+    pass
+
+
+class ElementAdd(Node):
+    def __init__(self):
+        self.next = []
+
+    def do(self, in1, in2):
+        with atomic:
+            out = []
+            for i, j in zip(in1, in2):
+                out.append(i + j)
+        return [Future(self.next[0].do, out)]
+
+class Dup(Node):
+    def __init__(self):
+        self.next = []
+
+    def do(self, in1):
+        with atomic:
+            dup = in1[:]
+        if len(self.next) == 1:
+            return [Future(self.next[0].do, in1, dup)]
+        return [Future(self.next[0].do, in1),
+                Future(self.next[1].do, dup)]
+
+class Result(Node):
+    def do(self, in1):
+        self.result = in1
+        return self
+
+def _wait_for_result(futures):
+    if isinstance(futures, Result):
+        return futures
+
+    todo = []
+    for f in futures:
+        todo.append(f())
+    for t in todo:
+        res = _wait_for_result(t)
+        if isinstance(res, Result):
+            return res
+
+def eval(first_node, *args):
+    return _wait_for_result(first_node.do(*args))
+
+
+
+def main():
+    xs = [1]*10000
+    first = Dup()
+    add = ElementAdd()
+    add.next.append(Result())
+    first.next.append(add)
+    print (eval(first, xs).result)
+
+if __name__ == '__main__':
+    main()

experimental_benchmarks/env.sh

+export PATH=$PWD/pypy-env/bin:$PATH
+export PATH=$HOME/pypy_dir/pypy/tool:$PATH
+
+export PYTHONPATH=$PWD/../common/:$PYTHONPATH

experimental_benchmarks/flask-bench/app.py

+from flask import Flask
+app = Flask(__name__)
+
+import thread
+from __pypy__.thread import atomic
+
+@app.route("/")
+def hello():
+    #thread.should_commit()
+    with atomic:
+        i = 1000000
+        while i:
+            i -= 1
+    return 'Hello World!'
+
+
+if __name__ == "__main__":
+    app.run(threaded=True, host='0.0.0.0')

experimental_benchmarks/flask-bench/bench.sh

+#!/bin/sh
+
+echo "WITHOUT PENALTY"
+PYPYLOG=stm-log:stm_log1.log pypy-without-penalty app.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 4
+kill -9 %1
+
+echo "WITH PENALTY"
+PYPYLOG=stm-log:stm_log2.log pypy-c app.py 2> /dev/null &
+sleep 2
+openload -l 8 127.0.0.1:5000 4
+kill -9 %1

experimental_benchmarks/flask-bench/werkzeugapp.py

+from werkzeug.wrappers import Request, Response
+import thread
+from __pypy__.thread import atomic
+
+@Request.application
+def application(request):
+    #thread.should_commit()
+    with atomic:
+        i = 1000000
+        while i:
+            i -= 1
+    return Response('Hello World!')
+
+if __name__ == '__main__':
+    from werkzeug.serving import run_simple
+    run_simple('0.0.0.0', 5000, application,
+               threaded=True, processes=1)

experimental_benchmarks/mandelbrot/mandelbrot.py

+
+
+
+
+from abstract_threading import Future, atomic
+import Image, sys
+
+
+
+
+def calculate(a, b, im_size, max_iter=255):
+    print "a:%s, b:%s, im_size:%s" % (a, b, im_size)
+    ar, ai = a
+    br, bi = b
+    width, height = im_size
+    imag_step = (bi - ai) / (height - 1)
+    real_step = (br - ar) / (width - 1)
+    print "real/width:%s, imag/height:%s" % (real_step, imag_step)
+
+    with atomic:
+        result = [[0] * width for y in xrange(height)]
+    for y in xrange(height):
+        zi = ai + y * imag_step
+        for x in xrange(width):
+            zr = ar + x * real_step
+            z = complex(zr, zi)
+            c = z
+            for i in xrange(max_iter):
+                if abs(z) > 2.0:
+                    break
+                z = z * z + c
+            result[y][x] = i
+
+    return result
+
+def save_img(image, file_name='out.png'):
+    im = Image.new("RGB", (len(image[0]), len(image)))
+    out = im.load()
+
+    for y in xrange(len(image)):
+        for x in xrange(len(image[0])):
+            c = image[y][x]
+            out[x,y] = c, c, c
+    im.save(file_name, 'PNG')
+
+def save_to_file(image, file_name='out.txt'):
+    with atomic:
+        s = "\n".join(map(str, image))
+    with open(file_name, 'w') as f:
+        f.write(s)
+
+
+def merge_imgs(imgs):
+    res = []
+    for img in imgs:
+        for y in img:
+            res.append(y)
+    return res
+
+
+def run(threads=1):
+    ar, ai = -2.0, -1.5
+    br, bi = 1.0, 1.5
+    width, height = 512, 512
+
+    step = (bi - ai) / threads
+    res = []
+    ai = -1.5
+    bi = ai + step
+    for i in xrange(threads):
+        res.append(Future(calculate,
+                          a=(ar, ai + i * step),
+                          b=(br, bi + i * step),
+                          im_size=(width, int(height / threads))
+            ))
+
+    res = [f() for f in res]
+    return merge_imgs(res)
+
+
+
+if __name__ == '__main__':
+    image = run(int(sys.argv[1]))
+    save_to_file(image)
+    # save_img(image)  # don't run under STM: it allocates 4000GB of memory

experimental_benchmarks/markov-bench/__init__.py

Empty file added.

experimental_benchmarks/markov-bench/bench.py

+#!/usr/bin/python
+
+from abstract_threading import Future
+import thread
+import collections
+import string, sys, time
+from stm_dict import ProxyDict, SplittingDict, BoxingDict
+dict = BoxingDict
+
+noise = string.punctuation + string.whitespace
+def process_text(text):
+    words = text.lower().split()
+    #print len(words)
+    #time.sleep(0.1)
+    #thread.should_commit()
+
+    for w in words:
+        res = w.strip(noise)
+        if len(w) > 3 and res.isalpha():
+            yield res
+
+## def update_props(props, words):
+##     p = props
+##     entry = None
+##     for word in words:
+##         if p is None:
+##             p = dict()
+##             entry[1] = p
+
+##         entry = p.setdefault(word, [0, None])
+##         entry[0] += 1
+##         p = entry[1]
+
+
+def update_props(props, words):
+    p = props
+    length = len(words)
+    for i, word in enumerate(words):
+        if i < length - 1:
+            p = p.setdefault(word, dict())
+        else:
+            p[word] = None
+            break
+
+def process_file(filename, props, depth=2):
+    print "Start", filename
+
+    with open(filename) as f:
+        text = f.read()
+
+    words = process_text(text)
+    buf = collections.deque(maxlen=depth)
+    # crashes if < depth-1 words
+    for _ in range(depth - 1):
+        buf.append(words.next())
+
+    for word in words:
+        buf.append(word)
+        with thread.atomic:
+            update_props(props, buf)
+
+    print "Finished", filename
+
+
+def main():
+    import os
+
+    N = 2
+    files = os.listdir('data')[:N]
+
+    props = dict()
+    res = []
+    for f in files:
+        res.append(Future(process_file,
+                          os.path.join('data', f), props))
+        if args.serial:
+            res[0]()
+            del res[0]
+
+    [f() for f in res]
+
+    #import pprint
+    #pprint.pprint(props)
+
+
+
+
+if __name__ == '__main__':
+    import argparse
+
+    parser = argparse.ArgumentParser()
+    flag='store_true'
+    parser.add_argument('--serial', action=flag, help='do serial execution')
+
+    args = parser.parse_args()
+    main()

experimental_benchmarks/markov-bench/data/0ws2610.txt

+***The Project Gutenberg's Etext of Shakespeare's First Folio***
+*********************The Tragedie of Hamlet*********************
+
+This is our 3rd edition of most of these plays.  See the index.
+
+
+Copyright laws are changing all over the world, be sure to check
+the copyright laws for your country before posting these files!!
+
+Please take a look at the important information in this header.
+We encourage you to keep this file on your own disk, keeping an
+electronic path open for the next readers.  Do not remove this.
+
+
+**Welcome To The World of Free Plain Vanilla Electronic Texts**
+
+**Etexts Readable By Both Humans and By Computers, Since 1971**
+
+*These Etexts Prepared By Hundreds of Volunteers and Donations*
+
+Information on contacting Project Gutenberg to get Etexts, and
+further information is included below.  We need your donations.
+
+
+The Tragedie of Hamlet
+
+by William Shakespeare
+
+July, 2000  [Etext #2265]
+
+
+***The Project Gutenberg's Etext of Shakespeare's First Folio***
+*********************The Tragedie of Hamlet*********************
+
+*****This file should be named 0ws2610.txt or 0ws2610.zip******
+
+Corrected EDITIONS of our etexts get a new NUMBER, 0ws2611.txt
+VERSIONS based on separate sources get new LETTER, 0ws2610a.txt
+
+
+Project Gutenberg Etexts are usually created from multiple editions,
+all of which are in the Public Domain in the United States, unless a
+copyright notice is included.  Therefore, we usually do NOT keep any
+of these books in compliance with any particular paper edition.
+
+
+We are now trying to release all our books one month in advance
+of the official release dates, leaving time for better editing.
+
+Please note:  neither this list nor its contents are final till
+midnight of the last day of the month of any such announcement.
+The official release date of all Project Gutenberg Etexts is at
+Midnight, Central Time, of the last day of the stated month.  A
+preliminary version may often be posted for suggestion, comment
+and editing by those who wish to do so.  To be sure you have an
+up to date first edition [xxxxx10x.xxx] please check file sizes
+in the first week of the next month.  Since our ftp program has
+a bug in it that scrambles the date [tried to fix and failed] a
+look at the file size will have to do, but we will try to see a
+new copy has at least one byte more or less.
+
+
+Information about Project Gutenberg (one page)
+
+We produce about two million dollars for each hour we work.  The
+time it takes us, a rather conservative estimate, is fifty hours
+to get any etext selected, entered, proofread, edited, copyright
+searched and analyzed, the copyright letters written, etc.  This
+projected audience is one hundred million readers.  If our value
+per text is nominally estimated at one dollar then we produce $2
+million dollars per hour this year as we release thirty-six text
+files per month, or 432 more Etexts in 1999 for a total of 2000+
+If these reach just 10% of the computerized population, then the
+total should reach over 200 billion Etexts given away this year.
+
+The Goal of Project Gutenberg is to Give Away One Trillion Etext
+Files by December 31, 2001.  [10,000 x 100,000,000 = 1 Trillion]
+This is ten thousand titles each to one hundred million readers,
+which is only ~5% of the present number of computer users.
+
+At our revised rates of production, we will reach only one-third
+of that goal by the end of 2001, or about 3,333 Etexts unless we
+manage to get some real funding; currently our funding is mostly
+from Michael Hart's salary at Carnegie-Mellon University, and an
+assortment of sporadic gifts; this salary is only good for a few
+more years, so we are looking for something to replace it, as we
+don't want Project Gutenberg to be so dependent on one person.
+
+We need your donations more than ever!
+
+
+All donations should be made to "Project Gutenberg/CMU": and are
+tax deductible to the extent allowable by law.  (CMU = Carnegie-
+Mellon University).
+
+For these and other matters, please mail to:
+
+Project Gutenberg
+P. O. Box  2782
+Champaign, IL 61825
+
+When all other email fails. . .try our Executive Director:
+Michael S. Hart <hart@pobox.com>
+hart@pobox.com forwards to hart@prairienet.org and archive.org
+if your mail bounces from archive.org, I will still see it, if
+it bounces from prairienet.org, better resend later on. . . .
+
+We would prefer to send you this information by email.
+
+******
+
+To access Project Gutenberg etexts, use any Web browser
+to view http://promo.net/pg.  This site lists Etexts by
+author and by title, and includes information about how
+to get involved with Project Gutenberg.  You could also
+download our past Newsletters, or subscribe here.  This
+is one of our major sites, please email hart@pobox.com,
+for a more complete list of our various sites.
+
+To go directly to the etext collections, use FTP or any
+Web browser to visit a Project Gutenberg mirror (mirror
+sites are available on 7 continents; mirrors are listed
+at http://promo.net/pg).
+
+Mac users, do NOT point and click, typing works better.
+
+Example FTP session:
+
+ftp sunsite.unc.edu
+login: anonymous
+password: your@login
+cd pub/docs/books/gutenberg
+cd etext90 through etext99
+dir [to see files]
+get or mget [to get files. . .set bin for zip files]
+GET GUTINDEX.??  [to get a year's listing of books, e.g., GUTINDEX.99]
+GET GUTINDEX.ALL [to get a listing of ALL books]
+
+***
+
+**Information prepared by the Project Gutenberg legal advisor**
+
+(Three Pages)
+
+
+***START**THE SMALL PRINT!**FOR PUBLIC DOMAIN ETEXTS**START***
+Why is this "Small Print!" statement here?  You know: lawyers.
+They tell us you might sue us if there is something wrong with
+your copy of this etext, even if you got it for free from
+someone other than us, and even if what's wrong is not our
+fault.  So, among other things, this "Small Print!" statement
+disclaims most of our liability to you.  It also tells you how
+you can distribute copies of this etext if you want to.
+
+*BEFORE!* YOU USE OR READ THIS ETEXT
+By using or reading any part of this PROJECT GUTENBERG-tm
+etext, you indicate that you understand, agree to and accept
+this "Small Print!" statement.  If you do not, you can receive
+a refund of the money (if any) you paid for this etext by
+sending a request within 30 days of receiving it to the person
+you got it from.  If you received this etext on a physical
+medium (such as a disk), you must return it with your request.
+
+ABOUT PROJECT GUTENBERG-TM ETEXTS
+This PROJECT GUTENBERG-tm etext, like most PROJECT GUTENBERG-
+tm etexts, is a "public domain" work distributed by Professor
+Michael S. Hart through the Project Gutenberg Association at
+Carnegie-Mellon University (the "Project").  Among other
+things, this means that no one owns a United States copyright
+on or for this work, so the Project (and you!) can copy and
+distribute it in the United States without permission and
+without paying copyright royalties.  Special rules, set forth
+below, apply if you wish to copy and distribute this etext
+under the Project's "PROJECT GUTENBERG" trademark.
+
+To create these etexts, the Project expends considerable
+efforts to identify, transcribe and proofread public domain
+works.  Despite these efforts, the Project's etexts and any
+medium they may be on may contain "Defects".  Among other
+things, Defects may take the form of incomplete, inaccurate or
+corrupt data, transcription errors, a copyright or other
+intellectual property infringement, a defective or damaged
+disk or other etext medium, a computer virus, or computer
+codes that damage or cannot be read by your equipment.
+
+LIMITED WARRANTY; DISCLAIMER OF DAMAGES
+But for the "Right of Replacement or Refund" described below,
+[1] the Project (and any other party you may receive this
+etext from as a PROJECT GUTENBERG-tm etext) disclaims all
+liability to you for damages, costs and expenses, including
+legal fees, and [2] YOU HAVE NO REMEDIES FOR NEGLIGENCE OR
+UNDER STRICT LIABILITY, OR FOR BREACH OF WARRANTY OR CONTRACT,
+INCLUDING BUT NOT LIMITED TO INDIRECT, CONSEQUENTIAL, PUNITIVE
+OR INCIDENTAL DAMAGES, EVEN IF YOU GIVE NOTICE OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+If you discover a Defect in this etext within 90 days of
+receiving it, you can receive a refund of the money (if any)
+you paid for it by sending an explanatory note within that
+time to the person you received it from.  If you received it
+on a physical medium, you must return it with your note, and
+such person may choose to alternatively give you a replacement
+copy.  If you received it electronically, such person may
+choose to alternatively give you a second opportunity to
+receive it electronically.
+
+THIS ETEXT IS OTHERWISE PROVIDED TO YOU "AS-IS".  NO OTHER
+WARRANTIES OF ANY KIND, EXPRESS OR IMPLIED, ARE MADE TO YOU AS
+TO THE ETEXT OR ANY MEDIUM IT MAY BE ON, INCLUDING BUT NOT
+LIMITED TO WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A
+PARTICULAR PURPOSE.
+
+Some states do not allow disclaimers of implied warranties or
+the exclusion or limitation of consequential damages, so the
+above disclaimers and exclusions may not apply to you, and you
+may have other legal rights.
+
+INDEMNITY
+You will indemnify and hold the Project, its directors,
+officers, members and agents harmless from all liability, cost
+and expense, including legal fees, that arise directly or
+indirectly from any of the following that you do or cause:
+[1] distribution of this etext, [2] alteration, modification,
+or addition to the etext, or [3] any Defect.
+
+DISTRIBUTION UNDER "PROJECT GUTENBERG-tm"
+You may distribute copies of this etext electronically, or by
+disk, book or any other medium if you either delete this
+"Small Print!" and all other references to Project Gutenberg,
+or:
+
+[1]  Only give exact copies of it.  Among other things, this
+     requires that you do not remove, alter or modify the
+     etext or this "small print!" statement.  You may however,
+     if you wish, distribute this etext in machine readable
+     binary, compressed, mark-up, or proprietary form,
+     including any form resulting from conversion by word pro-
+     cessing or hypertext software, but only so long as
+     *EITHER*:
+
+     [*]  The etext, when displayed, is clearly readable, and
+          does *not* contain characters other than those
+          intended by the author of the work, although tilde
+          (~), asterisk (*) and underline (_) characters may
+          be used to convey punctuation intended by the
+          author, and additional characters may be used to
+          indicate hypertext links; OR
+
+     [*]  The etext may be readily converted by the reader at
+          no expense into plain ASCII, EBCDIC or equivalent
+          form by the program that displays the etext (as is
+          the case, for instance, with most word processors);
+          OR
+
+     [*]  You provide, or agree to also provide on request at
+          no additional cost, fee or expense, a copy of the
+          etext in its original plain ASCII form (or in EBCDIC
+          or other equivalent proprietary form).
+
+[2]  Honor the etext refund and replacement provisions of this
+     "Small Print!" statement.
+
+[3]  Pay a trademark license fee to the Project of 20% of the
+     net profits you derive calculated using the method you
+     already use to calculate your applicable taxes.  If you
+     don't derive profits, no royalty is due.  Royalties are
+     payable to "Project Gutenberg Association/Carnegie-Mellon
+     University" within the 60 days following each
+     date you prepare (or were legally required to prepare)
+     your annual (or equivalent periodic) tax return.
+
+WHAT IF YOU *WANT* TO SEND MONEY EVEN IF YOU DON'T HAVE TO?
+The Project gratefully accepts contributions in money, time,
+scanning machines, OCR software, public domain etexts, royalty
+free copyright licenses, and every other sort of contribution
+you can think of.  Money should be paid to "Project Gutenberg
+Association / Carnegie-Mellon University".
+
+*END*THE SMALL PRINT! FOR PUBLIC DOMAIN ETEXTS*Ver.04.29.93*END*
+
+
+
+
+
+Project Gutenberg's Etext of Shakespeare's The Tragedie of Hamlet
+
+
+
+
+Executive Director's Notes:
+
+In addition to the notes below, and so you will *NOT* think all
+the spelling errors introduced by the printers of the time have
+been corrected, here are the first few lines of Hamlet, as they
+are presented herein:
+
+  Barnardo. Who's there?
+  Fran. Nay answer me: Stand & vnfold
+your selfe
+
+   Bar. Long liue the King
+
+***
+
+As I understand it, the printers often ran out of certain words
+or letters they had often packed into a "cliche". . .this is the
+original meaning of the term cliche. . .and thus, being unwilling
+to unpack the cliches, and thus you will see some substitutions
+that look very odd. . .such as the exchanges of u for v, v for u,
+above. . .and you may wonder why they did it this way, presuming
+Shakespeare did not actually write the play in this manner. . . .
+
+The answer is that they MAY have packed "liue" into a cliche at a
+time when they were out of "v"'s. . .possibly having used "vv" in
+place of some "w"'s, etc.  This was a common practice of the day,
+as print was still quite expensive, and they didn't want to spend
+more on a wider selection of characters than they had to.
+
+You will find a lot of these kinds of "errors" in this text, as I
+have mentioned in other times and places, many "scholars" have an
+extreme attachment to these errors, and many have accorded them a
+very high place in the "canon" of Shakespeare.  My father read an
+assortment of these made available to him by Cambridge University
+in England for several months in a glass room constructed for the
+purpose.  To the best of my knowledge he read ALL those available
+. . .in great detail. . .and determined from the various changes,