Commits

Michael Forbes committed 69c5127

Added working analysis, filter, and sound generation tools.
- Sound generation not tested with the non-blocking interface
(which should be much better) because that requires OS X >= 10.7.
The blocking interface has artifacts.

  • Participants
  • Parent commits 8788c76

Comments (0)

Files changed (10)

+syntax: glob
+
+*.pyc

File .makefiles/Makefile

+TOP_DIR ?= $(realpath .)
+BUILD   ?= $(TOP_DIR)/build
+
+# Default goal: list the available commands.
+help:
+	@echo "Please use \`make <target>\` where <target> is one of:"
+	@echo " help     to show this help message  "
+	@echo " clean    to remove all generated output "
+	@echo " rst-html to make all README.html files" 
+	@echo " watch    to continuously make rst-html as README.rst changes"
+
+include .makefiles/rst2html.mk
+
+#doc:
+#	$(MAKE) -C doc html
+
+# Remove all generated output.  The patterns passed to find are quoted so the
+# shell does not glob-expand them against files in the current directory, and
+# rm -f keeps clean from failing when version.py has not been generated yet.
+clean: clean.rst
+	rm -rf build
+	find . -name '*.pyc' -exec rm {} \;
+	find . -name '*.pyo' -exec rm {} \;
+	rm -f superfluid/version.py
+#	$(MAKE) -C doc clean
+#	find . -name ".README.html" -exec rm {} \;
+
+.PHONY: help clean

File .makefiles/rst2html.mk

+# Documentation.  Automatic compilation of the README.rst files to aid editing.
+# The watch target will continually run make, generating files as they change,
+# optionally opening them in a browser.
+
+RST2HTML ?= rst2html.py
+README   ?= README.rst
+
+TOP_DIR         ?= $(realpath .)
+RST_HTML_INPUT  ?= $(TOP_DIR)
+RST_HTML_OUTPUT ?= $(TOP_DIR)
+
+# Simple (:=) assignment so the shell is forked only once, not on every use.
+uname := $(shell uname)
+
+ifeq ($(uname), Darwin)
+  # Deliberately recursive (?=): $@ and $< are only meaningful when this is
+  # expanded inside the recipe below.
+  AUTO_OPEN       ?= open $@; sleep 1; open $<
+endif
+
+# Update the following to contain all files that should be built by watch.
+all_readmes := $(shell find "$(RST_HTML_INPUT)" -type f -name "$(README)")
+all_html    := $(patsubst $(RST_HTML_INPUT)/%.rst, $(RST_HTML_OUTPUT)/%.html, $(all_readmes))
+
+$(RST_HTML_OUTPUT)/%.html: $(RST_HTML_INPUT)/%.rst
+	$(RST2HTML) $< > $@
+	$(AUTO_OPEN)
+
+# Poll once per second and rebuild whatever is out of date.  Use $(MAKE), not
+# a literal "make", so flags and the jobserver propagate to the sub-make.
+watch:
+	while sleep 1; do $(MAKE) -q rst-html || $(MAKE) rst-html ; done
+
+rst-html: $(all_html)
+
+.PHONY: watch rst-html debug.rst clean.rst
+
+clean.rst:
+	rm -f $(all_html)
+
+debug.rst:
+	@echo "RST_HTML_INPUT = $(RST_HTML_INPUT)"
+	@echo "RST_HTML_OUTPUT = $(RST_HTML_OUTPUT)"
+	@echo "all_readmes = $(all_readmes)"
+	@echo "all_html = $(all_html)"
+# Top level makefile for convenience.  All the details are in the .makefiles
+# directory.  Run the following for a list of commands:
+#
+#     make help
+
+include .makefiles/Makefile
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Docutils 0.9.1: http://docutils.sourceforge.net/" />
+<title>Notch Filtering for Tinnitus Treatment</title>
+<style type="text/css">
+
+/*
+:Author: David Goodger (goodger@python.org)
+:Id: $Id: html4css1.css 7434 2012-05-11 21:06:27Z milde $
+:Copyright: This stylesheet has been placed in the public domain.
+
+Default cascading style sheet for the HTML output of Docutils.
+
+See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to
+customize this style sheet.
+*/
+
+/* used to remove borders from tables and images */
+.borderless, table.borderless td, table.borderless th {
+  border: 0 }
+
+table.borderless td, table.borderless th {
+  /* Override padding for "table.docutils td" with "! important".
+     The right padding separates the table cells. */
+  padding: 0 0.5em 0 0 ! important }
+
+.first {
+  /* Override more specific margin styles with "! important". */
+  margin-top: 0 ! important }
+
+.last, .with-subtitle {
+  margin-bottom: 0 ! important }
+
+.hidden {
+  display: none }
+
+a.toc-backref {
+  text-decoration: none ;
+  color: black }
+
+blockquote.epigraph {
+  margin: 2em 5em ; }
+
+dl.docutils dd {
+  margin-bottom: 0.5em }
+
+object[type="image/svg+xml"], object[type="application/x-shockwave-flash"] {
+  overflow: hidden;
+}
+
+/* Uncomment (and remove this text!) to get bold-faced definition list terms
+dl.docutils dt {
+  font-weight: bold }
+*/
+
+div.abstract {
+  margin: 2em 5em }
+
+div.abstract p.topic-title {
+  font-weight: bold ;
+  text-align: center }
+
+div.admonition, div.attention, div.caution, div.danger, div.error,
+div.hint, div.important, div.note, div.tip, div.warning {
+  margin: 2em ;
+  border: medium outset ;
+  padding: 1em }
+
+div.admonition p.admonition-title, div.hint p.admonition-title,
+div.important p.admonition-title, div.note p.admonition-title,
+div.tip p.admonition-title {
+  font-weight: bold ;
+  font-family: sans-serif }
+
+div.attention p.admonition-title, div.caution p.admonition-title,
+div.danger p.admonition-title, div.error p.admonition-title,
+div.warning p.admonition-title {
+  color: red ;
+  font-weight: bold ;
+  font-family: sans-serif }
+
+/* Uncomment (and remove this text!) to get reduced vertical space in
+   compound paragraphs.
+div.compound .compound-first, div.compound .compound-middle {
+  margin-bottom: 0.5em }
+
+div.compound .compound-last, div.compound .compound-middle {
+  margin-top: 0.5em }
+*/
+
+div.dedication {
+  margin: 2em 5em ;
+  text-align: center ;
+  font-style: italic }
+
+div.dedication p.topic-title {
+  font-weight: bold ;
+  font-style: normal }
+
+div.figure {
+  margin-left: 2em ;
+  margin-right: 2em }
+
+div.footer, div.header {
+  clear: both;
+  font-size: smaller }
+
+div.line-block {
+  display: block ;
+  margin-top: 1em ;
+  margin-bottom: 1em }
+
+div.line-block div.line-block {
+  margin-top: 0 ;
+  margin-bottom: 0 ;
+  margin-left: 1.5em }
+
+div.sidebar {
+  margin: 0 0 0.5em 1em ;
+  border: medium outset ;
+  padding: 1em ;
+  background-color: #ffffee ;
+  width: 40% ;
+  float: right ;
+  clear: right }
+
+div.sidebar p.rubric {
+  font-family: sans-serif ;
+  font-size: medium }
+
+div.system-messages {
+  margin: 5em }
+
+div.system-messages h1 {
+  color: red }
+
+div.system-message {
+  border: medium outset ;
+  padding: 1em }
+
+div.system-message p.system-message-title {
+  color: red ;
+  font-weight: bold }
+
+div.topic {
+  margin: 2em }
+
+h1.section-subtitle, h2.section-subtitle, h3.section-subtitle,
+h4.section-subtitle, h5.section-subtitle, h6.section-subtitle {
+  margin-top: 0.4em }
+
+h1.title {
+  text-align: center }
+
+h2.subtitle {
+  text-align: center }
+
+hr.docutils {
+  width: 75% }
+
+img.align-left, .figure.align-left, object.align-left {
+  clear: left ;
+  float: left ;
+  margin-right: 1em }
+
+img.align-right, .figure.align-right, object.align-right {
+  clear: right ;
+  float: right ;
+  margin-left: 1em }
+
+img.align-center, .figure.align-center, object.align-center {
+  display: block;
+  margin-left: auto;
+  margin-right: auto;
+}
+
+.align-left {
+  text-align: left }
+
+.align-center {
+  clear: both ;
+  text-align: center }
+
+.align-right {
+  text-align: right }
+
+/* reset inner alignment in figures */
+div.align-right {
+  text-align: inherit }
+
+/* div.align-center * { */
+/*   text-align: left } */
+
+ol.simple, ul.simple {
+  margin-bottom: 1em }
+
+ol.arabic {
+  list-style: decimal }
+
+ol.loweralpha {
+  list-style: lower-alpha }
+
+ol.upperalpha {
+  list-style: upper-alpha }
+
+ol.lowerroman {
+  list-style: lower-roman }
+
+ol.upperroman {
+  list-style: upper-roman }
+
+p.attribution {
+  text-align: right ;
+  margin-left: 50% }
+
+p.caption {
+  font-style: italic }
+
+p.credits {
+  font-style: italic ;
+  font-size: smaller }
+
+p.label {
+  white-space: nowrap }
+
+p.rubric {
+  font-weight: bold ;
+  font-size: larger ;
+  color: maroon ;
+  text-align: center }
+
+p.sidebar-title {
+  font-family: sans-serif ;
+  font-weight: bold ;
+  font-size: larger }
+
+p.sidebar-subtitle {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+p.topic-title {
+  font-weight: bold }
+
+pre.address {
+  margin-bottom: 0 ;
+  margin-top: 0 ;
+  font: inherit }
+
+pre.literal-block, pre.doctest-block, pre.math, pre.code {
+  margin-left: 2em ;
+  margin-right: 2em }
+
+pre.code .ln { /* line numbers */
+  color: grey;
+}
+
+.code {
+  background-color: #eeeeee
+}
+
+span.classifier {
+  font-family: sans-serif ;
+  font-style: oblique }
+
+span.classifier-delimiter {
+  font-family: sans-serif ;
+  font-weight: bold }
+
+span.interpreted {
+  font-family: sans-serif }
+
+span.option {
+  white-space: nowrap }
+
+span.pre {
+  white-space: pre }
+
+span.problematic {
+  color: red }
+
+span.section-subtitle {
+  /* font-size relative to parent (h1..h6 element) */
+  font-size: 80% }
+
+table.citation {
+  border-left: solid 1px gray;
+  margin-left: 1px }
+
+table.docinfo {
+  margin: 2em 4em }
+
+table.docutils {
+  margin-top: 0.5em ;
+  margin-bottom: 0.5em }
+
+table.footnote {
+  border-left: solid 1px black;
+  margin-left: 1px }
+
+table.docutils td, table.docutils th,
+table.docinfo td, table.docinfo th {
+  padding-left: 0.5em ;
+  padding-right: 0.5em ;
+  vertical-align: top }
+
+table.docutils th.field-name, table.docinfo th.docinfo-name {
+  font-weight: bold ;
+  text-align: left ;
+  white-space: nowrap ;
+  padding-left: 0 }
+
+h1 tt.docutils, h2 tt.docutils, h3 tt.docutils,
+h4 tt.docutils, h5 tt.docutils, h6 tt.docutils {
+  font-size: 100% }
+
+ul.auto-toc {
+  list-style-type: none }
+
+</style>
+</head>
+<body>
+<div class="document" id="notch-filtering-for-tinnitus-treatment">
+<h1 class="title">Notch Filtering for Tinnitus Treatment</h1>
+
+<!-- -*- rst -*- -*- restructuredtext -*- -->
+<!-- This file should be written using the restructure text -->
+<!-- conventions.  It will be displayed on the bitbucket source page and -->
+<!-- serves as the documentation of the directory. -->
+<p>There is some <a class="reference external" href="http://www.plosone.org/article/info:doi/10.1371/journal.pone.0024685">clinical evidence</a> that notch filtering music can be useful for
+treating tinnitus.</p>
+<div class="section" id="tasks">
+<h1>Tasks</h1>
+<ol class="arabic">
+<li><p class="first">Determine the patient's frequency response.</p>
+<p>Provide a GUI for determining the patient's frequency response.  The GUI
+should allow the user to adjust the amplitude of a sinusoidal signal of
+various frequencies until they can just barely hear it.  There should be a
+simple mode, where the system picks the frequencies, and the user simply has
+a single slider that they adjust, and a more advanced mode where the current
+response is displayed and can be adjusted.</p>
+<p>The end result of this is a list of frequencies and amplitudes defining the
+user's hearing threshold.  Note that the absolute value of the amplitudes
+will depend on the volume adjustment of the computer.  Repeating the test at
+a different volume setting should reproduce different results that can be
+brought into alignment by scaling. (This should be checked.)  It should also
+be independent of the headphone/speaker setup.</p>
+<div class="warning">
+<p class="first admonition-title">Warning</p>
+<p class="last">Poor quality sound systems and speakers/headphones can really
+muck this up. One should at least do an aliasing check such as suggested
+<a class="reference external" href="http://www.audiocheck.net/audiotests_aliasing.php">here</a>.  Playing the appropriate sound files should produce a continuously
+descending tone - any perception of ascending tones is due to aliasing
+artifacts from a cheap sound card.</p>
+</div>
+</li>
+</ol>
+<blockquote>
+<p>Finally, one should probably reproduce the test with various levels of
+background noise as the results are likely to depend on the presence of
+noise, and the resulting filter might want to be designed to take this into
+account.</p>
+<div class="note">
+<p class="first admonition-title">Note</p>
+<p class="last">Note that once this is established, the same setup can be used to
+test the quality of headphones.  Or a mic can be used, but the frequency
+response of the mic must also be established and removed.</p>
+</div>
+</blockquote>
+<ol class="arabic" start="2">
+<li><p class="first">Determine the patient's tinnitus frequency, range, and intensity.</p>
+<p>This is more difficult to do accurately.  The patient will need to be able to
+adjust the frequency of the pitch and the amplitude.  To simplify the
+procedure, you might like to do this as a bisection search. &quot;Which sounds
+more like your tinnitus, A, B, or can't tell?&quot; where signals A and B have
+different pitch or different intensity.  This should be repeated several
+times, again with various noise backgrounds.</p>
+<p>The intensity should be determined relative to the patient's frequency
+response and must thus be done under the same conditions (volume setting,
+headphones etc.) as 1).</p>
+</li>
+<li><p class="first">Design the filter.</p>
+<p>The filter design should be fairly simple at this point.  Include a smooth
+overall gain to compensate for hearing loss, and provide a notch filter to
+compensate for the tinnitus.</p>
+<div class="warning">
+<p class="first admonition-title">Warning</p>
+<p class="last">Include a limit to the amount you amplify the signal by in the
+high-frequency region.  The patient's hearing loss may prevent them from
+hearing a large amplification at high frequencies, but it might still be
+damaging to their hearing.  (I have no evidence either way: it is just
+probably best to err on the side of caution here.)</p>
+</div>
+</li>
+<li><p class="first">Test the filter.</p>
+<p>Generate noise with a power level greater than or equal to the tinnitus
+strength in the tinnitus region.  Applying the filter to this noise should
+completely mask the tinnitus.  Lower amplitude noise should also mask the
+tinnitus, but maybe not completely.  (Though it might mask it completely.)</p>
+<p>Feedback could be obtained from the user at this point, adjusting the
+tinnitus location, width, and the noise background to refine the filter.  My
+guess is it will be easier for the user to answer &quot;With which sound do you
+hear less tinnitus: A or B?&quot; than the previous ones if the tinnitus almost
+vanishes in one of them.</p>
+</li>
+</ol>
+</div>
+<div class="section" id="fourier-analysis">
+<h1>Fourier Analysis</h1>
+<p>Our first task is to see the nature of a signal, so we can understand if the
+notch filter is working.  Note that simply applying an FFT to the full signal
+will produce a very noisy frequency spectrum.  The noise can be reduced by
+either smoothing, or applying a window.</p>
+<!-- .................... .. -->
+<!-- References and links .. -->
+<!-- .................... .. -->
+</div>
+</div>
+</body>
+</html>
+.. -*- rst -*- -*- restructuredtext -*-
+
+.. This file should be written using the restructure text
+.. conventions.  It will be displayed on the bitbucket source page and
+.. serves as the documentation of the directory.
+
+========================================
+ Notch Filtering for Tinnitus Treatment
+========================================
+
+There is some `clinical evidence`__ that notch filtering music can be useful for
+treating tinnitus.
+
+__ http://www.plosone.org/article/info:doi/10.1371/journal.pone.0024685
+
+Tasks
+=====
+
+1) Determine the patient's frequency response.
+
+   Provide a GUI for determining the patient's frequency response.  The GUI
+   should allow the user to adjust the amplitude of a sinusoidal signal of
+   various frequencies until they can just barely hear it.  There should be a
+   simple mode, where the system picks the frequencies, and the user simply has
+   a single slider that they adjust, and a more advanced mode where the current
+   response is displayed and can be adjusted.
+
+   The end result of this is a list of frequencies and amplitudes defining the
+   user's hearing threshold.  Note that the absolute value of the amplitudes
+   will depend on the volume adjustment of the computer.  Repeating the test at
+   a different volume setting should reproduce different results that can be
+   brought into alignment by scaling. (This should be checked.)  It should also
+   be independent of the headphone/speaker setup.
+
+   .. warning:: Poor quality sound systems and speakers/headphones can really
+      muck this up. One should at least do an aliasing check such as suggested
+      here__.  Playing the appropriate sound files should produce a continuously
+      descending tone - any perception of ascending tones is due to aliasing
+      artifacts from a cheap sound card.
+
+__ http://www.audiocheck.net/audiotests_aliasing.php
+
+   Finally, one should probably reproduce the test with various levels of
+   background noise as the results are likely to depend on the presence of
+   noise, and the resulting filter might want to be designed to take this into
+   account.
+
+   .. note:: Note that once this is established, the same setup can be used to
+      test the quality of headphones.  Or a mic can be used, but the frequency
+      response of the mic must also be established and removed.
+
+2) Determine the patient's tinnitus frequency, range, and intensity.
+
+   This is more difficult to do accurately.  The patient will need to be able to
+   adjust the frequency of the pitch and the amplitude.  To simplify the
+   procedure, you might like to do this as a bisection search. "Which sounds
+   more like your tinnitus, A, B, or can't tell?" where signals A and B have
+   different pitch or different intensity.  This should be repeated several
+   times, again with various noise backgrounds.
+
+   The intensity should be determined relative to the patient's frequency
+   response and must thus be done under the same conditions (volume setting,
+   headphones etc.) as 1).
+
+3) Design the filter.
+
+   The filter design should be fairly simple at this point.  Include a smooth
+   overall gain to compensate for hearing loss, and provide a notch filter to
+   compensate for the tinnitus.
+
+   .. warning:: Include a limit to the amount you amplify the signal by in the
+      high-frequency region.  The patient's hearing loss may prevent them from
+      hearing a large amplification at high frequencies, but it might still be
+      damaging to their hearing.  (I have no evidence either way: it is just
+      probably best to err on the side of caution here.)
+
+4) Test the filter.
+
+   Generate noise with a power level greater than or equal to the tinnitus
+   strength in the tinnitus region.  Applying the filter to this noise should
+   completely mask the tinnitus.  Lower amplitude noise should also mask the
+   tinnitus, but maybe not completely.  (Though it might mask it completely.)
+
+   Feedback could be obtained from the user at this point, adjusting the
+   tinnitus location, width, and the noise background to refine the filter.  My
+   guess is it will be easier for the user to answer "With which sound do you
+   hear less tinnitus: A or B?" than the previous ones if the tinnitus almost
+   vanishes in one of them.
+   
+
+Fourier Analysis
+================
+Our first task is to see the nature of a signal, so we can understand if the
+notch filter is working.  Note that simply applying an FFT to the full signal
+will produce a very noisy frequency spectrum.  The noise can be reduced by
+either smoothing, or applying a window.
+
+.. .................... ..
+.. References and links ..
+.. .................... ..
+
+.. |virtualenv.py| replace:: ``virtualenv.py``
+.. _virtualenv.py: https://raw.github.com/pypa/virtualenv/master/virtualenv.py
+
+.. |EPD| replace:: Enthough Python Distribution
+.. _EPD: http://www.enthought.com/products/epd.php
+
+.. _mercurial: http://mercurial.selenic.com/
+.. _virtualenv: http://www.virtualenv.org/en/latest/
+.. _IPython: http://ipython.org/
+.. _Ipython notebook: \
+   http://ipython.org/ipython-doc/dev/interactive/htmlnotebook.html
+.. |pip| replace:: ``pip``
+.. _pip: http://www.pip-installer.org/
+.. _git: http://git-scm.com/
+.. _github: https://github.com
+.. _RunSnakeRun: http://www.vrplumber.com/programming/runsnakerun/
+.. _GSL: http://www.gnu.org/software/gsl/
+.. _pygsl: https://bitbucket.org/mforbes/pygsl
+.. _Sphinx: http://sphinx-doc.org/
+.. _SciPy: http://www.scipy.org/
+.. _NumPy: http://numpy.scipy.org/
+.. _Python: http://www.python.org/
+.. _Matlab: http://www.mathworks.com/products/matlab/
+r"""Analysis tools.
+"""
+from __future__ import division
+
+from numpy import fft
+import numpy as np
+
+import scipy.signal
+import scipy.interpolate
+sp = scipy
+
+from matplotlib import pyplot as plt
+
+_Hz = 2.0 * np.pi
+    
+
+class AnalyzeSignal(object):
+    r"""Plot a signal together with its (windowed) power spectrum.
+
+    Python 2 code (uses the print statement and xrange).
+    """
+    def __init__(self, rate=44100, fignum=1):
+        # rate: sample rate in samples per second.
+        # fignum: matplotlib figure number to draw into.
+        self.rate = rate
+        self.fignum = fignum
+        # Highest frequency of interest, in angular units (rad/s).
+        self.max_freq = 14000 * _Hz
+        self.dt = 1. / self.rate
+
+    def analyze(self, y, window='hamming'):
+        r"""Plot ``y`` vs time and its power spectrum (normalized to peak 1).
+
+        The signal is split into ``skip`` interleaved sub-samples whose
+        windowed FFTs are averaged, which reduces noise in the spectrum.
+        """
+        skip = max(1, int(np.ceil(self.max_freq * self.dt)))
+        # NOTE(review): the computed value is immediately overridden and a
+        # debugging print is left in -- looks like work in progress.
+        skip = 1
+        print skip
+        Ns = len(y) // skip
+        win = sp.signal.get_window(window=window, Nx=Ns)
+        yf = 0
+        # Average the FFTs of the interleaved sub-samples.
+        for _i in xrange(skip):
+            yf = yf + fft.fft(y[_i::skip][:Ns] * win)
+        yf /= skip
+
+        freqs = fft.fftfreq(n=Ns, d=skip * self.dt)
+        power = abs(yf)**2
+        power /= power.max()        # normalize so the peak is 1
+
+        #spower = sp.interpolate.UnivariateSpline(x=freqs, y=power)(freqs)
+        t = self.dt * np.arange(len(y))
+        
+        plt.figure(self.fignum, figsize=(15, 5))
+        plt.clf()
+        plt.subplot(121)
+
+        # Left panel: the time-domain signal.
+        plt.plot(t, y)
+        plt.xlabel('t (s)')
+        plt.ylabel('y')
+
+        plt.subplot(122)
+        # Right panel: power spectrum; sort so frequencies are monotonic, and
+        # add a small floor (1e-10) so semilogy never sees zero.
+        inds = np.argsort(freqs)
+        plt.semilogy(freqs[inds], power[inds] + 1e-10, '-')
+        #plt.semilogy(freqs[inds], spower[inds] + 1e-10, '-')
+        #plt.ylim([0, 1e-1])
+        plt.xlabel('freq (Hz)')
+        plt.ylabel('power (rel)')
+        plt.draw()
+
+    def get_t(self, T):
+        r"""Return the array of sample times covering ``T`` seconds."""
+        N = int(T / self.dt)
+        t = self.dt * np.arange(N)
+        return t
+
+
+def noise_example(T=1):
+    r"""Analyze ``T`` seconds of a 440 Hz square wave; return the analyzer."""
+    a = AnalyzeSignal()
+    N = int(T / a.dt)   # NOTE(review): unused unless the commented line below is enabled
+    t = a.get_t(T)
+    y = np.sin(440 * _Hz * t)
+    y = np.sign(y)      # square wave at 440 Hz
+    #np.random.seed(1)
+    #y = (np.random.rand(N) - 0.5)*np.sin(440 * _Hz * t)
+    a.analyze(y)
+    return a
+r"""Filter Design."""
+from __future__ import division
+
+import math
+
+from numpy import fft
+import numpy as np
+
+import scipy.signal
+import scipy.interpolate
+sp = scipy
+
+from matplotlib import pyplot as plt
+
+_Hz = 2.0 * np.pi
+
+
+class Filter(object):
+    r"""Base class for FIR filters defined by ``self.taps`` (set by subclasses)."""
+    def __init__(self, rate=44100):
+        # rate: sample rate in samples/s; nyq: Nyquist frequency in Hz.
+        self.rate = rate
+        self.nyq = rate / 2.0
+
+    def filter(self, y):
+        r"""Return ``y`` filtered by the FIR taps (denominator 1)."""
+        y_filtered = sp.signal.lfilter(self.taps, 1.0, y)
+        return y_filtered
+
+    def respose(self):  # NOTE(review): typo for "response"; kept for API compatibility
+        r"""Compute the response of the filter.
+
+        Returns the frequencies (converted from rad/sample to Hz) and the
+        complex frequency response from ``freqz``.
+        """
+        w, a = sp.signal.freqz(self.taps, 1.0)
+        return 2.0 * self.nyq * w / (2 * np.pi), a
+
+
+class Notch(Filter):
+    r"""FIR notch filter with zero gain over [freq/width, freq*width] Hz."""
+    def __init__(self, freq=7688.0, width=math.sqrt(2), numtaps=100,
+                 max_freq=9000, **kw):
+        # NOTE(review): max_freq is accepted but never used.
+        Filter.__init__(self, **kw)
+        f0 = freq / width   # lower edge of the notch (Hz)
+        f1 = freq * width   # upper edge of the notch (Hz)
+        self.freq = np.array([0, f0, f0, f1, f1, self.nyq, self.nyq])
+        #self.nyq = 1.0
+        #self.freq = np.array([0, 0.5, 0.5, 0.7, 0.7, 1.0]) * self.nyq
+        # Gain is 1 outside the notch and 0 inside; repeated frequencies give
+        # sharp transitions.  The trailing 0 makes the gain vanish at Nyquist,
+        # which firwin2 requires for an even number of taps.
+        self.gain = [1.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0]
+        self.numtaps = numtaps
+        self.taps = sp.signal.firwin2(
+            numtaps=self.numtaps, freq=self.freq, gain=self.gain,
+            nyq=self.nyq)
+
+r"""Module to generate and play various sounds.
+
+We use callbacks to generate the sounds asynchronously so that the UI thread can
+remain responsive without affecting the sound.
+
+A problem with blocking calls is that the delay between the end of the blocking
+call and the start of the next call creates a perceptible frequency shift.  This
+would need to be compensated for.
+"""
+from __future__ import division
+
+import time
+import threading
+
+import numpy as np
+
+import pyaudio
+
+
+class SoundBase(object):
+    r"""Base class.
+
+    Opens the audio object and performs initializations.  Subclasses provide
+    the sample data via ``get_sample``/``compute_sample``.
+    """
+    channels = 1                   # mono output
+    format = pyaudio.paFloat32     # 32-bit float samples
+
+    def __init__(self, rate=44100, timeout=0.1,
+                 sample_length=0.1):
+        r"""
+        Parameters
+        ----------
+        rate : int
+           Sample rate (samples per second).
+        timeout : float
+           Polling interval (seconds) used by the non-blocking play loop.
+        sample_length : float
+           NOTE(review): accepted but currently unused; ``compute_sample``
+           derives its own sample length.
+        """
+        self.rate = rate
+        self.pyaudio = pyaudio.PyAudio()
+        self.threads = []
+        self.timeout = timeout
+
+    def __del__(self):
+        # Release PortAudio resources when the object is garbage-collected.
+        self.pyaudio.terminate()
+
+    def play(self, blocking=pyaudio.__version__ < "0.2.7"):
+        r"""Start playback in a background thread.
+
+        Defaults to the blocking interface only for pyaudio older than 0.2.7,
+        where the callback API is unavailable.  NOTE(review): this is a
+        lexicographic string comparison, which misorders versions such as
+        "0.2.10" -- confirm against a proper version parse.
+        """
+        if blocking:
+            thread = threading.Thread(target=self._play_blocking)
+        else:
+            thread = threading.Thread(target=self._play_non_blocking)
+        thread.start()
+        
+    def _play_blocking(self):
+        r"""Write samples to the stream in a loop until ``self.playing`` is cleared."""
+        stream = self.pyaudio.open(
+            rate=self.rate, channels=self.channels,
+            format=self.format, output=True)
+        self.playing = True
+        try:
+            while self.playing:
+                stream.write(self.get_sample())
+        finally:
+            stream.close()
+
+    def _play_non_blocking(self):
+        r"""Requires pyaudio 0.2.7 or higher (the callback API).
+
+        NOTE(review): pyaudio stream callbacks are called with
+        ``(in_data, frame_count, time_info, status)`` and must return
+        ``(data, flag)``; ``get_sample`` does not match that signature --
+        likely why this path is untested.
+        """
+        stream = self.pyaudio.open(
+            rate=self.rate, channels=self.channels,
+            format=self.format, output=True,
+            stream_callback=self.get_sample)
+        self.playing = True
+        try:
+            stream.start_stream()
+            while self.playing and stream.is_active():
+                time.sleep(self.timeout)
+        finally:
+            stream.stop_stream()
+            stream.close()
+        
+    def get_sample(self):
+        r"""Return the sample as a string."""
+        #if not hasattr(self, 'sample'):
+        #    self.compute_sample()
+        return self.sample
+
+    def compute_sample(self):
+        r"""Compute one cycle of a 440 Hz sine at amplitude 0.25 as float32 bytes."""
+        self.freq = 440.0
+        if False:
+            self.sample_length = 0.1
+            self.cycles = int(self.freq * self.sample_length)
+        else:
+            self.cycles = 1
+        self.sample_length = self.cycles / self.freq
+        # NOTE(review): rate * sample_length is a float; np.arange accepts it,
+        # but the sample count may be off by one -- consider int rounding.
+        self.samples = self.rate * self.sample_length
+        self.t = np.arange(self.samples) * self.sample_length / self.samples
+        self.y = (0.25 * np.sin(self.t * 2 * np.pi * self.freq)
+                  ).astype(np.float32)
+        self.sample = self.y.tostring()
+
+
+class PureBlock(SoundBase):
+    r"""Pure tone intended for the blocking interface."""
+    def __init__(self, *v, **kw):
+        SoundBase.__init__(self, *v, **kw)
+    
+    def get_sample(self, repeat=1):
+        # NOTE(review): missing `return` -- this evaluates self.sample and
+        # returns None, so the blocking play loop would write None.
+        self.sample
 """
 from __future__ import division
 
+from threading import Thread
+
 import numpy as np
 
 from matplotlib import pyplot as plt
     r"""Represents the noise and applied filters."""
     
     def __init__(self, *v, **kw):
-        self.t = np.linspace(0, 1, 100)
+        self.t = np.arange(self.N) * self.T / self.N
+        plt.figure(1, figsize=(12, 5))
         plt.clf()
+        plt.subplot(121)
         self.line = plt.plot(self.t, 0 * self.t)[0]
         plt.ylim([-1, 1])
         plt.xlabel('t')
         plt.ylabel('y')
+
+        plt.subplot(122)
+        dt = 1.0/self.N
+        self.w = np.fft.fftfreq(self.N, dt)
+        self.fft = plt.loglog(self.w, 1 + 0*self.w)[0]
+        plt.xlabel('w (Hz)')
+        plt.ylabel('FFT (Power)')
+
+        self.y = self.amplitude * np.random.rand(self.N)
         HasTraits.__init__(self, *v, **kw)
 
+    N = 44100                   # Sample rate
+    T = 1.0                     # Length of clip
+
     amplitude = Range(0.0, 1.0)
     phase = Range(-1.0, 1.0)
     freq = Range(1.0, 10.0)
+    play = Bool
 
     traits_view = View(Item(name='amplitude'),
                        Item(name='phase'),
-                       Item(name='freq'))
+                       Item(name='freq'),
+                       Item(name='play'))
 
     @on_trait_change('amplitude, phase, freq')
     def update_plot(self):
-        y = self.amplitude * np.sin(
-            (self.t * self.freq + self.phase) * 2 * np.pi)
-        self.line.set_data(self.t, y)
+        #self.y = self.amplitude * np.sin(
+        #    (self.t * self.freq + self.phase) * 2 * np.pi)
+        self.y = self.amplitude * np.random.rand(self.N)
+        self.line.set_data(self.t, self.y)
         plt.draw()
 
+    @on_trait_change('amplitude, phase, freq')
+    def update_fft(self):
+        yf = np.fft.fft(self.y)
+        P = abs(yf)**2
+        P /= P.max()
+        self.fft.set_data(self.w, P)
+        plt.draw()
+
+    def play_sound(self):
+        pass
 
 noise = Noise(
     amplitude=0.5,
     phase=0,
     freq=4.0)
 
-noise.configure_traits()
+#noise.configure_traits()