Commits

Matt Chaput committed a6fbf6f

Converted tests to use py.test instead of nose.

  • Parent commits 76684ac
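
The change is largely mechanical: nose's assert_equal/assert_not_equal
helpers become plain assert statements (py.test's assertion rewriting
reports the compared values on failure), assert_raises becomes the
pytest.raises context manager, and skip_if_unavailable becomes
pytest.importorskip. A minimal sketch of the pattern, using illustrative
names rather than code from this commit:

    def tokenize(text):               # stand-in function for illustration
        return text.split()

    # nose style
    from nose.tools import assert_equal, assert_raises
    assert_equal(tokenize("a b"), ["a", "b"])
    assert_raises(AttributeError, tokenize, None)

    # py.test style
    import pytest
    assert tokenize("a b") == ["a", "b"]
    with pytest.raises(AttributeError):
        tokenize(None)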

Files changed (34)

File .idea/testrunner.xml

 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="TestRunnerService">
-    <option name="projectConfiguration" value="Nosetests" />
-    <option name="PROJECT_TEST_RUNNER" value="Nosetests" />
+    <option name="projectConfiguration" value="py.test" />
+    <option name="PROJECT_TEST_RUNNER" value="py.test" />
   </component>
 </project>
 
 
File setup.py

 import os.path, sys
 from setuptools import setup, find_packages
+from setuptools.command.test import test as TestCommand
 
 sys.path.insert(0, os.path.abspath("src"))
 from whoosh import __version__, versionstring
 
+
+class PyTest(TestCommand):
+    def finalize_options(self):
+        TestCommand.finalize_options(self)
+        self.test_args = []
+        self.test_suite = True
+
+    def run_tests(self):
+        # Import here because outside the test command the eggs aren't loaded.
+        import pytest
+        pytest.main(self.test_args)
+
+
 if __name__ == "__main__":
     setup(
         name="Whoosh",
         url="http://bitbucket.org/mchaput/whoosh",
 
         zip_safe=True,
-        test_suite="nose.collector",
+        tests_require=['pytest'],
+        cmdclass={'test': PyTest},
 
         classifiers=[
         "Development Status :: 5 - Production/Stable",

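
With this cmdclass hook in place, the suite is expected to run through
setuptools, with pytest imported only inside run_tests so that setup.py
still loads on machines without pytest installed:

    python setup.py test

Running py.test directly against the tests directory should be
equivalent:

    py.test tests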
File src/whoosh/query/qcore.py

     # in this query
     error = None
 
+    def __unicode__(self):
+        raise NotImplementedError
+
+    def __getitem__(self, item):
+        raise NotImplementedError
+
     def __or__(self, query):
         """Allows you to use | between query objects to wrap them in an Or
         query.
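
Raising NotImplementedError here makes __unicode__ and __getitem__ an
explicit part of the Query interface instead of surfacing as an
AttributeError in subclasses that forget them. A hypothetical subclass
(illustrative only, not part of this commit) would override them along
these lines:

    from whoosh.query import qcore

    class ListQuery(qcore.Query):
        """Toy compound query holding a list of subqueries."""

        def __init__(self, subqueries):
            self.subqueries = subqueries

        def __unicode__(self):
            # Human-readable form of the query tree
            return "(%s)" % " ".join(str(q) for q in self.subqueries)

        def __getitem__(self, item):
            # Indexing into the query returns the nth subquery
            return self.subqueries[item]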

File tests/test_analysis.py

 
 from __future__ import with_statement
 
-from nose.tools import assert_equal, assert_not_equal  # @UnresolvedImport
-from nose.tools import assert_raises  # @UnresolvedImport
+import pytest
 
 from whoosh import analysis, fields, qparser
 from whoosh.compat import u, unichr, text_type
     value = u("AAAaaaBBBbbbCCCcccDDDddd")
 
     rex = analysis.RegexTokenizer("[A-Z]+")
-    assert_equal([t.text for t in rex(value)], ["AAA", "BBB", "CCC", "DDD"])
+    assert [t.text for t in rex(value)] == ["AAA", "BBB", "CCC", "DDD"]
 
     rex = analysis.RegexTokenizer("[A-Z]+", gaps=True)
-    assert_equal([t.text for t in rex(value)], ["aaa", "bbb", "ccc", "ddd"])
+    assert [t.text for t in rex(value)] == ["aaa", "bbb", "ccc", "ddd"]
 
 
 def test_path_tokenizer():
     value = u("/alfa/bravo/charlie/delta/")
     pt = analysis.PathTokenizer()
-    assert_equal([t.text for t in pt(value)], ["/alfa", "/alfa/bravo",
-                                               "/alfa/bravo/charlie",
-                                               "/alfa/bravo/charlie/delta"])
+    assert [t.text for t in pt(value)] == ["/alfa", "/alfa/bravo",
+                                           "/alfa/bravo/charlie",
+                                           "/alfa/bravo/charlie/delta"]
 
 
 def test_composition1():
     ca = analysis.RegexTokenizer() | analysis.LowercaseFilter()
-    assert_equal(ca.__class__.__name__, "CompositeAnalyzer")
-    assert_equal(ca[0].__class__.__name__, "RegexTokenizer")
-    assert_equal(ca[1].__class__.__name__, "LowercaseFilter")
-    assert_equal([t.text for t in ca(u("ABC 123"))], ["abc", "123"])
+    assert ca.__class__.__name__ == "CompositeAnalyzer"
+    assert ca[0].__class__.__name__ == "RegexTokenizer"
+    assert ca[1].__class__.__name__ == "LowercaseFilter"
+    assert [t.text for t in ca(u("ABC 123"))] == ["abc", "123"]
 
 
 def test_composition2():
     ca = analysis.RegexTokenizer() | analysis.LowercaseFilter()
     sa = ca | analysis.StopFilter()
-    assert_equal(len(sa), 3)
-    assert_equal(sa.__class__.__name__, "CompositeAnalyzer")
-    assert_equal(sa[0].__class__.__name__, "RegexTokenizer")
-    assert_equal(sa[1].__class__.__name__, "LowercaseFilter")
-    assert_equal(sa[2].__class__.__name__, "StopFilter")
-    assert_equal([t.text for t in sa(u("The ABC 123"))], ["abc", "123"])
+    assert len(sa) == 3
+    assert sa.__class__.__name__ == "CompositeAnalyzer"
+    assert sa[0].__class__.__name__ == "RegexTokenizer"
+    assert sa[1].__class__.__name__ == "LowercaseFilter"
+    assert sa[2].__class__.__name__ == "StopFilter"
+    assert [t.text for t in sa(u("The ABC 123"))] == ["abc", "123"]
 
 
 def test_composition3():
     sa = analysis.RegexTokenizer() | analysis.StopFilter()
-    assert_equal(sa.__class__.__name__, "CompositeAnalyzer")
+    assert sa.__class__.__name__ == "CompositeAnalyzer"
 
 
 def test_composing_functions():
-    from operator import or_
-
     tokenizer = analysis.RegexTokenizer()
 
     def filter(tokens):
             t.text = t.text.upper()
             yield t
 
-    assert_raises(TypeError, or_, tokenizer, filter)
+    with pytest.raises(TypeError):
+        tokenizer | filter
 
 
 def test_shared_composition():
     ana1 = shared | analysis.NgramFilter(3)
     ana2 = shared | analysis.DoubleMetaphoneFilter()
 
-    assert_equal([t.text for t in ana1(u("hello"))], ["hel", "ell", "llo"])
-    assert_equal([t.text for t in ana2(u("hello"))], ["HL"])
+    assert [t.text for t in ana1(u("hello"))] == ["hel", "ell", "llo"]
+    assert [t.text for t in ana2(u("hello"))] == ["HL"]
 
 
 def test_multifilter():
     mf = analysis.MultiFilter(a=f1, b=f2)
     ana = analysis.RegexTokenizer(r"\S+") | mf
     text = u("ALFA BRAVO CHARLIE")
-    assert_equal([t.text for t in ana(text, mode="a")],
-                 ["alfa", "bravo", "charlie"])
-    assert_equal([t.text for t in ana(text, mode="b")],
-                 ["ALFA", "BRAVO", "CHARLIE"])
+    assert [t.text for t in ana(text, mode="a")] == ["alfa", "bravo", "charlie"]
+    assert [t.text for t in ana(text, mode="b")] == ["ALFA", "BRAVO", "CHARLIE"]
 
 
 def test_tee_filter():
     f2 = analysis.ReverseTextFilter()
     ana = analysis.RegexTokenizer(r"\S+") | analysis.TeeFilter(f1, f2)
     result = " ".join([t.text for t in ana(target)])
-    assert_equal(result, "alfa aflA bravo ovarB charlie eilrahC")
+    assert result == "alfa aflA bravo ovarB charlie eilrahC"
 
     class ucfilter(analysis.Filter):
         def __call__(self, tokens):
     f2 = analysis.ReverseTextFilter() | ucfilter()
     ana = analysis.RegexTokenizer(r"\S+") | analysis.TeeFilter(f1, f2)
     result = " ".join([t.text for t in ana(target)])
-    assert_equal(result, "alfa AFLA bravo OVARB charlie EILRAHC")
+    assert result == "alfa AFLA bravo OVARB charlie EILRAHC"
 
     f1 = analysis.PassFilter()
     f2 = analysis.BiWordFilter()
            | analysis.TeeFilter(f1, f2)
            | analysis.LowercaseFilter())
     result = " ".join([t.text for t in ana(target)])
-    assert_equal(result, "alfa alfa-bravo bravo bravo-charlie charlie")
+    assert result == "alfa alfa-bravo bravo bravo-charlie charlie"
 
 
 def test_intraword():
     ana = analysis.RegexTokenizer(r"\S+") | iwf
 
     def check(text, ls):
-        assert_equal([(t.pos, t.text) for t in ana(text)], ls)
+        assert [(t.pos, t.text) for t in ana(text)] == ls
 
     check(u("PowerShot)"), [(0, "Power"), (1, "Shot"), (1, "PowerShot")])
     check(u("A's+B's&C's"), [(0, "A"), (1, "B"), (2, "C"), (2, "ABC")])
     target = u("WiKiWo-rd")
     tokens = [(t.text, t.startchar, t.endchar)
               for t in ana(target, chars=True)]
-    assert_equal(tokens, [("wi", 0, 2), ("ki", 2, 4), ("wo", 4, 6),
-                          ("rd", 7, 9), ("wikiword", 0, 9)])
+    assert tokens == [("wi", 0, 2), ("ki", 2, 4), ("wo", 4, 6),
+                      ("rd", 7, 9), ("wikiword", 0, 9)]
 
     target = u("Zo WiKiWo-rd")
     tokens = [(t.text, t.startchar, t.endchar)
               for t in ana(target, chars=True)]
-    assert_equal(tokens, [("zo", 0, 2), ("wi", 3, 5), ("ki", 5, 7),
-                          ("wo", 7, 9), ("rd", 10, 12), ("wikiword", 3, 12)])
+    assert tokens == [("zo", 0, 2), ("wi", 3, 5), ("ki", 5, 7),
+                      ("wo", 7, 9), ("rd", 10, 12), ("wikiword", 3, 12)]
 
 
 def test_intraword_possessive():
     target = u("O'Malley's-Bar")
     tokens = [(t.text, t.startchar, t.endchar)
               for t in ana(target, chars=True)]
-    assert_equal(tokens, [("o", 0, 1), ("malley", 2, 8), ("bar", 11, 14),
-                          ("omalleybar", 0, 14)])
+    assert tokens == [("o", 0, 1), ("malley", 2, 8), ("bar", 11, 14),
+                      ("omalleybar", 0, 14)]
 
 
 def test_word_segments():
     ana = analysis.RegexTokenizer(r"\S+") | cwf
     target = u("alfacharlie bravodelta delto bravo subalfa")
     tokens = [t.text for t in ana(target)]
-    assert_equal(tokens, ["alfacharlie", "alfa", "charlie", "bravodelta",
-                          "bravo", "delta", "delto", "bravo", "subalfa"])
+    assert tokens == ["alfacharlie", "alfa", "charlie", "bravodelta",
+                      "bravo", "delta", "delto", "bravo", "subalfa"]
 
     cwf = analysis.CompoundWordFilter(wordset, keep_compound=False)
     ana = analysis.RegexTokenizer(r"\S+") | cwf
     target = u("alfacharlie bravodelta delto bravo subalfa")
     tokens = [t.text for t in ana(target)]
-    assert_equal(tokens, ["alfa", "charlie", "bravo", "delta", "delto",
-                          "bravo", "subalfa"])
+    assert tokens == ["alfa", "charlie", "bravo", "delta", "delto", "bravo",
+                      "subalfa"]
 
 
 def test_biword():
     ana = analysis.RegexTokenizer(r"\w+") | analysis.BiWordFilter()
     result = [t.copy() for t
               in ana(u("the sign of four"), chars=True, positions=True)]
-    assert_equal(["the-sign", "sign-of", "of-four"], [t.text for t in result])
-    assert_equal([(0, 8), (4, 11), (9, 16)], [(t.startchar, t.endchar)
-                                              for t in result])
-    assert_equal([0, 1, 2], [t.pos for t in result])
+    assert ["the-sign", "sign-of", "of-four"] == [t.text for t in result]
+    assert [(0, 8), (4, 11), (9, 16)] == [(t.startchar, t.endchar)
+                                          for t in result]
+    assert [0, 1, 2] == [t.pos for t in result]
 
     result = [t.copy() for t in ana(u("single"))]
-    assert_equal(len(result), 1)
-    assert_equal(result[0].text, "single")
+    assert len(result) == 1
+    assert result[0].text == "single"
 
 
 def test_shingles():
     ana = analysis.RegexTokenizer(r"\w+") | analysis.ShingleFilter(3, " ")
     source = u("better a witty fool than a foolish wit")
     results = [t.copy() for t in ana(source, positions=True, chars=True)]
-    assert_equal([t.text for t in results],
-                 [u('better a witty'), u('a witty fool'), u('witty fool than'),
-                  u('fool than a'), u('than a foolish'), u('a foolish wit')])
-    assert_equal([t.pos for t in results], list(range(len(results))))
+    assert [t.text for t in results] == [u('better a witty'), u('a witty fool'),
+                                         u('witty fool than'), u('fool than a'),
+                                         u('than a foolish'),
+                                         u('a foolish wit')]
+    assert [t.pos for t in results] == list(range(len(results)))
     for t in results:
-        assert_equal(t.text, source[t.startchar:t.endchar])
+        assert t.text == source[t.startchar:t.endchar]
 
 
 def test_unicode_blocks():
     from whoosh.support.unicode import blocks, blockname, blocknum
 
-    assert_equal(blockname(u('a')), 'Basic Latin')
-    assert_equal(blockname(unichr(0x0b80)), 'Tamil')
-    assert_equal(blockname(unichr(2048)), None)
-    assert_equal(blocknum(u('a')), 0)
-    assert_equal(blocknum(unichr(0x0b80)), 22)
-    assert_equal(blocknum(unichr(2048)), None)
-    assert_equal(blocknum(u('a')), blocks.Basic_Latin)  # @UndefinedVariable
-    assert_equal(blocknum(unichr(0x0b80)), blocks.Tamil)  # @UndefinedVariable
+    assert blockname(u('a')) == 'Basic Latin'
+    assert blockname(unichr(0x0b80)) == 'Tamil'
+    assert blockname(unichr(2048)) is None
+    assert blocknum(u('a')) == 0
+    assert blocknum(unichr(0x0b80)) == 22
+    assert blocknum(unichr(2048)) is None
+    assert blocknum(u('a')) == blocks.Basic_Latin  # @UndefinedVariable
+    assert blocknum(unichr(0x0b80)) == blocks.Tamil  # @UndefinedVariable
 
 
 def test_double_metaphone():
              'Parachute': ('PRKT', None),
              'Nowhere': ('NR', None),
              'Tux': ('TKS', None)}
     for name in names.keys():
         dmn = double_metaphone(name)
-    assert_equal(dmn, names[name])
+        assert dmn == names[name]
 
     mf = (analysis.RegexTokenizer()
           | analysis.LowercaseFilter()
           | analysis.DoubleMetaphoneFilter())
     results = [(t.text, t.boost) for t in mf(u("Spruce View"))]
-    assert_equal(results, [('SPRS', 1.0), ('F', 1.0), ('FF', 0.5)])
+    assert results == [('SPRS', 1.0), ('F', 1.0), ('FF', 0.5)]
 
     mf = (analysis.RegexTokenizer()
           | analysis.LowercaseFilter()
           | analysis.DoubleMetaphoneFilter(combine=True))
     results = [(t.text, t.boost) for t in mf(u("Spruce View"))]
-    assert_equal(results, [('spruce', 1.0), ('SPRS', 1.0), ('view', 1.0),
-                           ('F', 1.0), ('FF', 0.5)])
+    assert results == [('spruce', 1.0), ('SPRS', 1.0), ('view', 1.0),
+                       ('F', 1.0), ('FF', 0.5)]
 
     namefield = fields.TEXT(analyzer=mf)
     texts = list(namefield.process_text(u("Spruce View"), mode="query"))
-    assert_equal(texts, [u('spruce'), 'SPRS', u('view'), 'F', 'FF'])
+    assert texts == [u('spruce'), 'SPRS', u('view'), 'F', 'FF']
 
 
 def test_substitution():
     mf = analysis.RegexTokenizer(r"\S+") | analysis.SubstitutionFilter("-", "")
-    assert_equal([t.text for t in mf(u("one-two th-re-ee four"))],
-                 ["onetwo", "threee", "four"])
+    assert ([t.text for t in mf(u("one-two th-re-ee four"))]
+            == ["onetwo", "threee", "four"])
 
     mf = (analysis.RegexTokenizer(r"\S+")
           | analysis.SubstitutionFilter("([^=]*)=(.*)", r"\2=\1"))
-    assert_equal([t.text for t in mf(u("a=b c=d ef"))], ["b=a", "d=c", "ef"])
+    assert [t.text for t in mf(u("a=b c=d ef"))] == ["b=a", "d=c", "ef"]
 
 
 def test_delimited_attribute():
     ana = analysis.RegexTokenizer(r"\S+") | analysis.DelimitedAttributeFilter()
     results = [(t.text, t.boost) for t in ana(u("image render^2 file^0.5"))]
-    assert_equal(results, [("image", 1.0), ("render", 2.0), ("file", 0.5)])
+    assert results == [("image", 1.0), ("render", 2.0), ("file", 0.5)]
 
 
 def test_porter2():
                'plotted']
     singles = [stem(w) for w in plurals]
 
-    assert_equal(singles, ['caress', 'fli', 'die', 'mule', 'deni', 'die',
-                           'agre', 'own', 'humbl', 'size', 'meet', 'state',
-                           'siez', 'item', 'sensat', 'tradit', 'refer',
-                           'colon', 'plot'])
-    assert_equal(stem("bill's"), "bill")
-    assert_equal(stem("y's"), "y")
+    assert singles == ['caress', 'fli', 'die', 'mule', 'deni', 'die',
+                       'agre', 'own', 'humbl', 'size', 'meet', 'state',
+                       'siez', 'item', 'sensat', 'tradit', 'refer',
+                       'colon', 'plot']
+    assert stem("bill's") == "bill"
+    assert stem("y's") == "y"
 
 
-@skip_if_unavailable("Stemmer")
-def test_pystemmer():
-    ana = (analysis.RegexTokenizer()
-           | analysis.LowercaseFilter()
-           | analysis.PyStemmerFilter())
-    schema = fields.Schema(text=fields.TEXT(analyzer=ana))
-    st = RamStorage()
-
-    ix = st.create_index(schema)
-    with ix.writer() as w:
-        w.add_document(text=u("rains falling strangely"))
-
-    ix = st.open_index()
-    with ix.writer() as w:
-        w.add_document(text=u("pains stalling strongly"))
-
-    ix = st.open_index()
-    with ix.reader() as r:
-        assert_equal(list(r.field_terms("text")),
-                     ["fall", "pain", "rain", "stall", "strang", "strong"])
+#def test_pystemmer():
+#    Stemmer = pytest.importorskip("Stemmer")
+#
+#    ana = (analysis.RegexTokenizer()
+#           | analysis.LowercaseFilter()
+#           | analysis.PyStemmerFilter())
+#    schema = fields.Schema(text=fields.TEXT(analyzer=ana))
+#    st = RamStorage()
+#
+#    ix = st.create_index(schema)
+#    with ix.writer() as w:
+#        w.add_document(text=u("rains falling strangely"))
+#
+#    ix = st.open_index()
+#    with ix.writer() as w:
+#        w.add_document(text=u("pains stalling strongly"))
+#
+#    ix = st.open_index()
+#    with ix.reader() as r:
+#        assert (list(r.field_terms("text"))
+#                == ["fall", "pain", "rain", "stall", "strang", "strong"])
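
The commented-out conversion above shows the intended py.test replacement
for nose's skip-if-missing decorator: pytest.importorskip returns the
module when it imports cleanly and marks the test as skipped otherwise,
so no decorator is needed. The general translation looks like:

    # nose style: decorator skips the test when the module is missing
    @skip_if_unavailable("Stemmer")
    def test_pystemmer():
        pass

    # py.test style: importorskip skips from inside the test body
    def test_pystemmer():
        Stemmer = pytest.importorskip("Stemmer")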
 
 
 def test_url():
             analysis.StandardAnalyzer(analysis.url_pattern, stoplist=None)]
     for ana in anas:
         ts = [t.text for t in ana(sample)]
-        assert_equal(ts, [u('visit'), u('http://bitbucket.org/mchaput/whoosh'),
-                          u('or'), u('urn:isbn:5930502'), u('or'),
-                          u('http://www.apple.com/')])
+        assert ts == [u('visit'), u('http://bitbucket.org/mchaput/whoosh'),
+                      u('or'), u('urn:isbn:5930502'), u('or'),
+                      u('http://www.apple.com/')]
 
 
 def test_name_field():
     qp = qparser.QueryParser("name", schema)
     q = qp.parse(u("leaf eriksen"), normalize=False)
     r = s.search(q)
-    assert_equal(len(r), 1)
+    assert len(r) == 1
 
 
 def test_start_pos():
     ana = analysis.RegexTokenizer(r"\S+") | analysis.LowercaseFilter()
     kw = {"positions": True}
     tks = formats.tokens(u("alfa bravo charlie delta"), ana, kw)
-    assert_equal([t.pos for t in tks], [0, 1, 2, 3])
+    assert [t.pos for t in tks] == [0, 1, 2, 3]
 
     kw["start_pos"] = 3
     ts = [t.copy() for t in formats.tokens(u("A B C D").split(), ana, kw)]
-    assert_equal(" ".join([t.text for t in ts]), "A B C D")
-    assert_equal([t.pos for t in ts], [3, 4, 5, 6])
+    assert " ".join([t.text for t in ts]) == "A B C D"
+    assert [t.pos for t in ts] == [3, 4, 5, 6]
 
 
 def test_frowny_face():
     ana = analysis.RegexTokenizer(r"\S+") | analysis.IntraWordFilter()
     # text is all delimiters
     tokens = [t.text for t in ana(u(":-("))]
-    assert_equal(tokens, [])
+    assert tokens == []
 
     # text has consecutive delimiters
     tokens = [t.text for t in ana(u("LOL:)"))]
-    assert_equal(tokens, ["LOL"])
+    assert tokens == ["LOL"]
 
 
 def test_ngrams():
         return "/".join(t.text for t in tokens)
 
     f = analysis.NgramFilter(3, 4)
-    assert_equal(dotest(f), "abc/abcd/bcd/bcde/cde/cdef/def/defg/efg/klm")
+    assert dotest(f) == "abc/abcd/bcd/bcde/cde/cdef/def/defg/efg/klm"
 
     f = analysis.NgramFilter(3, 4, at="start")
-    assert_equal(dotest(f), "abc/abcd/klm")
+    assert dotest(f) == "abc/abcd/klm"
 
     f = analysis.NgramFilter(3, 4, at="end")
-    assert_equal(dotest(f), "defg/efg/klm")
+    assert dotest(f) == "defg/efg/klm"
 
     ana = tk | analysis.NgramFilter(2, 5, at="end")
     tokens = [(t.text, t.startchar, t.endchar) for t in ana(s, chars=True)]
-    assert_equal(tokens, [("cdefg", 2, 7), ("defg", 3, 7), ("efg", 4, 7),
-                          ("fg", 5, 7), ("ij", 10, 12), ("klm", 13, 16),
-                          ("lm", 14, 16)])
+    assert tokens == [("cdefg", 2, 7), ("defg", 3, 7), ("efg", 4, 7),
+                      ("fg", 5, 7), ("ij", 10, 12), ("klm", 13, 16),
+                      ("lm", 14, 16)]
 
 
 @skip_if_unavailable("__future__", "unicode_literals")
     for lang, source, target in domain:
         ana = analysis.LanguageAnalyzer(lang)
         words = [t.text for t in ana(source)]
-        assert_equal(words, target)
+        assert words == target
 
 
 def test_pickleability():
     ana = analysis.LanguageAnalyzer("en")
-    pick = dumps(ana, -1)
+    _ = dumps(ana, -1)
 
 

File tests/test_bits.py

-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh.filedb.filestore import RamStorage
 from whoosh.idsets import BitSet, OnDiskBitSet, SortedIntSet
 
 
     b.update([0, 2, 4, 6, 7])
     assert b
-    assert_equal([(n in b) for n in range(10)],
-                 [True, False, True, False, True, False, True, True, False,
-                  False])
+    assert ([(n in b) for n in range(10)] ==
+            [True, False, True, False, True, False, True, True, False, False])
 
     b.add(9)
     assert 9 in b
-    assert_equal(len(b), 6)
+    assert len(b) == 6
 
-    assert_equal(list(b.invert(10)), [1, 3, 5, 8])
+    assert list(b.invert(10)) == [1, 3, 5, 8]
 
     b.discard(6)
-    assert_equal(list(b), [0, 2, 4, 7, 9])
-    assert_equal(len(b), 5)
+    assert list(b) == [0, 2, 4, 7, 9]
+    assert len(b) == 5
 
 
 def test_len(c=BitSet):
     b.add(3)
     b.add(5)
     b.add(1024)
-    assert_equal(len(b), 3)
+    assert len(b) == 3
     b.add(5)
-    assert_equal(len(b), 3)
+    assert len(b) == 3
     b.discard(1000)
-    assert_equal(len(b), 3)
+    assert len(b) == 3
     b.discard(5)
-    assert_equal(len(b), 2)
+    assert len(b) == 2
 
 
 def test_union(c=BitSet):
-    assert_equal(c([2, 4, 5]) | c([3, 9]), c([2, 3, 4, 5, 9]))
+    assert c([2, 4, 5]) | c([3, 9]) == c([2, 3, 4, 5, 9])
     b = c([2, 4, 5])
     b.update([3, 9])
-    assert_equal(list(b), [2, 3, 4, 5, 9])
+    assert list(b) == [2, 3, 4, 5, 9]
     b = c([2, 4, 5])
     b.update(c([3, 9]))
-    assert_equal(list(b), [2, 3, 4, 5, 9])
+    assert list(b) == [2, 3, 4, 5, 9]
     b = c([1, 2])
     b.update([1, 5, 9])
-    assert_equal(list(b), [1, 2, 5, 9])
+    assert list(b) == [1, 2, 5, 9]
 
 
 def test_intersection(c=BitSet):
-    assert_equal(c([2, 4, 5]) & c([3, 9]), c())
-    assert_equal(c([2, 4, 5]) & c([4, 5, 9]), c([4, 5]))
+    assert c([2, 4, 5]) & c([3, 9]) == c()
+    assert c([2, 4, 5]) & c([4, 5, 9]) == c([4, 5])
     b = c([2, 4, 5])
-    assert_equal(b.intersection([4, 5, 9]), c([4, 5]))
+    assert b.intersection([4, 5, 9]) == c([4, 5])
     b.intersection_update([4, 5, 9])
-    assert_equal(list(b), [4, 5])
+    assert list(b) == [4, 5]
     b = c([2, 4, 5])
     b.intersection_update(c([4, 5, 9]))
-    assert_equal(list(b), [4, 5])
+    assert list(b) == [4, 5]
 
 
 def test_difference(c=BitSet):
-    assert_equal(c([1, 3, 50, 72]) - c([3, 72]), c([1, 50]))
-    assert_equal(list(c([1, 3, 50, 72]).difference([3, 72])), [1, 50])
+    assert c([1, 3, 50, 72]) - c([3, 72]) == c([1, 50])
+    assert list(c([1, 3, 50, 72]).difference([3, 72])) == [1, 50]
     b = c([1, 3, 50, 72])
     b.difference_update(c([3, 72]))
-    assert_equal(list(b), [1, 50])
+    assert list(b) == [1, 50]
     b = c([1, 3, 50, 72])
     b.difference_update([3, 72])
-    assert_equal(list(b), [1, 50])
+    assert list(b) == [1, 50]
 
 
 def test_copy(c=BitSet):
     b = c([1, 5, 100, 60])
-    assert_equal(b, b.copy())
+    assert b == b.copy()
 
 
 def test_clear(c=BitSet):
     b = c([1, 5, 100, 60])
     b.clear()
-    assert_equal(list(b), [])
+    assert list(b) == []
 
 
 def test_isdisjoint(c=BitSet):
 
 def test_before_after(c=BitSet):
     b = c([10, 11, 30, 50, 80])
-    assert_equal(b.after(0), 10)
-    assert_equal(b.after(7), 10)
-    assert_equal(b.after(8), 10)
-    assert_equal(b.after(10), 11)
-    assert_equal(b.after(11), 30)
-    assert_equal(b.after(30), 50)
-    assert_equal(b.after(33), 50)
-    assert_equal(b.after(38), 50)
-    assert_equal(b.after(41), 50)
-    assert_equal(b.after(42), 50)
-    assert_equal(b.after(45), 50)
-    assert_equal(b.after(47), 50)
-    assert_equal(b.after(50), 80)
-    assert_equal(b.after(80), None)
+    assert b.after(0) == 10
+    assert b.after(7) == 10
+    assert b.after(8) == 10
+    assert b.after(10) == 11
+    assert b.after(11) == 30
+    assert b.after(30) == 50
+    assert b.after(33) == 50
+    assert b.after(38) == 50
+    assert b.after(41) == 50
+    assert b.after(42) == 50
+    assert b.after(45) == 50
+    assert b.after(47) == 50
+    assert b.after(50) == 80
+    assert b.after(80) is None
 
-    assert_equal(b.before(0), None)
-    assert_equal(b.before(99), 80)
-    assert_equal(b.before(81), 80)
-    assert_equal(b.before(80), 50)
-    assert_equal(b.before(50), 30)
-    assert_equal(b.before(48), 30)
-    assert_equal(b.before(46), 30)
-    assert_equal(b.before(45), 30)
-    assert_equal(b.before(44), 30)
-    assert_equal(b.before(42), 30)
-    assert_equal(b.before(38), 30)
-    assert_equal(b.before(36), 30)
-    assert_equal(b.before(34), 30)
-    assert_equal(b.before(33), 30)
-    assert_equal(b.before(32), 30)
-    assert_equal(b.before(30), 11)
-    assert_equal(b.before(11), 10)
-    assert_equal(b.before(10), None)
+    assert b.before(0) is None
+    assert b.before(99) == 80
+    assert b.before(81) == 80
+    assert b.before(80) == 50
+    assert b.before(50) == 30
+    assert b.before(48) == 30
+    assert b.before(46) == 30
+    assert b.before(45) == 30
+    assert b.before(44) == 30
+    assert b.before(42) == 30
+    assert b.before(38) == 30
+    assert b.before(36) == 30
+    assert b.before(34) == 30
+    assert b.before(33) == 30
+    assert b.before(32) == 30
+    assert b.before(30) == 11
+    assert b.before(11) == 10
+    assert b.before(10) is None
 
     b = c([7])
-    assert_equal(b.after(0), 7)
+    assert b.after(0) == 7
     b = c([8])
-    assert_equal(b.after(0), 8)
+    assert b.after(0) == 8
     b = c([9])
-    assert_equal(b.after(0), 9)
+    assert b.after(0) == 9
 
     b = c([7])
-    assert_equal(b.before(16), 7)
+    assert b.before(16) == 7
     b = c([8])
-    assert_equal(b.before(16), 8)
+    assert b.before(16) == 8
     b = c([9])
-    assert_equal(b.before(16), 9)
+    assert b.before(16) == 9
 
     b = c([49])
-    assert_equal(b.after(0), 49)
+    assert b.after(0) == 49
 
 
 def test_sortedintset():
 
     f = st.open_file("test")
     b = OnDiskBitSet(f, 0, size)
-    assert_equal(list(b), list(bs))
+    assert list(b) == list(bs)
 
-    assert_equal(b.after(0), 10)
-    assert_equal(b.after(10), 11)
-    assert_equal(b.after(80), None)
-    assert_equal(b.after(99), None)
+    assert b.after(0) == 10
+    assert b.after(10) == 11
+    assert b.after(80) is None
+    assert b.after(99) is None
 
-    assert_equal(b.before(0), None)
-    assert_equal(b.before(99), 80)
-    assert_equal(b.before(80), 50)
-    assert_equal(b.before(10), None)
+    assert b.before(0) is None
+    assert b.before(99) == 80
+    assert b.before(80) == 50
+    assert b.before(10) is None
 
     f.seek(0)
     b = BitSet.from_disk(f, size)
-    assert_equal(list(b), list(bs))
+    assert list(b) == list(bs)

File tests/test_classify.py

 from __future__ import with_statement
 
-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh import analysis, classify, fields, formats
 from whoosh.compat import u, text_type
 from whoosh.filedb.filestore import RamStorage
     with ix.reader() as r:
         exp = classify.Expander(r, "content")
         exp.add_text(text)
-        assert_equal([t[0] for t in exp.expanded_terms(3)],
-                     ["particles", "velocity", "field"])
+        assert ([t[0] for t in exp.expanded_terms(3)]
+                == ["particles", "velocity", "field"])
 
 
 def test_keyterms():
     with ix.searcher() as s:
         docnum = s.document_number(path="/a")
         keys = list(s.key_terms([docnum], "content", numterms=3))
-        assert_equal([t[0] for t in keys],
-                     [u("collision"), u("calculations"), u("damped")])
+        assert ([t[0] for t in keys]
+                == [u("collision"), u("calculations"), u("damped")])
 
 
 def test_keyterms_from_text():
     ix = create_index()
     with ix.searcher() as s:
         keys = list(s.key_terms_from_text("content", text))
-        assert_equal([t[0] for t in keys],
-                     ["particles", "velocity", "field"])
+        assert [t[0] for t in keys] == ["particles", "velocity", "field"]
 
 
 def test_more_like_this():
         with ix.searcher() as s:
             docnum = s.document_number(id=u("1"))
             r = s.more_like(docnum, "text", **kwargs)
-            assert_equal([hit["id"] for hit in r], ["6", "2", "3"])
+            assert [hit["id"] for hit in r] == ["6", "2", "3"]
 
     schema = fields.Schema(id=fields.ID(stored=True),
                            text=fields.TEXT(stored=True))
         with ix.searcher() as s:
             docnum = s.document_number(id="3")
             r = s.more_like(docnum, "text")
-            assert_equal([hit["id"] for hit in r], ["5", "4"])
+            assert [hit["id"] for hit in r] == ["5", "4"]

File tests/test_codecs.py

 import random
 from array import array
 
-from nose.tools import assert_equal, assert_not_equal  # @UnresolvedImport
-
 from whoosh import analysis, fields, formats, query
 from whoosh.compat import u, b, text_type
 from whoosh.compat import array_tobytes, xrange
         seg.set_doc_count(4)
 
         pdr = codec.per_document_reader(st, seg)
-        assert_equal(pdr.doc_count_all(), 4)
-        assert_equal(pdr.stored_fields(0), {"a": "hello", "b": "there"})
+        assert pdr.doc_count_all() == 4
+        assert pdr.stored_fields(0) == {"a": "hello", "b": "there"}
         # Note: access out of order
-        assert_equal(pdr.stored_fields(3), {"a": "alfa", "b": "bravo"})
-        assert_equal(pdr.stored_fields(1),
-                     {"a": "one", "b": "two", "c": "three"})
+        assert pdr.stored_fields(3) == {"a": "alfa", "b": "bravo"}
+        assert pdr.stored_fields(1) == {"a": "one", "b": "two", "c": "three"}
 
         sfs = list(pdr.all_stored_fields())
-        assert_equal(len(sfs), 4)
-        assert_equal(sfs, [{"a": "hello", "b": "there"},
-                           {"a": "one", "b": "two", "c": "three"},
-                           {},
-                           {"a": "alfa", "b": "bravo"},
-                           ])
+        assert len(sfs) == 4
+        assert sfs == [{"a": "hello", "b": "there"},
+                       {"a": "one", "b": "two", "c": "three"},
+                       {},
+                       {"a": "alfa", "b": "bravo"},
+                       ]
         pdr.close()
 
 
     for i, (fieldname, text) in enumerate(terms):
         assert (fieldname, b(text)) in tr
         ti = tr.term_info(fieldname, b(text))
-        assert_equal(ti.weight(), i)
-        assert_equal(ti.doc_frequency(), 1)
+        assert ti.weight() == i
+        assert ti.doc_frequency() == 1
 
 
 def test_w2_block():
     m = tr.matcher("a", b("b"), schema["a"].format)
     block = m.block
     block.read_ids()
-    assert_equal(block.min_length(), 1)
-    assert_equal(block.max_length(), 5)
-    assert_equal(block.max_weight(), 5.0)
-    assert_equal(block.min_id(), 0)
-    assert_equal(block.max_id(), 4)
-    assert_equal(list(block.ids), [0, 1, 2, 3, 4])
-    assert_equal(list(block.read_weights()), [2.0, 5.0, 3.0, 4.0, 1.0])
-    assert_equal(list(block.read_values()), [b("test1"), b("test2"),
-                                             b("test3"), b("test4"), b("test5")
-                                             ])
+    assert block.min_length() == 1
+    assert block.max_length() == 5
+    assert block.max_weight() == 5.0
+    assert block.min_id() == 0
+    assert block.max_id() == 4
+    assert list(block.ids) == [0, 1, 2, 3, 4]
+    assert list(block.read_weights()) == [2.0, 5.0, 3.0, 4.0, 1.0]
+    assert list(block.read_values()) == [b("test1"), b("test2"), b("test3"),
+                                         b("test4"), b("test5")]
 
     seg = codec.new_segment(st, "test")
     fw = codec.field_writer(st, seg)
     m = tr.matcher("a", b("b"), schema["a"].format)
     block = m.block
     block.read_ids()
-    assert_equal(len(block), 4)
-    assert_equal(list(block.ids), [0, 1, 2, 5])
-    assert_equal(list(block.weights), [1.0, 2.0, 12.0, 6.5])
-    assert_equal(block.values, None)
-    assert_equal(block.min_length(), 1)
-    assert_equal(block.max_length(), blen(420))
-    assert_equal(block.max_weight(), 12.0)
+    assert len(block) == 4
+    assert list(block.ids) == [0, 1, 2, 5]
+    assert list(block.weights) == [1.0, 2.0, 12.0, 6.5]
+    assert block.values is None
+    assert block.min_length() == 1
+    assert block.max_length() == blen(420)
+    assert block.max_weight() == 12.0
 
     ti = tr.term_info("a", b("b"))
-    assert_equal(ti.weight(), 21.5)
-    assert_equal(ti.doc_frequency(), 4)
-    assert_equal(ti.min_length(), 1)
-    assert_equal(ti.max_length(), blen(420))
-    assert_equal(ti.max_weight(), 12.0)
+    assert ti.weight() == 21.5
+    assert ti.doc_frequency() == 4
+    assert ti.min_length() == 1
+    assert ti.max_length() == blen(420)
+    assert ti.max_weight() == 12.0
 
 
 def test_docwriter_one():
     seg.set_doc_count(1)
 
     pdr = codec.per_document_reader(st, seg)
-    assert_equal(pdr.doc_field_length(0, "text"), 4)
-    assert_equal(pdr.stored_fields(0), {"text": "Testing one two three"})
+    assert pdr.doc_field_length(0, "text") == 4
+    assert pdr.stored_fields(0) == {"text": "Testing one two three"}
 
 
 def test_docwriter_two():
     seg.set_doc_count(2)
 
     pdr = codec.per_document_reader(st, seg)
-    assert_equal(pdr.doc_field_length(0, "title"), 2)
-    assert_equal(pdr.doc_field_length(0, "text"), 4)
-    assert_equal(pdr.doc_field_length(1, "title"), 3)
-    assert_equal(pdr.doc_field_length(1, "text"), 1)
+    assert pdr.doc_field_length(0, "title") == 2
+    assert pdr.doc_field_length(0, "text") == 4
+    assert pdr.doc_field_length(1, "title") == 3
+    assert pdr.doc_field_length(1, "text") == 1
 
-    assert_equal(pdr.stored_fields(0),
-                 {"title": ("a", "b"), "text": "Testing one two three"})
-    assert_equal(pdr.stored_fields(1),
-                 {"title": "The second document", "text": 500})
+    assert (pdr.stored_fields(0)
+            == {"title": ("a", "b"), "text": "Testing one two three"})
+    assert (pdr.stored_fields(1)
+            == {"title": "The second document", "text": 500})
 
 
 def test_vector():
     seg.set_doc_count(1)
 
     pdr = codec.per_document_reader(st, seg)
-    assert_equal(pdr.stored_fields(0), {})
+    assert pdr.stored_fields(0) == {}
 
     m = pdr.vector(0, "title", field.vector)
     assert m.is_active()
     while m.is_active():
         ps.append((m.id(), m.weight(), m.value()))
         m.next()
-    assert_equal(ps, [(u("alfa"), 1.0, b("t1")), (u("bravo"), 2.0, b("t2"))])
+    assert ps == [(u("alfa"), 1.0, b("t1")), (u("bravo"), 2.0, b("t2"))]
 
 
 def test_vector_values():
 
     vr = codec.per_document_reader(st, seg)
     m = vr.vector(0, "f1", field.vector)
-    assert_equal(list(m.items_as("frequency")), [("alfa", 2), ("bravo", 1),
-                                                 ("charlie", 1)])
+    assert (list(m.items_as("frequency"))
+            == [("alfa", 2), ("bravo", 1), ("charlie", 1)])
 
 
 def test_no_lengths():
     seg.set_doc_count(3)
 
     pdr = codec.per_document_reader(st, seg)
-    assert_equal(pdr.doc_field_length(0, "name"), 0)
-    assert_equal(pdr.doc_field_length(1, "name"), 0)
-    assert_equal(pdr.doc_field_length(2, "name"), 0)
+    assert pdr.doc_field_length(0, "name") == 0
+    assert pdr.doc_field_length(1, "name") == 0
+    assert pdr.doc_field_length(2, "name") == 0
 
 
 def test_store_zero():
     seg.set_doc_count(1)
 
     sr = codec.per_document_reader(st, seg)
-    assert_equal(sr.stored_fields(0), {"name": 0})
+    assert sr.stored_fields(0) == {"name": 0}
 
 
 def test_fieldwriter_single_term():
     tr = codec.terms_reader(st, seg)
     assert ("text", b("alfa")) in tr
     ti = tr.term_info("text", b("alfa"))
-    assert_equal(ti.weight(), 1.5)
-    assert_equal(ti.doc_frequency(), 1)
-    assert_equal(ti.min_length(), 1)
-    assert_equal(ti.max_length(), 1)
-    assert_equal(ti.max_weight(), 1.5)
-    assert_equal(ti.min_id(), 0)
-    assert_equal(ti.max_id(), 0)
+    assert ti.weight() == 1.5
+    assert ti.doc_frequency() == 1
+    assert ti.min_length() == 1
+    assert ti.max_length() == 1
+    assert ti.max_weight() == 1.5
+    assert ti.min_id() == 0
+    assert ti.max_id() == 0
 
 
 def test_fieldwriter_two_terms():
     tr = codec.terms_reader(st, seg)
     assert ("text", b("alfa")) in tr
     ti = tr.term_info("text", b("alfa"))
-    assert_equal(ti.weight(), 3.0)
-    assert_equal(ti.doc_frequency(), 2)
-    assert_equal(ti.min_length(), 1)
-    assert_equal(ti.max_length(), 2)
-    assert_equal(ti.max_weight(), 2.0)
-    assert_equal(ti.min_id(), 0)
-    assert_equal(ti.max_id(), 1)
+    assert ti.weight() == 3.0
+    assert ti.doc_frequency() == 2
+    assert ti.min_length() == 1
+    assert ti.max_length() == 2
+    assert ti.max_weight() == 2.0
+    assert ti.min_id() == 0
+    assert ti.max_id() == 1
     assert ("text", b("bravo")) in tr
     ti = tr.term_info("text", b("bravo"))
-    assert_equal(ti.weight(), 5.0)
-    assert_equal(ti.doc_frequency(), 2)
-    assert_equal(ti.min_length(), 2)
-    assert_equal(ti.max_length(), 3)
-    assert_equal(ti.max_weight(), 3.0)
-    assert_equal(ti.min_id(), 0)
-    assert_equal(ti.max_id(), 2)
+    assert ti.weight() == 5.0
+    assert ti.doc_frequency() == 2
+    assert ti.min_length() == 2
+    assert ti.max_length() == 3
+    assert ti.max_weight() == 3.0
+    assert ti.min_id() == 0
+    assert ti.max_id() == 2
 
     m = tr.matcher("text", b("bravo"), field.format)
-    assert_equal(list(m.all_ids()), [0, 2])
+    assert list(m.all_ids()) == [0, 2]
 
 
 def test_fieldwriter_multiblock():
 
     tr = codec.terms_reader(st, seg)
     ti = tr.term_info("text", b("alfa"))
-    assert_equal(ti.weight(), 15.0)
-    assert_equal(ti.doc_frequency(), 5)
-    assert_equal(ti.min_length(), 1)
-    assert_equal(ti.max_length(), 5)
-    assert_equal(ti.max_weight(), 5.0)
-    assert_equal(ti.min_id(), 0)
-    assert_equal(ti.max_id(), 4)
+    assert ti.weight() == 15.0
+    assert ti.doc_frequency() == 5
+    assert ti.min_length() == 1
+    assert ti.max_length() == 5
+    assert ti.max_weight() == 5.0
+    assert ti.min_id() == 0
+    assert ti.max_id() == 4
 
     ps = []
     m = tr.matcher("text", b("alfa"), field.format)
     while m.is_active():
         ps.append((m.id(), m.weight(), m.value()))
         m.next()
-    assert_equal(ps, [(0, 2.0, b("test1")), (1, 5.0, b("test2")),
-                      (2, 3.0, b("test3")), (3, 4.0, b("test4")),
-                      (4, 1.0, b("test5"))])
+    assert ps == [(0, 2.0, b("test1")), (1, 5.0, b("test2")),
+                  (2, 3.0, b("test3")), (3, 4.0, b("test4")),
+                  (4, 1.0, b("test5"))]
 
 
 def test_term_values():
 
     tr = codec.terms_reader(st, seg)
     ps = [(term, ti.weight(), ti.doc_frequency()) for term, ti in tr.items()]
-    assert_equal(ps, [(("f1", b("alfa")), 2.0, 1),
-                      (("f1", b("bravo")), 1.0, 1),
-                      (("f1", b("charlie")), 1.0, 1)])
+    assert ps == [(("f1", b("alfa")), 2.0, 1), (("f1", b("bravo")), 1.0, 1),
+                  (("f1", b("charlie")), 1.0, 1)]
 
 
 def test_skip():
 
     tr = codec.terms_reader(st, seg)
     m = tr.matcher("f1", b("test"), fieldobj.format)
-    assert_equal(m.id(), 1)
+    assert m.id() == 1
     m.skip_to(220)
-    assert_equal(m.id(), 283)
+    assert m.id() == 283
     m.skip_to(1)
-    assert_equal(m.id(), 283)
+    assert m.id() == 283
     m.skip_to(1000)
-    assert_equal(m.id(), 1024)
+    assert m.id() == 1024
     m.skip_to(1800)
-    assert_equal(m.id(), 1800)
+    assert m.id() == 1800
 
 
 def test_spelled_field():
     assert gr.has_root("text")
     cur = gr.cursor("text")
     strings = list(cur.flatten_strings())
-    assert_equal(type(strings[0]), text_type)
-    assert_equal(strings, ["special", "specific"])
+    assert type(strings[0]) == text_type
+    assert strings == ["special", "specific"]
 
 
 def test_special_spelled_field():
     fw.close()
 
     tr = codec.terms_reader(st, seg)
-    assert_equal(list(tr.terms()), [("text", b("special")),
-                                    ("text", b("specific"))])
+    assert list(tr.terms()) == [("text", b("special")), ("text", b("specific"))]
 
     cur = codec.graph_reader(st, seg).cursor("text")
-    assert_equal(list(cur.flatten_strings()), ["specials", "specifically"])
+    assert list(cur.flatten_strings()) == ["specials", "specifically"]
 
 
 @skip_if_unavailable("ast")
     with ix.reader() as r:
         assert r.has_column("a")
         c = r.column_reader("a")
-        assert_equal(c[2], u("charlie delta echo"))
+        assert c[2] == u("charlie delta echo")
 
     w = ix.writer(codec=PlainTextCodec())
     w.commit(optimize=True)
         reader = s.reader()
 
         r = s.search(query.Term("a", "delta"))
-        assert_equal(len(r), 3)
-        assert_equal([hit["b"] for hit in r], [1000, 5.5, True])
+        assert len(r) == 3
+        assert [hit["b"] for hit in r] == [1000, 5.5, True]
 
-        assert_equal(" ".join(s.field_terms("a")),
-                     "alfa bravo charlie delta echo foxtrot india")
+        assert (" ".join(s.field_terms("a"))
+                == "alfa bravo charlie delta echo foxtrot india")
 
-        assert_equal(reader.doc_field_length(2, "a"), 3)
+        assert reader.doc_field_length(2, "a") == 3
 
         cfield = schema["c"]
-        assert_equal(type(cfield), fields.NUMERIC)
+        assert type(cfield) == fields.NUMERIC
         sortables = list(cfield.sortable_terms(reader, "c"))
-        assert_not_equal(sortables, [])
-        assert_equal([cfield.from_bytes(t) for t in sortables],
-                     [-200, -100, 100, 200, 300])
+        assert sortables
+        assert ([cfield.from_bytes(t) for t in sortables]
+                == [-200, -100, 100, 200, 300])
 
         assert reader.has_column("a")
         c = reader.column_reader("a")
-        assert_equal(c[2], u("charlie delta echo"))
+        assert c[2] == u("charlie delta echo")
 
         assert reader.has_column("c")
         c = reader.column_reader("c")
-        assert_equal(list(c), [100, 200, 300, -100, -200])
+        assert list(c) == [100, 200, 300, -100, -200]
 
         assert s.has_vector(2, "a")
         v = s.vector(2, "a")
-        assert_equal(" ".join(v.all_ids()), "charlie delta echo")
+        assert " ".join(v.all_ids()) == "charlie delta echo"
 
 
 def test_memory_codec():
     assert ("a", "delta") in reader
     q = query.Term("a", "delta")
     r = s.search(q)
-    assert_equal(len(r), 3)
-    assert_equal([hit["b"] for hit in r], [1000, 5.5, True])
+    assert len(r) == 3
+    assert [hit["b"] for hit in r] == [1000, 5.5, True]
 
-    assert_equal(" ".join(s.field_terms("a")),
-                 "alfa bravo charlie delta echo foxtrot india")
+    assert (" ".join(s.field_terms("a"))
+            == "alfa bravo charlie delta echo foxtrot india")
 
     cfield = schema["c"]
     c_sortables = cfield.sortable_terms(reader, "c")
     c_values = [cfield.from_bytes(t) for t in c_sortables]
-    assert_equal(c_values, [-200, -100, 100, 200, 300])
+    assert c_values == [-200, -100, 100, 200, 300]
 
     c_values = list(reader.column_reader("c"))
-    assert_equal(c_values, [100, 200, 300, -100, -200])
+    assert c_values == [100, 200, 300, -100, -200]
 
     assert s.has_vector(2, "a")
     v = s.vector(2, "a")
-    assert_equal(" ".join(v.all_ids()), "charlie delta echo")
+    assert " ".join(v.all_ids()) == "charlie delta echo"
 
     assert reader.has_word_graph("d")
     gr = reader.word_graph("d")
-    assert_equal(" ".join(gr.flatten_strings()),
-                 "aching dipping echoing filling going hopping opening "
-                 "pulling quelling rolling selling timing using whining "
-                 "yelling")
+    assert (" ".join(gr.flatten_strings()) ==
+            "aching dipping echoing filling going hopping opening "
+            "pulling quelling rolling selling timing using whining "
+            "yelling")
 
 
 def test_memory_multiwrite():
             w.add_document(line=u(line))
 
     reader = codec.reader(schema)
-    assert_equal([sf["line"] for sf in reader.all_stored_fields()], domain)
-    assert_equal(" ".join(reader.field_terms("line")),
-                 "alfa bravo charlie delta echo foxtrot india juliet")
+    assert [sf["line"] for sf in reader.all_stored_fields()] == domain
+    assert (" ".join(reader.field_terms("line"))
+            == "alfa bravo charlie delta echo foxtrot india juliet")

File tests/test_collector.py

 from __future__ import with_statement
 
-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh import fields, query
 from whoosh.compat import u
 from whoosh.filedb.filestore import RamStorage
     w.commit()
 
     with ix.searcher() as s:
-        assert_equal(s.doc_frequency("text", u("charlie")), 2)
+        assert s.doc_frequency("text", u("charlie")) == 2
         r = s.search(query.Term("text", u("charlie")))
-        assert_equal([hit["id"] for hit in r], [1, 3])
-        assert_equal(len(r), 2)
+        assert [hit["id"] for hit in r] == [1, 3]
+        assert len(r) == 2

File tests/test_columns.py

 from __future__ import with_statement
 import inspect, random, sys
 
-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh import columns, fields, query
 from whoosh.compat import b, u, BytesIO, bytes_type, text_type
 from whoosh.compat import izip, xrange, dumps, loads
 
     f = st.open_file("test")
     msr = compound.CompoundStorage(f)
-    assert_equal(msr.open_file("a").read(), b("123456789abc"))
-    assert_equal(msr.open_file("b").read(), b("abcdefghijk"))
-    assert_equal(msr.open_file("c").read(), b("AaBbCcDdEeFfGgHh"))
+    assert msr.open_file("a").read() == b("123456789abc")
+    assert msr.open_file("b").read() == b("abcdefghijk")
+    assert msr.open_file("c").read() == b("AaBbCcDdEeFfGgHh")
 
 
 def test_random_multistream():
         f = st.open_file("test")
         msr = compound.CompoundStorage(f)
         for name, value in domain.items():
-            assert_equal(msr.open_file(name).read(), value)
+            assert msr.open_file(name).read() == value
         msr.close()
 
 
 
     f = st.open_file("test1")
     r = c.reader(f, 5, length, len(values))
-    assert_equal(values, list(r))
+    assert values == list(r)
     for x in range(len(values)):
-        assert_equal(values[x], r[x])
+        assert values[x] == r[x]
     f.close()
 
     # Sparse
 
     f = st.open_file("test2")
     r = c.reader(f, 5, length, doccount)
-    assert_equal(target, list(r))
+    assert target == list(r)
     for x in range(doccount):
-        assert_equal(target[x], r[x])
+        assert target[x] == r[x]
 
     lr = r.load()
-    assert_equal(target, list(lr))
+    assert target == list(lr)
     f.close()
 
 
 
     with ix.reader() as r:
         scr = r.column_reader("s")
-        assert_equal(list(scr), ["alfa", "juliet"])
+        assert list(scr) == ["alfa", "juliet"]
 
         ncr = r.column_reader("n")
-        assert_equal(list(ncr), [100, 10])
+        assert list(ncr) == [100, 10]
 
 
 def test_column_field():
             assert r.has_column("b")
 
             cra = r.column_reader("a")
-            assert_equal(cra[0], u("alfa bravo"))
-            assert_equal(type(cra[0]), text_type)
+            assert cra[0] == u("alfa bravo")
+            assert type(cra[0]) == text_type
 
             crb = r.column_reader("b")
-            assert_equal(crb[0], b("charlie delta"))
-            assert_equal(type(crb[0]), bytes_type)
+            assert crb[0] == b("charlie delta")
+            assert type(crb[0]) == bytes_type
 
 
 def test_column_query():
                 return [s.stored_fields(docnum)["id"] for docnum in q.docs(s)]
 
             q = query.ColumnQuery("a", u("bravo"))
-            assert_equal(check(q), [2])
+            assert check(q) == [2]
 
             q = query.ColumnQuery("b", 30)
-            assert_equal(check(q), [3])
+            assert check(q) == [3]
 
             q = query.ColumnQuery("a", lambda v: v != u("delta"))
-            assert_equal(check(q), [1, 2, 3, 5, 6])
+            assert check(q) == [1, 2, 3, 5, 6]
 
             q = query.ColumnQuery("b", lambda v: v > 30)
-            assert_equal(check(q), [4, 5, 6])
+            assert check(q) == [4, 5, 6]
 
 
 def test_ref_switch():
             v = cr[i]
             # Column ignores additional unique values after 65535
             if i <= 65535 - 1:
-                assert_equal(v, str(i).encode("latin1"))
+                assert v == str(i).encode("latin1")
             else:
-                assert_equal(v, b(''))
+                assert v == b('')
         f.close()
 
     rw(255)
             warnings.simplefilter("always")
             rw(65537)
 
-            assert_equal(len(w), 2)
+            assert len(w) == 2
             assert issubclass(w[-1].category, UserWarning)
     else:
         rw(65537)

File tests/test_compound.py

 from __future__ import with_statement
 
-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh.compat import b
 from whoosh.filedb.compound import CompoundStorage
 from whoosh.filedb.filestore import RamStorage
     f = CompoundStorage(st.open_file("f"))
     with f.open_file("a") as af:
         for x in alist:
-            assert_equal(x, af.read_int())
-        assert_equal(af.read(), b(''))
+            assert x == af.read_int()
+        assert af.read() == b('')
 
     with f.open_file("b") as bf:
         for x in blist:
-            assert_equal(x, bf.read_varint())
-        assert_equal(bf.read(), b(''))
+            assert x == bf.read_varint()
+        assert bf.read() == b('')
 
     with f.open_file("c") as cf:
         for x in clist:
-            assert_equal(x, cf.read_int())
-        assert_equal(cf.read(), b(''))
+            assert x == cf.read_int()
+        assert cf.read() == b('')
 
 
 def test_simple_compound_mmap():

File tests/test_dateparse.py

-from nose.tools import assert_equal  # @UnresolvedImport
-
 from whoosh.qparser.dateparse import *
 
 
     for key in adatetime.units:
         val = getattr(at, key)
         target = kwargs.get(key)
-        assert_equal(val, target, "at.%s=%r not %r in %r" % (key, val, target, at))
+        assert val == target, "at.%s=%r not %r in %r" % (key, val, target, at)
 
 
 def assert_timespan(ts, sargs, eargs):
 def assert_unamb_span(ts, sargs, eargs):
     startdt = adatetime(**sargs).floor()
     enddt = adatetime(**eargs).ceil()
-    assert_equal(ts.start, startdt, "start %s != %s" % (ts.start, startdt))
-    assert_equal(ts.end, enddt, "end %s != %s" % (ts.end, enddt))
+    assert ts.start == startdt, "start %s != %s" % (ts.start, startdt)
+    assert ts.end == enddt, "end %s != %s" % (ts.end, enddt)
 
 
 def assert_datespan(ts, startdate, enddate):
     assert ts.__class__ is timespan
-    assert_equal(ts.start, startdate)
-    assert_equal(ts.end, enddate)
+    assert ts.start == startdate
+    assert ts.end == enddate
 
 
 #
     assert_adatetime(d.date_from("this month", basedate), year=2010, month=9)
     assert_adatetime(d.date_from("this year", basedate), year=2010)
 
-    assert_equal(d.date_from("now", basedate), basedate)
+    assert d.date_from("now", basedate) == basedate
 
 
 def test_plustime(rt=english.plusdate):
-    assert_equal(rt.date_from("+1hr", basedate), basedate + timedelta(hours=1))
-    assert_equal(rt.date_from("+5mins", basedate),
-                 basedate + timedelta(minutes=5))
-    assert_equal(rt.date_from("+20s", basedate),
-                 basedate + timedelta(seconds=20))
+    assert rt.date_from("+1hr", basedate) == basedate + timedelta(hours=1)
+    assert rt.date_from("+5mins", basedate) == basedate + timedelta(minutes=5)
+    assert rt.date_from("+20s", basedate) == basedate + timedelta(seconds=20)
 
-    assert_equal(rt.date_from("- 2 h", basedate),
-                 basedate + timedelta(hours=-2))
-    assert_equal(rt.date_from("- 25 minutes", basedate),
-                 basedate + timedelta(minutes=-25))
-    assert_equal(rt.date_from("-400 secs", basedate),
-                 basedate + timedelta(seconds=-400))
+    assert rt.date_from("- 2 h", basedate) == basedate + timedelta(hours=-2)
+    assert (rt.date_from("- 25 minutes", basedate)
+            == basedate + timedelta(minutes=-25))
+    assert (rt.date_from("-400 secs", basedate)
+            == basedate + timedelta(seconds=-400))
 
-    assert_equal(rt.date_from("+1hr 5m", basedate),
-                 basedate + timedelta(hours=1, minutes=5))
-    assert_equal(rt.date_from("-8hr 12m", basedate),
-                 basedate + timedelta(hours=-8, minutes=-12))
-    assert_equal(rt.date_from("+1hr 5s", basedate),
-                 basedate + timedelta(hours=1, seconds=5))
-    assert_equal(rt.date_from("+1hr 12m 5s", basedate),
-                 basedate + timedelta(hours=1, minutes=12, seconds=5))
-    assert_equal(rt.date_from("-1hr 5s", basedate),
-                 basedate + timedelta(hours=-1, seconds=-5))
-    assert_equal(rt.date_from("-1hr 12m 5s", basedate),
-                 basedate + timedelta(hours=-1, minutes=-12, seconds=-5))
+    assert (rt.date_from("+1hr 5m", basedate)
+            == basedate + timedelta(hours=1, minutes=5))
+    assert (rt.date_from("-8hr 12m", basedate)
+            == basedate + timedelta(hours=-8, minutes=-12))
+    assert (rt.date_from("+1hr 5s", basedate)
+            == basedate + timedelta(hours=1, seconds=5))
+    assert (rt.date_from("+1hr 12m 5s", basedate)
+            == basedate + timedelta(hours=1, minutes=12, seconds=5))
+    assert (rt.date_from("-1hr 5s", basedate)
+            == basedate + timedelta(hours=-1, seconds=-5))
+    assert (rt.date_from("-1hr 12m 5s", basedate)
+            == basedate + timedelta(hours=-1, minutes=-12, seconds=-5))
 
 
 def test_relative_days():
     # "next monday" on monday
-    assert_equal(relative_days(0, 0, 1), 7)
+    assert relative_days(0, 0, 1) == 7
     # "last monday" on monday
-    assert_equal(relative_days(0, 0, -1), -7)
+    assert relative_days(0, 0, -1) == -7
     # "next tuesday" on wednesday
-    assert_equal(relative_days(2, 1, 1), 6)
+    assert relative_days(2, 1, 1) == 6
     # "last tuesday" on wednesday
-    assert_equal(relative_days(2, 1, -1), -1)
+    assert relative_days(2, 1, -1) == -1
     # "last monday" on sunday
-    assert_equal(relative_days(6, 0, -1), -6)
+    assert relative_days(6, 0, -1) == -6
     # "next monday" on sunday
-    assert_equal(relative_days(6, 0, 1), 1)
+    assert relative_days(6, 0, 1) == 1
     # "next wednesday" on tuesday
-    assert_equal(relative_days(1, 2, 1), 1)
+    assert relative_days(1, 2, 1) == 1
     # "last wednesday" on tuesday
-    assert_equal(relative_days(1, 2, -1), -6)
+    assert relative_days(1, 2, -1) == -6
     # "last wednesday" on thursday
-    assert_equal(relative_days(3, 2, -1), -1)
+    assert relative_days(3, 2, -1) == -1
     # "next wednesday" on thursday
-    assert_equal(relative_days(3, 2, 1), 6)
+    assert relative_days(3, 2, 1) == 6
     # "last wednesday" on tuesday
-    assert_equal(relative_days(1, 2, -1), -6)
+    assert relative_days(1, 2, -1) == -6
     # "next wednesday" on tuesday
-    assert_equal(relative_days(1, 2, 1), 1)
+    assert relative_days(1, 2, 1) == 1
 
 
 def test_dayname(p=english.dayname):
 
 
 def test_reldate(p=english.plusdate):
-    assert_equal(p.date_from("+1y", basedate),
-                 basedate + relativedelta(years=1))
-    assert_equal(p.date_from("+2mo", basedate),
-                 basedate + relativedelta(months=2))
-    assert_equal(p.date_from("+3w", basedate),
-                 basedate + relativedelta(weeks=3))
-    assert_equal(p.date_from("+5d", basedate),
-                 basedate + relativedelta(days=5))
-    assert_equal(p.date_from("+5days", basedate),
-                 basedate + relativedelta(days=5))
+    assert p.date_from("+1y", basedate) == basedate + relativedelta(years=1)
+    assert p.date_from("+2mo", basedate) == basedate + relativedelta(months=2)
+    assert p.date_from("+3w", basedate) == basedate + relativedelta(weeks=3)
+    assert p.date_from("+5d", basedate) == basedate + relativedelta(days=5)
+    assert p.date_from("+5days", basedate) == basedate + relativedelta(days=5)
 
-    assert_equal(p.date_from("-6yr", basedate),
-                 basedate + relativedelta(years=-6))
-    assert_equal(p.date_from("- 7 mons", basedate),
-                 basedate + relativedelta(months=-7))
-    assert_equal(p.date_from("-8 wks", basedate),
-                 basedate + relativedelta(weeks=-8))
-    assert_equal(p.date_from("- 9 dy", basedate),
-                 basedate + relativedelta(days=-9))
+    assert p.date_from("-6yr", basedate) == basedate + relativedelta(years=-6)
+    assert p.date_from("- 7 mons", basedate) == basedate + relativedelta(months=-7)
+    assert p.date_from("-8 wks", basedate) == basedate + relativedelta(weeks=-8)
+    assert p.date_from("- 9 dy", basedate) == basedate + relativedelta(days=-9)
 
-    assert_equal(p.date_from("+1y 12mo 400d", basedate),
-                 basedate + relativedelta(years=1, months=12, days=400))
-    assert_equal(p.date_from("-7mo 8d", basedate),
-                 basedate + relativedelta(months=-7, days=-8))
-    assert_equal(p.date_from("+5wks 2d", basedate),
-                 basedate + relativedelta(weeks=5, days=2))
-    assert_equal(p.date_from("-1y 1w", basedate),
-                 basedate + relativedelta(years=-1, weeks=-1))
+    assert p.date_from("+1y 12mo 400d", basedate) == basedate + relativedelta(years=1, months=12, days=400)
+    assert p.date_from("-7mo 8d", basedate) == basedate + relativedelta(months=-7, days=-8)
+    assert p.date_from("+5wks 2d", basedate) == basedate + relativedelta(weeks=5, days=2)
+    assert p.date_from("-1y 1w", basedate) == basedate + relativedelta(years=-1, weeks=-1)
 
-    assert_equal(p.date_from("+1y 2d 5h 12s", basedate),
-                 basedate + relativedelta(years=1, days=2, hours=5,
-                                          seconds=12))
+    assert p.date_from("+1y 2d 5h 12s", basedate) == basedate + relativedelta(years=1, days=2, hours=5, seconds=12)
 
 
 def test_bundle_subs(p=english.bundle):

File tests/test_dawg.py

 from __future__ import with_statement
 
-from nose.tools import assert_equal, assert_raises  # @UnresolvedImport
+import pytest
 
 import random
 from array import array
 
 def test_empty_fieldname():
     gw = fst.GraphWriter(RamStorage().create_file("test"))
-    assert_raises(ValueError, gw.start_field, "")
-    assert_raises(ValueError, gw.start_field, None)
-    assert_raises(ValueError, gw.start_field, 0)
+    with pytest.raises(ValueError):
+        gw.start_field("")
+    with pytest.raises(ValueError):
+        gw.start_field(None)
+    with pytest.raises(ValueError):
+        gw.start_field(0)
 
 
 def test_empty_key():
     gw = fst.GraphWriter(RamStorage().create_file("test"))
     gw.start_field("_")
-    assert_raises(KeyError, gw.insert, b(""))
-    assert_raises(KeyError, gw.insert, "")
-    assert_raises(KeyError, gw.insert, u(""))
-    assert_raises(KeyError, gw.insert, [])
+    with pytest.raises(KeyError):
+        gw.insert(b(""))
+    with pytest.raises(KeyError):
+        gw.insert("")
+    with pytest.raises(KeyError):
+        gw.insert(u(""))
+    with pytest.raises(KeyError):
+        gw.insert([])
 
 
 def test_keys_out_of_order():
     gw = fst.GraphWriter(f)
     gw.start_field("test")
     gw.insert("alfa")
-    assert_raises(KeyError, gw.insert, "abba")
+    with pytest.raises(KeyError):
+        gw.insert("abba")
 
 
 def test_duplicate_keys():
     st = gwrite(enlist("alfa bravo bravo bravo charlie"))
     cur = fst.Cursor(greader(st))
-    assert_equal(list(cur.flatten_strings()), ["alfa", "bravo", "charlie"])
+    assert list(cur.flatten_strings()) == ["alfa", "bravo", "charlie"]
 
 
 def test_inactive_raise():
     cur = fst.Cursor(greader(st))
     while cur.is_active():
         cur.next_arc()
-    assert_raises(fst.InactiveCursor, cur.label)
-    assert_raises(fst.InactiveCursor, cur.prefix)
-    assert_raises(fst.InactiveCursor, cur.prefix_bytes)
-    assert_raises(fst.InactiveCursor, list, cur.peek_key())
-    assert_raises(fst.InactiveCursor, cur.peek_key_bytes)
-    assert_raises(fst.InactiveCursor, cur.stopped)
-    assert_raises(fst.InactiveCursor, cur.value)
-    assert_raises(fst.InactiveCursor, cur.accept)
-    assert_raises(fst.InactiveCursor, cur.at_last_arc)
-    assert_raises(fst.InactiveCursor, cur.next_arc)
-    assert_raises(fst.InactiveCursor, cur.follow)
-    assert_raises(fst.InactiveCursor, cur.switch_to, b("a"))
-    assert_raises(fst.InactiveCursor, cur.skip_to, b("a"))
-    assert_raises(fst.InactiveCursor, list, cur.flatten())
-    assert_raises(fst.InactiveCursor, list, cur.flatten_v())
-    assert_raises(fst.InactiveCursor, list, cur.flatten_strings())
-    assert_raises(fst.InactiveCursor, cur.find_path, b("a"))
+    pytest.raises(fst.InactiveCursor, cur.label)
+    pytest.raises(fst.InactiveCursor, cur.prefix)
+    pytest.raises(fst.InactiveCursor, cur.prefix_bytes)
+    pytest.raises(fst.InactiveCursor, list, cur.peek_key())
+    pytest.raises(fst.InactiveCursor, cur.peek_key_bytes)
+    pytest.raises(fst.InactiveCursor, cur.stopped)
+    pytest.raises(fst.InactiveCursor, cur.value)
+    pytest.raises(fst.InactiveCursor, cur.accept)
+    pytest.raises(fst.InactiveCursor, cur.at_last_arc)
+    pytest.raises(fst.InactiveCursor, cur.next_arc)
+    pytest.raises(fst.InactiveCursor, cur.follow)
+    pytest.raises(fst.InactiveCursor, cur.switch_to, b("a"))
+    pytest.raises(fst.InactiveCursor, cur.skip_to, b("a"))
+    pytest.raises(fst.InactiveCursor, list, cur.flatten())
+    pytest.raises(fst.InactiveCursor, list, cur.flatten_v())
+    pytest.raises(fst.InactiveCursor, list, cur.flatten_strings())
+    pytest.raises(fst.InactiveCursor, cur.find_path, b("a"))
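Note that pytest.raises supports both spellings used in this commit: the context-manager form (as in test_empty_key above) and a nose-like callable form that takes the function and its arguments, which keeps bulk conversions like this one to a single line per check. A self-contained example:

    import pytest

    # context-manager form
    with pytest.raises(ValueError):
        int("not a number")

    # callable form: pytest.raises(ExcType, func, *args, **kwargs)
    pytest.raises(ValueError, int, "not a number")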
 
 
 def test_types():
              (fst.IntListValues, [0, 6, 97], []))
 
     for t, v, z in types:
-        assert_equal(t.common(None, v), None)
-        assert_equal(t.common(v, None), None)
-        assert_equal(t.common(None, None), None)
-        assert_equal(t.subtract(v, None), v)
-        assert_equal(t.subtract(None, v), None)
-        assert_equal(t.subtract(None, None), None)
-        assert_equal(t.add(v, None), v)
-        assert_equal(t.add(None, v), v)
-        assert_equal(t.add(None, None), None)
+        assert t.common(None, v) is None
+        assert t.common(v, None) is None
+        assert t.common(None, None) is None
+        assert t.subtract(v, None) == v
+        assert t.subtract(None, v) is None
+        assert t.subtract(None, None) is None
+        assert t.add(v, None) == v
+        assert t.add(None, v) == v
+        assert t.add(None, None) is None
         f = st.create_file("test")
         t.write(f, v)
         t.write(f, z)
         f.close()
         f = st.open_file("test")
-        assert_equal(t.read(f), v)
-        assert_equal(t.read(f), z)
+        assert t.read(f) == v
+        assert t.read(f) == z
 
-    assert_equal(fst.IntValues.common(100, 20), 20)
-    assert_equal(fst.IntValues.add(20, 80), 100)
-    assert_equal(fst.IntValues.subtract(100, 80), 20)
+    assert fst.IntValues.common(100, 20) == 20
+    assert fst.IntValues.add(20, 80) == 100
+    assert fst.IntValues.subtract(100, 80) == 20
 
-    assert_equal(fst.BytesValues.common(b("abc"), b("abc")), b("abc"))
-    assert_equal(fst.BytesValues.common(b("abcde"), b("abfgh")), b("ab"))
-    assert_equal(fst.BytesValues.common(b("abcde"), b("ab")), b("ab"))
-    assert_equal(fst.BytesValues.common(b("ab"), b("abcde")), b("ab"))
-    assert_equal(fst.BytesValues.common(None, b("abcde")), None)
-    assert_equal(fst.BytesValues.common(b("ab"), None), None)
+    assert fst.BytesValues.common(b("abc"), b("abc")) == b("abc")
+    assert fst.BytesValues.common(b("abcde"), b("abfgh")) == b("ab")
+    assert fst.BytesValues.common(b("abcde"), b("ab")) == b("ab")
+    assert fst.BytesValues.common(b("ab"), b("abcde")) == b("ab")
+    assert fst.BytesValues.common(None, b("abcde")) is None
+    assert fst.BytesValues.common(b("ab"), None) is None
 
     a1 = array("i", [0, 12, 123, 42])
     a2 = array("i", [0, 12, 420])
     cm = array("i", [0, 12])
-    assert_equal(fst.ArrayValues.common(a1, a1), a1)
-    assert_equal(fst.ArrayValues.common(a1, a2), cm)
-    assert_equal(fst.ArrayValues.common(a2, a1), cm)
-    assert_equal(fst.ArrayValues.common(None, a1), None)
-    assert_equal(fst.ArrayValues.common(a2, None), None)
+    assert fst.ArrayValues.common(a1, a1) == a1
+    assert fst.ArrayValues.common(a1, a2) == cm
+    assert fst.ArrayValues.common(a2, a1) == cm
+    assert fst.ArrayValues.common(None, a1) is None
+    assert fst.ArrayValues.common(a2, None) is None
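The loop above fixes a None convention shared by all the value types: None means "no value", common() is None whenever either side is, and add()/subtract() treat a None right-hand side as identity. For IntValues, the asserted cases are consistent with min/plus/minus semantics; a hypothetical sketch (not the Whoosh classes):

    def int_common(a, b):
        # None absorbs; otherwise the shared (smaller) value, consistent
        # with common(100, 20) == 20 asserted above
        if a is None or b is None:
            return None
        return min(a, b)

    def int_add(a, b):
        # None acts as identity on either side
        if a is None:
            return b
        if b is None:
            return a
        return a + b

    assert int_common(100, 20) == 20
    assert int_add(20, 80) == 100
    assert int_add(None, None) is None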
 
 
 def _fst_roundtrip(domain, t):
         f = st.open_file("test")
         gr = fst.GraphReader(f, vtype=t)
         cur = fst.Cursor(gr)
-        assert_equal(list(cur.flatten_v()), domain)
+        assert list(cur.flatten_v()) == domain
         f.close()
 
 
         gwrite(words, st)
         gr = greader(st)
         cur = fst.Cursor(gr)
-        assert_equal(list(cur.flatten_strings()), words)
+        assert list(cur.flatten_strings()) == words
         gr.close()
 
 
         for key in sample:
             cur.reset()
             cur.find_path(key)
-            assert_equal(cur.prefix_bytes(), key)
+            assert cur.prefix_bytes() == key
         gr.close()
 
 
 
     cur1.find_path(b("blo"))
     cur2.find_path(b("glo"))
-    assert_equal(cur1.stack[-1].target, cur2.stack[-1].target)
+    assert cur1.stack[-1].target == cur2.stack[-1].target
 
 
 def test_fields():
         gr = fst.GraphReader(st.open_file("test"))
         cur1 = fst.Cursor(gr, gr.root("f1"))
         cur2 = fst.Cursor(gr, gr.root("f2"))
-        assert_equal(list(cur1.flatten_strings()), ["a", "aa", "ab"])
-        assert_equal(list(cur2.flatten_strings()), ["ba", "baa", "bab"])
+        assert list(cur1.flatten_strings()) == ["a", "aa", "ab"]
+        assert list(cur2.flatten_strings()) == ["ba", "baa", "bab"]
         gr.close()
 
 
         gr = greader(st)
         s = set(fst.within(gr, "01", k=1))
         gr.close()
-    assert_equal(s, set(["0", "00", "01", "011", "010",
-                         "001", "10", "101", "1", "11"]))
+    assert s == set(["0", "00", "01", "011", "010", "001", "10", "101", "1", "11"])
 
 
 def test_within_match():
     st = gwrite(enlist("abc def ghi"))
     gr = greader(st)
-    assert_equal(set(fst.within(gr, "def")), set(["def"]))
+    assert set(fst.within(gr, "def")) == set(["def"])
 
 
 def test_within_insert():
     st = gwrite(enlist("00 01 10 11"))
     gr = greader(st)
     s = set(fst.within(gr, "0"))
-    assert_equal(s, set(["00", "01", "10"]))
+    assert s == set(["00", "01", "10"])
 
 
 def test_within_delete():
     st = gwrite(enlist("abc def ghi"))
     gr = greader(st)
-    assert_equal(set(fst.within(gr, "df")), set(["def"]))
+    assert set(fst.within(gr, "df")) == set(["def"])
 
     st = gwrite(enlist("0"))
     gr = greader(st)
-    assert_equal(list(fst.within(gr, "01")), ["0"])
+    assert list(fst.within(gr, "01")) == ["0"]
 
 
 def test_within_replace():
     st = gwrite(enlist("abc def ghi"))
     gr = greader(st)
-    assert_equal(set(fst.within(gr, "dez")), set(["def"]))
+    assert set(fst.within(gr, "dez")) == set(["def"])
 
     st = gwrite(enlist("00 01 10 11"))
     gr = greader(st)
     s = set(fst.within(gr, "00"))
-    assert_equal(s, set(["00", "10", "01"]), s)
+    assert s == set(["00", "10", "01"])
 
 
 def test_within_transpose():
     st = gwrite(enlist("abc def ghi"))
     gr = greader(st)
     s = set(fst.within(gr, "dfe"))
-    assert_equal(s, set(["def"]))
+    assert s == set(["def"])
 
 
 def test_within_k2():
     st = gwrite(enlist("abc bac cba"))
     gr = greader(st)
     s = set(fst.within(gr, "cb", k=2))
-    assert_equal(s, set(["abc", "cba"]))
+    assert s == set(["abc", "cba"])
 
 
 def test_within_prefix():
     st = gwrite(enlist("aabc aadc babc badc"))
     gr = greader(st)
     s = set(fst.within(gr, "aaxc", prefix=2))
-    assert_equal(s, set(["aabc", "aadc"]))
+    assert s == set(["aabc", "aadc"])
 
 
 def test_skip():
     cur = gr.cursor()
     while not cur.stopped():
         cur.follow()
-    assert_equal(cur.prefix_bytes(), b("abcd"))
+    assert cur.prefix_bytes() == b("abcd")
     assert cur.accept()
 
     cur = gr.cursor()
     while not cur.stopped():
         cur.follow()
-    assert_equal(cur.prefix_bytes(), b("abcd"))
+    assert cur.prefix_bytes() == b("abcd")
     cur.skip_to(b("cdaa"))
-    assert_equal(cur.peek_key_bytes(), b("cdqr1"))
-    assert_equal(cur.prefix_bytes(), b("cdq"))
+    assert cur.peek_key_bytes() == b("cdqr1")
+    assert cur.prefix_bytes() == b("cdq")
 
     cur = gr.cursor()
     while not cur.stopped():
     gw.close()
 
     cur = fst.GraphReader(st.open_file("test")).cursor()
-    assert_equal(list(cur.flatten()), domain)
+    assert list(cur.flatten()) == domain
 
 
 def test_insert_unicode():
     gw.close()
 
     cur = fst.GraphReader(st.open_file("test")).cursor()
-    assert_equal(list(cur.flatten_strings()), domain)
+    assert list(cur.flatten_strings()) == domain
 
 
 def test_within_unicode():
 
     gr = fst.GraphReader(st.open_file("test"))
     s = list(fst.within(gr, u("\uc774.\ud76c")))
-    assert_equal(s, [u("\uc774\uc124\ud76c")])
+    assert s == [u("\uc774\uc124\ud76c")]

File tests/test_fields.py

 from __future__ import with_statement
 from datetime import datetime, timedelta
 
-from nose.tools import assert_equal, assert_not_equal  # @UnresolvedImport
-from nose.tools import assert_raises  # @UnresolvedImport
+import pytest
 
 from whoosh import fields, qparser, query
 from whoosh.compat import long_type, u, b, xrange
 def test_schema_eq():
     a = fields.Schema()
     b = fields.Schema()
-    assert_equal(a, b)
+    assert a == b
 
     a = fields.Schema(id=fields.ID)
     b = a.copy()
-    assert_equal(a["id"], b["id"])
-    assert_equal(a, b)
+    assert a["id"] == b["id"]
+    assert a == b
 
     c = fields.Schema(id=fields.TEXT)
-    assert_not_equal(a, c)
+    assert a != c
 
 
 def test_creation1():
     s.add("quick", fields.NGRAM)
     s.add("note", fields.STORED)
 
-    assert_equal(s.names(), ["content", "note", "path", "quick", "tags",
-                             "title"])
+    assert s.names() == ["content", "note", "path", "quick", "tags", "title"]
     assert "content" in s
     assert "buzz" not in s
     assert isinstance(s["tags"], fields.KEYWORD)
                       b=fields.ID,
                       c=fields.KEYWORD(scorable=True))
 
-    assert_equal(s.names(), ["a", "b", "c"])
+    assert s.names() == ["a", "b", "c"]
     assert "a" in s
     assert "b" in s
     assert "c" in s
         date = fields.DATETIME
 
     ix = RamStorage().create_index(MySchema)
-    assert_equal(ix.schema.names(), ["content", "date", "path", "title"])
+    assert ix.schema.names() == ["content", "date", "path", "title"]
 
     ix = RamStorage().create_index(MySchema())
-    assert_equal(ix.schema.names(), ["content", "date", "path", "title"])
+    assert ix.schema.names() == ["content", "date", "path", "title"]
 
-    assert_raises(fields.FieldConfigurationError, RamStorage().create_index,
-                  object())
+    with pytest.raises(fields.FieldConfigurationError):
+        RamStorage().create_index(object())
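The declarative style under test: fields are class attributes on a subclass of fields.SchemaClass (the declarative base in Whoosh), and create_index accepts the class itself, an instance, or a plain Schema, while anything else raises FieldConfigurationError. A minimal sketch with illustrative field choices:

    from whoosh import fields
    from whoosh.filedb.filestore import RamStorage

    class MiniSchema(fields.SchemaClass):
        path = fields.ID(stored=True)
        content = fields.TEXT

    ix = RamStorage().create_index(MiniSchema)    # the class itself works
    ix = RamStorage().create_index(MiniSchema())  # so does an instance
    assert ix.schema.names() == ["content", "path"]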
 
 
 def test_declarative_inherit():
         title = fields.TEXT
 
     s = Grandchild()