Commits

Matt Chaput committed a849acd

Removed unused imports that were breaking under Python 2.5. Fixes issue #187.
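
The removed "from __future__ import print_function" lines are compile-time feature requests, and the print_function feature only exists from Python 2.6 onward, so Python 2.5 refused to even compile these test modules. A minimal sketch of the failure mode (illustration only, not part of the commit):

import sys

source = "from __future__ import print_function\n"
try:
    compile(source, "<check>", "exec")
    sys.stdout.write("print_function accepted on %s\n" % sys.version.split()[0])
except SyntaxError:
    # Python 2.5 reports: "future feature print_function is not defined"
    sys.stdout.write("print_function rejected on %s\n" % sys.version.split()[0])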

Files changed (2)

tests/test_parsing.py

-from __future__ import print_function
-
 from nose.tools import assert_equal  #@UnresolvedImport
 
 from whoosh import analysis, fields, query
                                         plugins.RangePlugin()])
     ns = p.tag("a [b to c} d")
     assert_equal(repr(ns), "<AndGroup <None:'a'>, < >, <None:['b' 'c'}>, < >, <None:'d'>>")
-    
+
     assert_equal(repr(p.process("a {b to]")), "<AndGroup <None:'a'>, <None:{'b' None]>>")
     assert_equal(repr(p.process("[to c] d")), "<AndGroup <None:[None 'c']>, <None:'d'>>")
     assert_equal(repr(p.process("[to]")), "<AndGroup <None:[None None]>>")
-    
+
 def test_sq_range():
     p = default.QueryParser("t", None, [plugins.WhitespacePlugin(),
                                         plugins.SingleQuotePlugin(),
 def test_groups():
     p = default.QueryParser("t", None, [plugins.WhitespacePlugin(),
                                         plugins.GroupPlugin()])
-    
+
     ns = p.process("a ((b c) d) e")
     assert_equal(repr(ns), "<AndGroup <None:'a'>, <AndGroup <AndGroup <None:'b'>, <None:'c'>>, <None:'d'>>, <None:'e'>>")
 
     assert_equal(repr(ns), "<AndGroup <None:'a'>, <^ 3.0>>")
     ns = p.filterize(ns)
     assert_equal(repr(ns), "<AndGroup <None:'a' ^3.0>>")
-    
+
     assert_equal(repr(p.process("a (b c)^2.5")), "<AndGroup <None:'a'>, <AndGroup <None:'b'>, <None:'c'> ^2.5>>")
     assert_equal(repr(p.process("a (b c)^.5 d")), "<AndGroup <None:'a'>, <AndGroup <None:'b'>, <None:'c'> ^0.5>, <None:'d'>>")
     assert_equal(repr(p.process("^2 a")), "<AndGroup <None:'^2'>, <None:'a'>>")
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, "content")
     assert_equal(q.text, "test")
-    
+
     mq = default.MultifieldParser(("title", "content"), s)
     q = mq.parse(u("test"))
     assert_equal(q.__class__, query.Or)
     assert_equal(q[1].fieldname, "content")
     assert_equal(q[0].text, "test")
     assert_equal(q[1].text, "test")
-    
+
     q = mq.parse(u("title:test"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, "title")
 def test_multifield():
     schema = fields.Schema(content=fields.TEXT, title=fields.TEXT,
                            cat=fields.KEYWORD, date=fields.DATETIME)
-    
+
     qs = u("a (b c cat:d) OR (b c cat:e)")
     qp = default.MultifieldParser(['x', 'y'], schema)
-    
+
     q = qp.parse(qs)
     assert_equal(text_type(q), "((x:a OR y:a) AND (((x:b OR y:b) AND (x:c OR y:c) AND cat:d) OR ((x:b OR y:b) AND (x:c OR y:c) AND cat:e)))")
 
     fieldmap = {'nisbah': [u('\u0646\u0633\u0628\u0629')],
                 'abc123': ['xyz']}
     qp.add_plugin(plugins.FieldAliasPlugin(fieldmap))
-    
+
     q = qp.parse(u("abc123:456"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, u('abc123'))
     assert_equal(q.text, u('456'))
-    
+
     q = qp.parse(u("abc123:456 def"))
     assert_equal(text_type(q), u("(abc123:456 AND content:def)"))
-    
+
     q = qp.parse(u('\u0646\u0633\u0628\u0629:\u0627\u0644\u0641\u0644\u0633\u0637\u064a\u0646\u064a'))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, u('nisbah'))
     assert_equal(q.text, u('\u0627\u0644\u0641\u0644\u0633\u0637\u064a\u0646\u064a'))
-    
+
     q = qp.parse(u("abc123 (xyz:123 OR qrs)"))
     assert_equal(text_type(q), "(content:abc123 AND (abc123:123 OR content:qrs))")
 
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, "url")
     assert_equal(q.text, "test")
-    
+
     q = qp.parse(u("url: test"))
     assert_equal(q.__class__, query.And)
     assert_equal(q[0].__class__, query.Term)
     assert_equal(q[1].fieldname, "content")
     assert_equal(q[0].text, "url")
     assert_equal(q[1].text, "test")
-    
+
     q = qp.parse(u("url:"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, "content")
     assert_equal(q.text, "url")
-    
+
     s = fields.Schema(foo=fields.KEYWORD)
     qp = default.QueryParser("foo", s)
     q = qp.parse(u("blah:"))
     assert_equal(q.text, "blah:")
 
 def test_andor():
-    qp  = default.QueryParser("a", None)
+    qp = default.QueryParser("a", None)
     q = qp.parse("a AND b OR c AND d OR e AND f")
     assert_equal(text_type(q), "((a:a AND a:b) OR (a:c AND a:d) OR (a:e AND a:f))")
-    
+
     q = qp.parse("aORb")
     assert_equal(q, query.Term("a", "aORb"))
-    
+
     q = qp.parse("aOR b")
     assert_equal(q, query.And([query.Term("a", "aOR"), query.Term("a", "b")]))
-    
+
     q = qp.parse("a ORb")
     assert_equal(q, query.And([query.Term("a", "a"), query.Term("a", "ORb")]))
-    
+
     assert_equal(qp.parse("OR"), query.Term("a", "OR"))
 
 def test_andnot():
     assert_equal(q.b.__class__, query.Term)
     assert_equal(q.a.text, "this")
     assert_equal(q.b.text, "that")
-    
+
     q = qp.parse(u("foo ANDNOT bar baz"))
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 2)
     assert_equal(q[0].__class__, query.AndNot)
     assert_equal(q[1].__class__, query.Term)
-    
+
     q = qp.parse(u("foo fie ANDNOT bar baz"))
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 3)
     assert_equal(q[0].__class__, query.Term)
     assert_equal(q[1].__class__, query.AndNot)
     assert_equal(q[2].__class__, query.Term)
-    
+
     q = qp.parse(u("a AND b ANDNOT c"))
     assert_equal(q.__class__, query.AndNot)
     assert_equal(text_type(q), "((content:a AND content:b) ANDNOT content:c)")
     assert_equal(q[1].fieldname, "fn")
     assert_equal(q[2].text, "5.67")
     assert_equal(q[3].text, "hi^5x")
-    
+
     q = qp.parse("alfa (bravo OR charlie)^2.5 ^3")
-    print("q=", q)
     assert_equal(len(q), 3)
     assert_equal(q[0].boost, 1.0)
     assert_equal(q[1].boost, 2.5)
     assert_equal(q[2].text, "?star*s?")
     assert_equal(q[3].__class__, query.Term)
     assert_equal(q[3].text, "test")
-    
+
 def test_wildcard2():
     qp = default.QueryParser("content", None)
     q = qp.parse(u("*the?e*"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "*the?e*")
-    
+
 def test_parse_fieldname_underscores():
     s = fields.Schema(my_name=fields.ID(stored=True), my_value=fields.TEXT)
     qp = default.QueryParser("my_value", schema=s)
     q = qp.parse(u("word*"))
     assert_equal(q.__class__, query.Prefix)
     assert_equal(q.text, "word")
-    
+
     q = qp.parse(u("first* second"))
     assert_equal(q[0].__class__, query.Prefix)
     assert_equal(q[0].text, "first")
     assert_equal(q[0].text, "hell's")
     assert_equal(q[1].text, "hot")
     assert_equal(q[2].text, "i stab at thee")
-    
+
     q = qp.parse("alfa zulu:'bravo charlie' delta")
     assert_equal(q.__class__.__name__, 'And')
     assert_equal(len(q), 3)
     assert_equal((q[0].fieldname, q[0].text), ("text", "alfa"))
     assert_equal((q[1].fieldname, q[1].text), ("zulu", "bravo charlie"))
     assert_equal((q[2].fieldname, q[2].text), ("text", "delta"))
-    
+
     q = qp.parse("The rest 'is silence")
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 4)
-    assert_equal([t.text for t in q.subqueries], ["The", "rest", "'is" ,"silence"])
-    
+    assert_equal([t.text for t in q.subqueries], ["The", "rest", "'is", "silence"])
+
     q = qp.parse("I don't like W's stupid face")
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 6)
-    assert_equal([t.text for t in q.subqueries], ["I", "don't", "like" ,"W's", "stupid", "face"])
-    
+    assert_equal([t.text for t in q.subqueries], ["I", "don't", "like", "W's", "stupid", "face"])
+
     q = qp.parse("I forgot the drinkin' in '98")
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 6)
-    assert_equal([t.text for t in q.subqueries], ["I", "forgot", "the" ,"drinkin'", "in", "'98"])
+    assert_equal([t.text for t in q.subqueries], ["I", "forgot", "the", "drinkin'", "in", "'98"])
 
 #    def test_escaping():
 #        qp = default.QueryParser("text", None)
 #        q = qp.parse(u("ab\u005c\u005c*"))
 #        assert q.__class__, query.Wildcard)
 #        assert q.text, u("ab\u005c*"))
-    
+
 def test_phrase_phrase():
     qp = default.QueryParser("content", None)
     q = qp.parse('"alfa bravo" "charlie delta echo"^2.2 test:"foxtrot golf"')
     assert_equal(q[2].__class__, query.Phrase)
     assert_equal(q[2].words, ["foxtrot", "golf"])
     assert_equal(q[2].fieldname, "test")
-    
+
 def test_weird_characters():
     qp = default.QueryParser("content", None)
     q = qp.parse(u(".abcd@gmail.com"))
     q = qp.parse(u("?"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "?")
-    
+
 def test_euro_chars():
     schema = fields.Schema(text=fields.TEXT)
     qp = default.QueryParser("text", schema)
     assert_equal(q.text, u("stra\xdfe"))
 
 def test_star():
-    schema = fields.Schema(text = fields.TEXT(stored=True))
+    schema = fields.Schema(text=fields.TEXT(stored=True))
     qp = default.QueryParser("text", schema)
     q = qp.parse(u("*"))
     assert_equal(q.__class__, query.Every)
     q = qp.parse(u("*h?ll*"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "*h?ll*")
-    
+
     q = qp.parse(u("h?pe"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "h?pe")
-    
+
     q = qp.parse(u("*? blah"))
     assert_equal(q.__class__, query.And)
     assert_equal(q[0].__class__, query.Wildcard)
     assert_equal(q[0].text, "*?")
     assert_equal(q[1].__class__, query.Term)
     assert_equal(q[1].text, "blah")
-    
+
     q = qp.parse(u("*ending"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "*ending")
-    
+
     q = qp.parse(u("*q"))
     assert_equal(q.__class__, query.Wildcard)
     assert_equal(q.text, "*q")
 def test_star_field():
     schema = fields.Schema(text=fields.TEXT)
     qp = default.QueryParser("text", schema)
-    
+
     q = qp.parse(u("*:*"))
     assert_equal(q.__class__, query.Every)
     assert_equal(q.fieldname, None)
-    
+
     # This gets parsed to a term with text="*:test" which is then analyzed down
     # to just "test"
     q = qp.parse(u("*:test"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.fieldname, "text")
     assert_equal(q.text, "test")
-    
+
 def test_range_query():
-    schema = fields.Schema(name=fields.ID(stored=True), text = fields.TEXT(stored=True))
+    schema = fields.Schema(name=fields.ID(stored=True), text=fields.TEXT(stored=True))
     qp = default.QueryParser("text", schema)
-    
+
     q = qp.parse(u("[alfa to bravo}"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "alfa")
     assert_equal(q.end, "bravo")
     assert_equal(q.startexcl, False)
     assert_equal(q.endexcl, True)
-    
+
     q = qp.parse(u("['hello there' to 'what ever']"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "hello there")
     assert_equal(q.end, "what ever")
     assert_equal(q.startexcl, False)
     assert_equal(q.endexcl, False)
-    
+
     q = qp.parse(u("name:{'to' to 'b'}"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "to")
     assert_equal(q.end, "b")
     assert_equal(q.startexcl, True)
     assert_equal(q.endexcl, True)
-    
+
     q = qp.parse(u("name:{'a' to 'to']"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "a")
     assert_equal(q.end, "to")
     assert_equal(q.startexcl, True)
     assert_equal(q.endexcl, False)
-    
+
     q = qp.parse(u("name:[a to to]"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "a")
     assert_equal(q.end, "to")
-    
+
     q = qp.parse(u("name:[to to b]"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "to")
     assert_equal(q.end, "b")
-    
+
     q = qp.parse(u("[alfa to alfa]"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.text, "alfa")
-    
+
     q = qp.parse(u("Ind* AND name:[d TO]"))
     assert_equal(q.__class__, query.And)
     assert_equal(q[0].__class__, query.Prefix)
     assert_equal(q[0].text, "ind")
     assert_equal(q[1].start, "d")
     assert_equal(q[1].fieldname, "name")
-    
+
     q = qp.parse(u("name:[d TO]"))
     assert_equal(q.__class__, query.TermRange)
     assert_equal(q.start, "d")
 def test_numeric_range():
     schema = fields.Schema(id=fields.STORED, number=fields.NUMERIC)
     qp = default.QueryParser("number", schema)
-    
+
     teststart = 40
     testend = 100
-    
+
     q = qp.parse("[%s to *]" % teststart)
     assert_equal(q, query.NullQuery)
-    
+
     q = qp.parse("[%s to]" % teststart)
     assert_equal(q.__class__, query.NumericRange)
     assert_equal(q.start, teststart)
     assert_equal(q.end, None)
-    
+
     q = qp.parse("[to %s]" % testend)
     assert_equal(q.__class__, query.NumericRange)
     assert_equal(q.start, None)
     assert_equal(q.end, testend)
-    
+
     q = qp.parse("[%s to %s]" % (teststart, testend))
     assert_equal(q.__class__, query.NumericRange)
     assert_equal(q.start, teststart)
     assert_equal(q.end, testend)
-    
+
 def test_regressions():
     qp = default.QueryParser("f", None)
-    
+
     # From 0.3.18, these used to require escaping. Mostly good for
     # regression testing.
     assert_equal(qp.parse(u("re-inker")), query.Term("f", "re-inker"))
             == query.And([query.Term("f", "daler-rowney"),
                           query.Term("f", "pearl"),
                           query.Term("f", "bell bronze")]))
-    
+
     q = qp.parse(u('22" BX'))
     assert_equal(q, query.And([query.Term("f", '22"'), query.Term("f", "BX")]))
-    
+
 def test_empty_ranges():
     schema = fields.Schema(name=fields.TEXT, num=fields.NUMERIC,
                            date=fields.DATETIME)
     qp = default.QueryParser("text", schema)
-    
+
     for fname in ("name", "date"):
         q = qp.parse(u("%s:[to]") % fname)
         assert_equal(q.__class__, query.Every)
     # Need an analyzer that won't mangle a URL
     a = analysis.SimpleAnalyzer("\\S+")
     schema = fields.Schema(id=fields.ID, text=fields.TEXT(analyzer=a))
-    
+
     qp = default.QueryParser("text", schema)
     q = qp.parse(u("id:/code http://localhost/"))
     assert_equal(q.__class__, query.And)
     assert_equal(q[1].text, "http://localhost/")
 
 def test_stopped():
-    schema = fields.Schema(text = fields.TEXT)
+    schema = fields.Schema(text=fields.TEXT)
     qp = default.QueryParser("text", schema)
     q = qp.parse(u("a b"), debug=True)
     assert_equal(q, query.NullQuery)
-    
+
 def test_analyzing_terms():
     schema = fields.Schema(text=fields.TEXT(analyzer=analysis.StemmingAnalyzer()))
     qp = default.QueryParser("text", schema)
     q = qp.parse(u("Indexed!"))
     assert_equal(q.__class__, query.Term)
     assert_equal(q.text, "index")
-    
+
 def test_simple():
     parser = default.SimpleParser("x", None)
     q = parser.parse(u("alfa bravo charlie delta"))
     assert_equal(text_type(q), "(x:alfa OR x:bravo OR x:charlie OR x:delta)")
-    
+
     q = parser.parse(u("alfa +bravo charlie delta"))
     assert_equal(text_type(q), "(x:bravo ANDMAYBE (x:alfa OR x:charlie OR x:delta))")
-    
+
     q = parser.parse(u("alfa +bravo -charlie delta"))
     assert_equal(text_type(q), "((x:bravo ANDMAYBE (x:alfa OR x:delta)) ANDNOT x:charlie)")
-    
+
     q = parser.parse(u("- alfa +bravo + delta"))
     assert_equal(text_type(q), "((x:bravo AND x:delta) ANDNOT x:alfa)")
 
     parser = default.DisMaxParser({"body": 0.8, "title": 2.5}, None)
     q = parser.parse(u("alfa bravo charlie"))
     assert_equal(text_type(q), "(DisMax(body:alfa^0.8 title:alfa^2.5) OR DisMax(body:bravo^0.8 title:bravo^2.5) OR DisMax(body:charlie^0.8 title:charlie^2.5))")
-    
+
     q = parser.parse(u("alfa +bravo charlie"))
     assert_equal(text_type(q), "(DisMax(body:bravo^0.8 title:bravo^2.5) ANDMAYBE (DisMax(body:alfa^0.8 title:alfa^2.5) OR DisMax(body:charlie^0.8 title:charlie^2.5)))")
-    
+
     q = parser.parse(u("alfa -bravo charlie"))
     assert_equal(text_type(q), "((DisMax(body:alfa^0.8 title:alfa^2.5) OR DisMax(body:charlie^0.8 title:charlie^2.5)) ANDNOT DisMax(body:bravo^0.8 title:bravo^2.5))")
-    
+
     q = parser.parse(u("alfa -bravo +charlie"))
     assert_equal(text_type(q), "((DisMax(body:charlie^0.8 title:charlie^2.5) ANDMAYBE DisMax(body:alfa^0.8 title:alfa^2.5)) ANDNOT DisMax(body:bravo^0.8 title:bravo^2.5))")
 
 def test_many_clauses():
     qs = "1" + (" OR 1" * 1000)
-    
+
     parser = default.QueryParser("content", None)
     parser.parse(qs)
-    
+
 def test_roundtrip():
     parser = default.QueryParser("a", None)
     q = parser.parse(u("a OR ((b AND c AND d AND e) OR f OR g) ANDNOT h"))
     assert_equal(text_type(q), "((a:a OR (a:b AND a:c AND a:d AND a:e) OR a:f OR a:g) ANDNOT a:h)")
-    
+
 def test_ngrams():
     schema = fields.Schema(grams=fields.NGRAM)
     parser = default.QueryParser('grams', schema)
     parser.remove_plugin_class(plugins.WhitespacePlugin)
-    
+
     q = parser.parse(u("Hello There"))
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 8)
     assert_equal([sq.text for sq in q], ["hell", "ello", "llo ", "lo t", "o th", " the", "ther", "here"])
-    
+
 def test_ngramwords():
     schema = fields.Schema(grams=fields.NGRAMWORDS(queryor=True))
     parser = default.QueryParser('grams', schema)
-    
+
     q = parser.parse(u("Hello Tom"))
     assert_equal(q.__class__, query.And)
     assert_equal(q[0].__class__, query.Or)
     schema = fields.Schema(text=textfield)
     parser = default.QueryParser('text', schema)
     qstring = u("chaw-bacon")
-    
+
     texts = list(schema["text"].process_text(qstring))
     assert_equal(texts, ["chaw", "bacon"])
-    
+
     q = parser.parse(qstring)
-    print("q=", q.__unicode__())
     assert_equal(q.__class__, query.And)
     assert_equal(len(q), 2)
     assert_equal(q[0].__class__, query.Term)
     schema = fields.Schema(text=textfield)
     parser = default.QueryParser('text', schema)
     qstring = u("chaw-bacon")
-    
+
     texts = list(schema["text"].process_text(qstring))
     assert_equal(texts, ["chaw", "bacon"])
-    
+
     q = parser.parse(qstring)
     assert_equal(q.__class__, query.Or)
     assert_equal(len(q), 2)
     schema = fields.Schema(text=textfield)
     parser = default.QueryParser("text", schema)
     qstring = u("chaw-bacon")
-    
+
     texts = list(schema["text"].process_text(qstring))
     assert_equal(texts, ["chaw", "bacon"])
-    
+
     q = parser.parse(qstring)
     assert_equal(q.__class__, query.Phrase)
 
     parser = default.QueryParser("text", schema)
     q = parser.parse(u"foo bar")
     assert_equal(q.__unicode__(), "(text:foo AND text:bar)")
-    
+
     q = parser.parse(u"'foo bar'")  # single quotes
     assert_equal(q.__unicode__(), "(text:foo OR text:bar)")
 
 def test_operator_queries():
     qp = default.QueryParser("f", None)
-    
+
     q = qp.parse("a AND b OR c AND d")
     assert_equal(text_type(q), "((f:a AND f:b) OR (f:c AND f:d))")
-    
+
     q = qp.parse("a OR b OR c OR d")
     assert_equal(text_type(q), "(f:a OR f:b OR f:c OR f:d)")
-    
+
     q = qp.parse("a ANDMAYBE b ANDNOT c REQUIRE d")
     assert_equal(text_type(q), "((f:a ANDMAYBE (f:b ANDNOT f:c)) REQUIRE f:d)")
 
 #    p = make_parser(right_andmaybe)
 #    q = p.parse("(a ANDMAYBE b) ANDMAYBE (c ANDMAYBE d)")
 #    assert_equal(text_type(q), "((f:a ANDMAYBE f:b) ANDMAYBE (f:c ANDMAYBE f:d))")
-    
+
 def test_not_assoc():
     qp = default.QueryParser("text", None)
     q = qp.parse(u("a AND NOT b OR c"))
     assert_equal(text_type(q), "((text:a AND NOT text:b) OR text:c)")
-    
+
     qp = default.QueryParser("text", None)
     q = qp.parse(u("a NOT (b OR c)"))
     assert_equal(text_type(q), "(text:a AND NOT (text:b OR text:c))")
-    
+
 def test_fieldname_space():
     qp = default.QueryParser("a", None)
     q = qp.parse("Man Ray: a retrospective")
     assert_equal(text_type(q), "(a:Man AND a:Ray: AND a:a AND a:retrospective)")
-    
+
 def test_fieldname_fieldname():
     qp = default.QueryParser("a", None)
     q = qp.parse("a:b:")
     assert_equal(q, query.Term("a", "b:"))
-    
+
 def test_paren_fieldname():
-    schema = fields.Schema(kind=fields.ID, content=fields.TEXT) 
+    schema = fields.Schema(kind=fields.ID, content=fields.TEXT)
 
     qp = default.QueryParser("content", schema)
     q = qp.parse(u("(kind:1d565 OR kind:7c584) AND (stuff)"))
 def test_star_paren():
     qp = default.QueryParser("content", None)
     q = qp.parse(u("(*john*) AND (title:blog)"))
-    
+
     assert_equal(q.__class__, query.And)
     assert_equal(q[0].__class__, query.Wildcard)
     assert_equal(q[1].__class__, query.Term)
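
The diff above also drops two leftover debugging print() calls, presumably the reason the __future__ import was there in the first place. Without the import, Python 2 parses a multi-argument print(...) as a print statement of a tuple, so those calls printed a tuple rather than the intended output. A small sketch of the difference (illustration only, not part of the commit; the query value is a stand-in):

q = "<AndGroup <None:'a'>>"    # stand-in for a parsed query object
print("q=", q)                 # plain Python 2: prints the tuple ('q=', "<AndGroup <None:'a'>>")
                               # Python 3 or with the future import: prints q= <AndGroup <None:'a'>>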

tests/test_queries.py

-from __future__ import print_function
-
 from nose.tools import assert_equal, assert_not_equal  #@UnresolvedImport
 
 import copy
 def test_existing_terms():
     s = fields.Schema(key=fields.ID, value=fields.TEXT)
     ix = RamStorage().create_index(s)
-    
+
     w = ix.writer()
     w.add_document(key=u("a"), value=u("alfa bravo charlie delta echo"))
     w.add_document(key=u("b"), value=u("foxtrot golf hotel india juliet"))
     w.commit()
-    
+
     r = ix.reader()
     q = QueryParser("value", None).parse(u('alfa hotel tango "sierra bravo"'))
-    
+
     ts = q.existing_terms(r, phrases=False)
     assert_equal(sorted(ts), [("value", "alfa"), ("value", "hotel")])
-    
+
     ts = q.existing_terms(r)
     assert_equal(sorted(ts), [("value", "alfa"), ("value", "bravo"), ("value", "hotel")])
-    
+
     ts = set()
     q.existing_terms(r, ts, reverse=True)
     assert_equal(sorted(ts), [("value", "sierra"), ("value", "tango")])
 def test_wildcard_existing_terms():
     s = fields.Schema(key=fields.ID, value=fields.TEXT)
     ix = RamStorage().create_index(s)
-    
+
     w = ix.writer()
     w.add_document(key=u("a"), value=u("alfa bravo bear charlie delta"))
     w.add_document(key=u("a"), value=u("boggle echo render rendering renders"))
     w.commit()
     r = ix.reader()
     qp = QueryParser("value", ix.schema)
-    
+
     def words(terms):
         z = []
         for t in terms:
             assert t[0] == "value"
             z.append(t[1])
         return " ".join(sorted(z))
-    
+
     q = qp.parse(u("b*"))
     ts = q.existing_terms(r)
     assert_equal(ts, set())
     ts = q.existing_terms(r, expand=True)
     assert_equal(words(ts), "bear boggle bravo")
-    
+
     q = qp.parse(u("[a TO f]"))
     ts = q.existing_terms(r)
     assert_equal(ts, set())
     ts = q.existing_terms(r, expand=True)
     assert_equal(words(ts), "alfa bear boggle bravo charlie delta echo")
-    
+
     q = query.Variations("value", "render")
     ts = q.existing_terms(r, expand=False)
     assert_equal(ts, set())
     ts = q.existing_terms(r, expand=True)
     assert_equal(words(ts), "render rendering renders")
-    
+
 def test_replace():
     q = And([Or([Term("a", "b"), Term("b", "c")], boost=1.2), Variations("a", "b", boost=2.0)])
     q = q.replace("a", "b", "BB")
             q.text = q.text.upper()
             return q
         return q.apply(visit)
-    
+
     before = And([Not(Term("a", u("b"))), Variations("a", u("c")), Not(FuzzyTerm("a", u("d")))])
     after = visit(before)
     assert_equal(after, And([Not(Term("a", u("B"))), Variations("a", u("C")),
                              Not(FuzzyTerm("a", u("D")))]))
-    
+
     def term2var(q):
         if isinstance(q, Term):
             return Variations(q.fieldname, q.text)
         if isinstance(q, Phrase):
             q.boost *= 2.0
         return q
-    
+
     before = And([Term("a", u("b")), Or([Term("c", u("d")), Phrase("a", [u("e"), u("f")])]),
                   Phrase("a", [u("g"), u("h")], boost=0.25)])
     after = before.accept(boost_phrases)
 def test_simplify():
     s = fields.Schema(k=fields.ID, v=fields.TEXT)
     ix = RamStorage().create_index(s)
-    
+
     w = ix.writer()
     w.add_document(k=u("1"), v=u("aardvark apple allan alfa bear bee"))
     w.add_document(k=u("2"), v=u("brie glue geewhiz goop julia"))
     w.commit()
-    
+
     r = ix.reader()
     q1 = And([Prefix("v", "b", boost=2.0), Term("v", "juliet")])
     q2 = And([Or([Term('v', u('bear'), boost=2.0), Term('v', u('bee'), boost=2.0),
                   Term('v', u('brie'), boost=2.0)]), Term('v', 'juliet')])
     assert_equal(q1.simplify(r), q2)
-    
+
 def test_merge_ranges():
     q = And([TermRange("f1", u("a"), None), TermRange("f1", None, u("z"))])
     assert_equal(q.normalize(), TermRange("f1", u("a"), u("z")))
-    
+
     q = And([NumericRange("f1", None, u("aaaaa")), NumericRange("f1", u("zzzzz"), None)])
     assert_equal(q.normalize(), q)
-    
+
     q = And([TermRange("f1", u("a"), u("z")), TermRange("f1", "b", "x")])
     assert_equal(q.normalize(), TermRange("f1", u("a"), u("z")))
-    
+
     q = And([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
     assert_equal(q.normalize(), TermRange("f1", u("f"), u("m")))
-    
+
     q = Or([TermRange("f1", u("a"), u("m")), TermRange("f1", u("f"), u("q"))])
     assert_equal(q.normalize(), TermRange("f1", u("a"), u("q")))
-    
+
     q = Or([TermRange("f1", u("m"), None), TermRange("f1", None, u("n"))])
     assert_equal(q.normalize(), Every("f1"))
-    
+
     q = And([Every("f1"), Term("f1", "a"), Variations("f1", "b")])
     assert_equal(q.normalize(), Every("f1"))
-    
+
     q = Or([Term("f1", u("q")), TermRange("f1", u("m"), None), TermRange("f1", None, u("n"))])
     assert_equal(q.normalize(), Every("f1"))
-    
+
     q = And([Or([Term("f1", u("a")), Term("f1", u("b"))]), Every("f1")])
     assert_equal(q.normalize(), Every("f1"))
-    
+
     q = And([Term("f1", u("a")), And([Or([Every("f1")])])])
     assert_equal(q.normalize(), Every("f1"))
-    
+
 def test_normalize_compound():
     def oq():
         return Or([Term("a", u("a")), Term("a", u("b"))])
         if level == 0:
             return oq()
         else:
-            return Or([nq(level-1), nq(level-1), nq(level-1)])
-    
+            return Or([nq(level - 1), nq(level - 1), nq(level - 1)])
+
     q = nq(7)
     q = q.normalize()
     assert_equal(q, Or([Term("a", u("a")), Term("a", u("b"))]))
 def test_duplicates():
     q = And([Term("a", u("b")), Term("a", u("b"))])
     assert_equal(q.normalize(), Term("a", u("b")))
-    
+
     q = And([Prefix("a", u("b")), Prefix("a", u("b"))])
     assert_equal(q.normalize(), Prefix("a", u("b")))
-    
+
     q = And([Variations("a", u("b")), And([Variations("a", u("b")), Term("a", u("b"))])])
     assert_equal(q.normalize(), And([Variations("a", u("b")), Term("a", u("b"))]))
-    
+
     q = And([Term("a", u("b")), Prefix("a", u("b")), Term("a", u("b"), boost=1.1)])
     assert_equal(q.normalize(), q)
-    
+
     # Wildcard without * or ? normalizes to Term
     q = And([Wildcard("a", u("b")), And([Wildcard("a", u("b")), Term("a", u("b"))])])
     assert_equal(q.normalize(), Term("a", u("b")))
         assert_equal(q1, q1a)
         assert_equal(hash(q1), hash(q1a))
         assert_not_equal(q1, q2)
-        
+
     do(Term("a", u("b"), boost=1.1), Term("a", u("b"), boost=1.5))
     do(And([Term("a", u("b")), Term("c", u("d"))], boost=1.1),
        And([Term("a", u("b")), Term("c", u("d"))], boost=1.5))
     # do(AndMaybe)
     # do(AndNot)
     # do(Otherwise)
-    
+
     do(SpanFirst(Term("a", u("b")), limit=1), SpanFirst(Term("a", u("b")), limit=2))
     do(SpanNear(Term("a", u("b")), Term("c", u("d"))),
        SpanNear(Term("a", u("b")), Term("c", u("e"))))
 
 def test_highlight_daterange():
     from datetime import datetime
-    
+
     schema = fields.Schema(id=fields.ID(unique=True, stored=True),
                            title=fields.TEXT(stored=True),
                            content=fields.TEXT(stored=True),
                            released=fields.DATETIME(stored=True))
     ix = RamStorage().create_index(schema)
-    
+
     w = ix.writer()
     w.update_document(
         id=u('1'),
         title=u('Life Aquatic'),
         content=u('A nautic film crew sets out to kill a gigantic shark.'),
-        released=datetime(2004,12,25)
+        released=datetime(2004, 12, 25)
     )
     w.update_document(
         id=u('2'),
         title=u('Darjeeling Limited'),
         content=u('Three brothers meet in India for a life changing train journey.'),
-        released=datetime(2007,10,27)
+        released=datetime(2007, 10, 27)
     )
     w.commit()
-    
+
     s = ix.searcher()
     r = s.search(Term('content', u('train')), terms=True)
     assert_equal(len(r), 1)
     assert_equal(r[0]["id"], "2")
     assert_equal(r[0].highlights("content"), 'for a life changing <b class="match term0">train</b> journey')
-    
-    r = s.search(DateRange('released', datetime(2007,1,1), None))
+
+    r = s.search(DateRange('released', datetime(2007, 1, 1), None))
     assert_equal(len(r), 1)
     assert_equal(r[0].highlights("content"), '')