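"Unit tests for the lupyne engine against a local PyLucene index (lupyne/test/local.py)."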
from future_builtins import map, zip
import unittest
import os
import tempfile, shutil
import itertools
import collections
import warnings
import datetime
import math
import bisect
import contextlib
import lucene
from lupyne import engine
from . import fixture

class typeAsPayload(engine.TokenFilter):
    "Custom implementation of lucene TypeAsPayloadTokenFilter."
    def setattrs(self):
        self.payload = self.type.encode('utf8')

@contextlib.contextmanager
def assertWarns(*categories):
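    "Assert that the block emits exactly the given warning categories, in order."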
    with warnings.catch_warnings(record=True) as messages:
        yield
    for message, category in itertools.izip_longest(messages, categories):
        assert issubclass(message.category, category), message

class Filter(lucene.PythonFilter):
    "Broken filter to test errors are raised."
    def getDocIdSet(self, indexReader):
        assert False

class BaseTest(unittest.TestCase):
    def setUp(self):
        self.tempdir = tempfile.mkdtemp(dir=os.path.dirname(__file__))
    def tearDown(self):
        shutil.rmtree(self.tempdir)

class TestCase(BaseTest):
    
    def testInterface(self):
        "Indexer and document interfaces."
        self.assertRaises(TypeError, engine.IndexSearcher)
        analyzer = lucene.StandardAnalyzer(lucene.Version.values()[-1])
        stemmer = engine.Analyzer(analyzer, lucene.PorterStemFilter, typeAsPayload)
        for token in stemmer.tokens('hello'):
            assert token.positionIncrement == 1
            assert engine.TokenFilter(lucene.EmptyTokenStream()).payload is None
            assert token.term == 'hello'
            assert token.type == token.payload == '<ALPHANUM>'
            assert token.offset == (0, 5)
            token.term = token.type = ''
            token.offset, token.positionIncrement = (0, 0), 0
        assert str(stemmer.parse('hellos', field=['body', 'title'])) == 'body:hello title:hello'
        assert str(stemmer.parse('hellos', field={'body': 1.0, 'title': 2.0})) == 'body:hello title:hello^2.0'
        indexer = engine.Indexer(analyzer=stemmer, version=lucene.Version.LUCENE_30, writeLockTimeout=100L)
        assert indexer.writeLockTimeout == 100
        self.assertRaises(lucene.JavaError, engine.Indexer, indexer.directory)
        indexer.set('text')
        indexer.set('name', store=True, index=False, boost=2.0)
        for field in indexer.fields['name'].items('sample'):
            assert isinstance(field, lucene.Field) and field.boost == 2.0
        indexer.set('tag', store=True, index=True)
        searcher = indexer.indexSearcher
        indexer.commit()
        assert searcher is indexer.indexSearcher
        assert not searcher.search(count=1)
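        # added documents are buffered and only become searchable after commit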
        indexer.add(text='hello worlds', name='sample', tag=['python', 'search'])
        assert len(indexer) == 1 and list(indexer) == []
        assert not indexer.optimized
        indexer.commit()
        assert searcher is not indexer.indexSearcher
        assert list(indexer) == [0]
        assert indexer.current and indexer.optimized
        assert 0 in indexer and 1 not in indexer
        doc = indexer[0]
        assert doc == {'tag': ['python', 'search'], 'name': ['sample']}
        assert doc['name'] == 'sample' and doc['tag'] == 'python'
        assert doc.dict('tag') == {'name': 'sample', 'tag': ['python', 'search']}
        assert doc.dict(name=None, missing=True) == {'name': 'sample', 'missing': True}
        self.assertRaises(KeyError, doc.__getitem__, 'key')
        assert doc.getlist('name') == ['sample'] and doc.getlist('key') == []
        assert indexer.get(0, 'name').dict() == {'name': 'sample'}
        assert not list(indexer.termvector(0, 'tag'))
        assert indexer.count('text', 'hello') == indexer.count('text:hello') == 1
        assert sorted(indexer.names()) == ['name', 'tag', 'text']
        assert sorted(indexer.names('indexed', isIndexed=True)) == ['tag', 'text']
        assert indexer.names('unindexed', isIndexed=False) == ['name']
        assert list(indexer.terms('text')) == ['hello', 'world']
        assert list(indexer.terms('text', 'h', 'v')) == ['hello']
        assert dict(indexer.terms('text', 'w', counts=True)) == {'world': 1}
        assert list(indexer.terms('test')) == []
        assert list(indexer.docs('text', 'hello')) == [0]
        assert list(indexer.docs('text', 'hi')) == []
        assert list(indexer.docs('text', 'world', counts=True)) == [(0, 1)]
        assert list(indexer.positions('text', 'world')) == [(0, [1])]
        assert list(indexer.positions('text', 'world', payloads=True)) == [(0, [(1, '<ALPHANUM>')])]
        hits = indexer.search('text:hello')
        assert len(hits) == hits.count == 1
        assert hits.ids == [0]
        score, = hits.scores
        assert 0 < score < 1
        assert dict(hits.items()) == {0: score}
        data = hits[0].dict()
        assert data['__id__'] == 0 and '__score__' in data
        assert not indexer.search('hello') and indexer.search('hello', field='text')
        assert indexer.search('text:hello hi') and not indexer.search('text:hello hi', op='and')
        assert indexer.search('text:*hello', allowLeadingWildcard=True)
        query = engine.Query.multiphrase('text', ('hello', 'hi'), None, 'world')
        assert str(query).startswith('text:"(hello hi) ') and list(query.positions) == [0, 2]
        query = engine.Query.wildcard('text', '*')
        assert str(query) == 'text:*' and isinstance(query, lucene.WildcardQuery)
        assert str(lucene.MatchAllDocsQuery() | query) == '*:* text:*'
        assert str(lucene.MatchAllDocsQuery() - query) == '*:* -text:*'
        query = +query
        query &= engine.Query.fuzzy('text', 'hello')
        query |= engine.Query.fuzzy('text', 'hello', 0.1)
        assert str(query) == '+text:* +text:hello~0.5 text:hello~0.1'
        query = engine.Query.span('text', 'world')
        assert str(query.mask('name')) == 'mask(text:world) as name'
        assert str(query.payload()) == 'spanPayCheck(text:world, payloadRef: )'
        assert isinstance(query.filter(cache=False), lucene.SpanQueryFilter) and isinstance(query.filter(), lucene.CachingSpanFilter)
        query = engine.Query.disjunct(0.1, query, name='sample')
        assert str(query) == '(text:world | name:sample)~0.1'
        query = engine.Query.near('text', 'hello', ('tag', 'python'), slop=-1, inOrder=False)
        assert str(query) == 'spanNear([text:hello, mask(tag:python) as text], -1, false)' and indexer.count(query) == 1
        query = engine.Query.near('text', 'hello', 'world')
        (doc, items), = indexer.spans(query, payloads=True)
        (start, stop, payloads), = items
        assert doc == 0 and start == 0 and stop == 2 and payloads == ['<ALPHANUM>', '<ALPHANUM>']
        (doc, count), = indexer.spans(query.payload('<ALPHANUM>', '<ALPHANUM>'))
        assert doc == 0 and count == 1
        assert not indexer.search(query.payload('<>'))
        query = engine.Query.near('text', 'hello', 'world', collectPayloads=False)
        (doc, items), = indexer.spans(query, payloads=True)
        assert doc == 0 and items == []
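        # deletions are likewise deferred until the next commit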
        indexer.delete('name:sample')
        indexer.delete('tag', 'python')
        assert 0 in indexer and len(indexer) == 1 and indexer.segments == {'_0': 1}
        indexer.commit()
        assert 0 not in indexer and len(indexer) == 0 and sum(indexer.segments.values()) == 0
        indexer.add(tag='test', name='old')
        indexer.update('tag', boost=2.0, tag='test')
        indexer.commit()
        assert [indexer[id].dict() for id in indexer] == [{'tag': 'test'}]
        indexer.update('tag', 'test', {'name': 'new'})
        indexer.commit()
        assert [indexer[id].dict() for id in indexer] == [{'name': 'new'}]
        indexer.deleteAll()
        indexer.commit()
        temp = engine.Indexer(self.tempdir)
        temp.add()
        temp.commit()
        indexer += temp
        indexer += temp.directory
        indexer += self.tempdir
        assert len(indexer) == 3
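        # fields also accept pre-analyzed token streams and raw byte arrays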
        indexer.add(text=lucene.WhitespaceTokenizer(lucene.StringReader('?')), name=lucene.JArray_byte('{}'))
        indexer.commit()
        assert indexer[next(indexer.docs('text', '?'))] == {'name': ['{}']}
        reader = engine.indexers.IndexReader(indexer.indexReader)
        assert reader[0].dict() == {} and reader.count('text', '?') == 1
        assert len(reader.comparator('text')) == 4
        indexer.delete('text', '?')
        indexer.commit(merge=True)
        assert not indexer.hasDeletions()
        indexer.commit(merge=1)
        assert indexer.optimized
        del reader.indexReader
        self.assertRaises(AttributeError, getattr, reader, 'maxDoc')
        del indexer.indexSearcher
        self.assertRaises(AttributeError, getattr, indexer, 'search')
    
    def testBasic(self):
        "Text fields and simple searches."
        self.assertRaises(lucene.JavaError, engine.Indexer, self.tempdir, 'r')
        indexer = engine.Indexer(self.tempdir)
        for name, params in fixture.constitution.fields.items():
            indexer.set(name, **params)
        for doc in fixture.constitution.docs():
            indexer.add(doc, boost=('article' in doc) + 1.0)
        indexer.commit()
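        # load copies the on-disk index into a RAMDirectory-backed searcher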
        searcher = engine.IndexSearcher.load(self.tempdir)
        engine.IndexSearcher.load(searcher.directory) # ensure directory isn't closed
        assert len(indexer) == len(searcher) and lucene.RAMDirectory.instance_(searcher.directory)
        assert indexer.filters == indexer.spellcheckers == {}
        assert indexer.facets(lucene.MatchAllDocsQuery(), 'amendment')
        assert indexer.suggest('amendment', '')
        assert list(indexer.filters) == list(indexer.spellcheckers) == ['amendment']
        indexer.delete('amendment', doc['amendment'])
        indexer.add(doc)
        reader = indexer.indexReader
        indexer.commit(filters=True, spellcheckers=True)
        assert reader.refCount == 0
        assert list(indexer.filters) == list(indexer.spellcheckers) == ['amendment']
        doc['amendment'] = engine.Analyzer(lucene.WhitespaceTokenizer).tokens(doc['amendment'])
        doc['date'] = engine.Analyzer(lucene.WhitespaceTokenizer).tokens(doc['date']), 2.0
        scores = list(searcher.match(doc, 'text:congress', 'text:law', 'amendment:27', 'date:19*'))
        assert 0.0 == scores[0] < scores[1] < scores[2] < scores[3] == 1.0
        searcher = engine.MultiSearcher([indexer.indexReader, self.tempdir])
        assert searcher.refCount == 1
        assert searcher.count() == len(searcher) == 2 * len(indexer)
        searcher.sorters['amendment'] = engine.SortField('amendment', int)
        comparator = searcher.comparator('amendment')
        assert comparator is searcher.comparator('amendment') and set(map(type, comparator)) == set([int])
        assert searcher is searcher.reopen()
        assert searcher.facets(lucene.MatchAllDocsQuery(), 'amendment')['amendment'] == dict.fromkeys(map(str, range(1, 28)), 2)
        reader = searcher.indexReader
        del searcher
        self.assertRaises(lucene.JavaError, reader.isCurrent)
        assert len(indexer) == len(indexer.search()) == 35
        assert sorted(indexer.names()) == ['amendment', 'article', 'date', 'text']
        articles = list(indexer.terms('article'))
        articles.remove('Preamble')
        assert sorted(map(int, articles)) == range(1, 8)
        assert sorted(map(int, indexer.terms('amendment'))) == range(1, 28)
        assert list(itertools.islice(indexer.terms('text', 'right'), 2)) == ['right', 'rights']
        assert list(indexer.terms('text', 'right*')) == ['right', 'rights']
        assert list(indexer.terms('text', 'right', minSimilarity=0.5)) == ['eight', 'right', 'rights']
        word, count = next(indexer.terms('text', 'people', counts=True))
        assert word == 'people' and count == 8
        docs = dict(indexer.docs('text', 'people', counts=True))
        counts = docs.values()
        assert len(docs) == count and all(counts) and sum(counts) > count
        positions = dict(indexer.positions('text', 'people'))
        assert list(map(len, positions.values())) == counts
        hit, = indexer.search('"We the People"', field='text')
        assert hit['article'] == 'Preamble'
        assert sorted(hit.dict()) == ['__id__', '__score__', 'article']
        hits = indexer.search('people', field='text')
        assert hits[0]['article'] == 'Preamble'
        assert len(hits) == hits.count == 8
        assert set(map(type, hits.ids)) == set([int]) and set(map(type, hits.scores)) == set([float])
        assert hits.maxscore == max(hits.scores)
        ids = hits.ids
        hits = indexer.search('people', count=5, field='text')
        assert hits.ids == ids[:len(hits)]
        assert len(hits) == 5 and hits.count == 8
        assert not any(map(math.isnan, hits.scores))
        assert hits.maxscore == max(hits.scores)
        hits = indexer.search('text:people', count=5, sort=lucene.Sort.INDEXORDER)
        assert sorted(hits.ids) == hits.ids
        sort = engine.SortField('amendment', type=int)
        hits = indexer.search('text:people', count=5, sort=sort)
        assert [hit.get('amendment') for hit in hits] == [None, None, '1', '2', '4']
        assert all(map(math.isnan, hits.scores))
        hits = indexer.search('text:right', count=10**7, sort=sort, scores=True)
        assert not any(map(math.isnan, hits.scores)) and sorted(hits.scores, reverse=True) != hits.scores
        assert math.isnan(hits.maxscore)
        hits = indexer.search('text:right', count=2, sort=sort, maxscore=True)
        assert hits.maxscore > max(hits.scores)
        comparator = indexer.comparator('amendment', type=int, parser=lambda value: int(value or -1))
        hits = indexer.search('text:people', sort=comparator.__getitem__)
        assert sorted(hits.ids) == hits.ids and hits.ids != ids
        comparator = list(zip(*map(indexer.comparator, ['article', 'amendment'])))
        hits = indexer.search('text:people', sort=comparator.__getitem__)
        assert sorted(hits.ids) != hits.ids
        hits = indexer.search('text:people', count=5, sort='amendment', reverse=True)
        assert [hit['amendment'] for hit in hits] == ['9', '4', '2', '17', '10']
        hit, = indexer.search('freedom', field='text')
        assert hit['amendment'] == '1'
        assert sorted(hit.dict()) == ['__id__', '__score__', 'amendment', 'date']
        hits = indexer.search('text:right')
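        # registered per-field filters are cached and reused for facet counts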
        for name in ('amendment', 'article'):
            indexer.filters[name] = engine.Query.prefix(name, '').filter()
        query = engine.Query.term('text', 'right', boost=2.0)
        assert query.boost == 2.0
        assert indexer.facets(str(query), 'amendment', 'article') == {'amendment': 12, 'article': 1}
        self.assertRaises(TypeError, indexer.overlap, query.filter(), lucene.QueryWrapperFilter(query))
        hits = indexer.search('text:people', filter=query.filter())
        assert len(hits) == 4
        hit, = indexer.search('date:192*')
        assert hit['amendment'] == '19'
        hits = indexer.search('date:[1919 TO 1921]')
        amendments = ['18', '19']
        assert sorted(hit['amendment'] for hit in hits) == amendments
        query = engine.Query.range('date', '1919', '1921')
        hits = indexer.search(filter=query.filter())
        assert sorted(hit['amendment'] for hit in hits) == amendments
        hits = indexer.search(query | engine.Query.term('text', 'vote'))
        assert set(hit.get('amendment') for hit in hits) > set(amendments)
        hit, = indexer.search(query & engine.Query.term('text', 'vote'))
        assert hit['amendment'] == '19'
        hit, = indexer.search(query - engine.Query.all(text='vote'))
        assert hit['amendment'] == '18'
        hit, = indexer.search(engine.Query.all(text=['persons', 'papers']))
        assert hit['amendment'] == '4'
        hit, = indexer.search(engine.Query.phrase('text', 'persons', None, 'papers'))
        assert hit['amendment'] == '4'
        hit, = indexer.search(engine.Query.multiphrase('text', 'persons', ['houses', 'papers']))
        assert hit['amendment'] == '4'
        query = engine.Query.term('text', 'persons')
        assert str(-query) == '-text:persons'
        query = +query
        query -= engine.Query.term('text', 'papers')
        assert set(query.terms()) == set([('text', 'persons'), ('text', 'papers')])
        assert str(query[-1]) == '-text:papers'
        assert len(query) == len(list(query)) == 2
        span = engine.Query.span('text', 'persons')
        count = indexer.count(span)
        assert indexer.count(engine.Query.span(engine.Query.prefix('text', 'person'))) > count
        near = engine.Query.near('text', 'persons', 'papers', slop=1, inOrder=False)
        assert indexer.count(span - near) == count
        near = span.near(engine.Query.span('text', 'papers') | engine.Query.span('text', 'things'), slop=1)
        assert indexer.count(span - near) == count - 1
        assert 0 < indexer.count(span[:100]) < count
        assert 0 < indexer.count(span[50:100]) == indexer.count(span[:100] - span[:50]) < indexer.count(span[:100])
        spans = dict(indexer.spans(span))
        assert len(spans) == count and spans == dict(indexer.docs('text', 'persons', counts=True))
        near = engine.Query.near('text', 'persons', 'papers', slop=2)
        (id, positions), = indexer.spans(near, positions=True)
        assert indexer[id]['amendment'] == '4' and positions in ([(3, 6)], [(10, 13)])
        assert 'persons' in indexer.termvector(id, 'text')
        assert dict(indexer.termvector(id, 'text', counts=True))['persons'] == 2
        assert dict(indexer.positionvector(id, 'text'))['persons'] in ([3, 26], [10, 48])
        assert dict(indexer.positionvector(id, 'text', offsets=True))['persons'] == [(46, 53), (301, 308)]
        query = indexer.morelikethis(0)
        assert str(query) == 'text:united text:states'
        hits = indexer.search(query & engine.Query.prefix('article', ''))
        assert len(hits) == 8 and hits[0]['article'] == 'Preamble'
        assert str(indexer.morelikethis(0, 'article')) == ''
        assert str(indexer.morelikethis(0, minDocFreq=3)) == 'text:establish text:united text:states'
        assert str(indexer.morelikethis('jury', 'text', minDocFreq=4, minTermFreq=1)) == 'text:jury'
        assert str(indexer.morelikethis('jury', 'article')) == ''
        self.assertRaises(lucene.JavaError, indexer.morelikethis, 'jury')
        assert indexer.suggest('missing', '') == list(indexer.correct('missing', '')) == []
        assert indexer.suggest('text', '')[:8] == ['shall', 'states', 'any', 'have', 'united', 'congress', 'state', 'constitution']
        assert indexer.suggest('text', 'con')[:2] == ['congress', 'constitution']
        assert indexer.suggest('text', 'congress') == ['congress']
        assert indexer.suggest('text', 'congresses') == []
        assert list(indexer.correct('text', 'writ', distance=0, minSimilarity=None)) == ['writ']
        assert list(indexer.correct('text', 'write', distance=0, minSimilarity=None)) == []
        assert list(indexer.correct('text', 'write', distance=0)) == ['crime', 'writs', 'written', 'writ']
        assert list(indexer.correct('text', 'write', distance=0, minSimilarity=0.7)) == ['writs', 'writ']
        assert list(indexer.correct('text', 'write', distance=1, minSimilarity=None)) == ['writs', 'writ']
        assert list(indexer.correct('text', 'write', distance=1)) == ['writs', 'writ', 'crime', 'written']
        assert list(indexer.correct('text', 'write', distance=1, minSimilarity=0.7)) == ['writs', 'writ']
        assert list(indexer.correct('text', 'write', minSimilarity=0.9)) == ['writs', 'writ', 'crime', 'written']
        query = indexer.parse('text:write', spellcheck=True)
        assert lucene.TermQuery.instance_(query) and str(query) == 'text:writs'
        query = indexer.parse('"hello world"', field='text', spellcheck=True)
        assert lucene.PhraseQuery.instance_(query) and str(query) == 'text:"held would"'
        assert str(indexer.parse('vwxyz', field='text', spellcheck=True)) == 'text:vwxyz'
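        # a snapshot pins the referenced index commit's files until the context exits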
        with indexer.snapshot() as commit:
            self.assertRaises(lucene.JavaError, indexer.snapshot().__enter__)
        files = set(os.listdir(self.tempdir))
        path = os.path.join(self.tempdir, 'temp')
        with indexer.snapshot('backup') as commit:
            indexer.commit(merge=1)
            assert indexer.indexCommit.generation > commit.generation
            engine.indexers.copy(commit, path)
            assert set(os.listdir(path)) == set(commit.fileNames) < files < set(os.listdir(self.tempdir))
            filepath = os.path.join(path, commit.segmentsFileName)
            os.remove(filepath)
            open(filepath, 'w').close()
            self.assertRaises(OSError, engine.indexers.copy, commit, path)
        del indexer
        assert engine.Indexer(self.tempdir)
        assert not os.path.exists(os.path.join(self.tempdir, commit.segmentsFileName))
    
    def testAdvanced(self):
        "Large data set with hierarchical fields."
        indexer = engine.Indexer(self.tempdir)
        for name, params in fixture.zipcodes.fields.items():
            indexer.set(name, **params)
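        # NestedField indexes the full path and each prefix as separate fields: 'state', 'state.county', 'state.county.city'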
        indexer.fields['location'] = engine.NestedField('state.county.city')
        for doc in fixture.zipcodes.docs():
            if doc['state'] in ('CA', 'AK', 'WY', 'PR'):
                lat, lng = ('{0:08.3f}'.format(doc.pop(l)) for l in ['latitude', 'longitude'])
                location = '.'.join(doc[name] for name in ['state', 'county', 'city'])
                indexer.add(doc, latitude=lat, longitude=lng, location=location)
        indexer.commit()
        assert set(['state', 'zipcode']) < set(indexer.names('indexed', isIndexed=True))
        assert set(['latitude', 'longitude', 'county', 'city']) == set(indexer.names('unindexed', isIndexed=False))
        states = list(indexer.terms('state'))
        assert states[0] == 'AK' and states[-1] == 'WY'
        counties = [term.split('.')[-1] for term in indexer.terms('state.county', 'CA', 'CA~')]
        field = indexer.fields['location']
        hits = indexer.search(field.prefix('CA'))
        assert sorted(set(hit['county'] for hit in hits)) == counties
        assert counties[0] == 'Alameda' and counties[-1] == 'Yuba'
        cities = [term.split('.')[-1] for term in indexer.terms('state.county.city', 'CA.Los Angeles', 'CA.Los Angeles~')]
        hits = indexer.search(field.prefix('CA.Los Angeles'))
        assert sorted(set(hit['city'] for hit in hits)) == cities
        assert cities[0] == 'Acton' and cities[-1] == 'Woodland Hills'
        hit, = indexer.search('zipcode:90210')
        assert hit['state'] == 'CA' and hit['county'] == 'Los Angeles' and hit['city'] == 'Beverly Hills' and hit['longitude'] == '-118.406'
        query = engine.Query.prefix('zipcode', '90')
        (field, facets), = indexer.facets(query.filter(), 'state.county').items()
        assert field == 'state.county'
        la, orange = sorted(filter(facets.get, facets))
        assert la == 'CA.Los Angeles' and facets[la] > 100
        assert orange == 'CA.Orange' and facets[orange] > 10
        (field, facets), = indexer.facets(query, ('state.county', 'CA.*')).items()
        assert all(value.startswith('CA.') for value in facets) and set(facets) < set(indexer.filters['state.county'])
        for count in (None, len(indexer)):
            hits = indexer.search(query, count=count, timeout=0.01)
            assert 0 <= len(hits) <= indexer.count(query) and hits.count in (None, len(hits)) and hits.maxscore in (None, 1.0)
            hits = indexer.search(query, count=count, timeout=-1)
            assert len(hits) == 0 and hits.count is hits.maxscore is None
        self.assertRaises(lucene.JavaError, indexer.search, filter=Filter())
        directory = lucene.RAMDirectory()
        query = engine.Query.term('state', 'CA')
        size = indexer.copy(directory, query)
        searcher = engine.IndexSearcher(directory)
        assert len(searcher) == size and list(searcher.terms('state')) == ['CA']
        path = os.path.join(self.tempdir, 'temp')
        size = indexer.copy(path, exclude=query, merge=1)
        assert len(searcher) + size == len(indexer)
        searcher = engine.IndexSearcher(path)
        assert searcher.optimized and 'CA' not in searcher.terms('state')
        directory.close()
    
    def testSpatial(self):
        "Spatial tiles."
        indexer = engine.Indexer(self.tempdir, 'w')
        for name, params in fixture.zipcodes.fields.items():
            indexer.set(name, **params)
        for name in ('longitude', 'latitude'):
            indexer.set(name, engine.NumericField, store=True)
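        # PointField encodes (longitude, latitude) points as tile strings, indexed at multiple precisions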
        field = indexer.fields['tile'] = engine.PointField('tile', precision=15, step=2, store=True)
        points = []
        for doc in fixture.zipcodes.docs():
            if doc['state'] == 'CA':
                point = doc['longitude'], doc['latitude']
                indexer.add(doc, tile=[point])
                if doc['city'] == 'Los Angeles':
                    points.append(point)
        assert len(list(engine.PolygonField('', precision=15).items(points))) > len(points)
        indexer.commit()
        city, zipcode, tile = 'Beverly Hills', '90210', '023012311120332'
        hit, = indexer.search('zipcode:' + zipcode)
        assert (hit['tile'] == tile or int(hit['tile']) == int(tile, 4)) and hit['city'] == city
        hit, = indexer.search(field.prefix(tile))
        assert hit['zipcode'] == zipcode and hit['city'] == city
        x, y = (float(hit[l]) for l in ['longitude', 'latitude'])
        assert field.coords(tile[:4]) == (2, 9)
        bottom, left, top, right = field.decode(tile)
        assert left < x < right and bottom < y < top
        hits = indexer.search(field.near(x, y))
        cities = set(hit['city'] for hit in hits)
        assert set([city]) == cities
        hits = indexer.search(field.near(x, y, precision=10))
        cities = set(hit['city'] for hit in hits)
        assert city in cities and len(cities) > 10
        query = field.within(x, y, 10**4)
        assert len(query) < 3
        distances = indexer.distances(x, y, 'longitude', 'latitude')
        hits = indexer.search(query, sort=distances.__getitem__)
        assert hits[0]['zipcode'] == zipcode and distances[hits[0].id] < 10
        cities = set(hit['city'] for hit in hits)
        assert city in cities and 100 > len(cities) > 50
        hits = indexer.search(field.within(x, y, 10**5, limit=100))
        cities = set(hit['city'] for hit in hits)
        assert city in cities and len(cities) > 100
        ranges = 10**2, 10**5
        groups = hits.groupby(lambda id: bisect.bisect_left(ranges, distances[id]))
        counts = dict((hits.value, len(hits)) for hits in groups)
        assert 1 == counts[0] < counts[2] < counts[1]
        assert len(field.within(x, y, 10**8)) == 1
        self.assertRaises(NameError, list, field.radiate(y, x, 1, 0))
        hits = hits.filter(lambda id: distances[id] < 10**4)
        assert 0 < len(hits) < sum(counts.values())
        hits = hits.sorted(distances.__getitem__, reverse=True)
        assert 0 == distances[hits.ids[-1]] < distances[hits.ids[0]] < 10**4
    
    def testFields(self):
        "Custom fields."
        self.assertRaises(lucene.JavaError, engine.Field, '', store='invalid')
        self.assertRaises(AttributeError, engine.Field, '', omit='value')
        self.assertRaises(lucene.JavaError, engine.Field, '', index=False)
        field = engine.Field('', index=True, analyzed=True, omitNorms=True, termvector=True, withPositions=True, withOffsets=True)
        field, = field.items(' ')
        attrs = 'indexed', 'tokenized', 'termVectorStored', 'storePositionWithTermVector', 'storeOffsetWithTermVector', 'omitNorms'
        assert all(getattr(field, attr) for attr in attrs)
        indexer = engine.Indexer(self.tempdir)
        indexer.set('amendment', engine.FormatField, format='{0:02d}', store=True)
        indexer.set('size', engine.FormatField, format='{0:04d}', store=True)
        field = indexer.fields['date'] = engine.NestedField('Y-m-d', sep='-', store=True)
        for doc in fixture.constitution.docs():
            if 'amendment' in doc:
                indexer.add(amendment=int(doc['amendment']), date=doc['date'], size=len(doc['text']))
        indexer.commit()
        query = engine.Query.range('amendment', '', indexer.fields['amendment'].format(10))
        assert indexer.count(query) == 9
        query = engine.Query.prefix('amendment', '0')
        assert indexer.count(query) == 9
        query = field.prefix('1791-12-15')
        assert indexer.count(query) == 10
        query = field.range('', '1921-12', lower=False, upper=True)
        assert str(query) == 'Y-m:{ TO 1921-12]', query
        assert indexer.count(query) == 19
        query = field.range('1919-01-01', '1921-12-31')
        assert str(query) == 'Y-m-d:[1919-01-01 TO 1921-12-31}'
        hits = indexer.search(query)
        assert [hit['amendment'] for hit in hits] == ['18', '19']
        assert [hit['Y-m-d'].split('-')[0] for hit in hits] == ['1919', '1920']
        field = indexer.fields['size']
        sizes = dict((id, int(indexer[id]['size'])) for id in indexer)
        ids = sorted((id for id in sizes if sizes[id] >= 1000), key=sizes.get)
        query = engine.Query.range('size', '1000', None)
        hits = indexer.search(query, sort=sizes.get)
        assert hits.ids == ids
        hits = indexer.search(query, count=3, sort=engine.SortField('size', type=long))
        assert hits.ids == ids[:len(hits)]
        query = engine.Query.range('size', None, '1000')
        assert indexer.count(query) == len(sizes) - len(ids)
        indexer.sorters['year'] = engine.SortField('Y-m-d', type=int, parser=lambda date: int(date.split('-')[0]))
        assert indexer.comparator('year')[:10] == [1791] * 10
        cache = len(lucene.FieldCache.DEFAULT.cacheEntries)
        hits = indexer.search(count=3, sort='year')
        assert [int(hit['amendment']) for hit in hits] == [1, 2, 3]
        hits = indexer.search(count=3, sort='year', reverse=True)
        assert [int(hit['amendment']) for hit in hits] == [27, 26, 25]
        assert indexer.count(filter=indexer.sorters['year'].filter(None, 1792)) == 10
        assert cache == len(lucene.FieldCache.DEFAULT.cacheEntries)
        indexer.add()
        indexer.commit(sorters=True)
        cache = len(lucene.FieldCache.DEFAULT.cacheEntries)
        assert indexer.comparator('year')[-1] == 0
        assert cache == len(lucene.FieldCache.DEFAULT.cacheEntries)
    
    def testNumeric(self):
        "Numeric fields."
        indexer = engine.Indexer(self.tempdir)
        indexer.set('amendment', engine.NumericField, store=True)
        indexer.set('date', engine.DateTimeField, store=True)
        indexer.set('size', engine.NumericField, store=True, step=5)
        for doc in fixture.constitution.docs():
            if 'amendment' in doc:
                indexer.add(amendment=int(doc['amendment']), date=[tuple(map(int, doc['date'].split('-')))], size=len(doc['text']))
        indexer.commit()
        assert indexer.count(filter=indexer.fields['amendment'].filter(None, 10)) == 9
        field = indexer.fields['date']
        query = field.prefix((1791, 12))
        assert indexer.count(query) == 10
        query = field.prefix(datetime.date(1791, 12, 15))
        assert indexer.count(query) == 10
        query = field.range(None, (1921, 12), lower=False, upper=True)
        assert indexer.count(query) == 19
        query = field.range(datetime.date(1919, 1, 1), datetime.date(1921, 12, 31))
        hits = indexer.search(query)
        assert [hit['amendment'] for hit in hits] == ['18', '19']
        assert [datetime.datetime.utcfromtimestamp(float(hit['date'])).year for hit in hits] == [1919, 1920]
        assert indexer.count(field.within(seconds=100)) == indexer.count(field.within(weeks=1)) == 0
        query = field.duration([2009], days=-100*365)
        assert indexer.count(query) == 12
        field = indexer.fields['size']
        assert len(list(indexer.terms('size'))) > len(indexer)
        sizes = dict((id, int(indexer[id]['size'])) for id in indexer)
        ids = sorted((id for id in sizes if sizes[id] >= 1000), key=sizes.get)
        query = field.range(1000, None)
        hits = indexer.search(query, sort=sizes.get)
        assert hits.ids == ids
        hits = indexer.search(query, count=3, sort=engine.SortField('size', type=long))
        assert hits.ids == ids[:len(hits)]
        query = field.range(None, 1000)
        assert indexer.count(query) == len(sizes) - len(ids)
        self.assertRaises(OverflowError, list, field.items(-2**64))
        nf, = field.items(0.5)
        assert nf.numericValue.doubleValue() == 0.5
        assert str(field.range(-2**64, 0)) == 'size:[* TO 0}'
        assert str(field.range(0, 2**64)) == 'size:[0 TO *}'
        assert str(field.range(0.5, None, upper=True)) == 'size:[0.5 TO *]'
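        # trie-encoded numeric fields index extra reduced-precision terms for each step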
        for step, count in zip(range(0, 20, field.step), (26, 19, 3, 1)):
            sizes = list(indexer.numbers('size', step))
            assert len(sizes) == count and all(isinstance(size, int) for size in sizes)
            numbers = dict(indexer.numbers('size', step, type=float, counts=True))
            assert sum(numbers.values()) == len(indexer) and all(isinstance(number, float) for number in numbers)
        hit, = indexer.search(indexer.fields['amendment'].term(1))
        assert hit['amendment'] == '1'
    
    def testHighlighting(self):
        "Highlighting text fragments."
        indexer = engine.Indexer()
        indexer.set('text', store=True, termvector=True, withPositions=True, withOffsets=True)
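        # with positions and offsets stored, fragments can be built from document ids as well as raw text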
        for doc in fixture.constitution.docs():
            if 'amendment' in doc:
                indexer.add(text=doc['text'])
        indexer.commit()
        highlighter = indexer.highlighter('persons', 'text')
        for id in indexer:
            fragments = highlighter.fragments(id)
            assert len(fragments) == ('persons' in indexer[id]['text'])
            assert all('<b>persons</b>' in fragment.lower() for fragment in fragments)
        id = 3
        text = indexer[id]['text']
        query = '"persons, houses, papers"'
        highlighter = indexer.highlighter(query, '', terms=True, fields=True, formatter=lucene.SimpleHTMLFormatter('*', '*'))
        fragments = highlighter.fragments(text, count=3)
        assert len(fragments) == 2 and fragments[0].count('*') == 2*3 and '*persons*' in fragments[1]
        highlighter = indexer.highlighter(query, '', terms=True)
        highlighter.textFragmenter = lucene.SimpleFragmenter(200)
        fragment, = highlighter.fragments(text, count=3)
        assert len(fragment) > len(text) and fragment.count('<B>persons</B>') == 2
        fragment, = indexer.highlighter(query, 'text', tag='em').fragments(id, count=3)
        assert len(fragment) < len(text) and fragment.index('<em>persons') < fragment.index('papers</em>')
        fragment, = indexer.highlighter(query, 'text').fragments(id)
        assert fragment.count('<b>') == fragment.count('</b>') == 1
        highlighter = indexer.highlighter(query, 'text', fragListBuilder=lucene.SingleFragListBuilder())
        text, = highlighter.fragments(id)
        assert fragment in text and len(text) > len(fragment)
    
    def testNearRealTime(self):
        "Near real-time index updates."
        indexer = engine.Indexer(version=lucene.Version.LUCENE_30, nrt=True)
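        # with nrt enabled, refresh makes uncommitted additions searchable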
        indexer.add()
        assert indexer.count() == 0 and not indexer.current
        indexer.refresh(filters=True)
        assert indexer.count() == 1 and indexer.current
        searcher = engine.IndexSearcher(indexer.directory)
        assert searcher.count() == 0 and searcher.current
        indexer.add()
        indexer.commit()
        assert indexer.count() == engine.IndexSearcher(indexer.directory).count() == 2
    
    def testFilters(self):
        "Custom filters."
        if lucene.VERSION < '3.5':
            return self.assertRaises(AssertionError, engine.queries.TermsFilter, '')
        indexer = engine.Indexer()
        indexer.set('name', store=True, index=True)
        for name in ('alpha', 'bravo', 'charlie'):
            indexer.add(name=name)
        indexer.commit()
        filter = engine.TermsFilter('name')
        assert indexer.count(filter=filter) == len(filter.readers) == 0
        filter.add('alpha', 'bravo')
        filter.discard('bravo', 'charlie')
        assert filter.values == set(['alpha'])
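        # ParallelIndexer maintains a separate index keyed on 'name'; its filters are applied to the main indexer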
        parallel = engine.ParallelIndexer('name')
        parallel.set('priority', index=True)
        for name in ('alpha', 'bravo', 'delta'):
            parallel.update(name, priority='high')
        parallel.commit()
        filter = parallel.termsfilter(engine.Query.term('priority', 'high').filter(), indexer)
        assert [hit['name'] for hit in indexer.search(filter=filter)] == ['alpha', 'bravo']
        indexer.add(name='delta')
        indexer.delete('name', 'alpha')
        indexer.commit()
        assert filter.readers > set(indexer.sequentialSubReaders)
        assert [hit['name'] for hit in indexer.search(filter=filter)] == ['bravo', 'delta']
        parallel.update('bravo')
        parallel.update('charlie', priority='high')
        parallel.commit()
        assert [hit['name'] for hit in indexer.search(filter=filter)] == ['charlie', 'delta']
        parallel.commit()
        filter.refresh(indexer)
        assert filter.readers == set(indexer.sequentialSubReaders)

if __name__ == '__main__':
    lucene.initVM()
    unittest.main()