⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 testindexreader.java

📁 Lucene — a Java open-source search-engine framework
💻 JAVA
📖 第 1 页 / 共 4 页
字号:
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import org.apache.lucene.util.LuceneTestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader.FieldOption;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.*;
import org.apache.lucene.util._TestUtil;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.*;

/**
 * JUnit-3 style tests for {@link IndexReader}: staleness detection
 * ({@code isCurrent}), field-name enumeration ({@code getFieldNames}),
 * term-vector retrieval, deletion through the reader, and behavior of a
 * reader after it has been closed.
 *
 * NOTE(review): helpers referenced below (addDocumentWithFields,
 * addDocumentWithDifferentFields, addDocumentWithTermVectorFields, addDoc)
 * are defined later in this class and are not visible in this chunk.
 */
public class TestIndexReader extends LuceneTestCase
{
    /** Main for running test case by itself. */
    public static void main(String args[]) {
        TestRunner.run (new TestSuite(TestIndexReader.class));
        // Uncomment one of the following to run a single test in isolation:
//        TestRunner.run (new TestIndexReader("testBasicDelete"));
//        TestRunner.run (new TestIndexReader("testDeleteReaderWriterConflict"));
//        TestRunner.run (new TestIndexReader("testDeleteReaderReaderConflict"));
//        TestRunner.run (new TestIndexReader("testFilesOpenClose"));
    }

    /** JUnit-3 named-test constructor. */
    public TestIndexReader(String name) {
        super(name);
    }

    /**
     * Verifies that a reader reports itself current only until the index is
     * modified (a document added) or re-created by a writer opened after
     * the reader; isCurrent() must then return false until the reader is
     * reopened.
     */
    public void testIsCurrent() throws Exception
    {
      RAMDirectory d = new MockRAMDirectory();
      IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
      addDocumentWithFields(writer);
      writer.close();
      // set up reader:
      IndexReader reader = IndexReader.open(d);
      assertTrue(reader.isCurrent());
      // modify index by adding another document:
      writer = new IndexWriter(d, new StandardAnalyzer(), false);
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      // re-create index:
      writer = new IndexWriter(d, new StandardAnalyzer(), true);
      addDocumentWithFields(writer);
      writer.close();
      assertFalse(reader.isCurrent());
      reader.close();
      d.close();
    }

    /**
     * Tests the IndexReader.getFieldNames implementation: field names must
     * be collected across multiple segments and filtered correctly by each
     * {@link IndexReader.FieldOption} (ALL, INDEXED, UNINDEXED and the
     * term-vector variants).
     * @throws Exception on error
     */
    public void testGetFieldNames() throws Exception
    {
        RAMDirectory d = new MockRAMDirectory();
        // set up writer
        IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
        addDocumentWithFields(writer);
        writer.close();
        // set up reader
        IndexReader reader = IndexReader.open(d);
        Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        reader.close();
        // add more documents
        writer = new IndexWriter(d, new StandardAnalyzer(), false);
        // want to get some more segments here
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
            addDocumentWithFields(writer);
        }
        // new fields are in some different segments (we hope)
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
            addDocumentWithDifferentFields(writer);
        }
        // new termvector fields
        for (int i = 0; i < 5*writer.getMergeFactor(); i++)
        {
          addDocumentWithTermVectorFields(writer);
        }

        writer.close();
        // verify fields again
        reader = IndexReader.open(d);
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
        assertEquals(13, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unindexed2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));

        // verify that only indexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
        assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unstored2"));
        assertTrue(fieldNames.contains("tvnot"));
        assertTrue(fieldNames.contains("termvector"));
        assertTrue(fieldNames.contains("tvposition"));
        assertTrue(fieldNames.contains("tvoffset"));
        assertTrue(fieldNames.contains("tvpositionoffset"));

        // verify that only unindexed fields were returned
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
        assertEquals(2, fieldNames.size());    // the following fields
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unindexed2"));

        // verify index term vector fields
        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
        assertEquals(1, fieldNames.size());    // 1 field has term vector only
        assertTrue(fieldNames.contains("termvector"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
        assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
        assertTrue(fieldNames.contains("tvposition"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
        assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
        assertTrue(fieldNames.contains("tvoffset"));

        fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
        assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
        assertTrue(fieldNames.contains("tvpositionoffset"));
        reader.close();
        d.close();
    }

  /**
   * Indexes documents with one field per TermVector flavor (NO, YES,
   * WITH_OFFSETS, WITH_POSITIONS, WITH_POSITIONS_OFFSETS), then retrieves
   * the term-vector map for doc 0 via a FieldSortedTermVectorMapper and
   * checks that exactly the 4 vector-enabled fields are present.
   */
  public void testTermVectors() throws Exception {
    RAMDirectory d = new MockRAMDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
    // want to get some more segments here
    // new termvector fields
    for (int i = 0; i < 5 * writer.getMergeFactor(); i++) {
      Document doc = new Document();
        doc.add(new Field("tvnot","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
        doc.add(new Field("termvector","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
        doc.add(new Field("tvoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
        doc.add(new Field("tvposition","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
        doc.add(new Field("tvpositionoffset","one two two three three three", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
        writer.addDocument(doc);
    }
    writer.close();
    IndexReader reader = IndexReader.open(d);
    FieldSortedTermVectorMapper mapper = new FieldSortedTermVectorMapper(new TermVectorEntryFreqSortedComparator());
    reader.getTermFreqVector(0, mapper);
    Map map = mapper.getFieldToTerms();
    assertTrue("map is null and it shouldn't be", map != null);
    // "tvnot" has TermVector.NO, so only the other 4 fields appear
    assertTrue("map Size: " + map.size() + " is not: " + 4, map.size() == 4);
    Set set = (Set) map.get("termvector");
    for (Iterator iterator = set.iterator(); iterator.hasNext();) {
      TermVectorEntry entry = (TermVectorEntry) iterator.next();
      assertTrue("entry is null and it shouldn't be", entry != null);
      System.out.println("Entry: " + entry);
    }
  }

  /**
   * Asserts that iterating the reader's TermDocs for {@code term} yields
   * exactly {@code expected} documents. The TermDocs enumeration is always
   * closed in the finally block, even if an assertion fails mid-iteration.
   */
  private void assertTermDocsCount(String msg,
                                   IndexReader reader,
                                   Term term,
                                   int expected)
    throws IOException
    {
        TermDocs tdocs = null;
        try {
            tdocs = reader.termDocs(term);
            assertNotNull(msg + ", null TermDocs", tdocs);
            int count = 0;
            while(tdocs.next()) {
                count++;
            }
            assertEquals(msg + ", count mismatch", expected, count);
        } finally {
            if (tdocs != null)
                tdocs.close();
        }
    }

    /**
     * Deletes all documents matching a term through an IndexReader and
     * verifies: the deleting reader still reports the old docFreq (deletes
     * don't change docFreq) while TermDocs skips deleted docs; the deletes
     * can be committed while a second reader is open; and a freshly opened
     * reader observes the deletions.
     */
    public void testBasicDelete() throws IOException
    {
        Directory dir = new MockRAMDirectory();
        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");
        //  add 100 documents with term : aaa
        writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 100; i++)
        {
            addDoc(writer, searchTerm.text());
        }
        writer.close();
        // OPEN READER AT THIS POINT - this should fix the view of the
        // index at the point of having 100 "aaa" documents and 0 "bbb"
        reader = IndexReader.open(dir);
        assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("first reader", reader, searchTerm, 100);
        reader.close();
        // DELETE DOCUMENTS CONTAINING TERM: aaa
        int deleted = 0;
        reader = IndexReader.open(dir);
        deleted = reader.deleteDocuments(searchTerm);
        assertEquals("deleted count", 100, deleted);
        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
        // open a 2nd reader to make sure first reader can
        // commit its changes (.del) while second reader
        // is open:
        IndexReader reader2 = IndexReader.open(dir);
        reader.close();
        // CREATE A NEW READER and re-test
        reader = IndexReader.open(dir);
        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
        reader.close();
        reader2.close();
        dir.close();
    }

    // Make sure attempts to make changes after reader is
    // closed throws IOException:
    // (the thrown type asserted below is AlreadyClosedException, a
    // RuntimeException subclass — the method name/fail messages predate it)
    public void testChangesAfterClose() throws IOException
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");
        //  add 11 documents with term : aaa
        writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 11; i++)
        {
            addDoc(writer, searchTerm.text());
        }
        writer.close();
        reader = IndexReader.open(dir);
        // Close reader:
        reader.close();
        // Then, try to make changes:
        try {
          reader.deleteDocument(4);
          fail("deleteDocument after close failed to throw IOException");
        } catch (AlreadyClosedException e) {
          // expected
        }
        try {
          reader.setNorm(5, "aaa", 2.0f);
          fail("setNorm after close failed to throw IOException");
        } catch (AlreadyClosedException e) {
          // expected
        }
        // NOTE(review): source chunk ends here (page 1 of 4 of the original
        // file) — the remainder of testChangesAfterClose, the helper methods
        // (addDoc, addDocumentWithFields, ...) and the closing braces of this
        // class are on the pages not included in this chunk.

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -