⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 testindexreader.java

📁 索引aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
💻 JAVA
📖 第 1 页 / 共 2 页
字号:
package org.apache.lucene.index;

/**
 * Copyright 2004 The Apache Software Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import java.util.Collection;
import java.io.IOException;
import java.io.File;

// JUnit 3 test case for IndexReader: field-name enumeration, document
// deletion via a reader, and reader/writer conflict scenarios.
public class TestIndexReader extends TestCase
{
    /** Main for running test case by itself. */
    public static void main(String args[]) {
        TestRunner.run (new TestSuite(TestIndexReader.class));
//        TestRunner.run (new TestIndexReader("testBasicDelete"));
//        TestRunner.run (new TestIndexReader("testDeleteReaderWriterConflict"));
//        TestRunner.run (new TestIndexReader("testDeleteReaderReaderConflict"));
//        TestRunner.run (new TestIndexReader("testFilesOpenClose"));
    }

    /** JUnit 3 named-test constructor; delegates to {@link TestCase#TestCase(String)}. */
    public TestIndexReader(String name) {
        super(name);
    }

    /**
     * Tests the IndexReader.getFieldNames implementation
     * @throws Exception on error
     */
    public void testGetFieldNames() throws Exception
    {
        RAMDirectory d = new RAMDirectory();
        // set up writer
        IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
        addDocumentWithFields(writer);
        writer.close();
        // set up reader
        IndexReader reader = IndexReader.open(d);
        Collection fieldNames = reader.getFieldNames();
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        // add more documents
        writer = new IndexWriter(d, new StandardAnalyzer(), false);
        // want to get some more segments here
        for (int i = 0; i < 5*writer.mergeFactor; i++)
        {
            addDocumentWithFields(writer);
        }
        // new fields are in some different segments (we hope)
        for (int i = 0; i < 5*writer.mergeFactor; i++)
        {
            addDocumentWithDifferentFields(writer);
        }
        writer.close();
        // verify fields again
        reader = IndexReader.open(d);
        fieldNames = reader.getFieldNames();
        assertEquals(9, fieldNames.size());    // the following fields + an empty one (bug?!)
        assertTrue(fieldNames.contains("keyword"));
        assertTrue(fieldNames.contains("text"));
        assertTrue(fieldNames.contains("unindexed"));
        assertTrue(fieldNames.contains("unstored"));
        assertTrue(fieldNames.contains("keyword2"));
        assertTrue(fieldNames.contains("text2"));
        assertTrue(fieldNames.contains("unindexed2"));
        assertTrue(fieldNames.contains("unstored2"));
        // verify that only indexed fields were returned
        Collection indexedFieldNames = reader.getFieldNames(true);
        assertEquals(6, indexedFieldNames.size());
        assertTrue(indexedFieldNames.contains("keyword"));
        assertTrue(indexedFieldNames.contains("text"));
        assertTrue(indexedFieldNames.contains("unstored"));
        assertTrue(indexedFieldNames.contains("keyword2"));
        assertTrue(indexedFieldNames.contains("text2"));
        assertTrue(indexedFieldNames.contains("unstored2"));
        // verify that only unindexed fields were returned
        Collection unindexedFieldNames = reader.getFieldNames(false);
        assertEquals(3, unindexedFieldNames.size());    // the following fields + an empty one
        assertTrue(unindexedFieldNames.contains("unindexed"));
        assertTrue(unindexedFieldNames.contains("unindexed2"));
    }

    /**
     * Asserts that iterating the TermDocs for {@code term} yields exactly
     * {@code expected} document hits; {@code msg} prefixes failure messages.
     * The TermDocs is always closed in the finally block (close failures
     * are deliberately ignored — best-effort cleanup only).
     * @throws IOException on index access error
     */
    private void assertTermDocsCount(String msg,
                                     IndexReader reader,
                                     Term term,
                                     int expected)
    throws IOException
    {
        TermDocs tdocs = null;
        try {
            tdocs = reader.termDocs(term);
            assertNotNull(msg + ", null TermDocs", tdocs);
            int count = 0;
            while(tdocs.next()) {
                count++;
            }
            assertEquals(msg + ", count mismatch", expected, count);
        } finally {
            if (tdocs != null)
                try { tdocs.close(); } catch (Exception e) { }
        }
    }

    /**
     * Indexes 100 documents containing the term content:aaa, deletes them all
     * through an IndexReader, and verifies that docFreq still reports 100
     * (deletes don't shrink docFreq) while TermDocs yields 0 hits — both on
     * the deleting reader and on a freshly opened reader.
     * @throws IOException on index access error
     */
    public void testBasicDelete() throws IOException
    {
        Directory dir = new RAMDirectory();
        IndexWriter writer = null;
        IndexReader reader = null;
        Term searchTerm = new Term("content", "aaa");
        //  add 100 documents with term : aaa
        writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 100; i++)
        {
            addDoc(writer, searchTerm.text());
        }
        writer.close();
        // OPEN READER AT THIS POINT - this should fix the view of the
        // index at the point of having 100 "aaa" documents and 0 "bbb"
        reader = IndexReader.open(dir);
        assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("first reader", reader, searchTerm, 100);
        // DELETE DOCUMENTS CONTAINING TERM: aaa
        int deleted = 0;
        reader = IndexReader.open(dir);
        deleted = reader.delete(searchTerm);
        assertEquals("deleted count", 100, deleted);
        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
        reader.close();
        // CREATE A NEW READER and re-test
        reader = IndexReader.open(dir);
        assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
        assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
        reader.close();
    }

    /** Runs the reader/writer conflict scenario without optimizing the index. */
    public void testDeleteReaderWriterConflictUnoptimized() throws IOException{
      deleteReaderWriterConflict(false);
    }

    /** Runs the reader/writer conflict scenario with the writer optimizing the index. */
    public void testDeleteReaderWriterConflictOptimized() throws IOException{
        deleteReaderWriterConflict(true);
    }

    /**
     * Shared scenario for the two conflict tests above: indexes 100 "aaa"
     * documents, opens a reader pinned to that point-in-time view, then adds
     * 100 "bbb" documents with a second writer (optionally optimizing).
     * NOTE(review): this source chunk is page 1 of 2 — the method body is
     * truncated below at writer.close(); the remainder continues on page 2.
     * @throws IOException on index access error
     */
    private void deleteReaderWriterConflict(boolean optimize) throws IOException
    {
        //Directory dir = new RAMDirectory();
        Directory dir = getDirectory(true);
        Term searchTerm = new Term("content", "aaa");
        Term searchTerm2 = new Term("content", "bbb");
        //  add 100 documents with term : aaa
        IndexWriter writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 100; i++)
        {
            addDoc(writer, searchTerm.text());
        }
        writer.close();
        // OPEN READER AT THIS POINT - this should fix the view of the
        // index at the point of having 100 "aaa" documents and 0 "bbb"
        IndexReader reader = IndexReader.open(dir);
        assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
        assertEquals("first docFreq", 0, reader.docFreq(searchTerm2));
        assertTermDocsCount("first reader", reader, searchTerm, 100);
        assertTermDocsCount("first reader", reader, searchTerm2, 0);
        // add 100 documents with term : bbb
        writer  = new IndexWriter(dir, new WhitespaceAnalyzer(), false);
        for (int i = 0; i < 100; i++)
        {
            addDoc(writer, searchTerm2.text());
        }
        // REQUEST OPTIMIZATION
        // This causes a new segment to become current for all subsequent
        // searchers. Because of this, deletions made via a previously open
        // reader, which would be applied to that reader's segment, are lost
        // for subsequent searchers/readers
        if(optimize)
          writer.optimize();
        writer.close();

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -