TestIndexReader.java
package org.apache.lucene.index;

/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import junit.framework.TestCase;
import junit.framework.TestSuite;
import junit.textui.TestRunner;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util._TestUtil;

import java.util.Collection;
import java.util.Arrays;
import java.io.IOException;
import java.io.FileNotFoundException;
import java.io.File;

import org.apache.lucene.store.MockRAMDirectory;

public class TestIndexReader extends TestCase {

  /** Main for running test case by itself. */
  public static void main(String args[]) {
    TestRunner.run (new TestSuite(TestIndexReader.class));
//    TestRunner.run (new TestIndexReader("testBasicDelete"));
//    TestRunner.run (new TestIndexReader("testDeleteReaderWriterConflict"));
//    TestRunner.run (new TestIndexReader("testDeleteReaderReaderConflict"));
//    TestRunner.run (new TestIndexReader("testFilesOpenClose"));
  }

  public TestIndexReader(String name) {
    super(name);
  }

  public void testIsCurrent() throws Exception {
    RAMDirectory d = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
    addDocumentWithFields(writer);
    writer.close();
    // set up reader:
    IndexReader reader = IndexReader.open(d);
    assertTrue(reader.isCurrent());
    // modify index by adding another document:
    writer = new IndexWriter(d, new StandardAnalyzer(), false);
    addDocumentWithFields(writer);
    writer.close();
    assertFalse(reader.isCurrent());
    // re-create index:
    writer = new IndexWriter(d, new StandardAnalyzer(), true);
    addDocumentWithFields(writer);
    writer.close();
    assertFalse(reader.isCurrent());
    reader.close();
    d.close();
  }

  /**
   * Tests the IndexReader.getFieldNames implementation
   * @throws Exception on error
   */
  public void testGetFieldNames() throws Exception {
    RAMDirectory d = new MockRAMDirectory();
    // set up writer
    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(), true);
    addDocumentWithFields(writer);
    writer.close();
    // set up reader
    IndexReader reader = IndexReader.open(d);
    Collection fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
    assertTrue(fieldNames.contains("keyword"));
    assertTrue(fieldNames.contains("text"));
    assertTrue(fieldNames.contains("unindexed"));
    assertTrue(fieldNames.contains("unstored"));
    reader.close();
    // add more documents
    writer = new IndexWriter(d, new StandardAnalyzer(), false);
    // want to get some more segments here
    for (int i = 0; i < 5*writer.getMergeFactor(); i++) {
      addDocumentWithFields(writer);
    }
    // new fields are in some different segments (we hope)
    for (int i = 0; i < 5*writer.getMergeFactor(); i++) {
      addDocumentWithDifferentFields(writer);
    }
    // new termvector fields
    for (int i = 0; i < 5*writer.getMergeFactor(); i++) {
      addDocumentWithTermVectorFields(writer);
    }
    writer.close();
    // verify fields again
    reader = IndexReader.open(d);
    fieldNames = reader.getFieldNames(IndexReader.FieldOption.ALL);
    assertEquals(13, fieldNames.size());    // the following fields
    assertTrue(fieldNames.contains("keyword"));
    assertTrue(fieldNames.contains("text"));
    assertTrue(fieldNames.contains("unindexed"));
    assertTrue(fieldNames.contains("unstored"));
    assertTrue(fieldNames.contains("keyword2"));
    assertTrue(fieldNames.contains("text2"));
    assertTrue(fieldNames.contains("unindexed2"));
    assertTrue(fieldNames.contains("unstored2"));
    assertTrue(fieldNames.contains("tvnot"));
    assertTrue(fieldNames.contains("termvector"));
    assertTrue(fieldNames.contains("tvposition"));
    assertTrue(fieldNames.contains("tvoffset"));
    assertTrue(fieldNames.contains("tvpositionoffset"));

    // verify that only indexed fields were returned
    fieldNames = reader.getFieldNames(IndexReader.FieldOption.INDEXED);
    assertEquals(11, fieldNames.size());    // 6 original + the 5 termvector fields
    assertTrue(fieldNames.contains("keyword"));
    assertTrue(fieldNames.contains("text"));
    assertTrue(fieldNames.contains("unstored"));
    assertTrue(fieldNames.contains("keyword2"));
    assertTrue(fieldNames.contains("text2"));
    assertTrue(fieldNames.contains("unstored2"));
    assertTrue(fieldNames.contains("tvnot"));
    assertTrue(fieldNames.contains("termvector"));
    assertTrue(fieldNames.contains("tvposition"));
    assertTrue(fieldNames.contains("tvoffset"));
    assertTrue(fieldNames.contains("tvpositionoffset"));

    // verify that only unindexed fields were returned
    fieldNames = reader.getFieldNames(IndexReader.FieldOption.UNINDEXED);
    assertEquals(2, fieldNames.size());    // the following fields
    assertTrue(fieldNames.contains("unindexed"));
    assertTrue(fieldNames.contains("unindexed2"));

    // verify index term vector fields
    fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR);
    assertEquals(1, fieldNames.size());    // 1 field has term vector only
    assertTrue(fieldNames.contains("termvector"));

    fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION);
    assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
    assertTrue(fieldNames.contains("tvposition"));

    fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_OFFSET);
    assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
    assertTrue(fieldNames.contains("tvoffset"));

    fieldNames = reader.getFieldNames(IndexReader.FieldOption.TERMVECTOR_WITH_POSITION_OFFSET);
    assertEquals(1, fieldNames.size());    // 4 fields are indexed with term vectors
    assertTrue(fieldNames.contains("tvpositionoffset"));
    reader.close();
    d.close();
  }

  private void assertTermDocsCount(String msg,
                                   IndexReader reader,
                                   Term term,
                                   int expected)
      throws IOException {
    TermDocs tdocs = null;
    try {
      tdocs = reader.termDocs(term);
      assertNotNull(msg + ", null TermDocs", tdocs);
      int count = 0;
      while (tdocs.next()) {
        count++;
      }
      assertEquals(msg + ", count mismatch", expected, count);
    } finally {
      if (tdocs != null)
        tdocs.close();
    }
  }

  public void testBasicDelete() throws IOException {
    Directory dir = new MockRAMDirectory();

    IndexWriter writer = null;
    IndexReader reader = null;
    Term searchTerm = new Term("content", "aaa");

    // add 100 documents with term : aaa
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int i = 0; i < 100; i++) {
      addDoc(writer, searchTerm.text());
    }
    writer.close();

    // OPEN READER AT THIS POINT - this should fix the view of the
    // index at the point of having 100 "aaa" documents and 0 "bbb"
    reader = IndexReader.open(dir);
    assertEquals("first docFreq", 100, reader.docFreq(searchTerm));
    assertTermDocsCount("first reader", reader, searchTerm, 100);
    reader.close();

    // DELETE DOCUMENTS CONTAINING TERM: aaa
    int deleted = 0;
    reader = IndexReader.open(dir);
    deleted = reader.deleteDocuments(searchTerm);
    assertEquals("deleted count", 100, deleted);
    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);

    // open a 2nd reader to make sure first reader can
    // commit its changes (.del) while second reader
    // is open:
    IndexReader reader2 = IndexReader.open(dir);
    reader.close();

    // CREATE A NEW READER and re-test
    reader = IndexReader.open(dir);
    assertEquals("deleted docFreq", 100, reader.docFreq(searchTerm));
    assertTermDocsCount("deleted termDocs", reader, searchTerm, 0);
    reader.close();
    reader2.close();
    dir.close();
  }

  // Make sure attempts to make changes after reader is
  // closed throws IOException:
  public void testChangesAfterClose() throws IOException {
    Directory dir = new RAMDirectory();

    IndexWriter writer = null;
    IndexReader reader = null;
    Term searchTerm = new Term("content", "aaa");

    // add 11 documents with term : aaa
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int i = 0; i < 11; i++) {
      addDoc(writer, searchTerm.text());
    }
    writer.close();

    reader = IndexReader.open(dir);

    // Close reader:
    reader.close();

    // Then, try to make changes:
    try {
      reader.deleteDocument(4);
      fail("deleteDocument after close failed to throw IOException");
    } catch (AlreadyClosedException e) {
      // expected
    }

    try {
      reader.setNorm(5, "aaa", 2.0f);
      fail("setNorm after close failed to throw IOException");
    } catch (AlreadyClosedException e) {
      // expected
    }

    try {
      reader.undeleteAll();
      fail("undeleteAll after close failed to throw IOException");
    } catch (AlreadyClosedException e) {
      // expected
    }
  }

  // Make sure we get lock obtain failed exception with 2 writers:
  public void testLockObtainFailed() throws IOException {
    Directory dir = new RAMDirectory();

    IndexWriter writer = null;
    IndexReader reader = null;
    Term searchTerm = new Term("content", "aaa");

    // add 11 documents with term : aaa
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    for (int i = 0; i < 11; i++) {
      addDoc(writer, searchTerm.text());
    }

    // Create reader:
    reader = IndexReader.open(dir);

    // Try to make changes
    try {
      reader.deleteDocument(4);
      fail("deleteDocument should have hit LockObtainFailedException");
    } catch (LockObtainFailedException e) {
      // expected
    }

    try {
      reader.setNorm(5, "aaa", 2.0f);
      fail("setNorm should have hit LockObtainFailedException");
    } catch (LockObtainFailedException e) {
      // expected
    }

    try {
      reader.undeleteAll();
      fail("undeleteAll should have hit LockObtainFailedException");
    } catch (LockObtainFailedException e) {
      // expected
    }

    writer.close();
    reader.close();
  }

  // Make sure you can set norms & commit even if a reader
  // is open against the index:
  public void testWritingNorms() throws IOException {
    String tempDir = System.getProperty("tempDir");
    if (tempDir == null)
      throw new IOException("tempDir undefined, cannot run test");

    File indexDir = new File(tempDir, "lucenetestnormwriter");
    Directory dir = FSDirectory.getDirectory(indexDir);
    IndexWriter writer = null;
    IndexReader reader = null;
    Term searchTerm = new Term("content", "aaa");

    // add 1 documents with term : aaa
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    addDoc(writer, searchTerm.text());
    writer.close();

    // now open reader & set norm for doc 0
    reader = IndexReader.open(dir);
    reader.setNorm(0, "content", (float) 2.0);

    // we should be holding the write lock now:
    assertTrue("locked", IndexReader.isLocked(dir));

    reader.commit();

    // we should not be holding the write lock now:
    assertTrue("not locked", !IndexReader.isLocked(dir));

    // open a 2nd reader:
    IndexReader reader2 = IndexReader.open(dir);

    // set norm again for doc 0
    reader.setNorm(0, "content", (float) 3.0);
    assertTrue("locked", IndexReader.isLocked(dir));

    reader.close();
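  // -------------------------------------------------------------------------
  // NOTE: the source page truncates the listing at this point, partway
  // through testWritingNorms, and the private helpers called throughout
  // (addDoc, addDocumentWithFields, addDocumentWithDifferentFields,
  // addDocumentWithTermVectorFields) never appear. Everything below is a
  // hedged reconstruction, not the truncated original: the presumed tail of
  // testWritingNorms, plus helper bodies inferred from the field names and
  // term-vector options the assertions above depend on (Lucene 2.x Field API).
  // -------------------------------------------------------------------------

    // presumed continuation: closing the first reader commits the pending
    // norm change and releases the write lock, even with reader2 still open
    assertTrue("not locked", !IndexReader.isLocked(dir));

    reader2.close();
    dir.close();
  }

  // One document with the four "original" fields testGetFieldNames checks:
  // stored+untokenized, stored+tokenized, stored+unindexed, unstored+tokenized.
  private void addDocumentWithFields(IndexWriter writer) throws IOException {
    Document doc = new Document();
    doc.add(new Field("keyword", "test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
    doc.add(new Field("text", "test1", Field.Store.YES, Field.Index.TOKENIZED));
    doc.add(new Field("unindexed", "test1", Field.Store.YES, Field.Index.NO));
    doc.add(new Field("unstored", "test1", Field.Store.NO, Field.Index.TOKENIZED));
    writer.addDocument(doc);
  }

  // Same shape with "*2" names, so the new fields land in different segments.
  private void addDocumentWithDifferentFields(IndexWriter writer) throws IOException {
    Document doc = new Document();
    doc.add(new Field("keyword2", "test1", Field.Store.YES, Field.Index.UN_TOKENIZED));
    doc.add(new Field("text2", "test1", Field.Store.YES, Field.Index.TOKENIZED));
    doc.add(new Field("unindexed2", "test1", Field.Store.YES, Field.Index.NO));
    doc.add(new Field("unstored2", "test1", Field.Store.NO, Field.Index.TOKENIZED));
    writer.addDocument(doc);
  }

  // One field per term-vector variant, matching the five TERMVECTOR*
  // FieldOption assertions above (each option matched exactly one field name).
  private void addDocumentWithTermVectorFields(IndexWriter writer) throws IOException {
    Document doc = new Document();
    doc.add(new Field("tvnot", "tvnot", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.NO));
    doc.add(new Field("termvector", "termvector", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.YES));
    doc.add(new Field("tvoffset", "tvoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_OFFSETS));
    doc.add(new Field("tvposition", "tvposition", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS));
    doc.add(new Field("tvpositionoffset", "tvpositionoffset", Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
    writer.addDocument(doc);
  }

  // Single unstored "content" field used by the delete/lock/norm tests.
  private void addDoc(IndexWriter writer, String value) throws IOException {
    Document doc = new Document();
    doc.add(new Field("content", value, Field.Store.NO, Field.Index.TOKENIZED));
    writer.addDocument(doc);
  }
}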