
📄 TestIndexWriter.java (Lucene 2.2.0, Java) | page 1 of 3
    // Simulate a corrupt index by copying the latest segments
    // file to the next generation with its last byte dropped,
    // then deleting the original, and make sure we get an
    // IOException trying to open the index:
    public void testSimulatedCorruptIndex1() throws IOException {
        Directory dir = new RAMDirectory();

        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);

        // add 100 documents
        for (int i = 0; i < 100; i++) {
            addDoc(writer);
        }

        // close
        writer.close();

        long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
        assertTrue("segment generation should be > 1 but got " + gen, gen > 1);

        String fileNameIn = SegmentInfos.getCurrentSegmentFileName(dir);
        String fileNameOut = IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS,
                                                                   "",
                                                                   1+gen);
        IndexInput in = dir.openInput(fileNameIn);
        IndexOutput out = dir.createOutput(fileNameOut);
        long length = in.length();
        // copy all but the last byte, leaving a truncated (corrupt)
        // segments file as the newest generation
        for(int i=0;i<length-1;i++) {
          out.writeByte(in.readByte());
        }
        in.close();
        out.close();
        dir.deleteFile(fileNameIn);

        IndexReader reader = null;
        try {
          reader = IndexReader.open(dir);
          fail("reader did not hit IOException on opening a corrupt index");
        } catch (Exception e) {
          // expected
        }
        if (reader != null) {
          reader.close();
        }
    }

    public void testChangesAfterClose() throws IOException {
        Directory dir = new RAMDirectory();

        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        addDoc(writer);

        // close
        writer.close();
        try {
          addDoc(writer);
          fail("did not hit AlreadyClosedException");
        } catch (AlreadyClosedException e) {
          // expected
        }
    }

    // Simulate a corrupt index by removing one of the cfs
    // files and make sure we get an IOException trying to
    // open the index:
    public void testSimulatedCorruptIndex2() throws IOException {
        Directory dir = new RAMDirectory();

        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);

        // add 100 documents
        for (int i = 0; i < 100; i++) {
            addDoc(writer);
        }

        // close
        writer.close();

        long gen = SegmentInfos.getCurrentSegmentGeneration(dir);
        assertTrue("segment generation should be > 1 but got " + gen, gen > 1);

        // delete the first compound-file (.cfs) segment we find
        String[] files = dir.list();
        for(int i=0;i<files.length;i++) {
          if (files[i].endsWith(".cfs")) {
            dir.deleteFile(files[i]);
            break;
          }
        }

        IndexReader reader = null;
        try {
          reader = IndexReader.open(dir);
          fail("reader did not hit IOException on opening a corrupt index");
        } catch (Exception e) {
          // expected
        }
        if (reader != null) {
          reader.close();
        }
    }
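    // The tests on this page call an addDoc helper defined
    // elsewhere in this file (not shown on this page).  A minimal
    // sketch, assuming each document only needs a "content" field
    // whose single token is "aaa" (matching the queries below); the
    // real helper's Field options may differ:
    private void addDocSketch(IndexWriter writer) throws IOException {
      Document doc = new Document();
      doc.add(new Field("content", "aaa", Field.Store.NO, Field.Index.TOKENIZED));
      writer.addDocument(doc);
    }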
    /*
     * Simple test for "commit on close": open writer with
     * autoCommit=false, so it will only commit on close,
     * then add a bunch of docs, making sure reader does not
     * see these docs until writer is closed.
     */
    public void testCommitOnClose() throws IOException {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 14; i++) {
          addDoc(writer);
        }
        writer.close();

        Term searchTerm = new Term("content", "aaa");
        IndexSearcher searcher = new IndexSearcher(dir);
        Hits hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("first number of hits", 14, hits.length());
        searcher.close();

        IndexReader reader = IndexReader.open(dir);

        writer = new IndexWriter(dir, false, new WhitespaceAnalyzer());
        for(int i=0;i<3;i++) {
          for(int j=0;j<11;j++) {
            addDoc(writer);
          }
          searcher = new IndexSearcher(dir);
          hits = searcher.search(new TermQuery(searchTerm));
          assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
          searcher.close();
          assertTrue("reader should have still been current", reader.isCurrent());
        }

        // Now, close the writer:
        writer.close();
        assertFalse("reader should not be current now", reader.isCurrent());

        searcher = new IndexSearcher(dir);
        hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("reader did not see changes after writer was closed", 47, hits.length());
        searcher.close();
    }
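    // Usage sketch of the pattern verified above (an illustration,
    // not part of the original file): with the Lucene 2.2 constructor
    // IndexWriter(Directory, boolean autoCommit, Analyzer) and
    // autoCommit=false, buffered documents stay invisible to readers
    // until close() commits them.  Assumes dir already holds an index
    // and uses the hypothetical addDocSketch helper from above.
    public void exampleCommitOnClose(Directory dir) throws IOException {
      IndexWriter writer = new IndexWriter(dir, false, new WhitespaceAnalyzer());
      addDocSketch(writer);                        // buffered, not yet committed
      IndexReader before = IndexReader.open(dir);  // still sees the old commit point
      int docsBefore = before.numDocs();
      before.close();
      writer.close();                              // the commit happens here
      IndexReader after = IndexReader.open(dir);   // now sees one more document
      assertEquals(docsBefore + 1, after.numDocs());
      after.close();
    }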
    /*
     * Simple test for "commit on close": open writer with
     * autoCommit=false, so it will only commit on close,
     * then add a bunch of docs, making sure reader does not
     * see them until writer has closed.  Then instead of
     * closing the writer, call abort and verify reader sees
     * nothing was added.  Then verify we can open the index
     * and add docs to it.
     */
    public void testCommitOnCloseAbort() throws IOException {
      Directory dir = new RAMDirectory();
      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      for (int i = 0; i < 14; i++) {
        addDoc(writer);
      }
      writer.close();

      Term searchTerm = new Term("content", "aaa");
      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("first number of hits", 14, hits.length());
      searcher.close();

      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      for(int j=0;j<17;j++) {
        addDoc(writer);
      }
      // Delete all docs:
      writer.deleteDocuments(searchTerm);

      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
      searcher.close();

      // Now, abort the writer:
      writer.abort();

      assertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");

      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("saw changes after writer.abort", 14, hits.length());
      searcher.close();

      // Now make sure we can re-open the index, add docs,
      // and all is good:
      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      for(int i=0;i<12;i++) {
        for(int j=0;j<17;j++) {
          addDoc(writer);
        }
        searcher = new IndexSearcher(dir);
        hits = searcher.search(new TermQuery(searchTerm));
        assertEquals("reader incorrectly sees changes from writer with autoCommit disabled", 14, hits.length());
        searcher.close();
      }

      writer.close();
      searcher = new IndexSearcher(dir);
      hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("didn't see changes after close", 218, hits.length());
      searcher.close();

      dir.close();
    }

    /*
     * Verify that a writer with "commit on close" indeed
     * cleans up the temp segments created after opening
     * that are not referenced by the starting segments
     * file.  We check this by using MockRAMDirectory to
     * measure max temp disk space used.
     */
    public void testCommitOnCloseDiskUsage() throws IOException {
      MockRAMDirectory dir = new MockRAMDirectory();
      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      for(int j=0;j<30;j++) {
        addDocWithIndex(writer, j);
      }
      writer.close();
      dir.resetMaxUsedSizeInBytes();

      long startDiskUsage = dir.getMaxUsedSizeInBytes();
      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      for(int j=0;j<1470;j++) {
        addDocWithIndex(writer, j);
      }
      long midDiskUsage = dir.getMaxUsedSizeInBytes();
      dir.resetMaxUsedSizeInBytes();
      writer.optimize();
      writer.close();
      long endDiskUsage = dir.getMaxUsedSizeInBytes();

      // Ending index is 50X as large as starting index; due
      // to 2X disk usage normally we allow 100X max
      // transient usage.  If something is wrong with the deleter
      // and it doesn't delete intermediate segments then it
      // will exceed this 100X:
      // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + "; end " + endDiskUsage);
      assertTrue("writer used too much space while adding documents when autoCommit=false",
                 midDiskUsage < 100*startDiskUsage);
      assertTrue("writer used too much space after close when autoCommit=false",
                 endDiskUsage < 100*startDiskUsage);
    }
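    // addDocWithIndex is another helper not shown on this page.  A
    // plausible sketch (an assumption, not the original): like the
    // addDoc sketch above, but with field values that vary with the
    // index so successive documents and segments differ in size.
    // The real helper may store different fields:
    private void addDocWithIndexSketch(IndexWriter writer, int index) throws IOException {
      Document doc = new Document();
      doc.add(new Field("content", "aaa " + index, Field.Store.YES, Field.Index.TOKENIZED));
      doc.add(new Field("id", "" + index, Field.Store.YES, Field.Index.TOKENIZED));
      writer.addDocument(doc);
    }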
    /*
     * Verify that calling optimize when writer is open for
     * "commit on close" works correctly both for abort()
     * and close().
     */
    public void testCommitOnCloseOptimize() throws IOException {
      RAMDirectory dir = new RAMDirectory();
      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      for(int j=0;j<17;j++) {
        addDocWithIndex(writer, j);
      }
      writer.close();

      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      writer.optimize();

      // Open a reader before closing (committing) the writer:
      IndexReader reader = IndexReader.open(dir);

      // Reader should see index as unoptimized at this
      // point:
      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
      reader.close();

      // Abort the writer:
      writer.abort();
      assertNoUnreferencedFiles(dir, "aborted writer after optimize");

      // Open a reader after aborting writer:
      reader = IndexReader.open(dir);

      // Reader should still see index as unoptimized:
      assertFalse("Reader incorrectly sees that the index is optimized", reader.isOptimized());
      reader.close();

      writer = new IndexWriter(dir, false, new WhitespaceAnalyzer(), false);
      writer.optimize();
      writer.close();
      assertNoUnreferencedFiles(dir, "writer closed after optimize");

      // Open a reader after closing the writer; this time the
      // optimize was committed, so the reader should see the
      // index as optimized:
      reader = IndexReader.open(dir);
      assertTrue("Reader incorrectly sees that the index is unoptimized", reader.isOptimized());
      reader.close();
    }
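    // assertNoUnreferencedFiles, used above, is defined elsewhere in
    // this file.  Conceptually (our reading of its name and the
    // failure messages, not its actual implementation) it asserts
    // that the directory holds no files beyond those the current
    // commit still references, i.e. that abort()/close() left no
    // orphaned intermediate segment files behind.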
    // Make sure that a Directory implementation that does
    // not use LockFactory at all (ie overrides makeLock and
    // implements its own private locking) works OK.  This
    // was raised on java-dev as loss of backwards
    // compatibility.
    public void testNullLockFactory() throws IOException {

      final class MyRAMDirectory extends RAMDirectory {
        private LockFactory myLockFactory;
        MyRAMDirectory() {
          lockFactory = null;
          myLockFactory = new SingleInstanceLockFactory();
        }
        public Lock makeLock(String name) {
          return myLockFactory.makeLock(name);
        }
      }

      Directory dir = new MyRAMDirectory();
      IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      for (int i = 0; i < 100; i++) {
        addDoc(writer);
      }
      writer.close();
      IndexReader reader = IndexReader.open(dir);

      Term searchTerm = new Term("content", "aaa");
      IndexSearcher searcher = new IndexSearcher(dir);
      Hits hits = searcher.search(new TermQuery(searchTerm));
      assertEquals("did not get right number of hits", 100, hits.length());
      writer.close();

      writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
      writer.close();

      dir.close();
    }

    private void rmDir(File dir) {
        File[] files = dir.listFiles();
        if (files != null) {
          for (int i = 0; i < files.length; i++) {
            files[i].delete();
          }
        }
        dir.delete();
    }
}
