
📄 indexwriter.java

📁 Lucene, a Java open-source search engine framework
💻 JAVA
📖 Page 1 of 5
    docWriter.setInfoStream(infoStream);
    deleter.setInfoStream(infoStream);
    if (infoStream != null)
      messageState();
  }

  private void messageState() {
    message("setInfoStream: dir=" + directory +
            " autoCommit=" + autoCommit +
            " mergePolicy=" + mergePolicy +
            " mergeScheduler=" + mergeScheduler +
            " ramBufferSizeMB=" + docWriter.getRAMBufferSizeMB() +
            " maxBufferedDocs=" + docWriter.getMaxBufferedDocs() +
            " maxBufferedDeleteTerms=" + docWriter.getMaxBufferedDeleteTerms() +
            " maxFieldLength=" + maxFieldLength +
            " index=" + segString());
  }

  /**
   * Returns the current infoStream in use by this writer.
   * @see #setInfoStream
   */
  public PrintStream getInfoStream() {
    ensureOpen();
    return infoStream;
  }

  /**
   * Sets the maximum time to wait for a write lock (in milliseconds) for this instance of IndexWriter.
   * @see #setDefaultWriteLockTimeout to change the default value for all instances of IndexWriter.
   */
  public void setWriteLockTimeout(long writeLockTimeout) {
    ensureOpen();
    this.writeLockTimeout = writeLockTimeout;
  }

  /**
   * Returns the allowed timeout when acquiring the write lock.
   * @see #setWriteLockTimeout
   */
  public long getWriteLockTimeout() {
    ensureOpen();
    return writeLockTimeout;
  }

  /**
   * Sets the default (for any instance of IndexWriter) maximum time to wait for a write lock (in
   * milliseconds).
   */
  public static void setDefaultWriteLockTimeout(long writeLockTimeout) {
    IndexWriter.WRITE_LOCK_TIMEOUT = writeLockTimeout;
  }

  /**
   * Returns the default write lock timeout for newly
   * instantiated IndexWriters.
   * @see #setDefaultWriteLockTimeout
   */
  public static long getDefaultWriteLockTimeout() {
    return IndexWriter.WRITE_LOCK_TIMEOUT;
  }

  /**
   * Flushes all changes to an index and closes all
   * associated files.
   *
   * <p> If an Exception is hit during close, eg due to disk
   * full or some other reason, then both the on-disk index
   * and the internal state of the IndexWriter instance will
   * be consistent.  However, the close will not be complete
   * even though part of it (flushing buffered documents)
   * may have succeeded, so the write lock will still be
   * held.</p>
   *
   * <p> If you can correct the underlying cause (eg free up
   * some disk space) then you can call close() again.
   * Failing that, if you want to force the write lock to be
   * released (dangerous, because you may then lose buffered
   * docs in the IndexWriter instance) then you can do
   * something like this:</p>
   *
   * <pre>
   * try {
   *   writer.close();
   * } finally {
   *   if (IndexReader.isLocked(directory)) {
   *     IndexReader.unlock(directory);
   *   }
   * }
   * </pre>
   *
   * <p>after which, you must be certain not to use the writer
   * instance anymore.</p>
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public void close() throws CorruptIndexException, IOException {
    close(true);
  }
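  // Illustrative sketch (not part of the original source): a typical
  // configure-then-close lifecycle using the setters above. The directory
  // path, analyzer choice, and timeout values are assumptions for the
  // example only.
  //
  //   IndexWriter.setDefaultWriteLockTimeout(2000);      // all future writers
  //   IndexWriter w = new IndexWriter(
  //       FSDirectory.getDirectory("/path/to/index"),    // hypothetical path
  //       new StandardAnalyzer());
  //   w.setWriteLockTimeout(5000);                       // this writer only
  //   try {
  //     // ... add documents ...
  //   } finally {
  //     w.close();                 // flush and release the write lock
  //   }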
  /**
   * Closes the index with or without waiting for currently
   * running merges to finish.  This is only meaningful when
   * using a MergeScheduler that runs merges in background
   * threads.
   * @param waitForMerges if true, this call will block
   * until all merges complete; else, it will ask all
   * running merges to abort, wait until those merges have
   * finished (which should be at most a few seconds), and
   * then return.
   */
  public void close(boolean waitForMerges) throws CorruptIndexException, IOException {
    boolean doClose;

    // If any methods have hit OutOfMemoryError, then abort
    // on close, in case the internal state of IndexWriter
    // or DocumentsWriter is corrupt
    if (hitOOM)
      abort();

    synchronized(this) {
      // Ensure that only one thread actually gets to do the closing:
      if (!closing) {
        doClose = true;
        closing = true;
      } else
        doClose = false;
    }
    if (doClose)
      closeInternal(waitForMerges);
    else
      // Another thread beat us to it (is actually doing the
      // close), so we will block until that other thread
      // has finished closing
      waitForClose();
  }

  synchronized private void waitForClose() {
    while(!closed && closing) {
      try {
        wait();
      } catch (InterruptedException ie) {
      }
    }
  }

  private void closeInternal(boolean waitForMerges) throws CorruptIndexException, IOException {
    try {
      if (infoStream != null)
        message("now flush at close");

      docWriter.close();

      // Only allow a new merge to be triggered if we are
      // going to wait for merges:
      flush(waitForMerges, true);

      if (waitForMerges)
        // Give merge scheduler last chance to run, in case
        // any pending merges are waiting:
        mergeScheduler.merge(this);

      mergePolicy.close();

      finishMerges(waitForMerges);

      mergeScheduler.close();

      synchronized(this) {
        if (commitPending) {
          boolean success = false;
          try {
            segmentInfos.write(directory);         // now commit changes
            success = true;
          } finally {
            if (!success) {
              if (infoStream != null)
                message("hit exception committing segments file during close");
              deletePartialSegmentsFile();
            }
          }
          if (infoStream != null)
            message("close: wrote segments file \"" + segmentInfos.getCurrentSegmentFileName() + "\"");

          deleter.checkpoint(segmentInfos, true);

          commitPending = false;
          rollbackSegmentInfos = null;
        }

        if (infoStream != null)
          message("at close: " + segString());

        docWriter = null;

        deleter.close();
      }

      if (closeDir)
        directory.close();

      if (writeLock != null) {
        writeLock.release();                          // release write lock
        writeLock = null;
      }
      closed = true;

    } catch (OutOfMemoryError oom) {
      hitOOM = true;
      throw oom;
    } finally {
      synchronized(this) {
        if (!closed) {
          closing = false;
          if (infoStream != null)
            message("hit exception while closing");
        }
        notifyAll();
      }
    }
  }
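  // Illustrative sketch (not part of the original source): choosing between
  // a clean and a fast shutdown when a background MergeScheduler is in use.
  //
  //   writer.close(true);    // block until all background merges complete
  //   writer.close(false);   // or: abort running merges and return quickly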
  /** Tells the docWriter to close its currently open shared
   *  doc stores (stored fields & vectors files).
   *  Return value specifies whether new doc store files are compound or not.
   */
  private synchronized boolean flushDocStores() throws IOException {
    List files = docWriter.files();

    boolean useCompoundDocStore = false;

    if (files.size() > 0) {
      String docStoreSegment;

      boolean success = false;
      try {
        docStoreSegment = docWriter.closeDocStore();
        success = true;
      } finally {
        if (!success) {
          if (infoStream != null)
            message("hit exception closing doc store segment");
          docWriter.abort(null);
        }
      }

      useCompoundDocStore = mergePolicy.useCompoundDocStore(segmentInfos);

      if (useCompoundDocStore && docStoreSegment != null) {
        // Now build compound doc store file
        success = false;

        final int numSegments = segmentInfos.size();
        final String compoundFileName = docStoreSegment + "." + IndexFileNames.COMPOUND_FILE_STORE_EXTENSION;

        try {
          CompoundFileWriter cfsWriter = new CompoundFileWriter(directory, compoundFileName);
          final int size = files.size();
          for(int i=0;i<size;i++)
            cfsWriter.addFile((String) files.get(i));

          // Perform the merge
          cfsWriter.close();

          for(int i=0;i<numSegments;i++) {
            SegmentInfo si = segmentInfos.info(i);
            if (si.getDocStoreOffset() != -1 &&
                si.getDocStoreSegment().equals(docStoreSegment))
              si.setDocStoreIsCompoundFile(true);
          }
          checkpoint();
          success = true;
        } finally {
          if (!success) {
            if (infoStream != null)
              message("hit exception building compound file doc store for segment " + docStoreSegment);

            // Rollback to no compound file
            for(int i=0;i<numSegments;i++) {
              SegmentInfo si = segmentInfos.info(i);
              if (si.getDocStoreOffset() != -1 &&
                  si.getDocStoreSegment().equals(docStoreSegment))
                si.setDocStoreIsCompoundFile(false);
            }
            deleter.deleteFile(compoundFileName);
            deletePartialSegmentsFile();
          }
        }

        deleter.checkpoint(segmentInfos, false);
      }
    }

    return useCompoundDocStore;
  }

  /** Release the write lock, if needed. */
  protected void finalize() throws Throwable {
    try {
      if (writeLock != null) {
        writeLock.release();                        // release write lock
        writeLock = null;
      }
    } finally {
      super.finalize();
    }
  }

  /** Returns the Directory used by this index. */
  public Directory getDirectory() {
    ensureOpen();
    return directory;
  }

  /** Returns the analyzer used by this index. */
  public Analyzer getAnalyzer() {
    ensureOpen();
    return analyzer;
  }

  /** Returns the number of documents currently in this index. */
  public synchronized int docCount() {
    ensureOpen();
    int count = docWriter.getNumDocsInRAM();
    for (int i = 0; i < segmentInfos.size(); i++) {
      SegmentInfo si = segmentInfos.info(i);
      count += si.docCount;
    }
    return count;
  }
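  // Illustrative sketch (not part of the original source): docCount() counts
  // documents still buffered in RAM as well as those in flushed segments,
  // so it can be polled while indexing is in progress.
  //
  //   writer.addDocument(doc);
  //   System.out.println("docs so far: " + writer.docCount());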
  /**
   * The maximum number of terms that will be indexed for a single field in a
   * document.  This limits the amount of memory required for indexing, so that
   * collections with very large files will not crash the indexing process by
   * running out of memory.<p/>
   * Note that this effectively truncates large documents, excluding from the
   * index terms that occur further in the document.  If you know your source
   * documents are large, be sure to set this value high enough to accommodate
   * the expected size.  If you set it to Integer.MAX_VALUE, then the only limit
   * is your memory, but you should anticipate an OutOfMemoryError.<p/>
   * By default, no more than 10,000 terms will be indexed for a field.
   */
  private int maxFieldLength = DEFAULT_MAX_FIELD_LENGTH;

  /**
   * Adds a document to this index.  If the document contains more than
   * {@link #setMaxFieldLength(int)} terms for a given field, the remainder are
   * discarded.
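  // Illustrative sketch (not part of the original source): raising the
  // per-field term limit before indexing large documents; the value here
  // is an arbitrary assumption.
  //
  //   writer.setMaxFieldLength(100000);  // default indexes only 10,000 terms per field
  //   writer.addDocument(largeDoc);      // terms past the limit are discarded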
