
SegmentReader.java

Package: Lucene 2.0 full-text search source code
Language: Java
Page 1 of 3
      freqStream = cfsDir.openInput(segment + ".frq", readBufferSize);
      if (anyProx)
        proxStream = cfsDir.openInput(segment + ".prx", readBufferSize);
      openNorms(cfsDir, readBufferSize);

      if (doOpenStores && fieldInfos.hasVectors()) { // open term vector files only as needed
        final String vectorsSegment;
        if (si.getDocStoreOffset() != -1)
          vectorsSegment = si.getDocStoreSegment();
        else
          vectorsSegment = segment;
        termVectorsReaderOrig = new TermVectorsReader(storeDir, vectorsSegment, fieldInfos, readBufferSize, si.getDocStoreOffset(), si.docCount);
      }
      success = true;
    } finally {
      // With lock-less commits, it's entirely possible (and
      // fine) to hit a FileNotFound exception above.  In
      // this case, we want to explicitly close any subset
      // of things that were opened so that we don't have to
      // wait for a GC to do so.
      if (!success) {
        doClose();
      }
    }
  }

  private void loadDeletedDocs() throws IOException {
    // NOTE: the bitvector is stored using the regular directory, not cfs
    if (hasDeletions(si)) {
      deletedDocs = new BitVector(directory(), si.getDelFileName());

      assert si.getDelCount() == deletedDocs.count() :
        "delete count mismatch: info=" + si.getDelCount() + " vs BitVector=" + deletedDocs.count();

      // Verify # deletes does not exceed maxDoc for this
      // segment:
      assert si.getDelCount() <= maxDoc() :
        "delete count mismatch: " + deletedDocs.count() + ") exceeds max doc (" + maxDoc() + ") for segment " + si.name;
    } else
      assert si.getDelCount() == 0;
  }

  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
    DirectoryIndexReader newReader;

    if (infos.size() == 1) {
      SegmentInfo si = infos.info(0);
      if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
        newReader = reopenSegment(si);
      } else {
        // segment not referenced anymore, reopen not possible
        // or segment format changed
        newReader = SegmentReader.get(readOnly, infos, infos.info(0), false);
      }
    } else {
      if (readOnly)
        return new ReadOnlyMultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
      else
        return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null, false);
    }

    return newReader;
  }

  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
    boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
                                  && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));

    boolean normsUpToDate = true;

    boolean[] fieldNormsChanged = new boolean[fieldInfos.size()];
    if (normsUpToDate) {
      for (int i = 0; i < fieldInfos.size(); i++) {
        if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
          normsUpToDate = false;
          fieldNormsChanged[i] = true;
        }
      }
    }

    if (normsUpToDate && deletionsUpToDate) {
      return this;
    }

    // clone reader
    SegmentReader clone;
    if (readOnly)
      clone = new ReadOnlySegmentReader();
    else
      clone = new SegmentReader();

    boolean success = false;
    try {
      clone.readOnly = readOnly;
      clone.directory = directory;
      clone.si = si;
      clone.segment = segment;
      clone.readBufferSize = readBufferSize;
      clone.cfsReader = cfsReader;
      clone.storeCFSReader = storeCFSReader;

      clone.fieldInfos = fieldInfos;
      clone.tis = tis;
      clone.freqStream = freqStream;
      clone.proxStream = proxStream;
      clone.termVectorsReaderOrig = termVectorsReaderOrig;

      // we have to open a new FieldsReader, because it is not thread-safe
      // and can thus not be shared among multiple SegmentReaders
      // TODO: Change this in case FieldsReader becomes thread-safe in the future
      final String fieldsSegment;

      Directory storeDir = directory();

      if (si.getDocStoreOffset() != -1) {
        fieldsSegment = si.getDocStoreSegment();
        if (storeCFSReader != null) {
          storeDir = storeCFSReader;
        }
      } else {
        fieldsSegment = segment;
        if (cfsReader != null) {
          storeDir = cfsReader;
        }
      }

      if (fieldsReader != null) {
        clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
                                              si.getDocStoreOffset(), si.docCount);
      }

      if (!deletionsUpToDate) {
        // load deleted docs
        clone.deletedDocs = null;
        clone.loadDeletedDocs();
      } else {
        clone.deletedDocs = this.deletedDocs;
      }

      clone.norms = new HashMap();
      if (!normsUpToDate) {
        // load norms
        for (int i = 0; i < fieldNormsChanged.length; i++) {
          // copy unchanged norms to the cloned reader and incRef those norms
          if (!fieldNormsChanged[i]) {
            String curField = fieldInfos.fieldInfo(i).name;
            Norm norm = (Norm) this.norms.get(curField);
            norm.incRef();
            clone.norms.put(curField, norm);
          }
        }

        clone.openNorms(si.getUseCompoundFile() ? cfsReader : directory(), readBufferSize);
      } else {
        Iterator it = norms.keySet().iterator();
        while (it.hasNext()) {
          String field = (String) it.next();
          Norm norm = (Norm) norms.get(field);
          norm.incRef();
          clone.norms.put(field, norm);
        }
      }

      if (clone.singleNormStream == null) {
        for (int i = 0; i < fieldInfos.size(); i++) {
          FieldInfo fi = fieldInfos.fieldInfo(i);
          if (fi.isIndexed && !fi.omitNorms) {
            Directory d = si.getUseCompoundFile() ? cfsReader : directory();
            String fileName = si.getNormFileName(fi.number);
            if (si.hasSeparateNorms(fi.number)) {
              continue;
            }
            if (fileName.endsWith("." + IndexFileNames.NORMS_EXTENSION)) {
              clone.singleNormStream = d.openInput(fileName, readBufferSize);
              break;
            }
          }
        }
      }

      success = true;
    } finally {
      if (this.referencedSegmentReader != null) {
        // this reader shares resources with another SegmentReader,
        // so we increment the other readers refCount. We don't
        // increment the refCount of the norms because we did
        // that already for the shared norms
        clone.referencedSegmentReader = this.referencedSegmentReader;
        referencedSegmentReader.incRefReaderNotNorms();
      } else {
        // this reader wasn't reopened, so we increment this
        // readers refCount
        clone.referencedSegmentReader = this;
        incRefReaderNotNorms();
      }

      if (!success) {
        // An exception occurred during reopen, we have to decRef the norms
        // that we incRef'ed already and close singleNormStream and FieldsReader
        clone.decRef();
      }
    }

    return clone;
  }

  protected void commitChanges() throws IOException {
    if (deletedDocsDirty) {               // re-write deleted
      si.advanceDelGen();

      // We can write directly to the actual name (vs to a
      // .tmp & renaming it) because the file is not live
      // until segments file is written:
      deletedDocs.write(directory(), si.getDelFileName());

      si.setDelCount(si.getDelCount() + pendingDeleteCount);
    }
    if (undeleteAll && si.hasDeletions()) {
      si.clearDelGen();
      si.setDelCount(0);
    }
    if (normsDirty) {               // re-write norms
      si.setNumFields(fieldInfos.size());
      Iterator it = norms.values().iterator();
      while (it.hasNext()) {
        Norm norm = (Norm) it.next();
        if (norm.dirty) {
          norm.reWrite(si);
        }
      }
    }
    deletedDocsDirty = false;
    normsDirty = false;
    undeleteAll = false;
  }

  FieldsReader getFieldsReader() {
    return fieldsReader;
  }

  protected void doClose() throws IOException {
    boolean hasReferencedReader = (referencedSegmentReader != null);

    termVectorsLocal.close();

    if (hasReferencedReader) {
      referencedSegmentReader.decRefReaderNotNorms();
      referencedSegmentReader = null;
    }

    deletedDocs = null;

    // close the single norms stream
    if (singleNormStream != null) {
      // we can close this stream, even if the norms
      // are shared, because every reader has its own
      // singleNormStream
      singleNormStream.close();
      singleNormStream = null;
    }

    // re-opened SegmentReaders have their own instance of FieldsReader
    if (fieldsReader != null) {
      fieldsReader.close();
    }

    if (!hasReferencedReader) {
      // close everything, nothing is shared anymore with other readers
      if (tis != null) {
        tis.close();
      }

      if (freqStream != null)
        freqStream.close();
      if (proxStream != null)
        proxStream.close();

      if (termVectorsReaderOrig != null)
        termVectorsReaderOrig.close();

      if (cfsReader != null)
        cfsReader.close();

      if (storeCFSReader != null)
        storeCFSReader.close();

      // maybe close directory
      super.doClose();
    }
  }

  static boolean hasDeletions(SegmentInfo si) throws IOException {
    // Don't call ensureOpen() here (it could affect performance)
    return si.hasDeletions();
  }

  public boolean hasDeletions() {
    // Don't call ensureOpen() here (it could affect performance)
    return deletedDocs != null;
  }

  static boolean usesCompoundFile(SegmentInfo si) throws IOException {
    return si.getUseCompoundFile();
  }

  static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
    return si.hasSeparateNorms();
  }

  protected void doDelete(int docNum) {
    if (deletedDocs == null)
      deletedDocs = new BitVector(maxDoc());
    deletedDocsDirty = true;
    undeleteAll = false;
    if (!deletedDocs.getAndSet(docNum))
      pendingDeleteCount++;
  }

  protected void doUndeleteAll() {
    deletedDocs = null;
    deletedDocsDirty = false;
    undeleteAll = true;
  }

  List files() throws IOException {
    return new ArrayList(si.files());
  }

  public TermEnum terms() {
    ensureOpen();
    return tis.terms();
  }

  public TermEnum terms(Term t) throws IOException {
    ensureOpen();
    return tis.terms(t);
  }

  FieldInfos getFieldInfos() {
    return fieldInfos;
  }

  /**
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    ensureOpen();
    if (isDeleted(n))
      throw new IllegalArgumentException("attempt to access a deleted document");
    return fieldsReader.doc(n, fieldSelector);
  }

  public synchronized boolean isDeleted(int n) {
    return (deletedDocs != null && deletedDocs.get(n));
  }

  public TermDocs termDocs() throws IOException {
    ensureOpen();
    return new SegmentTermDocs(this);
  }

  public TermPositions termPositions() throws IOException {
    ensureOpen();
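
For orientation, here is a minimal usage sketch that is not part of the original file. It assumes a Lucene 2.x index at a placeholder path and a hypothetical stored field named "contents", and shows how the public IndexReader API reaches the methods in this listing: document() ends up in FieldsReader.doc(), isDeleted() consults the deletedDocs BitVector, termDocs() hands back a SegmentTermDocs over the .frq stream, and reopen() runs through doReopen()/reopenSegment().

import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.store.FSDirectory;

public class SegmentReaderDemo {
  public static void main(String[] args) throws Exception {
    // Opening a single-segment index returns a SegmentReader under the hood.
    // "/path/to/index" is a placeholder for an existing index directory.
    IndexReader reader = IndexReader.open(FSDirectory.getDirectory("/path/to/index"));
    try {
      // document(n) is served by FieldsReader; isDeleted(n) checks the deletedDocs BitVector
      for (int i = 0; i < reader.maxDoc(); i++) {
        if (reader.isDeleted(i))
          continue;
        Document doc = reader.document(i);
        System.out.println(doc.get("contents"));
      }

      // termDocs(Term) seeks a SegmentTermDocs backed by the .frq stream opened above
      TermDocs td = reader.termDocs(new Term("contents", "lucene"));
      while (td.next()) {
        System.out.println("doc=" + td.doc() + " freq=" + td.freq());
      }
      td.close();

      // reopen() goes through doReopen()/reopenSegment(), sharing unchanged norms
      // and deletions with the old reader instead of reloading them from disk
      IndexReader newReader = reader.reopen();
      if (newReader != reader) {
        reader.close();
        reader = newReader;
      }
    } finally {
      reader.close();
    }
  }
}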
