SegmentReader.java
      }
    }
  }

  private void loadDeletedDocs() throws IOException {
    // NOTE: the bitvector is stored using the regular directory, not cfs
    if (hasDeletions(si)) {
      deletedDocs = new BitVector(directory(), si.getDelFileName());

      // Verify # deletes does not exceed maxDoc for this segment:
      if (deletedDocs.count() > maxDoc()) {
        throw new CorruptIndexException("number of deletes (" + deletedDocs.count() +
                                        ") exceeds max doc (" + maxDoc() + ") for segment " + si.name);
      }
    }
  }

  // Reopens this reader against the given SegmentInfos; falls back to a
  // MultiSegmentReader when the index now contains more than one segment.
  protected synchronized DirectoryIndexReader doReopen(SegmentInfos infos) throws CorruptIndexException, IOException {
    DirectoryIndexReader newReader;

    if (infos.size() == 1) {
      SegmentInfo si = infos.info(0);
      if (segment.equals(si.name) && si.getUseCompoundFile() == SegmentReader.this.si.getUseCompoundFile()) {
        newReader = reopenSegment(si);
      } else {
        // segment not referenced anymore, reopen not possible
        // or segment format changed
        newReader = SegmentReader.get(infos, infos.info(0), false);
      }
    } else {
      return new MultiSegmentReader(directory, infos, closeDirectory, new SegmentReader[] {this}, null, null);
    }

    return newReader;
  }

  // Returns this reader unchanged if neither deletions nor norms have changed;
  // otherwise returns a clone that shares all unchanged resources with this reader.
  synchronized SegmentReader reopenSegment(SegmentInfo si) throws CorruptIndexException, IOException {
    boolean deletionsUpToDate = (this.si.hasDeletions() == si.hasDeletions())
                                  && (!si.hasDeletions() || this.si.getDelFileName().equals(si.getDelFileName()));

    boolean normsUpToDate = true;

    boolean[] fieldNormsChanged = new boolean[fieldInfos.size()];
    if (normsUpToDate) {
      for (int i = 0; i < fieldInfos.size(); i++) {
        if (!this.si.getNormFileName(i).equals(si.getNormFileName(i))) {
          normsUpToDate = false;
          fieldNormsChanged[i] = true;
        }
      }
    }

    if (normsUpToDate && deletionsUpToDate) {
      return this;
    }

    // clone reader
    SegmentReader clone = new SegmentReader();
    boolean success = false;
    try {
      clone.directory = directory;
      clone.si = si;
      clone.segment = segment;
      clone.readBufferSize = readBufferSize;
      clone.cfsReader = cfsReader;
      clone.storeCFSReader = storeCFSReader;

      clone.fieldInfos = fieldInfos;
      clone.tis = tis;
      clone.freqStream = freqStream;
      clone.proxStream = proxStream;
      clone.termVectorsReaderOrig = termVectorsReaderOrig;

      // we have to open a new FieldsReader, because it is not thread-safe
      // and can thus not be shared among multiple SegmentReaders
      // TODO: Change this in case FieldsReader becomes thread-safe in the future
      final String fieldsSegment;

      Directory storeDir = directory();

      if (si.getDocStoreOffset() != -1) {
        fieldsSegment = si.getDocStoreSegment();
        if (storeCFSReader != null) {
          storeDir = storeCFSReader;
        }
      } else {
        fieldsSegment = segment;
        if (cfsReader != null) {
          storeDir = cfsReader;
        }
      }

      if (fieldsReader != null) {
        clone.fieldsReader = new FieldsReader(storeDir, fieldsSegment, fieldInfos, readBufferSize,
                                              si.getDocStoreOffset(), si.docCount);
      }

      if (!deletionsUpToDate) {
        // load deleted docs
        clone.deletedDocs = null;
        clone.loadDeletedDocs();
      } else {
        clone.deletedDocs = this.deletedDocs;
      }

      clone.norms = new HashMap();
      if (!normsUpToDate) {
        // load norms
        for (int i = 0; i < fieldNormsChanged.length; i++) {
          // copy unchanged norms to the cloned reader and incRef those norms
          if (!fieldNormsChanged[i]) {
            String curField = fieldInfos.fieldInfo(i).name;
            Norm norm = (Norm) this.norms.get(curField);
            norm.incRef();
            clone.norms.put(curField, norm);
          }
        }

        clone.openNorms(si.getUseCompoundFile() ? cfsReader : directory(), readBufferSize);
      } else {
        Iterator it = norms.keySet().iterator();
        while (it.hasNext()) {
          String field = (String) it.next();
          Norm norm = (Norm) norms.get(field);
          norm.incRef();
          clone.norms.put(field, norm);
        }
      }

      if (clone.singleNormStream == null) {
        for (int i = 0; i < fieldInfos.size(); i++) {
          FieldInfo fi = fieldInfos.fieldInfo(i);
          if (fi.isIndexed && !fi.omitNorms) {
            Directory d = si.getUseCompoundFile() ? cfsReader : directory();
            String fileName = si.getNormFileName(fi.number);
            if (si.hasSeparateNorms(fi.number)) {
              continue;
            }

            if (fileName.endsWith("." + IndexFileNames.NORMS_EXTENSION)) {
              clone.singleNormStream = d.openInput(fileName, readBufferSize);
              break;
            }
          }
        }
      }

      success = true;
    } finally {
      if (this.referencedSegmentReader != null) {
        // this reader shares resources with another SegmentReader,
        // so we increment the other reader's refCount. We don't
        // increment the refCount of the norms because we did
        // that already for the shared norms
        clone.referencedSegmentReader = this.referencedSegmentReader;
        referencedSegmentReader.incRefReaderNotNorms();
      } else {
        // this reader wasn't reopened, so we increment this
        // reader's refCount
        clone.referencedSegmentReader = this;
        incRefReaderNotNorms();
      }

      if (!success) {
        // An exception occurred during reopen; we have to decRef the norms
        // that we incRef'ed already and close singleNormStream and FieldsReader
        clone.decRef();
      }
    }

    return clone;
  }

  // Writes pending deletions and dirty norms back to the directory.
  protected void commitChanges() throws IOException {
    if (deletedDocsDirty) {               // re-write deleted
      si.advanceDelGen();

      // We can write directly to the actual name (vs to a
      // .tmp & renaming it) because the file is not live
      // until segments file is written:
      deletedDocs.write(directory(), si.getDelFileName());
    }
    if (undeleteAll && si.hasDeletions()) {
      si.clearDelGen();
    }
    if (normsDirty) {               // re-write norms
      si.setNumFields(fieldInfos.size());
      Iterator it = norms.values().iterator();
      while (it.hasNext()) {
        Norm norm = (Norm) it.next();
        if (norm.dirty) {
          norm.reWrite(si);
        }
      }
    }
    deletedDocsDirty = false;
    normsDirty = false;
    undeleteAll = false;
  }

  FieldsReader getFieldsReader() {
    return fieldsReader;
  }

  // Releases this reader's resources; streams shared with a referenced reader
  // are only closed once no other reader references them anymore.
  protected void doClose() throws IOException {
    boolean hasReferencedReader = (referencedSegmentReader != null);

    if (hasReferencedReader) {
      referencedSegmentReader.decRefReaderNotNorms();
      referencedSegmentReader = null;
    }

    deletedDocs = null;

    // close the single norms stream
    if (singleNormStream != null) {
      // we can close this stream, even if the norms
      // are shared, because every reader has its own
      // singleNormStream
      singleNormStream.close();
      singleNormStream = null;
    }

    // re-opened SegmentReaders have their own instance of FieldsReader
    if (fieldsReader != null) {
      fieldsReader.close();
    }

    if (!hasReferencedReader) {
      // close everything, nothing is shared anymore with other readers
      if (tis != null) {
        tis.close();
      }

      if (freqStream != null)
        freqStream.close();
      if (proxStream != null)
        proxStream.close();

      if (termVectorsReaderOrig != null)
        termVectorsReaderOrig.close();

      if (cfsReader != null)
        cfsReader.close();

      if (storeCFSReader != null)
        storeCFSReader.close();

      // maybe close directory
      super.doClose();
    }
  }

  static boolean hasDeletions(SegmentInfo si) throws IOException {
    // Don't call ensureOpen() here (it could affect performance)
    return si.hasDeletions();
  }

  public boolean hasDeletions() {
    // Don't call ensureOpen() here (it could affect performance)
    return deletedDocs != null;
  }

  static boolean usesCompoundFile(SegmentInfo si) throws IOException {
    return si.getUseCompoundFile();
  }

  static boolean hasSeparateNorms(SegmentInfo si) throws IOException {
    return si.hasSeparateNorms();
  }

  protected void doDelete(int docNum) {
    if (deletedDocs == null)
      deletedDocs = new BitVector(maxDoc());
    deletedDocsDirty = true;
    undeleteAll = false;
    deletedDocs.set(docNum);
  }

  protected void doUndeleteAll() {
    deletedDocs = null;
    deletedDocsDirty = false;
    undeleteAll = true;
  }

  Vector files() throws IOException {
    return new Vector(si.files());
  }

  public TermEnum terms() {
    ensureOpen();
    return tis.terms();
  }

  public TermEnum terms(Term t) throws IOException {
    ensureOpen();
    return tis.terms(t);
  }

  FieldInfos getFieldInfos() {
    return fieldInfos;
  }

  /**
   * @throws CorruptIndexException if the index is corrupt
   * @throws IOException if there is a low-level IO error
   */
  public synchronized Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException {
    ensureOpen();
    if (isDeleted(n))
      throw new IllegalArgumentException("attempt to access a deleted document");
    return fieldsReader.doc(n, fieldSelector);
  }

  public synchronized boolean isDeleted(int n) {
    return (deletedDocs != null && deletedDocs.get(n));
  }

  public TermDocs termDocs() throws IOException {
    ensureOpen();
    return new SegmentTermDocs(this);
  }

  public TermPositions termPositions() throws IOException {
    ensureOpen();
    return new SegmentTermPositions(this);
  }

  public int docFreq(Term t) throws IOException {
    ensureOpen();
    TermInfo ti = tis.get(t);
    if (ti != null)
      return ti.docFreq;
    else
      return 0;
  }

  public int numDocs() {
    // Don't call ensureOpen() here (it could affect performance)
    int n = maxDoc();
    if (deletedDocs != null)
      n -= deletedDocs.count();
    return n;
  }
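  // Usage sketch for the reopen path above: applications normally reach
  // doReopen()/reopenSegment() indirectly through the public
  // IndexReader.reopen() API, which hands back the same reader instance when
  // neither deletions nor norms have changed and a lightweight clone
  // otherwise. A minimal sketch, assuming an already-opened Directory named
  // "dir" (the variable names are illustrative, not from this file):
  //
  //   IndexReader reader = IndexReader.open(dir);
  //   // ... the index is modified elsewhere by an IndexWriter ...
  //   IndexReader reopened = reader.reopen();   // may return the same instance
  //   if (reopened != reader) {
  //     reader.close();     // close the old reader only if a new one was returned
  //     reader = reopened;
  //   }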