📄 indexreader.java
字号:
n++; } } finally { docs.close(); } return n; } // NOTE(review): tail of a method whose opening lines are above this chunk; left verbatim.

  /** Undeletes all documents currently marked as deleted in this index. */
  public final synchronized void undeleteAll() throws IOException{
    // Only the directory-owning reader may mutate the index; take the write
    // lock so no writer or other reader commits concurrently. ("aquire" sic —
    // the method is spelled this way where it is declared.)
    if(directoryOwner)
      aquireWriteLock();
    hasChanges = true;
    doUndeleteAll();
  }

  /** Implements actual undeleteAll() in subclass. */
  protected abstract void doUndeleteAll() throws IOException;

  /**
   * Should internally checkpoint state that will change
   * during commit so that we can rollback if necessary.
   */
  void startCommit() {
    if (directoryOwner) {
      // Snapshot segment metadata so rollbackCommit() can restore it in place.
      rollbackSegmentInfos = (SegmentInfos) segmentInfos.clone();
    }
    rollbackHasChanges = hasChanges;
  }

  /**
   * Rolls back state to just before the commit (this is
   * called by commit() if there is some exception while
   * committing).
   */
  void rollbackCommit() {
    if (directoryOwner) {
      for(int i=0;i<segmentInfos.size();i++) {
        // Rollback each segmentInfo.  Because the
        // SegmentReader holds a reference to the
        // SegmentInfo we can't [easily] just replace
        // segmentInfos, so we reset it in place instead:
        segmentInfos.info(i).reset(rollbackSegmentInfos.info(i));
      }
      rollbackSegmentInfos = null;
    }
    hasChanges = rollbackHasChanges;
  }

  /**
   * Commit changes resulting from delete, undeleteAll, or
   * setNorm operations.
   *
   * If an exception is hit, then either no changes or all
   * changes will have been committed to the index
   * (transactional semantics).
   *
   * @throws IOException if there is a low-level IO error
   */
  protected final synchronized void commit() throws IOException{
    if(hasChanges){
      if (deleter == null) {
        // In the MultiReader case, we share this deleter
        // across all SegmentReaders:
        setDeleter(new IndexFileDeleter(segmentInfos, directory));
      }
      if(directoryOwner){

        // Should not be necessary: no prior commit should
        // have left pending files, so just defensive:
        deleter.clearPendingFiles();

        String oldInfoFileName = segmentInfos.getCurrentSegmentFileName();
        String nextSegmentsFileName = segmentInfos.getNextSegmentFileName();

        // Checkpoint the state we are about to change, in
        // case we have to roll back:
        startCommit();

        boolean success = false;
        try {
          doCommit();
          segmentInfos.write(directory);
          success = true;
        } finally {

          if (!success) {

            // Rollback changes that were made to
            // SegmentInfos but failed to get [fully]
            // committed.  This way this reader instance
            // remains consistent (matched to what's
            // actually in the index):
            rollbackCommit();

            // Erase any pending files that we were going to delete:
            deleter.clearPendingFiles();

            // Remove possibly partially written next
            // segments file:
            deleter.deleteFile(nextSegmentsFileName);

            // Recompute deletable files & remove them (so
            // partially written .del files, etc, are
            // removed):
            deleter.findDeletableFiles();
            deleter.deleteFiles();
          }
        }

        // Attempt to delete all files we just obsoleted:
        deleter.deleteFile(oldInfoFileName);
        deleter.commitPendingFiles();

        if (writeLock != null) {
          writeLock.release();  // release write lock
          writeLock = null;
        }
      }
      else
        doCommit();
    }
    hasChanges = false;
  }

  /** Shares one IndexFileDeleter across readers (MultiReader case). */
  protected void setDeleter(IndexFileDeleter deleter) {
    this.deleter = deleter;
  }

  /** Returns the deleter shared by this reader, or null if none set yet. */
  protected IndexFileDeleter getDeleter() {
    return deleter;
  }

  /** Implements commit. */
  protected abstract void doCommit() throws IOException;

  /**
   * Closes files associated with this index.
   * Also saves any new deletions to disk.
   * No other methods should be called after this has been called.
   */
  public final synchronized void close() throws IOException {
    commit();
    doClose();
    // Only close the Directory if this reader opened it itself.
    if(closeDirectory)
      directory.close();
  }

  /** Implements close. */
  protected abstract void doClose() throws IOException;

  /** Release the write lock, if needed (safety net if close() was never called). */
  protected void finalize() throws Throwable {
    try {
      if (writeLock != null) {
        writeLock.release();                        // release write lock
        writeLock = null;
      }
    } finally {
      super.finalize();
    }
  }

  /**
   * Get a list of unique field names that exist in this index and have the specified
   * field option information.
   * @param fldOption specifies which field option should be available for the returned fields
   * @return Collection of Strings indicating the names of the fields.
   * @see IndexReader.FieldOption
   */
  public abstract Collection getFieldNames(FieldOption fldOption);

  /**
   * Returns <code>true</code> iff the index in the named directory is
   * currently locked.
   * @param directory the directory to check for a lock
   * @throws IOException if there is a problem with accessing the index
   */
  public static boolean isLocked(Directory directory) throws IOException {
    return directory.makeLock(IndexWriter.WRITE_LOCK_NAME).isLocked();
  }

  /**
   * Returns <code>true</code> iff the index in the named directory is
   * currently locked.
   * @param directory the directory to check for a lock
   * @throws IOException if there is a problem with accessing the index
   */
  public static boolean isLocked(String directory) throws IOException {
    // Convenience overload: opens an FSDirectory just long enough to check.
    Directory dir = FSDirectory.getDirectory(directory);
    boolean result = isLocked(dir);
    dir.close();
    return result;
  }

  /**
   * Forcibly unlocks the index in the named directory.
   * <P>
   * Caution: this should only be used by failure recovery code,
   * when it is known that no other process nor thread is in fact
   * currently accessing this index.
   */
  public static void unlock(Directory directory) throws IOException {
    directory.makeLock(IndexWriter.WRITE_LOCK_NAME).release();
  }

  /**
   * Prints the filename and size of each file within a given compound file.
* Add the -extract flag to extract files to the current working directory. * In order to make the extracted version of the index work, you have to copy * the segments file from the compound index into the directory where the extracted files are stored. * @param args Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile> */ public static void main(String [] args) { String filename = null; boolean extract = false; for (int i = 0; i < args.length; ++i) { if (args[i].equals("-extract")) { extract = true; } else if (filename == null) { filename = args[i]; } } if (filename == null) { System.out.println("Usage: org.apache.lucene.index.IndexReader [-extract] <cfsfile>"); return; } Directory dir = null; CompoundFileReader cfr = null; try { File file = new File(filename); String dirname = file.getAbsoluteFile().getParent(); filename = file.getName(); dir = FSDirectory.getDirectory(dirname); cfr = new CompoundFileReader(dir, filename); String [] files = cfr.list(); Arrays.sort(files); // sort the array of filename so that the output is more readable for (int i = 0; i < files.length; ++i) { long len = cfr.fileLength(files[i]); if (extract) { System.out.println("extract " + files[i] + " with " + len + " bytes to local directory..."); IndexInput ii = cfr.openInput(files[i]); FileOutputStream f = new FileOutputStream(files[i]); // read and write with a small buffer, which is more effectiv than reading byte by byte byte[] buffer = new byte[1024]; int chunk = buffer.length; while(len > 0) { final int bufLen = (int) Math.min(chunk, len); ii.readBytes(buffer, 0, bufLen); f.write(buffer, 0, bufLen); len -= bufLen; } f.close(); ii.close(); } else System.out.println(files[i] + ": " + len + " bytes"); } } catch (IOException ioe) { ioe.printStackTrace(); } finally { try { if (dir != null) dir.close(); if (cfr != null) cfr.close(); } catch (IOException ioe) { ioe.printStackTrace(); } } }}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -