indexwriter.cs
FlushRamSegments();
}
}
/// <summary> Expert: Return the total size of all index files currently cached in
/// memory. Useful for size management when calling FlushRamSegments()
/// </summary>
public long RamSizeInBytes()
{
return ramDirectory.SizeInBytes();
}
/// <summary> Expert: Return the number of documents whose segments are currently
/// cached in memory. Useful when calling flushRamSegments()
/// </summary>
public int NumRamDocs()
{
lock (this)
{
return ramSegmentInfos.Count;
}
}
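// Caller-side sketch (illustrative only; ramBudgetBytes is a hypothetical caller
// variable, not a member of this class): the two accessors above are typically
// polled after adding documents and used to decide when to force a flush of the
// buffered RAM segments, e.g.
//   if (writer.RamSizeInBytes() > ramBudgetBytes) { /* flush buffered docs */ }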
/// <summary>Incremental segment merger. </summary>
private void MaybeMergeSegments(int startUpperBound)
{
long lowerBound = - 1;
long upperBound = startUpperBound;
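// Each pass of the loop below handles one "level" of segments: those whose doc
// counts fall in (lowerBound, upperBound]. The first level is (-1, startUpperBound];
// after each pass both bounds are scaled by mergeFactor, so with startUpperBound = 10
// and mergeFactor = 10 the levels are (-1, 10], (10, 100], (100, 1000], and so on,
// until upperBound reaches maxMergeDocs.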
while (upperBound < maxMergeDocs)
{
int minSegment = segmentInfos.Count;
int maxSegment = - 1;
// find merge-worthy segments
while (--minSegment >= 0)
{
SegmentInfo si = segmentInfos.Info(minSegment);
if (maxSegment == - 1 && si.docCount > lowerBound && si.docCount <= upperBound)
{
// start from the rightmost* segment whose doc count is in bounds
maxSegment = minSegment;
}
else if (si.docCount > upperBound)
{
// until the segment whose doc count exceeds upperBound
break;
}
}
minSegment++;
maxSegment++;
int numSegments = maxSegment - minSegment;
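// After the two increments above, [minSegment, maxSegment) is the contiguous run of
// segments belonging to this level: anything further right (if any) has a smaller
// doc count and belongs to a lower (newer) level, and anything further left has a
// doc count above upperBound.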
if (numSegments < mergeFactor)
{
break;
}
else
{
bool exceedsUpperLimit = false;
// number of merge-worthy segments may exceed mergeFactor when
// mergeFactor and/or maxBufferedDocs change(s)
while (numSegments >= mergeFactor)
{
// merge the leftmost* mergeFactor segments
int docCount = MergeSegments(segmentInfos, minSegment, minSegment + mergeFactor);
numSegments -= mergeFactor;
if (docCount > upperBound)
{
// continue to merge the rest of the worthy segments on
// this level
minSegment++;
exceedsUpperLimit = true;
}
else
{
// if the merged segment does not exceed upperBound, consider
// this segment for further merges on this same level
numSegments++;
}
}
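// If a merged segment ended up larger than upperBound it has effectively been
// promoted to a higher level; minSegment was advanced past it above, and the outer
// loop will reconsider it once lowerBound/upperBound are scaled up.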
if (!exceedsUpperLimit)
{
// if none of the merged segments exceed upperBound, done
break;
}
}
lowerBound = upperBound;
upperBound *= mergeFactor;
}
}
/// <summary> Merges the named range of segments, replacing them in the stack with a
/// single segment.
/// </summary>
private int MergeSegments(SegmentInfos sourceSegments, int minSegment, int end)
{
// We may be called solely because there are deletes
// pending, in which case doMerge is false:
bool doMerge = end > 0;
System.String mergedName = NewSegmentName();
SegmentMerger merger = null;
System.Collections.ArrayList segmentsToDelete = System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(10));
System.String segmentsInfosFileName = segmentInfos.GetCurrentSegmentFileName();
System.String nextSegmentsFileName = segmentInfos.GetNextSegmentFileName();
SegmentInfo newSegment = null;
int mergedDocCount = 0;
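// Rough flow of the rest of this method: (1) open a SegmentReader on each source
// segment and feed it to a SegmentMerger, (2) splice the merged SegmentInfo into
// segmentInfos (rolling back on failure), (3) commit a new segments_N file unless
// we are inside a transaction, (4) delete or queue the now-obsolete files, and
// (5) optionally repackage the new segment into a compound (.cfs) file.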
// This is try/finally to make sure merger's readers are closed:
try
{
if (doMerge)
{
if (infoStream != null)
infoStream.Write("merging segments");
merger = new SegmentMerger(this, mergedName);
for (int i = minSegment; i < end; i++)
{
SegmentInfo si = sourceSegments.Info(i);
if (infoStream != null)
infoStream.Write(" " + si.name + " (" + si.docCount + " docs)");
IndexReader reader = SegmentReader.Get(si); // no need to set deleter (yet)
merger.Add(reader);
if ((reader.Directory() == this.directory) || (reader.Directory() == this.ramDirectory))
segmentsToDelete.Add(reader); // queue segment for deletion
}
}
SegmentInfos rollback = null;
bool success = false;
// This is try/finally to rollback our internal state
// if we hit exception when doing the merge:
try
{
if (doMerge)
{
mergedDocCount = merger.Merge();
if (infoStream != null)
{
infoStream.WriteLine(" into " + mergedName + " (" + mergedDocCount + " docs)");
}
newSegment = new SegmentInfo(mergedName, mergedDocCount, directory, false, true);
}
if (!inTransaction && (sourceSegments != ramSegmentInfos || bufferedDeleteTerms.Count > 0))
{
// Now save the SegmentInfo instances that
// we are replacing:
rollback = (SegmentInfos) segmentInfos.Clone();
}
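// The clone above is just a snapshot of the SegmentInfo list: if the merge or the
// commit below throws, the finally block restores segmentInfos from it so the
// in-memory state keeps matching the last segments_N file actually written.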
if (doMerge)
{
if (sourceSegments == ramSegmentInfos)
{
segmentInfos.Add(newSegment);
}
else
{
for (int i = end - 1; i > minSegment; i--) // remove old infos & add new
sourceSegments.RemoveAt(i);
segmentInfos[minSegment] = newSegment;
}
}
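// Note the two splice strategies above: a segment flushed from the RAM buffer is
// simply appended to segmentInfos, while an on-disk merge removes the source infos
// and drops the new segment into the slot of the leftmost one, preserving the
// level ordering of the stack.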
if (sourceSegments == ramSegmentInfos)
{
// Should not be necessary: no prior commit should
// have left pending files, so just defensive:
deleter.ClearPendingFiles();
MaybeApplyDeletes(doMerge);
DoAfterFlush();
}
if (!inTransaction)
{
segmentInfos.Write(directory); // commit before deleting
}
else
{
commitPending = true;
}
success = true;
}
finally
{
if (success)
{
// The non-ram-segments case is already committed
// (above), so all that remains for the ram segments case
// is to clear the ram segments:
if (sourceSegments == ramSegmentInfos)
{
ramSegmentInfos.Clear();
}
}
else if (!inTransaction)
{
// Must rollback so our state matches index:
if (sourceSegments == ramSegmentInfos && 0 == bufferedDeleteTerms.Count)
{
// Simple case: newSegment may or may not have
// been added to the end of our segment infos,
// so just check & remove if so:
if (newSegment != null && segmentInfos.Count > 0 && segmentInfos.Info(segmentInfos.Count - 1) == newSegment)
{
segmentInfos.RemoveAt(segmentInfos.Count - 1);
}
}
else if (rollback != null)
{
// Rollback the individual SegmentInfo
// instances, but keep original SegmentInfos
// instance (so we don't try to write again the
// same segments_N file -- write once):
segmentInfos.Clear();
segmentInfos.AddRange(rollback);
}
// Erase any pending files that we were going to delete:
// i.e. old del files added by SegmentReader.doCommit()
deleter.ClearPendingFiles();
// Delete any partially created files:
deleter.DeleteFile(nextSegmentsFileName);
deleter.FindDeletableFiles();
deleter.DeleteFiles();
}
}
}
finally
{
// close readers before we attempt to delete now-obsolete segments
if (doMerge)
merger.CloseReaders();
}
if (!inTransaction)
{
// Attempt to delete all files we just obsoleted:
deleter.DeleteFile(segmentsInfosFileName); // delete old segments_N file
deleter.DeleteSegments(segmentsToDelete); // delete now-unused segments
// Includes the old del files
deleter.CommitPendingFiles();
}
else
{
deleter.AddPendingFile(segmentsInfosFileName); // delete old segments_N file
deleter.DeleteSegments(segmentsToDelete, protectedSegments); // delete now-unused segments
}
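// Inside a transaction nothing has been committed yet, so the old segments_N file
// is only queued as pending (it must survive a rollback), and protectedSegments
// (presumably the segments that existed when the transaction started) shields
// those files from deletion here.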
if (useCompoundFile && doMerge)
{
segmentsInfosFileName = nextSegmentsFileName;
nextSegmentsFileName = segmentInfos.GetNextSegmentFileName();
System.Collections.ArrayList filesToDelete;
bool success = false;
try
{
filesToDelete = merger.CreateCompoundFile(mergedName + ".cfs");
newSegment.SetUseCompoundFile(true);
if (!inTransaction)
{
segmentInfos.Write(directory); // commit again so readers
// know we've switched this segment to a compound file
}
success = true;
}
finally
{
if (!success && !inTransaction)
{
// Must rollback:
newSegment.SetUseCompoundFile(false);
deleter.DeleteFile(mergedName + ".cfs");
deleter.DeleteFile(nextSegmentsFileName);
}
}
if (!inTransaction)
{
deleter.DeleteFile(segmentsInfosFileName); // delete old segments_N file
}
// We can delete these segments whether or not we are
// in a transaction because we had just written them
// above so they can't need protection by the
// transaction:
deleter.DeleteFiles(filesToDelete); // delete now-unused segments
}
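// The compound-file switch above follows a write-then-flip pattern: the .cfs file
// is fully written before SetUseCompoundFile(true) is committed via segments_N, so
// readers see either the old per-file segment or the complete compound file, never
// a half-built one.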
return mergedDocCount;
}
// Called during flush to apply any buffered deletes. If
// doMerge is true then a new segment was just created and
// flushed from the ram segments.
private void MaybeApplyDeletes(bool doMerge)
{
if (bufferedDeleteTerms.Count > 0)
{
if (infoStream != null)
infoStream.WriteLine("flush " + numBufferedDeleteTerms + " buffered deleted terms on " + segmentInfos.Count + " segments.");
if (doMerge)
{
IndexReader reader = null;
try
{
reader = SegmentReader.Get(segmentInfos.Info(segmentInfos.Count - 1));
reader.SetDeleter(deleter);
// Apply delete terms to the segment just flushed from ram.
// Apply them selectively so that a delete term is only applied to
// the documents buffered before it, not those buffered after it.
ApplyDeletesSelectively(bufferedDeleteTerms, reader);
}
finally
{
if (reader != null)
reader.Close();
}
}
int infosEnd = segmentInfos.Count;
if (doMerge)
{
infosEnd--;
}
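// The remaining (older) segments were all written before any of the buffered delete
// terms arrived, so every buffered term can be applied to them in full; only the
// segment just flushed from ram needs the selective treatment above.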
for (int i = 0; i < infosEnd; i++)
{
IndexReader reader = null;
try
{
reader = SegmentReader.Get(segmentInfos.Info(i));
reader.SetDeleter(deleter);
// Apply delete terms to disk segments
// except the one just flushed from ram.
ApplyDeletes(bufferedDeleteTerms, reader);
}
finally
{
if (reader != null)
reader.Close();
}
}
// Clean up bufferedDeleteTerms.
bufferedDeleteTerms.Clear();
numBufferedDeleteTerms = 0;
}
}
private bool CheckNonDecreasingLevels(int start)
{