TestIndexWriterDelete.java
  if (diskRatio >= 6.0) {
    rate = 0.0;
  }
  if (debug) {
    System.out.println("\ncycle: " + diskFree + " bytes");
  }
  testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
} else {
  thisDiskFree = 0;
  rate = 0.0;
  if (debug) {
    System.out.println("\ncycle: same writer: unlimited disk space");
  }
  testName = "reader re-use after disk full";
}

dir.setMaxSizeInBytes(thisDiskFree);
dir.setRandomIOExceptionRate(rate, diskFree);

try {
  if (0 == x) {
    int docId = 12;
    for (int i = 0; i < 13; i++) {
      if (updates) {
        Document d = new Document();
        d.add(new Field("id", Integer.toString(i), Field.Store.YES,
                        Field.Index.UN_TOKENIZED));
        d.add(new Field("content", "bbb " + i, Field.Store.NO,
                        Field.Index.TOKENIZED));
        modifier.updateDocument(new Term("id", Integer.toString(docId)), d);
      } else { // deletes
        modifier.deleteDocuments(new Term("id", Integer.toString(docId)));
        // modifier.setNorm(docId, "contents", (float) 2.0);
      }
      docId += 12;
    }
  }
  modifier.close();
  success = true;
  if (0 == x) {
    done = true;
  }
} catch (IOException e) {
  if (debug) {
    System.out.println(" hit IOException: " + e);
    e.printStackTrace(System.out);
  }
  err = e;
  if (1 == x) {
    e.printStackTrace();
    fail(testName + " hit IOException after disk space was freed up");
  }
}

// Whether we succeeded or failed, check that all
// un-referenced files were in fact deleted (ie,
// we did not create garbage). Just create a
// new IndexFileDeleter, have it delete
// unreferenced files, then verify that in fact
// no files were deleted:
String[] startFiles = dir.list();
SegmentInfos infos = new SegmentInfos();
infos.read(dir);
new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
String[] endFiles = dir.list();

Arrays.sort(startFiles);
Arrays.sort(endFiles);

// for (int i = 0; i < startFiles.length; i++) {
//   System.out.println(" startFiles: " + i + ": " + startFiles[i]);
// }

if (!Arrays.equals(startFiles, endFiles)) {
  String successStr;
  if (success) {
    successStr = "success";
  } else {
    successStr = "IOException";
    err.printStackTrace();
  }
  fail("reader.close() failed to delete unreferenced files after " + successStr
       + " (" + diskFree + " bytes): before delete:\n " + arrayToString(startFiles)
       + "\n after delete:\n " + arrayToString(endFiles));
}

// Finally, verify index is not corrupt, and, if
// we succeeded, we see all docs changed, and if
// we failed, we see either all docs or no docs
// changed (transactional semantics):
IndexReader newReader = null;
try {
  newReader = IndexReader.open(dir);
} catch (IOException e) {
  e.printStackTrace();
  fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
}

IndexSearcher searcher = new IndexSearcher(newReader);
Hits hits = null;
try {
  hits = searcher.search(new TermQuery(searchTerm));
} catch (IOException e) {
  e.printStackTrace();
  fail(testName + ": exception when searching: " + e);
}
int result2 = hits.length();

if (success) {
  if (x == 0 && result2 != END_COUNT) {
    fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is "
         + result2 + " instead of expected " + END_COUNT);
  } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
    // It's possible that the first exception was
    // "recoverable" wrt pending deletes, in which
    // case the pending deletes are retained and
    // then re-flushing (with plenty of disk
    // space) will succeed in flushing the
    // deletes:
    fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is "
         + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
  }
} else {
  // On hitting exception we still may have added
  // all docs:
  if (result2 != START_COUNT && result2 != END_COUNT) {
    err.printStackTrace();
    fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is "
         + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
  }
}

searcher.close();
newReader.close();

if (result2 == END_COUNT) {
  break;
}
}

dir.close();

// Try again with 10 more bytes of free space:
diskFree += 10;
}
}
}

// This test verifies that buffered deletes are cleared when
// an Exception is hit during flush.
public void testErrorAfterApplyDeletes() throws IOException {

  MockRAMDirectory.Failure failure = new MockRAMDirectory.Failure() {
    boolean sawMaybe = false;
    boolean failed = false;

    public MockRAMDirectory.Failure reset() {
      sawMaybe = false;
      failed = false;
      return this;
    }

    public void eval(MockRAMDirectory dir) throws IOException {
      if (sawMaybe && !failed) {
        boolean seen = false;
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletes".equals(trace[i].getMethodName())) {
            seen = true;
            break;
          }
        }
        if (!seen) {
          // Only fail once we are no longer in applyDeletes
          failed = true;
          throw new IOException("fail after applyDeletes");
        }
      }
      if (!failed) {
        StackTraceElement[] trace = new Exception().getStackTrace();
        for (int i = 0; i < trace.length; i++) {
          if ("applyDeletes".equals(trace[i].getMethodName())) {
            sawMaybe = true;
            break;
          }
        }
      }
    }
  };

  // create a couple of documents
  String[] keywords = { "1", "2" };
  String[] unindexed = { "Netherlands", "Italy" };
  String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" };
  String[] text = { "Amsterdam", "Venice" };

  for (int pass = 0; pass < 2; pass++) {
    boolean autoCommit = (0 == pass);
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);
    modifier.setUseCompoundFile(true);
    modifier.setMaxBufferedDeleteTerms(2);

    dir.failOn(failure.reset());

    for (int i = 0; i < keywords.length; i++) {
      Document doc = new Document();
      doc.add(new Field("id", keywords[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
      doc.add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO));
      doc.add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.TOKENIZED));
      doc.add(new Field("city", text[i], Field.Store.YES, Field.Index.TOKENIZED));
      modifier.addDocument(doc);
    }

    // flush (and commit if ac)
    modifier.optimize();

    // commit if !ac
    if (!autoCommit) {
      modifier.close();
    }

    // one of the two files hits
    Term term = new Term("city", "Amsterdam");
    int hitCount = getHitCount(dir, term);
    assertEquals(1, hitCount);

    // open the writer again (closed above)
    if (!autoCommit) {
      modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
      modifier.setUseCompoundFile(true);
    }

    // delete the doc
    // max buf del terms is two, so this is buffered
    modifier.deleteDocuments(term);

    // add a doc (needed for the !ac case; see below)
    // doc remains buffered
    Document doc = new Document();
    modifier.addDocument(doc);

    // flush the changes, the buffered deletes, and the new doc

    // The failure object will fail on the first write after the del
    // file gets created when processing the buffered delete

    // in the ac case, this will be when writing the new segments
    // files so we really don't need the new doc, but it's harmless

    // in the !ac case, a new segments file won't be created but in
    // this case, creation of the cfs file happens next so we need
    // the doc (to test that it's okay that we don't lose deletes if
    // failing while creating the cfs file)
    boolean failed = false;
    try {
      modifier.flush();
    } catch (IOException ioe) {
      failed = true;
    }

    assertTrue(failed);

    // The flush above failed, so we need to retry it (which will
    // succeed, because the failure is a one-shot)
    if (!autoCommit) {
      modifier.close();
    } else {
      modifier.flush();
    }

    hitCount = getHitCount(dir, term);

    // If the delete was not cleared then hit count will
    // be 0. With autoCommit=false, we hit the exception
    // on creating the compound file, so the delete was
    // flushed successfully.
    assertEquals(autoCommit ? 1 : 0, hitCount);

    if (autoCommit) {
      modifier.close();
    }

    dir.close();
  }
}

// This test verifies that the files created by the docs writer before
// a segment is written are cleaned up if there's an i/o error
public void testErrorInDocsWriterAdd() throws IOException {

  MockRAMDirectory.Failure failure = new MockRAMDirectory.Failure() {
    boolean failed = false;

    public MockRAMDirectory.Failure reset() {
      failed = false;
      return this;
    }

    public void eval(MockRAMDirectory dir) throws IOException {
      if (!failed) {
        failed = true;
        throw new IOException("fail in add doc");
      }
    }
  };

  // create a couple of documents
  String[] keywords = { "1", "2" };
  String[] unindexed = { "Netherlands", "Italy" };
  String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" };
  String[] text = { "Amsterdam", "Venice" };

  for (int pass = 0; pass < 2; pass++) {
    boolean autoCommit = (0 == pass);
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true);

    dir.failOn(failure.reset());

    for (int i = 0; i < keywords.length; i++) {
      Document doc = new Document();
      doc.add(new Field("id", keywords[i], Field.Store.YES, Field.Index.UN_TOKENIZED));
      doc.add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO));
      doc.add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.TOKENIZED));
      doc.add(new Field("city", text[i], Field.Store.YES, Field.Index.TOKENIZED));
      try {
        modifier.addDocument(doc);
      } catch (IOException io) {
        break;
      }
    }

    String[] startFiles = dir.list();
    SegmentInfos infos = new SegmentInfos();
    infos.read(dir);
    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null);
    String[] endFiles = dir.list();

    if (!Arrays.equals(startFiles, endFiles)) {
      fail("docswriter abort() failed to delete unreferenced files:\n before delete:\n "
           + arrayToString(startFiles) + "\n after delete:\n " + arrayToString(endFiles));
    }

    modifier.close();
  }
}

private String arrayToString(String[] l) {
  String s = "";
  for (int i = 0; i < l.length; i++) {
    if (i > 0) {
      s += "\n ";
    }
    s += l[i];
  }
  return s;
}
}
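Both failure tests above call a getHitCount(dir, term) helper that this excerpt does not include. Below is a minimal sketch of what such a helper presumably looks like, written against the same Lucene 2.x API the listing already uses (an IndexSearcher opened directly on a Directory, and Hits returned from search); the body is an assumption, not the original file's verbatim code:

// Hedged sketch of the getHitCount helper referenced above; assumed,
// not copied from the original file. It opens a searcher on the given
// directory, counts the hits for a single-term query, and closes the
// searcher so the tests can reopen writers on the same directory.
private int getHitCount(Directory dir, Term term) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);  // Lucene 2.x: searcher directly over a Directory
  int hitCount = searcher.search(new TermQuery(term)).length();  // Hits.length() = number of matching docs
  searcher.close();
  return hitCount;
}

Counting via Hits.length() lines up with the assertions in the tests, e.g. assertEquals(1, hitCount) after indexing and assertEquals(autoCommit ? 1 : 0, hitCount) after the retried flush.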