LuceneIndexUpdate.java
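This example builds a small file index and then updates it in place: it indexes indexsample.txt, prints the search result, replaces the matching document with the content of indexsample2.txt via IndexWriter.updateDocument(Term, Document), and prints the result again. The D:\workshop paths are the book's sample locations; adjust them to your environment.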
package chapter5;

import java.io.*;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.SimpleAnalyzer;
public class LuceneIndexUpdate {
    private static String Dest_Index_Path = "D:\\workshop\\TextIndex1";
    private static String Text_File_Path = "D:\\workshop\\ch2\\indexsample.txt";
    private static String Text_update_Path = "D:\\workshop\\ch2\\indexsample2.txt";
    public static void main(String[] args) {
        try {
            File file = new File(Text_File_Path);                              // source file
            Directory dir = FSDirectory.getDirectory(Dest_Index_Path, false);  // index directory
            Analyzer TextAnalyzer = new SimpleAnalyzer();                      // document analyzer
            IndexWriter TextIndex = new IndexWriter(dir, TextAnalyzer, true);  // index writer; create=true starts a fresh index
            TextIndex.setUseCompoundFile(true);                                // write the index in compound-file format
            Document document = new Document();                                // new empty document
            Field field_name = new Field("path", file.getName(),
                    Field.Store.YES, Field.Index.UN_TOKENIZED);
            document.add(field_name);                                          // add the file-name field
            FileInputStream inputfile = new FileInputStream(file);             // file input stream
            int len = inputfile.available();
            byte[] buffer = new byte[len];
            inputfile.read(buffer);                                            // read the file content (fine for a small local file)
            inputfile.close();
            String contentext = new String(buffer);
            Field field_content = new Field("content", contentext,
                    Field.Store.YES, Field.Index.TOKENIZED);
            document.add(field_content);                                       // add the file-content field
            TextIndex.addDocument(document);                                   // add the document to the index
            TextIndex.optimize();
            TextIndex.close();
            display(file.getName());                                           // show the result before the update
            IndexWriter TextIndex2 = new IndexWriter(dir, TextAnalyzer, false); // reopen the existing index; create=false, or the index just built would be erased
            TextIndex2.setUseCompoundFile(true);
            File file2 = new File(Text_update_Path);
            Document document2 = new Document();                               // new empty document
            Field field_name2 = new Field("path", file2.getName(),             // file-name field
                    Field.Store.YES, Field.Index.UN_TOKENIZED);
            document2.add(field_name2);
            FileInputStream inputfile2 = new FileInputStream(file2);           // read the file content
            int len2 = inputfile2.available();
            byte[] buffer2 = new byte[len2];
            inputfile2.read(buffer2);
            inputfile2.close();
            String contentext2 = new String(buffer2);
            Field field_content2 = new Field("content", contentext2,
                    Field.Store.YES, Field.Index.TOKENIZED);
            document2.add(field_content2);                                     // add the file-content field
            //Term term = new Term("path", file.getName());                    // alternative: match on the exact file name
            Term term = new Term("content", "lucene");                         // term to match; lowercase, since SimpleAnalyzer lowercases every token
            TextIndex2.updateDocument(term, document2);                        // replace every document containing the term
            TextIndex2.optimize();
            TextIndex2.close();
            display(file2.getName());                                          // show the result after the update
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
    public static void display(String words) { // print the search result
        try {
            IndexSearcher searcher = new IndexSearcher(Dest_Index_Path);       // searcher over the index
            Term term = new Term("path", words);                               // term for the file name
            Query query = new TermQuery(term);                                 // term query
            System.out.println("Query words:");
            System.out.println(" " + query.toString());
            Hits hits = searcher.search(query);                                // run the search
            System.out.println("Search result:");
            for (int i = 0; i < hits.length(); i++) {                          // print the hits
                if (hits.doc(i).getField("content") != null)
                    System.out.println(" Content: " + hits.doc(i).getField("content").stringValue());
                System.out.println(" Path : " + hits.doc(i).getField("path").stringValue());
            }
            searcher.close();                                                  // release the searcher
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
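A note on the API: this listing is written against the Lucene 2.x interface (Hits, FSDirectory.getDirectory, the IndexWriter(Directory, Analyzer, boolean) constructor), all of which were removed in later releases. The core call, updateDocument(Term, Document), performs an atomic delete-then-add; a minimal sketch of the equivalent two-step form, using the same writer and term as above (but without the atomicity guarantee):

    // Equivalent to TextIndex2.updateDocument(term, document2), but not atomic:
    TextIndex2.deleteDocuments(term);   // drop every document containing the term
    TextIndex2.addDocument(document2);  // then add the replacement document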