📄 WareIndex.java
package com.hapark.lucene;
import java.io.File;
import java.util.Date;
import java.util.List;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import com.hapark.model.Search;
/**
 * Zhang Chao
 * ago52030@163.com
 * @author Administrator
 *
 */
public class WareIndex {

    public void create(List<Search> list) throws Exception {
        /* Location of the index files */
        File indexDir = new File("d:\\index");
        Analyzer luceneAnalyzer = new StandardAnalyzer();
        // true: create a new index, overwriting any existing one (Lucene 2.x constructor)
        IndexWriter indexWriter = new IndexWriter(indexDir, luceneAnalyzer, true);
        long startTime = new Date().getTime();

        // Add one Document per Search record to the index
        for (int i = 0; i < list.size(); i++) {
            Search search = list.get(i);
            System.out.println("Data " + search + " is being indexed...");
            Document doc = new Document();
            // id is stored for retrieval but not indexed (not searchable)
            Field fieldId = new Field("id", String.valueOf(search.getId()), Field.Store.YES, Field.Index.NO);
            Field fieldTitle = new Field("title", search.getTitle(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldContent = new Field("content", search.getContent(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldCity = new Field("city", search.getCity(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            Field fieldUrl = new Field("url", search.getUrl(), Field.Store.YES, Field.Index.TOKENIZED, Field.TermVector.WITH_POSITIONS_OFFSETS);
            doc.add(fieldId);
            doc.add(fieldTitle);
            doc.add(fieldContent);
            doc.add(fieldCity);
            doc.add(fieldUrl);
            indexWriter.addDocument(doc);
        }

        // optimize() merges index segments for faster searching (Lucene 2.x API)
        indexWriter.optimize();
        indexWriter.close();

        // Report how long indexing took
        long endTime = new Date().getTime();
        System.out.println("It took " + (endTime - startTime) + " milliseconds to add the documents to the index!");
    }
}
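For context, here is a minimal sketch of how the index written by WareIndex.create() could be queried with the same Lucene 2.x API. The class name WareSearchDemo, the sample query string, and the choice of the "content" field are illustrative assumptions, not part of the original code.

package com.hapark.lucene;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

/** Hypothetical usage sketch, assuming the d:\index directory created by WareIndex. */
public class WareSearchDemo {
    public static void main(String[] args) throws Exception {
        // Open the index directory that WareIndex.create() wrote to
        IndexSearcher searcher = new IndexSearcher("d:\\index");
        // Parse a user query against the "content" field, using the same analyzer as indexing
        QueryParser parser = new QueryParser("content", new StandardAnalyzer());
        Query query = parser.parse("example");
        // Hits is the Lucene 2.x result container (removed in later versions)
        Hits hits = searcher.search(query);
        for (int i = 0; i < hits.length(); i++) {
            Document doc = hits.doc(i);
            // Stored fields such as title and url can be read back from each hit
            System.out.println(doc.get("title") + " -> " + doc.get("url"));
        }
        searcher.close();
    }
}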