
Lucene 2.0 Index Creation and Differences from 1.4

2007-02-05 15:27
The indexing process:

package org.apache.lucene;
import org.apache.lucene.index.*;
import org.apache.lucene.analysis.standard.*;
import org.apache.lucene.document.*;
import java.io.*;
import java.util.Date;
/**
* Recursively indexes all .txt files under a data directory into a Lucene index.
*
* @author cao
*/
public class Indexer {

public static void main(String[] args) throws Exception{
if (args.length != 2){
throw new Exception("usage: java " + Indexer.class.getName() + " <index dir> <data dir>");
}
File indexDir = new File(args[0]); // directory where the index will be created
File dataDir = new File(args[1]); // directory whose files will be indexed

long start = new Date().getTime();
int numIndexed = index(indexDir, dataDir);
long end = new Date().getTime();
System.out.println("Indexing " + numIndexed + " files took " + (end - start ) + " milliseconds");
}

public static int index(File indexDir, File dataDir) throws IOException {
if (!dataDir.exists() || !dataDir.isDirectory()){
throw new IOException(dataDir + " does not exist or is not a directory");
}
IndexWriter writer = new IndexWriter(indexDir, new StandardAnalyzer(), true); // true: create a new index, overwriting any existing one
writer.setUseCompoundFile(false);
indexDirectory(writer, dataDir);
int numIndexed = writer.docCount();
writer.optimize(); // merge index segments
writer.close(); // close the index
return numIndexed;
}

private static void indexDirectory(IndexWriter writer, File dir) throws IOException{
File[] files = dir.listFiles();
for(int i = 0; i < files.length; i++){
File f = files[i];
if(f.isDirectory()){
indexDirectory(writer,f);
}else if(f.getName().endsWith(".txt")){
indexFile(writer,f);
}
}
}

// method to actually index a file using Lucene
private static void indexFile(IndexWriter writer, File f)
throws IOException {
if (f.isHidden() || !f.exists() || !f.canRead()) {
return;
}
System.out.println("Indexing " + f.getCanonicalPath());
Document doc = new Document();
//doc.add(Field.Text("contents", new FileReader(f))); // Lucene 1.4 API
//doc.add(Field.UnIndexed("filename", f.getCanonicalPath())); // Lucene 1.4 API

Reader reader = new BufferedReader(new FileReader(f));
doc.add(new Field("contents", reader)); // Lucene 2.0 API: indexed and tokenized, not stored
doc.add(new Field("filename", f.getCanonicalPath(), Field.Store.YES, Field.Index.TOKENIZED));
writer.addDocument(doc);
}

}
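For completeness, here is a minimal sketch of querying the index this class produces, using the Lucene 2.0 search API (the class name SearchSketch is made up, and the command-line arguments are placeholders):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.*;

public class SearchSketch {
    public static void main(String[] args) throws Exception {
        IndexSearcher searcher = new IndexSearcher(args[0]); // the <index dir> passed to Indexer
        // parse against the "contents" field with the same analyzer used at index time
        QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
        Query query = parser.parse(args[1]); // e.g. "lucene"
        Hits hits = searcher.search(query);
        for (int i = 0; i < hits.length(); i++) {
            Document doc = hits.doc(i);
            // "filename" was stored at index time, so it can be read back from the hit
            System.out.println(hits.score(i) + " : " + doc.get("filename"));
        }
        searcher.close();
    }
}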

The following is reposted material on the differences between 1.4 and 2.0:

// Create the index
public void indexFiles() {
// Path where the index files are stored
File indexDir = new File("E:/lucene_Learning/lucene-2.0.0src/src/demo/index");

try {
Date start = new Date();
// Create the analyzer, which extracts the content that should be indexed from the text and drops the rest,
// e.g. removing common stop words such as "a" and "the", and deciding case sensitivity.
StandardAnalyzer standardAnalyzer = new StandardAnalyzer();
// The boolean argument determines whether any existing index is overwritten
IndexWriter indexWriter = new IndexWriter(indexDir, standardAnalyzer, true);
indexWriter.setMergeFactor(100);
indexWriter.setMaxBufferedDocs(100);
// Index only the first 5,000 terms of each field; the default is 10,000
indexWriter.setMaxFieldLength(5000);
// Fetch all records from the database
List articleList = articleManager.getArticles(null);
for (int i = 0; i < articleList.size(); i++) {
Article article = (Article) articleList.get(i);
// createDocument (below) builds the Lucene Document for one record
Document doc = createDocument(article);
indexWriter.addDocument(doc);
}
// Optimizing reduces the number of remaining segments, ideally merging them into a single file.
indexWriter.optimize();
indexWriter.close();
Date end = new Date();
System.out.println("create index: " + (end.getTime() - start.getTime()) + " total milliseconds");
} catch (IOException e) {
System.out.println(" caught a " + e.getClass() + "/n with message: " + e.getMessage());
}
}
public static Document createDocument(Article article)
throws java.io.IOException {
Document doc = new Document();
// Index the primary key of the article table; the Field parameters are explained in detail below
Field fieldId = new Field("uid", article.getArticleId(), Field.Store.YES, Field.Index.UN_TOKENIZED, Field.TermVector.YES);
// Index the detail column, which is a CLOB in the DB containing HTML text
String contentHtml = article.getDetail();
Reader read = new StringReader(contentHtml);
// Use HTMLParser to turn the HTML in the detail column into plain text before indexing.
// The HTMLParser class can be found in the Lucene demo.
HTMLParser htmlParser = new HTMLParser(read);
BufferedReader breader = new BufferedReader(htmlParser.getReader());
StringBuffer htmlContent = new StringBuffer();
String tempContent = breader.readLine();
while (tempContent != null) { // read to the end of the stream; a blank line alone should not end the loop
htmlContent.append(tempContent);
tempContent = breader.readLine();
}
Field fieldContents = new Field("content", htmlContent.toString(),
Field.Store.COMPRESS, Field.Index.TOKENIZED, Field.TermVector.YES);
// Each DB record maps to one Document, and each column maps to one Field
doc.add(fieldId);
doc.add(fieldContents);
return doc;
}
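If the demo HTMLParser is not available on the classpath, one crude alternative is to strip tags with regular expressions before indexing. This is only a rough sketch added for illustration, not part of the original code; a regex cannot handle arbitrary HTML, and the helper name stripHtml is made up:

// Hypothetical helper: naive tag stripping, adequate only for simple markup.
private static String stripHtml(String html) {
    // drop script/style blocks entirely, then remove any remaining tags
    String text = html.replaceAll("(?is)<(script|style)[^>]*>.*?</\\1>", " ");
    text = text.replaceAll("<[^>]+>", " ");
    // collapse the runs of whitespace left behind by removed tags
    return text.replaceAll("\\s+", " ").trim();
}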
// Search; keyword is the query string entered on the page, searched here against the content field
public List searchFiles(String keyword){
String index = "E:/lucene_Learning/lucene-2.0.0src/src/demo/index";
// hitsList collects the DB records located via the search results
List hitsList = new ArrayList();
try {
Date start = new Date();
IndexReader reader = IndexReader.open(index);
Searcher searcher = new IndexSearcher(reader);
Analyzer analyzer = new StandardAnalyzer();
QueryParser parser = new QueryParser("content", analyzer);
// Parse the query string; several keywords separated by spaces are parsed into a multi-clause query
Query query = parser.parse(keyword);
// hits holds the search results, much like a result set in SQL
Hits hits = searcher.search(query);
for (int i = 0; i < hits.length(); i++) {
Document doc = hits.doc(i);
// primary key of the article table
String id = doc.get("uid");
// Fetch the record from the DB by primary key and add it to hitsList
Article article;
try {
article = articleManager.getArticle(id);
} catch (ObjectRetrievalFailureException e) {
article = null;
}
// If the record is not found it no longer exists, so it is not added to hitsList
if (article != null) hitsList.add(article);
}
searcher.close();
reader.close();
Date end = new Date();
System.out.println("search files: " + (end.getTime() - start.getTime()) + " total milliseconds");
} catch (IOException e) {
System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
} catch (ParseException e) {
System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
}
return hitsList;
}
// Delete the index
public void deleteIndex(){
String index = "E:/lucene_Learning/lucene-2.0.0src/src/demo/index";
try {
Date start = new Date();
IndexReader reader = IndexReader.open(index);
int numFiles = reader.numDocs();
for (int i = 0; i < numFiles; i++) {
// Deleting here only marks the document as deleted; after deleteDocument runs, a file with the .del suffix
// appears, recording which documents have been marked
reader.deleteDocument(i);
}
reader.close();
Date end = new Date();
System.out.println("delete index: " + (end.getTime() - start.getTime()) + " total milliseconds");
} catch (IOException e) {
System.out.println(" caught a " + e.getClass() + "/n with message: " + e.getMessage());
}

}
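Deleting every document, as above, is mostly useful for wiping an index. The more common need is to delete just the documents matching a key, which Lucene 2.0 supports through IndexReader.deleteDocuments(Term). A minimal sketch, reusing the same index path (the method name deleteByUid is made up):

// Hypothetical variant: mark as deleted only the documents whose uid field matches.
public void deleteByUid(String uid) {
    try {
        IndexReader reader = IndexReader.open("E:/lucene_Learning/lucene-2.0.0src/src/demo/index");
        // returns the number of documents that were marked as deleted
        int deleted = reader.deleteDocuments(new Term("uid", uid));
        System.out.println("deleted " + deleted + " document(s) for uid " + uid);
        reader.close();
    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}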
// Undelete: restore documents that were marked as deleted
public void unDeleteIndex(){
String index = "E:/lucene_Learning/lucene-2.0.0src/src/demo/index";
try {
IndexReader reader = IndexReader.open(index);
reader.undeleteAll();
reader.close();
} catch (IOException e) {
System.out.println(" caught a " + e.getClass() + "/n with message: " + e.getMessage());
}

}

A Field is much like a column in a database: simply put, it is a name-value pair. A field has three attributes:

isStored - whether the value is stored
isIndexed - whether the value is indexed
isTokenized - whether the value is tokenized

Combinations of these attributes make up the four kinds of Field in 1.4 (Text comes in a String and a Reader variant), each with its own use:

              Stored   Indexed   Tokenized
Keyword         Y        Y         N
UnIndexed       Y        N         N
UnStored        N        Y         Y
Text (String)   Y        Y         Y
Text (Reader)   N        Y         Y

The Field API changed considerably between version 1.4.3 and 2.0.0; the mapping is shown below.

All of the following 1.4.3 methods were replaced by Field(String name, String value, Store store, Index index, TermVector termVector):
Keyword(String name, String value) // only version 1.4.3
Stored, indexed, not tokenized; for atomic values such as URIs (e.g. the date of an MSN chat log, or the full path of an MP3 file).
Field(String name, String value, Field.Store.YES, Field.Index.UN_TOKENIZED) // version 2.0.0

UnIndexed(String name, String value) // only version 1.4.3
Stored, not indexed, not tokenized; e.g. the full path of a file.
Field(String name, String value, Field.Store.YES, Field.Index.NO) // version 2.0.0

UnStored(String name, String value) // only version 1.4.3
Not stored, indexed, tokenized; e.g. the body of an HTML page or the content of a Word document. Such content needs to be indexed, but since it is usually large there is no need to store it as well; it can be fetched again later via its URI. So it is tokenized and indexed, but not stored.
Field(String name, String value, Field.Store.NO, Field.Index.TOKENIZED) // version 2.0.0

Text(String name, String value) // only version 1.4.3
Stored, indexed, tokenized; e.g. the attributes of a file, such as the artist or album of an MP3.
Field(String name, String value, Field.Store.YES, Field.Index.TOKENIZED) // version 2.0.0

Text(String name, Reader value) // only version 1.4.3
Not stored, indexed, tokenized.
Field(String name, Reader reader) // version 2.0.0
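Putting the mapping together, a Document exercising each of the 2.0-style fields might be built like this (the field names and values are purely illustrative):

Document doc = new Document();
// Keyword equivalent: stored, indexed as a single token
doc.add(new Field("path", "C:/mp3/song.mp3", Field.Store.YES, Field.Index.UN_TOKENIZED));
// UnIndexed equivalent: stored only, not searchable
doc.add(new Field("size", "4096", Field.Store.YES, Field.Index.NO));
// UnStored equivalent: tokenized and indexed, but not kept in the index
doc.add(new Field("body", "the full text of the page ...", Field.Store.NO, Field.Index.TOKENIZED));
// Text(String) equivalent: stored, tokenized and indexed
doc.add(new Field("title", "Lucene in Action", Field.Store.YES, Field.Index.TOKENIZED));
// Text(Reader) equivalent: tokenized and indexed from a Reader, never stored
doc.add(new Field("contents", new FileReader("C:/mp3/lyrics.txt")));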