您的位置:首页 > 产品设计 > UI/UE

史上最详细的Lucene使用讲解,增删改查

2017-08-01 15:37 127 查看
不多说,Lucene的一切坑,全在注释里写清楚了

注:本人使用的是 Lucene 5.0.0 的版本,

但是在 5.3.1 版本中,BooleanQuery 直接 new 并调用 add 的联合查询方式已被标记为过时,官方推荐改用 BooleanQuery.Builder 来构建联合查询。

所依赖的jar包

<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-core -->
<dependencies>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-core</artifactId>
<version>5.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-queryparser -->
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-queryparser</artifactId>
<version>5.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-analyzers-common -->
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analyzers-common</artifactId>
<version>5.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-analyzers-smartcn -->
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-analyzers-smartcn</artifactId>
<version>5.0.0</version>
</dependency>
<!-- https://mvnrepository.com/artifact/org.apache.lucene/lucene-highlighter -->
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-highlighter</artifactId>
<version>5.0.0</version>
</dependency>

<!-- https://mvnrepository.com/artifact/junit/junit -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.12</version>
<scope>test</scope>
</dependency>
</dependencies>

完善的Lucene使用Demo 注释详细

package com.spongeli.luence;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.junit.Before;
import org.junit.Test;

/**
* @author itw_lichao01
*
*/
public class LuenceDemo {

private IndexWriter indexWriter;

private IndexReader indexReader;
private IndexSearcher indexSearcher;

private String ids_c[] = {"1","2","3"};
private String citys_c[] = {"qingdao","nanjing","shanghai"};
private String descs_c[] = {"Lucene是apache软件基金会4 jakarta项目组的一个子项目,是一个开放源代码的全文检索引擎工具包,但它不是一个完整的全文检索引擎,而是一个全文检索引擎的架构,提供了完整的查询引擎和索引引擎,部分文本分析引擎(英文与德文两种西方语言)。Lucene的目的是为软件开发人员提供一个简单易用的工具包,以方便的在目标系统中实现全文检索的功能,或者是以此为基础建立起完整的全文检索引擎。Lucene是一套用于全文检索和搜寻的开源程式库",
"Lucene最初是由Doug Cutting开发的,在SourceForge的网站上提供下载。在2001年9月做为高质量的开源Java产品加入到Apache软件基金会的 Jakarta家族中。随着每个版本的发布,这个项目得到明显的增强,也吸引了更多的用户和开发人员。2004年7月,Lucene1.4版正式发布,10月的1.4.2版本做了一次bug修正。表1.1显示了Lucene的发布历史。",
"最后,转移到apache软件基金会后,借助于apache软件基金会的网络平台,程序员可以方便的和开发者、其它程序员交流,促成资源的共享,甚至直接获得已经编写完备的扩充功能。最后,虽然Lucene使用Java语言写成,但是开放源代码社区的程序员正在不懈的将之使用各种传统语言实现(例如.net framework[14]),在遵守Lucene索引文件格式的基础上,使得Lucene能够运行在各种各样的平台上,系统管理员可以根据当前的平台适合的语言来合理的选择。"};

private String ids_y[] = {"1","2","3"};
private String citys_y[] = {"qingdao","nanjing","shanghai"};
private String descs_y[] = {"Qingdao is a buautify city","Nanjing as a xxxxx","shanghai is a yyyyyy city"};
/**
* 获取索引写人器
* @throws IOException
*/
//@Before
public void createIndex() throws IOException{
/* 创建索引生成在硬盘的位置(指:索引的目录位置) */
Directory d = FSDirectory.open(Paths.get("/luenceIndex"));

/* 创建索引的配置类
* StandardAnalyzer : 是luence自带的标准分词器 只对英文起作用 不支持中文
* SmartChineseAnalyzer : 第三方 开发的中文分词器
* 注意:添加索引的分词器必须和读取索引的分词器一致
* */
Analyzer analyzer_c = new SmartChineseAnalyzer();
Analyzer analyzer_y = new StandardAnalyzer();
IndexWriterConfig conf = new IndexWriterConfig(analyzer_y);
/**
* conf 中的其他设置 请参考
* https://www.oschina.net/question/197218_83427 */
/* 获取到索引的读写器 */
IndexWriter indexWriter = new IndexWriter(d, conf);
this.indexWriter = indexWriter;
};
/**
* 添加索引 addDocument
* @throws IOException
*/
@Test
public void indexWriter() throws IOException{

/**
* 包含基本类型的属性
* 字符串类型的属性
* text类型的属性 text默认分词 其余的不会分词
* 也可以 用一个流来读取txt的文本,并且文本里的内容会进行分词 可以分词查找
*
* Field.Store.YES 表示 将该字段存储到硬盘上
* Field.Store.NO 表示不存储到硬盘上
*/
for (int i = 0; i < ids_y.length; i++) {
Document doc = new Document();
doc.add(new StringField("id_y", ids_y[i], Field.Store.YES));
doc.add(new StringField("city_y", citys_y[i], Field.Store.YES));
doc.add(new TextField("desc_y", descs_y[i], Field.Store.YES));
//doc.add(new TextField("file",new FileReader(new File(""))));
indexWriter.addDocument(doc);
}
System.out.println("返回目前有效的索引数:"+indexWriter.numDocs());
System.out.println("返回目前最大的索引数:"+indexWriter.maxDoc());
/**
* 必须要关闭
* luence对索引有自带事物机制,如果没有关闭index或者没有提交将插入索引会出现无效
* */
this.indexWriter.commit();
}
/***
* updateLuence:
* luence的索引更新实际上是对索引进行了先删除后增加的操作,
* (updateDocument:方法针对Term如果找到相同的索引则更新最后一条,如果没有发现 则增加一个条索引)
* 但是,luence对索引进行了保护操作在进行删除索引时,并不是将索引直接删除,而是将索引置为无效索引
* 所以会出现最大索引和最大有效所以的区别
* 但是在删除索引时,luence提供了直接强制删除的操作
* @throws IOException
*/
@Test
public void updateLuence() throws IOException{
Document doc = new Document();
doc.add(new StringField("id_y","4",Field.Store.YES));
doc.add(new StringField("city_y", "JiuQuan", Field.Store.YES));
doc.add(new TextField("desc_y", "JiuQuan is a buautify city", Field.Store.YES));

Term term = new Term("id_y","1");
this.indexWriter.updateDocument(term, doc);
indexWriter.commit();
System.out.println("返回目前有效的索引数:"+indexWriter.numDocs());
System.out.println("返回目前最大的索引数:"+indexWriter.maxDoc());
indexWriter.close();
}
/**
* deleteLuence
* @throws IOException
* @throws ParseException
*/
@Test
public void deleteLuence() throws Exception{
//第一种 通过term查询删除
/*Term terms = new Term("id_y","1");
this.indexWriter.deleteDocuments(terms);*/

//第二种 解析器查询删除
String field = "desc_y";
/**
* 搜索支持简答的 并集和或集 中间用空格隔开
* AND
* OR(可省略)
* 还有其他的一大堆 可百度
* http://www.oschina.net/question/1092_560 */
String keyword = "shanghai AND city";
//String keyword = "shanghai city";
Analyzer analyzer = new StandardAnalyzer();
QueryParser perse = new QueryParser(field, analyzer);
Query query = perse.parse(keyword);
this.indexWriter.deleteDocuments(query);

//第三种 全部删除
//this.indexWriter.deleteAll();
//强制删除 不进入回收站
indexWriter.forceMergeDeletes();

indexWriter.commit();

System.out.println("返回目前有效的索引数:"+indexWriter.numDocs());
System.out.println("返回目前最大的索
c6f9
引数:"+indexWriter.maxDoc());
}
/**
* 获取索引读取的实现类
* @throws IOException
*/
@Before
public void initLuenceReader() throws IOException{
/* 创建读取索引在硬盘的位置(指:索引的目录位置) */
Directory d = FSDirectory.open(Paths.get("/luenceIndex"));
/* 创建索引读取器 */
this.indexReader = DirectoryReader.open(d);
/* 创建索引查询器 */
this.indexSearcher = new IndexSearcher(indexReader);
}
/***
* 标准分词查询
* @throws Exception
*/
@Test
public void QueryParser() throws Exception{
//需要查的属性
String field = "desc_y";
//需要查的值
/**
* 支持AND OR 等查询
*/
String q = "shanghai AND city";
//选取相似度最高的10前十条
int size = 10 ;
//对所查的值进行标准分词
/**
* 分词需和存入时的分词器一致
*/
Analyzer analyzer = new StandardAnalyzer();
QueryParser queryParser = new QueryParser(field, analyzer);
Query parse = queryParser.parse(q);
//进行查询
TopDocs topDocs = indexSearcher.search(parse, size);
System.out.println("查询:"+q+" 关键字,查到的记录数:"+topDocs.totalHits);
/* 获取数据 */
if(topDocs.totalHits > 0){
List<Document> list = new ArrayList<Document>();
for (ScoreDoc docs : topDocs.scoreDocs) {
list.add(indexSearcher.doc(docs.doc));
}
List<Map<String,Object>> docToMap = docToMap(list);
System.out.println("查到的数据:"+docToMap.size()+"多个!");
for (Map<String, Object> map : docToMap) {
System.out.println("===============================================");
for (Iterator<Entry<String, Object>> iterator = map.entrySet().iterator(); iterator.hasNext();) {
Entry<String, Object> next = iterator.next();
System.out.println(next.getKey()+"===="+next.getValue());
}
}
}
}
/**
* 单个关键词查询
* TermQuery
* @throws IOException
*/
@Test
public void termQuery() throws IOException{
//需要查的属性
String field = "desc_y";
//需要查的值
String q = "shanghai";

Term term = new Term(field,q);
Query query=new TermQuery(term);
TopDocs topDocs = indexSearcher.search(query, 10);

System.out.println("查询:"+q+" 关键字,查到的记录数:"+topDocs.totalHits);
/* 获取数据 */
if(topDocs.totalHits > 0){
List<Document> list = new ArrayList<Document>();
for (ScoreDoc docs : topDocs.scoreDocs) {
list.add(indexSearcher.doc(docs.doc));
}
List<Map<String,Object>> docToMap = docToMap(list);
System.out.println("查到的数据:"+docToMap.size()+"多个!");
for (Map<String, Object> map : docToMap) {
System.out.println("===============================================");
for (Iterator<Entry<String, Object>> iterator = map.entrySet().iterator(); iterator.hasNext();) {
Entry<String, Object> next = iterator.next();
System.out.println(next.getKey()+"===="+next.getValue());
}
}
}
}
/**
* 多字段联合查询
* booleanTerm
* @throws IOException
*/
@Test
public void booleanTerm() throws IOException{
TermQuery query1 = new TermQuery(new Term("desc_y", "shanghai"));
TermQuery query2 = new TermQuery(new Term("id_y", "2"));
BooleanQuery booleanQuery = new BooleanQuery();
/**
* 参数如下:
* BooleanClause.Occur.MUST:必须包含,类似于逻辑运算的与<br/>
* BooleanClause.Occur.MUST_NOT:必须不包含,类似于逻辑运算的非<br/>
* BooleanClause.Occur.SHOULD:可以包含,类似于逻辑运算的或<br/>
*/
booleanQuery.add(query1,BooleanClause.Occur.SHOULD);
booleanQuery.add(query2,BooleanClause.Occur.SHOULD);

TopDocs topDocs = indexSearcher.search(booleanQuery, 10);

System.out.println("查询:联合 关键字,查到的记录数:"+topDocs.totalHits);
/* 获取数据 */
if(topDocs.totalHits > 0){
List<Document> list = new ArrayList<Document>();
for (ScoreDoc docs : topDocs.scoreDocs) {
list.add(indexSearcher.doc(docs.doc));
}
List<Map<String,Object>> docToMap = docToMap(list);
System.out.println("查到的数据:"+docToMap.size()+"多个!");
for (Map<String, Object> map : docToMap) {
System.out.println("===============================================");
for (Iterator<Entry<String, Object>> iterator = map.entrySet().iterator(); iterator.hasNext();) {
Entry<String, Object> next = iterator.next();
System.out.println(next.getKey()+"===="+next.getValue());
}
}
}
}
/**
* 范围查询
* TermRangeQuery
* @throws IOException
*/
@Test
public void termRangeQuery() throws IOException{
/**
* TermRangeQuery rangequery=TermRangeQuery.newStringRange(字段名, 起始值, 终止值, 起始值是否包含边界, 终止值是否包含边界);
*/
TermRangeQuery idQuery = TermRangeQuery.newStringRange("id_y", "1", "3", true, true);
//支持日期
//TermRangeQuery idQuery2 = TermRangeQuery.newStringRange("birthdays","2011-03-09", "2013-01-07", true, true);
TopDocs topDocs = indexSearcher.search(idQuery, 10);

System.out.println("查询:范围搜索,查到的记录数:"+topDocs.totalHits);
/* 获取数据 */
if(topDocs.totalHits > 0){
List<Document> list = new ArrayList<Document>();
for (ScoreDoc docs : topDocs.scoreDocs) {
list.add(indexSearcher.doc(docs.doc));
}
List<Map<String,Object>> docToMap = docToMap(list);
System.out.println("查到的数据:"+docToMap.size()+"多个!");
for (Map<String, Object> map : docToMap) {
System.out.println("===============================================");
for (Iterator<Entry<String, Object>> iterator = map.entrySet().iterator(); iterator.hasNext();) {
Entry<String, Object> next = iterator.next();
System.out.println(next.getKey()+"===="+next.getValue());
}
}
}
}
/**
* 获取数据的方法
* @param docs
* @return
*/
public List<Map<String,Object>> docToMap(List<Document> docs){
List<Map<String,Object>> list = new ArrayList<Map<String,Object>>();
for (Document doc : docs) {
Map<String,Object> map = new HashMap<String,Object>();
map.put("id_y", doc.get("id_y"));
map.put("city_y", doc.get("city_y"));
map.put("desc_y", doc.get("desc_y"));
list.add(map);
}
return list;
}
}
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息
标签:  luence的增删改查