被客户拉着要解决系统LUCENE索引的一个问题,头疼了很多天,也上网找了好多例子,网上发现这是LUCENE一直存在的一个问题,但是牺牲我今天的假期,总算大致找到解决方案了,记录一下。
系统上线运行几个月,但是用户一直反馈LUCENE索引会把匹配度很低的记录放到前面,匹配度高的却排到了后面,有的甚至要翻好几页才能找到目标记录,比如输入“西湖科技园”进行搜索,匹配到的前面优先选项是:“西湖区***科技园”,这样不仅影响用户工作效率,还容易导致用户选择错误记录。
由于这项功能不由我负责,之前是其他同事做的,刚接手时连LUCENE是什么都不知道,抽时间跟代码,调试,再加上万能的百度,终于知道大概怎么回事,继而开始动工。
我的想法是前台输入的词语作为整个匹配单元去LUCENE文件里面匹配,经过疯狂的搜索,今天总算被我找到了,具体代码如下:
package com.ai.zj.inter.sys.rm;
import java.io.File;
import java.util.HashSet;
import java.util.Set;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKQueryParser;
/**
 * Demo showing how to boost exact-phrase matches ("西湖科技园") above
 * loose term matches ("西湖区...科技园") in a Lucene (3.x-era) index.
 *
 * The trick: wrap the user's query as a {@link PhraseQuery} with slop 0
 * (terms must be adjacent, in order) and a high boost, so documents
 * containing the exact phrase score far above scattered-term matches.
 */
public class SearchTester {
	private static Directory dir;
	private static StandardAnalyzer a;
	private static IndexSearcher searcher;

	/**
	 * Opens the index directory at C:\571 and runs the demo search.
	 *
	 * @param args unused
	 * @throws Exception on any index/IO failure (demo code — no recovery)
	 */
	public static void main(String[] args) throws Exception {
		String filePath = "C:\\571";
		dir = FSDirectory.open(new File(filePath));
		System.out.println("=========================dir:" + dir);
		//buildIndex();
		searcher = new IndexSearcher(dir);
		try {
			testSearch();
		} finally {
			// Release index file handles even if the search throws.
			searcher.close();
		}
	}

	/**
	 * Runs a BooleanQuery of two phrase clauses and prints the hits with
	 * their scores.
	 *
	 * Clause 1: the exact phrase "西湖科技园" (slop 0, boost 100) — exact
	 * matches dominate the ranking. Clause 2: the phrase "古荡" with boost 0
	 * — still matches (SHOULD), but contributes nothing to the score, so
	 * such records sink to the bottom instead of disappearing.
	 */
	static void testSearch() throws Exception {
		System.out.println("===================================test");
		// Terms are single characters because the index field was built
		// with StandardAnalyzer, which tokenizes CJK text per character.
		BooleanQuery query = new BooleanQuery();
		Term word1 = new Term("DETAIL_ALL_NAME", "西");
		Term word2 = new Term("DETAIL_ALL_NAME", "湖");
		Term word3 = new Term("DETAIL_ALL_NAME", "科");
		Term word4 = new Term("DETAIL_ALL_NAME", "技");
		Term word5 = new Term("DETAIL_ALL_NAME", "园");
		Term word6 = new Term("DETAIL_ALL_NAME", "古");
		Term word7 = new Term("DETAIL_ALL_NAME", "荡");

		// High-priority clause: the characters of "西湖科技园" must appear
		// adjacent and in order (slop 0); boost 100 pushes exact matches up.
		PhraseQuery fact2 = new PhraseQuery();
		fact2.add(word1);
		fact2.add(word2);
		fact2.add(word3);
		fact2.add(word4);
		fact2.add(word5);
		fact2.setSlop(0);
		fact2.setBoost(100f);
		query.add(fact2, BooleanClause.Occur.SHOULD);

		// Zero-boost clause: "古荡" records still match but score ~0.
		PhraseQuery fact = new PhraseQuery();
		fact.add(word6);
		fact.add(word7);
		fact.setSlop(0);
		fact.setBoost(0f);
		query.add(fact, BooleanClause.Occur.SHOULD);

		TopDocs topDocCollector = searcher.search(query, 10);
		ScoreDoc[] hits = topDocCollector.scoreDocs;
		for (int i = 0; i < hits.length; i++) {
			org.apache.lucene.document.Document doc = searcher.doc(hits[i].doc);
			System.out.println("---------------------搜索到的值:" + doc.get("DETAIL_ALL_NAME").replace("+", "") + "_评分:" + hits[i].score);
		}
	}

	/**
	 * Builds a small demo index of four addresses so the ranking effect of
	 * the phrase boost can be observed. Only needs to run once; the call in
	 * {@link #main} is commented out after the first run.
	 */
	static void buildIndex() throws Exception {
		Set<String> stopWords = new HashSet<String>();
		// Create the analyzer (StandardAnalyzer splits CJK per character).
		a = new StandardAnalyzer(Version.LUCENE_CURRENT, stopWords);
		//a = new IKAnalyzer();
		IndexWriter writer = new IndexWriter(dir, a, IndexWriter.MaxFieldLength.UNLIMITED);
		try {
			Document doc = new Document();
			doc.add(new Field("id", "1", Field.Store.YES, Field.Index.NOT_ANALYZED));
			doc.add(new Field("DETAIL_ALL_NAME", "浙江省杭州市西湖区西湖科技园西园七路八号浙江移动手机阅读基地2楼203", Field.Store.YES, Field.Index.ANALYZED));
			writer.addDocument(doc);
			doc = new Document();
			doc.add(new Field("id", "2", Field.Store.YES, Field.Index.NOT_ANALYZED));
			doc.add(new Field("DETAIL_ALL_NAME", "浙江省杭州市西湖区古荡科技园古荡大厦", Field.Store.YES, Field.Index.ANALYZED));
			writer.addDocument(doc);
			doc = new Document();
			doc.add(new Field("id", "3", Field.Store.YES, Field.Index.NOT_ANALYZED));
			doc.add(new Field("DETAIL_ALL_NAME", "浙江省杭州市上城区西湖人家酒楼", Field.Store.YES, Field.Index.ANALYZED));
			writer.addDocument(doc);
			doc = new Document();
			// Fixed: original reused id "3" for this fourth document.
			doc.add(new Field("id", "4", Field.Store.YES, Field.Index.NOT_ANALYZED));
			doc.add(new Field("DETAIL_ALL_NAME", "浙江省杭州市上城区科技园莫干山路西湖明珠大楼", Field.Store.YES, Field.Index.ANALYZED));
			writer.addDocument(doc);
			System.out.println(doc);
		} finally {
			// Commit and release the write lock even on failure.
			writer.close();
		}
	}

	/**
	 * String recombination: inserts {@code addStr} between every pair of
	 * adjacent non-Chinese characters, leaving runs containing a Chinese
	 * character untouched. E.g. {@code recombinationStr("a1中b", "+")}
	 * yields {@code "a+1中b"}.
	 *
	 * @param rStr   the original string (null is treated as empty)
	 * @param addStr the separator to insert
	 * @return the recombined string; inputs shorter than 2 chars are
	 *         returned unchanged (the original dropped them entirely)
	 */
	public static String recombinationStr(String rStr, String addStr) {
		if (rStr == null) {
			return "";
		}
		if (rStr.length() < 2) {
			// Fixed: the original loop never appended a lone character.
			return rStr;
		}
		StringBuilder nStr = new StringBuilder();
		nStr.append(rStr.charAt(0));
		for (int i = 1; i < rStr.length(); i++) {
			String prev = rStr.substring(i - 1, i);
			String cur = rStr.substring(i, i + 1);
			if (!isChinese(prev) && !isChinese(cur)) {
				nStr.append(addStr);
			}
			nStr.append(cur);
		}
		return nStr.toString();
	}

	/**
	 * Determines whether the string consists entirely of CJK characters
	 * in the range U+4E00..U+9FBB (and is non-empty).
	 *
	 * @param str the string to test
	 * @return true if every character is in the CJK range
	 */
	public static boolean isChinese(String str) {
		return str.matches("[\\u4e00-\\u9fbb]+");
	}
}
PhraseQuery 是一种很有用的 Query,通过设置 slop(坡度)可以控制要查找的分词之间最多允许间隔多少个其他字,slop 为 0 时要求各词项完全相邻。几篇很不错的博文也记录下来:http://blog.csdn.net/hongfu_/article/details/1933366
http://san-yun.iteye.com/blog/1935834
http://blog.csdn.net/quzishen/article/details/5928883
2014.10.19 晚