Writing a Lucene program: simple and easy to follow

Required jars (Maven coordinates):

    Maven: org.apache.lucene:lucene-analyzers-common:5.3.1
    Maven: org.apache.lucene:lucene-analyzers-smartcn:5.3.1
    Maven: org.apache.lucene:lucene-core:5.3.1
    Maven: org.apache.lucene:lucene-highlighter:5.3.1
    Maven: org.apache.lucene:lucene-join:5.3.1
    Maven: org.apache.lucene:lucene-memory:5.3.1
    Maven: org.apache.lucene:lucene-queries:5.3.1
    Maven: org.apache.lucene:lucene-queryparser:5.3.1
    Maven: org.apache.lucene:lucene-sandbox:5.3.1
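With Maven, the coordinates above can be declared in pom.xml roughly as follows (a minimal sketch; only two of the artifacts are shown, the others follow the same pattern):

    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-core</artifactId>
      <version>5.3.1</version>
    </dependency>
    <dependency>
      <groupId>org.apache.lucene</groupId>
      <artifactId>lucene-analyzers-smartcn</artifactId>
      <version>5.3.1</version>
    </dependency>

The code also uses org.apache.commons.lang.StringEscapeUtils, so commons-lang must be on the classpath as well. The complete indexing and search class is listed below; note that Blog, DateUtil and StringUtil are project classes, not part of Lucene.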

import java.io.StringReader;
import java.nio.file.Paths;
import java.util.Date;
import java.util.LinkedList;
import java.util.List;

import org.apache.commons.lang.StringEscapeUtils;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.highlight.Fragmenter;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;


public class lucene {

   private Directory dir=null;
   /**
    * Obtain an IndexWriter instance
    * @return
    * @throws Exception
    */
   private IndexWriter getWriter()throws Exception{
      dir=FSDirectory.open(Paths.get("D://lucene"));  // directory where the index files are stored
      SmartChineseAnalyzer analyzer=new SmartChineseAnalyzer();
      IndexWriterConfig iwc=new IndexWriterConfig(analyzer);
      IndexWriter writer=new IndexWriter(dir, iwc);
      return writer;
   }
   
   /**
    * Add a blog post to the index
    * @param blog
    */
   public void addIndex(Blog blog)throws Exception{
      IndexWriter writer=getWriter();
      Document doc=new Document();
      doc.add(new StringField("id",String.valueOf(blog.getId()),Field.Store.YES));
      doc.add(new TextField("title",blog.getTitle(),Field.Store.YES));
      doc.add(new StringField("releaseDate",DateUtil.formatDate(new Date(), "yyyy-MM-dd"),Field.Store.YES));
      doc.add(new TextField("content",blog.getContentNoTag(),Field.Store.YES));
      writer.addDocument(doc);
      writer.close();
   }
   
   /**
    * Update the index entry for a blog post
    * @param blog
    * @throws Exception
    */
   public void updateIndex(Blog blog)throws Exception{
      IndexWriter writer=getWriter();
      Document doc=new Document();
      doc.add(new StringField("id",String.valueOf(blog.getId()),Field.Store.YES));
      doc.add(new TextField("title",blog.getTitle(),Field.Store.YES));
      doc.add(new StringField("releaseDate",DateUtil.formatDate(new Date(), "yyyy-MM-dd"),Field.Store.YES));
      doc.add(new TextField("content",blog.getContentNoTag(),Field.Store.YES));
      writer.updateDocument(new Term("id", String.valueOf(blog.getId())), doc);
      writer.close();
   }
   
   /**
    * Delete the index entry for the given blog post
    * @param blogId
    * @throws Exception
    */
   public void deleteIndex(String blogId)throws Exception{
      IndexWriter writer=getWriter();
      writer.deleteDocuments(new Term("id",blogId));
      writer.forceMergeDeletes(); // force-merge segments so deleted documents are physically removed
      writer.commit();
      writer.close();
   }
   
   /**
    * Search blog posts
    * @param q search keyword
    * @return
    * @throws Exception
    */
   public List<Blog> searchBlog(String q)throws Exception{
      dir=FSDirectory.open(Paths.get("D://lucene"));
      IndexReader reader = DirectoryReader.open(dir);
      IndexSearcher is=new IndexSearcher(reader);
      BooleanQuery.Builder booleanQuery = new BooleanQuery.Builder();
      SmartChineseAnalyzer analyzer=new SmartChineseAnalyzer();
      QueryParser parser=new QueryParser("title",analyzer);
      Query query=parser.parse(q);
      QueryParser parser2=new QueryParser("content",analyzer);
      Query query2=parser2.parse(q);
      booleanQuery.add(query,BooleanClause.Occur.SHOULD);
      booleanQuery.add(query2,BooleanClause.Occur.SHOULD);
      TopDocs hits=is.search(booleanQuery.build(), 100);
      // Highlight matched terms by wrapping them in red bold HTML markup
      QueryScorer scorer=new QueryScorer(query);
      Fragmenter fragmenter = new SimpleSpanFragmenter(scorer);  
      SimpleHTMLFormatter simpleHTMLFormatter=new SimpleHTMLFormatter("<b><font color='red'>","</font></b>");
      Highlighter highlighter=new Highlighter(simpleHTMLFormatter, scorer);
      highlighter.setTextFragmenter(fragmenter);  
      List<Blog> blogList=new LinkedList<Blog>();
      for(ScoreDoc scoreDoc:hits.scoreDocs){
         Document doc=is.doc(scoreDoc.doc);
         Blog blog=new Blog();
         blog.setId(Integer.parseInt(doc.get(("id"))));
         blog.setReleaseDateStr(doc.get(("releaseDate")));
         String title=doc.get("title");
         String content=StringEscapeUtils.escapeHtml(doc.get("content"));
         if(title!=null){
            TokenStream tokenStream = analyzer.tokenStream("title", new StringReader(title));
            String hTitle=highlighter.getBestFragment(tokenStream, title);
            if(StringUtil.isEmpty(hTitle)){
               blog.setTitle(title);
            }else{
               blog.setTitle(hTitle);             
            }
         }
         if(content!=null){
            TokenStream tokenStream = analyzer.tokenStream("content", new StringReader(content)); 
            String hContent=highlighter.getBestFragment(tokenStream, content);
            if(StringUtil.isEmpty(hContent)){
               if(content.length()<=200){
                  blog.setContent(content);
               }else{
                  blog.setContent(content.substring(0, 200));                   
               }
            }else{
               blog.setContent(hContent);             
            }
         }
         blogList.add(blog);
      }
      reader.close();
      return blogList;
   }
}
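To tie it together, here is a minimal usage sketch. It assumes a Blog bean exposing the getters and setters referenced above (getId, getTitle, getContentNoTag, setId, setTitle, ...), which this post does not show:

public class LuceneDemo {
   public static void main(String[] args) throws Exception {
      lucene indexer = new lucene();

      // Index a post (Blog is an assumed bean with matching setters)
      Blog blog = new Blog();
      blog.setId(1);
      blog.setTitle("Lucene 全文检索入门"); // a Chinese title so SmartChineseAnalyzer has something to segment
      blog.setContentNoTag("Lucene is a full-text search library for Java.");
      indexer.addIndex(blog);

      // Search titles and contents for a keyword; titles/contents of hits may contain highlight markup
      for (Blog hit : indexer.searchBlog("Lucene")) {
         System.out.println(hit.getId() + " -> " + hit.getTitle());
      }

      // Remove the post from the index by id
      indexer.deleteIndex("1");
   }
}

Each write operation above opens and closes its own IndexWriter (see getWriter()), which keeps the example easy to follow; in a real application you would normally reuse a single long-lived writer instead of paying that cost per call.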