Elasticsearch Study Notes: Synonyms

Synonyms broaden the set of documents that a query can match. As with stemming or partial matching, a synonym field should not be used on its own: it should be combined with a query on a main field that contains the original text in its unaltered form.
First approach:
Synonyms can replace existing tokens or be added to the token stream through the synonym token filter. First we define a token filter of type synonym; then we create a custom analyzer that uses it. Synonym rules can either be listed inline with the synonyms parameter, as below, or loaded from a file on every node with synonyms_path (e.g. "synonyms_path": "analysis/synonym.txt", resolved relative to the Elasticsearch config directory); use one or the other, not both.
PUT http://localhost:9200/temp_index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_synonym_filter": {
          "type": "synonym", 
          "expand": true,
          "ignore_case": true
          "synonyms_path" : "analysis/synonym.txt"
          "synonyms": [ 
            "british,english",
            "queen,monarch"
            "usa, america, united states => usa"
          ]
        }
      },
      "analyzer": {
        "my_synonyms": {
          "tokenizer": "ik_max_word",
          "filter": [
            "lowercase",
            "my_synonym_filter" 
          ]
        }
      }
    }
  }
}
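To verify the analyzer, the index's _analyze API can be queried directly (a quick sanity check, using the pre-5.0 query-string form of the API):

GET http://localhost:9200/temp_index/_analyze?analyzer=my_synonyms&text=Elizabeth is the english queen

The returned token stream should contain british alongside english and monarch alongside queen, each pair sharing the same position.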
Common synonym formats:
Simple expansion: any token in the synonym list is expanded into all of the tokens in the list, e.g. "jump,hop,leap".
Simple contraction: the multiple synonyms on the left-hand side are mapped to the single token on the right. It must be applied at both index time and query time, to ensure that query terms are mapped to the same value that exists in the index, e.g. "leap,hop => jump".
Genre expansion: quite different from simple contraction or expansion; rather than treating all synonyms as equal, it widens the meaning of each term to make it progressively more general, e.g. "cat => cat,pet", "kitten => kitten,cat,pet", "dog => dog,pet", "puppy => puppy,dog,pet" (see the search sketch below).
Update the relevant mapping configuration so the field uses the custom analyzer:
"content": {
  "type": "string",
  "term_vector": "with_positions_offsets",
  "analyzer": "my_synonyms",
  "search_analyzer": "my_synonyms"
}
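For context, a minimal sketch of the full mapping request this snippet belongs in (the type name temp_type is illustrative; "string" is the pre-5.0 field type used throughout this note):

PUT http://localhost:9200/temp_index/_mapping/temp_type
{
  "properties": {
    "content": {
      "type": "string",
      "term_vector": "with_positions_offsets",
      "analyzer": "my_synonyms",
      "search_analyzer": "my_synonyms"
    }
  }
}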
Second approach:
Use the dynamic synonym plugin; see https://github.com/bells/elasticsearch-analysis-dynamic-synonym, which documents the plugin's installation and usage in detail.
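As a rough sketch based on that project's README, the plugin adds a dynamic_synonym filter type whose source file is re-read on an interval, so synonyms can be updated without reopening the index (the URL and interval below are illustrative):

{
  "settings": {
    "analysis": {
      "filter": {
        "remote_synonym": {
          "type": "dynamic_synonym",
          "synonyms_path": "http://somehost/synonym.txt",
          "interval": 30
        }
      }
    }
  }
}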

Third approach:
Use the Java API to expand a keyword query into queries for its synonyms.
import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class IKTokenizer extends Tokenizer {

	private IKSegmenter _IKImplement = null;

	// Term text, offset and lexeme-type attributes exposed on the token stream
	private final CharTermAttribute termAtt;

	private final OffsetAttribute offsetAtt;

	private final TypeAttribute typeAtt;

	// End offset of the last lexeme, reported by end()
	private int endPosition;

	public IKTokenizer(boolean useSmart) {
		offsetAtt = addAttribute(OffsetAttribute.class);
		termAtt = addAttribute(CharTermAttribute.class);
		typeAtt = addAttribute(TypeAttribute.class);
		// "input" is the Reader managed by the Tokenizer base class; it is
		// supplied via setReader() before reset(), not at construction time.
		_IKImplement = new IKSegmenter(input, useSmart);
	}

	// Legacy constructor: since Lucene 5.x the Reader is injected through
	// setReader(), so the "in" parameter is unused and kept only for source
	// compatibility with older call sites.
	public IKTokenizer(Reader in, boolean useSmart) {
		this(useSmart);
	}

	@Override
	public boolean incrementToken() throws IOException {
		// Clear attribute state left over from the previous token
		clearAttributes();
		Lexeme nextLexeme = _IKImplement.next();
		if (nextLexeme != null) {
			// Copy the lexeme's text, offsets and type into the attributes
			termAtt.append(nextLexeme.getLexemeText());
			termAtt.setLength(nextLexeme.getLength());
			offsetAtt.setOffset(correctOffset(nextLexeme.getBeginPosition()),
					correctOffset(nextLexeme.getEndPosition()));
			endPosition = nextLexeme.getEndPosition();
			typeAtt.setType(nextLexeme.getLexemeTypeString());
			return true;
		}
		// No more lexemes: end of stream
		return false;
	}

	@Override
	public void reset() throws IOException {
		super.reset();
		_IKImplement.reset(input);
	}

	@Override
	public final void end() throws IOException {
		// TokenStream contract: call super.end() before reporting the final offset
		super.end();
		int finalOffset = correctOffset(this.endPosition);
		offsetAtt.setOffset(finalOffset, finalOffset);
	}

}
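A minimal smoke test for the tokenizer, assuming the IK analyzer jar is on the classpath (the sample sentence is arbitrary):

import java.io.StringReader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class IKTokenizerSmokeTest {

	public static void main(String[] args) throws Exception {
		Tokenizer tokenizer = new IKTokenizer(true);
		// The Reader is supplied via setReader(); reset() then initializes the stream
		tokenizer.setReader(new StringReader("中华人民共和国国歌"));
		CharTermAttribute term = tokenizer.getAttribute(CharTermAttribute.class);
		tokenizer.reset();
		while (tokenizer.incrementToken()) {
			System.out.println(term.toString());
		}
		tokenizer.end();
		tokenizer.close();
	}

}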
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SynonymFilterFactory;
import org.apache.lucene.analysis.util.ClasspathResourceLoader;
import org.apache.lucene.util.Version;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class IKSynonymsAnalyzer extends Analyzer {
	
	private Logger LOG = LoggerFactory.getLogger(IKSynonymsAnalyzer.class);
	
	// Tri-state flag: null selects a plain WhitespaceTokenizer (input is expected
	// to be pre-segmented), true/false selects IK smart or fine-grained segmentation.
	private Boolean useSmart = null;
	
	public IKSynonymsAnalyzer() {
		
	}
	
	public IKSynonymsAnalyzer(boolean useSmart) {
		this.useSmart = useSmart;
	}

	@Override
	protected TokenStreamComponents createComponents(String fieldName) {
		// Configure the synonym filter; the listed files are resolved from the
		// classpath by the ClasspathResourceLoader passed to inform() below.
		Map<String, String> filterArgs = new HashMap<String, String>();
		filterArgs.put("synonyms", "elastic/synonyms_1.txt,elastic/synonyms_2.txt");
		filterArgs.put("luceneMatchVersion", Version.LUCENE_5_5_2.toString());
		filterArgs.put("expand", "true");
		SynonymFilterFactory factory = new SynonymFilterFactory(filterArgs);
		try {
			factory.inform(new ClasspathResourceLoader());
		} catch (IOException e) {
			LOG.error(e.getMessage(), e);
		}
		Tokenizer tokenizer = null == useSmart ? new WhitespaceTokenizer() : new IKTokenizer(useSmart);
		return new TokenStreamComponents(tokenizer, factory.create(tokenizer));
	}
	
}
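The synonym files referenced above are looked up on the classpath (e.g. under src/main/resources/elastic/). Their content follows the standard Solr synonyms format; a hypothetical synonyms_1.txt might look like:

# comma-separated terms are treated as equivalent
西红柿,番茄
土豆,马铃薯
# "=>" maps the left-hand terms to the right-hand terms
番茄酱 => 番茄酱,调味品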
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.cisiondata.modules.elastic.analyzer.IKSynonymsAnalyzer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

public class ElasticUtils {

	private static Logger LOG = LoggerFactory.getLogger(ElasticUtils.class);
	
	private static Analyzer ikanalyzer = new IKSynonymsAnalyzer();
	
	/**
	 * Tokenize the input with the IK segmenter.
	 * @param input the text to segment
	 * @param userSmart true for smart (coarse-grained) segmentation, false for fine-grained segmentation
	 * @return the segmented terms
	 */
	public static String[] analyze(String input, boolean userSmart) {
		List<String> results = new ArrayList<String>();
		try {
			IKSegmenter ikSeg = new IKSegmenter(new StringReader(input.trim()), userSmart);
			for (Lexeme lexeme = ikSeg.next(); lexeme != null; lexeme = ikSeg.next()) {
				results.add(lexeme.getLexemeText());
			}
		} catch (Exception e) {
			LOG.error(e.getMessage(), e);
		}
		return results.toArray(new String[0]);
	}
	
	public static String[] convertSynonyms(String input) {
		return convertSynonyms(ikanalyzer, input);
	}

	/**
	 * Run the input through the synonym analyzer and return the deduplicated set of terms, synonyms included.
	 */
	public static String[] convertSynonyms(Analyzer analyzer, String input) {
		Set<String> results = new HashSet<String>();
		TokenStream tokenStream = analyzer.tokenStream("fields", input);
		CharTermAttribute termAttribute = tokenStream.addAttribute(CharTermAttribute.class);
		try {
			tokenStream.reset();
			while (tokenStream.incrementToken()) {
				results.add(termAttribute.toString());
			}
			tokenStream.end();
			tokenStream.close();
		} catch (Exception e) {
			LOG.error(e.getMessage(), e);
		}
		return results.toArray(new String[0]);
	}
	
}
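With these utilities in place, the synonym-expanded terms can be OR-ed together as should clauses of a bool query (valueString and name are variables from the surrounding service code):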
BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder();
String[] keywords = ElasticUtils.convertSynonyms(valueString);
for (int i = 0, len = keywords.length; i < len; i++) {
	boolQueryBuilder.should(QueryBuilders.matchPhraseQuery(name, keywords[i]));
}
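A sketch of executing the query with the TransportClient API of that era, continuing the fragment above (the client variable, index and type names are illustrative):

SearchResponse response = client.prepareSearch("temp_index")
		.setTypes("temp_type")
		.setQuery(boolQueryBuilder)
		.setSize(20)
		.execute().actionGet();
for (SearchHit hit : response.getHits().getHits()) {
	System.out.println(hit.getSourceAsString());
}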

 
  

