chp2-2-2_fmm_word_seg: segment sentences with the forward maximum matching (FMM) algorithm.

# Defined in Section 2.2.2
def load_dict(path="/Users/Documents/NLP_data/plm-nlp-code-main/chp2/lexicon.txt"):
    """Load a lexicon file (one word per line).

    Args:
        path: path to the lexicon file; defaults to the original
            hard-coded location for backward compatibility.

    Returns:
        A tuple ``(lexicon, max_len)`` where ``lexicon`` is a set of the
        words in the file and ``max_len`` is the length of the longest
        word (0 for an empty file).
    """
    lexicon = set()  # set: stores unique words only
    max_len = 0
    # Context manager guarantees the file is closed even on error;
    # explicit UTF-8 avoids platform-dependent default encodings.
    with open(path, encoding="utf-8") as f:
        for line in f:
            word = line.strip()  # drop surrounding whitespace/newline
            lexicon.add(word)
            if len(word) > max_len:  # track the longest word seen so far
                max_len = len(word)
    return lexicon, max_len

# Segment a sentence with the forward maximum matching (FMM) algorithm.
def fmm_word_seg(sentence, lexicon, max_len):
    """Segment *sentence* by forward maximum matching.

    Scans left to right, greedily taking the longest lexicon match at
    each position; a character with no match is emitted on its own.

    Args:
        sentence: the string to segment.
        lexicon: set of known words.
        max_len: length of the longest word in the lexicon (window size).

    Returns:
        List of segmented words; empty list for an empty sentence.
    """
    words = []
    begin = 0
    end = min(begin + max_len, len(sentence))  # clamp window to sentence end
    while begin < end:
        word = sentence[begin:end]  # current candidate: longest window first
        # Accept a known word, or fall back to a single character when
        # the window has shrunk to length 1 with no match found.
        if word in lexicon or end - begin == 1:
            words.append(word)
            begin = end  # advance past the accepted word
            end = min(begin + max_len, len(sentence))
        else:
            end -= 1  # shrink the window by one character and retry
    return words


# Guard the interactive demo so importing this module has no side effects
# (the original ran input() at import time).
if __name__ == "__main__":
    lexicon, max_len = load_dict()
    words = fmm_word_seg(input("请输入句子:"), lexicon, max_len)
    for word in words:
        print(word)  # one segmented word per line

>>>
请输入句子:我爱深度学习
word 我爱深度 begin: 0 end: 4
word 我爱深 begin: 0 end: 3
word 我爱 begin: 0 end: 2
word 我 begin: 0 end: 1
===== word 我 begin: 1 end: 5
word 爱深度学 begin: 1 end: 5
word 爱深度 begin: 1 end: 4
word 爱深 begin: 1 end: 3
word 爱 begin: 1 end: 2
===== word 爱 begin: 2 end: 6
word 深度学习 begin: 2 end: 6
===== word 深度学习 begin: 6 end: 6
深度学习

lexicon = set()
lexicon
>>>
{
    
    '',
 '存续期',
 '吹响',
 '干线网',
 '夜半',
 '到位关',
 '倭寇',
 '嬉闹',
 '储备粮',
 '国术会',
 '欧盟',
 '璀瑰',
 '各口村',
 '印度教',
 ...
 }
lexicon.add("机器学习")

# update(x): add the elements of x to the set; x may be a list, tuple, dict, etc.
s = set(('a', 'cc', 'f'))
# When updating from a dict, only its (immutable) keys are added.
dict_1 = {
    
    'name': 'bb', 'age': 'cc', 'f': 11}
s.update(dict_1)
print("添加字典"+str(s))
>>>
添加字典{
    
    'name', 'cc', 'a', 'age', 'f'}

Guess you like

Origin blog.csdn.net/weixin_42782150/article/details/126960971