1.文本切分
#对于一个文本字符串,可以使用Python的string.split()方法将其切分
mySent = 'This book is the best book on python or M.L. I have ever laid eyes upon'
words = mySent.split(' ')
#Python中有一些内嵌的方法,可以将字符串全部转换成小写(.lower())或者大写(.upper())
[a.lower() for a in words]
>>['this','book','is','the','best','book','on','python','or','m.l.','i','have','ever','laid','eyes','upon']
#上面标点符号也被当成了词的一部分。可以使用正则表达式来切分句子,其中分隔符是除单词、数字外的任意字符串
import re
words = re.split(r'\W+',mySent)
[a.lower() for a in words if len(a)>0 ]#只返回长度大于0的字符串
>>['this','book','is','the','best','book','on','python','or','m','l','i','have','ever','laid','eyes','upon']
2.贝叶斯相关函数(在我的这篇博客中有详细介绍https://blog.csdn.net/qq_24946843/article/details/84192067)
#构建词表
def vocabularyTable(dataSet):
    """Build the vocabulary list: every unique word across all documents.

    dataSet -- an iterable of documents, each a list of word strings
    returns -- a list of the distinct words (order unspecified)
    """
    uniqueWords = set()
    for doc in dataSet:
        uniqueWords.update(doc)
    return list(uniqueWords)
#构建词向量
def doc2vec(vocabSet, document):
    """Convert a document into a set-of-words vector over the vocabulary.

    vocabSet -- list of vocabulary words; position i in the result
                corresponds to vocabSet[i]
    document -- list of words in the document
    returns  -- list of 0/1 flags: 1 if the vocabulary word occurs in
                the document at least once, else 0

    Words in the document that are not in the vocabulary are ignored.
    """
    # Build the word -> index map once; calling list.index() inside the
    # loop would make this O(len(document) * len(vocabSet)).
    wordIndex = {word: i for i, word in enumerate(vocabSet)}
    docVec = [0] * len(vocabSet)
    for word in document:
        pos = wordIndex.get(word)
        if pos is not None:
            docVec[pos] = 1
    return docVec
#训练贝叶斯分类器
import numpy as np
import math
def trainBayes(trainVec, classLabel):
    """Train a naive Bayes classifier from document word vectors.

    trainVec   -- 2-D sequence (numData x numWords) of 0/1 word vectors
    classLabel -- sequence of 0/1 class labels (1 = abusive/spam)
    returns    -- (pAbusive, p1Vect, p0Vect) where pAbusive is P(class=1)
                  and p1Vect/p0Vect are float arrays of log conditional
                  word probabilities log P(word_i | class).

    Bug fixed: the original pre-allocated p1Vect/p0Vect with
    np.array([0]*numWords), an *integer* array, so the negative
    math.log(...) values were truncated when assigned element-wise.
    """
    trainMat = np.asarray(trainVec)
    labels = np.asarray(classLabel)
    numData = len(trainVec)
    pAbusive = sum(classLabel) / float(numData)
    # Laplace smoothing: start counts at 1 and denominators at 2 so no
    # conditional probability is ever exactly zero (log(0) is undefined).
    isClass1 = labels == 1
    p1num = 1.0 + trainMat[isClass1].sum(axis=0)
    p1sum = 2.0 + trainMat[isClass1].sum()
    p0num = 1.0 + trainMat[~isClass1].sum(axis=0)
    p0sum = 2.0 + trainMat[~isClass1].sum()
    # Log probabilities avoid floating-point underflow when many small
    # probabilities are multiplied (summed in log space) at classify time.
    p1Vect = np.log(p1num / p1sum)
    p0Vect = np.log(p0num / p0sum)
    return pAbusive, p1Vect, p0Vect
#贝叶斯分类函数
def classify(docVec, pClass1, p1Vect, p0Vect):
    """Classify one document vector with the trained naive Bayes model.

    docVec  -- numpy 0/1 word vector for the document
    pClass1 -- prior probability P(class=1)
    p1Vect  -- log word probabilities for class 1
    p0Vect  -- log word probabilities for class 0
    returns -- 1 if the document is more likely class 1, else 0

    Log-space: the product of probabilities becomes a sum of logs,
    which avoids floating-point underflow.
    """
    logP1 = math.log(pClass1) + sum(docVec * p1Vect)
    logP0 = math.log(1 - pClass1) + sum(docVec * p0Vect)
    return 1 if logP1 > logP0 else 0
3、文件解析(文本切分)
def textParse(email):
    """Split raw email text into lowercase tokens longer than 2 characters.

    email   -- the full email body as one string
    returns -- list of lowercase word tokens with len > 2 (drops noise
               like 'a', 'is', and punctuation fragments)

    Bug fixed: the original used re.split(r'\\W*', ...). Because \\W*
    can match the empty string, Python 3.7+ splits between every
    character, producing single-letter fragments. \\W+ treats each run
    of non-word characters as one delimiter.
    """
    import re
    tokens = re.split(r'\W+', email)
    return [tok.lower() for tok in tokens if len(tok) > 2]
4、垃圾邮件测试
def spamTest():
    """Train and test the naive Bayes spam filter on 25 spam + 25 ham emails.

    Reads email/spam/1..25.txt and email/ham/1..25.txt, randomly holds
    out 10 of the 50 emails for testing (hold-out cross validation),
    trains on the remaining 40, and prints the test error rate.
    The split is random, so the printed error rate varies run to run.
    """
    docList = []
    classList = []
    for i in range(1, 26):
        # NOTE(review): some bundled emails contain non-UTF-8 bytes, which
        # crashes the default codec with UnicodeDecodeError; latin-1 maps
        # every byte to a character so the read always succeeds. `with`
        # also closes the files the original left open.
        with open('email/spam/%d.txt' % i, encoding='latin-1') as f:
            docList.append(textParse(f.read()))
        classList.append(1)  # 1 = spam
        with open('email/ham/%d.txt' % i, encoding='latin-1') as f:
            docList.append(textParse(f.read()))
        classList.append(0)  # 0 = ham
    vocabList = vocabularyTable(docList)
    # Randomly move 10 indices into the test set. Draw against
    # len(trainingSet), not 50: each deletion shrinks the list, so a
    # fixed upper bound of 50 could index past the end.
    trainingSet = list(range(50))
    testSet = []
    for _ in range(10):
        randIndex = int(np.random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []
    trainClass = []
    for index in trainingSet:
        trainMat.append(doc2vec(vocabList, docList[index]))
        trainClass.append(classList[index])
    pSpam, p1Vect, p0Vect = trainBayes(trainMat, trainClass)
    errorCount = 0
    for index in testSet:
        wordVect = np.array(doc2vec(vocabList, docList[index]))
        # Classify once and reuse; the original called classify() twice.
        predicted = classify(wordVect, pSpam, p1Vect, p0Vect)
        if predicted != classList[index]:
            errorCount += 1
            print('the real class is %d,the predict class is %d' % (classList[index], predicted))
            print(docList[index])
    print('the erroe rate is %.2f' % (float(errorCount) / len(testSet)))
spamTest()
>>the erroe rate is 0.00
spamTest()
>>the real class is 1,the predict class is 0
['home', 'based', 'business', 'opportunity', 'knocking', 'your', 'door', 'don抰', 'rude', 'and', 'let', 'this', 'chance', 'you', 'can', 'earn', 'great', 'income', 'and', 'find', 'your', 'financial', 'life', 'transformed', 'learn', 'more', 'here', 'your', 'success', 'work', 'from', 'home', 'finder', 'experts']
the erroe rate is 0.10
因为用来测试的电子邮件是随机选择的,所以每次的输出结果可能有些差别。比如上面运行的结果,第一次错误率为0,第二次错误率为0.1,即10封邮件中有一封预测错误。
如果想要更好地估计错误率,那么就应该将上述过程重复多次,然后取平均值。
这里一直出现的错误是将垃圾邮件误判为正常邮件。相比之下,将垃圾邮件误判为正常邮件要比将正常邮件归到垃圾邮件好。