Python Data Analysis and Mining in Action (《Python数据分析与挖掘实战》), Chapter 12: Implementing the Collaborative Filtering Recommendation Algorithm

This is my first complete data analysis on real data, so the implementation deliberately favours statements I already know well. My view is that you should first be able to solve the problem one way, and only then refine your own solution by drawing on other people's analysis ideas.

1. Connect to the database and load the raw data

import pandas as pd
import pymysql
conn = pymysql.connect(host='localhost', user='root', password='password', port=3306, db='liuyan', charset='utf8')
cur = conn.cursor()     # create a cursor
cursor = cur.execute('select * from all_gzdata')     # execute the SQL statement (returns the number of rows)
data = cur.fetchall()     # fetch all rows
from pandas import DataFrame
df = DataFrame(list(data), columns=['realIP', 'realAreacode', 'userAgent', 'userOS',
                                    'userID', 'clientID', 'timestamp', 'timestamp_format',
                                    'pagePath', 'ymd', 'fullURL', 'fullURLId', 'hostname',
                                    'pageTitle', 'pageTitleCategoryId', 'pageTitleCategoryName',
                                    'pageTitleKw', 'fullReferrer', 'fullReferrerURL', 'organicKeyword',
                                    'source'])
cur.close()
conn.close()

2. Exploratory data analysis

Exploratory analysis is the first step of any data analysis; it gives us a general picture of the actual data and reveals its underlying patterns.

2.1 Page type analysis (the page type is the first three digits of the fullURLId)

# Page type analysis (the page type is the first three digits of fullURLId)
df['urlcat'] = df['fullURLId'].str.extract(r'(\d{3})', expand=False)
counts3 = df.groupby('urlcat').size().sort_values(ascending=False)
counts3 = counts3.reset_index()
counts3.columns = ['index', 'num']
counts3['prec'] = counts3['num'] / counts3['num'].sum() * 100
# Breakdown within the consulting (101) category
counts_101 = df[df['urlcat'] == '101'].groupby('fullURLId').size().sort_values(ascending=False)
counts_101 = counts_101.reset_index()
counts_101.columns = ['index', 'num']
counts_101['prec'] = counts_101['num'] / counts_101['num'].sum() * 100
# Breakdown within the knowledge (107) category
def count107(i):
    j = i[['fullURL']][i['fullURLId'].str.contains('107')].copy()     # pick out the URLs whose fullURLId contains 107
    j['type'] = None     # add an empty column for the sub-type
    j.loc[j['fullURL'].str.contains('info/.+?/'), 'type'] = u'知识首页'          # knowledge home pages
    j.loc[j['fullURL'].str.contains('info/.+?/.+?'), 'type'] = u'知识列表页'      # knowledge list pages
    j.loc[j['fullURL'].str.contains(r'/\d+?_*\d+?\.html'), 'type'] = u'知识内容页'  # knowledge content pages
    return j['type'].value_counts()
counts_107 = count107(df)
counts_107 = counts_107.reset_index()
counts_107.columns = ['index', 'num']
counts_107['prec'] = counts_107['num'] / counts_107['num'].sum() * 100
# Page type statistics for URLs that contain a question mark
df['baohan'] = 0
df.loc[df['fullURL'].str.contains(r'\?'), 'baohan'] = 1     # 1 = the URL contains a '?'
df['baohan'].value_counts()
counts_bh = df[df['baohan'] == 1].groupby(df['fullURLId']).size()
counts_bh = counts_bh.reset_index()
counts_bh.columns = ['index', 'num']
# Click behaviour of "just browsing" users (URLs that end in '/', i.e. directory pages)
df['xiaguang'] = 0
df.loc[df['fullURL'].str.contains('http.*?/$'), 'xiaguang'] = 1
df['xiaguang'].value_counts()
counts_xg = df[df['xiaguang'] == 1].groupby(df['urlcat']).size()
counts_xg = counts_xg.reset_index()
counts_xg.columns = ['index', 'num']
counts_xg['prec'] = counts_xg['num'] / counts_xg['num'].sum() * 100

2.2 Click count analysis

# Click-count analysis
df['dj'] = 1
dianji = df['dj'].groupby(df['realIP']).size()     # number of records (clicks) per IP
dianji = dianji.reset_index()
dianji.columns = ['realIP', 'num']
dianjicishu = dianji.groupby('num').size()     # how many users fall on each distinct click count
dianjicishu = dianjicishu.reset_index()
dianjicishu.columns = ['cishu', 'renshu']
dianjicishu['prec'] = dianjicishu['renshu'] / dianjicishu['renshu'].sum() * 100     # percentage of users
dianjicishu['jilu_prec'] = dianjicishu['renshu'] / df['dj'].sum() * 100     # percentage of records
# Users with more than 7 page views
qici = dianjicishu[dianjicishu['cishu'] > 7]
del qici['prec']
del qici['jilu_prec']
import numpy as np
qici2 = DataFrame([['8-50', 0], ['50-100', 0], ['100以上', 0]], columns=['点击次数', '用户数'])
qici2.loc[0, '用户数'] = qici[(qici['cishu'] >= 8) & (qici['cishu'] <= 50)]['renshu'].sum()
qici2.loc[1, '用户数'] = qici[(qici['cishu'] > 50) & (qici['cishu'] <= 100)]['renshu'].sum()
qici2.loc[2, '用户数'] = qici[qici['cishu'] > 100]['renshu'].sum()
# Behaviour of users who viewed a page only once
dianji_one = df.groupby(['realIP', 'fullURLId']).size()
dianji_one = dianji_one.reset_index()
dianji_one.columns = ['realIP', 'fullURLId', 'num']
dianji_one = dianji_one[dianji_one['num'] == 1]
dianji_one2 = dianji_one.groupby('fullURLId').size()
dianji_one2 = dianji_one2.reset_index()
dianji_one2.columns = ['fullURLId', 'num']
dianji_one2['prec'] = dianji_one2['num'] / dianji_one2['num'].sum() * 100
dianji_one2 = dianji_one2.sort_values('prec', ascending=False)
# Pages viewed by the single-click users
dianji_one_web = df.groupby(['fullURL', 'realIP']).size()
dianji_one_web = dianji_one_web.reset_index()
dianji_one_web.columns = ['fullURL', 'realIP', 'num']
dianji_one_web = dianji_one_web[dianji_one_web['num'] == 1]
dianji_one_web2 = dianji_one_web.groupby('fullURL').size()
dianji_one_web2 = dianji_one_web2.reset_index()
dianji_one_web2.columns = ['fullURL', 'num']

2.3 Page ranking

# Pages ranked by click count
dianji_web = df.groupby('fullURL').size()
dianji_web = dianji_web.reset_index()
dianji_web.columns = ['fullURL', 'num']
dianji_web = dianji_web.sort_values('num', ascending=False)
# Clicks per page type
dianji2 = df.groupby(['urlcat', 'realIP']).size()
dianji2 = dianji2.reset_index()
dianji2.columns = ['urlcat', 'realIP', 'num']
dianji2['counts'] = 1
leixingdianji = DataFrame([['知识类(包含专题和知识)', 0, 0, 0.0],
                           ['咨询类', 0, 0, 0.0]], columns=['html网页类型','总点击次数', '用户数', '平均点击率'])
leixingdianji.loc[0, '总点击次数'] = dianji2[dianji2['urlcat'] == '107']['num'].sum()
leixingdianji.loc[1, '总点击次数'] = dianji2[dianji2['urlcat'] == '101']['num'].sum()
leixingdianji.loc[0, '用户数'] = dianji2[dianji2['urlcat'] == '107']['counts'].sum()
leixingdianji.loc[1, '用户数'] = dianji2[dianji2['urlcat'] == '101']['counts'].sum()
leixingdianji.loc[0, '平均点击率'] = leixingdianji.loc[0, '总点击次数'] / leixingdianji.loc[0, '用户数']
leixingdianji.loc[1, '平均点击率'] = leixingdianji.loc[1, '总点击次数'] / leixingdianji.loc[1, '用户数']

3. Data preprocessing

Building on the exploratory analysis of the raw data, we identify records that are irrelevant to the analysis or that the model requires to be handled, and deal with them here. Preprocessing covers data cleaning, data transformation and attribute reduction.

3.1 Data cleaning (removing irrelevant and duplicate records)

# Remove intermediate pages (URLs containing the keyword midques_)
data1 = df[~df['fullURL'].str.contains('midques_')]     # 2,989 records remain
count1 = df[df['fullURL'].str.contains('midques_')]['fullURL'].count()     # 7 records removed
df['fullURL'].count()     # 2,996 records in the raw data

# Remove lawyers' browsing records (pageTitle 法律快车-律师助手, the "lawyer assistant" pages)
count2 = data1[data1['pageTitle'] == '法律快车-律师助手']['pageTitle'].count()     # 76 records removed
data2 = data1[~(data1['pageTitle'] == '法律快车-律师助手')]     # 2,913 records remain
# Remove "consultation published successfully" pages
count3 = data2[data2['pageTitle'] == '咨询发布成功']['pageTitle'].count()     # 14 records removed
data3 = data2[~(data2['pageTitle'] == '咨询发布成功')]     # 2,899 records remain
# Keep only URLs that contain one of the keywords ask, info, fagui, lawyer
data4 = data3[((data3['fullURL'].str.contains('ask')) | (data3['fullURL'].str.contains('info')) |
               (data3['fullURL'].str.contains('fagui')) | (data3['fullURL'].str.contains('lawyer')))]     # 2,667 records remain
count4 = data3[~((data3['fullURL'].str.contains('ask')) | (data3['fullURL'].str.contains('info')) |
                 (data3['fullURL'].str.contains('fagui')) | (data3['fullURL'].str.contains('lawyer')))]['fullURL'].count()     # 232 records removed
# Remove quick-search and "post a free legal consultation" records
count5 = data4[((data4['pageTitle'].str.contains('法律快搜')) | (data4['pageTitle'].str.contains('免费发布法律咨询')))]['pageTitle'].count()     # 38 records removed
data5 = data4[~((data4['pageTitle'].str.contains('法律快搜')) | (data4['pageTitle'].str.contains('免费发布法律咨询')))]     # 2,629 records remain
# Remove records in the "other" category (199) whose URL contains a question mark
data6 = data5[~((data5['fullURL'].str.contains(r'\?')) & (data5['fullURLId'].str.contains('199')))]     # 2,627 records remain
count6 = data5[(data5['fullURL'].str.contains(r'\?')) & (data5['fullURLId'].str.contains('199'))]['fullURL'].count()     # 2 records removed
# Remove records whose URL does not point at an .html page
count7 = data6[~data6['fullURL'].str.contains('html')]['fullURL'].count()     # 107 records removed
data7 = data6[data6['fullURL'].str.contains('html')]     # 2,520 records remain
# Remove duplicate records
data7[data7.duplicated(['realIP', 'timestamp', 'fullURL'])][['realIP', 'timestamp', 'fullURL']].sort_values(
    ['realIP', 'timestamp'])     # DataFrame.duplicated() returns a boolean Series marking duplicate rows
data8 = data7.drop_duplicates()     # 1,583 records remain

3.2 Data transformation

data8_2 = data8.copy()
# Collapse paginated URLs (..._2.html, ..._3.html, ...) onto their base page
data8_2['fullURL'] = data8_2['fullURL'].str.replace(r'_\d+\.html', '.html', regex=True)
#data8_2[data8_2['fullURL'].str.contains(r'_\d+\.html')]     # can be used to check that all paginated URLs were handled
data8_2 = data8_2.drop_duplicates()     # drop the duplicates this creates
del data8_2['urlcat']

# Save the processed data back to the database
#### The table gzdata was created with Navicat Premium. Note: identifiers wrapped in backticks (``) are treated
#### as literal names, so tables or columns whose names use unusual characters or SQL keywords can be created without errors.
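If you prefer not to use Navicat, here is a minimal sketch (my own, not from the book) that creates an equivalent gzdata table straight from Python; the column list matches the DataFrame above and every column is simply stored as text:

# A minimal alternative to Navicat (sketch, assumes the same MySQL server and credentials as above).
cols = ['realIP', 'realAreacode', 'userAgent', 'userOS', 'userID', 'clientID',
        'timestamp', 'timestamp_format', 'pagePath', 'ymd', 'fullURL', 'fullURLId',
        'hostname', 'pageTitle', 'pageTitleCategoryId', 'pageTitleCategoryName',
        'pageTitleKw', 'fullReferrer', 'fullReferrerURL', 'organicKeyword', 'source']
create_sql = ('create table if not exists `gzdata` ('
              + ','.join('`%s` text' % c for c in cols) + ')')
conn = pymysql.connect(host='localhost', user='root', password='password', port=3306, db='liuyan', charset='utf8')
cur = conn.cursor()
cur.execute(create_sql)     # the backticks let us use column names such as `timestamp` safely
conn.commit()
cur.close()
conn.close()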
Convert each cleaned record into a Python list and insert it into the database.
(Note: the records are inserted one at a time.)
def insertNews(data):
    # Open one connection for the whole batch instead of one per record
    conn = pymysql.connect(host='localhost', user='root', password='password', port=3306, db='liuyan', charset='gbk')
    cur = conn.cursor()
    cols = ['realIP', 'realAreacode', 'userAgent', 'userOS', 'userID', 'clientID',
            'timestamp', 'timestamp_format', 'pagePath', 'ymd', 'fullURL', 'fullURLId',
            'hostname', 'pageTitle', 'pageTitleCategoryId', 'pageTitleCategoryName',
            'pageTitleKw', 'fullReferrer', 'fullReferrerURL', 'organicKeyword', 'source']
    sql = ('insert into gzdata(' + ','.join(cols) + ') '
           'VALUES (' + ','.join(['%s'] * len(cols)) + ')')
    for index in range(len(data)):
        newsobj = data.iloc[index]
        newsobjs = [newsobj.get(col) for col in cols]     # one record as a Python list
        cur.execute(sql, newsobjs)     # parameterised query, so quoting and escaping are handled by the driver
    conn.commit()     # remember to commit, otherwise nothing is actually saved
    cur.close()     # close the cursor
    conn.close()     # close the connection
insertNews(data8_2)

# Reload the transformed data from the database
import pymysql
from pandas import DataFrame
conn2=pymysql.connect(host='localhost',user='root',password='password',port=3306,db='liuyan',charset='utf8')
cur2=conn2.cursor()
cursor2=cur2.execute('select * from gzdata')
data2=cur2.fetchall()
df2=DataFrame(list(data2),columns=['realIP', 'realAreacode', 'userAgent',
                                  'userOS','userID', 'clientID', 'timestamp',
                                  'timestamp_format','pagePath', 'ymd',
                                  'fullURL', 'fullURLId', 'hostname',
                                  'pageTitle', 'pageTitleCategoryId',
                                  'pageTitleCategoryName','pageTitleKw',
                                  'fullReferrer', 'fullReferrerURL',
                                  'organicKeyword','source'])
df2['type'] = 'weizhi'
df2['urlcat'] = df2['fullURLId'].str.extract(r'(\d{3})', expand=False)
df2.loc[df2['urlcat'] == '101', 'type'] = 'zixun'
df2.loc[df2['urlcat'] == '107', 'type'] = 'zhishi'
# Reclassify the "other" (199) pages whose category is wrong
df2.loc[((df2['fullURL'].str.contains('ask')) | (df2['fullURL'].str.contains('askzt'))), 'type'] = 'zixun'     # any URL containing 'ask' or 'askzt'
df2.loc[((df2['fullURL'].str.contains('zhishi')) | (df2['fullURL'].str.contains('faguizt'))), 'type'] = 'zhishi'     # any URL containing 'zhishi' or 'faguizt'
df2.groupby('type').size()     # df2[df2.type=='weizhi'][['fullURL','fullURLId','urlcat','type']] shows what the remaining 'weizhi' URLs look like
# Split the knowledge pages into finer categories
df2['type2'] = 'weizhi2'
df2.loc[((df2['type'] == 'zhishi') & (df2['fullURL'].str.contains('info'))), 'type2'] = df2['fullURL'].str.extract(r'info\/(.*?)\/', expand=False)
df2['type3'] = 'weizhi3'
df2.loc[((df2['type'] == 'zhishi') & (df2['fullURL'].str.contains('info'))), 'type3'] = df2['fullURL'].str.extract(r'info\/.*?\/(.*?)\/', expand=False)
# Click counts for the marriage-law (hunyin) knowledge pages
hunyin_count=df2[((df2['type']=='zhishi')&(df2['type2']=='hunyin'))].groupby('fullURL').size()
hunyin_count=hunyin_count.reset_index()
hunyin_count.columns=['fullURL','num']
hunyin_count2=hunyin_count.groupby('num').size()
hunyin_count2=hunyin_count2.reset_index()
hunyin_count2.columns=['点击次数','网页个数']
hunyin_count.groupby('num').sum()

3.3 Attribute reduction

The recommendation model only needs certain attributes as input, so the processed data is reduced to those attributes. In this case they are the user and the pages the user visited.

import numpy as np
a = df2['fullURL'].unique()     # the distinct URLs; a is a numpy array
b = sorted(a.tolist())     # how many distinct values 'fullURL' takes; equivalent to sorted(set(df2['fullURL']))
goods_matrix = DataFrame(np.arange(len(df2['realIP'][:1000]) * len(b[:200])).reshape(len(df2['realIP'][:1000]), len(b[:200])),
                         index=df2['realIP'][:1000], columns=b[:200])     # DataFrame indexed by user IP (1,000 records) with one column per URL (200 URLs); the values are placeholders that get overwritten below

Note: because my machine is underpowered, I kept only 1,000 user records and 200 URLs for the rest of the analysis, so the final results are not great.

4. Model building

Collaborative filtering comes in user-based and item-based flavours. This case uses item-based collaborative filtering, i.e. it answers the question "which items should be recommended to user B?".
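In symbols (my own notation, not the book's), the score that the code in section 4.3 assigns to page $i$ for user $u$ is
$$\mathrm{score}(u,i)=\Big(\sum_{j}\mathrm{sim}(i,j)\,r_{u,j}\Big)\,(1-r_{u,i}),$$
where $r_{u,j}\in\{0,1\}$ records whether user $u$ visited page $j$, $\mathrm{sim}(i,j)$ is the Jaccard similarity of the two pages' visitor vectors, and the factor $(1-r_{u,i})$ zeroes out pages the user has already visited.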

4.1 Build the user-item interest matrix

# Turn goods_matrix into a 0-1 matrix recording whether each user (record) visited each page.
import time
start = time.perf_counter()     # time.clock() was removed in Python 3.8
for i in range(goods_matrix.index.size):
    for j in range(goods_matrix.columns.size):
        if df2.loc[i, 'fullURL'] == goods_matrix.columns[j]:
            goods_matrix.iloc[i, j] = 1     # 1 if the URL of record i equals the URL of column j, otherwise 0
        else:
            goods_matrix.iloc[i, j] = 0
end = time.perf_counter()
print(end - start, "seconds process time")
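The double loop above touches 1,000 × 200 cells one at a time and is painfully slow. A vectorized sketch (my own, not the book's code) that builds the same record-per-row 0-1 matrix directly from df2 and b:

# Vectorized construction of the 0-1 interest matrix (assumes df2 and b from above).
sub = df2[:1000]
goods_matrix_fast = pd.get_dummies(sub['fullURL']).astype(int)     # one-hot encode each record's URL
goods_matrix_fast = goods_matrix_fast.reindex(columns=b[:200], fill_value=0)     # keep only the 200 chosen URLs
goods_matrix_fast.index = sub['realIP']
# A true per-user matrix would instead aggregate each IP's records, e.g.
# pd.crosstab(sub['realIP'], sub['fullURL']).clip(upper=1), but the version above mirrors the loop's behaviour.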

4.2 Build the training and test sets

# Shuffle goods_matrix, then take 10% of the user-behaviour data as the test set and the remaining 90% as the training set.
import random
#random.shuffle(goods_matrix)              # note: random.shuffle(list) returns None; it shuffles in place and changes the original order
all_matrix = np.random.permutation(goods_matrix)          # note: np.random.permutation does not modify its argument; it returns a new array with the rows in shuffled order
all_matrix2 = DataFrame(all_matrix, index=df2['realIP'][:1000], columns=b[:200])   # back to a DataFrame
#选取训练集和测试集
train=all_matrix2.iloc[:int(len(all_matrix)*0.9),:]
test=all_matrix2.iloc[int(len(all_matrix)*0.9):,:]
train2 = train.values     # training set as a numpy array (DataFrame.as_matrix() was removed from pandas)
test2 = test.values     # test set as a numpy array

4.3 Build the collaborative filtering algorithm

import numpy as np
def Jaccard(a,b):     # Jaccard similarity coefficient; only meaningful for 0-1 vectors/matrices
    return 1.0*(a*b).sum()/(a+b-a*b).sum()

class Recommender():
    sim = None     # the similarity matrix
    def similarity(self,x,distance):
        y = np.ones((len(x), len(x)))
        for i in range(len(x)):
            for j in range(len(x)):
                y[i, j] = distance(x[i], x[j])
        return y
    def fit(self,x, distance=Jaccard):     # training: compute the similarity matrix; x is the 0-1 matrix of every user's interest in every item
        self.sim = self.similarity(x, distance)

    def recommend(self,a):     # recommendation: score the unseen items for the behaviour vector(s) a
        return np.dot(self.sim,a)*(1-a)
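
A quick sanity check on a tiny 0-1 matrix (my own example, not from the book). Items are rows and users are columns, which is the orientation fit and recommend expect:

toy = np.array([[1, 0, 1],    # item 0 was visited by users 0 and 2
                [1, 1, 0],    # item 1 was visited by users 0 and 1
                [0, 1, 1]])   # item 2 was visited by users 1 and 2
rec = Recommender()
rec.fit(toy)                   # rec.sim is now the 3x3 Jaccard item-item similarity matrix
print(rec.sim)
print(rec.recommend(toy))      # scores are non-zero only for the item each user has not yet visited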

4.4 Predict user behaviour on the test set

# Fit the model on the training set, predict user behaviour on the test set, then compute evaluation metrics.
train3 = train2.T     # because of how recommend() is written, both sets must be transposed (items as rows) first
test3 = test2.T
r = Recommender()
r.fit(train3)     # fit() returns None; the similarity matrix is stored on the object
sim = r.sim     # the item-item similarity matrix
result = r.recommend(test3)     # the recommendation scores
sim2 = DataFrame(sim)
result2 = DataFrame(result)   # back to a DataFrame with the proper index and column labels
result2.index = test.columns
result2.columns = test.index
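
Each column of result2 now holds one test IP's scores over the 200 pages; as a quick check (my own snippet), the three highest-scoring pages for the first test user can be read off with:

result2.iloc[:, 0].sort_values(ascending=False).head(3)     # top-3 candidate pages for the first test IP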

4.5 Recommend K pages to each IP

The function below that turns the score matrix into a top-K recommendation table borrows its approach from another blog post (linked in the original article).

from pandas import Series
def tuijian_result(K, recomMatrix):
    recomMatrix.fillna(0, inplace=True)
    xietong = ['xietong' + str(n) for n in range(1, K + 1)]     # column labels xietong1 ... xietongK
    tuijian = DataFrame([], index=recomMatrix.columns, columns=xietong)
    for i in range(len(recomMatrix.columns)):
        temp = recomMatrix.sort_values(by=recomMatrix.columns[i], ascending=False)     # pages ranked by this user's scores
        k = 0
        while k < K:
            tuijian.iloc[i, k] = temp.index[k]
            if temp.iloc[k, i] == 0:     # once the score drops to 0 there is nothing left to recommend
                tuijian.iloc[i, k:K] = np.nan
                break
            k = k + 1
    return tuijian     # return after all users have been processed, not inside the loop

start1 = time.perf_counter()
final_result = tuijian_result(3, result2)
end1 = time.perf_counter()
This article has only presented one recommendation algorithm. Other recommendation models are worth trying; one can then compare the evaluation metrics of each model at different numbers of recommendations and compute each model's F1 score.
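
As a starting point for that comparison, here is a rough sketch (my own helper, not from the book) of precision, recall and F1 for a top-K table like final_result, given a held-out 0-1 ground-truth matrix. Note that in the pipeline above the scores already zero out the pages the test users visited, so a fair evaluation would first hold out part of each test user's visits (the hypothetical held_out DataFrame below) and recommend from the rest; the sketch also assumes each user appears only once in the index.

def topk_f1(recommendations, ground_truth):
    # recommendations: DataFrame indexed by user, each column a recommendation slot holding a URL (or NaN)
    # ground_truth:    0-1 DataFrame indexed by user, one column per URL, 1 = the user really visited that page
    hits = n_recommended = n_relevant = 0
    for ip in recommendations.index:
        recs = [u for u in recommendations.loc[ip] if pd.notna(u)]
        liked = set(ground_truth.columns[ground_truth.loc[ip] == 1])
        hits += len(set(recs) & liked)
        n_recommended += len(recs)
        n_relevant += len(liked)
    precision = hits / n_recommended if n_recommended else 0.0
    recall = hits / n_relevant if n_relevant else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1

# Example usage (held_out is hypothetical and would come from splitting each test user's visits):
# precision, recall, f1 = topk_f1(final_result, held_out)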

Reposted from blog.csdn.net/m0_37215794/article/details/80803067