2019腾讯广告算法大赛之整理测试数据集以及构造训练集

版权声明:版权归世界上所有无产阶级所有 https://blog.csdn.net/qq_41776781/article/details/89845831

在重构训练样本之前我们首先需要对测试集中样本进行整理,因为训练样本要和测试样本在维度上(属性列)保持一致。首先看一下原始样本的格式:

除了人群定向这一列属性需要根据关键字进行分裂之外,对于其他属性我没有做任何修改,Okay!!!对于人群定向的修改可以分成两种情况,

第一种: 当该条记录中的关键字是(属性列名: 具体值)对于出现的属性列进行保存,没出现的属性列直接设置成-1

第二种: 其中的关键字是all,首先统计一下user_data每一列的所有取值(data.drop_duplicates()),然后将赋值给对应的列

最后测试样本的样式是:

既然测试集已经给出,所以现在的目标就是对之前生成的文件进行拼接操作,最后拼接的结果和测试样本一样即可。数据拼接的步骤如下:

第一: 要将广告操作数据集中的人群定向,修改成和测试集一样的格式

第二: 将静态广告数据和广告操作进行拼接操作,进行pd.merge()

第三: 将生成带有点击量的曝光日志数据和广告数据按照广告ID进行拼接操作。

最后训练集构造的格式如下

对测试集重构之后的代码:

# -*- coding: utf-8 -*-
# @Time    : 2019/5/3 8:53
# @Author  : YYLin
# @Email   : [email protected]
# @File    : Redo_Dataload_Sample_Data.py
import pandas as pd
import sys
import operator
from functools import reduce

# Column layout of the rebuilt test set; the first row of the accumulator is
# the header itself.  num_click is unknown for test records, so every row gets
# the placeholder value -3.
Test_Sample_Data_columns = [
    'ad_id', 'ad_bid', 'num_click', 'Ad_material_size', 'Ad_Industry_Id', 'Commodity_type',
    'Delivery_time', 'age', 'gender', 'area', 'education', 'device', 'consuptionAbility',
    'status', 'connectionType', 'behavior']

Test_Sample_Data = [Test_Sample_Data_columns]
int_num_click = -3

# When a test record's targeting ("人群定向") is 'all', fall back to the full
# value range of each column observed in the raw user data.
user_data = pd.read_csv('../Dataset/tencent-dataset-19/dataset-for-user/userFeature_1000.csv')
print("*************user_data************", user_data.info())


def _unique_join(series):
    """Space-join the unique values of a single-valued column (first-seen order)."""
    return ' '.join(str(v) for v in series.drop_duplicates(keep='first'))


def _multi_unique_join(series):
    """Space-join the unique values of a comma-separated multi-valued column.

    Cells are split on ',' and deduplicated across all rows; sorted for a
    deterministic output (the original joined an arbitrarily-ordered set).
    """
    values = set()
    for cell in series:
        # NOTE(review): the original expanded single-valued cells into
        # character lists via list(str); here each comma-separated part is
        # kept whole — confirm against the raw userFeature data.
        values.update(str(cell).strip().split(','))
    return ' '.join(sorted(values))


all_age = _unique_join(user_data['Age'])
all_Gender = _unique_join(user_data['Gender'])

# Area is multi-valued, so flatten before deduplicating.  Fixed: the original
# built the deduplicated set (User_Area_set) but then joined the raw,
# duplicated list, so the dedup never took effect.
all_Area = _multi_unique_join(user_data['Area'])
print("all_Area的类型是:\n", type(all_Area), type(all_Area[1]), )

all_Education = _unique_join(user_data['Education'])
print("all_Education的类型是:\n", len(all_Education), type(all_Education), type(all_Education[1]))

all_Consuption_Ability = _unique_join(user_data['Consuption_Ability'])
all_Device = _unique_join(user_data['Device'])

# Work_Status and Behavior may also hold comma-separated multi-values.
all_Work_Status = _multi_unique_join(user_data['Work_Status'])
all_Connection_Type = _unique_join(user_data['Connection_Type'])

# Behavior has a very large value range; kept here for completeness.
all_Behavior = _multi_unique_join(user_data['Behavior'])
print("用户数据集中Behavior的取值范围是", len(all_Behavior.split()))
print("用户数据集中所有的属性值已加载完毕!!!!")


# Rewrite the targeting ("人群定向") field of each test record into one column
# per targeting attribute, producing rows matching Test_Sample_Data_columns.

# Targeting attribute names, in output-column order.
TARGET_KEYS = ('age', 'gender', 'area', 'education', 'device',
               'consuptionAbility', 'status', 'connectionType', 'behavior')
# Value ranges substituted when a record targets 'all' users.
ALL_VALUES = {
    'age': all_age, 'gender': all_Gender, 'area': all_Area,
    'education': all_Education, 'device': all_Device,
    'consuptionAbility': all_Consuption_Ability, 'status': all_Work_Status,
    'connectionType': all_Connection_Type, 'behavior': all_Behavior,
}

with open('../Dataset/tencent-dataset-19/test_sample.dat', 'r') as f:
    for i, line in enumerate(f):
        # Raw column meaning (tab-separated):
        # Sample_id ad_id Creation_time Ad_material_size Ad_Industry_Id
        # Commodity_type Commerce_id Account_id Delivery_time Chose_People ad_bid
        fields = line.strip().split('\t')

        # Directly copied columns; num_click gets the -3 placeholder.
        save_line = [fields[1], fields[10], int_num_click,
                     fields[3], fields[4], fields[5]]

        # Delivery_time may be multi-valued: turn commas into spaces.
        save_line.append(' '.join(fields[8].strip().split(',')))

        # Split Chose_People (fields[9]) into its key:value pairs.
        userFeature_dict = {}
        for each in fields[9].strip().split('|'):
            parts = each.split(':')
            userFeature_dict[parts[0]] = ' '.join(parts[1:])

        if 'all' in userFeature_dict:
            # 'all' targeting: substitute the full value range of every column.
            save_line.extend(ALL_VALUES[key] for key in TARGET_KEYS)
        else:
            # Attributes absent from the record become empty strings.
            save_line.extend(userFeature_dict.get(key, '') for key in TARGET_KEYS)

        Test_Sample_Data.append(save_line)
        if i == 2:
            print("******最后用于保存结果的数据格式是:\n", Test_Sample_Data[3][10], '\n', len(Test_Sample_Data[3][10]))

# Persist the rebuilt test set (header row is already Test_Sample_Data[0]).
user_feature = pd.DataFrame(Test_Sample_Data)
user_feature.to_csv('../Dataset/dataset_for_train/Test_Sample_Data_all.csv', index=False, header=None)



将广告数据集和用户数据集进行拼接操作:

# -*- coding: utf-8 -*-
# @Time    : 2019/5/1 15:55
# @Author  : YYLin
# @Email   : [email protected]
# @File    : Generator_Label_For_Train.py
import pandas as pd
import datetime
import numpy as np
# Attach a click count to every (day, ad, bid) combination of the exposure log,
# drop the columns the test set does not have, then join with the static-ad and
# ad-operation tables to form the training set.
Total_Exposure_Log_Data = pd.read_csv('../Dataset/dataset_for_train/Total_Exposure_Log_Data.csv')
print("原始数据集中的样式是:\n", Total_Exposure_Log_Data.info())

# Parse Ad_Request_Time by fixed positions ('YYYY?MM?DD?HH?MM?SS' layout —
# separators are skipped, so any single-char delimiter works).
tfa = Total_Exposure_Log_Data.Ad_Request_Time.astype(str).apply(
    lambda x: datetime.datetime(int(x[:4]), int(x[5:7]), int(x[8:10]),
                                int(x[11:13]), int(x[14:16]), int(x[17:])))

Total_Exposure_Log_Data['tfa_year'] = np.array([x.year for x in tfa])
Total_Exposure_Log_Data['tfa_month'] = np.array([x.month for x in tfa])
Total_Exposure_Log_Data['tfa_day'] = np.array([x.day for x in tfa])
print("增加单独的年月日之后的数据形状是:\n", Total_Exposure_Log_Data.info())

# Grouping key used three times below; hoisted so the lists cannot drift apart.
GROUP_KEYS = ['tfa_year', 'tfa_month', 'tfa_day', 'ad_id', 'Ad_bid']

# Group size == number of exposures == click count for that key.
Group_Exposure_Data = (Total_Exposure_Log_Data.groupby(GROUP_KEYS).size()
                       .reset_index().rename(columns={0: 'num_click'}))
print("按照年月日 广告id和广告竞价进行分组之后的数据是:\n", Group_Exposure_Data.head(5))

# Deduplicate on the same key, then merge the click counts back on.
Total_Exposure_Log_Data_one = Total_Exposure_Log_Data.drop_duplicates(subset=GROUP_KEYS, keep="first").reset_index(drop=True)
Clicks_of_Exposure_Data = pd.merge(Total_Exposure_Log_Data_one, Group_Exposure_Data, on=GROUP_KEYS)

# Drop the attributes the test set lacks — one call instead of twelve
# separate inplace drops.
Clicks_of_Exposure_Data.drop(columns=['Ad_Request_id', 'Ad_Request_Time', 'user_id',
                                      'Ad_material_size', 'Ad_pctr', 'Ad_quality_ecpm',
                                      'Ad_total_Ecpm', 'tfa_year', 'tfa_month',
                                      'tfa_day', 'Ad_pos_id', 'Ad_bid'],
                             inplace=True)

print("广告数据集中需要保存的信息格式是:\n", Clicks_of_Exposure_Data.info())
Clicks_of_Exposure_Data.to_csv('../Dataset/dataset_for_train/Clicks_of_Exposure_Data.csv', index=False)

# Join the exposure log with the static ad table on ad_id.
Ad_Static_Data = pd.read_csv('../Dataset/dataset_for_train/Ad_Static_Feature_Data.csv')
Ad_Static_Data.drop(columns=['Commodity_id', 'Ad_account_id', 'Creation_time'], inplace=True)
print("*********静态数据集的样式是:\n", Ad_Static_Data.info())
Merce_Ad_Static_and_Exposure_Data = pd.merge(Clicks_of_Exposure_Data, Ad_Static_Data, on=['ad_id'])

# Join with the (deduplicated) ad-operation table on ad_id.
Op_Ad_Data = pd.read_csv('../Dataset/dataset_for_train/Ad_Operation_Data.csv').drop_duplicates(['ad_id'])
Op_Ad_Data.drop(columns=['Create_modify_time'], inplace=True)

Dataset_For_Train = pd.merge(Op_Ad_Data, Merce_Ad_Static_and_Exposure_Data, on=['ad_id'])
print("最后数据集保存的样式是:\n", Dataset_For_Train.info())
Dataset_For_Train.to_csv('../Dataset/dataset_for_train/Dataset_For_Train.csv', index=False)

生成训练样本的代码:

# -*- coding: utf-8 -*-
# @Time    : 2019/5/3 14:33
# @Author  : YYLin
# @Email   : [email protected]
# @File    : Redo_Dataload_For_Train.py
# 测试样本中的格式是
# ad_id ad_bid num_click Ad_material_size Ad_Industry_Id Commodity_type Delivery_time
# age gender area education device consuptionAbility status connectionType behavior
# 首先使用pandas将数据集划分成三个部分 分别是单独属性 人群定向文件 以及最后的投放时间
import pandas as pd
from functools import reduce
import operator
import sys

# Split the merged training table into three partial files: the plain
# single-valued columns, the targeting column, and the delivery-time column.
data_for_train = pd.read_csv('../Dataset/dataset_for_train/dataset_for_train.csv')

# Targeting and delivery time need per-row rewriting, so they are processed
# separately below.
data_for_chose_people = data_for_train['Chose_People']
data_for_chose_time = data_for_train['Delivery_time']

# Keep only the plain columns.  The explicit drops of Chose_People and
# Delivery_time in the original were redundant: this selection already
# excludes them.
data_for_train = data_for_train[
    ['ad_id', 'ad_bid', 'num_click', 'Ad_material_size', 'Ad_Industry_Id', 'Commodity_type']]

data_for_chose_people.to_csv('../Dataset/data/Chose_people.csv', index=False)
data_for_chose_time.to_csv('../Dataset/data/Chose_time.csv', index=False)
data_for_train.to_csv('../Dataset/data/data_for_train_test.csv', index=False)

# Rewrite Delivery_time: commas become spaces.  A running line count is kept so
# the row counts of the partial files can be cross-checked later.
time_line = ['Delivery_time']
count_line_of_time = 1

with open('../Dataset/data/Chose_time.csv', 'r') as f:
    for raw in f:
        count_line_of_time += 1
        time_line.append(' '.join(raw.strip().split(',')))

chose_time = pd.DataFrame(time_line)
chose_time.to_csv('../Dataset/data/chose_time.csv', index=False, header=False)

# Release intermediates to save memory.
del chose_time, time_line

# Accumulator for the rewritten targeting rows; first entry is the header.
people_line = []
count_line_of_people = 1
people_line_columns = ['age', 'gender', 'area', 'education', 'device', 'consuptionAbility', 'status', 'connectionType',
                       'behavior']
people_line.append(people_line_columns)

# When the targeting is 'all', fall back to the full value range of each
# column observed in the raw user data.
user_data = pd.read_csv('../Dataset/tencent-dataset-19/dataset-for-user/userFeature_1000.csv')
print("*************user_data************", user_data.info())


def _unique_join(series):
    """Space-join the unique values of a single-valued column (first-seen order)."""
    return ' '.join(str(v) for v in series.drop_duplicates(keep='first'))


def _multi_unique_join(series):
    """Space-join the unique values of a comma-separated multi-valued column.

    Cells are split on ',' and deduplicated across all rows; sorted for a
    deterministic output (the original joined an arbitrarily-ordered set).
    """
    values = set()
    for cell in series:
        # NOTE(review): the original expanded single-valued cells into
        # character lists via list(str); here each comma-separated part is
        # kept whole — confirm against the raw userFeature data.
        values.update(str(cell).strip().split(','))
    return ' '.join(sorted(values))


all_age = _unique_join(user_data['Age'])
all_Gender = _unique_join(user_data['Gender'])

# Area is multi-valued, so flatten before deduplicating.  Fixed: the original
# built the deduplicated set but then joined the raw, duplicated list.
all_Area = _multi_unique_join(user_data['Area'])
print("all_Area的类型是:\n", type(all_Area), type(all_Area[1]), )

all_Education = _unique_join(user_data['Education'])
print("all_Education的类型是:\n", len(all_Education), type(all_Education), type(all_Education[1]))

all_Consuption_Ability = _unique_join(user_data['Consuption_Ability'])
all_Device = _unique_join(user_data['Device'])

# Work_Status and Behavior may also hold comma-separated multi-values.
all_Work_Status = _multi_unique_join(user_data['Work_Status'])
all_Connection_Type = _unique_join(user_data['Connection_Type'])

# Behavior has a very large value range; kept here for completeness.
all_Behavior = _multi_unique_join(user_data['Behavior'])
print("用户数据集中Behavior的取值范围是", len(all_Behavior.split()))

# Rewrite the Chose_People column into one column per targeting attribute.

# Targeting attribute names, in output-column order.
TARGET_KEYS = ('age', 'gender', 'area', 'education', 'device',
               'consuptionAbility', 'status', 'connectionType', 'behavior')
# Value ranges substituted when a record targets 'all' users.
ALL_VALUES = {
    'age': all_age, 'gender': all_Gender, 'area': all_Area,
    'education': all_Education, 'device': all_Device,
    'consuptionAbility': all_Consuption_Ability, 'status': all_Work_Status,
    'connectionType': all_Connection_Type, 'behavior': all_Behavior,
}

with open('../Dataset/data/Chose_people.csv', 'r') as f:
    for i, line in enumerate(f):
        count_line_of_people += 1

        # Progress report every 10k rows.
        if i % 10000 == 0:
            print("我已经执行了%d条数据了" % (i))

        # Split the targeting string into its key:value pairs.
        userFeature_dict = {}
        for each in line.strip().split('|'):
            parts = each.split(':')
            userFeature_dict[parts[0]] = ' '.join(parts[1:])

        if 'all' in userFeature_dict:
            # 'all' targeting: substitute the full value range of every column.
            save_line = [ALL_VALUES[key] for key in TARGET_KEYS]
        else:
            # Attributes absent from the record become empty strings.
            save_line = [userFeature_dict.get(key, '') for key in TARGET_KEYS]
        people_line.append(save_line)

# The targeting file and the delivery-time file must have the same row count,
# otherwise the later column-wise concat would misalign rows.
if count_line_of_people != count_line_of_time:
    print("数据集中的人群定向和指定时间行数不相等,系统退出")
    sys.exit()

print("***********程序已经加载完毕,正在保存数据***************")
# Persist the rewritten targeting rows (header row is people_line[0]).
chose_people = pd.DataFrame(people_line)
chose_people.to_csv('../Dataset/data/chose_people.csv', index=False, header=False)
print("已经保存好人群定向的数据,开始将三个数据进行拼接操作")

# Stitch the three partial files back together column-wise:
# plain columns + delivery time + targeting attributes.
frames = [
    pd.read_csv('../Dataset/data/data_for_train_test.csv'),
    pd.read_csv('../Dataset/data/chose_time.csv'),
    pd.read_csv('../Dataset/data/chose_people.csv'),
]
result = pd.concat(frames, axis=1)
result.to_csv('../Dataset/dataset_for_train/result_for_train_all.csv', index=False)



猜你喜欢

转载自blog.csdn.net/qq_41776781/article/details/89845831