CCF&滴滴出行——路况状态时空预测AI算法竞赛baseline方案一(DNN模型)

楼主最近在参加一个比赛:废话不多说,比赛链接如下:
https://www.datafountain.cn/competitions/466
现在分享自己的代码,该方案目前成绩在65/2865

运行环境: python 3.7.9 + tensorflow 2.0.0 + keras 2.3.1
Name:Model_DNN

一:数据预处理+特征工程
本部分代码最后分别输出经过处理的训练集和测试集两个 CSV 文件

# Version: python 3.7.9  tensorflow 2.0.0 keras 2.3.1
# Name:Model DNN
import numpy as np
import pandas as pd
import os
import datetime
from tqdm import tqdm
from collections import Counter
import warnings
warnings.filterwarnings("ignore")


def get_info(x):
    """Return the value part (text after the last ':') of every space-separated token in *x*."""
    return [token.rpartition(":")[2] for token in x.split(" ")]
def get_speed(x):
    """Extract the speed (1st comma field) of each record as a float16 array."""
    speeds = [record.split(",", 1)[0] for record in x]
    return np.asarray(speeds, dtype=np.float16)
def get_eta(x):
    """Extract the ETA speed (2nd comma field) of each record as a float16 array."""
    etas = [record.split(",")[1] for record in x]
    return np.asarray(etas, dtype=np.float16)
def get_state(x):
    """Extract the road-state code (3rd comma field) of each record as a string array."""
    states = [record.split(",")[2] for record in x]
    return np.array(states)
def get_cnt(x):
    """Extract the vehicle count (4th comma field) of each record as an int16 array."""
    counts = [record.split(",")[3] for record in x]
    return np.asarray(counts, dtype=np.int16)


def get_feature(input_file_path_his, input_file_path_attr, input_file_path_topo, mode):
    """Build a per-link feature table from raw history text plus road attributes,
    write it to ./data/ as CSV, and return it.

    Parameters
    ----------
    input_file_path_his : str
        ';'-separated history file.  Field 0 is "link label current_slice_id
        future_slice_id"; fields 1-5 each hold space-separated
        "slice_id:speed,eta_speed,state,car_count" records for one window.
    input_file_path_attr : str
        Tab-separated road-attribute file (link, length, direction, path_class,
        speed_class, LaneNum, speed_limit, level, width).
    input_file_path_topo : str
        Road-topology file path.  NOTE(review): accepted but never read here.
    mode : str
        "is_train" remaps the label to 0..2 and keeps it; any other value
        drops the label column (test data has no usable label).

    Returns
    -------
    pandas.DataFrame
        The assembled feature table (also persisted to disk).
    """

    def _records(cell):
        # "slice:speed,eta,state,cnt ..." -> ["speed,eta,state,cnt", ...]
        return [tok.split(":")[-1] for tok in cell.split(" ")]

    def _field(records, idx, dtype=None):
        # The idx-th comma field of every record, as a numpy array.
        values = [rec.split(",")[idx] for rec in records]
        return np.array(values, dtype=dtype) if dtype is not None else np.array(values)

    df = pd.read_csv(input_file_path_his, sep=";", header=None)

    # Field 0 packs four space-separated ids; split it once instead of four times.
    head = df[0].str.split(" ")
    df["link"] = head.str[0].astype(int)
    df["label"] = head.str[1].astype(int)
    df["current_slice_id"] = head.str[2].astype(int)
    df["future_slice_id"] = head.str[3].astype(int)
    df["time_diff"] = df["future_slice_id"] - df["current_slice_id"]
    df = df.drop([0], axis=1)

    if mode == "is_train":
        # Collapse labels >= 3 into one class, then shift to 0-based (0..2).
        df["label"] = df["label"].map(lambda x: 3 if x >= 3 else x)
        df['label'] -= 1
    else:
        df = df.drop(["label"], axis=1)

    # Most recent record of field 1: traffic speed, eta speed, state, car count.
    current = df[1].apply(lambda x: x.split(" ")[-1].split(":")[-1])
    df["current_speed"] = current.apply(lambda x: x.split(",")[0])
    # NOTE: "eat" is a historic typo for "eta"; kept so the output schema is unchanged.
    df["current_eat_speed"] = current.apply(lambda x: x.split(",")[1])
    df["current_state"] = current.apply(lambda x: x.split(",")[2])
    df["current_count"] = current.apply(lambda x: x.split(",")[3])

    # Five historical windows (fields 1..5); the tag counts down 35,28,21,14,7.
    # (tqdm progress bar removed: cosmetic only, non-stdlib dependency.)
    for i in range(1, 6):
        flag = f"his_{(6 - i) * 7}"
        recs = df[i].apply(_records)

        df[f"{flag}_speed_mean"] = recs.apply(lambda r: _field(r, 0, "float16").mean())
        df[f"{flag}_eta_mean"] = recs.apply(lambda r: _field(r, 1, "float16").mean())
        # Most / least frequent road state within the window.
        df[f"{flag}_state_max"] = recs.apply(lambda r: Counter(_field(r, 2)).most_common()[0][0])
        df[f"{flag}_state_min"] = recs.apply(lambda r: Counter(_field(r, 2)).most_common()[-1][0])
        df[f"{flag}_cnt_mean"] = recs.apply(lambda r: _field(r, 3, "int16").mean())
        df = df.drop([i], axis=1)

    # Static road attributes, joined on link id.
    df2 = pd.read_csv(input_file_path_attr, sep='\t',
                      names=['link', 'length', 'direction', 'path_class', 'speed_class',
                             'LaneNum', 'speed_limit', 'level', 'width'], header=None)
    df = df.merge(df2, on='link', how='left')

    # BUG FIX: ensure the output directory exists before writing.
    os.makedirs("./data", exist_ok=True)
    stem = input_file_path_his.split('/')[-1].split('.')[0]
    if mode == "is_train":
        output_file_path = f"./data/{mode}_{stem}.csv"
    else:
        output_file_path = f"./data/{stem}.csv"
    df.to_csv(output_file_path, index=False, mode='w', header=True)
    return df

if __name__ == "__main__":
    # Timestamp the run so preprocessing duration is visible in the console.
    print(datetime.datetime.now())

    # Training set
    get_feature(
        input_file_path_his="D:/traffic-fix/input_data/traffic/20190701.txt",
        input_file_path_attr="D:/traffic-fix/input_data/road_attribute/attr.txt",
        input_file_path_topo="D:/traffic-fix/input_data/road_topo/topo.txt",
        mode="is_train",
    )

    # Test set
    get_feature(
        input_file_path_his="D:/traffic-fix/test_data/20190801_testdata.txt",
        input_file_path_attr="D:/traffic-fix/input_data/road_attribute/attr.txt",
        input_file_path_topo="D:/traffic-fix/input_data/road_topo/topo.txt",
        mode="is_test",
    )

    print(datetime.datetime.now())

二:建立模型+模型评估

# Version: python 3.7.9  tensorflow 2.0.0 keras 2.3.1
# Name:Model DNN
# =======================================================================

import tensorflow as tf
import keras as K
from keras_applications import vgg16
import pandas as pd
import numpy as np
import datetime
from keras import losses
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

def Dnn_Model(train=None, label=None, test=None, use_features=None, categorical_feats=None, n_class=3):
    """Train a small dense network and predict road states for *test*.

    Parameters
    ----------
    train : pandas.DataFrame
        Training table containing *use_features* and the *label* column
        (label values are expected to be 0-based, see preprocessing).
    label : str
        Name of the target column in *train*.
    test : pandas.DataFrame
        Feature table to score; a 1-based "label" column is added in place.
    use_features : list[str]
        Feature column names fed to the network.
    categorical_feats : list[str] | None
        Accepted for API compatibility; currently unused.
    n_class : int
        Number of output classes.

    Returns
    -------
    pandas.DataFrame
        test[["link", "current_slice_id", "future_slice_id", "label"]].
    """
    input_dim = train[use_features].shape[1]

    # BUG FIX: was hard-coded to num_classes=3, silently ignoring n_class.
    Y = np_utils.to_categorical(train[label], num_classes=n_class)
    train_x, test_x, train_y, test_y = train_test_split(
        train[use_features], Y, train_size=0.7, test_size=0.3, random_state=0)

    # Small fully-connected net: 5 -> 6 -> n_class with softmax output.
    # Fixed-seed initializer keeps runs reproducible.
    init = K.initializers.glorot_uniform(seed=1)
    model = K.models.Sequential()
    model.add(K.layers.Dense(units=5, input_shape=(input_dim,), kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=6, kernel_initializer=init, activation='relu'))
    model.add(K.layers.Dense(units=n_class, kernel_initializer=init, activation='softmax'))
    # rmsprop left at Keras defaults; can be swapped for a configured optimizer.
    model.compile(loss="categorical_crossentropy",
                  optimizer="rmsprop",
                  metrics=["accuracy"])

    b_size = 128
    max_epochs = 10
    model.fit(train_x, train_y, batch_size=b_size, epochs=max_epochs, shuffle=True,
              validation_data=(test_x, test_y), verbose=1)
    # Report held-out loss/accuracy for a quick sanity check.
    loss_and_metrics = model.evaluate(test_x, test_y, batch_size=128)
    print(loss_and_metrics)

    predictions = model.predict(test[use_features])

    # Shift argmax back to 1-based labels (preprocessing subtracted 1).
    test["label"] = np.argmax(predictions, axis=1) + 1
    return test[["link", 'current_slice_id', 'future_slice_id', "label"]]



if __name__ == "__main__":
    # Tables produced by the preprocessing script (part one).
    train = pd.read_csv('./data/is_train_20190701.csv')
    test = pd.read_csv("./data/20190801_testdata.csv")

    # Drop identifier and target columns from the feature list.
    del_feature = ['link', 'label']
    use_features = [i for i in train.columns if i not in del_feature]
    # BUG FIX: names now match the attr columns written by preprocessing
    # ("path_class"/"speed_class", not "pathclass"/"speedclass").
    # NOTE(review): still unused — the call below passes categorical_feats=None.
    category = ["direction", "path_class", "speed_class", "LaneNum", "level"]
    print(datetime.datetime.now())
    submit = Dnn_Model(train=train, label="label", test=test, use_features=use_features,
                       categorical_feats=None, n_class=3)
    submit.to_csv('submit.csv', index=False, encoding='utf8')

    print(datetime.datetime.now())

猜你喜欢

转载自blog.csdn.net/yuekangwei/article/details/110201384
今日推荐