Charging Pile Fault Detection (XGBoost)

Copyright notice: This is an original article by the blogger and may not be reproduced without permission. https://blog.csdn.net/sunshunli/article/details/80597877
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
import xgboost as xgb
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn.preprocessing as preprocessing

# Load the data

col_names = ["ID","K1K2驱动信号","电子锁驱动信号","急停信号","门禁信号","THDV-M","THDI-M","label"]
data = pd.read_csv("data_train.csv",names=col_names)

# Fill missing values first (fillna returns a new frame, so assign it back)
data = data.fillna(0)

# Preprocessing: standardize every feature column with one scaler, so the same
# transform can be reused on the test set later
feature_cols = ["K1K2驱动信号", "电子锁驱动信号", "急停信号", "门禁信号", "THDV-M", "THDI-M"]
scaler = preprocessing.StandardScaler()
data[feature_cols] = scaler.fit_transform(data[feature_cols])

dataset_X = data[feature_cols]        # 2-D feature matrix (DataFrame)
dataset_Y = data["label"].to_numpy()  # 1-D label array (.as_matrix() was removed in modern pandas)
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(dataset_X, dataset_Y, test_size=0.2, random_state=1)
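# Sketch (not in the original): since the classes are imbalanced (handled below via
# scale_pos_weight), the split could also preserve the class ratio by passing
# stratify=dataset_Y to train_test_split.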

xgb_val = xgb.DMatrix(x_test, label=y_test)
xgb_train = xgb.DMatrix(x_train, label=y_train)
xgb_test = xgb.DMatrix(x_test)
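# Sketch (not in the original): a sentinel for missing values is normally supplied
# here rather than in the params dict, e.g. xgb.DMatrix(x_train, label=y_train, missing=1.0).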
# Training parameters
params = {
    'booster': 'gbtree',
    # 'objective': 'multi:softmax',   # multi-class classification (class labels)
    # 'objective': 'multi:softprob',  # multi-class classification (class probabilities)
    'objective': 'binary:logistic',
    'eval_metric': 'logloss',
    # 'num_class': 9,  # number of classes, used together with multi:softmax
    'gamma': 0.1,  # min loss reduction to split (post-pruning); larger is more conservative, typically 0.1-0.2
    'max_depth': 8,  # tree depth; larger values overfit more easily (important)
    'alpha': 0,  # L1 regularization coefficient
    'lambda': 11,  # L2 regularization on weights, controlling model complexity; larger values make overfitting less likely
    'subsample': 0.7,  # row subsampling ratio for training instances
    'colsample_bytree': 0.5,  # column subsampling ratio when building each tree
    # 'min_child_weight': 2,
    'min_child_weight': 3,
    # Defaults to 1: the minimum sum of instance hessians (h) in a leaf. For an
    # imbalanced 0-1 classification problem where h is around 0.01, a value of 1
    # means a leaf needs at least ~100 samples. This parameter strongly affects
    # the result: the smaller it is, the easier it is to overfit.
    'silent': 0,  # 1 suppresses training output; 0 is usually preferable ('verbosity' in newer XGBoost)
    'eta': 0.2,  # shrinkage, i.e. the learning rate
    'seed': 100,
    'nthread': -1,  # number of CPU threads
    'missing': 1,  # note: 'missing' is normally a DMatrix argument, not a training parameter
    'scale_pos_weight': (np.sum(y_train == 0) / np.sum(y_train == 1)),
    # handles class imbalance; typically sum(negative cases) / sum(positive cases)
    # 'eval_metric': 'auc'
}

# Note: 'is_unbalance' and 'metric' (set here in the original) are LightGBM parameters,
# not XGBoost ones; XGBoost handles imbalance via scale_pos_weight and the metric via
# eval_metric, both already set above, so those two lines are dropped.

num_rounds = 4000  # number of boosting rounds
watchlist = [(xgb_train, 'train'), (xgb_val, 'val')]

# Cross-validation; the explicit StratifiedKFold folds override the nfold argument,
# so nfold is dropped here
result = xgb.cv(params, xgb_train, num_boost_round=10, early_stopping_rounds=200, verbose_eval=True,
                folds=list(StratifiedKFold(n_splits=6).split(x_train, y_train)))
# early_stopping_rounds: when the number of boosting rounds is large, training stops
# if the eval metric has not improved within that many rounds (with only 10 rounds
# here it never actually triggers)
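# Sketch (not in the original): result is a DataFrame with one row per completed
# boosting round, so its length gives a data-driven round count that could replace
# the fixed num_rounds below:
# best_rounds = result.shape[0]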

# Train the model and (optionally) save it

model = xgb.train(params, xgb_train, num_rounds, watchlist)
# model.save_model('../data/model/xgb.model')  # persist the trained model
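# Sketch (not in the original): the trained booster can be inspected and reloaded
# with standard Booster APIs:
# print(model.get_score(importance_type='gain'))  # per-feature importance
# loaded = xgb.Booster()
# loaded.load_model('../data/model/xgb.model')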

preds = model.predict(xgb_test)
print(preds)

# Threshold the predicted probabilities at 0.51 to obtain class labels
y_predict = [1 if p > 0.51 else 0 for p in preds]
print(len(y_predict))
acc = accuracy_score(y_test, y_predict)
print(acc)
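# Sketch (not in the original): the raw probabilities also support threshold-free
# metrics such as ROC AUC via scikit-learn:
# from sklearn.metrics import roc_auc_score
# print(roc_auc_score(y_test, model.predict(xgb_test)))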

# Load the unlabeled test set and apply the same preprocessing as the training data
def read_data():
    col_names = ["ID", "K1K2驱动信号", "电子锁驱动信号", "急停信号", "门禁信号", "THDV-M", "THDI-M"]
    data = pd.read_csv("data_test.csv", names=col_names)
    # print(data.info())
    data = data.fillna(0)
    data[feature_cols] = scaler.transform(data[feature_cols])  # reuse the scaler fitted on the training data
    return data[feature_cols], data["ID"]

subdata, Id = read_data()
xgb_test1 = xgb.DMatrix(subdata)
y_predict = model.predict(xgb_test1)
print(y_predict)
# Threshold the predicted probabilities at 0.51, as above
y_predict = [1 if p > 0.51 else 0 for p in y_predict]
print(y_predict)
print(subdata)


submission = pd.DataFrame({
    "id": Id,
    "prediction": y_predict
})
submission.to_csv("one1.csv", index=None, header=None)
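# Note (not in the original): one1.csv is written without header or index, so each
# row is simply "ID,prediction"; a quick spot-check:
# print(pd.read_csv("one1.csv", header=None).head())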


