Two ways to use LightGBM (personally tested, lightgbm version 2.2.3)

Using lightgbm via the native API (import lightgbm as lgb)

 


import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split

# Load data
iris = load_iris()
data = iris.data
target = iris.target

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)
print("Train data length:", len(X_train))
print("Test data length:", len(X_test))

# Convert to LightGBM Dataset format
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# Parameters
params = {
    'task': 'train',
    'boosting_type': 'gbdt',    # boosting type
    'objective': 'regression',  # objective function
    'metric': {'l2', 'auc'},    # evaluation metrics
    'num_leaves': 31,           # number of leaves
    'learning_rate': 0.05,      # learning rate
    'feature_fraction': 0.9,    # fraction of features used when building each tree
    'bagging_fraction': 0.8,    # fraction of samples used when building each tree
    'bagging_freq': 5,          # k means bagging is performed every k iterations
    'verbose': 1                # <0: fatal only, =0: errors (warnings), >0: info
}

# Train the model
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=20,
                valid_sets=lgb_eval,
                early_stopping_rounds=5)

# Save the model
gbm.save_model('model.txt')

# Load the model
gbm = lgb.Booster(model_file='model.txt')

# Predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)

# Evaluate
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
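
If you would rather not fix num_boost_round by hand, the native API also provides lgb.cv for k-fold cross-validation. The snippet below is a minimal sketch of my own (not from the original post); it assumes the lgb_train Dataset and params dict defined above and lightgbm 2.2.3.

# Cross-validation sketch: reuses lgb_train and params from above (assumption).
# metrics='l2' overrides the metric set in params so only one curve is returned.
cv_results = lgb.cv(params,
                    lgb_train,
                    num_boost_round=100,
                    nfold=5,
                    stratified=False,        # regression targets cannot be stratified
                    metrics='l2',
                    early_stopping_rounds=5,
                    seed=42)

# cv_results is a dict of per-iteration lists, e.g. cv_results['l2-mean'];
# its length is the number of rounds kept after early stopping.
print('Best number of rounds:', len(cv_results['l2-mean']))
print('CV l2 at best round:', cv_results['l2-mean'][-1])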

Using lightgbm via the sklearn interface (from lightgbm import LGBMRegressor)

 


from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.externals import joblib  # note: in scikit-learn >= 0.23 use `import joblib` instead

# Load data
iris = load_iris()
data = iris.data
target = iris.target

# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=0.2)

# Train the model
gbm = LGBMRegressor(objective='regression', num_leaves=31, learning_rate=0.05, n_estimators=20)
gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='l1', early_stopping_rounds=5)

# Save the model
joblib.dump(gbm, 'loan_model.pkl')

# Load the model
gbm = joblib.load('loan_model.pkl')

# Predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)

# Evaluate
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)

# Feature importances
print('Feature importances:', list(gbm.feature_importances_))

# Grid search for hyperparameter tuning
estimator = LGBMRegressor(num_leaves=31)
param_grid = {
    'learning_rate': [0.01, 0.1, 1],
    'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
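
Iris is really a three-class classification dataset, so the sklearn wrapper's LGBMClassifier is the more natural fit for it. The snippet below is a minimal sketch of my own (not from the original post); it assumes the X_train/X_test/y_train/y_test split from above and lightgbm 2.2.3.

from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score

# Classification sketch: reuses the train/test split defined above (assumption).
clf = LGBMClassifier(objective='multiclass',   # iris has 3 classes
                     num_leaves=31,
                     learning_rate=0.05,
                     n_estimators=20)
clf.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='multi_logloss',
        early_stopping_rounds=5)

y_pred = clf.predict(X_test)          # predicted class labels
y_prob = clf.predict_proba(X_test)    # per-class probabilities
print('Accuracy:', accuracy_score(y_test, y_pred))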

Author: chenxiangzhen

Source: https://www.cnblogs.com/chenxiangzhen/p/10894306.html

This site uses the Creative Commons "Attribution 4.0 International" license; when reposting, please credit the author and the source prominently in the article.


Reposted from blog.csdn.net/gb4215287/article/details/105210690