随机森林预测算法的实现

版权声明:本文为博主原创文章,未经博主允许不得转载。 https://blog.csdn.net/Der_Jiang/article/details/89737580

本文拟采用随机森林实现空气质量的预测。

实现环境:python3.5

所需包:pandas 、numpy、matplotlib、csv等。引入包如下:

import pandas as pd
import numpy as np

import matplotlib.pyplot as plt
import matplotlib
import csv
import random
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

实现过程:

  1.数据获取。

本文数据来源:天气后报网:http://www.tianqihoubao.com。通过python的requests库爬取2018年绵阳历史空气质量信息,并将数据存入csv文件。

import csv
import time

import requests
from bs4 import BeautifulSoup

# Scrape the 2018 monthly AQI history pages for Mianyang from
# tianqihoubao.com and append one CSV row per day to the output file.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
}

# Open the output file ONCE instead of re-opening it for every table row,
# and let csv.writer handle quoting/newlines correctly.
with open('air_mianyang_2018.csv', 'a+', encoding='utf-8-sig', newline='') as f:
    writer = csv.writer(f)
    for month in range(1, 13):
        time.sleep(5)  # throttle requests; be polite to the server
        # "%02d" zero-pads the month (1 -> "01") to match the site's URLs.
        url = 'http://www.tianqihoubao.com/aqi/mianyang-2018%02d.html' % month
        response = requests.get(url=url, headers=headers)
        soup = BeautifulSoup(response.text, 'html.parser')
        rows = soup.find_all('tr')
        # Skip the table's header row.
        for row in rows[1:]:
            # Strip every cell so embedded whitespace/newlines from the HTML
            # cannot corrupt the CSV (the original only stripped the first 4).
            cells = [td.get_text().strip() for td in row.find_all('td')]
            # Columns: Date, Quality grade, AQI, AQI rank, PM2.5, PM10,
            # SO2, NO2, CO, O3.
            writer.writerow(cells[:10])
 

获取数据后,在第一列插入id和表头,数据如下:

  2.数据分析。

通过pandas读取数据,查看数据分布情况。

   data=pd.read_csv('t1.csv',index_col=0,encoding='gbk')
   print("数据相关信息:\n")
   print (data.describe())
   print("DataHead")
   print (data.head())
   print (data.shape)
   index=data.index
   class_names=np.unique(data.iloc[:,1])
   print("Classnames:")
   print (class_names)

  3.划分测试集、训练集,选取特征值。

划分测试集、训练集,并查看训练集和测试集相关信息。选取PM2.5、PM10、So2、No2、Co、O3作为特征值。

# Split into training (90%) and validation (10%) sets with a fixed seed
# for reproducibility, then pick the six pollutant concentrations as the
# feature matrix.
data_train, data_test = train_test_split(data, test_size=0.1, random_state=0)
print("训练集统计描述:\n", data_train.describe().round(2))
print("验证集统计描述:\n", data_test.describe().round(2))
print("训练集信息:\n", data_train.iloc[:, 2].value_counts())
print("验证集信息:\n", data_test.iloc[:, 2].value_counts())

# Feature columns 4..9 -> PM2.5, PM10, SO2, NO2, CO, O3.
X_train = data_train.iloc[:, 4:10]
X_test = data_test.iloc[:, 4:10]
feature = data_train.iloc[:, 4:10].columns
print(feature)

  4.选取参数,构建预测模型。

# Target variable: AQI (column 2).
y_train = data_train.iloc[:, 2]
y_test = data_test.iloc[:, 2]

# Inspect pairwise correlations between the numeric variables; the
# categorical quality grade is dropped first.
# NOTE(review): the .corr() result is computed but not printed/stored —
# wrap it in print(...) if you want to see the matrix.
data.drop([u'Quality_grade'], axis=1).corr()
import seaborn as sns
sns.set(style="ticks", color_codes=True)
palette = sns.xkcd_palette(['dark blue', 'dark green', 'gold', 'orange'])
# Scatter-plot matrix with kernel-density estimates on the diagonal.
sns.pairplot(data.drop([u'Quality_grade'], axis=1), diag_kind='kde',
             plot_kws=dict(alpha=0.7))
plt.show()

   ##参数选择
   from sklearn.model_selection import   GridSearchCV 
   from sklearn.model_selection import RandomizedSearchCV
   criterion=['mae','mse'] #决策树属性['gini','entropy']   回归:mae,mse
   n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
   max_features = ['auto', 'sqrt']
   max_depth = [int(x) for x in np.linspace(10, 100, num = 10)]
   max_depth.append(None)
   min_samples_split = [2, 5, 10]
   min_samples_leaf = [1, 2, 4]
   bootstrap = [True, False]
   random_grid = {'criterion':criterion,
               'n_estimators': n_estimators,
               'max_features': max_features,
               'max_depth': max_depth,
               'min_samples_split': min_samples_split,
               'min_samples_leaf': min_samples_leaf,
               'bootstrap': bootstrap}
  # #构建模型 随机森林
  clf= RandomForestRegressor()
  clf_random = RandomizedSearchCV(estimator=clf, param_distributions=random_grid,
                              n_iter = 10,  
                              cv = 3, verbose=2, random_state=42, n_jobs=1)
  #回归
  clf_random.fit(X_train,y_train)
  print (clf_random.best_params_)
 

  5.模型训练、验证和评估。

  rf=RandomForestRegressor(criterion='mse',bootstrap=False,max_features='sqrt', max_depth=20,min_samples_split=10,   n_estimators=1200,min_samples_leaf=2)
  rf.fit(X_train, y_train) 
  y_train_pred=rf.predict(X_train)
  y_test_pred=rf.predict(X_test)
  print(rf.feature_importances_)
  #变量重要性  
  plt.barh(range(len(rf.feature_importances_)), rf.feature_importances_,tick_label = ['PM','PM10','So2','No2','Co','O3'])#,tick_label = class_names
plt.title('The importance of params')
plt.show()

  from sklearn.metrics import mean_squared_error,explained_variance_score,mean_absolute_error,r2_score
  print ("决策树模型评估--训练集:")
  print ('训练r^2:',rf.score(X_train,y_train))
  print ('均方差',mean_squared_error(y_train,y_train_pred))
  print ('绝对差',mean_absolute_error(y_train,y_train_pred))
  print ('解释度',explained_variance_score(y_train,y_train_pred))
  print ("决策树模型评估--验证集:")
  print ('验证r^2:',rf.score(X_test,y_test))
  print ('均方差',mean_squared_error(y_test,y_test_pred))
  print ('绝对差',mean_absolute_error(y_test,y_test_pred))
  print ('解释度',explained_variance_score(y_test,y_test_pred))

  6.预测。

  data_pred=pd.read_csv('predict.csv',index_col=0,encoding='gb2312')
  index=data_pred.index
  print("预测:")
  print(index)
  y_pred=rf.predict(data_pred.values[:,4:10]) 
  #将预测结果保存到文件中
  result_reg=pd.DataFrame(index)
  result_reg['AQI']=y_pred
  result_reg.to_csv('result_reg_city.txt',encoding='gb2312')

参考博客:https://blog.csdn.net/ziyin_2013/article/details/85481574

猜你喜欢

转载自blog.csdn.net/Der_Jiang/article/details/89737580