kaggle1 - Decision Trees

Common pandas operations

  data["name1"]            选择data中名字为: name1中的一列

# Data preprocessing

  data.dropna(axis=0)  # drop missing values, row by row
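
In the Kaggle course this step is applied to the Melbourne housing data loaded with pandas; a brief sketch of that, with the CSV path assumed from the tutorial:

  import pandas as pd

  melbourne_data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')  # path assumed
  melbourne_data = melbourne_data.dropna(axis=0)  # keep only rows with no missing values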

melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']
X = melbourne_data[melbourne_features]
X.describe()  # summary statistics of the data
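
The target y used in the split below is never defined above; in the Kaggle tutorial the prediction target is the sale price, so a minimal sketch assuming that column name:

y = melbourne_data.Price  # prediction target (column name assumed from the tutorial)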
# Split the data

from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)  # fixing the random seed makes the split reproducible

# Train the model

  from sklearn.tree import DecisionTreeRegressor
  # Specify the model
  iowa_model = DecisionTreeRegressor(random_state=1)

  # Fit iowa_model with the training data.
  iowa_model.fit(train_X, train_y)

# Make predictions on the validation set

  # Predict with all validation observations
  val_predictions = iowa_model.predict(val_X)
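
To sanity-check the output it can help to put a few predictions next to the actual values; a small sketch:

  # compare the first few predictions with the true values
  print(val_predictions[:5])
  print(val_y.head().tolist())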

# Compute the model's error

  from sklearn.metrics import mean_absolute_error
  val_mae = mean_absolute_error(val_y, val_predictions)

  # print the validation MAE
  print(val_mae)

# Define a function to try different parameters and tune the model

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    # Train a tree limited to max_leaf_nodes leaves and return its validation MAE
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(train_X, train_y)
    preds_val = model.predict(val_X)
    mae = mean_absolute_error(val_y, preds_val)
    return mae
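
A typical way to use get_mae is to loop over a few candidate values of max_leaf_nodes and keep the one with the lowest validation MAE; a sketch (the candidate list is arbitrary):

candidate_max_leaf_nodes = [5, 25, 50, 100, 250, 500]
scores = {n: get_mae(n, train_X, val_X, train_y, val_y) for n in candidate_max_leaf_nodes}
best_tree_size = min(scores, key=scores.get)
print(best_tree_size, scores[best_tree_size])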

# Once the parameters are decided, you no longer need a validation set; train on all of the data! A sketch of that final step is below.
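
A minimal sketch of the final fit, assuming best_tree_size from the loop above:

  # retrain on the full dataset with the chosen tree size; no validation split needed now
  final_model = DecisionTreeRegressor(max_leaf_nodes=best_tree_size, random_state=1)
  final_model.fit(X, y)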

Reposted from www.cnblogs.com/liu247/p/11073910.html