kaggle1 - Decision Tree

pandas common operations

  data["name1"]  # selects the single column named "name1" from the DataFrame `data`

# Data preprocessing

  data.dropna(axis=0)  # drop rows that contain missing values

# Select the predictor columns and inspect them.
melbourne_features = ['Rooms', 'Bathroom', 'Landsize', 'Lattitude', 'Longtitude']
X = melbourne_data[melbourne_features]
X.describe()  # summary statistics of the selected features

# Partition the data into training and validation sets.
from sklearn.model_selection import train_test_split
# A fixed random_state makes the split (and therefore the results) reproducible.
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=0)

# training data

  # The model used below is DecisionTreeRegressor, which lives in
  # sklearn.tree — the original import of train_test_split was wrong here.
  from sklearn.tree import DecisionTreeRegressor

  # Specify the model; random_state=1 makes training deterministic.
  iowa_model = DecisionTreeRegressor(random_state=1)

  # Fit iowa_model with the training data.
  iowa_model.fit(train_X, train_y)

# Make predictions on the validation data

  # Predict the target for every validation observation with the fitted
  # model; `val_predictions` is consumed below when computing the MAE.
  val_predictions = iowa_model.predict(val_X)

# Calculate the model's error

  from sklearn.metrics import mean_absolute_error

  # Mean absolute error of the predictions on the validation set.
  # sklearn's convention is (y_true, y_pred); MAE is symmetric, so the
  # value is identical to the original (val_predictions, val_y) order.
  val_mae = mean_absolute_error(val_y, val_predictions)

  # Report the validation MAE.
  print(val_mae)

# Define a method to try different parameters to optimize your model

def get_mae(max_leaf_nodes, train_X, val_X, train_y, val_y):
    """Train a decision tree with `max_leaf_nodes` leaves and return its
    mean absolute error on the validation set.

    Parameters
    ----------
    max_leaf_nodes : int
        Maximum number of leaf nodes for the tree (the tuning knob).
    train_X, train_y
        Training features and target.
    val_X, val_y
        Validation features and target used to score the model.

    Returns
    -------
    float
        Mean absolute error of the tree's predictions on (val_X, val_y).
    """
    # NOTE: the original body was not indented under the def, which is an
    # IndentationError in Python; the logic itself is unchanged.
    model = DecisionTreeRegressor(max_leaf_nodes=max_leaf_nodes, random_state=0)
    model.fit(train_X, train_y)
    preds_val = model.predict(val_X)
    mae = mean_absolute_error(val_y, preds_val)
    return mae

# Final model: once the best parameters are chosen, a validation split is no longer needed — retrain on all of the data.

Related posts

Origin www.cnblogs.com/liu247/p/11073910.html