TensorFlow basic models: the RandomForest (Random Forest) algorithm

For the principle of the Random Forest algorithm, refer to Part I: Random Forests. The data is again the MNIST dataset.

The code is shown below:

 

from __future__ import print_function

# Ignore all GPUs; TF random forests do not benefit from them.
import os
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources

os.environ["CUDA_VISIBLE_DEVICES"] = ""

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./tmp/data/", one_hot=False)

# Parameters
num_steps = 500      # total number of training steps
batch_size = 1024    # number of samples per batch
num_classes = 10     # 10 digits => 10 classes
num_features = 784   # each image is 28x28 pixels => 784 features
num_trees = 10
max_nodes = 1000

# Input data
X = tf.placeholder(tf.float32, shape=[None, num_features])
# Labels for a random forest are numeric class IDs
Y = tf.placeholder(tf.int32, shape=[None])

# Random forest hyperparameters
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()

# Build the random forest
forest_graph = tensor_forest.RandomForestGraphs(hparams)
# Get the training graph and the training loss
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)

# Measure accuracy
infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Initialize the variables and forest resources
init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))

# Start a TensorFlow session
sess = tf.Session()

# Run the initializer
sess.run(init_vars)

# Training
for i in range(1, num_steps + 1):
    # Prepare the data: fetch the next batch of images
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
    if i % 50 == 0 or i == 1:
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))

# Test the model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))