tf.group, tf.tuple, tf.control_dependencies, RandomForest

tf.group

tf.group(
    *inputs,
    **kwargs
)

This function combines the operations passed to it into a single operation: when the returned op finishes, all of the input operations have finished.
The arguments to tf.group are individual operations, not a list (this is why inputs is prefixed with *; they must be passed one by one).
return: an op. Since an op has no output value, fetching it with sess.run yields None.

tf.tuple

tf.tuple(
    tensors,
    name=None,
    control_inputs=None
)

Arguments:
tensors: a list of tensors.
name: (optional) a name for the operation.
control_inputs: additional operations that must finish before the result is returned.
return: a list containing the same tensors as tensors; each element becomes available only after all of them (and all control_inputs) have been computed.

Code example for tf.group and tf.tuple

import tensorflow as tf
w = tf.Variable(1)
mul = tf.multiply(w, 2)
add = tf.add(w, 2)
group = tf.group(mul, add)    # note: passed as individual arguments
tuple = tf.tuple([mul, add])  # note: passed as a list
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run([mul, add, group, w]))
print(sess.run(tuple))

Output:

[2, 3, None, 1]
[2, 3]
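
The control_inputs argument of tf.tuple was described above but not exercised. Here is a minimal sketch (the names counter and inc are mine, not from the original): ops listed in control_inputs run as a side effect of evaluating the returned tensors.

import tensorflow as tf
counter = tf.Variable(0)
inc = tf.assign_add(counter, 1)        # side-effecting update
a, b = tf.constant(3), tf.constant(4)
gated = tf.tuple([a, b], control_inputs=[inc])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(gated))    # [3, 4]
print(sess.run(counter))  # 1: inc ran before the tuple's outputs were returned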

tf.identity()

tf.identity is an assignment-like operation; note that it is an op. y = tf.identity(x) has the same effect as y = x, but the former creates an Identity op in the graph, while the latter is only a Python assignment and adds no node to the graph.
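
A quick sketch of the difference (plain TF 1.x, nothing assumed beyond the text above):

import tensorflow as tf
x = tf.constant(5)
y_alias = x              # Python assignment: no new node in the graph
y_op = tf.identity(x)    # creates an Identity op in the graph
print(y_alias is x)      # True  -- y_alias is literally the same tensor
print(y_op is x)         # False -- y_op is the output of a distinct op
print(y_op.op.type)      # 'Identity'

This distinction is exactly why tf.identity matters inside tf.control_dependencies, as shown next.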

tf.control_dependencies():

tf.control_dependencies() is designed to control the computation graph: it imposes an execution order on selected computations.
For example, if we want to read a parameter's value after it has been updated, we can organize the code as follows.

Note:

The meaning of control_dependencies is: before the operations created inside the with block are executed (they must be ops, otherwise it does not work; here that is updated_weight = tf.identity(weight)), the operations listed in control_dependencies (here, opt) are executed first.

opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)  # any concrete optimizer works here
with tf.control_dependencies([opt]):  # note the syntax
    updated_weight = tf.identity(weight)
with tf.Session() as sess:
    tf.global_variables_initializer().run()  # this form only works inside the with block, where the default session is found
    sess.run(updated_weight, feed_dict={...})  # each run returns the weight after the update
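
The snippet above is a sketch (loss, weight, and the data fed through feed_dict are left unspecified). A self-contained runnable version, with a toy loss of my own choosing:

import tensorflow as tf
weight = tf.Variable(5.0)
loss = tf.square(weight)  # toy loss, minimized at weight == 0
opt = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.control_dependencies([opt]):
    updated_weight = tf.identity(weight)  # reads weight only after opt has run
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for _ in range(3):
        print(sess.run(updated_weight))   # 4.0, 3.2, 2.56 -- post-update values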

When what the tf.control_dependencies() block contains is not an op:

x = tf.Variable(0.0)

x_plus_1 = tf.assign_add(x, 1)  # the update that adds 1 to variable x; evaluating it runs the update op

with tf.control_dependencies([x_plus_1]):
    y = x  # this is merely a Python assignment, not an op, so the dependency has nothing to attach to
    
init = tf.global_variables_initializer()
with tf.Session() as session:
    init.run()
    for i in range(5):
        print(y.eval())

Output:

0.0
0.0
0.0
0.0
0.0

All five prints show 0.0: since y = x creates no op inside the with block, the dependency on x_plus_1 is never attached, and x is never incremented.

When what the tf.control_dependencies() block contains is an op:

x = tf.Variable(0.0)

x_plus_1 = tf.assign_add(x, 1)  # the update that adds 1 to variable x

with tf.control_dependencies([x_plus_1]):
    y = x
    op1 = tf.group(y)  # wrapping y in tf.group creates an op inside the with block
    
init = tf.global_variables_initializer()
with tf.Session() as session:
    init.run()
    for i in range(5):
        session.run(op1)  # op1 is an op, so running it triggers x_plus_1 and x is incremented
        print(y.eval())   # x has already been incremented, so y reads the updated value; x_plus_1 is not run again by this line

Output:

1.0
2.0
3.0
4.0
5.0
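
An equivalent fix, tying back to tf.identity from earlier: instead of wrapping y in tf.group, create the op directly with tf.identity inside the with block (same setup as above; assumes a fresh graph):

x = tf.Variable(0.0)
x_plus_1 = tf.assign_add(x, 1)
with tf.control_dependencies([x_plus_1]):
    y = tf.identity(x)   # an op, so x_plus_1 runs before every evaluation of y
init = tf.global_variables_initializer()
with tf.Session() as session:
    init.run()
    for i in range(5):
        print(y.eval())  # 1.0, 2.0, 3.0, 4.0, 5.0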

Implementing a RandomForest in TensorFlow

# -*- encoding:utf-8 -*-
import tensorflow as tf
from tensorflow.python.ops import resources
from tensorflow.contrib.tensor_forest.python import tensor_forest

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../../mnist_data/", one_hot=False)

num_steps = 500      # total training steps
batch_size = 1024    # samples per batch
num_features = 784   # 28x28 MNIST images
num_trees = 10
num_classes = 10
max_nodes = 1000     # maximum nodes per tree

X = tf.placeholder(tf.float32, shape=[None, num_features])
Y = tf.placeholder(tf.int32, shape=[None])


# Random forest hyper-parameters
hparams = tensor_forest.ForestHParams(num_classes=num_classes,
                                      num_features=num_features,
                                      num_trees=num_trees,
                                      max_nodes=max_nodes).fill()

# Build the forest, its training op, and its loss
forest_graph = tensor_forest.RandomForestGraphs(hparams)
train_op = forest_graph.training_graph(X, Y)
loss_op = forest_graph.training_loss(X, Y)

# Inference: infer_op holds per-class probabilities
infer_op, _, _ = forest_graph.inference_graph(X)
correct_prediction = tf.equal(tf.argmax(infer_op, 1), tf.cast(Y, tf.int64))
accuracy_op = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# tensor_forest uses shared resources, so both initializers are combined with tf.group
init_vars = tf.group(tf.global_variables_initializer(),
                     resources.initialize_resources(resources.shared_resources()))
sess = tf.Session()
sess.run(init_vars)


# Training
for i in range(1, num_steps + 1):
    # Prepare data: get the next batch of MNIST images and labels
    batch_x, batch_y = mnist.train.next_batch(batch_size)
    _, l = sess.run([train_op, loss_op], feed_dict={X: batch_x, Y: batch_y})
    if i % 50 == 0 or i == 1:
        acc = sess.run(accuracy_op, feed_dict={X: batch_x, Y: batch_y})
        print('Step %i, Loss: %f, Acc: %f' % (i, l, acc))

# Test Model
test_x, test_y = mnist.test.images, mnist.test.labels
print("Test Accuracy:", sess.run(accuracy_op, feed_dict={X: test_x, Y: test_y}))

Output:

Step 1, Loss: -1.000000, Acc: 0.431641
Step 50, Loss: -254.800003, Acc: 0.870117
Step 100, Loss: -539.599976, Acc: 0.881836
Step 150, Loss: -829.599976, Acc: 0.911133
Step 200, Loss: -1001.000000, Acc: 0.921875
Step 250, Loss: -1001.000000, Acc: 0.922852
Step 300, Loss: -1001.000000, Acc: 0.928711
Step 350, Loss: -1001.000000, Acc: 0.924805
Step 400, Loss: -1001.000000, Acc: 0.911133
Step 450, Loss: -1001.000000, Acc: 0.900391
Step 500, Loss: -1001.000000, Acc: 0.921875
Test Accuracy: 0.9204
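
A small follow-up sketch (assuming sess, infer_op, X, and mnist from the script above are still alive): infer_op holds per-class probabilities, so class predictions come from an argmax over axis 1, just as in the accuracy computation.

import numpy as np
probs = sess.run(infer_op, feed_dict={X: mnist.test.images[:5]})
print(np.argmax(probs, axis=1))  # predicted class ids for the first 5 test images
print(mnist.test.labels[:5])     # ground-truth labels for comparison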

References:
tensorflow学习笔记(四十一):control dependencies (TensorFlow study notes 41: control dependencies)
随机森林实现 (Random forest implementation)

Reprinted from blog.csdn.net/qq_32806793/article/details/83375757