TensorFlow (七): Estimator

简介

之前用 TensorFlow 写的神经网络都比较裸, 这次介绍的 Estimator 则是对神经网络的一种封装.

代码

import tensorflow as tf 
import numpy as np 
import tensorflow.contrib.slim as slim 
import mnist_loader as ml 
from tensorflow.estimator import DNNClassifier
from tensorflow.estimator.inputs import numpy_input_fn


# Load MNIST images and labels from local files.
x_train, y_train = ml.load_mnist("./MNIST_DATA", "train")
x_test, y_test = ml.load_mnist("./MNIST_DATA", "t10k")

# Scale pixel values into [0, 1]; the classifier expects int32 labels.
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = y_train.astype(np.int32), y_test.astype(np.int32)

# Input functions wrap the numpy arrays as batched (features, labels)
# iterators.  num_epochs=None repeats the data indefinitely, so training
# length is bounded only by the `steps` argument of train().
train_input_fn = numpy_input_fn(
    x={"x": x_train},
    y=y_train,
    shuffle=True,
    num_epochs=None)

# Evaluation makes a single, deterministic pass over the test set.
test_input_fn = numpy_input_fn(
    x={"x": x_test},
    y=y_test,
    shuffle=False)

# Prediction uses only the first 100 test images; no labels are needed.
pred_input_fn = numpy_input_fn(
    x={"x": x_test[:100]},
    shuffle=False)

# A single dense feature column holding the flattened 28x28 image.
feature_columns = [tf.feature_column.numeric_column("x", shape=[784])]

# Canned estimator: two ReLU hidden layers, 10 output classes.
model = DNNClassifier(
    feature_columns=feature_columns,
    hidden_units=[64, 32],
    activation_fn=tf.nn.relu,
    n_classes=10)

model.train(input_fn=train_input_fn, steps=2000)
print(model.evaluate(input_fn=test_input_fn))

# predict() yields one dict per sample; "classes" carries the predicted label.
predicted = model.predict(input_fn=pred_input_fn)
cls = np.array([p["classes"] for p in predicted], dtype=np.int32).squeeze()
print(np.mean(np.equal(cls, y_test[:100]).astype(np.int32)))

可以看到其中一个变化是, 所有的输入都变成了一个 input_function, 这样做的好处是将原本的数组形式变成了一种类似于迭代器的形式, 从而为下一篇要讲到的 TFRecord 提供了编程基础.
还有一个变化是, 神经网络被封装了, 在这个代码中神经网络使用的是官方的API, 在下文将介绍如何使用自己的逻辑构建神经网络.

自定义 Model

import tensorflow as tf 
import numpy as np 
import tensorflow.contrib.slim as slim 
import mnist_loader as ml 
from tensorflow.estimator.inputs import numpy_input_fn

def model_fn(features, labels, mode, params = None):
	"""Custom Estimator model function: a small conv net for MNIST.

	Args:
		features: dict of input tensors; "x" holds flattened 28x28 images.
		labels: integer class labels (None in PREDICT mode).
		mode: one of tf.estimator.ModeKeys (TRAIN / EVAL / PREDICT).
		params: optional hyper-parameter dict; unused here.

	Returns:
		A tf.estimator.EstimatorSpec configured for the given mode.
	"""
	# conv -> flatten -> dense -> logits
	images = tf.reshape(features["x"], [-1, 28, 28, 1])
	hidden = slim.conv2d(images, 16, [3, 3])
	hidden = slim.flatten(hidden)
	hidden = slim.fully_connected(hidden, 32)
	logits = slim.fully_connected(hidden, 10, activation_fn = None)
	predicted_class = tf.argmax(tf.nn.softmax(logits), axis = 1)

	# PREDICT only needs class ids; labels/loss are unavailable there.
	if mode == tf.estimator.ModeKeys.PREDICT:
		return tf.estimator.EstimatorSpec(mode = mode, predictions = predicted_class)

	# TRAIN and EVAL share the loss; EVAL additionally reports accuracy.
	loss = tf.reduce_mean(
		tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels))
	train_op = tf.train.AdamOptimizer(0.1).minimize(
		loss, global_step = tf.train.get_global_step())
	return tf.estimator.EstimatorSpec(
		mode = mode,
		loss = loss,
		train_op = train_op,
		eval_metric_ops = {"accuracy": tf.metrics.accuracy(predicted_class, labels)})

# Load MNIST images and labels from local files.
x_train, y_train = ml.load_mnist("./MNIST_DATA", "train")
x_test, y_test = ml.load_mnist("./MNIST_DATA", "t10k")

# Normalize pixels to [0, 1] and cast labels to int32 for the loss op.
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = y_train.astype(np.int32), y_test.astype(np.int32)

# Training input repeats forever (num_epochs=None); `steps` bounds training.
train_input_fn = numpy_input_fn(
    x={"x": x_train},
    y=y_train,
    shuffle=True,
    num_epochs=None)

# One deterministic pass over the full test set for evaluation.
test_input_fn = numpy_input_fn(
    x={"x": x_test},
    y=y_test,
    shuffle=False)

# Predict on the first 100 test images only.
pred_input_fn = numpy_input_fn(
    x={"x": x_test[:100]},
    shuffle=False)

# Build an Estimator around the custom model_fn defined earlier in the file.
model = tf.estimator.Estimator(model_fn=model_fn)

model.train(input_fn=train_input_fn, steps=100)
print(model.evaluate(input_fn=test_input_fn))

# This model_fn returns bare class ids in PREDICT mode, so each yielded
# prediction is already a label rather than a dict keyed by "classes".
predicted = model.predict(input_fn=pred_input_fn)
cls = np.array(list(predicted), dtype=np.int32).squeeze()
print(np.mean(np.equal(cls, y_test[:100]).astype(np.int32)))

猜你喜欢

转载自blog.csdn.net/vinceee__/article/details/88410921