TensorFlow (3): Logistic Regression

Logistic Regression
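Logistic regression models the probability that a sample belongs to the positive class as sigmoid(w·x + b), where sigmoid(z) = 1 / (1 + e^(-z)), and fits w and b by minimizing the cross-entropy loss. The script below generates 2-D points labeled by whether they fall below the line x0 + x1 = 1, trains such a model with full-batch gradient descent (TensorFlow 1.x graph/session style), and plots the learned decision boundary together with the cost and accuracy curves.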

Code

import tensorflow as tf 
import numpy as np 
import matplotlib.pyplot as plt 

# generate data

data_size = 500
x_data = np.random.rand(data_size, 2)
# zero-mean noise, roughly in [-0.25, 0.25], used to jitter the class boundary
noise = np.random.rand(data_size)
noise = (noise - np.mean(noise)) * 0.5
# label 1 when the point lies below the jittered line x0 + x1 = 1
y_data = ((1 - x_data[:, 1] + noise) > x_data[:, 0]).astype(np.float32)

boundary = int(0.7 * data_size)  # 70/30 train/test split
x_train = x_data[:boundary]
y_train = y_data[:boundary]
x_test = x_data[boundary:]
y_test = y_data[boundary:]

x_true = x_data[np.where(y_data == 1)]   # positive-class points, for plotting
x_false = x_data[np.where(y_data == 0)]  # negative-class points

# modeling

def sigmoid(x):
	# numerically the same as tf.sigmoid(x)
	return 1.0 / (1 + tf.exp(-x))

def accuracy(threshold = 0.5):
	# relies on the globals y (predicted probabilities, shape [batch, 1]) and
	# y_ (true labels, shape [batch]); y_[:, None] adds an axis so the shapes match
	return tf.reduce_mean(tf.cast(tf.equal(tf.cast(y > threshold, tf.float32), y_[:, None]), tf.float32))

my_w = 0  # will hold the learned weights after training
my_b = 0  # will hold the learned bias
learning_rate = 1.0
nb_epoch = 100
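# Note: the code below uses the TensorFlow 1.x graph/session API (tf.placeholder,
# tf.Session, tf.train.GradientDescentOptimizer). Under TensorFlow 2.x the same
# symbols are available through tf.compat.v1, after calling
# tf.compat.v1.disable_eager_execution().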
with tf.Graph().as_default():
	x = tf.placeholder(dtype = tf.float32, shape = [None, 2])
	w = tf.Variable(tf.random_uniform([2, 1]))  # weights, shape [2, 1] (float32 by default)
	b = tf.Variable(tf.random_uniform([]))      # scalar bias
	y = sigmoid(tf.matmul(x, w) + b)  # predicted probability of class 1, shape [batch, 1]
	y_ = tf.placeholder(dtype = tf.float32, shape = [None])  # true labels

	# cross-entropy loss; clip probabilities away from 0 and 1 to avoid log(0)
	y_clipped = tf.clip_by_value(y, 1e-7, 1 - 1e-7)
	cost = -tf.reduce_mean(y_[:, None] * tf.log(y_clipped) + (1 - y_[:, None]) * tf.log(1 - y_clipped))
	# full-batch gradient descent on the cost
	train = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

	train_cost_history = []
	train_acc_history = []
	test_cost_history = []
	test_acc_history = []
	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())
		for i in range(nb_epoch):
			# print("epoch %d" % i)
			# one full-batch gradient step; also record training cost and accuracy
			_, c, acc = sess.run([train, cost, accuracy()], feed_dict={x: x_train, y_: y_train})
			train_cost_history.append(c)
			train_acc_history.append(acc)
			# evaluate on the held-out test set (no train op, so no parameter update)
			c, acc = sess.run([cost, accuracy()], feed_dict={x: x_test, y_: y_test})
			test_cost_history.append(c)
			test_acc_history.append(acc)
		my_w = w.eval()  # snapshot the learned parameters while the session is still open
		my_b = b.eval()


# visualize

plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.title('Data')
plt.scatter(x_true[:, 0], x_true[:, 1], 15.0, 'b', marker = 'x')
plt.scatter(x_false[:, 0], x_false[:, 1], 15.0, 'g', marker = 'o')

# decision boundary: w0*x0 + w1*x1 + b = 0, i.e. x1 = -(w0/w1)*x0 - b/w1
k = -my_w[0, 0] / my_w[1, 0]
b = -my_b / my_w[1, 0]
plt.plot(np.linspace(0, 1, 20), np.linspace(0, 1, 20) * k + b, 'r--')

plt.subplot(1, 2, 2)
plt.title('Cost / Accuracy')
plt.plot(train_cost_history, '--')
plt.plot(train_acc_history, '-')
plt.plot(test_cost_history, '--')
plt.plot(test_acc_history, '-')
plt.legend(['train cost', 'train acc', 'test cost', 'test acc'])

plt.show()
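
Once training has finished, the learned parameters live in the plain NumPy values my_w and my_b, so the model can be applied without TensorFlow at all. A minimal sketch (the predict helper below is not part of the original script; it just illustrates how the learned parameters are used):

def predict(points, w, b, threshold = 0.5):
	# points: array of shape [n, 2]; returns hard 0/1 labels
	probs = 1.0 / (1.0 + np.exp(-(points @ w + b)))  # shape [n, 1]
	return (probs[:, 0] > threshold).astype(np.float32)

# points well below / above the line x0 + x1 = 1 should map to 1 / 0
print(predict(np.array([[0.1, 0.1], [0.9, 0.9]]), my_w, my_b))  # expect roughly [1. 0.]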

Results

(Figure: left, the generated data with the learned decision boundary; right, training and test cost/accuracy over epochs.)


Reposted from blog.csdn.net/vinceee__/article/details/88113724