A Linear Support Vector Machine with TensorFlow

1. Import the required libraries.

import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets

2. Create a session and load the data

sess = tf.Session()
iris = datasets.load_iris()

# Use only sepal length (column 0) and petal width (column 3) as features
x_vals = np.array([[x[0], x[3]] for x in iris.data])
# Label I. setosa (target 0) as +1 and the other species as -1
y_vals = np.array([1 if y == 0 else -1 for y in iris.target])
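
If you want to confirm which columns are being used, the dataset object exposes the feature and class names directly (this check is an addition, not part of the original post):

print(iris.feature_names)
# ['sepal length (cm)', 'sepal width (cm)', 'petal length (cm)', 'petal width (cm)']
print(iris.target_names)
# ['setosa' 'versicolor' 'virginica']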

3. Split the data into training (80%) and test (20%) sets

train_indices = np.random.choice(len(x_vals),round(len(x_vals)*0.8), replace = False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))

x_vals_train = x_vals[train_indices]
y_vals_train = y_vals[train_indices]

x_vals_test = x_vals[test_indices]
y_vals_test = y_vals[test_indices]
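
The same 80/20 split could also be done with scikit-learn's train_test_split; this is an equivalent alternative, not part of the original post:

from sklearn.model_selection import train_test_split

x_vals_train, x_vals_test, y_vals_train, y_vals_test = train_test_split(
    x_vals, y_vals, test_size=0.2)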

4. Set the learning rate and batch size, create the placeholders, and declare the model variables (a 2x1 weight vector A and a scalar bias b)

learning_rate = 0.1
batch_size = 100
x_data = tf.placeholder(shape=[None,2], dtype = tf.float32)
y_target = tf.placeholder(shape =[None,1],dtype = tf.float32)

A = tf.Variable(tf.random_normal(shape=[2,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))

5. Declare the model output (the decision function x·A - b)

model_output = tf.subtract(tf.matmul(x_data,A),b)

6. Declare the loss function. For a soft-margin SVM this is the mean hinge loss max(0, 1 - y*(x·A - b)) plus an L2 penalty on A weighted by alpha.

# L2 norm of the weight vector (regularization term)
l2_norm = tf.reduce_sum(tf.square(A))
# Soft-margin parameter: weight of the regularization term
alpha = tf.constant([0.1])
# Hinge loss: mean of max(0, 1 - y * (x·A - b))
classification_term = tf.reduce_mean(tf.maximum(0., tf.subtract(1., tf.multiply(model_output, y_target))))

# Total loss = hinge loss + alpha * ||A||^2
loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))
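
To make the loss concrete, here is a NumPy-only sketch of the same computation; svm_loss is a helper introduced here for illustration, not part of the original post:

def svm_loss(A, b, x, y, alpha=0.1):
    # x: (n, 2) features, y: (n, 1) labels in {-1, +1}, A: (2, 1), b: (1, 1)
    output = x.dot(A) - b                            # decision values, shape (n, 1)
    hinge = np.maximum(0., 1. - y * output).mean()   # soft-margin hinge term
    return hinge + alpha * np.sum(A ** 2)            # plus L2 regularization

# Example (after the training loop in step 8):
# svm_loss(sess.run(A), sess.run(b), x_vals_train, np.transpose([y_vals_train]))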

7. Declare the prediction and accuracy functions

prediction = tf.sign(model_output)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target),tf.float32))
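
For intuition, the same prediction and accuracy can be computed outside the graph once the training loop in step 8 has run; A_val and b_val below are hypothetical names for the trained values of A and b (e.g. from sess.run(A) and sess.run(b)):

pred = np.sign(x_vals_test.dot(A_val) - b_val)      # shape (n, 1), values in {-1, 0, +1}
acc = np.mean(pred == y_vals_test.reshape(-1, 1))   # fraction of correct +/-1 predictions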

8. Declare the optimizer, initialize the variables, and run the training loop

my_opt = tf.train.GradientDescentOptimizer(learning_rate)
train_step = my_opt.minimize(loss)

init = tf.global_variables_initializer()
sess.run(init)

loss_vec = []
train_accuracy = []
test_accuracy = []

for i in range(500):
    # Draw a random training batch
    rand_index = np.random.choice(len(x_vals_train), size=batch_size)
    rand_x = x_vals_train[rand_index]
    rand_y = np.transpose([y_vals_train[rand_index]])
    # Run one optimization step and record the batch loss
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)

    # Track accuracy on the full training and test sets
    train_acc_temp = sess.run(accuracy, feed_dict={x_data: x_vals_train, y_target: np.transpose([y_vals_train])})
    train_accuracy.append(train_acc_temp)

    test_acc_temp = sess.run(accuracy, feed_dict={x_data: x_vals_test, y_target: np.transpose([y_vals_test])})
    test_accuracy.append(test_acc_temp)

# Extract the trained coefficients a1, a2 and the bias b
[[a1], [a2]] = sess.run(A)
[[b]] = sess.run(b)
slope = -a2/a1
y_intercept = b/a1

# Evaluate the separating line at every petal width value (the x-axis)
x1_vals = [d[1] for d in x_vals]
best_fit = []
for i in x1_vals:
    best_fit.append(slope*i + y_intercept)

# Split the points by class for plotting: x = petal width, y = sepal length
setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == 1]
setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == 1]

not_setosa_x = [d[1] for i, d in enumerate(x_vals) if y_vals[i] == -1]
not_setosa_y = [d[0] for i, d in enumerate(x_vals) if y_vals[i] == -1]
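
Where slope and y_intercept come from: on the decision boundary the decision function is zero, i.e. a1*sepal_length + a2*petal_width - b = 0. Solving for sepal_length (the y-axis of the plot below) gives sepal_length = -(a2/a1)*petal_width + b/a1, which is exactly the slope and intercept computed above.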

9. Plot the results

plt.plot(setosa_x,setosa_y, 'o', label = 'I.setosa')
plt.plot(not_setosa_x,not_setosa_y, 'o', label = 'Not.setosa')
plt.plot(x1_vals, best_fit, 'r-', label = 'Linear Separator', linewidth=3)
plt.ylim([0,10])

plt.legend(loc = 'lower right')
plt.title('Sepal Length vs Petal Width')
plt.xlabel('Petal Width')
plt.ylabel('Sepal Length')
plt.show()

plt.plot(train_accuracy, 'k-', label='Training Accuracy')
plt.plot(test_accuracy, 'r-', label='Test Accuracy')
plt.title('Train and Test Set Accuracies')
plt.legend(loc='lower right')
plt.show()

plt.plot(loss_vec, 'k-')
plt.show()

Reposted from blog.csdn.net/moge19/article/details/82635164