Linear SVM

For my own study notes; adapted from:
https://github.com/machinelearningmindset/TensorFlow-Course#why-use-tensorflow
SVM references:
https://blog.csdn.net/qq_30534935/article/details/83064552
https://blog.csdn.net/qq_30534935/article/details/83154969
https://blog.csdn.net/qq_30534935/article/details/83182659

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TensorFlow INFO/WARNING logs
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
import random
import sys
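
# Note: this script uses the TensorFlow 1.x graph API (placeholders, Session).
# On TensorFlow 2.x, one way to run it unchanged (an assumption about your
# environment, not part of the original) is the v1 compatibility shim:
#   import tensorflow.compat.v1 as tf
#   tf.disable_v2_behavior()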

delta = 1.0                  # hinge-loss margin
Reg_param = 1.0              # weight of the L2 regularization term
C_param = 0.1                # weight of the classification (hinge) term
batch_size = 32
num_steps = 1000
is_evaluation = True         # plot the decision boundary after training
initial_learning_rate = 0.1

def loss_fn(W,b,x_data,y_target):
  logits = tf.add(tf.matmul(x_data,W),b)  # decision values f(x) = xW + b (matmul: matrix product; add: addition)
  norm_term = tf.divide(tf.reduce_sum(tf.square(W)),2)  # L2 regularization term: ||W||^2 / 2
  classification_loss = tf.reduce_mean(tf.maximum(0.,tf.subtract(delta,tf.multiply(logits,y_target))))  # hinge loss: mean(max(0, delta - y*f(x)))
  total_loss = tf.add(tf.multiply(C_param,classification_loss),tf.multiply(Reg_param,norm_term))  # C * hinge + Reg * ||W||^2/2
  return total_loss
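
# Sanity check of the hinge term with plain NumPy (a hypothetical toy example,
# not part of the original script): max(0, delta - y*f(x)) averaged over a batch.
_W_toy = np.array([[0.5], [-0.25]])            # weights, shape [2,1]
_b_toy = 0.1
_X_toy = np.array([[1.0, 2.0], [3.0, -1.0]])   # two samples, two features
_y_toy = np.array([[1.0], [-1.0]])
_logits = _X_toy @ _W_toy + _b_toy             # [[0.1], [1.85]]
print(np.maximum(0.0, 1.0 - _y_toy * _logits).mean())  # 1.875, matches classification_loss for delta=1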

def inference_fn(W,b,x_data,y_target):
  prediction = tf.sign(tf.add(tf.matmul(x_data,W),b))  # sign: classify by the sign of f(x), giving +1/-1
  accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction,y_target),tf.float32))  # cast: bool -> float32, so the mean is the accuracy
  return accuracy
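
# The same rule in plain NumPy (made-up decision values, for illustration only):
# classify by the sign of f(x) and average the matches against the labels.
_pred = np.sign(np.array([[0.3], [-1.2]]))  # toy decision values -> signs
_labels = np.array([[1.0], [-1.0]])
print((_pred == _labels).mean())            # fraction of matching signs -> 1.0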

def next_batch_fn(x_train,y_train,num_samples=batch_size):
  index = np.random.choice(len(x_train),size=num_samples)  # np.random.choice: random sampling (with replacement by default)
  X_batch = x_train[index]
  y_batch = np.transpose([y_train[index]])  # labels as a column vector, shape [num_samples, 1]
  return X_batch,y_batch
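
# Because the sampling above is with replacement, a batch can contain duplicate
# rows. A sketch of a without-replacement variant (hypothetical helper, not
# used by the rest of the script):
def next_batch_no_replace(x_train, y_train, num_samples=batch_size):
  index = np.random.choice(len(x_train), size=num_samples, replace=False)
  return x_train[index], np.transpose([y_train[index]])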

iris = datasets.load_iris()  # load the iris dataset
X = iris.data[:,:2]  # keep only the first two features
y = np.array([1 if label==0 else -1 for label in iris.target])  # class 0 (setosa) -> +1, everything else -> -1

my_randoms = np.random.choice(X.shape[0],X.shape[0],replace=False)  # shuffled sample indices
train_indices = my_randoms[0:int(0.5*X.shape[0])]  # first half for training
test_indices = my_randoms[int(0.5*X.shape[0]):]    # second half for testing

x_train = X[train_indices]
y_train = y[train_indices]
x_test = X[test_indices]
y_test = y[test_indices]
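
# The manual shuffle-and-slice above is a plain 50/50 split; for reference, an
# equivalent split via scikit-learn (a sketch, not used by the rest of the script):
from sklearn.model_selection import train_test_split
x_train_alt, x_test_alt, y_train_alt, y_test_alt = train_test_split(X, y, test_size=0.5)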

x_data = tf.placeholder(shape=[None,X.shape[1]],dtype=tf.float32)  # batch of feature vectors
y_target = tf.placeholder(shape=[None,1],dtype=tf.float32)         # corresponding +1/-1 labels
W = tf.Variable(tf.random_normal(shape=[X.shape[1],1]))            # weight vector
bias = tf.Variable(tf.random_normal(shape=[1,1]))                  # scalar bias

total_loss = loss_fn(W,bias,x_data,y_target)
accuracy = inference_fn(W,bias,x_data,y_target)
train_op = tf.train.GradientDescentOptimizer(initial_learning_rate).minimize(total_loss)
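
# minimize() is shorthand for computing and then applying gradients; an
# expanded equivalent, useful if you want to inspect or clip the gradients
# (a sketch, not used by the rest of the script):
optimizer_expanded = tf.train.GradientDescentOptimizer(initial_learning_rate)
grads_and_vars = optimizer_expanded.compute_gradients(total_loss)
train_op_expanded = optimizer_expanded.apply_gradients(grads_and_vars)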

sess = tf.Session()
init = tf.global_variables_initializer()  # initialize W and bias
sess.run(init)

for step_idx in range(num_steps):
  X_batch,y_batch = next_batch_fn(x_train,y_train,num_samples=batch_size)
  _,loss_step = sess.run([train_op,total_loss],feed_dict={x_data:X_batch,y_target:y_batch})
  if (step_idx+1)%100==0:  # evaluate and report every 100 steps
    train_acc_step = sess.run(accuracy,feed_dict={x_data:x_train,y_target:np.transpose([y_train])})
    test_acc_step = sess.run(accuracy,feed_dict={x_data:x_test,y_target:np.transpose([y_test])})
    print('step #%d, loss=%.4f, training accuracy=%.2f%%, testing accuracy=%.2f%%'
          %(step_idx+1,loss_step,float(100*train_acc_step),float(100*test_acc_step)))  # %% prints a literal %

if is_evaluation:
  W_value = sess.run(W)
  print(W_value)
  [[w1],[w2]] = W_value
  [[b]] = sess.run(bias)
  x_line = [data[1] for data in X]  # second feature on the horizontal axis
  # decision boundary: w1*x0 + w2*x1 + b = 0  =>  x0 = -(w2*x1 + b)/w1
  line = [-w2/w1*i-b/w1 for i in x_line]
  positive_X,positive_y = [],[]
  negative_X,negative_y = [],[]
  for index,data in enumerate(X):  # enumerate: iterate with the running index
    if y[index] == 1:
      positive_X.append(data[1])
      positive_y.append(data[0])
    elif y[index] == -1:
      negative_X.append(data[1])
      negative_y.append(data[0])
    else:
      sys.exit("Invalid label!")
  plt.plot(positive_X,positive_y,'+',label='Positive')
  plt.plot(negative_X,negative_y,'o',label='Negative')
  plt.plot(x_line,line,'r-',label='Separator',linewidth=3)
  plt.legend(loc='best')
  plt.title('Linear SVM')
  plt.show()
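
# In a headless environment (no display) you could replace plt.show() with a
# save-to-file call, e.g. (hypothetical filename):
#   plt.savefig('linear_svm.png', dpi=150, bbox_inches='tight')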

Reposted from: blog.csdn.net/qq_30534935/article/details/89060638