import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

BATCH_SIZE = 30  # number of samples fed to the network per training step
seed = 2

################# 1. Build the dataset and plot it as a scatter chart #################
# Generate random numbers based on seed.
rng = np.random.RandomState(seed)
X = rng.randn(300, 2)  # 300 rows, 2 columns, drawn from a normal distribution with mean 0 and variance 1
Y_ = [int(x0 * x0 + x1 * x1 < 2) for (x0, x1) in X]  # label 1 if the squared sum < 2, otherwise 0, and assign to Y_
Y_c = [['red' if y else 'blue'] for y in Y_]  # walk through Y_: nonzero labels become 'red', zero labels 'blue'
X = np.vstack(X).reshape(-1, 2)    # shape X as n rows, 2 columns
Y_ = np.vstack(Y_).reshape(-1, 1)  # shape Y_ as n rows, 1 column
print(X)
print(Y_)
print(Y_c)
plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))  # take the matching color for each point from Y_c
plt.show()

################# 2. Define the network's inputs, parameters, and forward pass #################
def get_weight(shape, regularizer):
    # Convenience helper that builds a weight tensor from its shape and a regularization weight.
    w = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    # Record this weight's L2 penalty in the 'losses' collection.
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(regularizer)(w))
    return w

def get_bias(shape):
    b = tf.Variable(tf.constant(0.01, shape=shape))
    return b

x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))

w1 = get_weight([2, 11], 0.01)  # w1 is 2x11; note the shape is given as a list; regularization weight 0.01
b1 = get_bias([11])
y1 = tf.nn.relu(tf.matmul(x, w1) + b1)

w2 = get_weight([11, 1], 0.01)
b2 = get_bias([1])
y = tf.matmul(y1, w2) + b2  # the output layer has no activation function

################# 3. Define the loss functions #################
loss_mse = tf.reduce_mean(tf.square(y - y_))  # mean-squared-error loss
loss_total = loss_mse + tf.add_n(tf.get_collection('losses'))  # MSE loss plus the L2 penalty of every regularized weight

################# 4.1 Backpropagation without regularization #################
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_mse)  # this loss excludes the regularization term

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 40000
    for i in range(STEPS):
        start = (i * BATCH_SIZE) % 300  # cycle through the 300 samples in fixed-order mini-batches
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        if i % 2000 == 0:
            loss_mse_v = sess.run(loss_mse, feed_dict={x: X, y_: Y_})  # print the loss on the full dataset every 2000 steps
            print("After %d steps, loss is: %f" % (i, loss_mse_v))
    xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]  # generate a grid of coordinate points
    grid = np.c_[xx.ravel(), yy.ravel()]  # flatten xx and yy and stack them into a 2-column matrix of grid points
    probs = sess.run(y, feed_dict={x: grid})  # feed the grid points to the network; probs holds its outputs
    probs = probs.reshape(xx.shape)  # reshape probs to match xx
    print("w1:", sess.run(w1))
    print("b1:", sess.run(b1))
    print("w2:", sess.run(w2))
    print("b2:", sess.run(b2))

plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
plt.contour(xx, yy, probs, levels=[.5])  # draw the contour through every point where the output equals 0.5
plt.show()

################# 4.2 Backpropagation with regularization #################
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss_total)  # this loss includes the regularization term

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    STEPS = 40000
    for i in range(STEPS):
        start = (i * BATCH_SIZE) % 300
        end = start + BATCH_SIZE
        sess.run(train_step, feed_dict={x: X[start:end], y_: Y_[start:end]})
        if i % 2000 == 0:
            loss_v = sess.run(loss_total, feed_dict={x: X, y_: Y_})
            print("After %d steps, loss is: %f" % (i, loss_v))
    xx, yy = np.mgrid[-3:3:0.01, -3:3:0.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)
    print("w1:", sess.run(w1))
    print("b1:", sess.run(b1))
    print("w2:", sess.run(w2))
    print("b2:", sess.run(b2))

plt.scatter(X[:, 0], X[:, 1], c=np.squeeze(Y_c))
plt.contour(xx, yy, probs, levels=[.5])  # draw the contour through every point where the output equals 0.5
plt.show()
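
################# Aside: what the regularizer actually computes #################
# A minimal sketch, not part of the original lesson: in TF 1.x,
# tf.contrib.layers.l2_regularizer(scale)(w) evaluates to
# scale * tf.nn.l2_loss(w), i.e. scale * sum(w**2) / 2, which is exactly
# the per-weight term collected into 'losses' above. The tiny check below
# verifies this numerically.
w_demo = tf.constant([[1.0, 2.0], [3.0, 4.0]])
reg_a = tf.contrib.layers.l2_regularizer(0.01)(w_demo)
reg_b = 0.01 * tf.nn.l2_loss(w_demo)  # l2_loss(w) = sum(w**2) / 2
with tf.Session() as sess:
    print(sess.run([reg_a, reg_b]))  # both print 0.15: sum(w**2) = 30, so 0.01 * 30 / 2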
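
################# Aside: the same model in modern Keras (sketch only) #################
# A hedged sketch, assuming TF 2.x with tf.keras available; it is commented
# out because the script above relies on the TF 1.x graph/session API, and
# epochs=100 is an illustrative choice, not a value from the lesson. Note
# that keras.regularizers.l2(l) penalizes l * sum(w**2) with no division by
# 2, so 0.005 here corresponds to the 0.01 used with l2_regularizer above.
# from tensorflow import keras
# model = keras.Sequential([
#     keras.layers.Dense(11, activation='relu', input_shape=(2,),
#                        kernel_regularizer=keras.regularizers.l2(0.005)),
#     keras.layers.Dense(1, kernel_regularizer=keras.regularizers.l2(0.005)),
# ])
# model.compile(optimizer=keras.optimizers.Adam(1e-4), loss='mse')
# model.fit(X, Y_, batch_size=BATCH_SIZE, epochs=100, verbose=0)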