DBN with a custom image input source (Python 3, runs as-is; personally tested)
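
This post adapts a standard TensorFlow DBN example (the commented-out MNIST loader is still visible below) to a custom image input pipeline: it decodes 125×125 RGB images from a train.tfrecords file, greedily pre-trains a stack of RBMs (500 → 200 → 50 hidden units) with contrastive divergence, then fine-tunes a feed-forward network initialized from the RBM weights and biases. Note that the code targets TensorFlow 1.x (tf.placeholder, queue runners).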

Code

#urllib is used to download the utils file from deeplearning.net
import urllib.request
response = urllib.request.urlopen('http://deeplearning.net/tutorial/code/utils.py')
content = response.read()
target = open('utils.py', 'wb+')
target.write(content)
target.close()
#Import the math function for calculations
import math
#Tensorflow library. Used to implement machine learning models
import tensorflow as tf
#Numpy contains helpful functions for efficient mathematical calculations
import numpy as np
#Image library for image manipulation
from PIL import Image
#OS module for file handling (the downloaded utils.py is not imported by this script)
import os


def read_and_decode(filename):
    # Build a filename queue from the file name
    filename_queue = tf.train.string_input_producer([filename])

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)   # Returns the file name and the serialized record
    features = tf.parse_single_example(serialized_example,
                                       features={
                                           'label': tf.FixedLenFeature([], tf.string),
                                           'img_raw': tf.FixedLenFeature([], tf.string),
                                       })

    # Decode the raw bytes into a 125x125 RGB image and scale to [-0.5, 0.5]
    img = tf.decode_raw(features['img_raw'], tf.uint8)
    img = tf.reshape(img, [125, 125, 3])
    img = tf.cast(img, tf.float32) * (1. / 255) - 0.5

    # The label is stored the same way: a raw 125x125 RGB image
    label = tf.decode_raw(features['label'], tf.uint8)
    label = tf.reshape(label, [125, 125, 3])
    label = tf.cast(label, tf.float32) * (1. / 255) - 0.5

    return img, label
# Read a shuffled batch of training images/labels from the TFRecord file
img, label = read_and_decode("train.tfrecords")
img_batch, label_batch = tf.train.shuffle_batch([img, label],
                                                batch_size=3, capacity=2000,
                                                min_after_dequeue=1000)
print(img)
init = tf.global_variables_initializer()
print(img_batch)
with tf.Session() as sess:
    sess.run(init)
    threads = tf.train.start_queue_runners(sess=sess)
    for i in range(1):
        val, lab = sess.run([img_batch, label_batch])
        # val and lab can be post-processed here as needed
        #lab = to_categorical(lab, 12)
        # Flatten each image into a single 125*125*3 vector
        val = val.reshape((-1, (125 * 125 * 3)))
        lab = lab.reshape((-1, (125 * 125 * 3)))
        trX = val
        trY = lab

'''
# Load the MNIST data instead (the original tutorial's input, kept commented out)

#Getting the MNIST data provided by Tensorflow
old_v = tf.compat.v1.logging.get_verbosity()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.examples.tutorials.mnist import input_data

#Loading in the mnist data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
tf.compat.v1.logging.set_verbosity(old_v)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images,\
    mnist.test.labels


'''


# Build the RBM layers

#Class that defines the behavior of the RBM
class RBM(object):

    def __init__(self, input_size, output_size):
        #Defining the hyperparameters
        self._input_size = input_size #Size of input
        self._output_size = output_size #Size of output
        self.epochs = 5 #Number of training epochs
        self.learning_rate = 1.0 #The step size used in gradient descent
        self.batchsize = 100 #How much data is used for training per sub-iteration

        #Initializing weights and biases as matrices full of zeroes
        self.w = np.zeros([input_size, output_size], np.float32) #Creates and initializes the weights with 0
        self.hb = np.zeros([output_size], np.float32) #Creates and initializes the hidden biases with 0
        self.vb = np.zeros([input_size], np.float32) #Creates and initializes the visible biases with 0


    #Fits the result from the weighted visible layer plus the bias into a sigmoid curve
    def prob_h_given_v(self, visible, w, hb):
        #Sigmoid
        return tf.nn.sigmoid(tf.matmul(visible, w) + hb)

    #Fits the result from the weighted hidden layer plus the bias into a sigmoid curve
    def prob_v_given_h(self, hidden, w, vb):
        return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(w)) + vb)

    #Draw Bernoulli samples: 1 where a uniform draw falls below the probability, else 0
    def sample_prob(self, probs):
        return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))

    #Training method for the model (CD-1 contrastive divergence)
    def train(self, X):
        #Create the placeholders for our parameters
        _w = tf.placeholder("float", [self._input_size, self._output_size])
        _hb = tf.placeholder("float", [self._output_size])
        _vb = tf.placeholder("float", [self._input_size])

        prv_w = np.zeros([self._input_size, self._output_size], np.float32) #Creates and initializes the weights with 0
        prv_hb = np.zeros([self._output_size], np.float32) #Creates and initializes the hidden biases with 0
        prv_vb = np.zeros([self._input_size], np.float32) #Creates and initializes the visible biases with 0

        print("_w", _w.shape)
        print("_hb", _hb.shape)
        print("_vb", _vb.shape)
        print("prv_w", prv_w.shape)
        print("prv_hb", prv_hb.shape)
        print("prv_vb", prv_vb.shape)


        cur_w = np.zeros([self._input_size, self._output_size], np.float32)
        cur_hb = np.zeros([self._output_size], np.float32)
        cur_vb = np.zeros([self._input_size], np.float32)
        v0 = tf.placeholder("float", [None, self._input_size])

        #Initialize with sample probabilities
        h0 = self.sample_prob(self.prob_h_given_v(v0, _w, _hb))
        v1 = self.sample_prob(self.prob_v_given_h(h0, _w, _vb))
        h1 = self.prob_h_given_v(v1, _w, _hb)

        #Create the gradients
        positive_grad = tf.matmul(tf.transpose(v0), h0)
        negative_grad = tf.matmul(tf.transpose(v1), h1)

        #Update rules for the weights and biases
        update_w = _w + self.learning_rate * (positive_grad - negative_grad) / tf.to_float(tf.shape(v0)[0])
        update_vb = _vb + self.learning_rate * tf.reduce_mean(v0 - v1, 0)
        update_hb = _hb + self.learning_rate * tf.reduce_mean(h0 - h1, 0)
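        # These three ops implement the CD-1 update that the loop below applies:
        #   W  <- W  + lr * (v0^T h0 - v1^T h1) / batch_size
        #   vb <- vb + lr * mean(v0 - v1)
        #   hb <- hb + lr * mean(h0 - h1)
        # The positive phase is driven by the data, the negative phase by a
        # single Gibbs reconstruction step.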

        #Find the reconstruction error
        err = tf.reduce_mean(tf.square(v0 - v1))

        #Training loop
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            #For each epoch
            for epoch in range(self.epochs):
                #For each step/batch
                for start, end in zip(range(0, len(X), self.batchsize), range(self.batchsize, len(X), self.batchsize)):
                    batch = X[start:end]

                    #Update the weights and biases
                    cur_w = sess.run(update_w, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                    cur_hb = sess.run(update_hb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                    cur_vb = sess.run(update_vb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
                    prv_w = cur_w
                    prv_hb = cur_hb
                    prv_vb = cur_vb
                error = sess.run(err, feed_dict={v0: X, _w: cur_w, _vb: cur_vb, _hb: cur_hb})
                print('Epoch: %d' % epoch, 'reconstruction error: %f' % error)
            self.w = prv_w
            self.hb = prv_hb
            self.vb = prv_vb

    #Propagate the data through this RBM to produce the input for the next layer
    def rbm_output(self, X):
        input_X = tf.constant(X)
        _w = tf.constant(self.w)
        _hb = tf.constant(self.hb)
        out = tf.nn.sigmoid(tf.matmul(input_X, _w) + _hb)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            return sess.run(out)

# Build the DBN

RBM_hidden_sizes = [500, 200, 50] #Create three RBM layers with 500, 200 and 50 hidden units

#Since we are training, set input as training data
inpX = trX

#Create list to hold our RBMs
rbm_list = []

#Size of inputs is the number of features in the training set
print("Training data shape:")
print(inpX.shape)
input_size = inpX.shape[1]

#For each RBM we want to generate
for i, size in enumerate(RBM_hidden_sizes):
    print('RBM: ', i, ' ', input_size, '->', size)
    rbm_list.append(RBM(input_size, size))
    input_size = size


# The fine-tuning neural network

class NN(object):

    def __init__(self, sizes, X, Y):
        #Initialize hyperparameters
        self._sizes = sizes
        self._X = X
        self._Y = Y
        self.w_list = []
        self.b_list = []
        self._learning_rate = 1.0
        self._momentum = 0.0
        self._epochs = 10
        self._batchsize = 100
        input_size = X.shape[1]

        #Initialization loop over the hidden layers plus the output layer
        for size in self._sizes + [Y.shape[1]]:
            #Define upper limit for the uniform distribution range
            max_range = 4 * math.sqrt(6. / (input_size + size))

            #Initialize weights through a random uniform distribution
            self.w_list.append(
                np.random.uniform(-max_range, max_range, [input_size, size]).astype(np.float32))

            #Initialize biases as zeroes
            self.b_list.append(np.zeros([size], np.float32))
            input_size = size

    #Load weights and biases from the pre-trained RBMs
    def load_from_rbms(self, dbn_sizes, rbm_list):
        #Check if expected sizes are correct
        assert len(dbn_sizes) == len(self._sizes)

        for i in range(len(self._sizes)):
            #Check that for each RBM the expected sizes are correct
            assert dbn_sizes[i] == self._sizes[i]

        #If everything is correct, bring over the weights and biases
        for i in range(len(self._sizes)):
            self.w_list[i] = rbm_list[i].w
            self.b_list[i] = rbm_list[i].hb

    #Training method (fine-tuning with backpropagation)
    def train(self):
        #Create placeholders for input, weights, biases, output
        _a = [None] * (len(self._sizes) + 2)
        _w = [None] * (len(self._sizes) + 1)
        _b = [None] * (len(self._sizes) + 1)
        _a[0] = tf.placeholder("float", [None, self._X.shape[1]])
        y = tf.placeholder("float", [None, self._Y.shape[1]])
        print("Input/output dimensions:")
        print(self._X.shape[1])
        print(self._Y.shape[1])
        #Define variables and activation function
        for i in range(len(self._sizes) + 1):
            _w[i] = tf.Variable(self.w_list[i])
            _b[i] = tf.Variable(self.b_list[i])
        for i in range(1, len(self._sizes) + 2):
            _a[i] = tf.nn.sigmoid(tf.matmul(_a[i - 1], _w[i - 1]) + _b[i - 1])

        #Define the cost function
        cost = tf.reduce_mean(tf.square(_a[-1] - y))

        #Define the training operation (Momentum Optimizer minimizing the cost function)
        train_op = tf.train.MomentumOptimizer(
            self._learning_rate, self._momentum).minimize(cost)

        #Prediction operation
        predict_op = tf.argmax(_a[-1], 1)

        #Training loop
        with tf.Session() as sess:
            #Initialize variables
            sess.run(tf.global_variables_initializer())

            #For each epoch
            for i in range(self._epochs):

                #For each step
                for start, end in zip(
                    range(0, len(self._X), self._batchsize), range(self._batchsize, len(self._X), self._batchsize)):

                    #Run the training operation on the input data
                    sess.run(train_op, feed_dict={
                        _a[0]: self._X[start:end], y: self._Y[start:end]})
                for j in range(len(self._sizes) + 1):
                    #Retrieve weights and biases
                    self.w_list[j] = sess.run(_w[j])
                    self.b_list[j] = sess.run(_b[j])

                print("Accuracy rating for epoch " + str(i) + ": " + str(np.mean(np.argmax(self._Y, axis=1) ==
                              sess.run(predict_op, feed_dict={_a[0]: self._X, y: self._Y}))))


if __name__ == '__main__':
    ## Greedy layer-wise pre-training on the training set
    # For each RBM in our list
    for rbm in rbm_list:
        print('New RBM:')
        # Train a new one
        rbm.train(inpX)
        # Propagate the data to get the input for the next RBM
        inpX = rbm.rbm_output(inpX)

    print("Training the network...")
    nNet = NN(RBM_hidden_sizes, trX, trY)
    nNet.load_from_rbms(RBM_hidden_sizes, rbm_list)
    nNet.train()
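
The script assumes a train.tfrecords file already exists, with both 'label' and 'img_raw' stored as the raw uint8 bytes of a 125×125 RGB image, exactly as read_and_decode expects. Below is a minimal writer sketch for producing such a file; the ./data/ directory is a hypothetical path, and storing the image itself as the label simply mirrors the reader above.

# Minimal TFRecord writer sketch. Assumption: source images sit in a
# hypothetical ./data/ directory; each is resized to 125x125 RGB and its raw
# bytes are stored under both 'label' and 'img_raw', matching read_and_decode.
import os
import numpy as np
import tensorflow as tf
from PIL import Image

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

writer = tf.python_io.TFRecordWriter("train.tfrecords")
for name in os.listdir("data"):
    img = Image.open(os.path.join("data", name)).convert("RGB").resize((125, 125))
    img_raw = np.asarray(img, dtype=np.uint8).tobytes()
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': _bytes_feature(img_raw),
        'img_raw': _bytes_feature(img_raw),
    }))
    writer.write(example.SerializeToString())
writer.close()

Run this once before the main script to generate train.tfrecords.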

Reprinted from www.cnblogs.com/smartisn/p/12445518.html