6. Cat and Dog Classification

 

http://ai-atest.bj.bcebos.com/cifar-10-python.tar.gz
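The link above is the CIFAR-10 python archive; paddle.dataset.cifar.train10() and test10() normally download and cache it automatically the first time they run. If you prefer to fetch it yourself (for example when the training machine has no direct access), a minimal sketch is shown below; the cache directory ~/.cache/paddle/dataset/cifar is an assumption about paddle.dataset's default location, not something stated in the original post.

# Minimal pre-download sketch (not part of the original tutorial).
# ASSUMPTION: paddle.dataset caches files under ~/.cache/paddle/dataset/cifar.
import os
import urllib.request

CIFAR_URL = "http://ai-atest.bj.bcebos.com/cifar-10-python.tar.gz"
cache_dir = os.path.expanduser("~/.cache/paddle/dataset/cifar")  # assumed default cache path
os.makedirs(cache_dir, exist_ok=True)
archive_path = os.path.join(cache_dir, "cifar-10-python.tar.gz")
if not os.path.exists(archive_path):
    urllib.request.urlretrieve(CIFAR_URL, archive_path)
print("CIFAR-10 archive at:", archive_path)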

 

# Import the required packages
import paddle
import paddle.fluid as fluid
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os


BATCH_SIZE = 128
# Data provider for training
train_reader = paddle.batch(
    paddle.reader.shuffle(paddle.dataset.cifar.train10(),
                          buf_size=128*100),
    batch_size=BATCH_SIZE)
# Data provider for testing
test_reader = paddle.batch(
    paddle.dataset.cifar.test10(),
    batch_size=BATCH_SIZE)
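The two data providers above are built by composing two reader decorators: paddle.reader.shuffle buffers up to buf_size samples and yields them in random order, and paddle.batch groups consecutive samples into lists of BATCH_SIZE. The plain-Python sketch below only illustrates the idea; it is not PaddlePaddle's implementation, and toy_reader, shuffled, and batched are made-up names for illustration.

import random

def toy_reader():
    # stand-in for paddle.dataset.cifar.train10(): yields (image, label) samples
    for i in range(10):
        yield (i, i % 2)

def shuffled(reader_fn, buf_size):
    # buffer up to buf_size samples, shuffle the buffer, then yield from it
    buf = []
    for sample in reader_fn():
        buf.append(sample)
        if len(buf) >= buf_size:
            random.shuffle(buf)
            for s in buf:
                yield s
            buf = []
    random.shuffle(buf)
    for s in buf:
        yield s

def batched(reader_fn, batch_size):
    # group consecutive samples into lists of length batch_size
    batch = []
    for sample in reader_fn():
        batch.append(sample)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

for b in batched(lambda: shuffled(toy_reader, buf_size=4), batch_size=3):
    print(b)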


def convolutional_neural_network(img):
    # First convolution-pooling layer
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,         # input image
        filter_size=5,     # filter size
        num_filters=20,    # number of filters, equal to the number of output channels
        pool_size=2,       # 2*2 pooling kernel
        pool_stride=2,     # pooling stride
        act="relu")        # activation type
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    # Second convolution-pooling layer
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_2 = fluid.layers.batch_norm(conv_pool_2)
    # Third convolution-pooling layer
    conv_pool_3 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_2,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    # Fully connected output layer with softmax activation; 10 classes give 10 outputs
    prediction = fluid.layers.fc(input=conv_pool_3, size=10, act='softmax')
    return prediction


# Define the input data
data_shape = [3, 32, 32]
images = fluid.layers.data(name='images', shape=data_shape, dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# Get the classifier: classify with the CNN
predict = convolutional_neural_network(images)

# Get the loss function and accuracy
cost = fluid.layers.cross_entropy(input=predict, label=label)  # cross-entropy loss
avg_cost = fluid.layers.mean(cost)                             # mean over all elements of cost
acc = fluid.layers.accuracy(input=predict, label=label)        # accuracy computed from predictions and labels

# Get the test program
test_program = fluid.default_main_program().clone(for_test=True)

# Define the optimization method
optimizer = fluid.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(avg_cost)
print("Done")

# Choose CPU or GPU: use_cuda = False for CPU, use_cuda = True for GPU
use_cuda = False
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)              # create an Executor instance
exe.run(fluid.default_startup_program()) # run the startup program to initialize the parameters


feeder = fluid.DataFeeder(feed_list=[images, label], place=place)


all_train_iter = 0
all_train_iters = []
all_train_costs = []
all_train_accs = []

def draw_train_process(title, iters, costs, accs, label_cost, label_acc):
    plt.title(title, fontsize=24)
    plt.xlabel("iter", fontsize=20)
    plt.ylabel("cost/acc", fontsize=20)
    plt.plot(iters, costs, color='red', label=label_cost)
    plt.plot(iters, accs, color='green', label=label_acc)
    plt.legend()
    plt.grid()
    plt.show()


EPOCH_NUM = 30
model_save_dir = "/home/aistudio/work/catdog.inference.model"

for pass_id in range(EPOCH_NUM):
    # Start training
    for batch_id, data in enumerate(train_reader()):                          # iterate over train_reader, indexing each batch with batch_id
        train_cost, train_acc = exe.run(program=fluid.default_main_program(), # run the main program
                             feed=feeder.feed(data),                          # feed in one batch of data
                             fetch_list=[avg_cost, acc])                      # fetch the average cross-entropy loss and the accuracy

        all_train_iter = all_train_iter + BATCH_SIZE
        all_train_iters.append(all_train_iter)
        all_train_costs.append(train_cost[0])
        all_train_accs.append(train_acc[0])

        # Print the training progress every 100 batches
        if batch_id % 100 == 0:
            print('Pass:%d, Batch:%d, Cost:%0.5f, Accuracy:%0.5f' %
                  (pass_id, batch_id, train_cost[0], train_acc[0]))

    # Start testing
    test_costs = []                                                           # test losses
    test_accs = []                                                            # test accuracies
    for batch_id, data in enumerate(test_reader()):
        test_cost, test_acc = exe.run(program=test_program,                   # run the test program
                                      feed=feeder.feed(data),                 # feed in the data
                                      fetch_list=[avg_cost, acc])             # fetch the loss and accuracy
        test_costs.append(test_cost[0])                                       # record the loss of each batch
        test_accs.append(test_acc[0])                                         # record the accuracy of each batch

    # Average the test results
    test_cost = (sum(test_costs) / len(test_costs))                           # mean loss (sum of losses / number of batches)
    test_acc = (sum(test_accs) / len(test_accs))                              # mean accuracy (sum of accuracies / number of batches)
    print('Test:%d, Cost:%0.5f, ACC:%0.5f' % (pass_id, test_cost, test_acc))

# Save the model
# Create the save directory if it does not exist
if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
print('save models to %s' % (model_save_dir))
fluid.io.save_inference_model(model_save_dir,
                              ['images'],
                              [predict],
                              exe)
print('Inference model saved.')
draw_train_process("training", all_train_iters, all_train_costs, all_train_accs, "training cost", "training acc")

infer_exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()


def load_image(file):
    # Open the image
    im = Image.open(file)
    # Resize to 32*32, the same size as the training data; ANTIALIAS enables anti-aliased resampling
    im = im.resize((32, 32), Image.ANTIALIAS)
    # Convert the image to a float32 array
    im = np.array(im).astype(np.float32)
    # Transpose from HWC to CHW
    im = im.transpose((2, 0, 1))
    # Scale the pixel values from [0, 255] to [0, 1]
    im = im / 255.0
    # Add a batch dimension to match the input shape used in training
    im = np.expand_dims(im, axis=0)
    print('im shape:', im.shape)
    return im

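As a quick sanity check of the preprocessing above, the hedged snippet below (the file name dummy_rgb.jpg is made up for illustration, and load_image from the code above is assumed to be in scope) builds a dummy RGB image and verifies that load_image returns a (1, 3, 32, 32) array, matching the [3, 32, 32] input layer plus a batch dimension.

from PIL import Image
import numpy as np

# create a dummy 64x64 RGB image and save it to a hypothetical temporary path
dummy = Image.fromarray(np.uint8(np.random.randint(0, 256, (64, 64, 3))))
dummy.save('dummy_rgb.jpg')

im = load_image('dummy_rgb.jpg')   # load_image defined above is assumed to be in scope
assert im.shape == (1, 3, 32, 32)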
with fluid.scope_guard(inference_scope):
    # Load the inference model from the save directory
    [inference_program,  # the program used for inference
     feed_target_names,  # a list of str: names of the variables that must be fed to the inference program
     fetch_targets] = fluid.io.load_inference_model(model_save_dir,  # fetch_targets: a list of Variables from which the inference results are obtained
                                                    infer_exe)       # infer_exe: the executor that runs the inference model

    infer_path = 'dog2.jpg'
    img = Image.open(infer_path)
    plt.imshow(img)
    plt.show()

    img = load_image(infer_path)

    results = infer_exe.run(inference_program,                 # run the inference program
                            feed={feed_target_names[0]: img},  # feed in the image to predict
                            fetch_list=fetch_targets)          # get the inference results
    print('results', results)
    label_list = [
        "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
        "ship", "truck"
    ]
    print("infer results: %s" % label_list[np.argmax(results[0])])
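As a hedged follow-up, assuming results and label_list from the block above are still in scope and that results[0] is a (1, 10) array of softmax probabilities, the top-3 predictions can be listed like this:

probs = np.array(results[0][0])   # assumed shape (10,): softmax probabilities for the 10 classes
top3 = probs.argsort()[::-1][:3]  # indices of the three most likely classes
for idx in top3:
    print("%s: %.4f" % (label_list[idx], probs[idx]))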

Reposted from www.cnblogs.com/aiqinger/p/13172859.html