Semantic Segmentation with SegNet: Training and Predicting on Your Own Dataset

For environment setup, see my earlier post PyQt5实现深度学习平台Demo(三)- Anaconda3配置tensorflow2.3.1及如何转化tensorflow1.x系列代码 (jiugeshao's column, CSDN).

Background reading on SegNet and related architectures: 深度学习(十九)——FCN, SegNet, DeconvNet, DeepLab, ENet, GCN (antkillerfarm's column, CSDN)

图像分割:SegNet&DeconvNet和代码用例 (Zhihu)

The SegNet code implementation is described below.

This post's experiments build on the code from lsh1994/keras-segmentation on GitHub (keras-segmentation/utils.py at master), with some modifications.

The file structure of the PyCharm project is as follows:
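
(The original screenshot is not reproduced here; the layout below is my reconstruction from the paths and imports used in the scripts in this post.)

project/
├── data/
│   ├── train/          # training images
│   ├── trainannot/     # training annotation masks
│   ├── val/            # validation images
│   └── valannot/       # validation annotation masks
├── Models/             # FCN8.py, FCN32.py, SegNet.py, UNet.py
├── output/             # saved models and TensorBoard logs
├── LoadBatches.py
├── utils.py
├── train.py
└── predict.py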

The dataset under data/ is the one used in my earlier post 语义分割之FCN (jiugeshao's column, CSDN).

After downloading the code, change the MaxUnpooling2D class in utils.py to the following:

# Needed at the top of utils.py:
#   import tensorflow as tf
#   from tensorflow.keras import backend as K
#   from tensorflow.keras.layers import Layer
class MaxUnpooling2D(Layer):
    """Upsampling layer that scatters pooled values back to the argmax
    positions recorded by the paired max-pooling-with-argmax layer."""

    def __init__(self, size=(2, 2), **kwargs):
        super(MaxUnpooling2D, self).__init__(**kwargs)
        self.size = size

    def call(self, inputs, output_shape=None):
        updates, mask = inputs[0], inputs[1]

        with tf.compat.v1.variable_scope(self.name):
            mask = K.cast(mask, 'int32')
            input_shape = tf.shape(updates, out_type='int32')

            # By default each spatial dimension grows by the pooling size
            if output_shape is None:
                output_shape = (
                    input_shape[0],
                    input_shape[1] * self.size[0],
                    input_shape[2] * self.size[1],
                    input_shape[3])

            # Scatter the pooled values into a flattened tensor at the
            # argmax indices, then reshape to (batch, height, width, channels)
            ret = tf.scatter_nd(K.expand_dims(K.flatten(mask)),
                                K.flatten(updates),
                                [K.prod(output_shape)])

            input_shape = updates.shape
            out_shape = [-1,
                         input_shape[1] * self.size[0],
                         input_shape[2] * self.size[1],
                         input_shape[3]]
        return K.reshape(ret, out_shape)

    def get_config(self):
        # Serialize the pooling size so a saved model can be reloaded
        config = super().get_config().copy()
        config.update({
            'size': self.size
        })
        return config

    def compute_output_shape(self, input_shape):
        # input_shape is [updates_shape, mask_shape]
        mask_shape = input_shape[1]
        return (
            mask_shape[0],
            mask_shape[1] * self.size[0],
            mask_shape[2] * self.size[1],
            mask_shape[3]
        )
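
As a quick sanity check, the modified layer can be paired with the MaxPoolingWithArgmax2D layer from the same utils.py (assumed unchanged from the lsh1994 repo, default pool size 2x2) and verified to restore the input's spatial dimensions:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from utils import MaxPoolingWithArgmax2D, MaxUnpooling2D

inp = Input(shape=(288, 512, 3))
pooled, mask = MaxPoolingWithArgmax2D()(inp)        # halves H and W, keeps argmax indices
out = MaxUnpooling2D(size=(2, 2))([pooled, mask])   # scatters values back to (288, 512, 3)
model = Model(inp, out)
print(model.output_shape)  # expected: (None, 288, 512, 3)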

With the dataset from the earlier post, the generated annotation images carry a _gt suffix in their file names (compared with the original image names); the following script strips it in batch:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Icecream.Shao

import os

# Folder whose annotation file names should be cleaned; run again with
# r'data/trainannot' to process the training annotations as well
filepath = r'data/valannot'
fileList = os.listdir(filepath)
currentpath = os.getcwd()
os.chdir(filepath)

# Strip the "_gt" suffix, e.g. 1_gt.png -> 1.png
for fileName in fileList:
    if "_gt.png" in fileName:
        os.rename(fileName, fileName.replace("_gt.png", ".png"))

os.chdir(currentpath)

For the self-built dataset, the modified train.py is as follows:

from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.optimizers import Adam
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

import LoadBatches
from Models import FCN8, FCN32, SegNet, UNet

# Let the GPU allocate memory on demand instead of reserving it all up front
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)

#############################################################################
train_images_path = "data/train/"
train_segs_path = "data/trainannot/"
train_batch_size = 2
n_classes = 2

epochs = 60

input_height = 288
input_width = 512


val_images_path = "data/val/"
val_segs_path = "data/valannot/"
val_batch_size = 2

key = "segnet"


##################################

method = {
    "fcn32": FCN32.FCN32,
    "fcn8": FCN8.FCN8,
    'segnet': SegNet.SegNet,
    'unet': UNet.UNet}

m = method[key](n_classes, input_height=input_height, input_width=input_width)
m.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(lr=0.001),
    metrics=['acc'])


G = LoadBatches.imageSegmentationGenerator(train_images_path,
                                           train_segs_path, train_batch_size, n_classes=n_classes, input_height=input_height, input_width=input_width)

G_test = LoadBatches.imageSegmentationGenerator(val_images_path,
                                                val_segs_path, val_batch_size, n_classes=n_classes, input_height=input_height, input_width=input_width)
checkpoint = ModelCheckpoint(
    filepath="output/%s_model.h5" % key,
    monitor='acc',
    mode='auto',
    save_best_only=True)
tensorboard = TensorBoard(log_dir='output/log_%s_model' % key)

m.fit_generator(generator=G,
                steps_per_epoch=100,
                epochs=epochs, callbacks=[checkpoint, tensorboard],
                verbose=2,
                validation_data=G_test,
                validation_steps=10,
                shuffle=True)
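
Training progress can be watched in TensorBoard; with the callback above and key = "segnet", the logs land in output/log_segnet_model, so starting it should look like this:

tensorboard --logdir output/log_segnet_model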

The code in predict.py is:

import glob
import random

import cv2
import numpy as np

import LoadBatches
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from Models import FCN32, FCN8, SegNet, UNet

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Same on-demand GPU memory setup as in train.py
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
tf.compat.v1.keras.backend.set_session(sess)

n_classes = 2

key = "segnet"

method = {
    "fcn32": FCN32.FCN32,
    "fcn8": FCN8.FCN8,
    "segnet": SegNet.SegNet,
    'unet': UNet.UNet}

images_path = "data/val/"
segs_path = "data/valannot/"

input_height = 288
input_width = 512

# One random display color per class
colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
          for _ in range(n_classes)]

##########################################################################


def label2color(colors, n_classes, seg):
    seg_color = np.zeros((seg.shape[0], seg.shape[1], 3))
    for c in range(n_classes):
        seg_color[:, :, 0] += ((seg == c) *
                               (colors[c][0])).astype('uint8')
        seg_color[:, :, 1] += ((seg == c) *
                               (colors[c][1])).astype('uint8')
        seg_color[:, :, 2] += ((seg == c) *
                               (colors[c][2])).astype('uint8')
    seg_color = seg_color.astype(np.uint8)
    return seg_color


def getcenteroffset(shape, input_height, input_width):
    short_edge = min(shape[:2])
    xx = int((shape[0] - short_edge) / 2)
    yy = int((shape[1] - short_edge) / 2)
    return xx, yy


images = sorted(glob.glob(images_path + "*.bmp") +
                glob.glob(images_path + "*.png") +
                glob.glob(images_path + "*.jpeg"))
segmentations = sorted(glob.glob(segs_path + "*.bmp") +
                       glob.glob(segs_path + "*.png") +
                       glob.glob(segs_path + "*.jpeg"))


# m = load_model("output/%s_model.h5" % key)
# With custom layers (MaxUnpooling2D), the model cannot be loaded directly;
# rebuild the architecture and load the weights instead
m = method[key](n_classes, input_height=input_height, input_width=input_width)

m.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(lr=0.001),
    metrics=['acc'])


m.load_weights("output/%s_model.h5" % key)

for i, (imgName, segName) in enumerate(zip(images, segmentations)):

    print("%d/%d %s" % (i + 1, len(images), imgName))

    im = cv2.imread(imgName, 1)
    seg = cv2.imread(segName, 0)
    # the resize / center-crop steps from the original repo are left disabled
    # in this experiment

    pr = m.predict(np.expand_dims(LoadBatches.getImageArr(im), 0))[0]
    # turn per-pixel class probabilities into a label map
    pr = pr.reshape((input_height, input_width, n_classes)).argmax(axis=2)

    cv2.imshow("img", im)
    cv2.imshow("seg_predict_res", label2color(colors, n_classes, pr))
    cv2.imshow("seg", label2color(colors, n_classes, seg))

    cv2.waitKey()
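
Beyond eyeballing the three windows, a per-class IoU between pr and seg gives a number to track across experiments. The helper below is a plain NumPy sketch, not part of the original repo:

import numpy as np

def class_iou(pred, gt, n_classes):
    # Intersection-over-union for each class label; NaN if the class
    # appears in neither the prediction nor the ground truth
    ious = []
    for c in range(n_classes):
        inter = np.logical_and(pred == c, gt == c).sum()
        union = np.logical_or(pred == c, gt == c).sum()
        ious.append(inter / union if union else float('nan'))
    return ious

# e.g. inside the loop above: print(class_iou(pr, seg, n_classes))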

The detection results are as follows:

Original image:

Prediction result:

The annotation image used at the time:

The code and dataset are available at the link below:

Link: https://pan.baidu.com/s/1NKYY7IoDCkorxN3T47RWoA
Extraction code: ojmg

Origin blog.csdn.net/jiugeshao/article/details/114210595