Semantic Segmentation: ENet Model Implementation

Use a pre-trained ENet model to perform semantic segmentation on images and videos (beginner-friendly).
Code download
A walkthrough of the code follows.

# USAGE
# python segment.py --model enet-cityscapes/enet-model.net --classes enet-cityscapes/enet-classes.txt --colors enet-cityscapes/enet-colors.txt --image images/example_01.png

# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2

# construct the argument parser and parse the command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
	help="path to deep learning segmentation model")
ap.add_argument("-c", "--classes", required=True,
	help="path to .txt file containing class labels")
ap.add_argument("-i", "--image", required=True,
	help="path to input image")
ap.add_argument("-l", "--colors", type=str,
	help="path to .txt file containing colors for labels")
ap.add_argument("-w", "--width", type=int, default=500,
	help="desired width (in pixels) of input image")
args = vars(ap.parse_args())  # parse the arguments and store them in the args dictionary

'''# alternative: if you don't want to pass arguments on the command line,
# hard-code the defaults below and run the script directly from an interpreter
ap = argparse.ArgumentParser()  # create the argument parser
ap.add_argument("-m", "--model", default='./enet-cityscapes/enet-model.net',
	help="path to deep learning segmentation model")
ap.add_argument("-c", "--classes", default='./enet-cityscapes/enet-classes.txt',
	help="path to .txt file containing class labels")
ap.add_argument("-i", "--image", default='./images/example_03.jpg',
	help="path to input image")
ap.add_argument("-l", "--colors", default='./enet-cityscapes/enet-colors.txt',type=str,
	help="path to .txt file containing colors for labels")
ap.add_argument("-w", "--width", type=int, default=500,
	help="desired width (in pixels) of input image")
args = vars(ap.parse_args())  # parse the arguments and store them in the args dictionary
'''


# (1) load the class label names
CLASSES = open(args["classes"]).read().strip().split("\n")
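# for reference, the parsing in this section implies the file formats:
# enet-classes.txt holds one class label per line (e.g. "Road") and
# enet-colors.txt holds one comma-separated RGB triple per line
# (e.g. "81,0,81"); these example values are illustrative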
# (2) load the colors:
# if a colors file was supplied, load it from disk
if args["colors"]:
	COLORS = open(args["colors"]).read().strip().split("\n")
	COLORS = [np.array(c.split(",")).astype("int") for c in COLORS]
	COLORS = np.array(COLORS, dtype="uint8")
	
# otherwise, we need to randomly generate RGB colors for each class
# label
else:
	# initialize a list of colors to represent each class label in
	# the mask (starting with 'black' for the background/unlabeled
	# regions)
	np.random.seed(42)
	COLORS = np.random.randint(0, 255, size=(len(CLASSES) - 1, 3),
		dtype="uint8")
	COLORS = np.vstack([[0, 0, 0], COLORS]).astype("uint8")
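
# optional sanity check (not part of the original script): the mask lookup
# later indexes COLORS by class ID, so there must be one color per class
assert len(COLORS) == len(CLASSES), \
	"got {} colors for {} classes".format(len(COLORS), len(CLASSES))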

# initialize the legend visualization
legend = np.zeros(((len(CLASSES) * 25) + 25, 300, 3), dtype="uint8")

# loop over the class names + colors
for (i, (className, color)) in enumerate(zip(CLASSES, COLORS)):
	# draw the class name + color on the legend
	color = [int(c) for c in color]
	cv2.putText(legend, className, (5, (i * 25) + 17),
		cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
	cv2.rectangle(legend, (100, (i * 25)), (300, (i * 25) + 25),
		tuple(color), -1)


# (3) load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNet(args["model"])
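# note: cv2.dnn.readNet picks the loader from the file extension; for this
# Torch-serialized .net file the explicit call would be
# cv2.dnn.readNetFromTorch(args["model"]) (stated as an assumption about the
# model format, which the script itself doesn't check)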

# (4) load the input image, resize it, and construct a blob from it, keeping
# in mind that the original input image dimensions ENet was trained on
# were 1024x512
image = cv2.imread(args["image"])
image = imutils.resize(image, width=args["width"])
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (1024, 512), 0,
	swapRB=True, crop=False)
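# the resulting blob has shape (1, 3, 512, 1024): one image, three channels
# (RGB after swapRB=True) at the 512x1024 input size ENet expects; the scale
# factor 1/255.0 maps pixel values into [0, 1] and the mean subtraction is 0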

# (5) feed the blob to the network and call .forward() to run the segmentation
# model; note that the forward pass returns a volume with one score map per
# class, of shape (1, numClasses, height, width)
net.setInput(blob)
start = time.time()
output = net.forward()
end = time.time()
print(output)  # debug: dump the raw score volume
# show the amount of time inference took
print("[INFO] inference took {:.4f} seconds".format(end - start))

# infer the total number of classes along with the spatial dimensions
# of the mask image via the shape of the output array
(numClasses, height, width) = output.shape[1:4]
print(output.shape)  # (1, 20, 512, 1024)
print(type(output))  # <class 'numpy.ndarray'>
# (6) our output class ID map will be numClasses x height x width in size, so
# we take the argmax to find the class label with the largest probability for
# each and every (x, y)-coordinate in the image
classMap = np.argmax(output[0], axis=0)  # for each pixel, take the index of the max value across the 20 score maps
# classMap therefore has the same height and width as the network output, with
# one class index per pixel
print(output[0])        # debug: the per-class score volume
print(output[0].shape)  # (20, 512, 1024)
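# a minimal illustration of the argmax step (hypothetical 2-class, 2x2 scores):
#   scores = np.array([[[0.9, 0.2], [0.1, 0.4]],   # class 0 score map
#                      [[0.1, 0.8], [0.9, 0.6]]])  # class 1 score map
#   np.argmax(scores, axis=0)  # -> [[0, 1], [1, 1]]
# i.e. each (x, y) position receives the ID of its highest-scoring class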
# (7) given the class ID map, look up each class ID's corresponding color to
# build a mask
mask = COLORS[classMap]
print(COLORS.shape)  # (20, 3)
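# NumPy fancy indexing at work: classMap is a (512, 1024) array of class IDs
# and COLORS is (20, 3), so COLORS[classMap] yields a (512, 1024, 3) color
# image in which every pixel carries the color of its predicted class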
# (8) resize the mask and class map so that their dimensions match the
# original size of the input image (we're not using the class map for
# anything else here, but this is how you would resize it in case you
# wanted to extract specific pixels/classes)
mask = cv2.resize(mask, (image.shape[1], image.shape[0]),
	interpolation=cv2.INTER_NEAREST)
classMap = cv2.resize(classMap, (image.shape[1], image.shape[0]),
	interpolation=cv2.INTER_NEAREST)
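# nearest-neighbor interpolation matters here: bilinear or bicubic resizing
# would blend neighboring class IDs/colors and produce labels that don't exist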

# perform a weighted combination of the input image with the mask to
# form an output visualization
output = ((0.6 * image) + (0.4 * mask)).astype("uint8")
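# an equivalent overlay using OpenCV's built-in blend (identical up to
# rounding): output = cv2.addWeighted(image, 0.6, mask, 0.4, 0)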

# show the input and output images
cv2.imshow("Legend", legend)
cv2.imshow("Input", image)
cv2.imshow("Output", output)
cv2.waitKey(0)
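
The same pipeline extends to video. Below is a minimal sketch, assuming a hypothetical --video argument in place of --image and reusing the net and COLORS already loaded above; it repeats the blob/forward/argmax/overlay steps once per frame:

# minimal video sketch (illustrative: the "--video" argument is an assumption,
# everything else reuses objects defined in the script above)
vs = cv2.VideoCapture(args["video"])
while True:
	# grab the next frame; stop once the stream is exhausted
	(grabbed, frame) = vs.read()
	if not grabbed:
		break
	frame = imutils.resize(frame, width=args["width"])
	blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (1024, 512), 0,
		swapRB=True, crop=False)
	net.setInput(blob)
	output = net.forward()
	# per-pixel class IDs -> colors -> blended overlay, as in the image case
	classMap = np.argmax(output[0], axis=0)
	mask = COLORS[classMap]
	mask = cv2.resize(mask, (frame.shape[1], frame.shape[0]),
		interpolation=cv2.INTER_NEAREST)
	overlay = ((0.6 * frame) + (0.4 * mask)).astype("uint8")
	cv2.imshow("Video Output", overlay)
	# press "q" to quit
	if cv2.waitKey(1) & 0xFF == ord("q"):
		break
vs.release()
cv2.destroyAllWindows()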

Reposted from blog.csdn.net/weixin_43826596/article/details/100511698