基于深度学习的变化检测算法实现

我读研究生期间主要研究SAR影像的变化检测。这是一段简单的基于深度学习的变化检测方法,以CNN实现。
后续我会把整个代码的思路写上,由于这个是自己写的代码,如果有哪些地方写的不太好请大家见谅,如果有不懂或者发现了问题欢迎大家进行评论。

import tensorflow as tf
import numpy as np
import time
import cv2 as cv
# --- Load the two co-registered images of the same scene (two dates) ---
path = 'E:/data changing2/'
img1 = cv.imread(path+'1_1997.05.tif')
img2 = cv.imread(path+'1_1997.08.tif')
# cv.imread returns None (no exception) when a file is missing or unreadable;
# fail fast here instead of crashing obscurely in copyMakeBorder below.
if img1 is None or img2 is None:
    raise FileNotFoundError('could not read input images under ' + path)

# Reflect-pad 2 pixels on every side so a 5x5 patch exists for border pixels.
img1 = cv.copyMakeBorder(img1, 2, 2, 2, 2, cv.BORDER_REFLECT)
img2 = cv.copyMakeBorder(img2, 2, 2, 2, 2, cv.BORDER_REFLECT)

img_row = img1.shape[0]      # rows (after padding)
img_columns = img1.shape[1]  # columns (after padding)

# Float32 for the per-pixel arithmetic and for feeding the network later.
img1 = np.asarray(img1, np.float32)
img2 = np.asarray(img2, np.float32)

# --- Difference image: per-pixel Euclidean distance between the two dates ---
def _difference_image(a, b):
    """Return (values, max, min) for two float32 HxWx3 images.

    *values* is the per-pixel Euclidean distance across the 3 channels,
    flattened row-major and linearly rescaled to [0, 255]; *max*/*min* are
    the raw extremes before rescaling.  Vectorized replacement for the
    original O(rows*cols) pure-Python loop.
    """
    mag = np.sqrt(np.sum(np.square(a - b), axis=2)).ravel()
    hi = float(mag.max())
    lo = float(mag.min())
    # Linear stretch to [0, 255]; assumes hi > lo (the images differ somewhere).
    return (mag - lo) / (hi - lo) * 255, hi, lo


img_change, max_, min_ = _difference_image(img1, img2)
print('max = ', max_, 'min = ', min_)

# --- Difference image as a 2-D picture + coarse binary reference map ---
# Reshape the flat difference values back into image layout (replaces the
# original element-by-element copy loop with its running counter).
img_gray = np.asarray(img_change, np.float32).reshape(img_row, img_columns)

# Coarse pre-classification (FCM clustering result) used below to select
# pure training samples; pad it identically to the input images.
img_gray01 = cv.imread(path + 'FCM.bmp', 0)
img_gray01 = cv.copyMakeBorder(img_gray01, 2, 2, 2, 2, cv.BORDER_REFLECT)
img_gray = np.asarray(img_gray, np.uint8)
img_gray01 = np.asarray(img_gray01, np.uint8)

print("差异图,基础二值图生成完毕")

扫描二维码关注公众号,回复: 6824606 查看本文章


# --- Rank pixels by difference magnitude to pick candidate samples ---
# T is the fraction taken from each extreme: the T_num largest differences
# become "changed" candidates, the T_num smallest "unchanged" candidates.
T = 0.02
T_num = int(len(img_change)*T)
print("changdu:", len(img_change))
print('T:', T_num)

# np.argsort replaces the original hand-written partial selection sort
# (O(T_num * N)) whose manual label swapping could desynchronize labels
# from values after chained swaps.  After this:
#   label_change[i]          = flat pixel index of the i-th LARGEST value
#   label_change[len-1 - i]  = flat pixel index of the i-th smallest value
# which is exactly how the sample-selection loops below consume it.
order = np.argsort(np.asarray(img_change, np.float64))[::-1]
img_change_sort = np.asarray(img_change)[order]  # values in descending order
label_change = order

# --- Select "changed" training samples from the largest differences ---
# A candidate pixel is accepted only when at least `ratio` of its 5x5
# neighbourhood is marked changed (255) in the coarse FCM map; the sample
# is the 5x5 neighbourhood with 6 values per pixel (RGB of both dates).
data = []
label = []
data1 = []
data0 = []
img_1 = [[0] * img_columns for _ in range(img_row)]
ratio = 0.88
data_1_count = 0
data_0_count = 0

# Grey background: pixels never chosen as samples stay at 125.
for r in range(img_row):
    for c in range(img_columns):
        img_1[r][c] = 125

for idx in range(T_num):
    flat = label_change[idx]
    r0 = int(flat / img_columns)
    c0 = flat % img_columns
    # Skip pixels whose 5x5 window would fall outside the image.
    if r0 < 2 or c0 < 2 or r0 >= img_row - 2 or c0 >= img_columns - 2:
        continue
    hits = 0
    for r in range(r0 - 2, r0 + 3):
        for c in range(c0 - 2, c0 + 3):
            if img_gray01[r][c] == 255:
                hits += 1
    if hits / 25 >= ratio:
        patch = []
        for r in range(r0 - 2, r0 + 3):
            for c in range(c0 - 2, c0 + 3):
                for ch in range(3):
                    patch.append(img1[r][c][ch])
                for ch in range(3):
                    patch.append(img2[r][c][ch])
        data1.append(patch)
        data_1_count += 1
        img_1[r0][c0] = 255
data1 = np.asarray(data1, np.float32)

# --- Select "unchanged" training samples from the smallest differences ---
# Mirror of the positive-sample loop: candidates come from the tail of the
# ranking and their neighbourhood must be mostly unchanged (0) in the map.
for idx in range(T_num):
    flat = label_change[len(img_change) - idx - 1]
    r0 = int(flat / img_columns)
    c0 = flat % img_columns
    if r0 < 2 or c0 < 2 or r0 >= img_row - 2 or c0 >= img_columns - 2:
        continue
    hits = 0
    for r in range(r0 - 2, r0 + 3):
        for c in range(c0 - 2, c0 + 3):
            if img_gray01[r][c] == 0:
                hits += 1
    if hits / 25 >= ratio:
        patch = []
        for r in range(r0 - 2, r0 + 3):
            for c in range(c0 - 2, c0 + 3):
                for ch in range(3):
                    patch.append(img1[r][c][ch])
                for ch in range(3):
                    patch.append(img2[r][c][ch])
        data0.append(patch)
        data_0_count += 1
        img_1[r0][c0] = 0

# Save a visualisation of where the training samples were taken from.
img_1 = np.asarray(img_1, np.int64)
cv.imwrite(path + 'xunlianyangben.bmp', img_1)
data0 = np.asarray(data0, np.float32)

# --- Balance the two classes and interleave them into one training set ---
c = min(data1.shape[0], data0.shape[0])
for pos, neg in zip(data1[:c], data0[:c]):
    data.append(pos)
    label.append(1)
    data.append(neg)
    label.append(0)

data = np.asarray(data, np.float32)
label = np.asarray(label, np.int32)
print(data_1_count, data_0_count)
print('data.shape:', np.shape(data))


# --- Build test samples for every interior pixel, in two row bands ---
# NOTE(review): the split at row 202 looks tied to a 200-row upper band plus
# the 2-pixel padding -- confirm against the actual image height.  Labels
# come from the coarse binary map (0/1 after dividing 0/255 by 255).
data_test = []
label_test = []
for i in range(202):
    for j in range(img_columns):
        if i < 2 or j < 2 or i >= img_row - 2 or j >= img_columns - 2:
            continue
        patch = []
        for r in range(i - 2, i + 3):
            for c in range(j - 2, j + 3):
                for ch in range(3):
                    patch.append(img1[r][c][ch])
                for ch in range(3):
                    patch.append(img2[r][c][ch])
        data_test.append(patch)
        label_test.append(img_gray01[i][j]/255)
data_test = np.asarray(data_test, np.float32)
label_test = np.asarray(label_test, np.int32)

data_test1 = []
label_test1 = []
for i in range(202, img_row):
    for j in range(img_columns):
        if i < 2 or j < 2 or i >= img_row - 2 or j >= img_columns - 2:
            continue
        patch = []
        for r in range(i - 2, i + 3):
            for c in range(j - 2, j + 3):
                for ch in range(3):
                    patch.append(img1[r][c][ch])
                for ch in range(3):
                    patch.append(img2[r][c][ch])
        data_test1.append(patch)
        label_test1.append(img_gray01[i][j]/255)
data_test1 = np.asarray(data_test1, np.float32)
label_test1 = np.asarray(label_test1, np.int32)

print(data_test1.shape)
print(data_test.shape)

# --- Shuffle the training set and reshape flat samples to 5x5x6 patches ---
perm = np.arange(data.shape[0])
np.random.shuffle(perm)
data = data[perm]
label = label[perm]

data = np.reshape(data, [len(data), 5, 5, 6])
data_test = np.reshape(data_test, [len(data_test), 5, 5, 6])
data_test1 = np.reshape(data_test1, [len(data_test1), 5, 5, 6])
# -----------------Network definition----------------------
# Input: 5x5 patches with 6 channels (RGB of both dates); output: 2 classes.
x = tf.placeholder(tf.float32, shape=[None, 5, 5, 6], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

# First conv layer: 3x3, "same" padding keeps the 5x5 spatial size.
conv1 = tf.layers.conv2d(
    inputs=x,
    filters=32,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))

# Second conv layer (5x5 -> 5x5), then 2x2 max-pool, stride 1 (5x5 -> 4x4).
conv2 = tf.layers.conv2d(
    inputs=conv1,
    filters=32,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=1)

re1 = tf.reshape(pool1, [-1, 4 * 4 * 32])

# Final dense layer emits raw logits (no activation).  BUG FIX: the original
# applied tf.nn.softmax here AND fed the result to
# tf.losses.sparse_softmax_cross_entropy, which applies softmax internally --
# a double softmax that flattens gradients and distorts the loss.  Removing
# the activation fixes training; argmax-based predictions downstream are
# unchanged because softmax is monotonic.
logits = tf.layers.dense(inputs=re1,
                         units=2,
                         activation=None,
                         kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                         kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))

loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield (inputs, targets) mini-batches of exactly *batch_size* samples.

    Trailing samples that do not fill a whole batch are dropped.  With
    shuffle=True the order is randomised via numpy fancy indexing;
    otherwise contiguous slices are yielded.
    """
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            chosen = indices[start:start + batch_size]
        else:
            chosen = slice(start, start + batch_size)
        yield inputs[chosen], targets[chosen]


# --- Train the model (n_epoch can be increased for better convergence) ---
n_epoch = 20
batch_size = 128
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    train_loss, train_acc, n_batch = 0, 0, 0
    for batch_x, batch_y in minibatches(data, label, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc],
                              feed_dict={x: batch_x, y_: batch_y})
        train_loss += err
        train_acc += ac
        n_batch += 1
    print(" train loss: %f" % (train_loss / n_batch))
# print(" train acc: %f" % (train_acc / n_batch))


# --- Run the trained model on the first row band ---
final_img = []
final = sess.run(logits, feed_dict={x: data_test, y_: label_test})
for scores in final:
    # Class 1 ("changed") wins -> white pixel; otherwise (incl. tie) black.
    final_img.append(255 if scores[1] > scores[0] else 0)
print(len(final_img))
k = 0

# Output canvas sized without the 2-pixel padding on each side.
img_final2 = [[125 for _ in range(img_columns - 4)] for _ in range(img_row - 4)]
for r in range(200):
    for c in range(img_columns - 4):
        img_final2[r][c] = final_img[k]
        k += 1

# --- Run the model on the remaining rows, assemble and save the change map ---
final_img1 = []
final1 = sess.run(logits, feed_dict={x: data_test1, y_: label_test1})
for scores in final1:
    final_img1.append(255 if scores[1] > scores[0] else 0)
print(len(final_img1))
k = 0
for r in range(img_row - 4 - 200):
    for c in range(img_columns - 4):
        img_final2[r + 200][c] = final_img1[k]
        k += 1

img_final2 = np.asarray(img_final2, np.uint8)
cv.imwrite(path + 'final/2zhitu_final_not_use_CNN_5_new.bmp', img_final2)
sess.close()
---------------------

猜你喜欢

转载自www.cnblogs.com/liyanyan665/p/11211016.html