if not USE_QUEUE_LOADING:
    print("not use queue loading, just sequential loading...")
    # Sequential path: the whole batch is fed from Python on every step.
    train_input = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_SIZE[0], IMG_SIZE[1], 1))
    train_gt = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMG_SIZE[0], IMG_SIZE[1], 1))
else:
    print("use queue loading")
    # Queue path: single-example placeholders are used only to feed the queue.
    train_input_single = tf.placeholder(tf.float32, shape=(IMG_SIZE[0], IMG_SIZE[1], 1))
    train_gt_single = tf.placeholder(tf.float32, shape=(IMG_SIZE[0], IMG_SIZE[1], 1))
    # FIFO queue with capacity 10000, holding (input, ground-truth) patch
    # pairs of identical shape (IMG_SIZE[0], IMG_SIZE[1], 1).
    q = tf.FIFOQueue(10000, [tf.float32, tf.float32],
                     [[IMG_SIZE[0], IMG_SIZE[1], 1], [IMG_SIZE[0], IMG_SIZE[1], 1]])
    # Enqueues ONE (input, gt) pair per run (the original comment claiming
    # 10000-at-a-time was wrong); dequeues one full batch per run.
    enqueue_op = q.enqueue([train_input_single, train_gt_single])
    train_input, train_gt = q.dequeue_many(BATCH_SIZE)
# NOTE(review): train_list must be shuffled before being split across threads,
# otherwise each thread sees a contiguous (ordered) slice of the data.
if USE_QUEUE_LOADING:
    # Spawn worker threads that read patch files and feed the FIFO queue.
    num_thread = 20
    coord = tf.train.Coordinator()  # coordinates clean shutdown of the workers
    threads = []  # was never initialized in the original — append() would raise
    # Hoisted out of the loop: each thread handles a slice of `length` files.
    length = len(train_list) // num_thread
    for i in range(num_thread):
        t = threading.Thread(target=load_and_enqueue,
                             args=(coord, train_list[i * length:(i + 1) * length],
                                   enqueue_op, train_input_single,
                                   train_gt_single, i, num_thread))
        threads.append(t)
        t.start()
def load_and_enqueue(coord, file_list, enqueue_op, train_input_single, train_gt_single, i, num_thread=1):
    """Worker-thread loop: load (gt, input) patch pairs from .mat files and enqueue them.

    Cycles over *file_list* indefinitely (wrapping around) until the
    coordinator requests a stop or any exception is raised, at which point
    the loop exits after logging.

    Args:
        coord: tf.train.Coordinator polled via should_stop().
        file_list: list of path pairs; each .mat file holds a 'patch' array.
            # assumes pair layout is [0]=ground truth, [1]=input — TODO confirm with caller
        enqueue_op: queue enqueue op fed with the two placeholders below.
        train_input_single: placeholder for one input patch.
        train_gt_single: placeholder for one ground-truth patch.
        i: thread index, used only for logging.
        num_thread: total worker count (currently unused in the body).
    """
    length = len(file_list)
    count = 0
    try:
        while not coord.should_stop():
            # Wrap around the file list; `idx` no longer shadows the thread
            # index `i` (original shadowed it, then logged undefined `idx`).
            idx = count % length
            # Fixes vs. original: loadmat() returns a dict, so index with
            # ['patch'] instead of calling it; IMG_SIZE (img_size was
            # undefined); gt_image (was get_image); enqueue_op (was equeue_op).
            input_image = scipy.io.loadmat(file_list[idx][1])['patch'].reshape([IMG_SIZE[0], IMG_SIZE[1], 1])
            gt_image = scipy.io.loadmat(file_list[idx][0])['patch'].reshape([IMG_SIZE[0], IMG_SIZE[1], 1])
            # NOTE(review): relies on a module-level `sess` — confirm one is
            # created before the worker threads start.
            sess.run(enqueue_op, feed_dict={train_input_single: input_image,
                                            train_gt_single: gt_image})
            count += 1
    except Exception as e:
        print("stopping...", i, e)