Implementing Multi-GPU Parallelism in TensorFlow

TensorFlow's open-source multi-GPU training script for the CIFAR-10 dataset: cifar10_multi_gpu_train.py

TensorFlow's open-source CIFAR-10 network definition: cifar10.py
Parallelism in TensorFlow comes in two forms: model parallelism and data parallelism. Model parallelism has to be designed separately for each model; its core idea is to place different computation nodes of the model on different hardware resources. The more general approach, and the one that makes large-scale parallelism easy, is data parallelism: several hardware resources compute the gradients of different batches of data at the same time, and the gradients are then aggregated for a global parameter update.
Data parallelism applies to almost all deep learning models: multiple GPUs can always train on multiple batches simultaneously, with each GPU running a replica of the same neural network, identical in structure and sharing the model parameters.
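Model parallelism itself is easy to express with tf.device, by pinning different parts of one model to different GPUs. A minimal sketch, assuming a toy two-layer network (the layers and sizes here are illustrative, not part of cifar10.py):

import tensorflow as tf

inputs = tf.placeholder(tf.float32, [None, 784])
with tf.device('/gpu:0'):  # first half of the model on GPU 0
    hidden = tf.layers.dense(inputs, 256, activation=tf.nn.relu)
with tf.device('/gpu:1'):  # second half of the model on GPU 1
    logits = tf.layers.dense(hidden, 10)

The rest of this post implements data parallelism: the script below replicates the CIFAR-10 network on every GPU and averages the gradients on the CPU.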

import os
import re
import time
import numpy as np
import tensorflow as tf
import cifar10_input
import cifar10

batch_size = 128
max_steps = 1000
num_gpus = 1  # number of GPUs


# Build the network inside `scope` and return the total loss for that scope
def tower_loss(scope):
    images, labels = cifar10.distorted_inputs()
    logits = cifar10.inference(images)  # build the network
    _ = cifar10.loss(logits, labels)  # the loss is not returned directly; it is put into a collection
    losses = tf.get_collection('losses', scope)  # fetch the losses of the current GPU (restricted by scope)
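    # In the stock cifar10.py, cifar10.loss() adds the cross-entropy term to the
    # 'losses' collection, and the weight-decay (L2) terms are added to the same
    # collection when the variables are created, so the add_n below sums
    # cross-entropy plus regularization.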
    total_loss = tf.add_n(losses, name='total_loss')
    return total_loss


'''
The outer list indexes GPUs; each inner list holds the (gradient, variable)
pairs computed on that GPU:
tower_grads =
[[(grad0_gpu0, var0_gpu0), (grad1_gpu0, var1_gpu0), ...],
 [(grad0_gpu1, var0_gpu1), (grad1_gpu1, var1_gpu1), ...]]
zip(*tower_grads) effectively transposes it:
[[(grad0_gpu0, var0_gpu0), (grad0_gpu1, var0_gpu1), ...],
 [(grad1_gpu0, var1_gpu0), (grad1_gpu1, var1_gpu1), ...]]
'''
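# The same transpose with plain Python lists:
# >>> tower_grads = [[('g0_a', 'v0'), ('g1_a', 'v1')],
# ...                [('g0_b', 'v0'), ('g1_b', 'v1')]]
# >>> list(zip(*tower_grads))
# [(('g0_a', 'v0'), ('g0_b', 'v0')), (('g1_a', 'v1'), ('g1_b', 'v1'))]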


def average_gradients(tower_grads):
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # stack the per-GPU gradients along a new axis 0: shape [num_gpus, ...]
        grads = tf.concat([tf.expand_dims(g, 0) for g, _ in grad_and_vars], 0)
        grad = tf.reduce_mean(grads, 0)  # average over GPUs, back to the variable's shape
        # all towers share variables, so the first tower's variable reference suffices
        grad_and_var = (grad, grad_and_vars[0][1])
        # [(grad0, var0), (grad1, var1), ...]
        average_grads.append(grad_and_var)
    return average_grads
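# Shape trace for one variable across 2 GPUs, each gradient of shape [d]:
# expand_dims -> two tensors of shape [1, d]; concat -> [2, d];
# reduce_mean(axis=0) -> [d], matching the variable's shape again.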


def train():
    # the default compute device is the CPU
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # shape [] means no dimensions, i.e. a scalar
        # trainable=False: not added to GraphKeys.TRAINABLE_VARIABLES, so it is not trained
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)
        num_batches_per_epoch = cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size
        decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
        # https://tensorflow.google.cn/api_docs/python/tf/train/exponential_decay
        # decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps)
        # with staircase=True, global_step / decay_steps is an integer division (stepwise decay)
        lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                        global_step,
                                        decay_steps,
                                        cifar10.LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)
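        # Worked example, assuming the constants in the stock cifar10.py
        # (50,000 training examples, NUM_EPOCHS_PER_DECAY=350, decay factor 0.1):
        # num_batches_per_epoch = 50000 / 128 = 390.625
        # decay_steps = int(390.625 * 350) = 136718
        # so with staircase=True the learning rate is multiplied by 0.1
        # once every 136718 steps of global_step.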
        opt = tf.train.GradientDescentOptimizer(lr)

        tower_grads = []
        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
                    loss = tower_loss(scope)
                    # make the network's variables reusable so that every GPU
                    # (tower) shares exactly the same parameters; from here on
                    # the next towers reuse the existing variables
                    tf.get_variable_scope().reuse_variables()
                    grads = opt.compute_gradients(loss)
                    tower_grads.append(grads)
        grads = average_gradients(tower_grads)
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

        saver = tf.train.Saver(tf.global_variables())
        init = tf.global_variables_initializer()
        # allow_soft_placement=True automatically falls back to an existing,
        # supported device if the requested one is unavailable
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
        sess.run(init)
        tf.train.start_queue_runners(sess=sess)
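        # cifar10.distorted_inputs() feeds data through input queues;
        # start_queue_runners launches the threads that fill those queues.
        # Without it, the sess.run calls below would block forever.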

        for step in range(max_steps):
            start_time = time.time()
            _, loss_value = sess.run([apply_gradient_op, loss])
            duration = time.time() - start_time

            if step % 10 == 0:
                num_examples_per_step = batch_size * num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / num_gpus

                print('step %d, loss=%.2f(%.1f examples/sec;%.3f sec/batch)'
                      % (step, loss_value, examples_per_sec, sec_per_batch))

            if step % 1000 == 0 or (step + 1) == max_steps:
                saver.save(sess, 'model.ckpt', global_step=step)


if __name__ == '__main__':
    train()
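To confirm that ops really land on the intended GPUs, TensorFlow can log device placement when the session is created. A minimal sketch (swap it in for the tf.Session line above):

config = tf.ConfigProto(allow_soft_placement=True,
                        log_device_placement=True)  # log each op's device to stderr
sess = tf.Session(config=config)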

Reposted from blog.csdn.net/winycg/article/details/79759294