TRICKS IN DEEP LEARNING
NOTES IN THIS DOC COME WITH ONLY BRIEF EXPLANATIONS, RECORDED DURING DAILY STUDY
Last update 2018.4.7
############################################################################
1、Variable Initialization
-----Initializing variables
var = tf.Variable(tf.random_normal([2, 3], stddev=0.2, mean=0.0))
tf.random_normal()     # normal distribution
tf.truncated_normal()  # normal distribution, values beyond 2 stddev are resampled
tf.random_uniform()    # uniform distribution
tf.random_gamma()      # gamma distribution
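-----A minimal sketch (the shapes and variable names are illustrative) showing these initializers in use:
import tensorflow as tf

w_normal    = tf.Variable(tf.random_normal([3, 4], mean=0.0, stddev=0.2))
w_truncated = tf.Variable(tf.truncated_normal([3, 4], stddev=0.1))
w_uniform   = tf.Variable(tf.random_uniform([3, 4], minval=-1.0, maxval=1.0))
b_zero      = tf.Variable(tf.zeros([4]))     # biases are usually initialized to zero

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())   # actually assigns the initial values
    print(sess.run(w_truncated))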
############################################################################
2、Loss Functions
--A--Cross entropy H(p,q) measures the distance between two probability distributions; commonly used for classification
-- y_ denotes the ground-truth labels
cross_entropy = -tf.reduce_mean(
    y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0))
)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
Since cross entropy is usually used together with softmax regression, TensorFlow wraps the two into one function:
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
which computes the cross entropy on the softmax of the logits
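--A minimal sketch (assuming y holds the raw logits and y_ the one-hot labels) of reducing the wrapped op to a scalar loss:
ce_per_example = tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_)
cross_entropy = tf.reduce_mean(ce_per_example)   # the op returns one value per example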
--B---MSE (mean squared error), commonly used for regression
-- y_ denotes the ground-truth values
mse = tf.reduce_mean(tf.square(y_ - y))
------Basic building blocks for custom loss functions (see the sketch below):
tf.reduce_sum(); tf.where() (formerly tf.select()); tf.greater()
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                    reduction_indices=[1]))
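------A minimal sketch (the penalty constants a and b are illustrative) of an asymmetric custom loss built from tf.greater / tf.where:
import tensorflow as tf

a, b = 10.0, 1.0    # illustrative: cost of under-prediction vs. over-prediction
y  = tf.placeholder(tf.float32, shape=(None, 1))   # predictions
y_ = tf.placeholder(tf.float32, shape=(None, 1))   # ground truth

# element-wise: charge b per unit of over-prediction, a per unit of under-prediction
loss = tf.reduce_sum(tf.where(tf.greater(y, y_),
                              (y - y_) * b,
                              (y_ - y) * a))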
############################################################################
3、weights_with_L2_loss
def weights_with_loss(shape, wl=None):
    """
    Create a weight variable whose L2 loss is added to the "loss" collection.
    The total loss can later be computed with
        loss = tf.add_n(tf.get_collection("loss"), name='total_loss')
    weights_with_loss is usually not applied to the first or the last layer;
    it is most often used on fully-connected layers.
    :param shape: weights_shape
    :param wl: weights_loss_ratio
    :return: weights
    """
    w = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.01, dtype=tf.float32))
    if wl is not None:
        weights_loss = tf.multiply(tf.nn.l2_loss(w), wl, name='weights_loss')
        tf.add_to_collection("loss", weights_loss)
    return w
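----A minimal usage sketch (the layer shapes, the 0.004 ratio, and x / y_ / b1 / b2 are illustrative) combining the collected weight losses with the data loss:
w1 = weights_with_loss([784, 256], wl=0.004)
w2 = weights_with_loss([256, 10], wl=0.004)
hidden = tf.nn.relu(tf.matmul(x, w1) + b1)
logits = tf.matmul(hidden, w2) + b2

data_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_))
tf.add_to_collection("loss", data_loss)   # the data loss joins the same collection
loss = tf.add_n(tf.get_collection("loss"), name='total_loss')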
############################################################################
4、batch_normalization
def batch_normalization(self, input, decay=0.9, eps=1e-5):
    """
    Batch Normalization
    Benefits:
        * Reduces the need for dropout
        * Less sensitivity to initial values (e.g. weights, biases)
        * Accelerates convergence
        * Allows a larger learning rate
    Usage: apply right after conv layers
    Args: output of a convolution or fully-connected layer
    Returns: normalized batch
    """
    shape = input.get_shape().as_list()
    n_out = shape[-1]
    beta = tf.Variable(tf.zeros([n_out]))
    gamma = tf.Variable(tf.ones([n_out]))
    if len(shape) == 2:                                    # fully-connected: normalize over the batch axis
        batch_mean, batch_var = tf.nn.moments(input, [0])
    else:                                                  # conv: normalize over batch, height, width
        batch_mean, batch_var = tf.nn.moments(input, [0, 1, 2])
    ema = tf.train.ExponentialMovingAverage(decay=decay)

    def mean_var_with_update():
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    # training: use batch statistics and update the moving averages
    # inference: use the moving averages accumulated during training
    mean, var = tf.cond(self.train_phase, mean_var_with_update,
                        lambda: (ema.average(batch_mean), ema.average(batch_var)))
    return tf.nn.batch_normalization(input, mean, var, beta, gamma, eps)
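----A minimal usage sketch (x, kernel and the surrounding model class are illustrative; train_phase is the tf.bool placeholder the helper expects):
self.train_phase = tf.placeholder(tf.bool, name='train_phase')

conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')
bn   = self.batch_normalization(conv)   # normalize before the non-linearity
act  = tf.nn.relu(bn)

# feed_dict at run time:
#   training step: {self.train_phase: True}
#   evaluation:    {self.train_phase: False}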
############################################################################
5、LRN
def LRN(x, R, alpha, beta, name=None, bias=1.0):
    """
    LRN (local response normalization), applied right after conv layers
    :param x: input tensor
    :param R: depth_radius
    :param alpha: alpha in the math formula
    :param beta: beta in the math formula
    :param name: optional op name
    :param bias: additive bias k in the math formula
    :return: normalized tensor
    """
    return tf.nn.local_response_normalization(x, depth_radius=R, alpha=alpha,
                                              beta=beta, bias=bias, name=name)
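----A minimal usage sketch (the AlexNet-style hyper-parameters and kernel1 / bias1 are illustrative) applying LRN after conv + relu:
conv1 = tf.nn.relu(tf.nn.conv2d(x, kernel1, strides=[1, 4, 4, 1], padding='SAME') + bias1)
norm1 = LRN(conv1, R=4, alpha=0.001 / 9.0, beta=0.75, name='norm1')
pool1 = tf.nn.max_pool(norm1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')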
############################################################################
6、gradient descent
----gradient descent & backpropagation
gradient descent: mainly used to optimize the value of a single parameter [the gradient is simply the first derivative]
backpropagation: provides an efficient way to apply gradient descent to all parameters
Note:
(1) gradient descent does not guarantee a global optimum
(2) the loss function sums the loss over all training data, so full gradient descent is very slow per step
gradient descent <---- Adam (a compromise: compute the loss over one batch per step) ----> SGD
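----A minimal sketch (batch_size, next_batch(), x, y_ and loss are illustrative) of the mini-batch compromise in a training loop:
batch_size = 128
train_step = tf.train.AdamOptimizer(0.001).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):
        # each step computes the loss/gradients on one small batch, not the whole dataset
        xs, ys = next_batch(batch_size)   # hypothetical data feeder
        sess.run(train_step, feed_dict={x: xs, y_: ys})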
############################################################################
7、learning rate
-----learning_rate determines how much each parameter is updated at every step
-----decayed_learning_rate:
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(
    0.1, global_step, 100, 0.96, staircase=True)
.....
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
After every 100 steps the learning rate is multiplied by 0.96 (staircase=True)
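-----Sketch of the decay rule exponential_decay implements, using the numbers from the example above:
# decayed_learning_rate = initial_rate * decay_rate ** (global_step / decay_steps)
# with staircase=True the exponent is truncated to an integer:
#   steps   0- 99 -> 0.1 * 0.96**0 = 0.1
#   steps 100-199 -> 0.1 * 0.96**1 = 0.096
#   steps 200-299 -> 0.1 * 0.96**2 = 0.09216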
############################################################################
8、full connection
----A classic fully-connected layer:
tf.nn.relu(tf.matmul(x, w) + biases)
----Fully-connected layers are usually combined with dropout to prevent overfitting (see the sketch below)
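----A minimal sketch (w1 / b1 / w2 / b2 and the keep probability are illustrative) of a fully-connected layer followed by dropout:
keep_prob = tf.placeholder(tf.float32)       # e.g. 0.5 during training, 1.0 at inference
fc1 = tf.nn.relu(tf.matmul(x, w1) + b1)
fc1_drop = tf.nn.dropout(fc1, keep_prob)     # randomly zero activations, rescale the rest by 1/keep_prob
logits = tf.matmul(fc1_drop, w2) + b2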
############################################################################
9、PCA
import numpy as np
import matplotlib.pyplot as plt

def RGB_PCA(images):
    """PCA over RGB pixel values, used for PCA-based color augmentation."""
    pixels = images.reshape(-1, images.shape[-1])
    # sample up to 1,000,000 pixels so the covariance stays cheap to compute
    idx = np.random.randint(0, pixels.shape[0], 1000000)
    pixels = pixels[idx]
    pixels = np.array(pixels, dtype=np.uint8).T
    m = np.mean(pixels) / 256.
    C = np.cov(pixels) / (256. * 256.)
    l, v = np.linalg.eig(C)          # eigenvalues / eigenvectors of the 3x3 channel covariance
    return l, v, m

def RGB_variations(image, eig_val, eig_vec):
    # perturb each channel along the principal components, scaled by random normals
    a = np.random.randn(3)
    v = np.array([a[0] * eig_val[0], a[1] * eig_val[1], a[2] * eig_val[2]])
    variation = np.dot(eig_vec, v)
    return image + variation

l, v, m = RGB_PCA(img)               # img: an H x W x 3 RGB image
img = RGB_variations(img, l, v)
plt.imshow(img)