Coursera Deep Learning, Course 4 Week 4

Face recognition

  1. face verification vs. face recognition

  2. one-shot learning
    For example, suppose you want to build a face recognition system for your company. In practice you will not have many photos of each employee, so if you follow the usual approach of feeding in an image and classifying it with a softmax output, the results will be poor because you do not have enough training samples. Moreover, every time a new employee joins, the softmax layer would need an extra output unit, which is impractical.

  3. similarity function

  4. siamese network
    Train a network that takes an image as input and outputs a vector (128-dimensional in the slide). This output vector can be viewed as an encoding of the input image; two encodings are then compared with the similarity function from the previous section, as sketched below.
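
    For concreteness, here is a minimal NumPy sketch of verification with such encodings; f_img1 and f_img2 stand for the two 128-dimensional encodings, and the threshold tau is a hypothetical tuning parameter:

import numpy as np

def same_person(f_img1, f_img2, tau=0.7):
    # f_img1, f_img2: 128-d encodings produced by the siamese network
    # tau: hypothetical decision threshold, tuned on a verification set
    d = np.sum(np.square(f_img1 - f_img2))  # squared L2 distance ||f(x1) - f(x2)||^2
    return d < tau                          # small distance -> predict "same person"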

  5. triplet loss
    The three images A, P, N should not be chosen at random, because random triplets satisfy the inequality in the slide too easily, so backpropagation learns very little. The paper cited at the bottom left of the slide discusses this in detail.
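
    For reference, a minimal TensorFlow 1.x sketch of the triplet loss in the spirit of the course's face-recognition assignment, assuming anchor, positive and negative are batches of encodings of shape (m, 128) and alpha is the margin:

import tensorflow as tf

def triplet_loss(anchor, positive, negative, alpha=0.2):
    # squared distances ||f(A)-f(P)||^2 and ||f(A)-f(N)||^2, one value per example
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    # hinge on the margin: max(pos_dist - neg_dist + alpha, 0), summed over the batch
    loss = tf.reduce_sum(tf.maximum(pos_dist - neg_dist + alpha, 0.0))
    return loss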

  6. variants
    Instead of the triplet loss, you can put a logistic regression layer on top of the two encodings and train face verification as a binary classification problem.
    precompute: the system can store just the precomputed encoding of each enrolled person; a new input image is run through the network to get its encoding, which is then compared with the stored ones. This saves space and computation.
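
    A minimal sketch of this variant, assuming w (shape (128,)) and b are the learned logistic-regression parameters (names chosen here for illustration):

import numpy as np

def predict_same(f_img1, f_img2, w, b):
    # element-wise absolute differences of the two encodings are the features
    features = np.abs(f_img1 - f_img2)
    # logistic regression on top: sigmoid(w . features + b) ~ P(same person)
    return 1.0 / (1.0 + np.exp(-(np.dot(w, features) + b)))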

Neural style transfer

Loss function
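The slides are not reproduced here; for reference, these are the cost functions that the code below implements (a^{(C)}, a^{(S)}, a^{(G)} denote the chosen hidden-layer activations for the content image C, the style image S and the generated image G):

J_{content}(C, G) = \frac{1}{4\, n_H n_W n_C} \sum_{\text{all entries}} \left( a^{(C)} - a^{(G)} \right)^2    (1)

G_{ij} = \sum_{k} A_{ik} A_{jk}    (Gram matrix of the unrolled activations A, of shape (n_C, n_H n_W))

J^{[l]}_{style}(S, G) = \frac{1}{4\, n_C^2 (n_H n_W)^2} \sum_{i=1}^{n_C} \sum_{j=1}^{n_C} \left( G^{(S)}_{ij} - G^{(G)}_{ij} \right)^2    (2)

J_{style}(S, G) = \sum_{l} \lambda^{[l]} J^{[l]}_{style}(S, G), \qquad J(G) = \alpha\, J_{content}(C, G) + \beta\, J_{style}(S, G)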

import scipy.misc
import numpy as np
import tensorflow as tf
from matplotlib.pyplot import imshow
# load_vgg_model, reshape_and_normalize_image, save_image and CONFIG are assumed
# to come from the course assignment's helper module (nst_utils).
from nst_utils import *

def compute_content_cost(a_C, a_G):
    """
    Computes the content cost

    Arguments:
    a_C -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image C
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing content of the image G
    Returns:
    J_content -- scalar that you compute using equation 1 above.
    """
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    # Reshape a_C and a_G into shape (n_C, n_H*n_W) (≈2 lines)
    a_C_unrolled = tf.reshape(tf.transpose(a_C, perm=[0, 3, 1, 2]), [n_C, -1])
    a_G_unrolled = tf.reshape(tf.transpose(a_G, perm=[0, 3, 1, 2]), [n_C, -1])
    # Use tf.square (not np.square) so the operation stays inside the TensorFlow graph
    J_content = (1 / (4 * n_H * n_W * n_C)) * tf.reduce_sum(tf.square(a_C_unrolled - a_G_unrolled))
    return J_content
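
A quick sanity check on random activations (a sketch along the lines of the assignment's test cell; the exact value printed is not the point):

tf.reset_default_graph()
with tf.Session() as test:
    tf.set_random_seed(1)
    a_C = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_content = compute_content_cost(a_C, a_G)
    print("J_content = " + str(test.run(J_content)))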

def gram_matrix(A):
    """
    Argument:
    A -- matrix of shape (n_C, n_H*n_W)

    Returns:
    GA -- Gram matrix of A, of shape (n_C, n_C)
    """
    GA = tf.matmul(A, tf.transpose(A))
    return GA

def compute_layer_style_cost(a_S, a_G):
    """
    Arguments:
    a_S -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image S 
    a_G -- tensor of dimension (1, n_H, n_W, n_C), hidden layer activations representing style of the image G

    Returns: 
    J_style_layer -- tensor representing a scalar value, style cost defined above by equation (2)
    """
    # Retrieve dimensions from a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    # Reshape the images to have them of shape (n_C, n_H*n_W) (≈2 lines)
    a_S = tf.reshape(tf.transpose(a_S, perm=[0,3,1,2]),[n_C, -1])
    a_G = tf.reshape(tf.transpose(a_G, perm=[0,3,1,2]),[n_C, -1])
    # Computing gram_matrices for both images S and G (≈2 lines)
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)
    # Computing the loss (≈1 line)
    J_style_layer = (1 / (4 * (n_C ** 2) * ((n_H * n_W) ** 2))) * tf.reduce_sum(tf.square(GS - GG))
    return J_style_layer
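
A similar sanity check for the layer style cost, again just a sketch on random activations:

tf.reset_default_graph()
with tf.Session() as test:
    tf.set_random_seed(1)
    a_S = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    a_G = tf.random_normal([1, 4, 4, 3], mean=1, stddev=4)
    J_style_layer = compute_layer_style_cost(a_S, a_G)
    print("J_style_layer = " + str(test.run(J_style_layer)))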

STYLE_LAYERS = [
    ('conv1_1', 0.2),
    ('conv2_1', 0.2),
    ('conv3_1', 0.2),
    ('conv4_1', 0.2),
    ('conv5_1', 0.2)]

def compute_style_cost(model, STYLE_LAYERS):
    """
    Computes the overall style cost from several chosen layers 
    Arguments:
    model -- our tensorflow model
    STYLE_LAYERS -- A python list containing:
                        - the names of the layers we would like to extract style from
                        - a coefficient for each of them

    Returns: 
    J_style -- tensor representing a scalar value, style cost defined above by equation (2)
    """

    # initialize the overall style cost
    J_style = 0
    for layer_name, coeff in STYLE_LAYERS:
        # Select the output tensor of the currently selected layer
        out = model[layer_name]

        # Set a_S to be the hidden layer activation from the layer we have selected, by running the session on out
        a_S = sess.run(out)

        # Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name] 
        # and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
        # when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
        a_G = out

        # Compute style_cost for the current layer
        J_style_layer = compute_layer_style_cost(a_S, a_G)

        # Add coeff * J_style_layer of this layer to overall style cost
        J_style += coeff * J_style_layer

    return J_style

def total_cost(J_content, J_style, alpha = 10, beta = 40):
    """
    Computes the total cost function

    Arguments:
    J_content -- content cost coded above
    J_style -- style cost coded above
    alpha -- hyperparameter weighting the importance of the content cost
    beta -- hyperparameter weighting the importance of the style cost

    Returns:
    J -- total cost as defined by the formula above.
    """
    J = alpha*J_content + beta*J_style
    return J 

# Reset the graph 
tf.reset_default_graph()
# Start interactive session
sess = tf.InteractiveSession()
content_image = scipy.misc.imread("images/louvre_small.jpg")
content_image = reshape_and_normalize_image(content_image)
style_image = scipy.misc.imread("images/monet.jpg")
style_image = reshape_and_normalize_image(style_image)
# Now we initialize the "generated" image as a noisy image created from the content_image. Initializing its pixels to be mostly noise but still slightly correlated with the content image helps the content of the "generated" image match the content of the "content" image more rapidly.
def generate_noise_image(content_image, noise_ratio = CONFIG.NOISE_RATIO):
    """
    Generates a noisy image by adding random noise to the content_image
    """
    # Generate a random noise_image
    noise_image = np.random.uniform(-20, 20, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')  
    # Set the input_image to be a weighted average of the content_image and a noise_image
    input_image = noise_image * noise_ratio + content_image * (1 - noise_ratio)   
    return input_image

generated_image = generate_noise_image(content_image)
imshow(generated_image[0])
print(generated_image.shape)
model = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
# Assign the content image to be the input of the VGG model.  
sess.run(model['input'].assign(content_image))

# Select the output tensor of layer conv4_2
out = model['conv4_2']

# Set a_C to be the hidden layer activation from the layer we have selected
a_C = sess.run(out)
print(a_C.shape)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2'] 
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
print(a_G)
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
sess.run(model['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(model, STYLE_LAYERS)
J = total_cost(J_content, J_style, alpha = 10, beta = 40)
optimizer = tf.train.AdamOptimizer(2.0)
train_step = optimizer.minimize(J)

def model_nn(sess, input_image, num_iterations = 200):
    # Initialize global variables (you need to run the session on the initializer)
    sess.run(tf.global_variables_initializer())
    # Run the noisy input image (initial generated image) through the model. Use assign().
    sess.run(model['input'].assign(input_image))    
    for i in range(num_iterations):

        # Run the session on the train_step to minimize the total cost
        sess.run(train_step)  
        # Compute the generated image by running the session on the current model['input']
        generated_image = sess.run( model["input"])
        # Print every 20 iterations.
        if i%20 == 0:
            Jt, Jc, Js = sess.run([J, J_content, J_style])
            print("Iteration " + str(i) + " :")
            print("total cost = " + str(Jt))
            print("content cost = " + str(Jc))
            print("style cost = " + str(Js))     
            # save current generated image in the "/output" directory
            save_image("output/" + str(i) + ".png", generated_image)
    # save last generated image
    save_image('output/generated_image.jpg', generated_image) 
    return generated_image
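
Finally, the whole optimization is kicked off by calling model_nn on the noisy initial image, as in the course notebook:

model_nn(sess, generated_image)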

Reposted from blog.csdn.net/yb564645735/article/details/78897320