Regularization, Dropout, Momentum & Learning Rate, and Cross-Validation

Cross-Validation

import tensorflow as tf

for epoch in range(500):
    # re-split the full 60k samples into 50k train / 10k val every epoch
    idx = tf.range(60000)
    idx = tf.random.shuffle(idx)
    x_train, y_train = tf.gather(x, idx[:50000]), tf.gather(y, idx[:50000])
    x_val, y_val = tf.gather(x, idx[-10000:]), tf.gather(y, idx[-10000:])

    db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    db_train = db_train.map(preprocess).shuffle(50000).batch(128)

    db_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))
    db_val = db_val.map(preprocess).shuffle(10000).batch(128)
# alternatively, let fit() carve out the validation split itself;
# validation_split only works with tensor/array inputs, not a Dataset
network.fit(x, y, epochs=6, validation_split=0.1, validation_freq=2)
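
The epoch loop above draws a fresh random 50k/10k split each time. Classic k-fold cross-validation instead rotates a fixed held-out slice through the whole dataset. Below is a minimal sketch, assuming x and y hold all 60k samples and a hypothetical build_network() returns a freshly compiled model:

k = 6
fold_size = 60000 // k
for fold in range(k):
    # hold out the fold-th slice for validation
    x_val = x[fold * fold_size:(fold + 1) * fold_size]
    y_val = y[fold * fold_size:(fold + 1) * fold_size]
    x_train = tf.concat([x[:fold * fold_size], x[(fold + 1) * fold_size:]], axis=0)
    y_train = tf.concat([y[:fold * fold_size], y[(fold + 1) * fold_size:]], axis=0)
    model = build_network()  # fresh weights for every fold
    model.fit(x_train, y_train, epochs=6, validation_data=(x_val, y_val))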

Regularization

l2_model = keras.models.Sequential([
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu, input_shape=(NUM_WORDS,)),
    keras.layers.Dense(16, kernel_regularizer=keras.regularizers.l2(0.001),
                       activation=tf.nn.relu),
    keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
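
To see the penalty in action, the model can be compiled and trained as usual; the single sigmoid output suggests IMDB-style binary classification. A sketch, assuming hypothetical train_data/train_labels and test_data/test_labels tensors:

l2_model.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy'])
# each kernel_regularizer adds 0.001 * sum(w ** 2) to the training loss
history = l2_model.fit(train_data, train_labels,
                       epochs=20, batch_size=512,
                       validation_data=(test_data, test_labels))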
for step, (x, y) in enumerate(db):
    with tf.GradientTape() as tape:
        out = network(x)  # forward pass
        y_onehot = tf.one_hot(y, depth=10)
        loss = tf.reduce_mean(tf.losses.categorical_crossentropy(
            y_onehot, out, from_logits=True))

        # manual L2 penalty: tf.nn.l2_loss(p) computes sum(p ** 2) / 2
        loss_regularization = []
        for p in network.trainable_variables:
            loss_regularization.append(tf.nn.l2_loss(p))
        loss_regularization = tf.reduce_sum(tf.stack(loss_regularization))

        loss = loss + 0.0001 * loss_regularization

    # compute and apply gradients outside the tape context
    grads = tape.gradient(loss, network.trainable_variables)
    optimizer.apply_gradients(zip(grads, network.trainable_variables))
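
When regularizers are attached at the layer level (as in l2_model above), Keras collects the penalty terms in model.losses, so a custom training loop can add them without walking trainable_variables by hand. A minimal sketch:

with tf.GradientTape() as tape:
    out = l2_model(x, training=True)
    loss = tf.reduce_mean(tf.losses.binary_crossentropy(y, out))
    # model.losses holds one penalty tensor per kernel_regularizer
    loss += tf.add_n(l2_model.losses)
grads = tape.gradient(loss, l2_model.trainable_variables)
optimizer.apply_gradients(zip(grads, l2_model.trainable_variables))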

Momentum and Learning Rate

from tensorflow.keras import optimizers

# SGD with a classic momentum term
optimizer = optimizers.SGD(learning_rate=0.02, momentum=0.9)
# RMSprop accepts a momentum argument as well
optimizer = optimizers.RMSprop(learning_rate=0.02, momentum=0.9)

# Adam handles momentum internally through its beta coefficients
optimizer = optimizers.Adam(learning_rate=0.02, beta_1=0.9, beta_2=0.999)

# linear learning-rate decay: lower the rate by hand once per epoch
optimizer = optimizers.SGD(learning_rate=0.2)
for epoch in range(100):
    optimizer.learning_rate = 0.2 * (100 - epoch) / 100
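
Assigning optimizer.learning_rate by hand works, but tf.keras also ships learning-rate schedules that decay automatically per optimizer step. A sketch using ExponentialDecay:

lr_schedule = optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.2,
    decay_steps=1000,  # decay once every 1000 optimizer steps
    decay_rate=0.96)
optimizer = optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)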

Dropout

from tensorflow.keras import Sequential, layers

network = Sequential([layers.Dense(256, activation='relu'),
                      layers.Dropout(0.5),  # drop 50% of activations while training
                      layers.Dense(128, activation='relu'),
                      layers.Dropout(0.5),
                      layers.Dense(64, activation='relu'),
                      layers.Dense(32, activation='relu'),
                      layers.Dense(10)])  # raw logits
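
Under model.fit(), Keras toggles the training flag automatically: Dropout is active during fit() and disabled in evaluate() and predict(). A minimal usage sketch, assuming db_train and db_val are batched Datasets as built in the cross-validation section, with preprocess one-hot encoding the labels:

network.build(input_shape=(None, 28 * 28))
network.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
                metrics=['accuracy'])
network.fit(db_train, epochs=5, validation_data=db_val)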

for step, (x, y) in enumerate(db):
    with tf.GradientTape() as tape:
        x = tf.reshape(x, (-1, 28 * 28))
        out = network(x, training=True)  # dropout layers are active

# at test time, pass training=False so dropout is disabled
out = network(x, training=False)
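
Dropout is also available as a plain function for hand-written forward passes; unlike the layer under model.fit(), tf.nn.dropout applies dropout unconditionally, so it has to be gated on the training flag yourself:

h = tf.random.normal([4, 8])
# zeros a `rate` fraction of entries and scales the survivors by
# 1 / (1 - rate), keeping the expected activation unchanged
h = tf.nn.dropout(h, rate=0.5)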


Reposted from blog.csdn.net/qq_46456049/article/details/112866868