Python Learning Notes 2

Environment: Anaconda + PyCharm. Note: be sure to add TensorFlow to the PyCharm project interpreter.
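As a quick sanity check (my own sketch, not part of the original notes), you can run a couple of lines inside PyCharm to confirm that the selected interpreter can actually import TensorFlow:

# Sanity check: confirm the PyCharm interpreter sees TensorFlow
import sys
import tensorflow as tf

print(sys.executable)   # path of the interpreter PyCharm is using
print(tf.__version__)   # the code in these notes expects a 1.x version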

2-3 Fetch and Feed

import tensorflow as tf
import os            # suppress TensorFlow's informational/warning log output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# 2-3 fetch and feed
# Fetch: run several ops in a single sess.run call
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)

add = tf.add(input2, input3)
mu1 = tf.multiply(input1, add)

with tf.Session() as sess:
    result = sess.run([mu1, add])
    print(result)

# Feed
# Create placeholders; their values are supplied at run time via feed_dict
input4 = tf.placeholder(tf.float32)
input5 = tf.placeholder(tf.float32)
output = tf.multiply(input4, input5)
with tf.Session() as sess:
    # Feed data is passed in as a dictionary
    print(sess.run(output, feed_dict={input4: [7.1], input5: [2.2]}))
"D:\Program files\Anaconda3\envs\Scripts\python.exe" "D:/software date/python data/TF2_3.py"
[21.0, 7.0]
[15.62]


Process finished with exit code 0
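The placeholders are not limited to single values; the same graph accepts whole arrays. A small sketch (my addition, with hypothetical names a, b, prod) that feeds two length-3 vectors in one call:

import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Same Feed pattern as above, but with vector inputs
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
prod = tf.multiply(a, b)

with tf.Session() as sess:
    # Element-wise product of two length-3 vectors, fed in a single run call
    print(sess.run(prod, feed_dict={a: [1.0, 2.0, 3.0], b: [4.0, 5.0, 6.0]}))
    # Expected output: [ 4. 10. 18.]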

2-4 Using TensorFlow to fit y = kx + b

import tensorflow as tf
import numpy as np
import os            # suppress TensorFlow's informational/warning log output
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Use NumPy to generate 100 random points (ground truth: y = 0.1x + 0.2)
x_data = np.random.rand(100)
y_data = x_data*0.1 + 0.2

# Build a linear model
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k*x_data+b

# Quadratic (mean squared error) cost function
loss = tf.reduce_mean(tf.square(y_data-y))
# Define a gradient descent optimizer with learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# Training op: minimize the cost function
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step%20 == 0:
            print(step, sess.run([k, b]))
"D:\Program files\Anaconda3\envs\Scripts\python.exe" "D:/software date/python data/TF2_4examply.py"
0 [0.055051524, 0.10061687]
20 [0.104488775, 0.19753294]
40 [0.10271263, 0.19850917]
60 [0.10163926, 0.19909908]
80 [0.100990616, 0.19945557]
100 [0.10059865, 0.19967099]
120 [0.100361764, 0.19980118]
140 [0.100218624, 0.19987985]
160 [0.10013211, 0.19992739]
180 [0.100079834, 0.19995612]
200 [0.10004826, 0.19997348]


Process finished with exit code 0
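To see the cost value shrink alongside k and b, the training loop can fetch loss as well, using the same Fetch idea from section 2-3. This is a variant I added, not part of the original run:

with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Fetch k, b and the current loss in one run call
            k_val, b_val, loss_val = sess.run([k, b, loss])
            print(step, k_val, b_val, loss_val)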

Reposted from blog.csdn.net/qq_39683287/article/details/80414960