tensorflow环境numpy的np.dot函数图文实例详解

说明

参考文档:https://docs.scipy.org/doc/numpy/reference/generated/numpy.dot.html

Examples

>>> np.dot(3, 4)
12

Neither argument is complex-conjugated:

>>> np.dot([2j, 3j], [2j, 3j])
(-13+0j)

For 2-D arrays it is the matrix product:

>>> a = [[1, 0], [0, 1]]
>>> b = [[4, 1], [2, 2]]
>>> np.dot(a, b)
array([[4, 1],
       [2, 2]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
>>> np.dot(a, b)[2,3,2,1,2,2]
499128
>>> sum(a[2,3,2,:] * b[1,2,:,2])
499128


a= 

1    0

0    1

b=

4    1

2    2

c=

4    1

2    2

c[0][0] = a[0][0]*b[0][0] + a[0][1]*b[1][0]  = 1*4 + 0*2 = 4

c[0][1] = a[0][0]*b[0][1] + a[0][1]*b[1][1]  = 1*1 + 0*2 = 1

c[1][0] = a[1][0]*b[0][0] + a[1][1]*b[1][0]  = 0*4 + 1*2 = 2

c[1][1] = a[1][0]*b[0][1] + a[1][1]*b[1][1]  = 0*1 + 1*2 = 2

np.dot([0.100, 0.200], x_data) # 矩阵乘法,本例中 1x2矩阵和2x100矩阵相乘,生成 1x100 矩阵,y_data 是label

[0.100, 0.200]  和 x_data 矩阵相乘

代码

import tensorflow as tf
import numpy as np
# Generate phony data with NumPy: a 2 x 100 matrix of random points.
x_data = np.float32(np.random.rand(2, 100))  # uniform entries in [0, 1), cast to float32
print(type(x_data))  # <class 'numpy.ndarray'>
print(x_data, 'x_data')
'''
[[0.12674105 0.00940122 0.07277795 0.9123788  0.3328406  0.9444381
  0.6055279  0.64667445 0.73974556 0.4120124  0.295901   0.6070144
  0.24027748 0.32522872 0.58303046 0.36997396 0.72491044 0.3085387
  0.98572326 0.56797415 0.77981323 0.9393369  0.8438025  0.84795505
  0.7963426  0.06219643 0.02335597 0.9032827  0.31301862 0.34202585
  0.95036435 0.9879567  0.1707238  0.6908862  0.1874137  0.9538864
  0.34621492 0.05223277 0.9661594  0.6536706  0.25394776 0.15870926
  0.25546506 0.8021634  0.5342054  0.9650746  0.14164777 0.11355668
  0.68016535 0.49662435 0.44676393 0.96647537 0.74680984 0.97783595
  0.16987218 0.5165892  0.03670025 0.1161337  0.92863137 0.5561378
  0.94904417 0.3074884  0.2937792  0.2988969  0.10077494 0.44481236
  0.2673188  0.1730211  0.92676103 0.0203045  0.8566593  0.3216042
  0.1956624  0.893628   0.07471467 0.47818503 0.50530815 0.47087556
  0.55530554 0.62106043 0.02671042 0.20506936 0.8529999  0.2791799
  0.13248189 0.10684191 0.99057996 0.81578094 0.3560043  0.9941732
  0.01211992 0.70294666 0.874204   0.17011194 0.57243586 0.46096712
  0.55362546 0.13103732 0.33205432 0.9112315 ]
 [0.41159454 0.8030941  0.8494727  0.07223957 0.8198312  0.3327569
  0.3395143  0.36160228 0.4212181  0.928957   0.78998893 0.77986264
  0.41591215 0.04196386 0.27376962 0.48752084 0.16496418 0.10931992
  0.76396525 0.75966537 0.56203276 0.01275572 0.36627325 0.5705694
  0.21870467 0.15705732 0.64641947 0.35116425 0.0925107  0.06669281
  0.70159847 0.7892662  0.92388797 0.10669845 0.4495063  0.25164276
  0.00411235 0.851238   0.16079018 0.63931656 0.8260055  0.37639168
  0.23106015 0.89222825 0.24110495 0.99796605 0.40152848 0.5485328
  0.29641378 0.2995646  0.66540414 0.17631945 0.7019764  0.14118035
  0.9841724  0.477139   0.49928534 0.17205484 0.71616805 0.53298604
  0.12909463 0.8093505  0.5223442  0.9620953  0.50029945 0.08471368
  0.273831   0.39023155 0.79685265 0.71078175 0.81288815 0.72806597
  0.43924046 0.27451718 0.75727195 0.8504235  0.31160808 0.46692026
  0.6874157  0.89696825 0.27840462 0.7677089  0.367681   0.39661327
  0.47909454 0.98150015 0.3876977  0.6117366  0.5798809  0.76738846
  0.02788231 0.5634784  0.5136815  0.6387473  0.06345847 0.25339594
  0.51389307 0.7180905  0.90238345 0.7164283 ]] x_data
'''
# Matrix product: (1x2) dot (2x100) -> a length-100 vector; y_data is the label.
y_data = np.dot([0.100, 0.200], x_data) + 0.30
# y_data[0] = 0.1 * x_data[0][0] + 0.2 * x_data[1][0] + 0.3
#           = 0.1 * 0.12674105   + 0.2 * 0.41159454  + 0.3
# Renamed from `sum` so the builtin sum() is not shadowed.
first_y = 0.1 * x_data[0][0] + 0.2 * x_data[1][0] + 0.3
print(first_y)  # e.g. 0.3949930131435394
print(0.1 * x_data[0][0] + 0.2 * x_data[1][0] + 0.3)  # same value, computed inline

print(type(y_data))  # <class 'numpy.ndarray'>
print(y_data, 'y_data')
'''
[0.39499301 0.46155894 0.47717234 0.40568579 0.4972503  0.46099519
 0.42845564 0.4369879  0.45821818 0.52699264 0.48758789 0.51667397
 0.40721018 0.34091564 0.41305697 0.43450156 0.40548388 0.35271785
 0.55136538 0.50873049 0.49038787 0.39648483 0.4576349  0.49890938
 0.4233752  0.33763111 0.43161949 0.46056112 0.349804   0.34754115
 0.53535613 0.55664892 0.50184997 0.39042831 0.40864263 0.44571719
 0.33544396 0.47547088 0.42877398 0.49323037 0.49059588 0.39114926
 0.37175854 0.55866199 0.40164153 0.59610067 0.39447047 0.42106222
 0.42729929 0.40957536 0.47775722 0.43191143 0.51507627 0.42601967
 0.5138217  0.44708672 0.40352709 0.34602434 0.53609675 0.46221099
 0.42072334 0.49261894 0.43384675 0.52230875 0.41013738 0.36142397
 0.38149808 0.39534842 0.55204663 0.4441868  0.54824356 0.47777361
 0.40741433 0.44426624 0.45892586 0.51790321 0.41285243 0.44047161
 0.4930137  0.54149969 0.35835197 0.47404872 0.45883619 0.40724064
 0.4090671  0.50698422 0.47659754 0.50392541 0.45157661 0.55289502
 0.30678845 0.48299035 0.49015669 0.44476065 0.36993528 0.3967759
 0.45814116 0.45672183 0.51368212 0.53440881] y_data
'''
# Build a linear model y = W @ x_data + b (TensorFlow 1.x graph API).
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))  # 1x2 weights, uniform in [-1, 1)
y = tf.matmul(W, x_data) + b

# Minimize the mean squared error between prediction y and labels y_data.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize variables. tf.initialize_all_variables() is deprecated;
# tf.global_variables_initializer() is the supported, behaviorally identical op.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the plane: 201 gradient-descent steps, logging every 20 steps.
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))

'''
0 [[ 0.07569373 -0.20223653]] [1.1474622]
20 [[-0.00667263 -0.00587752]] [0.46395978]
40 [[0.05893954 0.12751232]] [0.3595805]
60 [[0.08486852 0.17405346]] [0.3215561]
80 [[0.09450016 0.19066   ]] [0.30778757]
100 [[0.09801002 0.19663146]] [0.30281204]
120 [[0.09928108 0.19878435]] [0.30101523]
140 [[0.09974041 0.1995612 ]] [0.30036652]
160 [[0.09990627 0.19984157]] [0.30013233]
180 [[0.09996617 0.19994283]] [0.30004776]
200 [[0.0999878  0.19997936]] [0.30001724]
'''
# Best fit converges to W: [[0.100 0.200]], b: [0.300]

猜你喜欢

转载自blog.csdn.net/wyx100/article/details/80467130
今日推荐