import tensorflow as tf
# Defining variables
# tf.Variable(initial_value, name=None)
var = tf.Variable(0.0, name='var')
# tf.get_variable(name, shape=None, initializer=None)
get_var = tf.get_variable('get_var', shape=[2], initializer=tf.zeros_initializer())
# Difference: tf.Variable resolves name clashes automatically (it uniquifies the name), while tf.get_variable raises an error on a clash
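# A minimal sketch of that difference, assuming a fresh default graph (the names
# 'w' and 'v' are just for illustration): tf.Variable silently renames, tf.get_variable raises.
a = tf.Variable(0.0, name='w')
b = tf.Variable(0.0, name='w')   # accepted, ends up as 'w_1:0'
print(a.name, b.name)            # w:0 w_1:0
c = tf.get_variable('v', shape=[], initializer=tf.zeros_initializer())
try:
    tf.get_variable('v', shape=[], initializer=tf.zeros_initializer())
except ValueError as err:
    print('duplicate name rejected:', err)   # reuse must be requested explicitly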
# Constants: tf.constant(value, dtype=None, shape=None, name='Const')
con_var = tf.Variable(tf.constant([1,2,3,4,5,6,7]))
#output:con_var = [1,2,3,4,5,6,7]
con_var = tf.Variable(tf.constant(1.0, shape=[2, 3]))
#output: con_var = [[1.0,1.0,1.0], [1.0,1.0,1.0]]
#constant_initializer
con_var = [1,2,3,4,5,6]
con_in_var= tf.constant_initializer(con_var)
x = tf.get_variable('x', shape=[2, 3], initializer=con_in_var)
#output:x=[[1,2,3],[4,5,6]]
# If the requested shape holds more elements than the initializer provides, the last value is repeated to fill the remainder
x2 = tf.get_variable('x2', shape=[2, 4], initializer=con_in_var)
#output:x2 = [[1,2,3,4],[5,6,6,6]]
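# A quick check of the two variables above, assuming they live in the current
# default graph (tf.get_variable defaults to float32, so the values print as floats):
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(x))    # [[1. 2. 3.] [4. 5. 6.]]
    print(sess.run(x2))   # [[1. 2. 3. 4.] [5. 6. 6. 6.]]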
# Zeros/ones: tf.zeros([row_dim, col_dim]) / tf.ones([row_dim, col_dim])
zero_var = tf.Variable(tf.zeros([2,2]))
#output:zero_var = [[0,0], [0,0]]
# Tensors with the same shape as an existing one: tf.zeros_like(var) / tf.ones_like(var)
zero_similar = tf.zeros_like(zero_var)
#output:zero_similar = [[0,0], [0,0]]
# Fill with a constant: tf.fill([row_dim, col_dim], constant)
fill_var = tf.Variable(tf.fill([2,3], -1))
#output:fill_var = [[-1,-1,-1],[-1,-1,-1]]
# Evenly spaced sequence: tf.linspace(start, stop, num, name=None), also spelled tf.lin_space
lin_var = tf.Variable(tf.linspace(10.0, 12.0, 3, name='linspace'))
#output:lin_var = [10.0, 11.0, 12.0]
# Note: start and stop themselves are the first and last values of the sequence (both endpoints included)
# Arithmetic sequence: tf.range(start, limit, delta, dtype=None, name='range')
range_var = tf.Variable(tf.range(3, 18, 3))
#output:range_var = [3, 6, 9, 12, 15]
# Note: the values never reach limit; limit itself is excluded
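# tf.range also accepts float arguments; a small sketch (the variable name is
# just for illustration), where the limit 1.0 is again excluded:
float_range = tf.Variable(tf.range(0.0, 1.0, 0.25))
#output:float_range = [0.0, 0.25, 0.5, 0.75]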
# Random numbers: tf.random_<type>()
#tf.random_normal(shape, mean = 0, stddev = 1.0, dtype = tf.float32, seed = None, name = None)
# Draw random values from a normal distribution
ran_nor_var = tf.Variable(tf.random_normal([2, 3], mean = 0, stddev = 1.0))
#output:ran_nor_var = [[ 0.90410209,  1.26647866, -0.2628631 ],
#                      [-0.79755908,  0.35044804, -0.36429995]]
# When seed is set to an integer, the op generates the same random values every time (reproducible)
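# A minimal sketch of operation-level seeding: with the same seed, each fresh
# session yields the same draws (the seed value 42 is arbitrary):
seeded = tf.random_normal([2], seed=42)
with tf.Session() as s1, tf.Session() as s2:
    print(s1.run(seeded))   # identical to the next line
    print(s2.run(seeded))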
#tf.random_uniform(shape, minval = 0, maxval = None, dtype = tf.float32, seed = None, name = None)
# Draw random values from a uniform distribution
ran_uni_var = tf.Variable(tf.random_uniform([2, 3], 2, 4))
#output:ran_uni_var = [[ 2.81662631,  2.69823909,  3.59916234],
#                      [ 3.6251266 ,  2.05515909,  2.72103643]]
#tf.truncated_normal(shape, mean = 0, stddev = 1.0, dtype = tf.float32, seed = None, name = None )
# Draw random values from a truncated normal distribution
# On the normal curve, the intervals (μ-σ, μ+σ), (μ-2σ, μ+2σ), (μ-3σ, μ+3σ) cover more and more of the probability mass;
# tf.truncated_normal() only returns values inside (μ-2σ, μ+2σ): samples falling outside are dropped and redrawn
trun_var = tf.Variable(tf.truncated_normal([2, 2], seed =2))
#output: trun_var = [[-0.85811085, -0.19662298], [ 0.13895045, -1.22127676]]
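# A rough sanity check of the two-sigma truncation; the sample size of 10000 is
# arbitrary, but the minimum and maximum should both stay inside (-2, 2):
samples = tf.truncated_normal([10000], mean=0.0, stddev=1.0)
with tf.Session() as sess:
    vals = sess.run(samples)
    print(vals.min(), vals.max())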
#tf.one_hot(indices, depth, on_value = None, off_value = None, axis = None, dtype = None, name = None)
# indices: tensor of index positions; depth: size of the one-hot dimension; on_value: value written at each index, default 1;
# off_value: value everywhere else, default 0; axis: axis along which the one-hot dimension is inserted, default -1
one_var = tf.Variable(tf.one_hot([0, 2, -1, 1], depth = 3, on_value = 5, off_value = 0, axis = 1))
#output:one_var = [[5, 0, 0], [0, 0, 5], [0, 0, 0], [0, 5, 0]]
# The output dimensions come from indices and depth: for each entry of indices, on_value is written at that index along the depth axis (a negative index such as -1 yields a row of off_value); see the axis=0 sketch after the next example
# Another example
one_var = tf.Variable(tf.one_hot([[0,2],[1,-1]], depth = 3, on_value = 1, off_value = 0, axis = -1))
#output:one_var = [[[1, 0, 0], [0, 0, 1]],
#                  [[0, 1, 0], [0, 0, 0]]]
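# A sketch of the axis parameter: axis=0 places the depth dimension first, which
# effectively transposes the first 1-D example above (the variable name is illustrative):
one_axis0 = tf.Variable(tf.one_hot([0, 2, -1, 1], depth=3, on_value=5, off_value=0, axis=0))
#output:one_axis0 = [[5, 0, 0, 0],
#                    [0, 0, 0, 5],
#                    [0, 5, 0, 0]]    shape is [depth, features] = [3, 4]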
# TensorFlow variables cannot be printed directly; a session is needed. Two common ways:
#(1)
sess = tf.InteractiveSession()
#(2)
sess = tf.Session()
# Initialize a single variable (initializer.run() needs a default session, which InteractiveSession provides)
zero_var.initializer.run()
# Initialize all variables
sess.run(tf.global_variables_initializer())
# eval() needs a default session: tf.InteractiveSession installs itself as the default, a plain tf.Session() does not, so use sess.run() instead (or see the sketch below)
print(zero_var.eval())
sess.close()
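# A minimal sketch of using eval() with a plain tf.Session(): either pass the
# session explicitly or install it as the default for a block:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(zero_var.eval(session=sess))   # explicit session argument
with sess.as_default():
    print(zero_var.eval())           # default session inside the with-block
sess.close()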
# Placeholders: tf.placeholder(dtype, shape=None, name=None)
place_var = tf.placeholder(tf.float32, shape = [2, 2])
y = tf.matmul(place_var, place_var)
var = [[1,2],[3,4]]
# A with-block closes the session automatically on exit; otherwise it must be closed manually
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # feed_dict is a dict that maps each placeholder to the value fed in at run time;
    # place_var expects array-like data matching its shape [2, 2]
    y = sess.run(y, feed_dict={place_var: var})
    print(y)
#output:y = [[ 7. 10.],
#            [15. 22.]]
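# A variant sketch: with shape=None (or a None dimension) the same placeholder
# accepts inputs of different shapes at run time (the names here are illustrative):
flex = tf.placeholder(tf.float32, shape=None)
double = flex * 2
with tf.Session() as sess:
    print(sess.run(double, feed_dict={flex: [1, 2, 3]}))         # [2. 4. 6.]
    print(sess.run(double, feed_dict={flex: [[1, 2], [3, 4]]}))  # [[2. 4.] [6. 8.]]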