# Define the computation graph (pure TensorFlow 1.x style).
# NOTE(review): tf.placeholder / tf.Session / tf.string_join exist only in
# TF 1.x; under TF 2.x use the tf.compat.v1 variant shown below.
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    # A placeholder is filled with a concrete value when the session runs.
    x = tf.placeholder(name='x', shape=[], dtype=tf.string)
    y = tf.placeholder(name='y', shape=[], dtype=tf.string)
    z = tf.string_join([x, y], name='join', separator=' ')

# Execute the computation graph.
with tf.Session(graph=g) as sess:
    print(sess.run(fetches=z, feed_dict={x: "hello", y: "world"}))
import tensorflow as tf

# The same string-join graph, written with the TF 2.x compat.v1 API.
g = tf.compat.v1.Graph()
with g.as_default():
    x = tf.compat.v1.placeholder(name='x', shape=[], dtype=tf.string)
    y = tf.compat.v1.placeholder(name='y', shape=[], dtype=tf.string)
    z = tf.strings.join([x, y], name="join", separator=" ")

with tf.compat.v1.Session(graph=g) as sess:
    # `fetches` plays the role of a function's return value, while the
    # placeholders in `feed_dict` act like its argument list.
    result = sess.run(fetches=z, feed_dict={x: "hello", y: "world"})
    print(result)
# First-order derivative of y = a*x**2 + b*x + c with tf.GradientTape.
x = tf.Variable(0.0, name="x", dtype=tf.float32)
a = tf.constant(1.0)
b = tf.constant(-2.0)
c = tf.constant(1.0)

with tf.GradientTape() as tape:
    y = a * tf.pow(x, 2) + b * x + c

# Variables are watched automatically, so no tape.watch is needed for x.
dy_dx = tape.gradient(y, x)
# Constant tensors can be differentiated too, but they must be watched
# explicitly (only tf.Variable is watched automatically).
with tf.GradientTape() as tape:
    tape.watch([a, b, c])
    y = a * tf.pow(x, 2) + b * x + c

dy_dx, dy_da, dy_db, dy_dc = tape.gradient(y, [x, a, b, c])
# Second-order derivative via nested tapes: the outer tape records the
# inner tape's gradient computation.
with tf.GradientTape() as tape2:
    with tf.GradientTape() as tape1:
        y = a * tf.pow(x, 2) + b * x + c
    dy_dx = tape1.gradient(y, x)
dy2_dx2 = tape2.gradient(dy_dx, x)
# GradientTape can also be used inside autograph (tf.function).
@tf.function
def f(x):
    """Return (dy/dx, y) for y = a*x**2 + b*x + c evaluated at x."""
    a = tf.constant(1.0)
    b = tf.constant(-2.0)
    c = tf.constant(1.0)

    # Cast the independent variable to tf.float32.
    x = tf.cast(x, tf.float32)
    with tf.GradientTape() as tape:
        # x is a plain tensor here, so it must be watched explicitly.
        tape.watch(x)
        y = a * tf.pow(x, 2) + b * x + c
    dy_dx = tape.gradient(y, x)
    # NOTE(review): the garbled source drops the function's tail and the
    # computed values were discarded; returning them matches the usual form
    # of this example — confirm against the original.
    return (dy_dx, y)
# Minimize y = a*x**2 + b*x + c by gradient descent in eager mode.
x = tf.Variable(0.0, name="x", dtype=tf.float32)
a = tf.constant(1.0)
b = tf.constant(-2.0)
c = tf.constant(1.0)

optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
# Fixed: source had the typo `for _ inrange(1000)`.
for _ in range(1000):
    with tf.GradientTape() as tape:
        y = a * tf.pow(x, 2) + b * x + c
    dy_dx = tape.gradient(y, x)
    optimizer.apply_gradients(grads_and_vars=[(dy_dx, x)])
# Variable and optimizer used by the graph-mode minimization below.
x = tf.Variable(0.0, name="x", dtype=tf.float32)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
@tf.function
def minimizef():
    """Run 1000 SGD steps minimizing y = a*x**2 + b*x + c in graph mode."""
    a = tf.constant(1.0)
    b = tf.constant(-2.0)
    c = tf.constant(1.0)

    # Inside autograph use tf.range(1000), not range(1000), so the loop is
    # traced as a graph op instead of being unrolled at trace time.
    for _ in tf.range(1000):
        with tf.GradientTape() as tape:
            y = a * tf.pow(x, 2) + b * x + c
        dy_dx = tape.gradient(y, x)
        optimizer.apply_gradients(grads_and_vars=[(dy_dx, x)])
    # NOTE(review): the garbled source truncates here; returning y matches
    # the usual form of this example — confirm against the original.
    return y